xref: /llvm-project/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp (revision ec1f4e7c3b17656658c9cf49c33bc06c4bc747c2)
1 //===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
10 
11 #include "../PassDetail.h"
12 #include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
13 #include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
14 #include "mlir/Dialect/LLVMIR/LLVMDialect.h"
15 #include "mlir/Dialect/StandardOps/IR/Ops.h"
16 #include "mlir/Dialect/Vector/VectorOps.h"
17 #include "mlir/IR/AffineMap.h"
18 #include "mlir/IR/Attributes.h"
19 #include "mlir/IR/Builders.h"
20 #include "mlir/IR/MLIRContext.h"
21 #include "mlir/IR/Module.h"
22 #include "mlir/IR/Operation.h"
23 #include "mlir/IR/PatternMatch.h"
24 #include "mlir/IR/StandardTypes.h"
25 #include "mlir/IR/Types.h"
26 #include "mlir/Target/LLVMIR/TypeTranslation.h"
27 #include "mlir/Transforms/DialectConversion.h"
28 #include "mlir/Transforms/Passes.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Module.h"
31 #include "llvm/IR/Type.h"
32 #include "llvm/Support/Allocator.h"
33 #include "llvm/Support/ErrorHandling.h"
34 
35 using namespace mlir;
36 using namespace mlir::vector;
37 
38 // Helper to reduce vector type by one rank at front.
39 static VectorType reducedVectorTypeFront(VectorType tp) {
40   assert((tp.getRank() > 1) && "unlowerable vector type");
41   return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
42 }
43 
44 // Helper to reduce vector type by *all* but one rank at back.
45 static VectorType reducedVectorTypeBack(VectorType tp) {
46   assert((tp.getRank() > 1) && "unlowerable vector type");
47   return VectorType::get(tp.getShape().take_back(), tp.getElementType());
48 }
49 
50 // Helper that picks the proper sequence for inserting.
51 static Value insertOne(ConversionPatternRewriter &rewriter,
52                        LLVMTypeConverter &typeConverter, Location loc,
53                        Value val1, Value val2, Type llvmType, int64_t rank,
54                        int64_t pos) {
55   if (rank == 1) {
56     auto idxType = rewriter.getIndexType();
57     auto constant = rewriter.create<LLVM::ConstantOp>(
58         loc, typeConverter.convertType(idxType),
59         rewriter.getIntegerAttr(idxType, pos));
60     return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2,
61                                                   constant);
62   }
63   return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2,
64                                               rewriter.getI64ArrayAttr(pos));
65 }
66 
67 // Helper that picks the proper sequence for inserting.
68 static Value insertOne(PatternRewriter &rewriter, Location loc, Value from,
69                        Value into, int64_t offset) {
70   auto vectorType = into.getType().cast<VectorType>();
71   if (vectorType.getRank() > 1)
72     return rewriter.create<InsertOp>(loc, from, into, offset);
73   return rewriter.create<vector::InsertElementOp>(
74       loc, vectorType, from, into,
75       rewriter.create<ConstantIndexOp>(loc, offset));
76 }
77 
78 // Helper that picks the proper sequence for extracting.
79 static Value extractOne(ConversionPatternRewriter &rewriter,
80                         LLVMTypeConverter &typeConverter, Location loc,
81                         Value val, Type llvmType, int64_t rank, int64_t pos) {
82   if (rank == 1) {
83     auto idxType = rewriter.getIndexType();
84     auto constant = rewriter.create<LLVM::ConstantOp>(
85         loc, typeConverter.convertType(idxType),
86         rewriter.getIntegerAttr(idxType, pos));
87     return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val,
88                                                    constant);
89   }
90   return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val,
91                                                rewriter.getI64ArrayAttr(pos));
92 }
93 
94 // Helper that picks the proper sequence for extracting.
95 static Value extractOne(PatternRewriter &rewriter, Location loc, Value vector,
96                         int64_t offset) {
97   auto vectorType = vector.getType().cast<VectorType>();
98   if (vectorType.getRank() > 1)
99     return rewriter.create<ExtractOp>(loc, vector, offset);
100   return rewriter.create<vector::ExtractElementOp>(
101       loc, vectorType.getElementType(), vector,
102       rewriter.create<ConstantIndexOp>(loc, offset));
103 }
104 
105 // Helper that returns a subset of `arrayAttr` as a vector of int64_t.
106 // TODO: Better support for attribute subtype forwarding + slicing.
107 static SmallVector<int64_t, 4> getI64SubArray(ArrayAttr arrayAttr,
108                                               unsigned dropFront = 0,
109                                               unsigned dropBack = 0) {
110   assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds");
111   auto range = arrayAttr.getAsRange<IntegerAttr>();
112   SmallVector<int64_t, 4> res;
113   res.reserve(arrayAttr.size() - dropFront - dropBack);
114   for (auto it = range.begin() + dropFront, eit = range.end() - dropBack;
115        it != eit; ++it)
116     res.push_back((*it).getValue().getSExtValue());
117   return res;
118 }
119 
120 // Helper that returns data layout alignment of an operation with memref.
121 template <typename T>
122 LogicalResult getMemRefAlignment(LLVMTypeConverter &typeConverter, T op,
123                                  unsigned &align) {
124   Type elementTy =
125       typeConverter.convertType(op.getMemRefType().getElementType());
126   if (!elementTy)
127     return failure();
128 
129   auto dataLayout = typeConverter.getDialect()->getLLVMModule().getDataLayout();
130   // TODO: this should be abstracted away to avoid depending on translation.
131   align = dataLayout.getPrefTypeAlignment(LLVM::translateTypeToLLVMIR(
132       elementTy.cast<LLVM::LLVMType>(),
133       typeConverter.getDialect()->getLLVMContext()));
134   return success();
135 }
136 
137 // Helper that returns vector of pointers given a base and an index vector.
138 LogicalResult getIndexedPtrs(ConversionPatternRewriter &rewriter,
139                              LLVMTypeConverter &typeConverter, Location loc,
140                              Value memref, Value indices, MemRefType memRefType,
141                              VectorType vType, Type iType, Value &ptrs) {
142   // Inspect stride and offset structure.
143   //
144   // TODO: flat memory only for now, generalize
145   //
146   int64_t offset;
147   SmallVector<int64_t, 4> strides;
148   auto successStrides = getStridesAndOffset(memRefType, strides, offset);
149   if (failed(successStrides) || strides.size() != 1 || strides[0] != 1 ||
150       offset != 0 || memRefType.getMemorySpace() != 0)
151     return failure();
152 
153   // Create a vector of pointers from base and indices.
154   MemRefDescriptor memRefDescriptor(memref);
155   Value base = memRefDescriptor.alignedPtr(rewriter, loc);
156   int64_t size = vType.getDimSize(0);
157   auto pType = memRefDescriptor.getElementType();
158   auto ptrsType = LLVM::LLVMType::getVectorTy(pType, size);
159   ptrs = rewriter.create<LLVM::GEPOp>(loc, ptrsType, base, indices);
160   return success();
161 }
162 
// Replaces a vector.transfer_read with a plain aligned llvm.load from
// `dataPtr`. `loc` and `operands` are unused in this overload; the signature
// mirrors the TransferWriteOp overload below so templated callers can invoke
// either uniformly.
static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferReadOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  // Fail when the element alignment cannot be resolved.
  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(xferOp, dataPtr, align);
  return success();
}
174 
// Replaces a vector.transfer_read with an llvm masked load from `dataPtr`:
// lanes disabled by `mask` receive a splat of the transfer's padding value.
static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferReadOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  auto toLLVMTy = [&](Type t) { return typeConverter.convertType(t); };
  // Build the pass-through vector from the padding value, then cast it into
  // the LLVM dialect so it can feed the masked load below.
  VectorType fillType = xferOp.getVectorType();
  Value fill = rewriter.create<SplatOp>(loc, fillType, xferOp.padding());
  fill = rewriter.create<LLVM::DialectCastOp>(loc, toLLVMTy(fillType), fill);

  // Bail if the result vector type cannot be lowered.
  Type vecTy = typeConverter.convertType(xferOp.getVectorType());
  if (!vecTy)
    return failure();

  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();

  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      xferOp, vecTy, dataPtr, mask, ValueRange{fill},
      rewriter.getI32IntegerAttr(align));
  return success();
}
198 
// Replaces a vector.transfer_write with a plain aligned llvm.store of the
// (already converted) vector operand to `dataPtr`. `loc` is unused; the
// signature mirrors the TransferReadOp overload above.
static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferWriteOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();
  // The adaptor exposes the converted operands of the original op.
  auto adaptor = TransferWriteOpAdaptor(operands);
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(xferOp, adaptor.vector(), dataPtr,
                                             align);
  return success();
}
212 
// Replaces a vector.transfer_write with an llvm masked store of the converted
// vector operand to `dataPtr`; lanes disabled by `mask` are left untouched.
static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferWriteOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  unsigned align;
  if (failed(getMemRefAlignment(typeConverter, xferOp, align)))
    return failure();

  // The adaptor exposes the converted operands of the original op.
  auto adaptor = TransferWriteOpAdaptor(operands);
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      xferOp, adaptor.vector(), dataPtr, mask,
      rewriter.getI32IntegerAttr(align));
  return success();
}
228 
// Overload returning the read adaptor, so templated transfer lowering code
// can obtain the proper operand adaptor for either transfer op kind.
static TransferReadOpAdaptor getTransferOpAdapter(TransferReadOp xferOp,
                                                  ArrayRef<Value> operands) {
  return TransferReadOpAdaptor(operands);
}
233 
// Overload returning the write adaptor, see the TransferReadOp overload above.
static TransferWriteOpAdaptor getTransferOpAdapter(TransferWriteOp xferOp,
                                                   ArrayRef<Value> operands) {
  return TransferWriteOpAdaptor(operands);
}
238 
239 namespace {
240 
241 /// Conversion pattern for a vector.matrix_multiply.
242 /// This is lowered directly to the proper llvm.intr.matrix.multiply.
243 class VectorMatmulOpConversion : public ConvertToLLVMPattern {
244 public:
245   explicit VectorMatmulOpConversion(MLIRContext *context,
246                                     LLVMTypeConverter &typeConverter)
247       : ConvertToLLVMPattern(vector::MatmulOp::getOperationName(), context,
248                              typeConverter) {}
249 
250   LogicalResult
251   matchAndRewrite(Operation *op, ArrayRef<Value> operands,
252                   ConversionPatternRewriter &rewriter) const override {
253     auto matmulOp = cast<vector::MatmulOp>(op);
254     auto adaptor = vector::MatmulOpAdaptor(operands);
255     rewriter.replaceOpWithNewOp<LLVM::MatrixMultiplyOp>(
256         op, typeConverter.convertType(matmulOp.res().getType()), adaptor.lhs(),
257         adaptor.rhs(), matmulOp.lhs_rows(), matmulOp.lhs_columns(),
258         matmulOp.rhs_columns());
259     return success();
260   }
261 };
262 
263 /// Conversion pattern for a vector.flat_transpose.
264 /// This is lowered directly to the proper llvm.intr.matrix.transpose.
265 class VectorFlatTransposeOpConversion : public ConvertToLLVMPattern {
266 public:
267   explicit VectorFlatTransposeOpConversion(MLIRContext *context,
268                                            LLVMTypeConverter &typeConverter)
269       : ConvertToLLVMPattern(vector::FlatTransposeOp::getOperationName(),
270                              context, typeConverter) {}
271 
272   LogicalResult
273   matchAndRewrite(Operation *op, ArrayRef<Value> operands,
274                   ConversionPatternRewriter &rewriter) const override {
275     auto transOp = cast<vector::FlatTransposeOp>(op);
276     auto adaptor = vector::FlatTransposeOpAdaptor(operands);
277     rewriter.replaceOpWithNewOp<LLVM::MatrixTransposeOp>(
278         transOp, typeConverter.convertType(transOp.res().getType()),
279         adaptor.matrix(), transOp.rows(), transOp.columns());
280     return success();
281   }
282 };
283 
/// Conversion pattern for a vector.gather.
/// Lowers to llvm.masked.gather over a vector of pointers computed from the
/// memref base and the index vector (flat memrefs only, see getIndexedPtrs).
class VectorGatherOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorGatherOpConversion(MLIRContext *context,
                                    LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::GatherOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto gather = cast<vector::GatherOp>(op);
    auto adaptor = vector::GatherOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(typeConverter, gather, align)))
      return failure();

    // Get index ptrs.
    VectorType vType = gather.getResultVectorType();
    Type iType = gather.getIndicesVectorType().getElementType();
    Value ptrs;
    if (failed(getIndexedPtrs(rewriter, typeConverter, loc, adaptor.base(),
                              adaptor.indices(), gather.getMemRefType(), vType,
                              iType, ptrs)))
      return failure();

    // Replace with the gather intrinsic.
    // The pass-through operand is optional; forward an empty range when the
    // original op did not provide one.
    ValueRange v = (llvm::size(adaptor.pass_thru()) == 0) ? ValueRange({})
                                                          : adaptor.pass_thru();
    rewriter.replaceOpWithNewOp<LLVM::masked_gather>(
        gather, typeConverter.convertType(vType), ptrs, adaptor.mask(), v,
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};
322 
/// Conversion pattern for a vector.scatter.
/// Lowers to llvm.masked.scatter over a vector of pointers computed from the
/// memref base and the index vector (flat memrefs only, see getIndexedPtrs).
class VectorScatterOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorScatterOpConversion(MLIRContext *context,
                                     LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::ScatterOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto scatter = cast<vector::ScatterOp>(op);
    auto adaptor = vector::ScatterOpAdaptor(operands);

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(typeConverter, scatter, align)))
      return failure();

    // Get index ptrs.
    VectorType vType = scatter.getValueVectorType();
    Type iType = scatter.getIndicesVectorType().getElementType();
    Value ptrs;
    if (failed(getIndexedPtrs(rewriter, typeConverter, loc, adaptor.base(),
                              adaptor.indices(), scatter.getMemRefType(), vType,
                              iType, ptrs)))
      return failure();

    // Replace with the scatter intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_scatter>(
        scatter, adaptor.value(), ptrs, adaptor.mask(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};
359 
/// Conversion pattern for all vector reductions.
/// Signless i32/i64 kinds (add/mul/min/max/and/or/xor) and f32/f64 kinds
/// (add/mul/min/max) map to the corresponding
/// llvm.experimental.vector.reduce.* intrinsics; any other element type or
/// kind fails to match.
class VectorReductionOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorReductionOpConversion(MLIRContext *context,
                                       LLVMTypeConverter &typeConverter,
                                       bool reassociateFP)
      : ConvertToLLVMPattern(vector::ReductionOp::getOperationName(), context,
                             typeConverter),
        reassociateFPReductions(reassociateFP) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto reductionOp = cast<vector::ReductionOp>(op);
    auto kind = reductionOp.kind();
    // The scalar result type determines which intrinsic family applies.
    Type eltType = reductionOp.dest().getType();
    Type llvmType = typeConverter.convertType(eltType);
    if (eltType.isSignlessInteger(32) || eltType.isSignlessInteger(64)) {
      // Integer reductions: add/mul/min/max/and/or/xor.
      // Note: "min"/"max" lower to the *signed* smin/smax intrinsics.
      if (kind == "add")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_add>(
            op, llvmType, operands[0]);
      else if (kind == "mul")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_mul>(
            op, llvmType, operands[0]);
      else if (kind == "min")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_smin>(
            op, llvmType, operands[0]);
      else if (kind == "max")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_smax>(
            op, llvmType, operands[0]);
      else if (kind == "and")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_and>(
            op, llvmType, operands[0]);
      else if (kind == "or")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_or>(
            op, llvmType, operands[0]);
      else if (kind == "xor")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_xor>(
            op, llvmType, operands[0]);
      else
        // Unsupported integer reduction kind.
        return failure();
      return success();

    } else if (eltType.isF32() || eltType.isF64()) {
      // Floating-point reductions: add/mul/min/max
      if (kind == "add") {
        // Optional accumulator (or zero).
        Value acc = operands.size() > 1 ? operands[1]
                                        : rewriter.create<LLVM::ConstantOp>(
                                              op->getLoc(), llvmType,
                                              rewriter.getZeroAttr(eltType));
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_v2_fadd>(
            op, llvmType, acc, operands[0],
            rewriter.getBoolAttr(reassociateFPReductions));
      } else if (kind == "mul") {
        // Optional accumulator (or one).
        Value acc = operands.size() > 1
                        ? operands[1]
                        : rewriter.create<LLVM::ConstantOp>(
                              op->getLoc(), llvmType,
                              rewriter.getFloatAttr(eltType, 1.0));
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_v2_fmul>(
            op, llvmType, acc, operands[0],
            rewriter.getBoolAttr(reassociateFPReductions));
      } else if (kind == "min")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_fmin>(
            op, llvmType, operands[0]);
      else if (kind == "max")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_fmax>(
            op, llvmType, operands[0]);
      else
        // Unsupported floating-point reduction kind.
        return failure();
      return success();
    }
    // Unsupported element type.
    return failure();
  }

private:
  // Forwarded as the boolean reassociation flag on the v2 fadd/fmul
  // reduction intrinsics above.
  const bool reassociateFPReductions;
};
441 
/// Conversion pattern for a vector.shuffle.
/// Rank-1 shuffles with identical operand vector types map directly onto
/// llvm.shufflevector; all other cases are expanded into per-position
/// extract/insert pairs.
class VectorShuffleOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorShuffleOpConversion(MLIRContext *context,
                                     LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::ShuffleOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::ShuffleOpAdaptor(operands);
    auto shuffleOp = cast<vector::ShuffleOp>(op);
    auto v1Type = shuffleOp.getV1VectorType();
    auto v2Type = shuffleOp.getV2VectorType();
    auto vectorType = shuffleOp.getVectorType();
    Type llvmType = typeConverter.convertType(vectorType);
    auto maskArrayAttr = shuffleOp.mask();

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    // Get rank and dimension sizes.
    int64_t rank = vectorType.getRank();
    assert(v1Type.getRank() == rank);
    assert(v2Type.getRank() == rank);
    int64_t v1Dim = v1Type.getDimSize(0);

    // For rank 1, where both operands have *exactly* the same vector type,
    // there is direct shuffle support in LLVM. Use it!
    if (rank == 1 && v1Type == v2Type) {
      Value shuffle = rewriter.create<LLVM::ShuffleVectorOp>(
          loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
      rewriter.replaceOp(op, shuffle);
      return success();
    }

    // For all other cases, insert the individual values individually.
    // `insPos` walks the result positions in mask order.
    Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    int64_t insPos = 0;
    for (auto en : llvm::enumerate(maskArrayAttr)) {
      // Mask entries >= v1Dim select from the second operand (rebased index).
      int64_t extPos = en.value().cast<IntegerAttr>().getInt();
      Value value = adaptor.v1();
      if (extPos >= v1Dim) {
        extPos -= v1Dim;
        value = adaptor.v2();
      }
      Value extract = extractOne(rewriter, typeConverter, loc, value, llvmType,
                                 rank, extPos);
      insert = insertOne(rewriter, typeConverter, loc, insert, extract,
                         llvmType, rank, insPos++);
    }
    rewriter.replaceOp(op, insert);
    return success();
  }
};
499 
500 class VectorExtractElementOpConversion : public ConvertToLLVMPattern {
501 public:
502   explicit VectorExtractElementOpConversion(MLIRContext *context,
503                                             LLVMTypeConverter &typeConverter)
504       : ConvertToLLVMPattern(vector::ExtractElementOp::getOperationName(),
505                              context, typeConverter) {}
506 
507   LogicalResult
508   matchAndRewrite(Operation *op, ArrayRef<Value> operands,
509                   ConversionPatternRewriter &rewriter) const override {
510     auto adaptor = vector::ExtractElementOpAdaptor(operands);
511     auto extractEltOp = cast<vector::ExtractElementOp>(op);
512     auto vectorType = extractEltOp.getVectorType();
513     auto llvmType = typeConverter.convertType(vectorType.getElementType());
514 
515     // Bail if result type cannot be lowered.
516     if (!llvmType)
517       return failure();
518 
519     rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
520         op, llvmType, adaptor.vector(), adaptor.position());
521     return success();
522   }
523 };
524 
/// Conversion pattern for a vector.extract.
/// Vector results lower to a single llvm.extractvalue; scalar results lower
/// to an (optional) llvm.extractvalue down to the innermost 1-D vector
/// followed by an llvm.extractelement.
class VectorExtractOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorExtractOpConversion(MLIRContext *context,
                                     LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::ExtractOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::ExtractOpAdaptor(operands);
    auto extractOp = cast<vector::ExtractOp>(op);
    auto vectorType = extractOp.getVectorType();
    auto resultType = extractOp.getResult().getType();
    auto llvmResultType = typeConverter.convertType(resultType);
    auto positionArrayAttr = extractOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // One-shot extraction of vector from array (only requires extractvalue).
    if (resultType.isa<VectorType>()) {
      Value extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, llvmResultType, adaptor.vector(), positionArrayAttr);
      rewriter.replaceOp(op, extracted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    // All but the last position index peel the enclosing aggregate levels.
    auto *context = op->getContext();
    Value extracted = adaptor.vector();
    auto positionAttrs = positionArrayAttr.getValue();
    if (positionAttrs.size() > 1) {
      auto oneDVectorType = reducedVectorTypeBack(vectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter.convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Remaining extraction of element from 1-D LLVM vector
    // using the last position index as an i64 constant.
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto i64Type = LLVM::LLVMType::getInt64Ty(typeConverter.getDialect());
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    extracted =
        rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
    rewriter.replaceOp(op, extracted);

    return success();
  }
};
579 
580 /// Conversion pattern that turns a vector.fma on a 1-D vector
581 /// into an llvm.intr.fmuladd. This is a trivial 1-1 conversion.
582 /// This does not match vectors of n >= 2 rank.
583 ///
584 /// Example:
585 /// ```
586 ///  vector.fma %a, %a, %a : vector<8xf32>
587 /// ```
588 /// is converted to:
589 /// ```
590 ///  llvm.intr.fmuladd %va, %va, %va:
591 ///    (!llvm<"<8 x float>">, !llvm<"<8 x float>">, !llvm<"<8 x float>">)
592 ///    -> !llvm<"<8 x float>">
593 /// ```
594 class VectorFMAOp1DConversion : public ConvertToLLVMPattern {
595 public:
596   explicit VectorFMAOp1DConversion(MLIRContext *context,
597                                    LLVMTypeConverter &typeConverter)
598       : ConvertToLLVMPattern(vector::FMAOp::getOperationName(), context,
599                              typeConverter) {}
600 
601   LogicalResult
602   matchAndRewrite(Operation *op, ArrayRef<Value> operands,
603                   ConversionPatternRewriter &rewriter) const override {
604     auto adaptor = vector::FMAOpAdaptor(operands);
605     vector::FMAOp fmaOp = cast<vector::FMAOp>(op);
606     VectorType vType = fmaOp.getVectorType();
607     if (vType.getRank() != 1)
608       return failure();
609     rewriter.replaceOpWithNewOp<LLVM::FMulAddOp>(op, adaptor.lhs(),
610                                                  adaptor.rhs(), adaptor.acc());
611     return success();
612   }
613 };
614 
615 class VectorInsertElementOpConversion : public ConvertToLLVMPattern {
616 public:
617   explicit VectorInsertElementOpConversion(MLIRContext *context,
618                                            LLVMTypeConverter &typeConverter)
619       : ConvertToLLVMPattern(vector::InsertElementOp::getOperationName(),
620                              context, typeConverter) {}
621 
622   LogicalResult
623   matchAndRewrite(Operation *op, ArrayRef<Value> operands,
624                   ConversionPatternRewriter &rewriter) const override {
625     auto adaptor = vector::InsertElementOpAdaptor(operands);
626     auto insertEltOp = cast<vector::InsertElementOp>(op);
627     auto vectorType = insertEltOp.getDestVectorType();
628     auto llvmType = typeConverter.convertType(vectorType);
629 
630     // Bail if result type cannot be lowered.
631     if (!llvmType)
632       return failure();
633 
634     rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>(
635         op, llvmType, adaptor.dest(), adaptor.source(), adaptor.position());
636     return success();
637   }
638 };
639 
/// Conversion pattern for a vector.insert.
/// Vector sources lower to a single llvm.insertvalue; scalar sources lower to
/// an (optional) llvm.extractvalue of the innermost 1-D vector, an
/// llvm.insertelement into it, and an (optional) llvm.insertvalue back into
/// the aggregate.
class VectorInsertOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorInsertOpConversion(MLIRContext *context,
                                    LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::InsertOp::getOperationName(), context,
                             typeConverter) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::InsertOpAdaptor(operands);
    auto insertOp = cast<vector::InsertOp>(op);
    auto sourceType = insertOp.getSourceType();
    auto destVectorType = insertOp.getDestVectorType();
    auto llvmResultType = typeConverter.convertType(destVectorType);
    auto positionArrayAttr = insertOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // One-shot insertion of a vector into an array (only requires insertvalue).
    if (sourceType.isa<VectorType>()) {
      Value inserted = rewriter.create<LLVM::InsertValueOp>(
          loc, llvmResultType, adaptor.dest(), adaptor.source(),
          positionArrayAttr);
      rewriter.replaceOp(op, inserted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    // All but the last position index peel the enclosing aggregate levels.
    auto *context = op->getContext();
    Value extracted = adaptor.dest();
    auto positionAttrs = positionArrayAttr.getValue();
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto oneDVectorType = destVectorType;
    if (positionAttrs.size() > 1) {
      oneDVectorType = reducedVectorTypeBack(destVectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter.convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Insertion of an element into a 1-D LLVM vector.
    // The last position index becomes an i64 insertelement index.
    auto i64Type = LLVM::LLVMType::getInt64Ty(typeConverter.getDialect());
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    Value inserted = rewriter.create<LLVM::InsertElementOp>(
        loc, typeConverter.convertType(oneDVectorType), extracted,
        adaptor.source(), constant);

    // Potential insertion of resulting 1-D vector into array.
    if (positionAttrs.size() > 1) {
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      inserted = rewriter.create<LLVM::InsertValueOp>(loc, llvmResultType,
                                                      adaptor.dest(), inserted,
                                                      nMinusOnePositionAttrs);
    }

    rewriter.replaceOp(op, inserted);
    return success();
  }
};
706 
707 /// Rank reducing rewrite for n-D FMA into (n-1)-D FMA where n > 1.
708 ///
709 /// Example:
710 /// ```
711 ///   %d = vector.fma %a, %b, %c : vector<2x4xf32>
712 /// ```
713 /// is rewritten into:
714 /// ```
715 ///  %r = splat %f0: vector<2x4xf32>
716 ///  %va = vector.extractvalue %a[0] : vector<2x4xf32>
717 ///  %vb = vector.extractvalue %b[0] : vector<2x4xf32>
718 ///  %vc = vector.extractvalue %c[0] : vector<2x4xf32>
719 ///  %vd = vector.fma %va, %vb, %vc : vector<4xf32>
720 ///  %r2 = vector.insertvalue %vd, %r[0] : vector<4xf32> into vector<2x4xf32>
721 ///  %va2 = vector.extractvalue %a2[1] : vector<2x4xf32>
722 ///  %vb2 = vector.extractvalue %b2[1] : vector<2x4xf32>
723 ///  %vc2 = vector.extractvalue %c2[1] : vector<2x4xf32>
724 ///  %vd2 = vector.fma %va2, %vb2, %vc2 : vector<4xf32>
725 ///  %r3 = vector.insertvalue %vd2, %r2[1] : vector<4xf32> into vector<2x4xf32>
726 ///  // %r3 holds the final value.
727 /// ```
728 class VectorFMAOpNDRewritePattern : public OpRewritePattern<FMAOp> {
729 public:
730   using OpRewritePattern<FMAOp>::OpRewritePattern;
731 
732   LogicalResult matchAndRewrite(FMAOp op,
733                                 PatternRewriter &rewriter) const override {
734     auto vType = op.getVectorType();
735     if (vType.getRank() < 2)
736       return failure();
737 
738     auto loc = op.getLoc();
739     auto elemType = vType.getElementType();
740     Value zero = rewriter.create<ConstantOp>(loc, elemType,
741                                              rewriter.getZeroAttr(elemType));
742     Value desc = rewriter.create<SplatOp>(loc, vType, zero);
743     for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) {
744       Value extrLHS = rewriter.create<ExtractOp>(loc, op.lhs(), i);
745       Value extrRHS = rewriter.create<ExtractOp>(loc, op.rhs(), i);
746       Value extrACC = rewriter.create<ExtractOp>(loc, op.acc(), i);
747       Value fma = rewriter.create<FMAOp>(loc, extrLHS, extrRHS, extrACC);
748       desc = rewriter.create<InsertOp>(loc, fma, desc, i);
749     }
750     rewriter.replaceOp(op, desc);
751     return success();
752   }
753 };
754 
// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have different ranks. When ranks differ, InsertStridedSlice needs to extract
// a properly ranked vector from the destination vector into which to insert.
// This pattern only takes care of this part and forwards the rest of the
// conversion to another pattern that converts InsertStridedSlice for operands
// of the same rank:
//   1. the proper subvector is extracted from the destination vector
//   2. a new InsertStridedSlice op is created to insert the source in the
//   destination subvector
//   3. the destination subvector is inserted back in the proper place
//   4. the op is replaced by the result of step 3.
// The new InsertStridedSlice from step 2. will be picked up by a
// `VectorInsertStridedSliceOpSameRankRewritePattern`.
class VectorInsertStridedSliceOpDifferentRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    // Nothing to insert along: bail out.
    if (op.offsets().getValue().empty())
      return failure();

    auto loc = op.getLoc();
    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    // Same-rank insertions are handled by the companion pattern
    // `VectorInsertStridedSliceOpSameRankRewritePattern`.
    if (rankDiff == 0)
      return failure();

    int64_t rankRest = dstType.getRank() - rankDiff;
    // Extract / insert the subvector of matching rank and InsertStridedSlice
    // on it. The leading `rankDiff` offsets locate that subvector inside the
    // destination (second argument label: drop the trailing `rankRest`
    // offsets).
    Value extracted =
        rewriter.create<ExtractOp>(loc, op.dest(),
                                   getI64SubArray(op.offsets(), /*dropFront=*/0,
                                                  /*dropBack=*/rankRest));
    // A different pattern will kick in for InsertStridedSlice with matching
    // ranks.
    auto stridedSliceInnerOp = rewriter.create<InsertStridedSliceOp>(
        loc, op.source(), extracted,
        getI64SubArray(op.offsets(), /*dropFront=*/rankDiff),
        getI64SubArray(op.strides(), /*dropFront=*/0));
    // Put the updated subvector back in place and replace the original op.
    rewriter.replaceOpWithNewOp<InsertOp>(
        op, stridedSliceInnerOp.getResult(), op.dest(),
        getI64SubArray(op.offsets(), /*dropFront=*/0,
                       /*dropBack=*/rankRest));
    return success();
  }
};
809 
// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have the same rank. The op is unrolled along the most major dimension: for
// each slice of the source,
//   1. the slice is extracted from the source
//   2. if the slice is itself a vector, a rank-reduced InsertStridedSliceOp
//      is created to combine it with the matching destination subvector; this
//      new op is then lowered recursively by this same pattern
//   3. the result is inserted into the running destination value at the
//      strided offset
// and the op is finally replaced by the accumulated destination value.
class VectorInsertStridedSliceOpSameRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    // Nothing to insert along: bail out.
    if (op.offsets().getValue().empty())
      return failure();

    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    // Different-rank insertions are handled by
    // `VectorInsertStridedSliceOpDifferentRankRewritePattern`.
    if (rankDiff != 0)
      return failure();

    // Identical types: the insertion covers the whole destination.
    if (srcType == dstType) {
      rewriter.replaceOp(op, op.source());
      return success();
    }

    // Offset / size / stride along the most major (leading) dimension.
    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = srcType.getShape().front();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    Value res = op.dest();
    // For each slice of the source vector along the most major dimension.
    // `idx` walks the source; `off` walks the destination at `stride` steps.
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      // 1. extract the proper subvector (or element) from source
      Value extractedSource = extractOne(rewriter, loc, op.source(), idx);
      if (extractedSource.getType().isa<VectorType>()) {
        // 2. If we have a vector, extract the proper subvector from destination
        // Otherwise we are at the element level and no need to recurse.
        Value extractedDest = extractOne(rewriter, loc, op.dest(), off);
        // 3. Reduce the problem to lowering a new InsertStridedSlice op with
        // smaller rank.
        extractedSource = rewriter.create<InsertStridedSliceOp>(
            loc, extractedSource, extractedDest,
            getI64SubArray(op.offsets(), /* dropFront=*/1),
            getI64SubArray(op.strides(), /* dropFront=*/1));
      }
      // 4. Insert the extractedSource into the res vector.
      res = insertOne(rewriter, loc, extractedSource, res, off);
    }

    rewriter.replaceOp(op, res);
    return success();
  }
  /// This pattern creates recursive InsertStridedSliceOp, but the recursion is
  /// bounded as the rank is strictly decreasing.
  bool hasBoundedRewriteRecursion() const final { return true; }
};
877 
878 class VectorTypeCastOpConversion : public ConvertToLLVMPattern {
879 public:
880   explicit VectorTypeCastOpConversion(MLIRContext *context,
881                                       LLVMTypeConverter &typeConverter)
882       : ConvertToLLVMPattern(vector::TypeCastOp::getOperationName(), context,
883                              typeConverter) {}
884 
885   LogicalResult
886   matchAndRewrite(Operation *op, ArrayRef<Value> operands,
887                   ConversionPatternRewriter &rewriter) const override {
888     auto loc = op->getLoc();
889     vector::TypeCastOp castOp = cast<vector::TypeCastOp>(op);
890     MemRefType sourceMemRefType =
891         castOp.getOperand().getType().cast<MemRefType>();
892     MemRefType targetMemRefType =
893         castOp.getResult().getType().cast<MemRefType>();
894 
895     // Only static shape casts supported atm.
896     if (!sourceMemRefType.hasStaticShape() ||
897         !targetMemRefType.hasStaticShape())
898       return failure();
899 
900     auto llvmSourceDescriptorTy =
901         operands[0].getType().dyn_cast<LLVM::LLVMType>();
902     if (!llvmSourceDescriptorTy || !llvmSourceDescriptorTy.isStructTy())
903       return failure();
904     MemRefDescriptor sourceMemRef(operands[0]);
905 
906     auto llvmTargetDescriptorTy = typeConverter.convertType(targetMemRefType)
907                                       .dyn_cast_or_null<LLVM::LLVMType>();
908     if (!llvmTargetDescriptorTy || !llvmTargetDescriptorTy.isStructTy())
909       return failure();
910 
911     int64_t offset;
912     SmallVector<int64_t, 4> strides;
913     auto successStrides =
914         getStridesAndOffset(sourceMemRefType, strides, offset);
915     bool isContiguous = (strides.back() == 1);
916     if (isContiguous) {
917       auto sizes = sourceMemRefType.getShape();
918       for (int index = 0, e = strides.size() - 2; index < e; ++index) {
919         if (strides[index] != strides[index + 1] * sizes[index + 1]) {
920           isContiguous = false;
921           break;
922         }
923       }
924     }
925     // Only contiguous source tensors supported atm.
926     if (failed(successStrides) || !isContiguous)
927       return failure();
928 
929     auto int64Ty = LLVM::LLVMType::getInt64Ty(typeConverter.getDialect());
930 
931     // Create descriptor.
932     auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
933     Type llvmTargetElementTy = desc.getElementType();
934     // Set allocated ptr.
935     Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
936     allocated =
937         rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
938     desc.setAllocatedPtr(rewriter, loc, allocated);
939     // Set aligned ptr.
940     Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
941     ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
942     desc.setAlignedPtr(rewriter, loc, ptr);
943     // Fill offset 0.
944     auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
945     auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
946     desc.setOffset(rewriter, loc, zero);
947 
948     // Fill size and stride descriptors in memref.
949     for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
950       int64_t index = indexedSize.index();
951       auto sizeAttr =
952           rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
953       auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
954       desc.setSize(rewriter, loc, index, size);
955       auto strideAttr =
956           rewriter.getIntegerAttr(rewriter.getIndexType(), strides[index]);
957       auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
958       desc.setStride(rewriter, loc, index, stride);
959     }
960 
961     rewriter.replaceOp(op, {desc});
962     return success();
963   }
964 };
965 
/// Conversion pattern that converts a 1-D vector transfer read/write op in a
/// sequence of:
/// 1. Bitcast or addrspacecast to vector form.
/// 2. Create an offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
/// 3. Create a mask where offsetVector is compared against memref upper bound.
/// 4. Rewrite op as a masked read or write.
///
/// Only applies to 1-D transfers whose permutation map is the minor identity;
/// higher-rank transfers and non-identity maps are left for other patterns.
template <typename ConcreteOp>
class VectorTransferConversion : public ConvertToLLVMPattern {
public:
  explicit VectorTransferConversion(MLIRContext *context,
                                    LLVMTypeConverter &typeConv)
      : ConvertToLLVMPattern(ConcreteOp::getOperationName(), context,
                             typeConv) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto xferOp = cast<ConcreteOp>(op);
    auto adaptor = getTransferOpAdapter(xferOp, operands);

    // Restrict to 1-D transfers that carry at least one index.
    if (xferOp.getVectorType().getRank() > 1 ||
        llvm::size(xferOp.indices()) == 0)
      return failure();
    // Restrict to minor identity permutation maps.
    if (xferOp.permutation_map() !=
        AffineMap::getMinorIdentityMap(xferOp.permutation_map().getNumInputs(),
                                       xferOp.getVectorType().getRank(),
                                       op->getContext()))
      return failure();

    auto toLLVMTy = [&](Type t) { return typeConverter.convertType(t); };

    Location loc = op->getLoc();
    Type i64Type = rewriter.getIntegerType(64);
    MemRefType memRefType = xferOp.getMemRefType();

    // 1. Get the source/dst address as an LLVM vector pointer.
    //    The vector pointer would always be on address space 0, therefore
    //    addrspacecast shall be used when source/dst memrefs are not on
    //    address space 0.
    // TODO: support alignment when possible.
    Value dataPtr = getDataPtr(loc, memRefType, adaptor.memref(),
                               adaptor.indices(), rewriter, getModule());
    auto vecTy =
        toLLVMTy(xferOp.getVectorType()).template cast<LLVM::LLVMType>();
    Value vectorDataPtr;
    if (memRefType.getMemorySpace() == 0)
      vectorDataPtr =
          rewriter.create<LLVM::BitcastOp>(loc, vecTy.getPointerTo(), dataPtr);
    else
      vectorDataPtr = rewriter.create<LLVM::AddrSpaceCastOp>(
          loc, vecTy.getPointerTo(), dataPtr);

    // An unmasked dimension lowers to a plain (unmasked) load/store.
    if (!xferOp.isMaskedDim(0))
      return replaceTransferOpWithLoadOrStore(rewriter, typeConverter, loc,
                                              xferOp, operands, vectorDataPtr);

    // 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
    unsigned vecWidth = vecTy.getVectorNumElements();
    VectorType vectorCmpType = VectorType::get(vecWidth, i64Type);
    SmallVector<int64_t, 8> indices;
    indices.reserve(vecWidth);
    for (unsigned i = 0; i < vecWidth; ++i)
      indices.push_back(i);
    Value linearIndices = rewriter.create<ConstantOp>(
        loc, vectorCmpType,
        DenseElementsAttr::get(vectorCmpType, ArrayRef<int64_t>(indices)));
    linearIndices = rewriter.create<LLVM::DialectCastOp>(
        loc, toLLVMTy(vectorCmpType), linearIndices);

    // 3. Create offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
    // TODO: when the leaf transfer rank is k > 1 we need the last
    // `k` dimensions here.
    unsigned lastIndex = llvm::size(xferOp.indices()) - 1;
    Value offsetIndex = *(xferOp.indices().begin() + lastIndex);
    offsetIndex = rewriter.create<IndexCastOp>(loc, i64Type, offsetIndex);
    Value base = rewriter.create<SplatOp>(loc, vectorCmpType, offsetIndex);
    Value offsetVector = rewriter.create<AddIOp>(loc, base, linearIndices);

    // 4. Let dim the memref dimension, compute the vector comparison mask:
    //   [ offset + 0 .. offset + vector_length - 1 ] < [ dim .. dim ]
    Value dim = rewriter.create<DimOp>(loc, xferOp.memref(), lastIndex);
    dim = rewriter.create<IndexCastOp>(loc, i64Type, dim);
    dim = rewriter.create<SplatOp>(loc, vectorCmpType, dim);
    Value mask =
        rewriter.create<CmpIOp>(loc, CmpIPredicate::slt, offsetVector, dim);
    mask = rewriter.create<LLVM::DialectCastOp>(loc, toLLVMTy(mask.getType()),
                                                mask);

    // 5. Rewrite as a masked read / write.
    return replaceTransferOpWithMasked(rewriter, typeConverter, loc, xferOp,
                                       operands, vectorDataPtr, mask);
  }
};
1059 
/// Lowers vector.print to calls into a small runtime print-support library.
class VectorPrintOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorPrintOpConversion(MLIRContext *context,
                                   LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::PrintOp::getOperationName(), context,
                             typeConverter) {}

  // Proof-of-concept lowering implementation that relies on a small
  // runtime support library, which only needs to provide a few
  // printing methods (single value for all data types, opening/closing
  // bracket, comma, newline). The lowering fully unrolls a vector
  // in terms of these elementary printing operations. The advantage
  // of this approach is that the library can remain unaware of all
  // low-level implementation details of vectors while still supporting
  // output of any shaped and dimensioned vector. Due to full unrolling,
  // this approach is less suited for very large vectors though.
  //
  // TODO: rely solely on libc in future? something else?
  //
  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto printOp = cast<vector::PrintOp>(op);
    auto adaptor = vector::PrintOpAdaptor(operands);
    Type printType = printOp.getPrintType();

    // Bail out if the printed type has no LLVM counterpart.
    if (typeConverter.convertType(printType) == nullptr)
      return failure();

    // Make sure element type has runtime support (currently i1/i32/i64 and
    // f32/f64); pick the matching runtime printer function.
    VectorType vectorType = printType.dyn_cast<VectorType>();
    Type eltType = vectorType ? vectorType.getElementType() : printType;
    int64_t rank = vectorType ? vectorType.getRank() : 0;
    Operation *printer;
    if (eltType.isSignlessInteger(1) || eltType.isSignlessInteger(32))
      printer = getPrintI32(op);
    else if (eltType.isSignlessInteger(64))
      printer = getPrintI64(op);
    else if (eltType.isF32())
      printer = getPrintFloat(op);
    else if (eltType.isF64())
      printer = getPrintDouble(op);
    else
      return failure();

    // Unroll vector into elementary print calls.
    emitRanks(rewriter, op, adaptor.source(), vectorType, printer, rank);
    emitCall(rewriter, op->getLoc(), getPrintNewline(op));
    rewriter.eraseOp(op);
    return success();
  }

private:
  // Recursively emits print calls for `value`: a scalar is printed via
  // `printer`; a vector is printed as "( e0, e1, ... )" with one recursive
  // call per subvector/element.
  void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
                 Value value, VectorType vectorType, Operation *printer,
                 int64_t rank) const {
    Location loc = op->getLoc();
    if (rank == 0) {
      if (value.getType() ==
          LLVM::LLVMType::getInt1Ty(typeConverter.getDialect())) {
        // Convert i1 (bool) to i32 so we can use the print_i32 method.
        // This avoids the need for a print_i1 method with an unclear ABI.
        auto i32Type = LLVM::LLVMType::getInt32Ty(typeConverter.getDialect());
        auto trueVal = rewriter.create<ConstantOp>(
            loc, i32Type, rewriter.getI32IntegerAttr(1));
        auto falseVal = rewriter.create<ConstantOp>(
            loc, i32Type, rewriter.getI32IntegerAttr(0));
        value = rewriter.create<SelectOp>(loc, value, trueVal, falseVal);
      }
      emitCall(rewriter, loc, printer, value);
      return;
    }

    emitCall(rewriter, loc, getPrintOpen(op));
    Operation *printComma = getPrintComma(op);
    int64_t dim = vectorType.getDimSize(0);
    for (int64_t d = 0; d < dim; ++d) {
      // Peel off the leading dimension and recurse on the d-th subvector
      // (or element, when rank == 1).
      auto reducedType =
          rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
      auto llvmType = typeConverter.convertType(
          rank > 1 ? reducedType : vectorType.getElementType());
      Value nestedVal =
          extractOne(rewriter, typeConverter, loc, value, llvmType, rank, d);
      emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1);
      if (d != dim - 1)
        emitCall(rewriter, loc, printComma);
    }
    emitCall(rewriter, loc, getPrintClose(op));
  }

  // Helper to emit a call.
  static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
                       Operation *ref, ValueRange params = ValueRange()) {
    rewriter.create<LLVM::CallOp>(loc, ArrayRef<Type>{},
                                  rewriter.getSymbolRefAttr(ref), params);
  }

  // Helper for printer method declaration (first hit) and lookup.
  // Declares a void function `name(params)` at module scope on first use.
  static Operation *getPrint(Operation *op, LLVM::LLVMDialect *dialect,
                             StringRef name, ArrayRef<LLVM::LLVMType> params) {
    auto module = op->getParentOfType<ModuleOp>();
    auto func = module.lookupSymbol<LLVM::LLVMFuncOp>(name);
    if (func)
      return func;
    OpBuilder moduleBuilder(module.getBodyRegion());
    return moduleBuilder.create<LLVM::LLVMFuncOp>(
        op->getLoc(), name,
        LLVM::LLVMType::getFunctionTy(LLVM::LLVMType::getVoidTy(dialect),
                                      params, /*isVarArg=*/false));
  }

  // Helpers for method names.
  Operation *getPrintI32(Operation *op) const {
    LLVM::LLVMDialect *dialect = typeConverter.getDialect();
    return getPrint(op, dialect, "print_i32",
                    LLVM::LLVMType::getInt32Ty(dialect));
  }
  Operation *getPrintI64(Operation *op) const {
    LLVM::LLVMDialect *dialect = typeConverter.getDialect();
    return getPrint(op, dialect, "print_i64",
                    LLVM::LLVMType::getInt64Ty(dialect));
  }
  Operation *getPrintFloat(Operation *op) const {
    LLVM::LLVMDialect *dialect = typeConverter.getDialect();
    return getPrint(op, dialect, "print_f32",
                    LLVM::LLVMType::getFloatTy(dialect));
  }
  Operation *getPrintDouble(Operation *op) const {
    LLVM::LLVMDialect *dialect = typeConverter.getDialect();
    return getPrint(op, dialect, "print_f64",
                    LLVM::LLVMType::getDoubleTy(dialect));
  }
  Operation *getPrintOpen(Operation *op) const {
    return getPrint(op, typeConverter.getDialect(), "print_open", {});
  }
  Operation *getPrintClose(Operation *op) const {
    return getPrint(op, typeConverter.getDialect(), "print_close", {});
  }
  Operation *getPrintComma(Operation *op) const {
    return getPrint(op, typeConverter.getDialect(), "print_comma", {});
  }
  Operation *getPrintNewline(Operation *op) const {
    return getPrint(op, typeConverter.getDialect(), "print_newline", {});
  }
};
1205 
1206 /// Progressive lowering of ExtractStridedSliceOp to either:
1207 ///   1. extractelement + insertelement for the 1-D case
1208 ///   2. extract + optional strided_slice + insert for the n-D case.
1209 class VectorStridedSliceOpConversion
1210     : public OpRewritePattern<ExtractStridedSliceOp> {
1211 public:
1212   using OpRewritePattern<ExtractStridedSliceOp>::OpRewritePattern;
1213 
1214   LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
1215                                 PatternRewriter &rewriter) const override {
1216     auto dstType = op.getResult().getType().cast<VectorType>();
1217 
1218     assert(!op.offsets().getValue().empty() && "Unexpected empty offsets");
1219 
1220     int64_t offset =
1221         op.offsets().getValue().front().cast<IntegerAttr>().getInt();
1222     int64_t size = op.sizes().getValue().front().cast<IntegerAttr>().getInt();
1223     int64_t stride =
1224         op.strides().getValue().front().cast<IntegerAttr>().getInt();
1225 
1226     auto loc = op.getLoc();
1227     auto elemType = dstType.getElementType();
1228     assert(elemType.isSignlessIntOrIndexOrFloat());
1229     Value zero = rewriter.create<ConstantOp>(loc, elemType,
1230                                              rewriter.getZeroAttr(elemType));
1231     Value res = rewriter.create<SplatOp>(loc, dstType, zero);
1232     for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
1233          off += stride, ++idx) {
1234       Value extracted = extractOne(rewriter, loc, op.vector(), off);
1235       if (op.offsets().getValue().size() > 1) {
1236         extracted = rewriter.create<ExtractStridedSliceOp>(
1237             loc, extracted, getI64SubArray(op.offsets(), /* dropFront=*/1),
1238             getI64SubArray(op.sizes(), /* dropFront=*/1),
1239             getI64SubArray(op.strides(), /* dropFront=*/1));
1240       }
1241       res = insertOne(rewriter, loc, extracted, res, idx);
1242     }
1243     rewriter.replaceOp(op, {res});
1244     return success();
1245   }
1246   /// This pattern creates recursive ExtractStridedSliceOp, but the recursion is
1247   /// bounded as the rank is strictly decreasing.
1248   bool hasBoundedRewriteRecursion() const final { return true; }
1249 };
1250 
1251 } // namespace
1252 
1253 /// Populate the given list with patterns that convert from Vector to LLVM.
1254 void mlir::populateVectorToLLVMConversionPatterns(
1255     LLVMTypeConverter &converter, OwningRewritePatternList &patterns,
1256     bool reassociateFPReductions) {
1257   MLIRContext *ctx = converter.getDialect()->getContext();
1258   // clang-format off
1259   patterns.insert<VectorFMAOpNDRewritePattern,
1260                   VectorInsertStridedSliceOpDifferentRankRewritePattern,
1261                   VectorInsertStridedSliceOpSameRankRewritePattern,
1262                   VectorStridedSliceOpConversion>(ctx);
1263   patterns.insert<VectorReductionOpConversion>(
1264       ctx, converter, reassociateFPReductions);
1265   patterns
1266       .insert<VectorShuffleOpConversion,
1267               VectorExtractElementOpConversion,
1268               VectorExtractOpConversion,
1269               VectorFMAOp1DConversion,
1270               VectorInsertElementOpConversion,
1271               VectorInsertOpConversion,
1272               VectorPrintOpConversion,
1273               VectorTransferConversion<TransferReadOp>,
1274               VectorTransferConversion<TransferWriteOp>,
1275               VectorTypeCastOpConversion,
1276               VectorGatherOpConversion,
1277               VectorScatterOpConversion>(ctx, converter);
1278   // clang-format on
1279 }
1280 
1281 void mlir::populateVectorToLLVMMatrixConversionPatterns(
1282     LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
1283   MLIRContext *ctx = converter.getDialect()->getContext();
1284   patterns.insert<VectorMatmulOpConversion>(ctx, converter);
1285   patterns.insert<VectorFlatTransposeOpConversion>(ctx, converter);
1286 }
1287 
namespace {
/// Pass that lowers Vector dialect operations (together with standard ops)
/// to the LLVM dialect, after first progressively lowering slices and
/// contraction operations within the Vector dialect itself.
struct LowerVectorToLLVMPass
    : public ConvertVectorToLLVMBase<LowerVectorToLLVMPass> {
  // Copy the user-provided option into the generated pass member so it is
  // visible to runOnOperation().
  LowerVectorToLLVMPass(const LowerVectorToLLVMOptions &options) {
    this->reassociateFPReductions = options.reassociateFPReductions;
  }
  void runOnOperation() override;
};
} // namespace
1297 
1298 void LowerVectorToLLVMPass::runOnOperation() {
1299   // Perform progressive lowering of operations on slices and
1300   // all contraction operations. Also applies folding and DCE.
1301   {
1302     OwningRewritePatternList patterns;
1303     populateVectorToVectorCanonicalizationPatterns(patterns, &getContext());
1304     populateVectorSlicesLoweringPatterns(patterns, &getContext());
1305     populateVectorContractLoweringPatterns(patterns, &getContext());
1306     applyPatternsAndFoldGreedily(getOperation(), patterns);
1307   }
1308 
1309   // Convert to the LLVM IR dialect.
1310   LLVMTypeConverter converter(&getContext());
1311   OwningRewritePatternList patterns;
1312   populateVectorToLLVMMatrixConversionPatterns(converter, patterns);
1313   populateVectorToLLVMConversionPatterns(converter, patterns,
1314                                          reassociateFPReductions);
1315   populateVectorToLLVMMatrixConversionPatterns(converter, patterns);
1316   populateStdToLLVMConversionPatterns(converter, patterns);
1317 
1318   LLVMConversionTarget target(getContext());
1319   if (failed(applyPartialConversion(getOperation(), target, patterns))) {
1320     signalPassFailure();
1321   }
1322 }
1323 
1324 std::unique_ptr<OperationPass<ModuleOp>>
1325 mlir::createConvertVectorToLLVMPass(const LowerVectorToLLVMOptions &options) {
1326   return std::make_unique<LowerVectorToLLVMPass>(options);
1327 }
1328