//===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/Dialect/VectorOps/VectorOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Module.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/StandardTypes.h"
#include "mlir/IR/Types.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/Passes.h"

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorHandling.h"

using namespace mlir;
using namespace mlir::vector;

template <typename T>
static LLVM::LLVMType getPtrToElementType(T containerType,
                                          LLVMTypeConverter &lowering) {
  return lowering.convertType(containerType.getElementType())
      .template cast<LLVM::LLVMType>()
      .getPointerTo();
}

// Helper to reduce vector type by one rank at front.
static VectorType reducedVectorTypeFront(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
}

// Helper to reduce vector type by *all* but one rank at back.
static VectorType reducedVectorTypeBack(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().take_back(), tp.getElementType());
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(ConversionPatternRewriter &rewriter,
                       LLVMTypeConverter &lowering, Location loc, Value val1,
                       Value val2, Type llvmType, int64_t rank, int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, lowering.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2,
                                                  constant);
  }
  return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2,
                                              rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(PatternRewriter &rewriter, Location loc, Value from,
                       Value into, int64_t offset) {
  auto vectorType = into.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<InsertOp>(loc, from, into, offset);
  return rewriter.create<vector::InsertElementOp>(
      loc, vectorType, from, into,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(ConversionPatternRewriter &rewriter,
                        LLVMTypeConverter &lowering, Location loc, Value val,
                        Type llvmType, int64_t rank, int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, lowering.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val,
                                                   constant);
  }
  return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val,
                                               rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(PatternRewriter &rewriter, Location loc, Value vector,
                        int64_t offset) {
  auto vectorType = vector.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<ExtractOp>(loc, vector, offset);
  return rewriter.create<vector::ExtractElementOp>(
      loc, vectorType.getElementType(), vector,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that returns a subset of `arrayAttr` as a vector of int64_t.
// TODO(rriddle): Better support for attribute subtype forwarding + slicing.
static SmallVector<int64_t, 4> getI64SubArray(ArrayAttr arrayAttr,
                                              unsigned dropFront = 0,
                                              unsigned dropBack = 0) {
  assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds");
  auto range = arrayAttr.getAsRange<IntegerAttr>();
  SmallVector<int64_t, 4> res;
  res.reserve(arrayAttr.size() - dropFront - dropBack);
  for (auto it = range.begin() + dropFront, eit = range.end() - dropBack;
       it != eit; ++it)
    res.push_back((*it).getValue().getSExtValue());
  return res;
}

namespace {

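/// Progressive lowering of `vector.broadcast` into LLVM dialect insert and
/// shuffle operations. The 1-D case uses a single insertelement/shufflevector
/// pair; higher ranks recurse one dimension at a time (see `duplicateOneRank`
/// and `stretchOneRank` below). Illustrative example (schematic IR, not the
/// exact printed form):
/// ```
///   %v = vector.broadcast %s : f32 to vector<4x2xf32>
/// ```
/// first broadcasts %s into a vector<2xf32>, which is then inserted four
/// times into an undef 4x2 result.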
class VectorBroadcastOpConversion : public LLVMOpLowering {
public:
  explicit VectorBroadcastOpConversion(MLIRContext *context,
                                       LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::BroadcastOp::getOperationName(), context,
                       typeConverter) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto broadcastOp = cast<vector::BroadcastOp>(op);
    VectorType dstVectorType = broadcastOp.getVectorType();
    if (lowering.convertType(dstVectorType) == nullptr)
      return matchFailure();
    // Rewrite when the full vector type can be lowered (which
    // implies all 'reduced' types can be lowered too).
    auto adaptor = vector::BroadcastOpOperandAdaptor(operands);
    VectorType srcVectorType =
        broadcastOp.getSourceType().dyn_cast<VectorType>();
    rewriter.replaceOp(
        op, expandRanks(adaptor.source(), // source value to be expanded
                        op->getLoc(),     // location of original broadcast
                        srcVectorType, dstVectorType, rewriter));
    return matchSuccess();
  }

private:
  // Expands the given source value over all the ranks, as defined
  // by the source and destination type (a null source type denotes
  // expansion from a scalar value into a vector).
  //
  // TODO(ajcbik): consider replacing this one-pattern lowering
  //               with a two-pattern lowering using other vector
  //               ops once all insert/extract/shuffle operations
  //               are available with lowering implementation.
  //
  Value expandRanks(Value value, Location loc, VectorType srcVectorType,
                    VectorType dstVectorType,
                    ConversionPatternRewriter &rewriter) const {
    assert((dstVectorType != nullptr) && "invalid result type in broadcast");
    // Determine rank of source and destination.
    int64_t srcRank = srcVectorType ? srcVectorType.getRank() : 0;
    int64_t dstRank = dstVectorType.getRank();
    int64_t curDim = dstVectorType.getDimSize(0);
    if (srcRank < dstRank)
      // Duplicate this rank.
      return duplicateOneRank(value, loc, srcVectorType, dstVectorType, dstRank,
                              curDim, rewriter);
    // If all trailing dimensions are the same, the broadcast consists of
    // simply passing through the source value and we are done. Otherwise,
    // any non-matching dimension forces a stretch along this rank.
    assert((srcVectorType != nullptr) && (srcRank > 0) &&
           (srcRank == dstRank) && "invalid rank in broadcast");
    for (int64_t r = 0; r < dstRank; r++) {
      if (srcVectorType.getDimSize(r) != dstVectorType.getDimSize(r)) {
        return stretchOneRank(value, loc, srcVectorType, dstVectorType, dstRank,
                              curDim, rewriter);
      }
    }
    return value;
  }

  // Picks the best way to duplicate a single rank. For the 1-D case, a
  // single insert-elt/shuffle is the most efficient expansion. For higher
  // dimensions, however, we need dim x insert-values on a new broadcast
  // with one less leading dimension, which will be lowered "recursively"
  // to matching LLVM IR.
  // For example:
  //   v = broadcast s : f32 to vector<4x2xf32>
  // becomes:
  //   x = broadcast s : f32 to vector<2xf32>
  //   v = [x,x,x,x]
  // becomes:
  //   x = [s,s]
  //   v = [x,x,x,x]
  Value duplicateOneRank(Value value, Location loc, VectorType srcVectorType,
                         VectorType dstVectorType, int64_t rank, int64_t dim,
                         ConversionPatternRewriter &rewriter) const {
    Type llvmType = lowering.convertType(dstVectorType);
    assert((llvmType != nullptr) && "unlowerable vector type");
    if (rank == 1) {
      Value undef = rewriter.create<LLVM::UndefOp>(loc, llvmType);
      Value expand =
          insertOne(rewriter, lowering, loc, undef, value, llvmType, rank, 0);
      SmallVector<int32_t, 4> zeroValues(dim, 0);
      return rewriter.create<LLVM::ShuffleVectorOp>(
          loc, expand, undef, rewriter.getI32ArrayAttr(zeroValues));
    }
    Value expand = expandRanks(value, loc, srcVectorType,
                               reducedVectorTypeFront(dstVectorType), rewriter);
    Value result = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    for (int64_t d = 0; d < dim; ++d) {
      result =
          insertOne(rewriter, lowering, loc, result, expand, llvmType, rank, d);
    }
    return result;
  }

  // Picks the best way to stretch a single rank. For the 1-D case (which is
  // only reached when actually stretching), a single insert-element/shuffle
  // is the most efficient expansion. For higher ranks, every dimension is
  // expanded individually and inserted one by one into the resulting vector.
  // For example:
  //   v = broadcast w : vector<4x1x2xf32> to vector<4x2x2xf32>
  // becomes:
  //   a = broadcast w[0] : vector<1x2xf32> to vector<2x2xf32>
  //   b = broadcast w[1] : vector<1x2xf32> to vector<2x2xf32>
  //   c = broadcast w[2] : vector<1x2xf32> to vector<2x2xf32>
  //   d = broadcast w[3] : vector<1x2xf32> to vector<2x2xf32>
  //   v = [a,b,c,d]
  // becomes:
  //   x = broadcast w[0][0] : vector<2xf32> to vector<2x2xf32>
  //   y = broadcast w[1][0] : vector<2xf32> to vector<2x2xf32>
  //   a = [x, y]
  //   etc.
  Value stretchOneRank(Value value, Location loc, VectorType srcVectorType,
                       VectorType dstVectorType, int64_t rank, int64_t dim,
                       ConversionPatternRewriter &rewriter) const {
    Type llvmType = lowering.convertType(dstVectorType);
    assert((llvmType != nullptr) && "unlowerable vector type");
    Value result = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    bool atStretch = dim != srcVectorType.getDimSize(0);
    if (rank == 1) {
      assert(atStretch);
      Type redLlvmType = lowering.convertType(dstVectorType.getElementType());
      Value one =
          extractOne(rewriter, lowering, loc, value, redLlvmType, rank, 0);
      Value expand =
          insertOne(rewriter, lowering, loc, result, one, llvmType, rank, 0);
      SmallVector<int32_t, 4> zeroValues(dim, 0);
      return rewriter.create<LLVM::ShuffleVectorOp>(
          loc, expand, result, rewriter.getI32ArrayAttr(zeroValues));
    }
    VectorType redSrcType = reducedVectorTypeFront(srcVectorType);
    VectorType redDstType = reducedVectorTypeFront(dstVectorType);
    Type redLlvmType = lowering.convertType(redSrcType);
    for (int64_t d = 0; d < dim; ++d) {
      int64_t pos = atStretch ? 0 : d;
      Value one =
          extractOne(rewriter, lowering, loc, value, redLlvmType, rank, pos);
      Value expand = expandRanks(one, loc, redSrcType, redDstType, rewriter);
      result =
          insertOne(rewriter, lowering, loc, result, expand, llvmType, rank, d);
    }
    return result;
  }
};

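/// One-to-one conversion of `vector.reduction` into the matching LLVM
/// reduction intrinsic op. For example (schematic IR):
/// ```
///   %0 = vector.reduction "add", %a : vector<16xf32> into f32
/// ```
/// maps to `LLVM::experimental_vector_reduce_v2_fadd` (seeded with a 0.0
/// accumulator), while an integer "add" maps to
/// `LLVM::experimental_vector_reduce_add`, and similarly for the other kinds
/// handled below.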
class VectorReductionOpConversion : public LLVMOpLowering {
public:
  explicit VectorReductionOpConversion(MLIRContext *context,
                                       LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::ReductionOp::getOperationName(), context,
                       typeConverter) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto reductionOp = cast<vector::ReductionOp>(op);
    auto kind = reductionOp.kind();
    Type eltType = reductionOp.dest().getType();
    Type llvmType = lowering.convertType(eltType);
    if (eltType.isInteger(32) || eltType.isInteger(64)) {
      // Integer reductions: add/mul/min/max/and/or/xor.
      if (kind == "add")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_add>(
            op, llvmType, operands[0]);
      else if (kind == "mul")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_mul>(
            op, llvmType, operands[0]);
      else if (kind == "min")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_smin>(
            op, llvmType, operands[0]);
      else if (kind == "max")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_smax>(
            op, llvmType, operands[0]);
      else if (kind == "and")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_and>(
            op, llvmType, operands[0]);
      else if (kind == "or")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_or>(
            op, llvmType, operands[0]);
      else if (kind == "xor")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_xor>(
            op, llvmType, operands[0]);
      else
        return matchFailure();
      return matchSuccess();

    } else if (eltType.isF32() || eltType.isF64()) {
      // Floating-point reductions: add/mul/min/max.
      if (kind == "add") {
        Value zero = rewriter.create<LLVM::ConstantOp>(
            op->getLoc(), llvmType, rewriter.getZeroAttr(eltType));
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_v2_fadd>(
            op, llvmType, zero, operands[0]);
      } else if (kind == "mul") {
        Value one = rewriter.create<LLVM::ConstantOp>(
            op->getLoc(), llvmType, rewriter.getFloatAttr(eltType, 1.0));
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_v2_fmul>(
            op, llvmType, one, operands[0]);
      } else if (kind == "min")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_fmin>(
            op, llvmType, operands[0]);
      else if (kind == "max")
        rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_fmax>(
            op, llvmType, operands[0]);
      else
        return matchFailure();
      return matchSuccess();
    }
    return matchFailure();
  }
};

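/// Conversion pattern for `vector.shuffle`. When both operands are 1-D
/// vectors of the exact same type, e.g. (schematic IR):
/// ```
///   %0 = vector.shuffle %a, %b [0, 2, 1, 3] : vector<2xf32>, vector<2xf32>
/// ```
/// this lowers to a single `llvm.shufflevector`; otherwise each value
/// selected by the mask is extracted from the proper operand and inserted
/// into the result one position at a time.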
class VectorShuffleOpConversion : public LLVMOpLowering {
public:
  explicit VectorShuffleOpConversion(MLIRContext *context,
                                     LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::ShuffleOp::getOperationName(), context,
                       typeConverter) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::ShuffleOpOperandAdaptor(operands);
    auto shuffleOp = cast<vector::ShuffleOp>(op);
    auto v1Type = shuffleOp.getV1VectorType();
    auto v2Type = shuffleOp.getV2VectorType();
    auto vectorType = shuffleOp.getVectorType();
    Type llvmType = lowering.convertType(vectorType);
    auto maskArrayAttr = shuffleOp.mask();

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return matchFailure();

    // Get rank and dimension sizes.
    int64_t rank = vectorType.getRank();
    assert(v1Type.getRank() == rank);
    assert(v2Type.getRank() == rank);
    int64_t v1Dim = v1Type.getDimSize(0);

    // For rank 1, where both operands have *exactly* the same vector type,
    // there is direct shuffle support in LLVM. Use it!
    if (rank == 1 && v1Type == v2Type) {
      Value shuffle = rewriter.create<LLVM::ShuffleVectorOp>(
          loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
      rewriter.replaceOp(op, shuffle);
      return matchSuccess();
    }

    // For all other cases, extract each value selected by the mask from the
    // proper operand and insert it into the result one position at a time.
    Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    int64_t insPos = 0;
    for (auto en : llvm::enumerate(maskArrayAttr)) {
      int64_t extPos = en.value().cast<IntegerAttr>().getInt();
      Value value = adaptor.v1();
      if (extPos >= v1Dim) {
        extPos -= v1Dim;
        value = adaptor.v2();
      }
      Value extract =
          extractOne(rewriter, lowering, loc, value, llvmType, rank, extPos);
      insert = insertOne(rewriter, lowering, loc, insert, extract, llvmType,
                         rank, insPos++);
    }
    rewriter.replaceOp(op, insert);
    return matchSuccess();
  }
};

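/// Trivial one-to-one conversion of `vector.extractelement` (dynamic
/// position) into `llvm.extractelement` on the converted 1-D vector type.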
class VectorExtractElementOpConversion : public LLVMOpLowering {
public:
  explicit VectorExtractElementOpConversion(MLIRContext *context,
                                            LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::ExtractElementOp::getOperationName(), context,
                       typeConverter) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::ExtractElementOpOperandAdaptor(operands);
    auto extractEltOp = cast<vector::ExtractElementOp>(op);
    auto vectorType = extractEltOp.getVectorType();
    auto llvmType = lowering.convertType(vectorType.getElementType());

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return matchFailure();

    rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
        op, llvmType, adaptor.vector(), adaptor.position());
    return matchSuccess();
  }
};

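/// Conversion pattern for `vector.extract` with a static position. An extract
/// that yields a vector becomes a single `llvm.extractvalue` on the
/// array-of-vectors representation; an extract that yields a scalar first
/// peels off the enclosing 1-D vector with `llvm.extractvalue` (when rank > 1)
/// and then uses `llvm.extractelement`. Schematic example:
/// ```
///   %1 = vector.extract %0[0, 2] : vector<4x8xf32>
/// ```
/// becomes an extractvalue at position [0] followed by an extractelement at
/// index 2.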
class VectorExtractOpConversion : public LLVMOpLowering {
public:
  explicit VectorExtractOpConversion(MLIRContext *context,
                                     LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::ExtractOp::getOperationName(), context,
                       typeConverter) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::ExtractOpOperandAdaptor(operands);
    auto extractOp = cast<vector::ExtractOp>(op);
    auto vectorType = extractOp.getVectorType();
    auto resultType = extractOp.getResult().getType();
    auto llvmResultType = lowering.convertType(resultType);
    auto positionArrayAttr = extractOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return matchFailure();

    // One-shot extraction of vector from array (only requires extractvalue).
    if (resultType.isa<VectorType>()) {
      Value extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, llvmResultType, adaptor.vector(), positionArrayAttr);
      rewriter.replaceOp(op, extracted);
      return matchSuccess();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = op->getContext();
    Value extracted = adaptor.vector();
    auto positionAttrs = positionArrayAttr.getValue();
    if (positionAttrs.size() > 1) {
      auto oneDVectorType = reducedVectorTypeBack(vectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, lowering.convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Remaining extraction of element from 1-D LLVM vector
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto i64Type = LLVM::LLVMType::getInt64Ty(lowering.getDialect());
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    extracted =
        rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
    rewriter.replaceOp(op, extracted);

    return matchSuccess();
  }
};

/// Conversion pattern that turns a vector.fma on a 1-D vector
/// into an llvm.intr.fma. This is a trivial one-to-one conversion.
/// This does not match vectors of rank n >= 2.
///
/// Example:
/// ```
///  vector.fma %a, %a, %a : vector<8xf32>
/// ```
/// is converted to:
/// ```
///  llvm.intr.fma %va, %va, %va :
///    (!llvm<"<8 x float>">, !llvm<"<8 x float>">, !llvm<"<8 x float>">)
///    -> !llvm<"<8 x float>">
/// ```
class VectorFMAOp1DConversion : public LLVMOpLowering {
public:
  explicit VectorFMAOp1DConversion(MLIRContext *context,
                                   LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::FMAOp::getOperationName(), context,
                       typeConverter) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::FMAOpOperandAdaptor(operands);
    vector::FMAOp fmaOp = cast<vector::FMAOp>(op);
    VectorType vType = fmaOp.getVectorType();
    if (vType.getRank() != 1)
      return matchFailure();
    rewriter.replaceOpWithNewOp<LLVM::FMAOp>(op, adaptor.lhs(), adaptor.rhs(),
                                             adaptor.acc());
    return matchSuccess();
  }
};

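/// Trivial one-to-one conversion of `vector.insertelement` (dynamic position)
/// into `llvm.insertelement` on the converted 1-D vector type.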
class VectorInsertElementOpConversion : public LLVMOpLowering {
public:
  explicit VectorInsertElementOpConversion(MLIRContext *context,
                                           LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::InsertElementOp::getOperationName(), context,
                       typeConverter) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::InsertElementOpOperandAdaptor(operands);
    auto insertEltOp = cast<vector::InsertElementOp>(op);
    auto vectorType = insertEltOp.getDestVectorType();
    auto llvmType = lowering.convertType(vectorType);

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return matchFailure();

    rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>(
        op, llvmType, adaptor.dest(), adaptor.source(), adaptor.position());
    return matchSuccess();
  }
};

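/// Conversion pattern for `vector.insert` with a static position, mirroring
/// the extract lowering above: inserting a vector is a single
/// `llvm.insertvalue`; inserting a scalar extracts the enclosing 1-D vector
/// (when rank > 1), performs an `llvm.insertelement`, and inserts the updated
/// 1-D vector back into the array with `llvm.insertvalue`.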
class VectorInsertOpConversion : public LLVMOpLowering {
public:
  explicit VectorInsertOpConversion(MLIRContext *context,
                                    LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::InsertOp::getOperationName(), context,
                       typeConverter) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::InsertOpOperandAdaptor(operands);
    auto insertOp = cast<vector::InsertOp>(op);
    auto sourceType = insertOp.getSourceType();
    auto destVectorType = insertOp.getDestVectorType();
    auto llvmResultType = lowering.convertType(destVectorType);
    auto positionArrayAttr = insertOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return matchFailure();

    // One-shot insertion of a vector into an array (only requires insertvalue).
    if (sourceType.isa<VectorType>()) {
      Value inserted = rewriter.create<LLVM::InsertValueOp>(
          loc, llvmResultType, adaptor.dest(), adaptor.source(),
          positionArrayAttr);
      rewriter.replaceOp(op, inserted);
      return matchSuccess();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = op->getContext();
    Value extracted = adaptor.dest();
    auto positionAttrs = positionArrayAttr.getValue();
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto oneDVectorType = destVectorType;
    if (positionAttrs.size() > 1) {
      oneDVectorType = reducedVectorTypeBack(destVectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, lowering.convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Insertion of an element into a 1-D LLVM vector.
    auto i64Type = LLVM::LLVMType::getInt64Ty(lowering.getDialect());
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    Value inserted = rewriter.create<LLVM::InsertElementOp>(
        loc, lowering.convertType(oneDVectorType), extracted, adaptor.source(),
        constant);

    // Potential insertion of resulting 1-D vector into array.
    if (positionAttrs.size() > 1) {
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(positionAttrs.drop_back(), context);
      inserted = rewriter.create<LLVM::InsertValueOp>(loc, llvmResultType,
                                                      adaptor.dest(), inserted,
                                                      nMinusOnePositionAttrs);
    }

    rewriter.replaceOp(op, inserted);
    return matchSuccess();
  }
};

/// Rank reducing rewrite for n-D FMA into (n-1)-D FMA where n > 1.
///
/// Example:
/// ```
///   %d = vector.fma %a, %b, %c : vector<2x4xf32>
/// ```
/// is rewritten into:
/// ```
///  %r = splat %f0 : vector<2x4xf32>
///  %va = vector.extract %a[0] : vector<2x4xf32>
///  %vb = vector.extract %b[0] : vector<2x4xf32>
///  %vc = vector.extract %c[0] : vector<2x4xf32>
///  %vd = vector.fma %va, %vb, %vc : vector<4xf32>
///  %r2 = vector.insert %vd, %r[0] : vector<4xf32> into vector<2x4xf32>
///  %va2 = vector.extract %a[1] : vector<2x4xf32>
///  %vb2 = vector.extract %b[1] : vector<2x4xf32>
///  %vc2 = vector.extract %c[1] : vector<2x4xf32>
///  %vd2 = vector.fma %va2, %vb2, %vc2 : vector<4xf32>
///  %r3 = vector.insert %vd2, %r2[1] : vector<4xf32> into vector<2x4xf32>
///  // %r3 holds the final value.
/// ```
class VectorFMAOpNDRewritePattern : public OpRewritePattern<FMAOp> {
public:
  using OpRewritePattern<FMAOp>::OpRewritePattern;

  PatternMatchResult matchAndRewrite(FMAOp op,
                                     PatternRewriter &rewriter) const override {
    auto vType = op.getVectorType();
    if (vType.getRank() < 2)
      return matchFailure();

    auto loc = op.getLoc();
    auto elemType = vType.getElementType();
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value desc = rewriter.create<SplatOp>(loc, vType, zero);
    for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) {
      Value extrLHS = rewriter.create<ExtractOp>(loc, op.lhs(), i);
      Value extrRHS = rewriter.create<ExtractOp>(loc, op.rhs(), i);
      Value extrACC = rewriter.create<ExtractOp>(loc, op.acc(), i);
      Value fma = rewriter.create<FMAOp>(loc, extrLHS, extrRHS, extrACC);
      desc = rewriter.create<InsertOp>(loc, fma, desc, i);
    }
    rewriter.replaceOp(op, desc);
    return matchSuccess();
  }
};

// When ranks are different, InsertStridedSlice needs to extract a properly
// ranked vector from the destination vector into which to insert. This pattern
// only takes care of this part and forwards the rest of the conversion to
// another pattern that converts InsertStridedSlice for operands of the same
// rank.
//
// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have different ranks. In this case:
//   1. the proper subvector is extracted from the destination vector
//   2. a new InsertStridedSlice op is created to insert the source in the
//   destination subvector
//   3. the destination subvector is inserted back in the proper place
//   4. the op is replaced by the result of step 3.
// The new InsertStridedSlice from step 2. will be picked up by a
// `VectorInsertStridedSliceOpSameRankRewritePattern`.
class VectorInsertStridedSliceOpDifferentRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  PatternMatchResult matchAndRewrite(InsertStridedSliceOp op,
                                     PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return matchFailure();

    auto loc = op.getLoc();
    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff == 0)
      return matchFailure();

    int64_t rankRest = dstType.getRank() - rankDiff;
    // Extract / insert the subvector of matching rank and InsertStridedSlice
    // on it.
    Value extracted =
        rewriter.create<ExtractOp>(loc, op.dest(),
                                   getI64SubArray(op.offsets(), /*dropFront=*/0,
                                                  /*dropBack=*/rankRest));
    // A different pattern will kick in for InsertStridedSlice with matching
    // ranks.
    auto stridedSliceInnerOp = rewriter.create<InsertStridedSliceOp>(
        loc, op.source(), extracted,
        getI64SubArray(op.offsets(), /*dropFront=*/rankDiff),
        getI64SubArray(op.strides(), /*dropFront=*/0));
    rewriter.replaceOpWithNewOp<InsertOp>(
        op, stridedSliceInnerOp.getResult(), op.dest(),
        getI64SubArray(op.offsets(), /*dropFront=*/0,
                       /*dropBack=*/rankRest));
    return matchSuccess();
  }
};

// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have the same rank. For each slice of the source vector along the outermost
// dimension:
//   1. the slice is extracted from the source vector
//   2. if the slice is itself a vector, the corresponding subvector is
//   extracted from the destination and a new, lower-rank InsertStridedSliceOp
//   is created to insert the slice into it (the pattern is reapplied
//   recursively until the element level is reached)
//   3. the resulting slice is inserted back into the destination at the
//   proper offset.
class VectorInsertStridedSliceOpSameRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  PatternMatchResult matchAndRewrite(InsertStridedSliceOp op,
                                     PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return matchFailure();

    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff != 0)
      return matchFailure();

    if (srcType == dstType) {
      rewriter.replaceOp(op, op.source());
      return matchSuccess();
    }

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = srcType.getShape().front();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    Value res = op.dest();
    // For each slice of the source vector along the most major dimension.
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      // 1. extract the proper subvector (or element) from source
      Value extractedSource = extractOne(rewriter, loc, op.source(), idx);
      if (extractedSource.getType().isa<VectorType>()) {
        // 2. If we have a vector, extract the proper subvector from destination
        // Otherwise we are at the element level and no need to recurse.
        Value extractedDest = extractOne(rewriter, loc, op.dest(), off);
        // 3. Reduce the problem to lowering a new InsertStridedSlice op with
        // smaller rank.
        InsertStridedSliceOp insertStridedSliceOp =
            rewriter.create<InsertStridedSliceOp>(
                loc, extractedSource, extractedDest,
                getI64SubArray(op.offsets(), /* dropFront=*/1),
                getI64SubArray(op.strides(), /* dropFront=*/1));
        // Call matchAndRewrite recursively from within the pattern. This
        // circumvents the current limitation that a given pattern cannot
        // be called multiple times by the PatternRewrite infrastructure (to
        // avoid infinite recursion, but in this case, infinite recursion
        // cannot happen because the rank is strictly decreasing).
        // TODO(rriddle, nicolasvasilache) Implement something like a hook for
        // a potential function that must decrease and allow the same pattern
        // multiple times.
        auto success = matchAndRewrite(insertStridedSliceOp, rewriter);
        (void)success;
        assert(success && "Unexpected failure");
        extractedSource = insertStridedSliceOp;
      }
      // 4. Insert the extractedSource into the res vector.
      res = insertOne(rewriter, loc, extractedSource, res, off);
    }

    rewriter.replaceOp(op, res);
    return matchSuccess();
  }
};

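/// Conversion pattern for `vector.outerproduct`. Each element lhs[d] is
/// broadcast into a 1-D vector with `llvm.shufflevector`, multiplied with rhs
/// (fused with the optional accumulator row via `llvm.intr.fma`), and the
/// resulting row is inserted at position d of the result array. Schematic
/// example:
/// ```
///   %0 = vector.outerproduct %a, %b : vector<4xf32>, vector<8xf32>
/// ```
/// produces a vector<4x8xf32> built row by row.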
class VectorOuterProductOpConversion : public LLVMOpLowering {
public:
  explicit VectorOuterProductOpConversion(MLIRContext *context,
                                          LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::OuterProductOp::getOperationName(), context,
                       typeConverter) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    auto adaptor = vector::OuterProductOpOperandAdaptor(operands);
    auto *ctx = op->getContext();
    auto vLHS = adaptor.lhs().getType().cast<LLVM::LLVMType>();
    auto vRHS = adaptor.rhs().getType().cast<LLVM::LLVMType>();
    auto rankLHS = vLHS.getUnderlyingType()->getVectorNumElements();
    auto rankRHS = vRHS.getUnderlyingType()->getVectorNumElements();
    auto llvmArrayOfVectType = lowering.convertType(
        cast<vector::OuterProductOp>(op).getResult().getType());
    Value desc = rewriter.create<LLVM::UndefOp>(loc, llvmArrayOfVectType);
    Value a = adaptor.lhs(), b = adaptor.rhs();
    Value acc = adaptor.acc().empty() ? nullptr : adaptor.acc().front();
    SmallVector<Value, 8> lhs, accs;
    lhs.reserve(rankLHS);
    accs.reserve(rankLHS);
    for (unsigned d = 0, e = rankLHS; d < e; ++d) {
      // shufflevector explicitly requires i32.
      auto attr = rewriter.getI32IntegerAttr(d);
      SmallVector<Attribute, 4> bcastAttr(rankRHS, attr);
      auto bcastArrayAttr = ArrayAttr::get(bcastAttr, ctx);
      Value aD = nullptr, accD = nullptr;
      // 1. Broadcast the element a[d] into vector aD.
      aD = rewriter.create<LLVM::ShuffleVectorOp>(loc, a, a, bcastArrayAttr);
      // 2. If acc is present, extract 1-d vector acc[d] into accD.
      if (acc)
        accD = rewriter.create<LLVM::ExtractValueOp>(
            loc, vRHS, acc, rewriter.getI64ArrayAttr(d));
      // 3. Compute aD outer b (plus accD, if relevant).
      Value aOuterbD =
          accD
              ? rewriter.create<LLVM::FMAOp>(loc, vRHS, aD, b, accD).getResult()
              : rewriter.create<LLVM::FMulOp>(loc, aD, b).getResult();
      // 4. Insert as value `d` in the descriptor.
      desc = rewriter.create<LLVM::InsertValueOp>(loc, llvmArrayOfVectType,
                                                  desc, aOuterbD,
                                                  rewriter.getI64ArrayAttr(d));
    }
    rewriter.replaceOp(op, desc);
    return matchSuccess();
  }
};

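/// Conversion pattern for `vector.type_cast` between statically shaped,
/// contiguous memrefs, e.g. memref<8x8xf32> to memref<vector<8x8xf32>>. The
/// lowering builds a fresh memref descriptor: the allocated and aligned
/// pointers are bitcast to the target element type, the offset is reset to 0,
/// and the static sizes of the target type together with the source strides
/// are filled in as constants.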
class VectorTypeCastOpConversion : public LLVMOpLowering {
public:
  explicit VectorTypeCastOpConversion(MLIRContext *context,
                                      LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::TypeCastOp::getOperationName(), context,
                       typeConverter) {}

  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    vector::TypeCastOp castOp = cast<vector::TypeCastOp>(op);
    MemRefType sourceMemRefType =
        castOp.getOperand().getType().cast<MemRefType>();
    MemRefType targetMemRefType =
        castOp.getResult().getType().cast<MemRefType>();

    // Only static shape casts supported atm.
    if (!sourceMemRefType.hasStaticShape() ||
        !targetMemRefType.hasStaticShape())
      return matchFailure();

    auto llvmSourceDescriptorTy =
        operands[0].getType().dyn_cast<LLVM::LLVMType>();
    if (!llvmSourceDescriptorTy || !llvmSourceDescriptorTy.isStructTy())
      return matchFailure();
    MemRefDescriptor sourceMemRef(operands[0]);

    auto llvmTargetDescriptorTy = lowering.convertType(targetMemRefType)
                                      .dyn_cast_or_null<LLVM::LLVMType>();
    if (!llvmTargetDescriptorTy || !llvmTargetDescriptorTy.isStructTy())
      return matchFailure();

    int64_t offset;
    SmallVector<int64_t, 4> strides;
    auto successStrides =
        getStridesAndOffset(sourceMemRefType, strides, offset);
    bool isContiguous = (strides.back() == 1);
    if (isContiguous) {
      auto sizes = sourceMemRefType.getShape();
      for (int index = 0, e = strides.size() - 2; index < e; ++index) {
        if (strides[index] != strides[index + 1] * sizes[index + 1]) {
          isContiguous = false;
          break;
        }
      }
    }
    // Only contiguous source memrefs supported atm.
    if (failed(successStrides) || !isContiguous)
      return matchFailure();

    auto int64Ty = LLVM::LLVMType::getInt64Ty(lowering.getDialect());

    // Create descriptor.
    auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
    Type llvmTargetElementTy = desc.getElementType();
    // Set allocated ptr.
    Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
    allocated =
        rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
    desc.setAllocatedPtr(rewriter, loc, allocated);
    // Set aligned ptr.
    Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
    ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
    desc.setAlignedPtr(rewriter, loc, ptr);
    // Fill offset 0.
    auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
    auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
    desc.setOffset(rewriter, loc, zero);

    // Fill size and stride descriptors in memref.
    for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
      int64_t index = indexedSize.index();
      auto sizeAttr =
          rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
      auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
      desc.setSize(rewriter, loc, index, size);
      auto strideAttr =
          rewriter.getIntegerAttr(rewriter.getIndexType(), strides[index]);
      auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
      desc.setStride(rewriter, loc, index, stride);
    }

    rewriter.replaceOp(op, {desc});
    return matchSuccess();
  }
};

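/// Conversion pattern for `vector.print`, lowered to calls into a small
/// runtime support library (see the comment on matchAndRewrite below). For
/// instance, printing a vector<2xf32> roughly unrolls into calls to
/// print_open, print_f32, print_comma, print_f32, print_close and
/// print_newline.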
class VectorPrintOpConversion : public LLVMOpLowering {
public:
  explicit VectorPrintOpConversion(MLIRContext *context,
                                   LLVMTypeConverter &typeConverter)
      : LLVMOpLowering(vector::PrintOp::getOperationName(), context,
                       typeConverter) {}

  // Proof-of-concept lowering implementation that relies on a small
  // runtime support library, which only needs to provide a few
  // printing methods (single value for all data types, opening/closing
  // bracket, comma, newline). The lowering fully unrolls a vector
  // in terms of these elementary printing operations. The advantage
  // of this approach is that the library can remain unaware of all
  // low-level implementation details of vectors while still supporting
  // output of any shaped and dimensioned vector. Due to full unrolling,
  // this approach is less suited for very large vectors though.
  //
  // TODO(ajcbik): rely solely on libc in future? something else?
  //
  PatternMatchResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto printOp = cast<vector::PrintOp>(op);
    auto adaptor = vector::PrintOpOperandAdaptor(operands);
    Type printType = printOp.getPrintType();

    if (lowering.convertType(printType) == nullptr)
      return matchFailure();

    // Make sure element type has runtime support (currently i32, i64, f32, f64).
    VectorType vectorType = printType.dyn_cast<VectorType>();
    Type eltType = vectorType ? vectorType.getElementType() : printType;
    int64_t rank = vectorType ? vectorType.getRank() : 0;
    Operation *printer;
    if (eltType.isInteger(32))
      printer = getPrintI32(op);
    else if (eltType.isInteger(64))
      printer = getPrintI64(op);
    else if (eltType.isF32())
      printer = getPrintFloat(op);
    else if (eltType.isF64())
      printer = getPrintDouble(op);
    else
      return matchFailure();

    // Unroll vector into elementary print calls.
    emitRanks(rewriter, op, adaptor.source(), vectorType, printer, rank);
    emitCall(rewriter, op->getLoc(), getPrintNewline(op));
    rewriter.eraseOp(op);
    return matchSuccess();
  }

private:
  void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
                 Value value, VectorType vectorType, Operation *printer,
                 int64_t rank) const {
    Location loc = op->getLoc();
    if (rank == 0) {
      emitCall(rewriter, loc, printer, value);
      return;
    }

    emitCall(rewriter, loc, getPrintOpen(op));
    Operation *printComma = getPrintComma(op);
    int64_t dim = vectorType.getDimSize(0);
    for (int64_t d = 0; d < dim; ++d) {
      auto reducedType =
          rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
      auto llvmType = lowering.convertType(
          rank > 1 ? reducedType : vectorType.getElementType());
      Value nestedVal =
          extractOne(rewriter, lowering, loc, value, llvmType, rank, d);
      emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1);
      if (d != dim - 1)
        emitCall(rewriter, loc, printComma);
    }
    emitCall(rewriter, loc, getPrintClose(op));
  }

  // Helper to emit a call.
  static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
                       Operation *ref, ValueRange params = ValueRange()) {
    rewriter.create<LLVM::CallOp>(loc, ArrayRef<Type>{},
                                  rewriter.getSymbolRefAttr(ref), params);
  }

  // Helper for printer method declaration (first hit) and lookup.
  static Operation *getPrint(Operation *op, LLVM::LLVMDialect *dialect,
                             StringRef name, ArrayRef<LLVM::LLVMType> params) {
    auto module = op->getParentOfType<ModuleOp>();
    auto func = module.lookupSymbol<LLVM::LLVMFuncOp>(name);
    if (func)
      return func;
    OpBuilder moduleBuilder(module.getBodyRegion());
    return moduleBuilder.create<LLVM::LLVMFuncOp>(
        op->getLoc(), name,
        LLVM::LLVMType::getFunctionTy(LLVM::LLVMType::getVoidTy(dialect),
                                      params, /*isVarArg=*/false));
  }

  // Helpers for method names.
  Operation *getPrintI32(Operation *op) const {
    LLVM::LLVMDialect *dialect = lowering.getDialect();
    return getPrint(op, dialect, "print_i32",
                    LLVM::LLVMType::getInt32Ty(dialect));
  }
  Operation *getPrintI64(Operation *op) const {
    LLVM::LLVMDialect *dialect = lowering.getDialect();
    return getPrint(op, dialect, "print_i64",
                    LLVM::LLVMType::getInt64Ty(dialect));
  }
  Operation *getPrintFloat(Operation *op) const {
    LLVM::LLVMDialect *dialect = lowering.getDialect();
    return getPrint(op, dialect, "print_f32",
                    LLVM::LLVMType::getFloatTy(dialect));
  }
  Operation *getPrintDouble(Operation *op) const {
    LLVM::LLVMDialect *dialect = lowering.getDialect();
    return getPrint(op, dialect, "print_f64",
                    LLVM::LLVMType::getDoubleTy(dialect));
  }
  Operation *getPrintOpen(Operation *op) const {
    return getPrint(op, lowering.getDialect(), "print_open", {});
  }
  Operation *getPrintClose(Operation *op) const {
    return getPrint(op, lowering.getDialect(), "print_close", {});
  }
  Operation *getPrintComma(Operation *op) const {
    return getPrint(op, lowering.getDialect(), "print_comma", {});
  }
  Operation *getPrintNewline(Operation *op) const {
    return getPrint(op, lowering.getDialect(), "print_newline", {});
  }
};

/// Progressive lowering of StridedSliceOp to either:
///   1. extractelement + insertelement for the 1-D case
///   2. extract + optional strided_slice + insert for the n-D case.
class VectorStridedSliceOpConversion : public OpRewritePattern<StridedSliceOp> {
public:
  using OpRewritePattern<StridedSliceOp>::OpRewritePattern;

  PatternMatchResult matchAndRewrite(StridedSliceOp op,
                                     PatternRewriter &rewriter) const override {
    auto dstType = op.getResult().getType().cast<VectorType>();

    assert(!op.offsets().getValue().empty() && "Unexpected empty offsets");

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = op.sizes().getValue().front().cast<IntegerAttr>().getInt();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    auto elemType = dstType.getElementType();
    assert(elemType.isIntOrIndexOrFloat());
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value res = rewriter.create<SplatOp>(loc, dstType, zero);
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      Value extracted = extractOne(rewriter, loc, op.vector(), off);
      if (op.offsets().getValue().size() > 1) {
        StridedSliceOp stridedSliceOp = rewriter.create<StridedSliceOp>(
            loc, extracted, getI64SubArray(op.offsets(), /* dropFront=*/1),
            getI64SubArray(op.sizes(), /* dropFront=*/1),
            getI64SubArray(op.strides(), /* dropFront=*/1));
        // Call matchAndRewrite recursively from within the pattern. This
        // circumvents the current limitation that a given pattern cannot
        // be called multiple times by the PatternRewrite infrastructure (to
        // avoid infinite recursion, but in this case, infinite recursion
        // cannot happen because the rank is strictly decreasing).
        // TODO(rriddle, nicolasvasilache) Implement something like a hook for
        // a potential function that must decrease and allow the same pattern
        // multiple times.
        auto success = matchAndRewrite(stridedSliceOp, rewriter);
        (void)success;
        assert(success && "Unexpected failure");
        extracted = stridedSliceOp;
      }
      res = insertOne(rewriter, loc, extracted, res, idx);
    }
    rewriter.replaceOp(op, {res});
    return matchSuccess();
  }
};

} // namespace

/// Populate the given list with patterns that convert from Vector to LLVM.
void mlir::populateVectorToLLVMConversionPatterns(
    LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
  MLIRContext *ctx = converter.getDialect()->getContext();
  patterns.insert<VectorFMAOpNDRewritePattern,
                  VectorInsertStridedSliceOpDifferentRankRewritePattern,
                  VectorInsertStridedSliceOpSameRankRewritePattern,
                  VectorStridedSliceOpConversion>(ctx);
  patterns.insert<VectorBroadcastOpConversion, VectorReductionOpConversion,
                  VectorShuffleOpConversion, VectorExtractElementOpConversion,
                  VectorExtractOpConversion, VectorFMAOp1DConversion,
                  VectorInsertElementOpConversion, VectorInsertOpConversion,
                  VectorOuterProductOpConversion, VectorTypeCastOpConversion,
                  VectorPrintOpConversion>(ctx, converter);
}

namespace {
struct LowerVectorToLLVMPass : public ModulePass<LowerVectorToLLVMPass> {
  void runOnModule() override;
};
} // namespace

void LowerVectorToLLVMPass::runOnModule() {
  // Perform progressive lowering of operations on "slices".
  // Folding and DCE get rid of all non-leaking tuple ops.
  {
    OwningRewritePatternList patterns;
    populateVectorSlicesLoweringPatterns(patterns, &getContext());
    applyPatternsGreedily(getModule(), patterns);
  }

  // Convert to the LLVM IR dialect.
  LLVMTypeConverter converter(&getContext());
  OwningRewritePatternList patterns;
  populateVectorToLLVMConversionPatterns(converter, patterns);
  populateStdToLLVMConversionPatterns(converter, patterns);

  ConversionTarget target(getContext());
  target.addLegalDialect<LLVM::LLVMDialect>();
  target.addDynamicallyLegalOp<FuncOp>(
      [&](FuncOp op) { return converter.isSignatureLegal(op.getType()); });
  if (failed(
          applyPartialConversion(getModule(), target, patterns, &converter))) {
    signalPassFailure();
  }
}

OpPassBase<ModuleOp> *mlir::createLowerVectorToLLVMPass() {
  return new LowerVectorToLLVMPass();
}

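// The pass is registered below under the "convert-vector-to-llvm" flag; with
// an mlir-opt-style driver this is typically invoked as, e.g.,
// `mlir-opt -convert-vector-to-llvm` (usage sketch).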
static PassRegistration<LowerVectorToLLVMPass>
    pass("convert-vector-to-llvm",
         "Lower the operations from the vector dialect into the LLVM dialect");