xref: /llvm-project/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp (revision d1a9e9a7cbad4044ccc8e08d0217c23aca417714)
1 //===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
10 
11 #include "mlir/Conversion/LLVMCommon/VectorPattern.h"
12 #include "mlir/Dialect/LLVMIR/FunctionCallUtils.h"
13 #include "mlir/Dialect/LLVMIR/LLVMDialect.h"
14 #include "mlir/Dialect/MemRef/IR/MemRef.h"
15 #include "mlir/Dialect/StandardOps/IR/Ops.h"
16 #include "mlir/Dialect/Vector/VectorOps.h"
17 #include "mlir/IR/BuiltinTypes.h"
18 #include "mlir/Support/MathExtras.h"
19 #include "mlir/Target/LLVMIR/TypeToLLVM.h"
20 #include "mlir/Transforms/DialectConversion.h"
21 
22 using namespace mlir;
23 using namespace mlir::vector;
24 
25 // Helper to reduce vector type by one rank at front.
26 static VectorType reducedVectorTypeFront(VectorType tp) {
27   assert((tp.getRank() > 1) && "unlowerable vector type");
28   return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
29 }
30 
31 // Helper to reduce vector type by *all* but one rank at back.
32 static VectorType reducedVectorTypeBack(VectorType tp) {
33   assert((tp.getRank() > 1) && "unlowerable vector type");
34   return VectorType::get(tp.getShape().take_back(), tp.getElementType());
35 }
36 
37 // Helper that picks the proper sequence for inserting.
38 static Value insertOne(ConversionPatternRewriter &rewriter,
39                        LLVMTypeConverter &typeConverter, Location loc,
40                        Value val1, Value val2, Type llvmType, int64_t rank,
41                        int64_t pos) {
42   if (rank == 1) {
43     auto idxType = rewriter.getIndexType();
44     auto constant = rewriter.create<LLVM::ConstantOp>(
45         loc, typeConverter.convertType(idxType),
46         rewriter.getIntegerAttr(idxType, pos));
47     return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2,
48                                                   constant);
49   }
50   return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2,
51                                               rewriter.getI64ArrayAttr(pos));
52 }
53 
54 // Helper that picks the proper sequence for inserting.
55 static Value insertOne(PatternRewriter &rewriter, Location loc, Value from,
56                        Value into, int64_t offset) {
57   auto vectorType = into.getType().cast<VectorType>();
58   if (vectorType.getRank() > 1)
59     return rewriter.create<InsertOp>(loc, from, into, offset);
60   return rewriter.create<vector::InsertElementOp>(
61       loc, vectorType, from, into,
62       rewriter.create<ConstantIndexOp>(loc, offset));
63 }
64 
65 // Helper that picks the proper sequence for extracting.
66 static Value extractOne(ConversionPatternRewriter &rewriter,
67                         LLVMTypeConverter &typeConverter, Location loc,
68                         Value val, Type llvmType, int64_t rank, int64_t pos) {
69   if (rank == 1) {
70     auto idxType = rewriter.getIndexType();
71     auto constant = rewriter.create<LLVM::ConstantOp>(
72         loc, typeConverter.convertType(idxType),
73         rewriter.getIntegerAttr(idxType, pos));
74     return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val,
75                                                    constant);
76   }
77   return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val,
78                                                rewriter.getI64ArrayAttr(pos));
79 }
80 
81 // Helper that picks the proper sequence for extracting.
82 static Value extractOne(PatternRewriter &rewriter, Location loc, Value vector,
83                         int64_t offset) {
84   auto vectorType = vector.getType().cast<VectorType>();
85   if (vectorType.getRank() > 1)
86     return rewriter.create<ExtractOp>(loc, vector, offset);
87   return rewriter.create<vector::ExtractElementOp>(
88       loc, vectorType.getElementType(), vector,
89       rewriter.create<ConstantIndexOp>(loc, offset));
90 }
91 
92 // Helper that returns a subset of `arrayAttr` as a vector of int64_t.
93 // TODO: Better support for attribute subtype forwarding + slicing.
94 static SmallVector<int64_t, 4> getI64SubArray(ArrayAttr arrayAttr,
95                                               unsigned dropFront = 0,
96                                               unsigned dropBack = 0) {
97   assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds");
98   auto range = arrayAttr.getAsRange<IntegerAttr>();
99   SmallVector<int64_t, 4> res;
100   res.reserve(arrayAttr.size() - dropFront - dropBack);
101   for (auto it = range.begin() + dropFront, eit = range.end() - dropBack;
102        it != eit; ++it)
103     res.push_back((*it).getValue().getSExtValue());
104   return res;
105 }
106 
107 // Helper that returns data layout alignment of a memref.
108 LogicalResult getMemRefAlignment(LLVMTypeConverter &typeConverter,
109                                  MemRefType memrefType, unsigned &align) {
110   Type elementTy = typeConverter.convertType(memrefType.getElementType());
111   if (!elementTy)
112     return failure();
113 
114   // TODO: this should use the MLIR data layout when it becomes available and
115   // stop depending on translation.
116   llvm::LLVMContext llvmContext;
117   align = LLVM::TypeToLLVMIRTranslator(llvmContext)
118               .getPreferredAlignment(elementTy, typeConverter.getDataLayout());
119   return success();
120 }
121 
122 // Return the minimal alignment value that satisfies all the AssumeAlignment
123 // uses of `value`. If no such uses exist, return 1.
124 static unsigned getAssumedAlignment(Value value) {
125   unsigned align = 1;
126   for (auto &u : value.getUses()) {
127     Operation *owner = u.getOwner();
128     if (auto op = dyn_cast<memref::AssumeAlignmentOp>(owner))
129       align = mlir::lcm(align, op.alignment());
130   }
131   return align;
132 }
133 
134 // Helper that returns data layout alignment of a memref associated with a
135 // load, store, scatter, or gather op, including additional information from
136 // assume_alignment calls on the source of the transfer
137 template <class OpAdaptor>
138 LogicalResult getMemRefOpAlignment(LLVMTypeConverter &typeConverter,
139                                    OpAdaptor op, unsigned &align) {
140   if (failed(getMemRefAlignment(typeConverter, op.getMemRefType(), align)))
141     return failure();
142   align = std::max(align, getAssumedAlignment(op.base()));
143   return success();
144 }
145 
146 // Add an index vector component to a base pointer. This almost always succeeds
147 // unless the last stride is non-unit or the memory space is not zero.
148 static LogicalResult getIndexedPtrs(ConversionPatternRewriter &rewriter,
149                                     Location loc, Value memref, Value base,
150                                     Value index, MemRefType memRefType,
151                                     VectorType vType, Value &ptrs) {
152   int64_t offset;
153   SmallVector<int64_t, 4> strides;
154   auto successStrides = getStridesAndOffset(memRefType, strides, offset);
155   if (failed(successStrides) || strides.back() != 1 ||
156       memRefType.getMemorySpaceAsInt() != 0)
157     return failure();
158   auto pType = MemRefDescriptor(memref).getElementPtrType();
159   auto ptrsType = LLVM::getFixedVectorType(pType, vType.getDimSize(0));
160   ptrs = rewriter.create<LLVM::GEPOp>(loc, ptrsType, base, index);
161   return success();
162 }
163 
164 // Casts a strided element pointer to a vector pointer.  The vector pointer
165 // will be in the same address space as the incoming memref type.
166 static Value castDataPtr(ConversionPatternRewriter &rewriter, Location loc,
167                          Value ptr, MemRefType memRefType, Type vt) {
168   auto pType = LLVM::LLVMPointerType::get(vt, memRefType.getMemorySpaceAsInt());
169   return rewriter.create<LLVM::BitcastOp>(loc, pType, ptr);
170 }
171 
172 namespace {
173 
174 /// Conversion pattern for a vector.bitcast.
175 class VectorBitCastOpConversion
176     : public ConvertOpToLLVMPattern<vector::BitCastOp> {
177 public:
178   using ConvertOpToLLVMPattern<vector::BitCastOp>::ConvertOpToLLVMPattern;
179 
180   LogicalResult
181   matchAndRewrite(vector::BitCastOp bitCastOp, ArrayRef<Value> operands,
182                   ConversionPatternRewriter &rewriter) const override {
183     // Only 1-D vectors can be lowered to LLVM.
184     VectorType resultTy = bitCastOp.getType();
185     if (resultTy.getRank() != 1)
186       return failure();
187     Type newResultTy = typeConverter->convertType(resultTy);
188     rewriter.replaceOpWithNewOp<LLVM::BitcastOp>(bitCastOp, newResultTy,
189                                                  operands[0]);
190     return success();
191   }
192 };
193 
194 /// Conversion pattern for a vector.matrix_multiply.
195 /// This is lowered directly to the proper llvm.intr.matrix.multiply.
196 class VectorMatmulOpConversion
197     : public ConvertOpToLLVMPattern<vector::MatmulOp> {
198 public:
199   using ConvertOpToLLVMPattern<vector::MatmulOp>::ConvertOpToLLVMPattern;
200 
201   LogicalResult
202   matchAndRewrite(vector::MatmulOp matmulOp, ArrayRef<Value> operands,
203                   ConversionPatternRewriter &rewriter) const override {
204     auto adaptor = vector::MatmulOpAdaptor(operands);
205     rewriter.replaceOpWithNewOp<LLVM::MatrixMultiplyOp>(
206         matmulOp, typeConverter->convertType(matmulOp.res().getType()),
207         adaptor.lhs(), adaptor.rhs(), matmulOp.lhs_rows(),
208         matmulOp.lhs_columns(), matmulOp.rhs_columns());
209     return success();
210   }
211 };
212 
213 /// Conversion pattern for a vector.flat_transpose.
214 /// This is lowered directly to the proper llvm.intr.matrix.transpose.
215 class VectorFlatTransposeOpConversion
216     : public ConvertOpToLLVMPattern<vector::FlatTransposeOp> {
217 public:
218   using ConvertOpToLLVMPattern<vector::FlatTransposeOp>::ConvertOpToLLVMPattern;
219 
220   LogicalResult
221   matchAndRewrite(vector::FlatTransposeOp transOp, ArrayRef<Value> operands,
222                   ConversionPatternRewriter &rewriter) const override {
223     auto adaptor = vector::FlatTransposeOpAdaptor(operands);
224     rewriter.replaceOpWithNewOp<LLVM::MatrixTransposeOp>(
225         transOp, typeConverter->convertType(transOp.res().getType()),
226         adaptor.matrix(), transOp.rows(), transOp.columns());
227     return success();
228   }
229 };
230 
/// Overloaded utility that replaces a vector.load, vector.store,
/// vector.maskedload and vector.maskedstore with their respective LLVM
/// couterparts.
// Plain load: the vector result type is implied by `ptr`'s pointee type
// (set up by the caller via castDataPtr), so `vectorTy` and `adaptor` are
// intentionally unused in this overload.
static void replaceLoadOrStoreOp(vector::LoadOp loadOp,
                                 vector::LoadOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(loadOp, ptr, align);
}
240 
// Masked load: forwards the mask and pass-through vector from the adaptor to
// the llvm.intr.masked.load counterpart, with explicit alignment.
static void replaceLoadOrStoreOp(vector::MaskedLoadOp loadOp,
                                 vector::MaskedLoadOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      loadOp, vectorTy, ptr, adaptor.mask(), adaptor.pass_thru(), align);
}
248 
// Plain store: stores the converted value through `ptr` with explicit
// alignment; `vectorTy` is unused since the stored value carries its type.
static void replaceLoadOrStoreOp(vector::StoreOp storeOp,
                                 vector::StoreOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(storeOp, adaptor.valueToStore(),
                                             ptr, align);
}
256 
// Masked store: forwards value and mask to the llvm.intr.masked.store
// counterpart, with explicit alignment.
static void replaceLoadOrStoreOp(vector::MaskedStoreOp storeOp,
                                 vector::MaskedStoreOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      storeOp, adaptor.valueToStore(), ptr, adaptor.mask(), align);
}
264 
265 /// Conversion pattern for a vector.load, vector.store, vector.maskedload, and
266 /// vector.maskedstore.
267 template <class LoadOrStoreOp, class LoadOrStoreOpAdaptor>
268 class VectorLoadStoreConversion : public ConvertOpToLLVMPattern<LoadOrStoreOp> {
269 public:
270   using ConvertOpToLLVMPattern<LoadOrStoreOp>::ConvertOpToLLVMPattern;
271 
272   LogicalResult
273   matchAndRewrite(LoadOrStoreOp loadOrStoreOp, ArrayRef<Value> operands,
274                   ConversionPatternRewriter &rewriter) const override {
275     // Only 1-D vectors can be lowered to LLVM.
276     VectorType vectorTy = loadOrStoreOp.getVectorType();
277     if (vectorTy.getRank() > 1)
278       return failure();
279 
280     auto loc = loadOrStoreOp->getLoc();
281     auto adaptor = LoadOrStoreOpAdaptor(operands);
282     MemRefType memRefTy = loadOrStoreOp.getMemRefType();
283 
284     // Resolve alignment.
285     unsigned align;
286     if (failed(getMemRefOpAlignment(*this->getTypeConverter(), loadOrStoreOp,
287                                     align)))
288       return failure();
289 
290     // Resolve address.
291     auto vtype = this->typeConverter->convertType(loadOrStoreOp.getVectorType())
292                      .template cast<VectorType>();
293     Value dataPtr = this->getStridedElementPtr(loc, memRefTy, adaptor.base(),
294                                                adaptor.indices(), rewriter);
295     Value ptr = castDataPtr(rewriter, loc, dataPtr, memRefTy, vtype);
296 
297     replaceLoadOrStoreOp(loadOrStoreOp, adaptor, vtype, ptr, align, rewriter);
298     return success();
299   }
300 };
301 
302 /// Conversion pattern for a vector.gather.
303 class VectorGatherOpConversion
304     : public ConvertOpToLLVMPattern<vector::GatherOp> {
305 public:
306   using ConvertOpToLLVMPattern<vector::GatherOp>::ConvertOpToLLVMPattern;
307 
308   LogicalResult
309   matchAndRewrite(vector::GatherOp gather, ArrayRef<Value> operands,
310                   ConversionPatternRewriter &rewriter) const override {
311     auto loc = gather->getLoc();
312     auto adaptor = vector::GatherOpAdaptor(operands);
313     MemRefType memRefType = gather.getMemRefType();
314 
315     // Resolve alignment.
316     unsigned align;
317     if (failed(getMemRefOpAlignment(*getTypeConverter(), gather, align)))
318       return failure();
319 
320     // Resolve address.
321     Value ptrs;
322     VectorType vType = gather.getVectorType();
323     Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
324                                      adaptor.indices(), rewriter);
325     if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), ptr,
326                               adaptor.index_vec(), memRefType, vType, ptrs)))
327       return failure();
328 
329     // Replace with the gather intrinsic.
330     rewriter.replaceOpWithNewOp<LLVM::masked_gather>(
331         gather, typeConverter->convertType(vType), ptrs, adaptor.mask(),
332         adaptor.pass_thru(), rewriter.getI32IntegerAttr(align));
333     return success();
334   }
335 };
336 
337 /// Conversion pattern for a vector.scatter.
338 class VectorScatterOpConversion
339     : public ConvertOpToLLVMPattern<vector::ScatterOp> {
340 public:
341   using ConvertOpToLLVMPattern<vector::ScatterOp>::ConvertOpToLLVMPattern;
342 
343   LogicalResult
344   matchAndRewrite(vector::ScatterOp scatter, ArrayRef<Value> operands,
345                   ConversionPatternRewriter &rewriter) const override {
346     auto loc = scatter->getLoc();
347     auto adaptor = vector::ScatterOpAdaptor(operands);
348     MemRefType memRefType = scatter.getMemRefType();
349 
350     // Resolve alignment.
351     unsigned align;
352     if (failed(getMemRefOpAlignment(*getTypeConverter(), scatter, align)))
353       return failure();
354 
355     // Resolve address.
356     Value ptrs;
357     VectorType vType = scatter.getVectorType();
358     Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
359                                      adaptor.indices(), rewriter);
360     if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), ptr,
361                               adaptor.index_vec(), memRefType, vType, ptrs)))
362       return failure();
363 
364     // Replace with the scatter intrinsic.
365     rewriter.replaceOpWithNewOp<LLVM::masked_scatter>(
366         scatter, adaptor.valueToStore(), ptrs, adaptor.mask(),
367         rewriter.getI32IntegerAttr(align));
368     return success();
369   }
370 };
371 
372 /// Conversion pattern for a vector.expandload.
373 class VectorExpandLoadOpConversion
374     : public ConvertOpToLLVMPattern<vector::ExpandLoadOp> {
375 public:
376   using ConvertOpToLLVMPattern<vector::ExpandLoadOp>::ConvertOpToLLVMPattern;
377 
378   LogicalResult
379   matchAndRewrite(vector::ExpandLoadOp expand, ArrayRef<Value> operands,
380                   ConversionPatternRewriter &rewriter) const override {
381     auto loc = expand->getLoc();
382     auto adaptor = vector::ExpandLoadOpAdaptor(operands);
383     MemRefType memRefType = expand.getMemRefType();
384 
385     // Resolve address.
386     auto vtype = typeConverter->convertType(expand.getVectorType());
387     Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
388                                      adaptor.indices(), rewriter);
389 
390     rewriter.replaceOpWithNewOp<LLVM::masked_expandload>(
391         expand, vtype, ptr, adaptor.mask(), adaptor.pass_thru());
392     return success();
393   }
394 };
395 
396 /// Conversion pattern for a vector.compressstore.
397 class VectorCompressStoreOpConversion
398     : public ConvertOpToLLVMPattern<vector::CompressStoreOp> {
399 public:
400   using ConvertOpToLLVMPattern<vector::CompressStoreOp>::ConvertOpToLLVMPattern;
401 
402   LogicalResult
403   matchAndRewrite(vector::CompressStoreOp compress, ArrayRef<Value> operands,
404                   ConversionPatternRewriter &rewriter) const override {
405     auto loc = compress->getLoc();
406     auto adaptor = vector::CompressStoreOpAdaptor(operands);
407     MemRefType memRefType = compress.getMemRefType();
408 
409     // Resolve address.
410     Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
411                                      adaptor.indices(), rewriter);
412 
413     rewriter.replaceOpWithNewOp<LLVM::masked_compressstore>(
414         compress, adaptor.valueToStore(), ptr, adaptor.mask());
415     return success();
416   }
417 };
418 
/// Conversion pattern for all vector reductions. Dispatches on the op's
/// string `kind` attribute and the element type (integer/index vs float) to
/// the matching llvm.intr.vector.reduce.* intrinsic. The
/// `reassociateFPReductions` flag is forwarded to the fadd/fmul intrinsics,
/// which take an extra boolean allowing reassociation.
class VectorReductionOpConversion
    : public ConvertOpToLLVMPattern<vector::ReductionOp> {
public:
  explicit VectorReductionOpConversion(LLVMTypeConverter &typeConv,
                                       bool reassociateFPRed)
      : ConvertOpToLLVMPattern<vector::ReductionOp>(typeConv),
        reassociateFPReductions(reassociateFPRed) {}

  LogicalResult
  matchAndRewrite(vector::ReductionOp reductionOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto kind = reductionOp.kind();
    // The scalar result type determines integer vs floating-point lowering.
    Type eltType = reductionOp.dest().getType();
    Type llvmType = typeConverter->convertType(eltType);
    if (eltType.isIntOrIndex()) {
      // Integer reductions: add/mul/min/max/and/or/xor.
      // min/max pick the unsigned intrinsic for index/unsigned element
      // types, the signed intrinsic otherwise.
      if (kind == "add")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_add>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "mul")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_mul>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "min" &&
               (eltType.isIndex() || eltType.isUnsignedInteger()))
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umin>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "min")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smin>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "max" &&
               (eltType.isIndex() || eltType.isUnsignedInteger()))
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umax>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "max")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smax>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "and")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_and>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "or")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_or>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "xor")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_xor>(
            reductionOp, llvmType, operands[0]);
      else
        return failure(); // unsupported integer reduction kind
      return success();
    }

    // Only integer/index (handled above) and float element types are
    // supported.
    if (!eltType.isa<FloatType>())
      return failure();

    // Floating-point reductions: add/mul/min/max
    if (kind == "add") {
      // Optional accumulator (or zero). The fadd intrinsic always takes a
      // start value, so synthesize the additive identity when absent.
      Value acc = operands.size() > 1 ? operands[1]
                                      : rewriter.create<LLVM::ConstantOp>(
                                            reductionOp->getLoc(), llvmType,
                                            rewriter.getZeroAttr(eltType));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fadd>(
          reductionOp, llvmType, acc, operands[0],
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "mul") {
      // Optional accumulator (or one) — the multiplicative identity.
      Value acc = operands.size() > 1
                      ? operands[1]
                      : rewriter.create<LLVM::ConstantOp>(
                            reductionOp->getLoc(), llvmType,
                            rewriter.getFloatAttr(eltType, 1.0));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmul>(
          reductionOp, llvmType, acc, operands[0],
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "min")
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmin>(
          reductionOp, llvmType, operands[0]);
    else if (kind == "max")
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmax>(
          reductionOp, llvmType, operands[0]);
    else
      return failure(); // unsupported floating-point reduction kind
    return success();
  }

private:
  // When true, fadd/fmul reductions may be reassociated (faster, but not
  // bit-identical to sequential reduction order).
  const bool reassociateFPReductions;
};
507 
508 class VectorShuffleOpConversion
509     : public ConvertOpToLLVMPattern<vector::ShuffleOp> {
510 public:
511   using ConvertOpToLLVMPattern<vector::ShuffleOp>::ConvertOpToLLVMPattern;
512 
513   LogicalResult
514   matchAndRewrite(vector::ShuffleOp shuffleOp, ArrayRef<Value> operands,
515                   ConversionPatternRewriter &rewriter) const override {
516     auto loc = shuffleOp->getLoc();
517     auto adaptor = vector::ShuffleOpAdaptor(operands);
518     auto v1Type = shuffleOp.getV1VectorType();
519     auto v2Type = shuffleOp.getV2VectorType();
520     auto vectorType = shuffleOp.getVectorType();
521     Type llvmType = typeConverter->convertType(vectorType);
522     auto maskArrayAttr = shuffleOp.mask();
523 
524     // Bail if result type cannot be lowered.
525     if (!llvmType)
526       return failure();
527 
528     // Get rank and dimension sizes.
529     int64_t rank = vectorType.getRank();
530     assert(v1Type.getRank() == rank);
531     assert(v2Type.getRank() == rank);
532     int64_t v1Dim = v1Type.getDimSize(0);
533 
534     // For rank 1, where both operands have *exactly* the same vector type,
535     // there is direct shuffle support in LLVM. Use it!
536     if (rank == 1 && v1Type == v2Type) {
537       Value llvmShuffleOp = rewriter.create<LLVM::ShuffleVectorOp>(
538           loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
539       rewriter.replaceOp(shuffleOp, llvmShuffleOp);
540       return success();
541     }
542 
543     // For all other cases, insert the individual values individually.
544     Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
545     int64_t insPos = 0;
546     for (auto en : llvm::enumerate(maskArrayAttr)) {
547       int64_t extPos = en.value().cast<IntegerAttr>().getInt();
548       Value value = adaptor.v1();
549       if (extPos >= v1Dim) {
550         extPos -= v1Dim;
551         value = adaptor.v2();
552       }
553       Value extract = extractOne(rewriter, *getTypeConverter(), loc, value,
554                                  llvmType, rank, extPos);
555       insert = insertOne(rewriter, *getTypeConverter(), loc, insert, extract,
556                          llvmType, rank, insPos++);
557     }
558     rewriter.replaceOp(shuffleOp, insert);
559     return success();
560   }
561 };
562 
563 class VectorExtractElementOpConversion
564     : public ConvertOpToLLVMPattern<vector::ExtractElementOp> {
565 public:
566   using ConvertOpToLLVMPattern<
567       vector::ExtractElementOp>::ConvertOpToLLVMPattern;
568 
569   LogicalResult
570   matchAndRewrite(vector::ExtractElementOp extractEltOp,
571                   ArrayRef<Value> operands,
572                   ConversionPatternRewriter &rewriter) const override {
573     auto adaptor = vector::ExtractElementOpAdaptor(operands);
574     auto vectorType = extractEltOp.getVectorType();
575     auto llvmType = typeConverter->convertType(vectorType.getElementType());
576 
577     // Bail if result type cannot be lowered.
578     if (!llvmType)
579       return failure();
580 
581     rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
582         extractEltOp, llvmType, adaptor.vector(), adaptor.position());
583     return success();
584   }
585 };
586 
/// Conversion pattern for a vector.extract. Handles three cases:
///  1. empty position: the extraction is the whole vector (identity);
///  2. vector result: a single llvm.extractvalue on the converted array;
///  3. scalar result: llvm.extractvalue down to the innermost 1-D vector
///     (when rank > 1), then llvm.extractelement with a constant index.
class VectorExtractOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExtractOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractOp extractOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = extractOp->getLoc();
    auto adaptor = vector::ExtractOpAdaptor(operands);
    auto vectorType = extractOp.getVectorType();
    auto resultType = extractOp.getResult().getType();
    auto llvmResultType = typeConverter->convertType(resultType);
    auto positionArrayAttr = extractOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // Extract entire vector. Should be handled by folder, but just to be safe.
    if (positionArrayAttr.empty()) {
      rewriter.replaceOp(extractOp, adaptor.vector());
      return success();
    }

    // One-shot extraction of vector from array (only requires extractvalue).
    if (resultType.isa<VectorType>()) {
      Value extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, llvmResultType, adaptor.vector(), positionArrayAttr);
      rewriter.replaceOp(extractOp, extracted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    // When more than one position index is given, peel off all but the last
    // with a single extractvalue into the trailing 1-D vector type.
    auto *context = extractOp->getContext();
    Value extracted = adaptor.vector();
    auto positionAttrs = positionArrayAttr.getValue();
    if (positionAttrs.size() > 1) {
      auto oneDVectorType = reducedVectorTypeBack(vectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Remaining extraction of element from 1-D LLVM vector.
    // The last position index becomes an i64 constant operand of
    // extractelement.
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    extracted =
        rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
    rewriter.replaceOp(extractOp, extracted);

    return success();
  }
};
644 
645 /// Conversion pattern that turns a vector.fma on a 1-D vector
646 /// into an llvm.intr.fmuladd. This is a trivial 1-1 conversion.
647 /// This does not match vectors of n >= 2 rank.
648 ///
649 /// Example:
650 /// ```
651 ///  vector.fma %a, %a, %a : vector<8xf32>
652 /// ```
653 /// is converted to:
654 /// ```
655 ///  llvm.intr.fmuladd %va, %va, %va:
656 ///    (!llvm."<8 x f32>">, !llvm<"<8 x f32>">, !llvm<"<8 x f32>">)
657 ///    -> !llvm."<8 x f32>">
658 /// ```
659 class VectorFMAOp1DConversion : public ConvertOpToLLVMPattern<vector::FMAOp> {
660 public:
661   using ConvertOpToLLVMPattern<vector::FMAOp>::ConvertOpToLLVMPattern;
662 
663   LogicalResult
664   matchAndRewrite(vector::FMAOp fmaOp, ArrayRef<Value> operands,
665                   ConversionPatternRewriter &rewriter) const override {
666     auto adaptor = vector::FMAOpAdaptor(operands);
667     VectorType vType = fmaOp.getVectorType();
668     if (vType.getRank() != 1)
669       return failure();
670     rewriter.replaceOpWithNewOp<LLVM::FMulAddOp>(fmaOp, adaptor.lhs(),
671                                                  adaptor.rhs(), adaptor.acc());
672     return success();
673   }
674 };
675 
676 class VectorInsertElementOpConversion
677     : public ConvertOpToLLVMPattern<vector::InsertElementOp> {
678 public:
679   using ConvertOpToLLVMPattern<vector::InsertElementOp>::ConvertOpToLLVMPattern;
680 
681   LogicalResult
682   matchAndRewrite(vector::InsertElementOp insertEltOp, ArrayRef<Value> operands,
683                   ConversionPatternRewriter &rewriter) const override {
684     auto adaptor = vector::InsertElementOpAdaptor(operands);
685     auto vectorType = insertEltOp.getDestVectorType();
686     auto llvmType = typeConverter->convertType(vectorType);
687 
688     // Bail if result type cannot be lowered.
689     if (!llvmType)
690       return failure();
691 
692     rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>(
693         insertEltOp, llvmType, adaptor.dest(), adaptor.source(),
694         adaptor.position());
695     return success();
696   }
697 };
698 
// Lowers vector.insert into LLVM dialect ops. Three cases are handled:
//  1. empty position: the source simply replaces the whole destination;
//  2. vector source: a single llvm.insertvalue into the aggregate;
//  3. scalar source: llvm.extractvalue (if nested) + llvm.insertelement +
//     llvm.insertvalue (if nested) on the innermost 1-D vector.
class VectorInsertOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertOp insertOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = insertOp->getLoc();
    auto adaptor = vector::InsertOpAdaptor(operands);
    auto sourceType = insertOp.getSourceType();
    auto destVectorType = insertOp.getDestVectorType();
    auto llvmResultType = typeConverter->convertType(destVectorType);
    auto positionArrayAttr = insertOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // Overwrite entire vector with value. Should be handled by folder, but
    // just to be safe.
    if (positionArrayAttr.empty()) {
      rewriter.replaceOp(insertOp, adaptor.source());
      return success();
    }

    // One-shot insertion of a vector into an array (only requires insertvalue).
    if (sourceType.isa<VectorType>()) {
      Value inserted = rewriter.create<LLVM::InsertValueOp>(
          loc, llvmResultType, adaptor.dest(), adaptor.source(),
          positionArrayAttr);
      rewriter.replaceOp(insertOp, inserted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    // Scalar insertion targets the innermost 1-D vector: for a rank > 1
    // position, first extract that 1-D vector from the aggregate using all
    // but the last position index; the last index is used by insertelement.
    auto *context = insertOp->getContext();
    Value extracted = adaptor.dest();
    auto positionAttrs = positionArrayAttr.getValue();
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto oneDVectorType = destVectorType;
    if (positionAttrs.size() > 1) {
      oneDVectorType = reducedVectorTypeBack(destVectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Insertion of an element into a 1-D LLVM vector.
    // llvm.insertelement takes the position as an SSA value, so materialize
    // the static index as an i64 constant.
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    Value inserted = rewriter.create<LLVM::InsertElementOp>(
        loc, typeConverter->convertType(oneDVectorType), extracted,
        adaptor.source(), constant);

    // Potential insertion of resulting 1-D vector into array.
    // Only this slot of the destination changed, so the updated 1-D vector
    // is written back into the original aggregate at the same prefix.
    if (positionAttrs.size() > 1) {
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      inserted = rewriter.create<LLVM::InsertValueOp>(loc, llvmResultType,
                                                      adaptor.dest(), inserted,
                                                      nMinusOnePositionAttrs);
    }

    rewriter.replaceOp(insertOp, inserted);
    return success();
  }
};
769 
770 /// Rank reducing rewrite for n-D FMA into (n-1)-D FMA where n > 1.
771 ///
772 /// Example:
773 /// ```
774 ///   %d = vector.fma %a, %b, %c : vector<2x4xf32>
775 /// ```
776 /// is rewritten into:
777 /// ```
778 ///  %r = splat %f0: vector<2x4xf32>
779 ///  %va = vector.extractvalue %a[0] : vector<2x4xf32>
780 ///  %vb = vector.extractvalue %b[0] : vector<2x4xf32>
781 ///  %vc = vector.extractvalue %c[0] : vector<2x4xf32>
782 ///  %vd = vector.fma %va, %vb, %vc : vector<4xf32>
783 ///  %r2 = vector.insertvalue %vd, %r[0] : vector<4xf32> into vector<2x4xf32>
784 ///  %va2 = vector.extractvalue %a2[1] : vector<2x4xf32>
785 ///  %vb2 = vector.extractvalue %b2[1] : vector<2x4xf32>
786 ///  %vc2 = vector.extractvalue %c2[1] : vector<2x4xf32>
787 ///  %vd2 = vector.fma %va2, %vb2, %vc2 : vector<4xf32>
788 ///  %r3 = vector.insertvalue %vd2, %r2[1] : vector<4xf32> into vector<2x4xf32>
789 ///  // %r3 holds the final value.
790 /// ```
791 class VectorFMAOpNDRewritePattern : public OpRewritePattern<FMAOp> {
792 public:
793   using OpRewritePattern<FMAOp>::OpRewritePattern;
794 
795   LogicalResult matchAndRewrite(FMAOp op,
796                                 PatternRewriter &rewriter) const override {
797     auto vType = op.getVectorType();
798     if (vType.getRank() < 2)
799       return failure();
800 
801     auto loc = op.getLoc();
802     auto elemType = vType.getElementType();
803     Value zero = rewriter.create<ConstantOp>(loc, elemType,
804                                              rewriter.getZeroAttr(elemType));
805     Value desc = rewriter.create<SplatOp>(loc, vType, zero);
806     for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) {
807       Value extrLHS = rewriter.create<ExtractOp>(loc, op.lhs(), i);
808       Value extrRHS = rewriter.create<ExtractOp>(loc, op.rhs(), i);
809       Value extrACC = rewriter.create<ExtractOp>(loc, op.acc(), i);
810       Value fma = rewriter.create<FMAOp>(loc, extrLHS, extrRHS, extrACC);
811       desc = rewriter.create<InsertOp>(loc, fma, desc, i);
812     }
813     rewriter.replaceOp(op, desc);
814     return success();
815   }
816 };
817 
818 // When ranks are different, InsertStridedSlice needs to extract a properly
819 // ranked vector from the destination vector into which to insert. This pattern
820 // only takes care of this part and forwards the rest of the conversion to
821 // another pattern that converts InsertStridedSlice for operands of the same
822 // rank.
823 //
824 // RewritePattern for InsertStridedSliceOp where source and destination vectors
825 // have different ranks. In this case:
826 //   1. the proper subvector is extracted from the destination vector
827 //   2. a new InsertStridedSlice op is created to insert the source in the
828 //   destination subvector
829 //   3. the destination subvector is inserted back in the proper place
830 //   4. the op is replaced by the result of step 3.
831 // The new InsertStridedSlice from step 2. will be picked up by a
832 // `VectorInsertStridedSliceOpSameRankRewritePattern`.
833 class VectorInsertStridedSliceOpDifferentRankRewritePattern
834     : public OpRewritePattern<InsertStridedSliceOp> {
835 public:
836   using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;
837 
838   LogicalResult matchAndRewrite(InsertStridedSliceOp op,
839                                 PatternRewriter &rewriter) const override {
840     auto srcType = op.getSourceVectorType();
841     auto dstType = op.getDestVectorType();
842 
843     if (op.offsets().getValue().empty())
844       return failure();
845 
846     auto loc = op.getLoc();
847     int64_t rankDiff = dstType.getRank() - srcType.getRank();
848     assert(rankDiff >= 0);
849     if (rankDiff == 0)
850       return failure();
851 
852     int64_t rankRest = dstType.getRank() - rankDiff;
853     // Extract / insert the subvector of matching rank and InsertStridedSlice
854     // on it.
855     Value extracted =
856         rewriter.create<ExtractOp>(loc, op.dest(),
857                                    getI64SubArray(op.offsets(), /*dropFront=*/0,
858                                                   /*dropBack=*/rankRest));
859     // A different pattern will kick in for InsertStridedSlice with matching
860     // ranks.
861     auto stridedSliceInnerOp = rewriter.create<InsertStridedSliceOp>(
862         loc, op.source(), extracted,
863         getI64SubArray(op.offsets(), /*dropFront=*/rankDiff),
864         getI64SubArray(op.strides(), /*dropFront=*/0));
865     rewriter.replaceOpWithNewOp<InsertOp>(
866         op, stridedSliceInnerOp.getResult(), op.dest(),
867         getI64SubArray(op.offsets(), /*dropFront=*/0,
868                        /*dropBack=*/rankRest));
869     return success();
870   }
871 };
872 
// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have the same rank. The op is lowered one outermost dimension at a time:
//   1. each subvector (or scalar) of the source is extracted in turn,
//   2. if it is a vector, the matching subvector is extracted from the
//   destination and a new, rank-reduced InsertStridedSliceOp combines the two,
//   3. the result is inserted back into the running destination at the
//   strided offset,
//   4. the op is replaced by the final accumulated vector.
// The rank-reduced InsertStridedSlice ops created in step 2. are picked up
// again by this same pattern until the scalar level is reached.
class VectorInsertStridedSliceOpSameRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  void initialize() {
    // This pattern creates recursive InsertStridedSliceOp, but the recursion is
    // bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    // Degenerate case (no offsets): nothing to decompose here.
    if (op.offsets().getValue().empty())
      return failure();

    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    // Mismatched ranks are handled by the different-rank pattern.
    if (rankDiff != 0)
      return failure();

    // Full overwrite of the destination: the source is the result.
    if (srcType == dstType) {
      rewriter.replaceOp(op, op.source());
      return success();
    }

    // Strided window along the outermost dimension.
    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = srcType.getShape().front();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    Value res = op.dest();
    // For each slice of the source vector along the most major dimension.
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      // 1. extract the proper subvector (or element) from source
      Value extractedSource = extractOne(rewriter, loc, op.source(), idx);
      if (extractedSource.getType().isa<VectorType>()) {
        // 2. If we have a vector, extract the proper subvector from destination
        // Otherwise we are at the element level and no need to recurse.
        Value extractedDest = extractOne(rewriter, loc, op.dest(), off);
        // 3. Reduce the problem to lowering a new InsertStridedSlice op with
        // smaller rank.
        extractedSource = rewriter.create<InsertStridedSliceOp>(
            loc, extractedSource, extractedDest,
            getI64SubArray(op.offsets(), /* dropFront=*/1),
            getI64SubArray(op.strides(), /* dropFront=*/1));
      }
      // 4. Insert the extractedSource into the res vector.
      res = insertOne(rewriter, loc, extractedSource, res, off);
    }

    rewriter.replaceOp(op, res);
    return success();
  }
};
943 
944 /// Returns the strides if the memory underlying `memRefType` has a contiguous
945 /// static layout.
946 static llvm::Optional<SmallVector<int64_t, 4>>
947 computeContiguousStrides(MemRefType memRefType) {
948   int64_t offset;
949   SmallVector<int64_t, 4> strides;
950   if (failed(getStridesAndOffset(memRefType, strides, offset)))
951     return None;
952   if (!strides.empty() && strides.back() != 1)
953     return None;
954   // If no layout or identity layout, this is contiguous by definition.
955   if (memRefType.getAffineMaps().empty() ||
956       memRefType.getAffineMaps().front().isIdentity())
957     return strides;
958 
959   // Otherwise, we must determine contiguity form shapes. This can only ever
960   // work in static cases because MemRefType is underspecified to represent
961   // contiguous dynamic shapes in other ways than with just empty/identity
962   // layout.
963   auto sizes = memRefType.getShape();
964   for (int index = 0, e = strides.size() - 1; index < e; ++index) {
965     if (ShapedType::isDynamic(sizes[index + 1]) ||
966         ShapedType::isDynamicStrideOrOffset(strides[index]) ||
967         ShapedType::isDynamicStrideOrOffset(strides[index + 1]))
968       return None;
969     if (strides[index] != strides[index + 1] * sizes[index + 1])
970       return None;
971   }
972   return strides;
973 }
974 
// Lowers vector.type_cast by building a new memref descriptor of the target
// type that aliases the source buffer: the allocated/aligned pointers are
// bitcast to the target element pointer type, the offset is reset to 0, and
// the sizes/strides are filled in from the static target shape.
class VectorTypeCastOpConversion
    : public ConvertOpToLLVMPattern<vector::TypeCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::TypeCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::TypeCastOp castOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = castOp->getLoc();
    MemRefType sourceMemRefType =
        castOp.getOperand().getType().cast<MemRefType>();
    MemRefType targetMemRefType = castOp.getType();

    // Only static shape casts supported atm.
    if (!sourceMemRefType.hasStaticShape() ||
        !targetMemRefType.hasStaticShape())
      return failure();

    // The source must already have been lowered to an LLVM struct descriptor.
    auto llvmSourceDescriptorTy =
        operands[0].getType().dyn_cast<LLVM::LLVMStructType>();
    if (!llvmSourceDescriptorTy)
      return failure();
    MemRefDescriptor sourceMemRef(operands[0]);

    // Bail if the target memref type has no LLVM descriptor counterpart.
    auto llvmTargetDescriptorTy = typeConverter->convertType(targetMemRefType)
                                      .dyn_cast_or_null<LLVM::LLVMStructType>();
    if (!llvmTargetDescriptorTy)
      return failure();

    // Only contiguous source buffers supported atm.
    auto sourceStrides = computeContiguousStrides(sourceMemRefType);
    if (!sourceStrides)
      return failure();
    auto targetStrides = computeContiguousStrides(targetMemRefType);
    if (!targetStrides)
      return failure();
    // Only support static strides for now, regardless of contiguity.
    if (llvm::any_of(*targetStrides, [](int64_t stride) {
          return ShapedType::isDynamicStrideOrOffset(stride);
        }))
      return failure();

    auto int64Ty = IntegerType::get(rewriter.getContext(), 64);

    // Create descriptor.
    auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
    Type llvmTargetElementTy = desc.getElementPtrType();
    // Set allocated ptr.
    Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
    allocated =
        rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
    desc.setAllocatedPtr(rewriter, loc, allocated);
    // Set aligned ptr.
    Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
    ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
    desc.setAlignedPtr(rewriter, loc, ptr);
    // Fill offset 0.
    auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
    auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
    desc.setOffset(rewriter, loc, zero);

    // Fill size and stride descriptors in memref.
    // Both are static by construction here, so they become LLVM constants.
    for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
      int64_t index = indexedSize.index();
      auto sizeAttr =
          rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
      auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
      desc.setSize(rewriter, loc, index, size);
      auto strideAttr = rewriter.getIntegerAttr(rewriter.getIndexType(),
                                                (*targetStrides)[index]);
      auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
      desc.setStride(rewriter, loc, index, stride);
    }

    rewriter.replaceOp(castOp, {desc});
    return success();
  }
};
1053 
class VectorPrintOpConversion : public ConvertOpToLLVMPattern<vector::PrintOp> {
public:
  using ConvertOpToLLVMPattern<vector::PrintOp>::ConvertOpToLLVMPattern;

  // Proof-of-concept lowering implementation that relies on a small
  // runtime support library, which only needs to provide a few
  // printing methods (single value for all data types, opening/closing
  // bracket, comma, newline). The lowering fully unrolls a vector
  // in terms of these elementary printing operations. The advantage
  // of this approach is that the library can remain unaware of all
  // low-level implementation details of vectors while still supporting
  // output of any shaped and dimensioned vector. Due to full unrolling,
  // this approach is less suited for very large vectors though.
  //
  // TODO: rely solely on libc in future? something else?
  //
  LogicalResult
  matchAndRewrite(vector::PrintOp printOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::PrintOpAdaptor(operands);
    Type printType = printOp.getPrintType();

    // Bail if the printed type cannot be lowered.
    if (typeConverter->convertType(printType) == nullptr)
      return failure();

    // Make sure element type has runtime support.
    // Select the runtime print function (and any needed integer extension)
    // based on the element type; every path below either sets `printer` or
    // returns failure.
    PrintConversion conversion = PrintConversion::None;
    VectorType vectorType = printType.dyn_cast<VectorType>();
    Type eltType = vectorType ? vectorType.getElementType() : printType;
    Operation *printer;
    if (eltType.isF32()) {
      printer =
          LLVM::lookupOrCreatePrintF32Fn(printOp->getParentOfType<ModuleOp>());
    } else if (eltType.isF64()) {
      printer =
          LLVM::lookupOrCreatePrintF64Fn(printOp->getParentOfType<ModuleOp>());
    } else if (eltType.isIndex()) {
      printer =
          LLVM::lookupOrCreatePrintU64Fn(printOp->getParentOfType<ModuleOp>());
    } else if (auto intTy = eltType.dyn_cast<IntegerType>()) {
      // Integers need a zero or sign extension on the operand
      // (depending on the source type) as well as a signed or
      // unsigned print method. Up to 64-bit is supported.
      unsigned width = intTy.getWidth();
      if (intTy.isUnsigned()) {
        if (width <= 64) {
          if (width < 64)
            conversion = PrintConversion::ZeroExt64;
          printer = LLVM::lookupOrCreatePrintU64Fn(
              printOp->getParentOfType<ModuleOp>());
        } else {
          return failure();
        }
      } else {
        assert(intTy.isSignless() || intTy.isSigned());
        if (width <= 64) {
          // Note that we *always* zero extend booleans (1-bit integers),
          // so that true/false is printed as 1/0 rather than -1/0.
          if (width == 1)
            conversion = PrintConversion::ZeroExt64;
          else if (width < 64)
            conversion = PrintConversion::SignExt64;
          printer = LLVM::lookupOrCreatePrintI64Fn(
              printOp->getParentOfType<ModuleOp>());
        } else {
          return failure();
        }
      }
    } else {
      return failure();
    }

    // Unroll vector into elementary print calls.
    // Scalars are rank 0 and print directly; vectors recurse per dimension.
    int64_t rank = vectorType ? vectorType.getRank() : 0;
    emitRanks(rewriter, printOp, adaptor.source(), vectorType, printer, rank,
              conversion);
    emitCall(rewriter, printOp->getLoc(),
             LLVM::lookupOrCreatePrintNewlineFn(
                 printOp->getParentOfType<ModuleOp>()));
    rewriter.eraseOp(printOp);
    return success();
  }

private:
  // How an integer operand must be widened to 64 bits before printing.
  enum class PrintConversion {
    // clang-format off
    None,
    ZeroExt64,
    SignExt64
    // clang-format on
  };

  // Recursively emits print calls for `value`: at rank 0 a single element is
  // (optionally extended and) printed; at higher ranks an open bracket,
  // comma-separated recursive prints of each subvector, and a close bracket
  // are emitted. Recursion is bounded by `rank`, which strictly decreases.
  void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
                 Value value, VectorType vectorType, Operation *printer,
                 int64_t rank, PrintConversion conversion) const {
    Location loc = op->getLoc();
    if (rank == 0) {
      switch (conversion) {
      case PrintConversion::ZeroExt64:
        value = rewriter.create<ZeroExtendIOp>(
            loc, value, IntegerType::get(rewriter.getContext(), 64));
        break;
      case PrintConversion::SignExt64:
        value = rewriter.create<SignExtendIOp>(
            loc, value, IntegerType::get(rewriter.getContext(), 64));
        break;
      case PrintConversion::None:
        break;
      }
      emitCall(rewriter, loc, printer, value);
      return;
    }

    emitCall(rewriter, loc,
             LLVM::lookupOrCreatePrintOpenFn(op->getParentOfType<ModuleOp>()));
    Operation *printComma =
        LLVM::lookupOrCreatePrintCommaFn(op->getParentOfType<ModuleOp>());
    int64_t dim = vectorType.getDimSize(0);
    for (int64_t d = 0; d < dim; ++d) {
      // At rank 1 the extracted value is a scalar element, not a subvector.
      auto reducedType =
          rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
      auto llvmType = typeConverter->convertType(
          rank > 1 ? reducedType : vectorType.getElementType());
      Value nestedVal = extractOne(rewriter, *getTypeConverter(), loc, value,
                                   llvmType, rank, d);
      emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1,
                conversion);
      if (d != dim - 1)
        emitCall(rewriter, loc, printComma);
    }
    emitCall(rewriter, loc,
             LLVM::lookupOrCreatePrintCloseFn(op->getParentOfType<ModuleOp>()));
  }

  // Helper to emit a call.
  static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
                       Operation *ref, ValueRange params = ValueRange()) {
    rewriter.create<LLVM::CallOp>(loc, TypeRange(),
                                  rewriter.getSymbolRefAttr(ref), params);
  }
};
1195 
/// Progressive lowering of ExtractStridedSliceOp to either:
///   1. express single offset extract as a direct shuffle.
///   2. extract + lower rank strided_slice + insert for the n-D case.
class VectorExtractStridedSliceOpConversion
    : public OpRewritePattern<ExtractStridedSliceOp> {
public:
  using OpRewritePattern<ExtractStridedSliceOp>::OpRewritePattern;

  void initialize() {
    // This pattern creates recursive ExtractStridedSliceOp, but the recursion
    // is bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto dstType = op.getType();

    assert(!op.offsets().getValue().empty() && "Unexpected empty offsets");

    // Offset/size/stride along the outermost dimension drive this step.
    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = op.sizes().getValue().front().cast<IntegerAttr>().getInt();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    auto elemType = dstType.getElementType();
    assert(elemType.isSignlessIntOrIndexOrFloat());

    // Single offset can be more efficiently shuffled.
    // The strided selection becomes an explicit index list for a shuffle of
    // the source vector with itself.
    if (op.offsets().getValue().size() == 1) {
      SmallVector<int64_t, 4> offsets;
      offsets.reserve(size);
      for (int64_t off = offset, e = offset + size * stride; off < e;
           off += stride)
        offsets.push_back(off);
      rewriter.replaceOpWithNewOp<ShuffleOp>(op, dstType, op.vector(),
                                             op.vector(),
                                             rewriter.getI64ArrayAttr(offsets));
      return success();
    }

    // Extract/insert on a lower ranked extract strided slice op.
    // Build the result from a zero splat: each strided position of the
    // source is extracted, sliced recursively with the remaining
    // offsets/sizes/strides, and inserted at the next result index.
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value res = rewriter.create<SplatOp>(loc, dstType, zero);
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      Value one = extractOne(rewriter, loc, op.vector(), off);
      Value extracted = rewriter.create<ExtractStridedSliceOp>(
          loc, one, getI64SubArray(op.offsets(), /* dropFront=*/1),
          getI64SubArray(op.sizes(), /* dropFront=*/1),
          getI64SubArray(op.strides(), /* dropFront=*/1));
      res = insertOne(rewriter, loc, extracted, res, idx);
    }
    rewriter.replaceOp(op, res);
    return success();
  }
};
1256 
1257 } // namespace
1258 
/// Populate the given list with patterns that convert from Vector to LLVM.
void mlir::populateVectorToLLVMConversionPatterns(
    LLVMTypeConverter &converter, RewritePatternSet &patterns,
    bool reassociateFPReductions) {
  MLIRContext *ctx = converter.getDialect()->getContext();
  // Vector-to-vector rank-reducing rewrites; these only need the context,
  // not the type converter.
  patterns.add<VectorFMAOpNDRewritePattern,
               VectorInsertStridedSliceOpDifferentRankRewritePattern,
               VectorInsertStridedSliceOpSameRankRewritePattern,
               VectorExtractStridedSliceOpConversion>(ctx);
  // Reduction lowering is parameterized by whether FP reassociation is
  // permitted.
  patterns.add<VectorReductionOpConversion>(converter, reassociateFPReductions);
  // Conversions from Vector ops to the LLVM dialect proper.
  patterns
      .add<VectorBitCastOpConversion, VectorShuffleOpConversion,
           VectorExtractElementOpConversion, VectorExtractOpConversion,
           VectorFMAOp1DConversion, VectorInsertElementOpConversion,
           VectorInsertOpConversion, VectorPrintOpConversion,
           VectorTypeCastOpConversion,
           VectorLoadStoreConversion<vector::LoadOp, vector::LoadOpAdaptor>,
           VectorLoadStoreConversion<vector::MaskedLoadOp,
                                     vector::MaskedLoadOpAdaptor>,
           VectorLoadStoreConversion<vector::StoreOp, vector::StoreOpAdaptor>,
           VectorLoadStoreConversion<vector::MaskedStoreOp,
                                     vector::MaskedStoreOpAdaptor>,
           VectorGatherOpConversion, VectorScatterOpConversion,
           VectorExpandLoadOpConversion, VectorCompressStoreOpConversion>(
          converter);
  // Transfer ops with rank > 1 are handled by VectorToSCF.
  populateVectorTransferLoweringPatterns(patterns, /*maxTransferRank=*/1);
}
1287 
1288 void mlir::populateVectorToLLVMMatrixConversionPatterns(
1289     LLVMTypeConverter &converter, RewritePatternSet &patterns) {
1290   patterns.add<VectorMatmulOpConversion>(converter);
1291   patterns.add<VectorFlatTransposeOpConversion>(converter);
1292 }
1293