xref: /llvm-project/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp (revision 5017b0f88b81083d3f723e7a8e5cc19b1c4eb366)
//===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"

#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Dialect/LLVMIR/FunctionCallUtils.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/Support/MathExtras.h"
#include "mlir/Target/LLVMIR/TypeTranslation.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;
using namespace mlir::vector;

// Helper to reduce vector type by one rank at front.
static VectorType reducedVectorTypeFront(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
}

// Helper to reduce vector type by *all* but one rank at back.
static VectorType reducedVectorTypeBack(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().take_back(), tp.getElementType());
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(ConversionPatternRewriter &rewriter,
                       LLVMTypeConverter &typeConverter, Location loc,
                       Value val1, Value val2, Type llvmType, int64_t rank,
                       int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2,
                                                  constant);
  }
  return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2,
                                              rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(PatternRewriter &rewriter, Location loc, Value from,
                       Value into, int64_t offset) {
  auto vectorType = into.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<InsertOp>(loc, from, into, offset);
  return rewriter.create<vector::InsertElementOp>(
      loc, vectorType, from, into,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(ConversionPatternRewriter &rewriter,
                        LLVMTypeConverter &typeConverter, Location loc,
                        Value val, Type llvmType, int64_t rank, int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val,
                                                   constant);
  }
  return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val,
                                               rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(PatternRewriter &rewriter, Location loc, Value vector,
                        int64_t offset) {
  auto vectorType = vector.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<ExtractOp>(loc, vector, offset);
  return rewriter.create<vector::ExtractElementOp>(
      loc, vectorType.getElementType(), vector,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that returns a subset of `arrayAttr` as a vector of int64_t.
// TODO: Better support for attribute subtype forwarding + slicing.
static SmallVector<int64_t, 4> getI64SubArray(ArrayAttr arrayAttr,
                                              unsigned dropFront = 0,
                                              unsigned dropBack = 0) {
  assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds");
  auto range = arrayAttr.getAsRange<IntegerAttr>();
  SmallVector<int64_t, 4> res;
  res.reserve(arrayAttr.size() - dropFront - dropBack);
  for (auto it = range.begin() + dropFront, eit = range.end() - dropBack;
       it != eit; ++it)
    res.push_back((*it).getValue().getSExtValue());
  return res;
}

// Helper that returns the data layout alignment of a memref.
LogicalResult getMemRefAlignment(LLVMTypeConverter &typeConverter,
                                 MemRefType memrefType, unsigned &align) {
  Type elementTy = typeConverter.convertType(memrefType.getElementType());
  if (!elementTy)
    return failure();

  // TODO: this should use the MLIR data layout when it becomes available and
  // stop depending on translation.
  llvm::LLVMContext llvmContext;
  align = LLVM::TypeToLLVMIRTranslator(llvmContext)
              .getPreferredAlignment(elementTy, typeConverter.getDataLayout());
  return success();
}

// Return the minimal alignment value that satisfies all the AssumeAlignment
// uses of `value`. If no such uses exist, return 1.
static unsigned getAssumedAlignment(Value value) {
  unsigned align = 1;
  for (auto &u : value.getUses()) {
    Operation *owner = u.getOwner();
    if (auto op = dyn_cast<memref::AssumeAlignmentOp>(owner))
      align = mlir::lcm(align, op.alignment());
  }
  return align;
}

// Helper that returns the data layout alignment of a memref associated with a
// transfer op, including additional information from assume_alignment calls
// on the source of the transfer.
LogicalResult getTransferOpAlignment(LLVMTypeConverter &typeConverter,
                                     VectorTransferOpInterface xfer,
                                     unsigned &align) {
  if (failed(getMemRefAlignment(
          typeConverter, xfer.getShapedType().cast<MemRefType>(), align)))
    return failure();
  align = std::max(align, getAssumedAlignment(xfer.source()));
  return success();
}

// Helper that returns the data layout alignment of a memref associated with a
// load, store, scatter, or gather op, including additional information from
// assume_alignment calls on the base memref of the operation.
template <class OpAdaptor>
LogicalResult getMemRefOpAlignment(LLVMTypeConverter &typeConverter,
                                   OpAdaptor op, unsigned &align) {
  if (failed(getMemRefAlignment(typeConverter, op.getMemRefType(), align)))
    return failure();
  align = std::max(align, getAssumedAlignment(op.base()));
  return success();
}

// Add an index vector component to a base pointer. This fails only when the
// last stride is non-unit or the memory space is not zero.
static LogicalResult getIndexedPtrs(ConversionPatternRewriter &rewriter,
                                    Location loc, Value memref, Value base,
                                    Value index, MemRefType memRefType,
                                    VectorType vType, Value &ptrs) {
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto successStrides = getStridesAndOffset(memRefType, strides, offset);
  if (failed(successStrides) || strides.back() != 1 ||
      memRefType.getMemorySpaceAsInt() != 0)
    return failure();
  auto pType = MemRefDescriptor(memref).getElementPtrType();
  auto ptrsType = LLVM::getFixedVectorType(pType, vType.getDimSize(0));
  ptrs = rewriter.create<LLVM::GEPOp>(loc, ptrsType, base, index);
  return success();
}

// Casts a strided element pointer to a vector pointer. The vector pointer
// will be in the same address space as the incoming memref type.
static Value castDataPtr(ConversionPatternRewriter &rewriter, Location loc,
                         Value ptr, MemRefType memRefType, Type vt) {
  auto pType = LLVM::LLVMPointerType::get(vt, memRefType.getMemorySpaceAsInt());
  return rewriter.create<LLVM::BitcastOp>(loc, pType, ptr);
}

static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferReadOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getTransferOpAlignment(typeConverter, xferOp, align)))
    return failure();
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(xferOp, dataPtr, align);
  return success();
}

static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferReadOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  Type vecTy = typeConverter.convertType(xferOp.getVectorType());
  if (!vecTy)
    return failure();

  auto adaptor = TransferReadOpAdaptor(operands, xferOp->getAttrDictionary());
  Value fill = rewriter.create<SplatOp>(loc, vecTy, adaptor.padding());

  unsigned align;
  if (failed(getTransferOpAlignment(typeConverter, xferOp, align)))
    return failure();
  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      xferOp, vecTy, dataPtr, mask, ValueRange{fill},
      rewriter.getI32IntegerAttr(align));
  return success();
}

static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferWriteOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getTransferOpAlignment(typeConverter, xferOp, align)))
    return failure();
  auto adaptor = TransferWriteOpAdaptor(operands, xferOp->getAttrDictionary());
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(xferOp, adaptor.vector(), dataPtr,
                                             align);
  return success();
}

static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferWriteOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  unsigned align;
  if (failed(getTransferOpAlignment(typeConverter, xferOp, align)))
    return failure();

  auto adaptor = TransferWriteOpAdaptor(operands, xferOp->getAttrDictionary());
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      xferOp, adaptor.vector(), dataPtr, mask,
      rewriter.getI32IntegerAttr(align));
  return success();
}

static TransferReadOpAdaptor getTransferOpAdapter(TransferReadOp xferOp,
                                                  ArrayRef<Value> operands) {
  return TransferReadOpAdaptor(operands, xferOp->getAttrDictionary());
}

static TransferWriteOpAdaptor getTransferOpAdapter(TransferWriteOp xferOp,
                                                   ArrayRef<Value> operands) {
  return TransferWriteOpAdaptor(operands, xferOp->getAttrDictionary());
}

namespace {

/// Conversion pattern for a vector.bitcast.
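/// An illustrative (not test-derived) example:
/// ```
///   %1 = vector.bitcast %0 : vector<4xf32> to vector<2xf64>
/// ```
/// becomes a single `llvm.bitcast` on the converted 1-D vector type.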
class VectorBitCastOpConversion
    : public ConvertOpToLLVMPattern<vector::BitCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::BitCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::BitCastOp bitCastOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    // Only 1-D vectors can be lowered to LLVM.
    VectorType resultTy = bitCastOp.getType();
    if (resultTy.getRank() != 1)
      return failure();
    Type newResultTy = typeConverter->convertType(resultTy);
    rewriter.replaceOpWithNewOp<LLVM::BitcastOp>(bitCastOp, newResultTy,
                                                 operands[0]);
    return success();
  }
};

/// Conversion pattern for a vector.matrix_multiply.
/// This is lowered directly to the proper llvm.intr.matrix.multiply.
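/// An illustrative example of a 2x2 times 2x2 multiply on flattened,
/// row-major operands:
/// ```
///   %c = vector.matrix_multiply %a, %b
///          {lhs_rows = 2 : i32, lhs_columns = 2 : i32, rhs_columns = 2 : i32}
///        : (vector<4xf32>, vector<4xf32>) -> vector<4xf32>
/// ```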
class VectorMatmulOpConversion
    : public ConvertOpToLLVMPattern<vector::MatmulOp> {
public:
  using ConvertOpToLLVMPattern<vector::MatmulOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::MatmulOp matmulOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::MatmulOpAdaptor(operands);
    rewriter.replaceOpWithNewOp<LLVM::MatrixMultiplyOp>(
        matmulOp, typeConverter->convertType(matmulOp.res().getType()),
        adaptor.lhs(), adaptor.rhs(), matmulOp.lhs_rows(),
        matmulOp.lhs_columns(), matmulOp.rhs_columns());
    return success();
  }
};

/// Conversion pattern for a vector.flat_transpose.
/// This is lowered directly to the proper llvm.intr.matrix.transpose.
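/// An illustrative example:
/// ```
///   %1 = vector.flat_transpose %0 {rows = 4 : i32, columns = 4 : i32}
///      : vector<16xf32> -> vector<16xf32>
/// ```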
class VectorFlatTransposeOpConversion
    : public ConvertOpToLLVMPattern<vector::FlatTransposeOp> {
public:
  using ConvertOpToLLVMPattern<vector::FlatTransposeOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FlatTransposeOp transOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::FlatTransposeOpAdaptor(operands);
    rewriter.replaceOpWithNewOp<LLVM::MatrixTransposeOp>(
        transOp, typeConverter->convertType(transOp.res().getType()),
        adaptor.matrix(), transOp.rows(), transOp.columns());
    return success();
  }
};

/// Overloaded utility that replaces a vector.load, vector.store,
/// vector.maskedload and vector.maskedstore with their respective LLVM
/// counterparts.
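/// For instance, an illustrative example:
/// ```
///   %0 = vector.load %base[%i] : memref<?xf32>, vector<8xf32>
/// ```
/// becomes an `llvm.load` through a pointer bitcast to the LLVM vector type,
/// and the masked variants map to `llvm.intr.masked.load` /
/// `llvm.intr.masked.store`.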
static void replaceLoadOrStoreOp(vector::LoadOp loadOp,
                                 vector::LoadOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(loadOp, ptr, align);
}

static void replaceLoadOrStoreOp(vector::MaskedLoadOp loadOp,
                                 vector::MaskedLoadOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      loadOp, vectorTy, ptr, adaptor.mask(), adaptor.pass_thru(), align);
}

static void replaceLoadOrStoreOp(vector::StoreOp storeOp,
                                 vector::StoreOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(storeOp, adaptor.valueToStore(),
                                             ptr, align);
}

static void replaceLoadOrStoreOp(vector::MaskedStoreOp storeOp,
                                 vector::MaskedStoreOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      storeOp, adaptor.valueToStore(), ptr, adaptor.mask(), align);
}

/// Conversion pattern for a vector.load, vector.store, vector.maskedload, and
/// vector.maskedstore.
template <class LoadOrStoreOp, class LoadOrStoreOpAdaptor>
class VectorLoadStoreConversion : public ConvertOpToLLVMPattern<LoadOrStoreOp> {
public:
  using ConvertOpToLLVMPattern<LoadOrStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(LoadOrStoreOp loadOrStoreOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    // Only 1-D vectors can be lowered to LLVM.
    VectorType vectorTy = loadOrStoreOp.getVectorType();
    if (vectorTy.getRank() > 1)
      return failure();

    auto loc = loadOrStoreOp->getLoc();
    auto adaptor = LoadOrStoreOpAdaptor(operands);
    MemRefType memRefTy = loadOrStoreOp.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefOpAlignment(*this->getTypeConverter(), loadOrStoreOp,
                                    align)))
      return failure();

    // Resolve address.
    auto vtype = this->typeConverter->convertType(loadOrStoreOp.getVectorType())
                     .template cast<VectorType>();
    Value dataPtr = this->getStridedElementPtr(loc, memRefTy, adaptor.base(),
                                               adaptor.indices(), rewriter);
    Value ptr = castDataPtr(rewriter, loc, dataPtr, memRefTy, vtype);

    replaceLoadOrStoreOp(loadOrStoreOp, adaptor, vtype, ptr, align, rewriter);
    return success();
  }
};

/// Conversion pattern for a vector.gather.
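/// An illustrative example (the exact assembly form may vary by revision):
/// ```
///   %g = vector.gather %base[%i][%v], %mask, %pass_thru
///      : memref<?xf32>, vector<16xi32>, vector<16xi1>, vector<16xf32>
///        into vector<16xf32>
/// ```
/// becomes an `llvm.intr.masked.gather` on a vector of computed pointers.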
class VectorGatherOpConversion
    : public ConvertOpToLLVMPattern<vector::GatherOp> {
public:
  using ConvertOpToLLVMPattern<vector::GatherOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::GatherOp gather, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = gather->getLoc();
    auto adaptor = vector::GatherOpAdaptor(operands);
    MemRefType memRefType = gather.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefOpAlignment(*getTypeConverter(), gather, align)))
      return failure();

    // Resolve address.
    Value ptrs;
    VectorType vType = gather.getVectorType();
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), ptr,
                              adaptor.index_vec(), memRefType, vType, ptrs)))
      return failure();

    // Replace with the gather intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_gather>(
        gather, typeConverter->convertType(vType), ptrs, adaptor.mask(),
        adaptor.pass_thru(), rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.scatter.
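/// An illustrative example (the exact assembly form may vary by revision):
/// ```
///   vector.scatter %base[%i][%v], %mask, %value
///      : memref<?xf32>, vector<16xi32>, vector<16xi1>, vector<16xf32>
/// ```
/// becomes an `llvm.intr.masked.scatter` on a vector of computed pointers.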
class VectorScatterOpConversion
    : public ConvertOpToLLVMPattern<vector::ScatterOp> {
public:
  using ConvertOpToLLVMPattern<vector::ScatterOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ScatterOp scatter, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = scatter->getLoc();
    auto adaptor = vector::ScatterOpAdaptor(operands);
    MemRefType memRefType = scatter.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefOpAlignment(*getTypeConverter(), scatter, align)))
      return failure();

    // Resolve address.
    Value ptrs;
    VectorType vType = scatter.getVectorType();
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), ptr,
                              adaptor.index_vec(), memRefType, vType, ptrs)))
      return failure();

    // Replace with the scatter intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_scatter>(
        scatter, adaptor.valueToStore(), ptrs, adaptor.mask(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.expandload.
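/// An illustrative example:
/// ```
///   %0 = vector.expandload %base[%i], %mask, %pass_thru
///      : memref<?xf32>, vector<8xi1>, vector<8xf32> into vector<8xf32>
/// ```
/// becomes an `llvm.intr.masked.expandload`, which reads consecutive scalars
/// from memory for each set mask bit.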
class VectorExpandLoadOpConversion
    : public ConvertOpToLLVMPattern<vector::ExpandLoadOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExpandLoadOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExpandLoadOp expand, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = expand->getLoc();
    auto adaptor = vector::ExpandLoadOpAdaptor(operands);
    MemRefType memRefType = expand.getMemRefType();

    // Resolve address.
    auto vtype = typeConverter->convertType(expand.getVectorType());
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);

    rewriter.replaceOpWithNewOp<LLVM::masked_expandload>(
        expand, vtype, ptr, adaptor.mask(), adaptor.pass_thru());
    return success();
  }
};

/// Conversion pattern for a vector.compressstore.
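/// An illustrative example:
/// ```
///   vector.compressstore %base[%i], %mask, %value
///      : memref<?xf32>, vector<8xi1>, vector<8xf32>
/// ```
/// becomes an `llvm.intr.masked.compressstore`, which writes the selected
/// elements to consecutive memory locations.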
class VectorCompressStoreOpConversion
    : public ConvertOpToLLVMPattern<vector::CompressStoreOp> {
public:
  using ConvertOpToLLVMPattern<vector::CompressStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::CompressStoreOp compress, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = compress->getLoc();
    auto adaptor = vector::CompressStoreOpAdaptor(operands);
    MemRefType memRefType = compress.getMemRefType();

    // Resolve address.
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);

    rewriter.replaceOpWithNewOp<LLVM::masked_compressstore>(
        compress, adaptor.valueToStore(), ptr, adaptor.mask());
    return success();
  }
};

/// Conversion pattern for all vector reductions.
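/// An illustrative example:
/// ```
///   %0 = vector.reduction "add", %v : vector<16xf32> into f32
/// ```
/// becomes an `llvm.intr.vector.reduce.fadd` (with a zero accumulator when
/// none is supplied; reassociation is controlled by the
/// `reassociateFPReductions` flag below).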
class VectorReductionOpConversion
    : public ConvertOpToLLVMPattern<vector::ReductionOp> {
public:
  explicit VectorReductionOpConversion(LLVMTypeConverter &typeConv,
                                       bool reassociateFPRed)
      : ConvertOpToLLVMPattern<vector::ReductionOp>(typeConv),
        reassociateFPReductions(reassociateFPRed) {}

  LogicalResult
  matchAndRewrite(vector::ReductionOp reductionOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto kind = reductionOp.kind();
    Type eltType = reductionOp.dest().getType();
    Type llvmType = typeConverter->convertType(eltType);
    if (eltType.isIntOrIndex()) {
      // Integer reductions: add/mul/min/max/and/or/xor.
      if (kind == "add")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_add>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "mul")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_mul>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "min" &&
               (eltType.isIndex() || eltType.isUnsignedInteger()))
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umin>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "min")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smin>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "max" &&
               (eltType.isIndex() || eltType.isUnsignedInteger()))
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umax>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "max")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smax>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "and")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_and>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "or")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_or>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "xor")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_xor>(
            reductionOp, llvmType, operands[0]);
      else
        return failure();
      return success();
    }

    if (!eltType.isa<FloatType>())
      return failure();

    // Floating-point reductions: add/mul/min/max.
    if (kind == "add") {
      // Optional accumulator (or zero).
      Value acc = operands.size() > 1 ? operands[1]
                                      : rewriter.create<LLVM::ConstantOp>(
                                            reductionOp->getLoc(), llvmType,
                                            rewriter.getZeroAttr(eltType));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fadd>(
          reductionOp, llvmType, acc, operands[0],
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "mul") {
      // Optional accumulator (or one).
      Value acc = operands.size() > 1
                      ? operands[1]
                      : rewriter.create<LLVM::ConstantOp>(
                            reductionOp->getLoc(), llvmType,
                            rewriter.getFloatAttr(eltType, 1.0));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmul>(
          reductionOp, llvmType, acc, operands[0],
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "min")
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmin>(
          reductionOp, llvmType, operands[0]);
    else if (kind == "max")
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmax>(
          reductionOp, llvmType, operands[0]);
    else
      return failure();
    return success();
  }

private:
  const bool reassociateFPReductions;
};

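/// Conversion pattern for a vector.shuffle. A rank-1 shuffle with identical
/// operand types maps directly to llvm.shufflevector; all other cases are
/// unrolled into per-position extract/insert pairs. An illustrative example:
/// ```
///   %0 = vector.shuffle %a, %b [0, 3] : vector<2xf32>, vector<2xf32>
/// ```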
class VectorShuffleOpConversion
    : public ConvertOpToLLVMPattern<vector::ShuffleOp> {
public:
  using ConvertOpToLLVMPattern<vector::ShuffleOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ShuffleOp shuffleOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = shuffleOp->getLoc();
    auto adaptor = vector::ShuffleOpAdaptor(operands);
    auto v1Type = shuffleOp.getV1VectorType();
    auto v2Type = shuffleOp.getV2VectorType();
    auto vectorType = shuffleOp.getVectorType();
    Type llvmType = typeConverter->convertType(vectorType);
    auto maskArrayAttr = shuffleOp.mask();

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    // Get rank and dimension sizes.
    int64_t rank = vectorType.getRank();
    assert(v1Type.getRank() == rank);
    assert(v2Type.getRank() == rank);
    int64_t v1Dim = v1Type.getDimSize(0);

    // For rank 1, where both operands have *exactly* the same vector type,
    // there is direct shuffle support in LLVM. Use it!
    if (rank == 1 && v1Type == v2Type) {
      Value llvmShuffleOp = rewriter.create<LLVM::ShuffleVectorOp>(
          loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
      rewriter.replaceOp(shuffleOp, llvmShuffleOp);
      return success();
    }

    // For all other cases, extract and insert the values one by one.
    Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    int64_t insPos = 0;
    for (auto en : llvm::enumerate(maskArrayAttr)) {
      int64_t extPos = en.value().cast<IntegerAttr>().getInt();
      Value value = adaptor.v1();
      if (extPos >= v1Dim) {
        extPos -= v1Dim;
        value = adaptor.v2();
      }
      Value extract = extractOne(rewriter, *getTypeConverter(), loc, value,
                                 llvmType, rank, extPos);
      insert = insertOne(rewriter, *getTypeConverter(), loc, insert, extract,
                         llvmType, rank, insPos++);
    }
    rewriter.replaceOp(shuffleOp, insert);
    return success();
  }
};

class VectorExtractElementOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractElementOp> {
public:
  using ConvertOpToLLVMPattern<
      vector::ExtractElementOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractElementOp extractEltOp,
                  ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::ExtractElementOpAdaptor(operands);
    auto vectorType = extractEltOp.getVectorType();
    auto llvmType = typeConverter->convertType(vectorType.getElementType());

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
        extractEltOp, llvmType, adaptor.vector(), adaptor.position());
    return success();
  }
};

class VectorExtractOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExtractOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractOp extractOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = extractOp->getLoc();
    auto adaptor = vector::ExtractOpAdaptor(operands);
    auto vectorType = extractOp.getVectorType();
    auto resultType = extractOp.getResult().getType();
    auto llvmResultType = typeConverter->convertType(resultType);
    auto positionArrayAttr = extractOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // Extract entire vector. Should be handled by folder, but just to be safe.
    if (positionArrayAttr.empty()) {
      rewriter.replaceOp(extractOp, adaptor.vector());
      return success();
    }

    // One-shot extraction of vector from array (only requires extractvalue).
    if (resultType.isa<VectorType>()) {
      Value extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, llvmResultType, adaptor.vector(), positionArrayAttr);
      rewriter.replaceOp(extractOp, extracted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = extractOp->getContext();
    Value extracted = adaptor.vector();
    auto positionAttrs = positionArrayAttr.getValue();
    if (positionAttrs.size() > 1) {
      auto oneDVectorType = reducedVectorTypeBack(vectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Remaining extraction of an element from a 1-D LLVM vector.
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    extracted =
        rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
    rewriter.replaceOp(extractOp, extracted);

    return success();
  }
};

/// Conversion pattern that turns a vector.fma on a 1-D vector
/// into an llvm.intr.fmuladd. This is a trivial 1-1 conversion.
/// This does not match vectors of n >= 2 rank.
///
/// Example:
/// ```
///  vector.fma %a, %a, %a : vector<8xf32>
/// ```
/// is converted to:
/// ```
///  llvm.intr.fmuladd %va, %va, %va:
///    (!llvm<"<8 x f32>">, !llvm<"<8 x f32>">, !llvm<"<8 x f32>">)
///    -> !llvm<"<8 x f32>">
/// ```
class VectorFMAOp1DConversion : public ConvertOpToLLVMPattern<vector::FMAOp> {
public:
  using ConvertOpToLLVMPattern<vector::FMAOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FMAOp fmaOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::FMAOpAdaptor(operands);
    VectorType vType = fmaOp.getVectorType();
    if (vType.getRank() != 1)
      return failure();
    rewriter.replaceOpWithNewOp<LLVM::FMulAddOp>(fmaOp, adaptor.lhs(),
                                                 adaptor.rhs(), adaptor.acc());
    return success();
  }
};

class VectorInsertElementOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertElementOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertElementOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertElementOp insertEltOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::InsertElementOpAdaptor(operands);
    auto vectorType = insertEltOp.getDestVectorType();
    auto llvmType = typeConverter->convertType(vectorType);

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>(
        insertEltOp, llvmType, adaptor.dest(), adaptor.source(),
        adaptor.position());
    return success();
  }
};

class VectorInsertOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertOp insertOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = insertOp->getLoc();
    auto adaptor = vector::InsertOpAdaptor(operands);
    auto sourceType = insertOp.getSourceType();
    auto destVectorType = insertOp.getDestVectorType();
    auto llvmResultType = typeConverter->convertType(destVectorType);
    auto positionArrayAttr = insertOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // Overwrite entire vector with value. Should be handled by folder, but
    // just to be safe.
    if (positionArrayAttr.empty()) {
      rewriter.replaceOp(insertOp, adaptor.source());
      return success();
    }

    // One-shot insertion of a vector into an array (only requires insertvalue).
    if (sourceType.isa<VectorType>()) {
      Value inserted = rewriter.create<LLVM::InsertValueOp>(
          loc, llvmResultType, adaptor.dest(), adaptor.source(),
          positionArrayAttr);
      rewriter.replaceOp(insertOp, inserted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = insertOp->getContext();
    Value extracted = adaptor.dest();
    auto positionAttrs = positionArrayAttr.getValue();
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto oneDVectorType = destVectorType;
    if (positionAttrs.size() > 1) {
      oneDVectorType = reducedVectorTypeBack(destVectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Insertion of an element into a 1-D LLVM vector.
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    Value inserted = rewriter.create<LLVM::InsertElementOp>(
        loc, typeConverter->convertType(oneDVectorType), extracted,
        adaptor.source(), constant);

    // Potential insertion of resulting 1-D vector into array.
    if (positionAttrs.size() > 1) {
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      inserted = rewriter.create<LLVM::InsertValueOp>(loc, llvmResultType,
                                                      adaptor.dest(), inserted,
                                                      nMinusOnePositionAttrs);
    }

    rewriter.replaceOp(insertOp, inserted);
    return success();
  }
};

/// Rank reducing rewrite for n-D FMA into (n-1)-D FMA where n > 1.
///
/// Example:
/// ```
///   %d = vector.fma %a, %b, %c : vector<2x4xf32>
/// ```
/// is rewritten into:
/// ```
///  %r = splat %f0: vector<2x4xf32>
///  %va = vector.extract %a[0] : vector<2x4xf32>
///  %vb = vector.extract %b[0] : vector<2x4xf32>
///  %vc = vector.extract %c[0] : vector<2x4xf32>
///  %vd = vector.fma %va, %vb, %vc : vector<4xf32>
///  %r2 = vector.insert %vd, %r[0] : vector<4xf32> into vector<2x4xf32>
///  %va2 = vector.extract %a[1] : vector<2x4xf32>
///  %vb2 = vector.extract %b[1] : vector<2x4xf32>
///  %vc2 = vector.extract %c[1] : vector<2x4xf32>
///  %vd2 = vector.fma %va2, %vb2, %vc2 : vector<4xf32>
///  %r3 = vector.insert %vd2, %r2[1] : vector<4xf32> into vector<2x4xf32>
///  // %r3 holds the final value.
/// ```
class VectorFMAOpNDRewritePattern : public OpRewritePattern<FMAOp> {
public:
  using OpRewritePattern<FMAOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(FMAOp op,
                                PatternRewriter &rewriter) const override {
    auto vType = op.getVectorType();
    if (vType.getRank() < 2)
      return failure();

    auto loc = op.getLoc();
    auto elemType = vType.getElementType();
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value desc = rewriter.create<SplatOp>(loc, vType, zero);
    for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) {
      Value extrLHS = rewriter.create<ExtractOp>(loc, op.lhs(), i);
      Value extrRHS = rewriter.create<ExtractOp>(loc, op.rhs(), i);
      Value extrACC = rewriter.create<ExtractOp>(loc, op.acc(), i);
      Value fma = rewriter.create<FMAOp>(loc, extrLHS, extrRHS, extrACC);
      desc = rewriter.create<InsertOp>(loc, fma, desc, i);
    }
    rewriter.replaceOp(op, desc);
    return success();
  }
};

// When ranks are different, InsertStridedSlice needs to extract a properly
// ranked vector from the destination vector into which to insert. This pattern
// only takes care of this part and forwards the rest of the conversion to
// another pattern that converts InsertStridedSlice for operands of the same
// rank.
//
// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have different ranks. In this case:
//   1. the proper subvector is extracted from the destination vector
//   2. a new InsertStridedSlice op is created to insert the source in the
//   destination subvector
//   3. the destination subvector is inserted back in the proper place
//   4. the op is replaced by the result of step 3.
// The new InsertStridedSlice from step 2. will be picked up by a
// `VectorInsertStridedSliceOpSameRankRewritePattern`.
class VectorInsertStridedSliceOpDifferentRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    auto loc = op.getLoc();
    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff == 0)
      return failure();

    int64_t rankRest = dstType.getRank() - rankDiff;
    // Extract / insert the subvector of matching rank and InsertStridedSlice
    // on it.
    Value extracted =
        rewriter.create<ExtractOp>(loc, op.dest(),
                                   getI64SubArray(op.offsets(), /*dropFront=*/0,
                                                  /*dropBack=*/rankRest));
    // A different pattern will kick in for InsertStridedSlice with matching
    // ranks.
    auto stridedSliceInnerOp = rewriter.create<InsertStridedSliceOp>(
        loc, op.source(), extracted,
        getI64SubArray(op.offsets(), /*dropFront=*/rankDiff),
        getI64SubArray(op.strides(), /*dropFront=*/0));
    rewriter.replaceOpWithNewOp<InsertOp>(
        op, stridedSliceInnerOp.getResult(), op.dest(),
        getI64SubArray(op.offsets(), /*dropFront=*/0,
                       /*dropBack=*/rankRest));
    return success();
  }
};

// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have the same rank. In this case, we slice the source along its most major
// dimension and insert each slice into the destination at the proper strided
// offset, recursing with a rank-reduced InsertStridedSliceOp on each vector
// slice until the element level is reached.
class VectorInsertStridedSliceOpSameRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  void initialize() {
    // This pattern creates recursive InsertStridedSliceOp, but the recursion is
    // bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff != 0)
      return failure();

    if (srcType == dstType) {
      rewriter.replaceOp(op, op.source());
      return success();
    }

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = srcType.getShape().front();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    Value res = op.dest();
    // For each slice of the source vector along the most major dimension.
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      // 1. extract the proper subvector (or element) from source
      Value extractedSource = extractOne(rewriter, loc, op.source(), idx);
      if (extractedSource.getType().isa<VectorType>()) {
        // 2. If we have a vector, extract the proper subvector from the
        // destination. Otherwise we are at the element level and there is no
        // need to recurse.
        Value extractedDest = extractOne(rewriter, loc, op.dest(), off);
        // 3. Reduce the problem to lowering a new InsertStridedSlice op with
        // smaller rank.
        extractedSource = rewriter.create<InsertStridedSliceOp>(
            loc, extractedSource, extractedDest,
            getI64SubArray(op.offsets(), /* dropFront=*/1),
            getI64SubArray(op.strides(), /* dropFront=*/1));
      }
      // 4. Insert the extractedSource into the res vector.
      res = insertOne(rewriter, loc, extractedSource, res, off);
    }

    rewriter.replaceOp(op, res);
    return success();
  }
};

/// Return true if the last dimension of the MemRefType has unit stride. Also
/// return true for memrefs with no strides.
static bool isLastMemrefDimUnitStride(MemRefType type) {
  int64_t offset;
  SmallVector<int64_t> strides;
  auto successStrides = getStridesAndOffset(type, strides, offset);
  return succeeded(successStrides) && (strides.empty() || strides.back() == 1);
}

/// Returns the strides if the memory underlying `memRefType` has a contiguous
/// static layout.
static llvm::Optional<SmallVector<int64_t, 4>>
computeContiguousStrides(MemRefType memRefType) {
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  if (failed(getStridesAndOffset(memRefType, strides, offset)))
    return None;
  if (!strides.empty() && strides.back() != 1)
    return None;
  // If no layout or identity layout, this is contiguous by definition.
  if (memRefType.getAffineMaps().empty() ||
      memRefType.getAffineMaps().front().isIdentity())
    return strides;

  // Otherwise, we must determine contiguity from shapes. This can only ever
  // work in static cases because MemRefType cannot represent contiguous
  // dynamic shapes other than through an empty/identity layout.
  auto sizes = memRefType.getShape();
  for (int index = 0, e = strides.size() - 1; index < e; ++index) {
    if (ShapedType::isDynamic(sizes[index + 1]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index + 1]))
      return None;
    if (strides[index] != strides[index + 1] * sizes[index + 1])
      return None;
  }
  return strides;
}

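/// Conversion pattern for a vector.type_cast. An illustrative example:
/// ```
///   %1 = vector.type_cast %0 : memref<8x8xf32> to memref<vector<8x8xf32>>
/// ```
/// Only a new memref descriptor (allocated/aligned pointers, offset, sizes,
/// strides) is built; no data is copied.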
class VectorTypeCastOpConversion
    : public ConvertOpToLLVMPattern<vector::TypeCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::TypeCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::TypeCastOp castOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = castOp->getLoc();
    MemRefType sourceMemRefType =
        castOp.getOperand().getType().cast<MemRefType>();
    MemRefType targetMemRefType = castOp.getType();

    // Only static shape casts supported atm.
    if (!sourceMemRefType.hasStaticShape() ||
        !targetMemRefType.hasStaticShape())
      return failure();

    auto llvmSourceDescriptorTy =
        operands[0].getType().dyn_cast<LLVM::LLVMStructType>();
    if (!llvmSourceDescriptorTy)
      return failure();
    MemRefDescriptor sourceMemRef(operands[0]);

    auto llvmTargetDescriptorTy = typeConverter->convertType(targetMemRefType)
                                      .dyn_cast_or_null<LLVM::LLVMStructType>();
    if (!llvmTargetDescriptorTy)
      return failure();

    // Only contiguous source buffers supported atm.
    auto sourceStrides = computeContiguousStrides(sourceMemRefType);
    if (!sourceStrides)
      return failure();
    auto targetStrides = computeContiguousStrides(targetMemRefType);
    if (!targetStrides)
      return failure();
    // Only support static strides for now, regardless of contiguity.
    if (llvm::any_of(*targetStrides, [](int64_t stride) {
          return ShapedType::isDynamicStrideOrOffset(stride);
        }))
      return failure();

    auto int64Ty = IntegerType::get(rewriter.getContext(), 64);

    // Create descriptor.
    auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
    Type llvmTargetElementTy = desc.getElementPtrType();
    // Set allocated ptr.
    Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
    allocated =
        rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
    desc.setAllocatedPtr(rewriter, loc, allocated);
    // Set aligned ptr.
    Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
    ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
    desc.setAlignedPtr(rewriter, loc, ptr);
    // Fill offset 0.
    auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
    auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
    desc.setOffset(rewriter, loc, zero);

    // Fill size and stride descriptors in memref.
    for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
      int64_t index = indexedSize.index();
      auto sizeAttr =
          rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
      auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
      desc.setSize(rewriter, loc, index, size);
      auto strideAttr = rewriter.getIntegerAttr(rewriter.getIndexType(),
                                                (*targetStrides)[index]);
      auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
      desc.setStride(rewriter, loc, index, stride);
    }

    rewriter.replaceOp(castOp, {desc});
    return success();
  }
};

/// Conversion pattern that converts a 1-D vector transfer read/write op into
/// a masked or unmasked read/write.
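/// An illustrative example:
/// ```
///   %v = vector.transfer_read %A[%i], %pad : memref<?xf32>, vector<8xf32>
/// ```
/// becomes an `llvm.load` (or an `llvm.intr.masked.load` when the op carries
/// a mask) through a pointer cast to the LLVM vector type.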
template <typename ConcreteOp>
class VectorTransferConversion : public ConvertOpToLLVMPattern<ConcreteOp> {
public:
  using ConvertOpToLLVMPattern<ConcreteOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(ConcreteOp xferOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = getTransferOpAdapter(xferOp, operands);

    if (xferOp.getVectorType().getRank() > 1 || xferOp.indices().empty())
      return failure();
    if (xferOp.permutation_map() !=
        AffineMap::getMinorIdentityMap(xferOp.permutation_map().getNumInputs(),
                                       xferOp.getVectorType().getRank(),
                                       xferOp->getContext()))
      return failure();
    auto memRefType = xferOp.getShapedType().template dyn_cast<MemRefType>();
    if (!memRefType)
      return failure();
    // Last dimension must be contiguous. (Otherwise: Use VectorToSCF.)
    if (!isLastMemrefDimUnitStride(memRefType))
      return failure();
    // Out-of-bounds dims are handled by MaterializeTransferMask.
    if (xferOp.hasOutOfBoundsDim())
      return failure();

    auto toLLVMTy = [&](Type t) {
      return this->getTypeConverter()->convertType(t);
    };

    Location loc = xferOp->getLoc();

    if (auto memrefVectorElementType =
            memRefType.getElementType().template dyn_cast<VectorType>()) {
      // Memref has vector element type.
      if (memrefVectorElementType.getElementType() !=
          xferOp.getVectorType().getElementType())
        return failure();
#ifndef NDEBUG
      // Check that the memref vector element type is a suffix of `vectorType`.
      unsigned memrefVecEltRank = memrefVectorElementType.getRank();
      unsigned resultVecRank = xferOp.getVectorType().getRank();
      assert(memrefVecEltRank <= resultVecRank);
      // TODO: Move this to isSuffix in Vector/Utils.h.
      unsigned rankOffset = resultVecRank - memrefVecEltRank;
      auto memrefVecEltShape = memrefVectorElementType.getShape();
      auto resultVecShape = xferOp.getVectorType().getShape();
      for (unsigned i = 0; i < memrefVecEltRank; ++i)
        assert(memrefVecEltShape[i] == resultVecShape[rankOffset + i] &&
               "memref vector element shape should match suffix of vector "
               "result shape.");
#endif // ifndef NDEBUG
    }

    // Get the source/dst address as an LLVM vector pointer.
    VectorType vtp = xferOp.getVectorType();
    Value dataPtr = this->getStridedElementPtr(
        loc, memRefType, adaptor.source(), adaptor.indices(), rewriter);
    Value vectorDataPtr =
        castDataPtr(rewriter, loc, dataPtr, memRefType, toLLVMTy(vtp));

    // Rewrite as an unmasked read / write.
    if (!xferOp.mask())
      return replaceTransferOpWithLoadOrStore(rewriter,
                                              *this->getTypeConverter(), loc,
                                              xferOp, operands, vectorDataPtr);

    // Rewrite as a masked read / write.
    return replaceTransferOpWithMasked(rewriter, *this->getTypeConverter(), loc,
                                       xferOp, operands, vectorDataPtr,
                                       xferOp.mask());
  }
};

class VectorPrintOpConversion : public ConvertOpToLLVMPattern<vector::PrintOp> {
public:
  using ConvertOpToLLVMPattern<vector::PrintOp>::ConvertOpToLLVMPattern;

  // Proof-of-concept lowering implementation that relies on a small
  // runtime support library, which only needs to provide a few
  // printing methods (single value for all data types, opening/closing
  // bracket, comma, newline). The lowering fully unrolls a vector
  // in terms of these elementary printing operations. The advantage
  // of this approach is that the library can remain unaware of all
  // low-level implementation details of vectors while still supporting
  // output of any shaped and dimensioned vector. Due to full unrolling,
  // this approach is less suited for very large vectors though.
  //
  // TODO: rely solely on libc in future? something else?
  //
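  // For instance, an illustrative example:
  //   vector.print %v : vector<4xf32>
  // fully unrolls into a sequence of elementary calls such as printOpen,
  // printF32, printComma, printClose, and printNewline from the runtime
  // support library.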
  LogicalResult
  matchAndRewrite(vector::PrintOp printOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::PrintOpAdaptor(operands);
    Type printType = printOp.getPrintType();

    if (typeConverter->convertType(printType) == nullptr)
      return failure();

    // Make sure element type has runtime support.
    PrintConversion conversion = PrintConversion::None;
    VectorType vectorType = printType.dyn_cast<VectorType>();
    Type eltType = vectorType ? vectorType.getElementType() : printType;
    Operation *printer;
    if (eltType.isF32()) {
      printer =
          LLVM::lookupOrCreatePrintF32Fn(printOp->getParentOfType<ModuleOp>());
    } else if (eltType.isF64()) {
      printer =
          LLVM::lookupOrCreatePrintF64Fn(printOp->getParentOfType<ModuleOp>());
    } else if (eltType.isIndex()) {
      printer =
          LLVM::lookupOrCreatePrintU64Fn(printOp->getParentOfType<ModuleOp>());
    } else if (auto intTy = eltType.dyn_cast<IntegerType>()) {
      // Integers need a zero or sign extension on the operand
      // (depending on the source type) as well as a signed or
      // unsigned print method. Up to 64-bit is supported.
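      // E.g., an i8 value is sign-extended to i64 and printed via printI64,
      // while a ui8 value is zero-extended to i64 and printed via printU64
      // (an illustration of the cases handled below).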
      unsigned width = intTy.getWidth();
      if (intTy.isUnsigned()) {
        if (width <= 64) {
          if (width < 64)
            conversion = PrintConversion::ZeroExt64;
          printer = LLVM::lookupOrCreatePrintU64Fn(
              printOp->getParentOfType<ModuleOp>());
        } else {
          return failure();
        }
      } else {
        assert(intTy.isSignless() || intTy.isSigned());
        if (width <= 64) {
          // Note that we *always* zero extend booleans (1-bit integers),
          // so that true/false is printed as 1/0 rather than -1/0.
          if (width == 1)
            conversion = PrintConversion::ZeroExt64;
          else if (width < 64)
            conversion = PrintConversion::SignExt64;
          printer = LLVM::lookupOrCreatePrintI64Fn(
              printOp->getParentOfType<ModuleOp>());
        } else {
          return failure();
        }
      }
    } else {
      return failure();
    }

    // Unroll vector into elementary print calls.
    int64_t rank = vectorType ? vectorType.getRank() : 0;
    emitRanks(rewriter, printOp, adaptor.source(), vectorType, printer, rank,
              conversion);
    emitCall(rewriter, printOp->getLoc(),
             LLVM::lookupOrCreatePrintNewlineFn(
                 printOp->getParentOfType<ModuleOp>()));
    rewriter.eraseOp(printOp);
    return success();
  }

private:
  enum class PrintConversion {
    // clang-format off
    None,
    ZeroExt64,
    SignExt64
    // clang-format on
  };

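  // Emits the print calls for one rank of the vector, recursing into lower
  // ranks. E.g., assuming the runtime library renders open/comma/close as
  // "( ", ", ", and " )", a vector<2x2xi64> holding [[1, 2], [3, 4]] would
  // print as:
  //   ( ( 1, 2 ), ( 3, 4 ) )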
  void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
                 Value value, VectorType vectorType, Operation *printer,
                 int64_t rank, PrintConversion conversion) const {
    Location loc = op->getLoc();
    if (rank == 0) {
      switch (conversion) {
      case PrintConversion::ZeroExt64:
        value = rewriter.create<ZeroExtendIOp>(
            loc, value, IntegerType::get(rewriter.getContext(), 64));
        break;
      case PrintConversion::SignExt64:
        value = rewriter.create<SignExtendIOp>(
            loc, value, IntegerType::get(rewriter.getContext(), 64));
        break;
      case PrintConversion::None:
        break;
      }
      emitCall(rewriter, loc, printer, value);
      return;
    }

    emitCall(rewriter, loc,
             LLVM::lookupOrCreatePrintOpenFn(op->getParentOfType<ModuleOp>()));
    Operation *printComma =
        LLVM::lookupOrCreatePrintCommaFn(op->getParentOfType<ModuleOp>());
    int64_t dim = vectorType.getDimSize(0);
    for (int64_t d = 0; d < dim; ++d) {
      auto reducedType =
          rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
      auto llvmType = typeConverter->convertType(
          rank > 1 ? reducedType : vectorType.getElementType());
      Value nestedVal = extractOne(rewriter, *getTypeConverter(), loc, value,
                                   llvmType, rank, d);
      emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1,
                conversion);
      if (d != dim - 1)
        emitCall(rewriter, loc, printComma);
    }
    emitCall(rewriter, loc,
             LLVM::lookupOrCreatePrintCloseFn(op->getParentOfType<ModuleOp>()));
  }

  // Helper to emit a call.
  static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
                       Operation *ref, ValueRange params = ValueRange()) {
    rewriter.create<LLVM::CallOp>(loc, TypeRange(),
                                  rewriter.getSymbolRefAttr(ref), params);
  }
};

/// Progressive lowering of ExtractStridedSliceOp to either:
///   1. express single offset extract as a direct shuffle.
///   2. extract + lower rank strided_slice + insert for the n-D case.
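///
/// For example (an illustrative sketch), case 1 rewrites
///   %1 = vector.extract_strided_slice %0
///       {offsets = [2], sizes = [2], strides = [1]}
///     : vector<4xf32> to vector<2xf32>
/// into the direct shuffle
///   %1 = vector.shuffle %0, %0 [2, 3] : vector<4xf32>, vector<4xf32>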
class VectorExtractStridedSliceOpConversion
    : public OpRewritePattern<ExtractStridedSliceOp> {
public:
  using OpRewritePattern<ExtractStridedSliceOp>::OpRewritePattern;

  void initialize() {
    // This pattern creates recursive ExtractStridedSliceOp, but the recursion
    // is bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto dstType = op.getType();

    assert(!op.offsets().getValue().empty() && "Unexpected empty offsets");

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = op.sizes().getValue().front().cast<IntegerAttr>().getInt();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    auto elemType = dstType.getElementType();
    assert(elemType.isSignlessIntOrIndexOrFloat());

    // Single offset can be more efficiently shuffled.
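    // E.g., offset = 1, size = 3, stride = 2 yields the shuffle mask
    // [1, 3, 5]: offset, offset + stride, ..., with 'size' entries in total.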
    if (op.offsets().getValue().size() == 1) {
      SmallVector<int64_t, 4> offsets;
      offsets.reserve(size);
      for (int64_t off = offset, e = offset + size * stride; off < e;
           off += stride)
        offsets.push_back(off);
      rewriter.replaceOpWithNewOp<ShuffleOp>(op, dstType, op.vector(),
                                             op.vector(),
                                             rewriter.getI64ArrayAttr(offsets));
      return success();
    }

    // Extract/insert on a lower-ranked extract_strided_slice op.
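    // E.g., for a 2-D slice, each source row in the strided range is
    // extracted, sliced with the remaining offsets/sizes/strides, and
    // inserted into the zero-splatted result (a sketch of the recursion).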
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value res = rewriter.create<SplatOp>(loc, dstType, zero);
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      Value one = extractOne(rewriter, loc, op.vector(), off);
      Value extracted = rewriter.create<ExtractStridedSliceOp>(
          loc, one, getI64SubArray(op.offsets(), /*dropFront=*/1),
          getI64SubArray(op.sizes(), /*dropFront=*/1),
          getI64SubArray(op.strides(), /*dropFront=*/1));
      res = insertOne(rewriter, loc, extracted, res, idx);
    }
    rewriter.replaceOp(op, res);
    return success();
  }
};

} // namespace

/// Populate the given list with patterns that convert from Vector to LLVM.
void mlir::populateVectorToLLVMConversionPatterns(
    LLVMTypeConverter &converter, RewritePatternSet &patterns,
    bool reassociateFPReductions) {
  MLIRContext *ctx = converter.getDialect()->getContext();
  patterns.add<VectorFMAOpNDRewritePattern,
               VectorInsertStridedSliceOpDifferentRankRewritePattern,
               VectorInsertStridedSliceOpSameRankRewritePattern,
               VectorExtractStridedSliceOpConversion>(ctx);
  patterns.add<VectorReductionOpConversion>(converter, reassociateFPReductions);
  patterns
      .add<VectorBitCastOpConversion, VectorShuffleOpConversion,
           VectorExtractElementOpConversion, VectorExtractOpConversion,
           VectorFMAOp1DConversion, VectorInsertElementOpConversion,
           VectorInsertOpConversion, VectorPrintOpConversion,
           VectorTypeCastOpConversion,
           VectorLoadStoreConversion<vector::LoadOp, vector::LoadOpAdaptor>,
           VectorLoadStoreConversion<vector::MaskedLoadOp,
                                     vector::MaskedLoadOpAdaptor>,
           VectorLoadStoreConversion<vector::StoreOp, vector::StoreOpAdaptor>,
           VectorLoadStoreConversion<vector::MaskedStoreOp,
                                     vector::MaskedStoreOpAdaptor>,
           VectorGatherOpConversion, VectorScatterOpConversion,
           VectorExpandLoadOpConversion, VectorCompressStoreOpConversion,
           VectorTransferConversion<TransferReadOp>,
           VectorTransferConversion<TransferWriteOp>>(converter);
}
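
// A typical use of these populate functions from a conversion pass might look
// as follows (a sketch; the surrounding pass boilerplate is assumed and not
// shown here):
//   LLVMTypeConverter converter(&getContext());
//   RewritePatternSet patterns(&getContext());
//   populateVectorToLLVMConversionPatterns(converter, patterns);
//   populateVectorToLLVMMatrixConversionPatterns(converter, patterns);
//   LLVMConversionTarget target(getContext());
//   if (failed(applyPartialConversion(getOperation(), target,
//                                     std::move(patterns))))
//     signalPassFailure();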

void mlir::populateVectorToLLVMMatrixConversionPatterns(
    LLVMTypeConverter &converter, RewritePatternSet &patterns) {
  patterns.add<VectorMatmulOpConversion>(converter);
  patterns.add<VectorFlatTransposeOpConversion>(converter);
}