//===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"

#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Dialect/LLVMIR/FunctionCallUtils.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/Target/LLVMIR/TypeTranslation.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;
using namespace mlir::vector;

// Helper to reduce vector type by one rank at front.
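// E.g. vector<4x8x16xf32> reduces to vector<8x16xf32>.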
static VectorType reducedVectorTypeFront(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
}

// Helper to reduce vector type by *all* but one rank at back.
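// E.g. vector<4x8x16xf32> reduces to vector<16xf32>.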
static VectorType reducedVectorTypeBack(VectorType tp) {
  assert((tp.getRank() > 1) && "unlowerable vector type");
  return VectorType::get(tp.getShape().take_back(), tp.getElementType());
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(ConversionPatternRewriter &rewriter,
                       LLVMTypeConverter &typeConverter, Location loc,
                       Value val1, Value val2, Type llvmType, int64_t rank,
                       int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2,
                                                  constant);
  }
  return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2,
                                              rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for inserting.
static Value insertOne(PatternRewriter &rewriter, Location loc, Value from,
                       Value into, int64_t offset) {
  auto vectorType = into.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<InsertOp>(loc, from, into, offset);
  return rewriter.create<vector::InsertElementOp>(
      loc, vectorType, from, into,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(ConversionPatternRewriter &rewriter,
                        LLVMTypeConverter &typeConverter, Location loc,
                        Value val, Type llvmType, int64_t rank, int64_t pos) {
  if (rank == 1) {
    auto idxType = rewriter.getIndexType();
    auto constant = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter.convertType(idxType),
        rewriter.getIntegerAttr(idxType, pos));
    return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val,
                                                   constant);
  }
  return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val,
                                               rewriter.getI64ArrayAttr(pos));
}

// Helper that picks the proper sequence for extracting.
static Value extractOne(PatternRewriter &rewriter, Location loc, Value vector,
                        int64_t offset) {
  auto vectorType = vector.getType().cast<VectorType>();
  if (vectorType.getRank() > 1)
    return rewriter.create<ExtractOp>(loc, vector, offset);
  return rewriter.create<vector::ExtractElementOp>(
      loc, vectorType.getElementType(), vector,
      rewriter.create<ConstantIndexOp>(loc, offset));
}

// Helper that returns a subset of `arrayAttr` as a vector of int64_t.
// TODO: Better support for attribute subtype forwarding + slicing.
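// E.g. for arrayAttr = [0, 1, 2, 3], dropFront = 1 and dropBack = 1 yield
// [1, 2].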
static SmallVector<int64_t, 4> getI64SubArray(ArrayAttr arrayAttr,
                                              unsigned dropFront = 0,
                                              unsigned dropBack = 0) {
  assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds");
  auto range = arrayAttr.getAsRange<IntegerAttr>();
  SmallVector<int64_t, 4> res;
  res.reserve(arrayAttr.size() - dropFront - dropBack);
  for (auto it = range.begin() + dropFront, eit = range.end() - dropBack;
       it != eit; ++it)
    res.push_back((*it).getValue().getSExtValue());
  return res;
}

static Value createCastToIndexLike(ConversionPatternRewriter &rewriter,
                                   Location loc, Type targetType, Value value) {
  if (targetType == value.getType())
    return value;

  bool targetIsIndex = targetType.isIndex();
  bool valueIsIndex = value.getType().isIndex();
  if (targetIsIndex ^ valueIsIndex)
    return rewriter.create<IndexCastOp>(loc, targetType, value);

  auto targetIntegerType = targetType.dyn_cast<IntegerType>();
  auto valueIntegerType = value.getType().dyn_cast<IntegerType>();
  assert(targetIntegerType && valueIntegerType &&
         "unexpected cast between types other than integers and index");
  assert(targetIntegerType.getSignedness() == valueIntegerType.getSignedness());

  if (targetIntegerType.getWidth() > valueIntegerType.getWidth())
    return rewriter.create<SignExtendIOp>(loc, targetIntegerType, value);
  return rewriter.create<TruncateIOp>(loc, targetIntegerType, value);
}

// Helper that returns a vector comparison that constructs a mask:
//     mask = [0,1,..,n-1] + [o,o,..,o] < [b,b,..,b]
//
// NOTE: The LLVM::GetActiveLaneMaskOp intrinsic would provide an alternative,
//       much more compact, IR for this operation, but LLVM eventually
//       generates more elaborate instructions for this intrinsic since it
//       is very conservative on the boundary conditions.
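//
// E.g. for n = 4, o = 2 and b = 5 this computes
//     [0,1,2,3] + [2,2,2,2] = [2,3,4,5] < [5,5,5,5] = [1,1,1,0].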
static Value buildVectorComparison(ConversionPatternRewriter &rewriter,
                                   Operation *op, bool enableIndexOptimizations,
                                   int64_t dim, Value b, Value *off = nullptr) {
  auto loc = op->getLoc();
  // If we can assume all indices fit in 32-bit, we perform the vector
  // comparison in 32-bit to get a higher degree of SIMD parallelism.
  // Otherwise we perform the vector comparison using 64-bit indices.
  Value indices;
  Type idxType;
  if (enableIndexOptimizations) {
    indices = rewriter.create<ConstantOp>(
        loc, rewriter.getI32VectorAttr(
                 llvm::to_vector<4>(llvm::seq<int32_t>(0, dim))));
    idxType = rewriter.getI32Type();
  } else {
    indices = rewriter.create<ConstantOp>(
        loc, rewriter.getI64VectorAttr(
                 llvm::to_vector<4>(llvm::seq<int64_t>(0, dim))));
    idxType = rewriter.getI64Type();
  }
  // Add in an offset if requested.
  if (off) {
    Value o = createCastToIndexLike(rewriter, loc, idxType, *off);
    Value ov = rewriter.create<SplatOp>(loc, indices.getType(), o);
    indices = rewriter.create<AddIOp>(loc, ov, indices);
  }
  // Construct the vector comparison.
  Value bound = createCastToIndexLike(rewriter, loc, idxType, b);
  Value bounds = rewriter.create<SplatOp>(loc, indices.getType(), bound);
  return rewriter.create<CmpIOp>(loc, CmpIPredicate::slt, indices, bounds);
}

// Helper that returns data layout alignment of a memref.
LogicalResult getMemRefAlignment(LLVMTypeConverter &typeConverter,
                                 MemRefType memrefType, unsigned &align) {
  Type elementTy = typeConverter.convertType(memrefType.getElementType());
  if (!elementTy)
    return failure();

  // TODO: this should use the MLIR data layout when it becomes available and
  // stop depending on translation.
  llvm::LLVMContext llvmContext;
  align = LLVM::TypeToLLVMIRTranslator(llvmContext)
              .getPreferredAlignment(elementTy, typeConverter.getDataLayout());
  return success();
}

// Add an index vector component to a base pointer. This almost always succeeds
// unless the last stride is non-unit or the memory space is not zero.
static LogicalResult getIndexedPtrs(ConversionPatternRewriter &rewriter,
                                    Location loc, Value memref, Value base,
                                    Value index, MemRefType memRefType,
                                    VectorType vType, Value &ptrs) {
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto successStrides = getStridesAndOffset(memRefType, strides, offset);
  if (failed(successStrides) || strides.back() != 1 ||
      memRefType.getMemorySpaceAsInt() != 0)
    return failure();
  auto pType = MemRefDescriptor(memref).getElementPtrType();
  auto ptrsType = LLVM::getFixedVectorType(pType, vType.getDimSize(0));
  ptrs = rewriter.create<LLVM::GEPOp>(loc, ptrsType, base, index);
  return success();
}

// Casts a strided element pointer to a vector pointer. The vector pointer
// will be in the same address space as the incoming memref type.
static Value castDataPtr(ConversionPatternRewriter &rewriter, Location loc,
                         Value ptr, MemRefType memRefType, Type vt) {
  auto pType = LLVM::LLVMPointerType::get(vt, memRefType.getMemorySpaceAsInt());
  return rewriter.create<LLVM::BitcastOp>(loc, pType, ptr);
}

static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferReadOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getMemRefAlignment(
          typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
    return failure();
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(xferOp, dataPtr, align);
  return success();
}

static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferReadOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  VectorType fillType = xferOp.getVectorType();
  Value fill = rewriter.create<SplatOp>(loc, fillType, xferOp.padding());

  Type vecTy = typeConverter.convertType(xferOp.getVectorType());
  if (!vecTy)
    return failure();

  unsigned align;
  if (failed(getMemRefAlignment(
          typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
    return failure();

  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      xferOp, vecTy, dataPtr, mask, ValueRange{fill},
      rewriter.getI32IntegerAttr(align));
  return success();
}

static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
                                 LLVMTypeConverter &typeConverter, Location loc,
                                 TransferWriteOp xferOp,
                                 ArrayRef<Value> operands, Value dataPtr) {
  unsigned align;
  if (failed(getMemRefAlignment(
          typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
    return failure();
  auto adaptor = TransferWriteOpAdaptor(operands);
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(xferOp, adaptor.vector(), dataPtr,
                                             align);
  return success();
}

static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
                            LLVMTypeConverter &typeConverter, Location loc,
                            TransferWriteOp xferOp, ArrayRef<Value> operands,
                            Value dataPtr, Value mask) {
  unsigned align;
  if (failed(getMemRefAlignment(
          typeConverter, xferOp.getShapedType().cast<MemRefType>(), align)))
    return failure();

  auto adaptor = TransferWriteOpAdaptor(operands);
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      xferOp, adaptor.vector(), dataPtr, mask,
      rewriter.getI32IntegerAttr(align));
  return success();
}

static TransferReadOpAdaptor getTransferOpAdapter(TransferReadOp xferOp,
                                                  ArrayRef<Value> operands) {
  return TransferReadOpAdaptor(operands);
}

static TransferWriteOpAdaptor getTransferOpAdapter(TransferWriteOp xferOp,
                                                   ArrayRef<Value> operands) {
  return TransferWriteOpAdaptor(operands);
}

namespace {

/// Conversion pattern for a vector.bitcast.
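/// Only 1-D vectors are handled at this level. An illustrative example:
/// ```
///   %1 = vector.bitcast %0 : vector<4xf32> to vector<2xf64>
/// ```
/// becomes a single LLVM::BitcastOp on the converted operand.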
class VectorBitCastOpConversion
    : public ConvertOpToLLVMPattern<vector::BitCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::BitCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::BitCastOp bitCastOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    // Only 1-D vectors can be lowered to LLVM.
    VectorType resultTy = bitCastOp.getType();
    if (resultTy.getRank() != 1)
      return failure();
    Type newResultTy = typeConverter->convertType(resultTy);
    rewriter.replaceOpWithNewOp<LLVM::BitcastOp>(bitCastOp, newResultTy,
                                                 operands[0]);
    return success();
  }
};

/// Conversion pattern for a vector.matrix_multiply.
/// This is lowered directly to the proper llvm.intr.matrix.multiply.
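///
/// An illustrative 2x3 times 3x4 multiply on linearized operands (attribute
/// syntax abbreviated):
/// ```
///   %c = vector.matrix_multiply %a, %b
///          { lhs_rows = 2 : i32, lhs_columns = 3 : i32, rhs_columns = 4 : i32 }
///        : (vector<6xf32>, vector<12xf32>) -> vector<8xf32>
/// ```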
class VectorMatmulOpConversion
    : public ConvertOpToLLVMPattern<vector::MatmulOp> {
public:
  using ConvertOpToLLVMPattern<vector::MatmulOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::MatmulOp matmulOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::MatmulOpAdaptor(operands);
    rewriter.replaceOpWithNewOp<LLVM::MatrixMultiplyOp>(
        matmulOp, typeConverter->convertType(matmulOp.res().getType()),
        adaptor.lhs(), adaptor.rhs(), matmulOp.lhs_rows(),
        matmulOp.lhs_columns(), matmulOp.rhs_columns());
    return success();
  }
};

/// Conversion pattern for a vector.flat_transpose.
/// This is lowered directly to the proper llvm.intr.matrix.transpose.
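///
/// An illustrative 4x4 transpose on a linearized vector:
/// ```
///   %t = vector.flat_transpose %m { rows = 4 : i32, columns = 4 : i32 }
///        : vector<16xf32> -> vector<16xf32>
/// ```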
class VectorFlatTransposeOpConversion
    : public ConvertOpToLLVMPattern<vector::FlatTransposeOp> {
public:
  using ConvertOpToLLVMPattern<vector::FlatTransposeOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FlatTransposeOp transOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::FlatTransposeOpAdaptor(operands);
    rewriter.replaceOpWithNewOp<LLVM::MatrixTransposeOp>(
        transOp, typeConverter->convertType(transOp.res().getType()),
        adaptor.matrix(), transOp.rows(), transOp.columns());
    return success();
  }
};

/// Overloaded utility that replaces a vector.load, vector.store,
/// vector.maskedload and vector.maskedstore with their respective LLVM
/// counterparts.
static void replaceLoadOrStoreOp(vector::LoadOp loadOp,
                                 vector::LoadOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::LoadOp>(loadOp, ptr, align);
}

static void replaceLoadOrStoreOp(vector::MaskedLoadOp loadOp,
                                 vector::MaskedLoadOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
      loadOp, vectorTy, ptr, adaptor.mask(), adaptor.pass_thru(), align);
}

static void replaceLoadOrStoreOp(vector::StoreOp storeOp,
                                 vector::StoreOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::StoreOp>(storeOp, adaptor.valueToStore(),
                                             ptr, align);
}

static void replaceLoadOrStoreOp(vector::MaskedStoreOp storeOp,
                                 vector::MaskedStoreOpAdaptor adaptor,
                                 VectorType vectorTy, Value ptr, unsigned align,
                                 ConversionPatternRewriter &rewriter) {
  rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
      storeOp, adaptor.valueToStore(), ptr, adaptor.mask(), align);
}

/// Conversion pattern for a vector.load, vector.store, vector.maskedload, and
/// vector.maskedstore.
template <class LoadOrStoreOp, class LoadOrStoreOpAdaptor>
class VectorLoadStoreConversion : public ConvertOpToLLVMPattern<LoadOrStoreOp> {
public:
  using ConvertOpToLLVMPattern<LoadOrStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(LoadOrStoreOp loadOrStoreOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    // Only 1-D vectors can be lowered to LLVM.
    VectorType vectorTy = loadOrStoreOp.getVectorType();
    if (vectorTy.getRank() > 1)
      return failure();

    auto loc = loadOrStoreOp->getLoc();
    auto adaptor = LoadOrStoreOpAdaptor(operands);
    MemRefType memRefTy = loadOrStoreOp.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(*this->getTypeConverter(), memRefTy, align)))
      return failure();

    // Resolve address.
    auto vtype = this->typeConverter->convertType(loadOrStoreOp.getVectorType())
                     .template cast<VectorType>();
    Value dataPtr = this->getStridedElementPtr(loc, memRefTy, adaptor.base(),
                                               adaptor.indices(), rewriter);
    Value ptr = castDataPtr(rewriter, loc, dataPtr, memRefTy, vtype);

    replaceLoadOrStoreOp(loadOrStoreOp, adaptor, vtype, ptr, align, rewriter);
    return success();
  }
};

/// Conversion pattern for a vector.gather.
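/// An illustrative example (types abbreviated; exact assembly syntax per the
/// vector dialect at this revision):
/// ```
///   %g = vector.gather %base[%i][%index_vec], %mask, %pass_thru
///        : memref<?xf32>, vector<16xi32>, vector<16xi1>, vector<16xf32>
///          into vector<16xf32>
/// ```
/// lowers to an LLVM::masked_gather over a vector of pointers obtained by a
/// single GEP off the strided base address.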
class VectorGatherOpConversion
    : public ConvertOpToLLVMPattern<vector::GatherOp> {
public:
  using ConvertOpToLLVMPattern<vector::GatherOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::GatherOp gather, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = gather->getLoc();
    auto adaptor = vector::GatherOpAdaptor(operands);
    MemRefType memRefType = gather.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(*getTypeConverter(), memRefType, align)))
      return failure();

    // Resolve address.
    Value ptrs;
    VectorType vType = gather.getVectorType();
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), ptr,
                              adaptor.index_vec(), memRefType, vType, ptrs)))
      return failure();

    // Replace with the gather intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_gather>(
        gather, typeConverter->convertType(vType), ptrs, adaptor.mask(),
        adaptor.pass_thru(), rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.scatter.
class VectorScatterOpConversion
    : public ConvertOpToLLVMPattern<vector::ScatterOp> {
public:
  using ConvertOpToLLVMPattern<vector::ScatterOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ScatterOp scatter, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = scatter->getLoc();
    auto adaptor = vector::ScatterOpAdaptor(operands);
    MemRefType memRefType = scatter.getMemRefType();

    // Resolve alignment.
    unsigned align;
    if (failed(getMemRefAlignment(*getTypeConverter(), memRefType, align)))
      return failure();

    // Resolve address.
    Value ptrs;
    VectorType vType = scatter.getVectorType();
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);
    if (failed(getIndexedPtrs(rewriter, loc, adaptor.base(), ptr,
                              adaptor.index_vec(), memRefType, vType, ptrs)))
      return failure();

    // Replace with the scatter intrinsic.
    rewriter.replaceOpWithNewOp<LLVM::masked_scatter>(
        scatter, adaptor.valueToStore(), ptrs, adaptor.mask(),
        rewriter.getI32IntegerAttr(align));
    return success();
  }
};

/// Conversion pattern for a vector.expandload.
class VectorExpandLoadOpConversion
    : public ConvertOpToLLVMPattern<vector::ExpandLoadOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExpandLoadOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExpandLoadOp expand, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = expand->getLoc();
    auto adaptor = vector::ExpandLoadOpAdaptor(operands);
    MemRefType memRefType = expand.getMemRefType();

    // Resolve address.
    auto vtype = typeConverter->convertType(expand.getVectorType());
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);

    rewriter.replaceOpWithNewOp<LLVM::masked_expandload>(
        expand, vtype, ptr, adaptor.mask(), adaptor.pass_thru());
    return success();
  }
};

/// Conversion pattern for a vector.compressstore.
class VectorCompressStoreOpConversion
    : public ConvertOpToLLVMPattern<vector::CompressStoreOp> {
public:
  using ConvertOpToLLVMPattern<vector::CompressStoreOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::CompressStoreOp compress, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = compress->getLoc();
    auto adaptor = vector::CompressStoreOpAdaptor(operands);
    MemRefType memRefType = compress.getMemRefType();

    // Resolve address.
    Value ptr = getStridedElementPtr(loc, memRefType, adaptor.base(),
                                     adaptor.indices(), rewriter);

    rewriter.replaceOpWithNewOp<LLVM::masked_compressstore>(
        compress, adaptor.valueToStore(), ptr, adaptor.mask());
    return success();
  }
};

/// Conversion pattern for all vector reductions.
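/// E.g., with the string `kind` attribute used at this revision:
/// ```
///   %0 = vector.reduction "add", %v : vector<16xf32> into f32
/// ```
/// lowers to LLVM::vector_reduce_fadd, with the accumulator taken from the
/// optional operand or materialized as a zero constant.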
class VectorReductionOpConversion
    : public ConvertOpToLLVMPattern<vector::ReductionOp> {
public:
  explicit VectorReductionOpConversion(LLVMTypeConverter &typeConv,
                                       bool reassociateFPRed)
      : ConvertOpToLLVMPattern<vector::ReductionOp>(typeConv),
        reassociateFPReductions(reassociateFPRed) {}

  LogicalResult
  matchAndRewrite(vector::ReductionOp reductionOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto kind = reductionOp.kind();
    Type eltType = reductionOp.dest().getType();
    Type llvmType = typeConverter->convertType(eltType);
    if (eltType.isIntOrIndex()) {
      // Integer reductions: add/mul/min/max/and/or/xor.
      if (kind == "add")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_add>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "mul")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_mul>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "min" &&
               (eltType.isIndex() || eltType.isUnsignedInteger()))
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umin>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "min")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smin>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "max" &&
               (eltType.isIndex() || eltType.isUnsignedInteger()))
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_umax>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "max")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_smax>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "and")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_and>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "or")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_or>(
            reductionOp, llvmType, operands[0]);
      else if (kind == "xor")
        rewriter.replaceOpWithNewOp<LLVM::vector_reduce_xor>(
            reductionOp, llvmType, operands[0]);
      else
        return failure();
      return success();
    }

    if (!eltType.isa<FloatType>())
      return failure();

    // Floating-point reductions: add/mul/min/max.
    if (kind == "add") {
      // Optional accumulator (or zero).
      Value acc = operands.size() > 1 ? operands[1]
                                      : rewriter.create<LLVM::ConstantOp>(
                                            reductionOp->getLoc(), llvmType,
                                            rewriter.getZeroAttr(eltType));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fadd>(
          reductionOp, llvmType, acc, operands[0],
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "mul") {
      // Optional accumulator (or one).
      Value acc = operands.size() > 1
                      ? operands[1]
                      : rewriter.create<LLVM::ConstantOp>(
                            reductionOp->getLoc(), llvmType,
                            rewriter.getFloatAttr(eltType, 1.0));
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmul>(
          reductionOp, llvmType, acc, operands[0],
          rewriter.getBoolAttr(reassociateFPReductions));
    } else if (kind == "min")
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmin>(
          reductionOp, llvmType, operands[0]);
    else if (kind == "max")
      rewriter.replaceOpWithNewOp<LLVM::vector_reduce_fmax>(
          reductionOp, llvmType, operands[0]);
    else
      return failure();
    return success();
  }

private:
  const bool reassociateFPReductions;
};

/// Conversion pattern for a vector.create_mask (1-D only).
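/// E.g. `%m = vector.create_mask %b : vector<4xi1>` becomes the comparison
/// [0, 1, 2, 3] < [%b, %b, %b, %b] built by buildVectorComparison above.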
class VectorCreateMaskOpConversion
    : public ConvertOpToLLVMPattern<vector::CreateMaskOp> {
public:
  explicit VectorCreateMaskOpConversion(LLVMTypeConverter &typeConv,
                                        bool enableIndexOpt)
      : ConvertOpToLLVMPattern<vector::CreateMaskOp>(typeConv),
        enableIndexOptimizations(enableIndexOpt) {}

  LogicalResult
  matchAndRewrite(vector::CreateMaskOp op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto dstType = op.getType();
    int64_t rank = dstType.getRank();
    if (rank == 1) {
      rewriter.replaceOp(
          op, buildVectorComparison(rewriter, op, enableIndexOptimizations,
                                    dstType.getDimSize(0), operands[0]));
      return success();
    }
    return failure();
  }

private:
  const bool enableIndexOptimizations;
};

class VectorShuffleOpConversion
    : public ConvertOpToLLVMPattern<vector::ShuffleOp> {
public:
  using ConvertOpToLLVMPattern<vector::ShuffleOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ShuffleOp shuffleOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = shuffleOp->getLoc();
    auto adaptor = vector::ShuffleOpAdaptor(operands);
    auto v1Type = shuffleOp.getV1VectorType();
    auto v2Type = shuffleOp.getV2VectorType();
    auto vectorType = shuffleOp.getVectorType();
    Type llvmType = typeConverter->convertType(vectorType);
    auto maskArrayAttr = shuffleOp.mask();

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    // Get rank and dimension sizes.
    int64_t rank = vectorType.getRank();
    assert(v1Type.getRank() == rank);
    assert(v2Type.getRank() == rank);
    int64_t v1Dim = v1Type.getDimSize(0);

    // For rank 1, where both operands have *exactly* the same vector type,
    // there is direct shuffle support in LLVM. Use it!
    if (rank == 1 && v1Type == v2Type) {
      Value llvmShuffleOp = rewriter.create<LLVM::ShuffleVectorOp>(
          loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
      rewriter.replaceOp(shuffleOp, llvmShuffleOp);
      return success();
    }

    // For all other cases, extract and insert the values one by one.
    Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
    int64_t insPos = 0;
    for (auto en : llvm::enumerate(maskArrayAttr)) {
      int64_t extPos = en.value().cast<IntegerAttr>().getInt();
      Value value = adaptor.v1();
      if (extPos >= v1Dim) {
        extPos -= v1Dim;
        value = adaptor.v2();
      }
      Value extract = extractOne(rewriter, *getTypeConverter(), loc, value,
                                 llvmType, rank, extPos);
      insert = insertOne(rewriter, *getTypeConverter(), loc, insert, extract,
                         llvmType, rank, insPos++);
    }
    rewriter.replaceOp(shuffleOp, insert);
    return success();
  }
};

class VectorExtractElementOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractElementOp> {
public:
  using ConvertOpToLLVMPattern<
      vector::ExtractElementOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractElementOp extractEltOp,
                  ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::ExtractElementOpAdaptor(operands);
    auto vectorType = extractEltOp.getVectorType();
    auto llvmType = typeConverter->convertType(vectorType.getElementType());

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
        extractEltOp, llvmType, adaptor.vector(), adaptor.position());
    return success();
  }
};

class VectorExtractOpConversion
    : public ConvertOpToLLVMPattern<vector::ExtractOp> {
public:
  using ConvertOpToLLVMPattern<vector::ExtractOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::ExtractOp extractOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = extractOp->getLoc();
    auto adaptor = vector::ExtractOpAdaptor(operands);
    auto vectorType = extractOp.getVectorType();
    auto resultType = extractOp.getResult().getType();
    auto llvmResultType = typeConverter->convertType(resultType);
    auto positionArrayAttr = extractOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // One-shot extraction of vector from array (only requires extractvalue).
    if (resultType.isa<VectorType>()) {
      Value extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, llvmResultType, adaptor.vector(), positionArrayAttr);
      rewriter.replaceOp(extractOp, extracted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = extractOp->getContext();
    Value extracted = adaptor.vector();
    auto positionAttrs = positionArrayAttr.getValue();
    if (positionAttrs.size() > 1) {
      auto oneDVectorType = reducedVectorTypeBack(vectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Remaining extraction of element from 1-D LLVM vector.
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    extracted =
        rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
    rewriter.replaceOp(extractOp, extracted);

    return success();
  }
};
/// Conversion pattern that turns a vector.fma on a 1-D vector
/// into an llvm.intr.fmuladd. This is a trivial 1-1 conversion.
/// This does not match vectors of rank n >= 2.
///
/// Example:
/// ```
///  vector.fma %a, %a, %a : vector<8xf32>
/// ```
/// is converted to:
/// ```
///  llvm.intr.fmuladd %va, %va, %va:
///    (!llvm<"<8 x f32>">, !llvm<"<8 x f32>">, !llvm<"<8 x f32>">)
///    -> !llvm<"<8 x f32>">
/// ```
class VectorFMAOp1DConversion : public ConvertOpToLLVMPattern<vector::FMAOp> {
public:
  using ConvertOpToLLVMPattern<vector::FMAOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::FMAOp fmaOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::FMAOpAdaptor(operands);
    VectorType vType = fmaOp.getVectorType();
    if (vType.getRank() != 1)
      return failure();
    rewriter.replaceOpWithNewOp<LLVM::FMulAddOp>(fmaOp, adaptor.lhs(),
                                                 adaptor.rhs(), adaptor.acc());
    return success();
  }
};

class VectorInsertElementOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertElementOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertElementOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertElementOp insertEltOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::InsertElementOpAdaptor(operands);
    auto vectorType = insertEltOp.getDestVectorType();
    auto llvmType = typeConverter->convertType(vectorType);

    // Bail if result type cannot be lowered.
    if (!llvmType)
      return failure();

    rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>(
        insertEltOp, llvmType, adaptor.dest(), adaptor.source(),
        adaptor.position());
    return success();
  }
};

class VectorInsertOpConversion
    : public ConvertOpToLLVMPattern<vector::InsertOp> {
public:
  using ConvertOpToLLVMPattern<vector::InsertOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::InsertOp insertOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = insertOp->getLoc();
    auto adaptor = vector::InsertOpAdaptor(operands);
    auto sourceType = insertOp.getSourceType();
    auto destVectorType = insertOp.getDestVectorType();
    auto llvmResultType = typeConverter->convertType(destVectorType);
    auto positionArrayAttr = insertOp.position();

    // Bail if result type cannot be lowered.
    if (!llvmResultType)
      return failure();

    // One-shot insertion of a vector into an array (only requires insertvalue).
    if (sourceType.isa<VectorType>()) {
      Value inserted = rewriter.create<LLVM::InsertValueOp>(
          loc, llvmResultType, adaptor.dest(), adaptor.source(),
          positionArrayAttr);
      rewriter.replaceOp(insertOp, inserted);
      return success();
    }

    // Potential extraction of 1-D vector from array.
    auto *context = insertOp->getContext();
    Value extracted = adaptor.dest();
    auto positionAttrs = positionArrayAttr.getValue();
    auto position = positionAttrs.back().cast<IntegerAttr>();
    auto oneDVectorType = destVectorType;
    if (positionAttrs.size() > 1) {
      oneDVectorType = reducedVectorTypeBack(destVectorType);
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      extracted = rewriter.create<LLVM::ExtractValueOp>(
          loc, typeConverter->convertType(oneDVectorType), extracted,
          nMinusOnePositionAttrs);
    }

    // Insertion of an element into a 1-D LLVM vector.
    auto i64Type = IntegerType::get(rewriter.getContext(), 64);
    auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
    Value inserted = rewriter.create<LLVM::InsertElementOp>(
        loc, typeConverter->convertType(oneDVectorType), extracted,
        adaptor.source(), constant);

    // Potential insertion of resulting 1-D vector into array.
    if (positionAttrs.size() > 1) {
      auto nMinusOnePositionAttrs =
          ArrayAttr::get(context, positionAttrs.drop_back());
      inserted = rewriter.create<LLVM::InsertValueOp>(loc, llvmResultType,
                                                      adaptor.dest(), inserted,
                                                      nMinusOnePositionAttrs);
    }

    rewriter.replaceOp(insertOp, inserted);
    return success();
  }
};

/// Rank reducing rewrite for n-D FMA into (n-1)-D FMA where n > 1.
///
/// Example:
/// ```
///   %d = vector.fma %a, %b, %c : vector<2x4xf32>
/// ```
/// is rewritten into:
/// ```
///  %r = splat %f0: vector<2x4xf32>
///  %va = vector.extract %a[0] : vector<2x4xf32>
///  %vb = vector.extract %b[0] : vector<2x4xf32>
///  %vc = vector.extract %c[0] : vector<2x4xf32>
///  %vd = vector.fma %va, %vb, %vc : vector<4xf32>
///  %r2 = vector.insert %vd, %r[0] : vector<4xf32> into vector<2x4xf32>
///  %va2 = vector.extract %a[1] : vector<2x4xf32>
///  %vb2 = vector.extract %b[1] : vector<2x4xf32>
///  %vc2 = vector.extract %c[1] : vector<2x4xf32>
///  %vd2 = vector.fma %va2, %vb2, %vc2 : vector<4xf32>
///  %r3 = vector.insert %vd2, %r2[1] : vector<4xf32> into vector<2x4xf32>
///  // %r3 holds the final value.
/// ```
class VectorFMAOpNDRewritePattern : public OpRewritePattern<FMAOp> {
public:
  using OpRewritePattern<FMAOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(FMAOp op,
                                PatternRewriter &rewriter) const override {
    auto vType = op.getVectorType();
    if (vType.getRank() < 2)
      return failure();

    auto loc = op.getLoc();
    auto elemType = vType.getElementType();
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value desc = rewriter.create<SplatOp>(loc, vType, zero);
    for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) {
      Value extrLHS = rewriter.create<ExtractOp>(loc, op.lhs(), i);
      Value extrRHS = rewriter.create<ExtractOp>(loc, op.rhs(), i);
      Value extrACC = rewriter.create<ExtractOp>(loc, op.acc(), i);
      Value fma = rewriter.create<FMAOp>(loc, extrLHS, extrRHS, extrACC);
      desc = rewriter.create<InsertOp>(loc, fma, desc, i);
    }
    rewriter.replaceOp(op, desc);
    return success();
  }
};

// When ranks are different, InsertStridedSlice needs to extract a properly
// ranked vector from the destination vector into which to insert. This pattern
// only takes care of this part and forwards the rest of the conversion to
// another pattern that converts InsertStridedSlice for operands of the same
// rank.
//
// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have different ranks. In this case:
//   1. the proper subvector is extracted from the destination vector
//   2. a new InsertStridedSlice op is created to insert the source in the
//   destination subvector
//   3. the destination subvector is inserted back in the proper place
//   4. the op is replaced by the result of step 3.
// The new InsertStridedSlice from step 2. will be picked up by a
// `VectorInsertStridedSliceOpSameRankRewritePattern`.
class VectorInsertStridedSliceOpDifferentRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    auto loc = op.getLoc();
    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff == 0)
      return failure();

    int64_t rankRest = dstType.getRank() - rankDiff;
    // Extract / insert the subvector of matching rank and InsertStridedSlice
    // on it.
    Value extracted =
        rewriter.create<ExtractOp>(loc, op.dest(),
                                   getI64SubArray(op.offsets(), /*dropFront=*/0,
                                                  /*dropBack=*/rankRest));
    // A different pattern will kick in for InsertStridedSlice with matching
    // ranks.
    auto stridedSliceInnerOp = rewriter.create<InsertStridedSliceOp>(
        loc, op.source(), extracted,
        getI64SubArray(op.offsets(), /*dropFront=*/rankDiff),
        getI64SubArray(op.strides(), /*dropFront=*/0));
    rewriter.replaceOpWithNewOp<InsertOp>(
        op, stridedSliceInnerOp.getResult(), op.dest(),
        getI64SubArray(op.offsets(), /*dropFront=*/0,
                       /*dropBack=*/rankRest));
    return success();
  }
};

// RewritePattern for InsertStridedSliceOp where source and destination vectors
// have the same rank. In this case, we reduce the problem along the most major
// dimension: for each slice of the source,
//   1. the corresponding subvector (or element) is extracted from the source,
//   2. if it is a vector, a rank-reduced InsertStridedSlice op is created to
//   insert it into the matching subvector of the destination,
//   3. the result is inserted back into the proper position of the
//   destination.
// The rank-reduced InsertStridedSlice ops created in step 2. are picked up
// again by this same pattern; the recursion is bounded since the rank strictly
// decreases.
class VectorInsertStridedSliceOpSameRankRewritePattern
    : public OpRewritePattern<InsertStridedSliceOp> {
public:
  VectorInsertStridedSliceOpSameRankRewritePattern(MLIRContext *ctx)
      : OpRewritePattern<InsertStridedSliceOp>(ctx) {
    // This pattern creates recursive InsertStridedSliceOp, but the recursion is
    // bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(InsertStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto srcType = op.getSourceVectorType();
    auto dstType = op.getDestVectorType();

    if (op.offsets().getValue().empty())
      return failure();

    int64_t rankDiff = dstType.getRank() - srcType.getRank();
    assert(rankDiff >= 0);
    if (rankDiff != 0)
      return failure();

    if (srcType == dstType) {
      rewriter.replaceOp(op, op.source());
      return success();
    }

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = srcType.getShape().front();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    Value res = op.dest();
    // For each slice of the source vector along the most major dimension.
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      // 1. extract the proper subvector (or element) from source.
      Value extractedSource = extractOne(rewriter, loc, op.source(), idx);
      if (extractedSource.getType().isa<VectorType>()) {
        // 2. If we have a vector, extract the proper subvector from
        // destination. Otherwise we are at the element level and no need to
        // recurse.
        Value extractedDest = extractOne(rewriter, loc, op.dest(), off);
        // 3. Reduce the problem to lowering a new InsertStridedSlice op with
        // smaller rank.
        extractedSource = rewriter.create<InsertStridedSliceOp>(
            loc, extractedSource, extractedDest,
            getI64SubArray(op.offsets(), /*dropFront=*/1),
            getI64SubArray(op.strides(), /*dropFront=*/1));
      }
      // 4. Insert the extractedSource into the res vector.
      res = insertOne(rewriter, loc, extractedSource, res, off);
    }

    rewriter.replaceOp(op, res);
    return success();
  }
};

/// Returns the strides if the memory underlying `memRefType` has a contiguous
/// static layout.
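/// E.g. memref<4x8xf32> (strides [8, 1]) is contiguous, while a memref of the
/// same shape with strides [16, 1] is not, since 16 != 1 * 8.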
static llvm::Optional<SmallVector<int64_t, 4>>
computeContiguousStrides(MemRefType memRefType) {
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  if (failed(getStridesAndOffset(memRefType, strides, offset)))
    return None;
  if (!strides.empty() && strides.back() != 1)
    return None;
  // If no layout or identity layout, this is contiguous by definition.
  if (memRefType.getAffineMaps().empty() ||
      memRefType.getAffineMaps().front().isIdentity())
    return strides;

  // Otherwise, we must determine contiguity from shapes. This can only ever
  // work in static cases because MemRefType is underspecified to represent
  // contiguous dynamic shapes in ways other than with an empty/identity
  // layout.
  auto sizes = memRefType.getShape();
  for (int index = 0, e = strides.size() - 2; index < e; ++index) {
    if (ShapedType::isDynamic(sizes[index + 1]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index]) ||
        ShapedType::isDynamicStrideOrOffset(strides[index + 1]))
      return None;
    if (strides[index] != strides[index + 1] * sizes[index + 1])
      return None;
  }
  return strides;
}

class VectorTypeCastOpConversion
    : public ConvertOpToLLVMPattern<vector::TypeCastOp> {
public:
  using ConvertOpToLLVMPattern<vector::TypeCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(vector::TypeCastOp castOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = castOp->getLoc();
    MemRefType sourceMemRefType =
        castOp.getOperand().getType().cast<MemRefType>();
    MemRefType targetMemRefType = castOp.getType();

    // Only static shape casts supported atm.
    if (!sourceMemRefType.hasStaticShape() ||
        !targetMemRefType.hasStaticShape())
      return failure();

    auto llvmSourceDescriptorTy =
        operands[0].getType().dyn_cast<LLVM::LLVMStructType>();
    if (!llvmSourceDescriptorTy)
      return failure();
    MemRefDescriptor sourceMemRef(operands[0]);

    auto llvmTargetDescriptorTy = typeConverter->convertType(targetMemRefType)
                                      .dyn_cast_or_null<LLVM::LLVMStructType>();
    if (!llvmTargetDescriptorTy)
      return failure();

    // Only contiguous source buffers supported atm.
    auto sourceStrides = computeContiguousStrides(sourceMemRefType);
    if (!sourceStrides)
      return failure();
    auto targetStrides = computeContiguousStrides(targetMemRefType);
    if (!targetStrides)
      return failure();
    // Only support static strides for now, regardless of contiguity.
    if (llvm::any_of(*targetStrides, [](int64_t stride) {
          return ShapedType::isDynamicStrideOrOffset(stride);
        }))
      return failure();

    auto int64Ty = IntegerType::get(rewriter.getContext(), 64);

    // Create descriptor.
    auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
    Type llvmTargetElementTy = desc.getElementPtrType();
    // Set allocated ptr.
    Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
    allocated =
        rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
    desc.setAllocatedPtr(rewriter, loc, allocated);
    // Set aligned ptr.
    Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
    ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
    desc.setAlignedPtr(rewriter, loc, ptr);
    // Fill offset 0.
    auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
    auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
    desc.setOffset(rewriter, loc, zero);

    // Fill size and stride descriptors in memref.
    for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
      int64_t index = indexedSize.index();
      auto sizeAttr =
          rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
      auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
      desc.setSize(rewriter, loc, index, size);
      auto strideAttr = rewriter.getIntegerAttr(rewriter.getIndexType(),
                                                (*targetStrides)[index]);
      auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
      desc.setStride(rewriter, loc, index, stride);
    }

    rewriter.replaceOp(castOp, {desc});
    return success();
  }
};

/// Conversion pattern that converts a 1-D vector transfer read/write op into a
/// sequence of:
/// 1. Get the source/dst address as an LLVM vector pointer.
/// 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
/// 3. Create an offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
/// 4. Create a mask where offsetVector is compared against memref upper bound.
/// 5. Rewrite op as a masked read or write.
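///
/// E.g. a masked 1-D read
/// ```
///   %f = vector.transfer_read %A[%i], %pad : memref<?xf32>, vector<8xf32>
/// ```
/// becomes an LLVM::MaskedLoadOp through a vector pointer, with the mask
/// [ %i + 0 .. %i + 7 ] < [ dim .. dim ] and a splat of %pad passed through.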
template <typename ConcreteOp>
class VectorTransferConversion : public ConvertOpToLLVMPattern<ConcreteOp> {
public:
  explicit VectorTransferConversion(LLVMTypeConverter &typeConv,
                                    bool enableIndexOpt)
      : ConvertOpToLLVMPattern<ConcreteOp>(typeConv),
        enableIndexOptimizations(enableIndexOpt) {}

  LogicalResult
  matchAndRewrite(ConcreteOp xferOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = getTransferOpAdapter(xferOp, operands);

    if (xferOp.getVectorType().getRank() > 1 ||
        llvm::size(xferOp.indices()) == 0)
      return failure();
    if (xferOp.permutation_map() !=
        AffineMap::getMinorIdentityMap(xferOp.permutation_map().getNumInputs(),
                                       xferOp.getVectorType().getRank(),
                                       xferOp->getContext()))
      return failure();
    auto memRefType = xferOp.getShapedType().template dyn_cast<MemRefType>();
    if (!memRefType)
      return failure();
    // Only contiguous source buffers supported atm.
    auto strides = computeContiguousStrides(memRefType);
    if (!strides)
      return failure();

    auto toLLVMTy = [&](Type t) {
      return this->getTypeConverter()->convertType(t);
    };

    Location loc = xferOp->getLoc();

    if (auto memrefVectorElementType =
            memRefType.getElementType().template dyn_cast<VectorType>()) {
      // Memref has vector element type.
      if (memrefVectorElementType.getElementType() !=
          xferOp.getVectorType().getElementType())
        return failure();
#ifndef NDEBUG
      // Check that memref vector type is a suffix of `vectorType`.
      unsigned memrefVecEltRank = memrefVectorElementType.getRank();
      unsigned resultVecRank = xferOp.getVectorType().getRank();
      assert(memrefVecEltRank <= resultVecRank);
      // TODO: Move this to isSuffix in Vector/Utils.h.
      unsigned rankOffset = resultVecRank - memrefVecEltRank;
      auto memrefVecEltShape = memrefVectorElementType.getShape();
      auto resultVecShape = xferOp.getVectorType().getShape();
      for (unsigned i = 0; i < memrefVecEltRank; ++i)
        assert(memrefVecEltShape[i] == resultVecShape[rankOffset + i] &&
               "memref vector element shape should match suffix of vector "
               "result shape.");
1241 #endif // ifndef NDEBUG
1242     }
1243 
1244     // 1. Get the source/dst address as an LLVM vector pointer.
1245     VectorType vtp = xferOp.getVectorType();
1246     Value dataPtr = this->getStridedElementPtr(
1247         loc, memRefType, adaptor.source(), adaptor.indices(), rewriter);
1248     Value vectorDataPtr =
1249         castDataPtr(rewriter, loc, dataPtr, memRefType, toLLVMTy(vtp));
1250 
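    // If the transfer is statically known to be unmasked along the (single)
    // vector dimension, a plain, unmasked vector load/store suffices.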
    if (!xferOp.isMaskedDim(0))
      return replaceTransferOpWithLoadOrStore(rewriter,
                                              *this->getTypeConverter(), loc,
                                              xferOp, operands, vectorDataPtr);

    // 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
    // 3. Create offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
    // 4. Let dim be the memref dimension; compute the vector comparison mask:
    //   [ offset + 0 .. offset + vector_length - 1 ] < [ dim .. dim ]
    //
    // TODO: when the leaf transfer rank is k > 1, we need the last `k`
    //       dimensions here.
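    // E.g., with vector_length == 4 this computes, conceptually:
    //   mask = [ off, off + 1, off + 2, off + 3 ] < [ dim, dim, dim, dim ]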
    unsigned vecWidth = LLVM::getVectorNumElements(vtp).getFixedValue();
    unsigned lastIndex = llvm::size(xferOp.indices()) - 1;
    Value off = xferOp.indices()[lastIndex];
    Value dim = rewriter.create<memref::DimOp>(loc, xferOp.source(), lastIndex);
    Value mask = buildVectorComparison(
        rewriter, xferOp, enableIndexOptimizations, vecWidth, dim, &off);

    // 5. Rewrite as a masked read / write.
    return replaceTransferOpWithMasked(rewriter, *this->getTypeConverter(), loc,
                                       xferOp, operands, vectorDataPtr, mask);
  }

private:
  const bool enableIndexOptimizations;
};

class VectorPrintOpConversion : public ConvertOpToLLVMPattern<vector::PrintOp> {
public:
  using ConvertOpToLLVMPattern<vector::PrintOp>::ConvertOpToLLVMPattern;

  // Proof-of-concept lowering implementation that relies on a small
  // runtime support library, which only needs to provide a few printing
  // methods (a single-value print for each data type, plus opening/closing
  // bracket, comma, and newline). The lowering fully unrolls a vector in
  // terms of these elementary printing operations. The advantage of this
  // approach is that the library can remain unaware of all low-level
  // implementation details of vectors while still supporting vectors of
  // any shape and rank. Due to the full unrolling, however, this approach
  // is less suited for very large vectors.
  //
  // TODO: rely solely on libc in the future? Something else?
  //
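  // For example, a vector<2x2xf32> is printed by emitting nested calls to
  // the open/comma/close helpers around per-element print calls, producing
  // output of the form:
  //   ( ( 1, 2 ), ( 3, 4 ) )
  //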
  LogicalResult
  matchAndRewrite(vector::PrintOp printOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto adaptor = vector::PrintOpAdaptor(operands);
    Type printType = printOp.getPrintType();

    if (typeConverter->convertType(printType) == nullptr)
      return failure();

    // Make sure element type has runtime support.
    PrintConversion conversion = PrintConversion::None;
    VectorType vectorType = printType.dyn_cast<VectorType>();
    Type eltType = vectorType ? vectorType.getElementType() : printType;
    Operation *printer;
    if (eltType.isF32()) {
      printer =
          LLVM::lookupOrCreatePrintF32Fn(printOp->getParentOfType<ModuleOp>());
    } else if (eltType.isF64()) {
      printer =
          LLVM::lookupOrCreatePrintF64Fn(printOp->getParentOfType<ModuleOp>());
    } else if (eltType.isIndex()) {
      printer =
          LLVM::lookupOrCreatePrintU64Fn(printOp->getParentOfType<ModuleOp>());
    } else if (auto intTy = eltType.dyn_cast<IntegerType>()) {
      // Integers need a zero or sign extension on the operand (depending on
      // the source type) as well as a signed or unsigned print method. Widths
      // of up to 64 bits are supported.
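      // E.g., an i16 value is sign-extended to i64 and printed via the
      // signed printer, whereas a ui16 value is zero-extended and printed
      // via the unsigned printer.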
      unsigned width = intTy.getWidth();
      if (intTy.isUnsigned()) {
        if (width <= 64) {
          if (width < 64)
            conversion = PrintConversion::ZeroExt64;
          printer = LLVM::lookupOrCreatePrintU64Fn(
              printOp->getParentOfType<ModuleOp>());
        } else {
          return failure();
        }
      } else {
        assert(intTy.isSignless() || intTy.isSigned());
        if (width <= 64) {
          // Note that we *always* zero extend booleans (1-bit integers),
          // so that true/false is printed as 1/0 rather than -1/0.
          if (width == 1)
            conversion = PrintConversion::ZeroExt64;
          else if (width < 64)
            conversion = PrintConversion::SignExt64;
          printer = LLVM::lookupOrCreatePrintI64Fn(
              printOp->getParentOfType<ModuleOp>());
        } else {
          return failure();
        }
      }
    } else {
      return failure();
    }

    // Unroll vector into elementary print calls.
    int64_t rank = vectorType ? vectorType.getRank() : 0;
    emitRanks(rewriter, printOp, adaptor.source(), vectorType, printer, rank,
              conversion);
    emitCall(rewriter, printOp->getLoc(),
             LLVM::lookupOrCreatePrintNewlineFn(
                 printOp->getParentOfType<ModuleOp>()));
    rewriter.eraseOp(printOp);
    return success();
  }

private:
  enum class PrintConversion {
    // clang-format off
    None,
    ZeroExt64,
    SignExt64
    // clang-format on
  };

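  // Recursively emits the print calls for one rank of the vector: at rank
  // zero the scalar is (optionally sign- or zero-extended and) printed
  // directly; at higher ranks an opening bracket is emitted, each sub-vector
  // is printed recursively with comma separators, and a closing bracket
  // follows.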
  void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
                 Value value, VectorType vectorType, Operation *printer,
                 int64_t rank, PrintConversion conversion) const {
    Location loc = op->getLoc();
    if (rank == 0) {
      switch (conversion) {
      case PrintConversion::ZeroExt64:
        value = rewriter.create<ZeroExtendIOp>(
            loc, value, IntegerType::get(rewriter.getContext(), 64));
        break;
      case PrintConversion::SignExt64:
        value = rewriter.create<SignExtendIOp>(
            loc, value, IntegerType::get(rewriter.getContext(), 64));
        break;
      case PrintConversion::None:
        break;
      }
      emitCall(rewriter, loc, printer, value);
      return;
    }

    emitCall(rewriter, loc,
             LLVM::lookupOrCreatePrintOpenFn(op->getParentOfType<ModuleOp>()));
    Operation *printComma =
        LLVM::lookupOrCreatePrintCommaFn(op->getParentOfType<ModuleOp>());
    int64_t dim = vectorType.getDimSize(0);
    for (int64_t d = 0; d < dim; ++d) {
      auto reducedType =
          rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
      auto llvmType = typeConverter->convertType(
          rank > 1 ? reducedType : vectorType.getElementType());
      Value nestedVal = extractOne(rewriter, *getTypeConverter(), loc, value,
                                   llvmType, rank, d);
      emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1,
                conversion);
      if (d != dim - 1)
        emitCall(rewriter, loc, printComma);
    }
    emitCall(rewriter, loc,
             LLVM::lookupOrCreatePrintCloseFn(op->getParentOfType<ModuleOp>()));
  }

  // Helper to emit a call.
  static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
                       Operation *ref, ValueRange params = ValueRange()) {
    rewriter.create<LLVM::CallOp>(loc, TypeRange(),
                                  rewriter.getSymbolRefAttr(ref), params);
  }
};

/// Progressive lowering of ExtractStridedSliceOp to either:
///   1. a direct shuffle when there is a single offset, or
///   2. extract + lower-rank strided_slice + insert for the n-D case.
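///
/// For example (a sketch of case 1.):
///   vector.extract_strided_slice %v
///       {offsets = [2], sizes = [4], strides = [1]}
///       : vector<8xf32> to vector<4xf32>
/// lowers to a single vector.shuffle of %v with itself and mask [2, 3, 4, 5].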
class VectorExtractStridedSliceOpConversion
    : public OpRewritePattern<ExtractStridedSliceOp> {
public:
  VectorExtractStridedSliceOpConversion(MLIRContext *ctx)
      : OpRewritePattern<ExtractStridedSliceOp>(ctx) {
    // This pattern creates recursive ExtractStridedSliceOp, but the recursion
    // is bounded as the rank is strictly decreasing.
    setHasBoundedRewriteRecursion();
  }

  LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
                                PatternRewriter &rewriter) const override {
    auto dstType = op.getType();

    assert(!op.offsets().getValue().empty() && "Unexpected empty offsets");

    int64_t offset =
        op.offsets().getValue().front().cast<IntegerAttr>().getInt();
    int64_t size = op.sizes().getValue().front().cast<IntegerAttr>().getInt();
    int64_t stride =
        op.strides().getValue().front().cast<IntegerAttr>().getInt();

    auto loc = op.getLoc();
    auto elemType = dstType.getElementType();
    assert(elemType.isSignlessIntOrIndexOrFloat());

    // A single offset can be lowered more efficiently to a direct shuffle.
    if (op.offsets().getValue().size() == 1) {
      SmallVector<int64_t, 4> offsets;
      offsets.reserve(size);
      for (int64_t off = offset, e = offset + size * stride; off < e;
           off += stride)
        offsets.push_back(off);
      rewriter.replaceOpWithNewOp<ShuffleOp>(op, dstType, op.vector(),
                                             op.vector(),
                                             rewriter.getI64ArrayAttr(offsets));
      return success();
    }

    // Extract/insert on a lower-ranked extract strided slice op.
    Value zero = rewriter.create<ConstantOp>(loc, elemType,
                                             rewriter.getZeroAttr(elemType));
    Value res = rewriter.create<SplatOp>(loc, dstType, zero);
    for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
         off += stride, ++idx) {
      Value one = extractOne(rewriter, loc, op.vector(), off);
      Value extracted = rewriter.create<ExtractStridedSliceOp>(
          loc, one, getI64SubArray(op.offsets(), /*dropFront=*/1),
          getI64SubArray(op.sizes(), /*dropFront=*/1),
          getI64SubArray(op.strides(), /*dropFront=*/1));
      res = insertOne(rewriter, loc, extracted, res, idx);
    }
    rewriter.replaceOp(op, res);
    return success();
  }
};

} // namespace

/// Populate the given list with patterns that convert from Vector to LLVM.
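///
/// A typical use from a conversion pass might look as follows (a sketch;
/// the exact boilerplate varies across MLIR revisions):
///   OwningRewritePatternList patterns(&getContext());
///   LLVMTypeConverter converter(&getContext());
///   populateVectorToLLVMConversionPatterns(converter, patterns);
///   LLVMConversionTarget target(getContext());
///   if (failed(applyPartialConversion(getOperation(), target,
///                                     std::move(patterns))))
///     signalPassFailure();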
void mlir::populateVectorToLLVMConversionPatterns(
    LLVMTypeConverter &converter, OwningRewritePatternList &patterns,
    bool reassociateFPReductions, bool enableIndexOptimizations) {
  MLIRContext *ctx = converter.getDialect()->getContext();
  // clang-format off
  patterns.insert<VectorFMAOpNDRewritePattern,
                  VectorInsertStridedSliceOpDifferentRankRewritePattern,
                  VectorInsertStridedSliceOpSameRankRewritePattern,
                  VectorExtractStridedSliceOpConversion>(ctx);
  patterns.insert<VectorReductionOpConversion>(
      converter, reassociateFPReductions);
  patterns.insert<VectorCreateMaskOpConversion,
                  VectorTransferConversion<TransferReadOp>,
                  VectorTransferConversion<TransferWriteOp>>(
      converter, enableIndexOptimizations);
  patterns
      .insert<VectorBitCastOpConversion,
              VectorShuffleOpConversion,
              VectorExtractElementOpConversion,
              VectorExtractOpConversion,
              VectorFMAOp1DConversion,
              VectorInsertElementOpConversion,
              VectorInsertOpConversion,
              VectorPrintOpConversion,
              VectorTypeCastOpConversion,
              VectorLoadStoreConversion<vector::LoadOp,
                                        vector::LoadOpAdaptor>,
              VectorLoadStoreConversion<vector::MaskedLoadOp,
                                        vector::MaskedLoadOpAdaptor>,
              VectorLoadStoreConversion<vector::StoreOp,
                                        vector::StoreOpAdaptor>,
              VectorLoadStoreConversion<vector::MaskedStoreOp,
                                        vector::MaskedStoreOpAdaptor>,
              VectorGatherOpConversion,
              VectorScatterOpConversion,
              VectorExpandLoadOpConversion,
              VectorCompressStoreOpConversion>(converter);
  // clang-format on
}

void mlir::populateVectorToLLVMMatrixConversionPatterns(
    LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
  patterns.insert<VectorMatmulOpConversion>(converter);
  patterns.insert<VectorFlatTransposeOpConversion>(converter);
}