//===-- X86FixupVectorConstants.cpp - optimize constant generation  -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file examines all full size vector constant pool loads and attempts to
// replace them with smaller constant pool entries, including:
// * Converting AVX512 memory-fold instructions to their broadcast-fold form.
// * Using vzload scalar loads.
// * Broadcasting of full width loads.
// * Sign/Zero extension of full width loads.
//
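// For example, a full width 512-bit load of a splat constant:
//   vmovaps .LCPI0_0(%rip), %zmm0       # 64-byte constant pool entry
// can instead broadcast from a much smaller entry:
//   vbroadcastss .LCPI0_1(%rip), %zmm0  # 4-byte constant pool entry
//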
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrFoldTables.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineConstantPool.h"

using namespace llvm;

#define DEBUG_TYPE "x86-fixup-vector-constants"

STATISTIC(NumInstChanges, "Number of instruction changes");

namespace {
class X86FixupVectorConstantsPass : public MachineFunctionPass {
public:
  static char ID;

  X86FixupVectorConstantsPass() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override {
    return "X86 Fixup Vector Constants";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  bool processInstruction(MachineFunction &MF, MachineBasicBlock &MBB,
                          MachineInstr &MI);

  // This pass runs after regalloc and doesn't support VReg operands.
  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

private:
  const X86InstrInfo *TII = nullptr;
  const X86Subtarget *ST = nullptr;
  const MCSchedModel *SM = nullptr;
};
} // end anonymous namespace

char X86FixupVectorConstantsPass::ID = 0;

INITIALIZE_PASS(X86FixupVectorConstantsPass, DEBUG_TYPE, DEBUG_TYPE, false,
                false)

FunctionPass *llvm::createX86FixupVectorConstants() {
  return new X86FixupVectorConstantsPass();
}

// Attempt to extract the full width of bits data from the constant.
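// e.g. <4 x i32> <i32 1, i32 2, i32 3, i32 4> yields a 128-bit APInt with
// element 0 in the lowest bits: 0x00000004_00000003_00000002_00000001
// (undef values read as zero).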
static std::optional<APInt> extractConstantBits(const Constant *C) {
  unsigned NumBits = C->getType()->getPrimitiveSizeInBits();

  if (isa<UndefValue>(C))
    return APInt::getZero(NumBits);

  if (auto *CInt = dyn_cast<ConstantInt>(C))
    return CInt->getValue();

  if (auto *CFP = dyn_cast<ConstantFP>(C))
    return CFP->getValue().bitcastToAPInt();

  if (auto *CV = dyn_cast<ConstantVector>(C)) {
    if (auto *CVSplat = CV->getSplatValue(/*AllowUndefs*/ true)) {
      if (std::optional<APInt> Bits = extractConstantBits(CVSplat)) {
        assert((NumBits % Bits->getBitWidth()) == 0 && "Illegal splat");
        return APInt::getSplat(NumBits, *Bits);
      }
    }

    APInt Bits = APInt::getZero(NumBits);
    for (unsigned I = 0, E = CV->getNumOperands(); I != E; ++I) {
      Constant *Elt = CV->getOperand(I);
      std::optional<APInt> SubBits = extractConstantBits(Elt);
      if (!SubBits)
        return std::nullopt;
      assert(NumBits == (E * SubBits->getBitWidth()) &&
             "Illegal vector element size");
      Bits.insertBits(*SubBits, I * SubBits->getBitWidth());
    }
    return Bits;
  }

  if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
    bool IsInteger = CDS->getElementType()->isIntegerTy();
    bool IsFloat = CDS->getElementType()->isHalfTy() ||
                   CDS->getElementType()->isBFloatTy() ||
                   CDS->getElementType()->isFloatTy() ||
                   CDS->getElementType()->isDoubleTy();
    if (IsInteger || IsFloat) {
      APInt Bits = APInt::getZero(NumBits);
      unsigned EltBits = CDS->getElementType()->getPrimitiveSizeInBits();
      for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) {
        if (IsInteger)
          Bits.insertBits(CDS->getElementAsAPInt(I), I * EltBits);
        else
          Bits.insertBits(CDS->getElementAsAPFloat(I).bitcastToAPInt(),
                          I * EltBits);
      }
      return Bits;
    }
  }

  return std::nullopt;
}

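// As above, but adjust the extracted bits to the requested bit width,
// zero-extending or truncating as necessary.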
static std::optional<APInt> extractConstantBits(const Constant *C,
                                                unsigned NumBits) {
  if (std::optional<APInt> Bits = extractConstantBits(C))
    return Bits->zextOrTrunc(NumBits);
  return std::nullopt;
}

// Attempt to compute the splat width of bits data by normalizing the splat to
// remove undefs.
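// e.g. <4 x i32> <i32 5, i32 undef, i32 5, i32 undef> is treated as a 32-bit
// splat of 5, with the undef elements normalized to the splat value.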
static std::optional<APInt> getSplatableConstant(const Constant *C,
                                                 unsigned SplatBitWidth) {
  const Type *Ty = C->getType();
  assert((Ty->getPrimitiveSizeInBits() % SplatBitWidth) == 0 &&
         "Illegal splat width");

  if (std::optional<APInt> Bits = extractConstantBits(C))
    if (Bits->isSplat(SplatBitWidth))
      return Bits->trunc(SplatBitWidth);

  // Detect general splats with undefs.
  // TODO: Do we need to handle NumEltsBits > SplatBitWidth splitting?
  if (auto *CV = dyn_cast<ConstantVector>(C)) {
    unsigned NumOps = CV->getNumOperands();
    unsigned NumEltsBits = Ty->getScalarSizeInBits();
    unsigned NumScaleOps = SplatBitWidth / NumEltsBits;
    if ((SplatBitWidth % NumEltsBits) == 0) {
      // Collect the elements and ensure that within the repeated splat sequence
      // they either match or are undef.
      SmallVector<Constant *, 16> Sequence(NumScaleOps, nullptr);
      for (unsigned Idx = 0; Idx != NumOps; ++Idx) {
        if (Constant *Elt = CV->getAggregateElement(Idx)) {
          if (isa<UndefValue>(Elt))
            continue;
          unsigned SplatIdx = Idx % NumScaleOps;
          if (!Sequence[SplatIdx] || Sequence[SplatIdx] == Elt) {
            Sequence[SplatIdx] = Elt;
            continue;
          }
        }
        return std::nullopt;
      }
      // Extract the constant bits forming the splat and insert into the bits
      // data, leaving undef as zero.
      APInt SplatBits = APInt::getZero(SplatBitWidth);
      for (unsigned I = 0; I != NumScaleOps; ++I) {
        if (!Sequence[I])
          continue;
        if (std::optional<APInt> Bits = extractConstantBits(Sequence[I])) {
          SplatBits.insertBits(*Bits, I * Bits->getBitWidth());
          continue;
        }
        return std::nullopt;
      }
      return SplatBits;
    }
  }

  return std::nullopt;
}

// Split raw bits into a constant vector of elements of a specific bit width.
// NOTE: We don't always bother converting to scalars if the vector length is 1.
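// e.g. the 64-bit pattern 0x00000002_00000001 split at NumSclBits == 32
// becomes <2 x i32> <i32 1, i32 2> (or <2 x float> for float scalar types).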
static Constant *rebuildConstant(LLVMContext &Ctx, Type *SclTy,
                                 const APInt &Bits, unsigned NumSclBits) {
  unsigned BitWidth = Bits.getBitWidth();

  if (NumSclBits == 8) {
    SmallVector<uint8_t> RawBits;
    for (unsigned I = 0; I != BitWidth; I += 8)
      RawBits.push_back(Bits.extractBits(8, I).getZExtValue());
    return ConstantDataVector::get(Ctx, RawBits);
  }

  if (NumSclBits == 16) {
    SmallVector<uint16_t> RawBits;
    for (unsigned I = 0; I != BitWidth; I += 16)
      RawBits.push_back(Bits.extractBits(16, I).getZExtValue());
    if (SclTy->is16bitFPTy())
      return ConstantDataVector::getFP(SclTy, RawBits);
    return ConstantDataVector::get(Ctx, RawBits);
  }

  if (NumSclBits == 32) {
    SmallVector<uint32_t> RawBits;
    for (unsigned I = 0; I != BitWidth; I += 32)
      RawBits.push_back(Bits.extractBits(32, I).getZExtValue());
    if (SclTy->isFloatTy())
      return ConstantDataVector::getFP(SclTy, RawBits);
    return ConstantDataVector::get(Ctx, RawBits);
  }

  assert(NumSclBits == 64 && "Unhandled vector element width");

  SmallVector<uint64_t> RawBits;
  for (unsigned I = 0; I != BitWidth; I += 64)
    RawBits.push_back(Bits.extractBits(64, I).getZExtValue());
  if (SclTy->isDoubleTy())
    return ConstantDataVector::getFP(SclTy, RawBits);
  return ConstantDataVector::get(Ctx, RawBits);
}

// Attempt to rebuild a normalized splat vector constant of the requested splat
// width, built up of potentially smaller scalar values.
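// e.g. a 256-bit constant that repeats the 64-bit pattern 0x00000002_00000001
// is rebuilt as the <2 x i32> constant <i32 1, i32 2>.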
static Constant *rebuildSplatCst(const Constant *C, unsigned /*NumBits*/,
                                 unsigned /*NumElts*/, unsigned SplatBitWidth) {
  std::optional<APInt> Splat = getSplatableConstant(C, SplatBitWidth);
  if (!Splat)
    return nullptr;

  // Determine scalar size to use for the constant splat vector, clamping as we
  // might have found a splat smaller than the original constant data.
  Type *SclTy = C->getType()->getScalarType();
  unsigned NumSclBits = SclTy->getPrimitiveSizeInBits();
  NumSclBits = std::min<unsigned>(NumSclBits, SplatBitWidth);

  // Fallback to i64 / double.
  NumSclBits = (NumSclBits == 8 || NumSclBits == 16 || NumSclBits == 32)
                   ? NumSclBits
                   : 64;

  // Extract per-element bits.
  return rebuildConstant(C->getContext(), SclTy, *Splat, NumSclBits);
}

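// Attempt to rebuild a constant for a scalar load that implicitly zeros the
// upper elements, e.g. a 128-bit constant with only the low 32 bits set can
// be reloaded with MOVSS, which zero-fills the upper lanes.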
static Constant *rebuildZeroUpperCst(const Constant *C, unsigned NumBits,
                                     unsigned /*NumElts*/,
                                     unsigned ScalarBitWidth) {
  Type *SclTy = C->getType()->getScalarType();
  unsigned NumSclBits = SclTy->getPrimitiveSizeInBits();
  LLVMContext &Ctx = C->getContext();

  if (NumBits > ScalarBitWidth) {
    // Determine if the upper bits are all zero.
    if (std::optional<APInt> Bits = extractConstantBits(C, NumBits)) {
      if (Bits->countLeadingZeros() >= (NumBits - ScalarBitWidth)) {
        // If the original constant was made of smaller elements, try to retain
        // those types.
        if (ScalarBitWidth > NumSclBits && (ScalarBitWidth % NumSclBits) == 0)
          return rebuildConstant(Ctx, SclTy, *Bits, NumSclBits);

        // Fallback to raw integer bits.
        APInt RawBits = Bits->zextOrTrunc(ScalarBitWidth);
        return ConstantInt::get(Ctx, RawBits);
      }
    }
  }

  return nullptr;
}

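// Attempt to rebuild a constant at a smaller bit width that a sign/zero
// extending load will widen back, e.g. <4 x i32> <i32 1, i32 2, i32 3, i32 4>
// fits in 8-bit elements, so it can be stored as <4 x i8> and reloaded with
// PMOVZXBD.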
static Constant *rebuildExtCst(const Constant *C, bool IsSExt,
                               unsigned NumBits, unsigned NumElts,
                               unsigned SrcEltBitWidth) {
  unsigned DstEltBitWidth = NumBits / NumElts;
  assert((NumBits % NumElts) == 0 && (NumBits % SrcEltBitWidth) == 0 &&
         (DstEltBitWidth % SrcEltBitWidth) == 0 &&
         (DstEltBitWidth > SrcEltBitWidth) && "Illegal extension width");

  if (std::optional<APInt> Bits = extractConstantBits(C, NumBits)) {
    assert((Bits->getBitWidth() / DstEltBitWidth) == NumElts &&
           (Bits->getBitWidth() % DstEltBitWidth) == 0 &&
           "Unexpected constant extension");

    // Ensure every vector element can be represented by the src bitwidth.
    APInt TruncBits = APInt::getZero(NumElts * SrcEltBitWidth);
    for (unsigned I = 0; I != NumElts; ++I) {
      APInt Elt = Bits->extractBits(DstEltBitWidth, I * DstEltBitWidth);
      if ((IsSExt && Elt.getSignificantBits() > SrcEltBitWidth) ||
          (!IsSExt && Elt.getActiveBits() > SrcEltBitWidth))
        return nullptr;
      TruncBits.insertBits(Elt.trunc(SrcEltBitWidth), I * SrcEltBitWidth);
    }

    Type *Ty = C->getType();
    return rebuildConstant(Ty->getContext(), Ty->getScalarType(), TruncBits,
                           SrcEltBitWidth);
  }

  return nullptr;
}
static Constant *rebuildSExtCst(const Constant *C, unsigned NumBits,
                                unsigned NumElts, unsigned SrcEltBitWidth) {
  return rebuildExtCst(C, true, NumBits, NumElts, SrcEltBitWidth);
}
static Constant *rebuildZExtCst(const Constant *C, unsigned NumBits,
                                unsigned NumElts, unsigned SrcEltBitWidth) {
  return rebuildExtCst(C, false, NumBits, NumElts, SrcEltBitWidth);
}

bool X86FixupVectorConstantsPass::processInstruction(MachineFunction &MF,
                                                     MachineBasicBlock &MBB,
                                                     MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  MachineConstantPool *CP = MI.getParent()->getParent()->getConstantPool();
  bool HasSSE41 = ST->hasSSE41();
  bool HasAVX2 = ST->hasAVX2();
  bool HasDQI = ST->hasDQI();
  bool HasBWI = ST->hasBWI();
  bool HasVLX = ST->hasVLX();

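  // Candidate replacement: Op is the new opcode (0 if unavailable on this
  // subtarget), NumCstElts x MemBitWidth describes the smaller constant pool
  // entry, and RebuildConstant attempts to build it from the original.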
  struct FixupEntry {
    int Op;
    int NumCstElts;
    int MemBitWidth;
    std::function<Constant *(const Constant *, unsigned, unsigned, unsigned)>
        RebuildConstant;
  };
  auto FixupConstant = [&](ArrayRef<FixupEntry> Fixups, unsigned OperandNo) {
#ifdef EXPENSIVE_CHECKS
    assert(llvm::is_sorted(Fixups,
                           [](const FixupEntry &A, const FixupEntry &B) {
                             return (A.NumCstElts * A.MemBitWidth) <
                                    (B.NumCstElts * B.MemBitWidth);
                           }) &&
           "Constant fixup table not sorted in ascending constant size");
#endif
    assert(MI.getNumOperands() >= (OperandNo + X86::AddrNumOperands) &&
           "Unexpected number of operands!");
    if (auto *C = X86::getConstantFromPool(MI, OperandNo)) {
      unsigned RegBitWidth = C->getType()->getPrimitiveSizeInBits();
      for (const FixupEntry &Fixup : Fixups) {
        if (Fixup.Op) {
          // Construct a suitable constant and adjust the MI to use the new
          // constant pool entry.
          if (Constant *NewCst = Fixup.RebuildConstant(
                  C, RegBitWidth, Fixup.NumCstElts, Fixup.MemBitWidth)) {
            unsigned NewCPI =
                CP->getConstantPoolIndex(NewCst, Align(Fixup.MemBitWidth / 8));
            MI.setDesc(TII->get(Fixup.Op));
            MI.getOperand(OperandNo + X86::AddrDisp).setIndex(NewCPI);
            return true;
          }
        }
      }
    }
    return false;
  };

  // Attempt to detect a suitable vzload/broadcast/vextload from increasing
  // constant bitwidths. At the same bitwidth, prefer vzload over broadcast
  // over vextload:
  // - vzload shouldn't ever need a shuffle port to zero the upper elements,
  // and the fp/int domain versions are equally available so we don't
  // introduce a domain crossing penalty.
  // - broadcasts sometimes need a shuffle port (especially for 8/16-bit
  // variants); AVX1 only has fp domain broadcasts but AVX2+ have good fp/int
  // domain equivalents.
  // - vextload always needs a shuffle port and is only ever int domain.
  switch (Opc) {
  /* FP Loads */
  case X86::MOVAPDrm:
  case X86::MOVAPSrm:
  case X86::MOVUPDrm:
  case X86::MOVUPSrm:
    // TODO: SSE3 MOVDDUP Handling
    return FixupConstant({{X86::MOVSSrm, 1, 32, rebuildZeroUpperCst},
                          {X86::MOVSDrm, 1, 64, rebuildZeroUpperCst}},
                         1);
  case X86::VMOVAPDrm:
  case X86::VMOVAPSrm:
  case X86::VMOVUPDrm:
  case X86::VMOVUPSrm:
    return FixupConstant({{X86::VMOVSSrm, 1, 32, rebuildZeroUpperCst},
                          {X86::VBROADCASTSSrm, 1, 32, rebuildSplatCst},
                          {X86::VMOVSDrm, 1, 64, rebuildZeroUpperCst},
                          {X86::VMOVDDUPrm, 1, 64, rebuildSplatCst}},
                         1);
  case X86::VMOVAPDYrm:
  case X86::VMOVAPSYrm:
  case X86::VMOVUPDYrm:
  case X86::VMOVUPSYrm:
    return FixupConstant({{X86::VBROADCASTSSYrm, 1, 32, rebuildSplatCst},
                          {X86::VBROADCASTSDYrm, 1, 64, rebuildSplatCst},
                          {X86::VBROADCASTF128rm, 1, 128, rebuildSplatCst}},
                         1);
  case X86::VMOVAPDZ128rm:
  case X86::VMOVAPSZ128rm:
  case X86::VMOVUPDZ128rm:
  case X86::VMOVUPSZ128rm:
    return FixupConstant({{X86::VMOVSSZrm, 1, 32, rebuildZeroUpperCst},
                          {X86::VBROADCASTSSZ128rm, 1, 32, rebuildSplatCst},
                          {X86::VMOVSDZrm, 1, 64, rebuildZeroUpperCst},
                          {X86::VMOVDDUPZ128rm, 1, 64, rebuildSplatCst}},
                         1);
  case X86::VMOVAPDZ256rm:
  case X86::VMOVAPSZ256rm:
  case X86::VMOVUPDZ256rm:
  case X86::VMOVUPSZ256rm:
    return FixupConstant(
        {{X86::VBROADCASTSSZ256rm, 1, 32, rebuildSplatCst},
         {X86::VBROADCASTSDZ256rm, 1, 64, rebuildSplatCst},
         {X86::VBROADCASTF32X4Z256rm, 1, 128, rebuildSplatCst}},
        1);
  case X86::VMOVAPDZrm:
  case X86::VMOVAPSZrm:
  case X86::VMOVUPDZrm:
  case X86::VMOVUPSZrm:
    return FixupConstant({{X86::VBROADCASTSSZrm, 1, 32, rebuildSplatCst},
                          {X86::VBROADCASTSDZrm, 1, 64, rebuildSplatCst},
                          {X86::VBROADCASTF32X4rm, 1, 128, rebuildSplatCst},
                          {X86::VBROADCASTF64X4rm, 1, 256, rebuildSplatCst}},
                         1);
  /* Integer Loads */
  case X86::MOVDQArm:
  case X86::MOVDQUrm: {
    FixupEntry Fixups[] = {
        {HasSSE41 ? X86::PMOVSXBQrm : 0, 2, 8, rebuildSExtCst},
        {HasSSE41 ? X86::PMOVZXBQrm : 0, 2, 8, rebuildZExtCst},
        {X86::MOVDI2PDIrm, 1, 32, rebuildZeroUpperCst},
        {HasSSE41 ? X86::PMOVSXBDrm : 0, 4, 8, rebuildSExtCst},
        {HasSSE41 ? X86::PMOVZXBDrm : 0, 4, 8, rebuildZExtCst},
        {HasSSE41 ? X86::PMOVSXWQrm : 0, 2, 16, rebuildSExtCst},
        {HasSSE41 ? X86::PMOVZXWQrm : 0, 2, 16, rebuildZExtCst},
        {X86::MOVQI2PQIrm, 1, 64, rebuildZeroUpperCst},
        {HasSSE41 ? X86::PMOVSXBWrm : 0, 8, 8, rebuildSExtCst},
        {HasSSE41 ? X86::PMOVZXBWrm : 0, 8, 8, rebuildZExtCst},
        {HasSSE41 ? X86::PMOVSXWDrm : 0, 4, 16, rebuildSExtCst},
        {HasSSE41 ? X86::PMOVZXWDrm : 0, 4, 16, rebuildZExtCst},
        {HasSSE41 ? X86::PMOVSXDQrm : 0, 2, 32, rebuildSExtCst},
        {HasSSE41 ? X86::PMOVZXDQrm : 0, 2, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 1);
  }
  case X86::VMOVDQArm:
  case X86::VMOVDQUrm: {
    FixupEntry Fixups[] = {
        {HasAVX2 ? X86::VPBROADCASTBrm : 0, 1, 8, rebuildSplatCst},
        {HasAVX2 ? X86::VPBROADCASTWrm : 0, 1, 16, rebuildSplatCst},
        {X86::VPMOVSXBQrm, 2, 8, rebuildSExtCst},
        {X86::VPMOVZXBQrm, 2, 8, rebuildZExtCst},
        {X86::VMOVDI2PDIrm, 1, 32, rebuildZeroUpperCst},
        {HasAVX2 ? X86::VPBROADCASTDrm : X86::VBROADCASTSSrm, 1, 32,
         rebuildSplatCst},
        {X86::VPMOVSXBDrm, 4, 8, rebuildSExtCst},
        {X86::VPMOVZXBDrm, 4, 8, rebuildZExtCst},
        {X86::VPMOVSXWQrm, 2, 16, rebuildSExtCst},
        {X86::VPMOVZXWQrm, 2, 16, rebuildZExtCst},
        {X86::VMOVQI2PQIrm, 1, 64, rebuildZeroUpperCst},
        {HasAVX2 ? X86::VPBROADCASTQrm : X86::VMOVDDUPrm, 1, 64,
         rebuildSplatCst},
        {X86::VPMOVSXBWrm, 8, 8, rebuildSExtCst},
        {X86::VPMOVZXBWrm, 8, 8, rebuildZExtCst},
        {X86::VPMOVSXWDrm, 4, 16, rebuildSExtCst},
        {X86::VPMOVZXWDrm, 4, 16, rebuildZExtCst},
        {X86::VPMOVSXDQrm, 2, 32, rebuildSExtCst},
        {X86::VPMOVZXDQrm, 2, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 1);
  }
  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm: {
    FixupEntry Fixups[] = {
        {HasAVX2 ? X86::VPBROADCASTBYrm : 0, 1, 8, rebuildSplatCst},
        {HasAVX2 ? X86::VPBROADCASTWYrm : 0, 1, 16, rebuildSplatCst},
        {HasAVX2 ? X86::VPBROADCASTDYrm : X86::VBROADCASTSSYrm, 1, 32,
         rebuildSplatCst},
        {HasAVX2 ? X86::VPMOVSXBQYrm : 0, 4, 8, rebuildSExtCst},
        {HasAVX2 ? X86::VPMOVZXBQYrm : 0, 4, 8, rebuildZExtCst},
        {HasAVX2 ? X86::VPBROADCASTQYrm : X86::VBROADCASTSDYrm, 1, 64,
         rebuildSplatCst},
        {HasAVX2 ? X86::VPMOVSXBDYrm : 0, 8, 8, rebuildSExtCst},
        {HasAVX2 ? X86::VPMOVZXBDYrm : 0, 8, 8, rebuildZExtCst},
        {HasAVX2 ? X86::VPMOVSXWQYrm : 0, 4, 16, rebuildSExtCst},
        {HasAVX2 ? X86::VPMOVZXWQYrm : 0, 4, 16, rebuildZExtCst},
        {HasAVX2 ? X86::VBROADCASTI128rm : X86::VBROADCASTF128rm, 1, 128,
         rebuildSplatCst},
        {HasAVX2 ? X86::VPMOVSXBWYrm : 0, 16, 8, rebuildSExtCst},
        {HasAVX2 ? X86::VPMOVZXBWYrm : 0, 16, 8, rebuildZExtCst},
        {HasAVX2 ? X86::VPMOVSXWDYrm : 0, 8, 16, rebuildSExtCst},
        {HasAVX2 ? X86::VPMOVZXWDYrm : 0, 8, 16, rebuildZExtCst},
        {HasAVX2 ? X86::VPMOVSXDQYrm : 0, 4, 32, rebuildSExtCst},
        {HasAVX2 ? X86::VPMOVZXDQYrm : 0, 4, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 1);
  }
  case X86::VMOVDQA32Z128rm:
  case X86::VMOVDQA64Z128rm:
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQU64Z128rm: {
    FixupEntry Fixups[] = {
        {HasBWI ? X86::VPBROADCASTBZ128rm : 0, 1, 8, rebuildSplatCst},
        {HasBWI ? X86::VPBROADCASTWZ128rm : 0, 1, 16, rebuildSplatCst},
        {X86::VPMOVSXBQZ128rm, 2, 8, rebuildSExtCst},
        {X86::VPMOVZXBQZ128rm, 2, 8, rebuildZExtCst},
        {X86::VMOVDI2PDIZrm, 1, 32, rebuildZeroUpperCst},
        {X86::VPBROADCASTDZ128rm, 1, 32, rebuildSplatCst},
        {X86::VPMOVSXBDZ128rm, 4, 8, rebuildSExtCst},
        {X86::VPMOVZXBDZ128rm, 4, 8, rebuildZExtCst},
        {X86::VPMOVSXWQZ128rm, 2, 16, rebuildSExtCst},
        {X86::VPMOVZXWQZ128rm, 2, 16, rebuildZExtCst},
        {X86::VMOVQI2PQIZrm, 1, 64, rebuildZeroUpperCst},
        {X86::VPBROADCASTQZ128rm, 1, 64, rebuildSplatCst},
        {HasBWI ? X86::VPMOVSXBWZ128rm : 0, 8, 8, rebuildSExtCst},
        {HasBWI ? X86::VPMOVZXBWZ128rm : 0, 8, 8, rebuildZExtCst},
        {X86::VPMOVSXWDZ128rm, 4, 16, rebuildSExtCst},
        {X86::VPMOVZXWDZ128rm, 4, 16, rebuildZExtCst},
        {X86::VPMOVSXDQZ128rm, 2, 32, rebuildSExtCst},
        {X86::VPMOVZXDQZ128rm, 2, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 1);
  }
  case X86::VMOVDQA32Z256rm:
  case X86::VMOVDQA64Z256rm:
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQU64Z256rm: {
    FixupEntry Fixups[] = {
        {HasBWI ? X86::VPBROADCASTBZ256rm : 0, 1, 8, rebuildSplatCst},
        {HasBWI ? X86::VPBROADCASTWZ256rm : 0, 1, 16, rebuildSplatCst},
        {X86::VPBROADCASTDZ256rm, 1, 32, rebuildSplatCst},
        {X86::VPMOVSXBQZ256rm, 4, 8, rebuildSExtCst},
        {X86::VPMOVZXBQZ256rm, 4, 8, rebuildZExtCst},
        {X86::VPBROADCASTQZ256rm, 1, 64, rebuildSplatCst},
        {X86::VPMOVSXBDZ256rm, 8, 8, rebuildSExtCst},
        {X86::VPMOVZXBDZ256rm, 8, 8, rebuildZExtCst},
        {X86::VPMOVSXWQZ256rm, 4, 16, rebuildSExtCst},
        {X86::VPMOVZXWQZ256rm, 4, 16, rebuildZExtCst},
        {X86::VBROADCASTI32X4Z256rm, 1, 128, rebuildSplatCst},
        {HasBWI ? X86::VPMOVSXBWZ256rm : 0, 16, 8, rebuildSExtCst},
        {HasBWI ? X86::VPMOVZXBWZ256rm : 0, 16, 8, rebuildZExtCst},
        {X86::VPMOVSXWDZ256rm, 8, 16, rebuildSExtCst},
        {X86::VPMOVZXWDZ256rm, 8, 16, rebuildZExtCst},
        {X86::VPMOVSXDQZ256rm, 4, 32, rebuildSExtCst},
        {X86::VPMOVZXDQZ256rm, 4, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 1);
  }
  case X86::VMOVDQA32Zrm:
  case X86::VMOVDQA64Zrm:
  case X86::VMOVDQU32Zrm:
  case X86::VMOVDQU64Zrm: {
    FixupEntry Fixups[] = {
        {HasBWI ? X86::VPBROADCASTBZrm : 0, 1, 8, rebuildSplatCst},
        {HasBWI ? X86::VPBROADCASTWZrm : 0, 1, 16, rebuildSplatCst},
        {X86::VPBROADCASTDZrm, 1, 32, rebuildSplatCst},
        {X86::VPBROADCASTQZrm, 1, 64, rebuildSplatCst},
        {X86::VPMOVSXBQZrm, 8, 8, rebuildSExtCst},
        {X86::VPMOVZXBQZrm, 8, 8, rebuildZExtCst},
        {X86::VBROADCASTI32X4rm, 1, 128, rebuildSplatCst},
        {X86::VPMOVSXBDZrm, 16, 8, rebuildSExtCst},
        {X86::VPMOVZXBDZrm, 16, 8, rebuildZExtCst},
        {X86::VPMOVSXWQZrm, 8, 16, rebuildSExtCst},
        {X86::VPMOVZXWQZrm, 8, 16, rebuildZExtCst},
        {X86::VBROADCASTI64X4rm, 1, 256, rebuildSplatCst},
        {HasBWI ? X86::VPMOVSXBWZrm : 0, 32, 8, rebuildSExtCst},
        {HasBWI ? X86::VPMOVZXBWZrm : 0, 32, 8, rebuildZExtCst},
        {X86::VPMOVSXWDZrm, 16, 16, rebuildSExtCst},
        {X86::VPMOVZXWDZrm, 16, 16, rebuildZExtCst},
        {X86::VPMOVSXDQZrm, 8, 32, rebuildSExtCst},
        {X86::VPMOVZXDQZrm, 8, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 1);
  }
  }

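  // Attempt to convert a full width load to its broadcast-folded form, using
  // the fold tables to map the 32-bit and/or 64-bit source opcodes to their
  // broadcast variants.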
  auto ConvertToBroadcastAVX512 = [&](unsigned OpSrc32, unsigned OpSrc64) {
    unsigned OpBcst32 = 0, OpBcst64 = 0;
    unsigned OpNoBcst32 = 0, OpNoBcst64 = 0;
    if (OpSrc32) {
      if (const X86FoldTableEntry *Mem2Bcst =
              llvm::lookupBroadcastFoldTableBySize(OpSrc32, 32)) {
        OpBcst32 = Mem2Bcst->DstOp;
        OpNoBcst32 = Mem2Bcst->Flags & TB_INDEX_MASK;
      }
    }
    if (OpSrc64) {
      if (const X86FoldTableEntry *Mem2Bcst =
              llvm::lookupBroadcastFoldTableBySize(OpSrc64, 64)) {
        OpBcst64 = Mem2Bcst->DstOp;
        OpNoBcst64 = Mem2Bcst->Flags & TB_INDEX_MASK;
      }
    }
    assert(((OpBcst32 == 0) || (OpBcst64 == 0) || (OpNoBcst32 == OpNoBcst64)) &&
           "OperandNo mismatch");

    if (OpBcst32 || OpBcst64) {
      unsigned OpNo = OpBcst32 == 0 ? OpNoBcst64 : OpNoBcst32;
      FixupEntry Fixups[] = {{(int)OpBcst32, 32, 32, rebuildSplatCst},
                             {(int)OpBcst64, 64, 64, rebuildSplatCst}};
      return FixupConstant(Fixups, OpNo);
    }
    return false;
  };

  // Attempt to find an AVX512 mapping from a full width memory-fold
  // instruction to a broadcast-fold instruction variant.
  if ((MI.getDesc().TSFlags & X86II::EncodingMask) == X86II::EVEX)
    return ConvertToBroadcastAVX512(Opc, Opc);

  // Reverse the X86InstrInfo::setExecutionDomainCustom EVEX->VEX logic
  // conversion to see if we can convert to a broadcasted (integer) logic op.
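  // e.g. a VPANDrm whose constant is a 32-bit splat can instead use the
  // broadcast-folded VPANDDZ128rmb, loading a single 4-byte pool entry.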
  if (HasVLX && !HasDQI) {
    unsigned OpSrc32 = 0, OpSrc64 = 0;
    switch (Opc) {
    case X86::VANDPDrm:
    case X86::VANDPSrm:
    case X86::VPANDrm:
      OpSrc32 = X86::VPANDDZ128rm;
      OpSrc64 = X86::VPANDQZ128rm;
      break;
    case X86::VANDPDYrm:
    case X86::VANDPSYrm:
    case X86::VPANDYrm:
      OpSrc32 = X86::VPANDDZ256rm;
      OpSrc64 = X86::VPANDQZ256rm;
      break;
    case X86::VANDNPDrm:
    case X86::VANDNPSrm:
    case X86::VPANDNrm:
      OpSrc32 = X86::VPANDNDZ128rm;
      OpSrc64 = X86::VPANDNQZ128rm;
      break;
    case X86::VANDNPDYrm:
    case X86::VANDNPSYrm:
    case X86::VPANDNYrm:
      OpSrc32 = X86::VPANDNDZ256rm;
      OpSrc64 = X86::VPANDNQZ256rm;
      break;
    case X86::VORPDrm:
    case X86::VORPSrm:
    case X86::VPORrm:
      OpSrc32 = X86::VPORDZ128rm;
      OpSrc64 = X86::VPORQZ128rm;
      break;
    case X86::VORPDYrm:
    case X86::VORPSYrm:
    case X86::VPORYrm:
      OpSrc32 = X86::VPORDZ256rm;
      OpSrc64 = X86::VPORQZ256rm;
      break;
    case X86::VXORPDrm:
    case X86::VXORPSrm:
    case X86::VPXORrm:
      OpSrc32 = X86::VPXORDZ128rm;
      OpSrc64 = X86::VPXORQZ128rm;
      break;
    case X86::VXORPDYrm:
    case X86::VXORPSYrm:
    case X86::VPXORYrm:
      OpSrc32 = X86::VPXORDZ256rm;
      OpSrc64 = X86::VPXORQZ256rm;
      break;
    }
    if (OpSrc32 || OpSrc64)
      return ConvertToBroadcastAVX512(OpSrc32, OpSrc64);
  }

  return false;
}

bool X86FixupVectorConstantsPass::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "Start X86FixupVectorConstants\n";);
  bool Changed = false;
  ST = &MF.getSubtarget<X86Subtarget>();
  TII = ST->getInstrInfo();
  SM = &ST->getSchedModel();

  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (processInstruction(MF, MBB, MI)) {
        ++NumInstChanges;
        Changed = true;
      }
    }
  }
  LLVM_DEBUG(dbgs() << "End X86FixupVectorConstants\n";);
  return Changed;
}