//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <numeric>
#include <optional>

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;
using namespace MIPatternMatch;

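// Constrain Reg to RegClass in place if possible; otherwise return a fresh
// virtual register of RegClass so the caller can insert a COPY to repair the
// mismatch (see constrainOperandRegClass below).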
Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, Register Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Reg.isVirtual() && "PhysReg not implemented");

  // Save the old register class to check whether
  // the change notifications will be required.
  // TODO: A better approach would be to pass
  // the observers to constrainRegToClass().
  auto *OldRegClass = MRI.getRegClassOrNull(Reg);
  Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible,
  // insert a copy between the new and the old register.
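  // E.g. (illustrative vregs): constraining a use %0 of INSN inserts the copy
  // before INSN and rewrites the operand:
  //   %1:rc = COPY %0
  //   INSN ..., %1
  // For a def, the copy is inserted after INSN instead, copying the
  // constrained def back into the original register.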
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    // FIXME: The copy needs to have the classes constrained for its operands.
    // Use operand's regbank to get the class for old register (Reg).
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changingInstr(*RegMO.getParent());
    }
    RegMO.setReg(ConstrainedReg);
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changedInstr(*RegMO.getParent());
    }
  } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      if (!RegMO.isDef()) {
        MachineInstr *RegDef = MRI.getVRegDef(Reg);
        Observer->changedInstr(*RegDef);
      }
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
    }
  }
  return ConstrainedReg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Reg.isVirtual() && "PhysReg not implemented");

  const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: if it's a use, we
  // can skip constraining as the instruction defining the register would
  // constrain it.

  if (OpRC) {
    // Obtain the RC from the incoming regbank if it is a proper sub-class.
    // An operand's superclass can span multiple regbanks combining different
    // register types (e.g., AMDGPU's VGPR and AGPR). The regbank ambiguity
    // resolved by targets during regbankselect should not be overridden.
    if (const auto *SubRC = TRI.getCommonSubClass(
            OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
      OpRC = SubRC;

    OpRC = TRI.getAllocatableClass(OpRC);
  }

  if (!OpRC) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here may not be enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Note that to reproduce
    // the issue we likely need a destination pattern of a selection rule
    // producing such extra copies, not just an input GMIR with them, as every
    // existing target using selectImpl handles copies before calling it and
    // they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
                                  RegMO);
}

bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Reg.isPhysical())
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}

bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
                         MachineRegisterInfo &MRI) {
  // Give up if either DstReg or SrcReg is a physical register.
  if (DstReg.isPhysical() || SrcReg.isPhysical())
    return false;
  // Give up if the types don't match.
  if (MRI.getType(DstReg) != MRI.getType(SrcReg))
    return false;
  // Replace if either DstReg has no constraints or the register
  // constraints match.
  const auto &DstRBC = MRI.getRegClassOrRegBank(DstReg);
  if (!DstRBC || DstRBC == MRI.getRegClassOrRegBank(SrcReg))
    return true;

  // Otherwise match if the Src is already a regclass that is covered by the
  // Dst RegBank.
  return DstRBC.is<const RegisterBank *>() && MRI.getRegClassOrNull(SrcReg) &&
         DstRBC.get<const RegisterBank *>()->covers(
             *MRI.getRegClassOrNull(SrcReg));
}

bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // FIXME: This logic is mostly duplicated with
  // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
  // MachineInstr::isLabel?

  // Don't delete frame allocation labels.
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;
  // LIFETIME markers should be preserved even if they seem dead.
  if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
      MI.getOpcode() == TargetOpcode::LIFETIME_END)
    return false;

  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead vregs.
  for (const auto &MO : MI.all_defs()) {
    Register Reg = MO.getReg();
    if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}

static void reportGISelDiagnostic(DiagnosticSeverity Severity,
                                  MachineFunction &MF,
                                  const TargetPassConfig &TPC,
                                  MachineOptimizationRemarkEmitter &MORE,
                                  MachineOptimizationRemarkMissed &R) {
  bool IsFatal = Severity == DS_Error &&
                 TPC.isGlobalISelAbortEnabled();
  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsFatal)
    report_fatal_error(Twine(R.getMsg()));
  else
    MORE.emit(R);
}

void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
  reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}

std::optional<APInt> llvm::getIConstantVRegVal(Register VReg,
                                               const MachineRegisterInfo &MRI) {
  std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
      VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return std::nullopt;
  return ValAndVReg->Value;
}

std::optional<int64_t>
llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
  std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
  if (Val && Val->getBitWidth() <= 64)
    return Val->getSExtValue();
  return std::nullopt;
}

namespace {

typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
typedef std::function<std::optional<APInt>(const MachineInstr *MI)> GetAPCstFn;

std::optional<ValueAndVReg> getConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
    GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
    bool LookThroughAnyExt = false) {
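  // Walk the def chain, recording any truncation/extension steps we look
  // through; once the constant is found, the recorded steps are replayed on
  // its value in reverse order below. E.g. (illustrative vregs), with
  //   %0:_(s32) = G_CONSTANT i32 42
  //   %1:_(s64) = G_SEXT %0
  // querying %1 yields {42 sign-extended to 64 bits, %0}.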
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;

  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_ANYEXT:
      if (!LookThroughAnyExt)
        return std::nullopt;
      [[fallthrough]];
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (VReg.isPhysical())
        return std::nullopt;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return std::nullopt;
    }
  }
  if (!MI || !IsConstantOpcode(MI))
    return std::nullopt;

  std::optional<APInt> MaybeVal = getAPCstValue(MI);
  if (!MaybeVal)
    return std::nullopt;
  APInt &Val = *MaybeVal;
  for (auto [Opcode, Size] : reverse(SeenOpcodes)) {
    switch (Opcode) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(Size);
      break;
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
      Val = Val.sext(Size);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(Size);
      break;
    }
  }

  return ValueAndVReg{Val, VReg};
}

bool isIConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_CONSTANT;
}

bool isFConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
}

bool isAnyConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  unsigned Opc = MI->getOpcode();
  return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
}

std::optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    return CstVal.getCImm()->getValue();
  return std::nullopt;
}

std::optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    return CstVal.getCImm()->getValue();
  if (CstVal.isFPImm())
    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  return std::nullopt;
}

} // end anonymous namespace

std::optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
                                           getCImmAsAPInt, LookThroughInstrs);
}

std::optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool LookThroughAnyExt) {
  return getConstantVRegValWithLookThrough(
      VReg, MRI, isAnyConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs,
      LookThroughAnyExt);
}

std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  auto Reg = getConstantVRegValWithLookThrough(
      VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
  if (!Reg)
    return std::nullopt;
  return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
                        Reg->VReg};
}

const ConstantFP *
llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

std::optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
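  // Walk through plain COPYs and pre-ISel optimization hints (the G_ASSERT_*
  // opcodes) to find the underlying defining instruction, stopping if a
  // source type becomes invalid (e.g. a physical register source).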
  Register DefSrcReg = Reg;
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return std::nullopt;
  unsigned Opc = DefMI->getOpcode();
  while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid())
      break;
    DefMI = MRI.getVRegDef(SrcReg);
    DefSrcReg = SrcReg;
    Opc = DefMI->getOpcode();
  }
  return DefinitionAndSourceRegister{DefMI, DefSrcReg};
}

MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->MI : nullptr;
}

Register llvm::getSrcRegIgnoringCopies(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->Reg : Register();
}

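// E.g. (illustrative vregs): extracting two s32 parts from a s64 produces
//   %1:_(s32), %2:_(s32) = G_UNMERGE_VALUES %0:_(s64)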
void llvm::extractParts(Register Reg, LLT Ty, int NumParts,
                        SmallVectorImpl<Register> &VRegs,
                        MachineIRBuilder &MIRBuilder,
                        MachineRegisterInfo &MRI) {
  for (int i = 0; i < NumParts; ++i)
    VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
  MIRBuilder.buildUnmerge(VRegs, Reg);
}

bool llvm::extractParts(Register Reg, LLT RegTy, LLT MainTy, LLT &LeftoverTy,
                        SmallVectorImpl<Register> &VRegs,
                        SmallVectorImpl<Register> &LeftoverRegs,
                        MachineIRBuilder &MIRBuilder,
                        MachineRegisterInfo &MRI) {
  assert(!LeftoverTy.isValid() && "this is an out argument");

  unsigned RegSize = RegTy.getSizeInBits();
  unsigned MainSize = MainTy.getSizeInBits();
  unsigned NumParts = RegSize / MainSize;
  unsigned LeftoverSize = RegSize - NumParts * MainSize;

  // Use an unmerge when possible.
  if (LeftoverSize == 0) {
    for (unsigned I = 0; I < NumParts; ++I)
      VRegs.push_back(MRI.createGenericVirtualRegister(MainTy));
    MIRBuilder.buildUnmerge(VRegs, Reg);
    return true;
  }

  // Try to use unmerge for an irregular vector split where possible.
  // For example, when splitting a <6 x i32> into <4 x i32> with a <2 x i32>
  // leftover, it becomes:
  //  <2 x i32> %2, <2 x i32> %3, <2 x i32> %4 = G_UNMERGE_VALUES <6 x i32> %1
  //  <4 x i32> %5 = G_CONCAT_VECTORS <2 x i32> %2, <2 x i32> %3
  if (RegTy.isVector() && MainTy.isVector()) {
    unsigned RegNumElts = RegTy.getNumElements();
    unsigned MainNumElts = MainTy.getNumElements();
    unsigned LeftoverNumElts = RegNumElts % MainNumElts;
    // If we can unmerge to LeftoverTy, do it.
    if (MainNumElts % LeftoverNumElts == 0 &&
        RegNumElts % LeftoverNumElts == 0 &&
        RegTy.getScalarSizeInBits() == MainTy.getScalarSizeInBits() &&
        LeftoverNumElts > 1) {
      LeftoverTy =
          LLT::fixed_vector(LeftoverNumElts, RegTy.getScalarSizeInBits());

      // Unmerge Reg into LeftoverTy-sized pieces.
      SmallVector<Register, 4> UnmergeValues;
      extractParts(Reg, LeftoverTy, RegNumElts / LeftoverNumElts, UnmergeValues,
                   MIRBuilder, MRI);

      // Find how many LeftoverTy pieces make up one MainTy, and how many
      // pieces remain as leftovers.
      unsigned LeftoverPerMain = MainNumElts / LeftoverNumElts;
      unsigned NumOfLeftoverVal =
          ((RegNumElts % MainNumElts) / LeftoverNumElts);

      // Build as many MainTy values as possible from the unmerged pieces.
      SmallVector<Register, 4> MergeValues;
      for (unsigned I = 0; I < UnmergeValues.size() - NumOfLeftoverVal; I++) {
        MergeValues.push_back(UnmergeValues[I]);
        if (MergeValues.size() == LeftoverPerMain) {
          VRegs.push_back(
              MIRBuilder.buildMergeLikeInstr(MainTy, MergeValues).getReg(0));
          MergeValues.clear();
        }
      }
      // Populate LeftoverRegs with the leftovers.
      for (unsigned I = UnmergeValues.size() - NumOfLeftoverVal;
           I < UnmergeValues.size(); I++) {
        LeftoverRegs.push_back(UnmergeValues[I]);
      }
      return true;
    }
  }
  // Perform irregular split. Leftover is last element of RegPieces.
  if (MainTy.isVector()) {
    SmallVector<Register, 8> RegPieces;
    extractVectorParts(Reg, MainTy.getNumElements(), RegPieces, MIRBuilder,
                       MRI);
    for (unsigned i = 0; i < RegPieces.size() - 1; ++i)
      VRegs.push_back(RegPieces[i]);
    LeftoverRegs.push_back(RegPieces[RegPieces.size() - 1]);
    LeftoverTy = MRI.getType(LeftoverRegs[0]);
    return true;
  }

  LeftoverTy = LLT::scalar(LeftoverSize);
  // For irregular sizes, extract the individual parts.
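  // E.g. (illustrative): splitting a s56 with MainTy s16 emits three s16
  // G_EXTRACTs at offsets 0, 16 and 32, then one s8 G_EXTRACT at offset 48.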
  for (unsigned I = 0; I != NumParts; ++I) {
    Register NewReg = MRI.createGenericVirtualRegister(MainTy);
    VRegs.push_back(NewReg);
    MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
  }

  for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
       Offset += LeftoverSize) {
    Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
    LeftoverRegs.push_back(NewReg);
    MIRBuilder.buildExtract(NewReg, Reg, Offset);
  }

  return true;
}

void llvm::extractVectorParts(Register Reg, unsigned NumElts,
                              SmallVectorImpl<Register> &VRegs,
                              MachineIRBuilder &MIRBuilder,
                              MachineRegisterInfo &MRI) {
  LLT RegTy = MRI.getType(Reg);
  assert(RegTy.isVector() && "Expected a vector type");

  LLT EltTy = RegTy.getElementType();
  LLT NarrowTy = (NumElts == 1) ? EltTy : LLT::fixed_vector(NumElts, EltTy);
  unsigned RegNumElts = RegTy.getNumElements();
  unsigned LeftoverNumElts = RegNumElts % NumElts;
  unsigned NumNarrowTyPieces = RegNumElts / NumElts;

  // Perfect split without leftover.
  if (LeftoverNumElts == 0)
    return extractParts(Reg, NarrowTy, NumNarrowTyPieces, VRegs, MIRBuilder,
                        MRI);

  // Irregular split. Unmerge to individual elements so the artifact combiner
  // has direct access to them, then build vectors with NumElts elements each.
  // The remaining element(s) become the leftover, built into a vector if
  // there is more than one.
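  // E.g. (illustrative): NumElts = 2 on a <5 x s16> unmerges to five s16
  // values, builds two <2 x s16> vectors, and the fifth s16 is the leftover.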
  SmallVector<Register, 8> Elts;
  extractParts(Reg, EltTy, RegNumElts, Elts, MIRBuilder, MRI);

  unsigned Offset = 0;
  // Requested sub-vectors of NarrowTy.
  for (unsigned i = 0; i < NumNarrowTyPieces; ++i, Offset += NumElts) {
    ArrayRef<Register> Pieces(&Elts[Offset], NumElts);
    VRegs.push_back(MIRBuilder.buildMergeLikeInstr(NarrowTy, Pieces).getReg(0));
  }

  // Leftover element(s).
  if (LeftoverNumElts == 1) {
    VRegs.push_back(Elts[Offset]);
  } else {
    LLT LeftoverTy = LLT::fixed_vector(LeftoverNumElts, EltTy);
    ArrayRef<Register> Pieces(&Elts[Offset], LeftoverNumElts);
    VRegs.push_back(
        MIRBuilder.buildMergeLikeInstr(LeftoverTy, Pieces).getReg(0));
  }
}

MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}

std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
                                             const Register Op1,
                                             const Register Op2,
                                             const MachineRegisterInfo &MRI) {
  auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
  if (!MaybeOp2Cst)
    return std::nullopt;

  auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
  if (!MaybeOp1Cst)
    return std::nullopt;

  const APInt &C1 = MaybeOp1Cst->Value;
  const APInt &C2 = MaybeOp2Cst->Value;
  switch (Opcode) {
  default:
    break;
  case TargetOpcode::G_ADD:
    return C1 + C2;
  case TargetOpcode::G_PTR_ADD:
    // Types can be of different width here.
    // Result needs to be the same width as C1, so trunc or sext C2.
    return C1 + C2.sextOrTrunc(C1.getBitWidth());
  case TargetOpcode::G_AND:
    return C1 & C2;
  case TargetOpcode::G_ASHR:
    return C1.ashr(C2);
  case TargetOpcode::G_LSHR:
    return C1.lshr(C2);
  case TargetOpcode::G_MUL:
    return C1 * C2;
  case TargetOpcode::G_OR:
    return C1 | C2;
  case TargetOpcode::G_SHL:
    return C1 << C2;
  case TargetOpcode::G_SUB:
    return C1 - C2;
  case TargetOpcode::G_XOR:
    return C1 ^ C2;
  case TargetOpcode::G_UDIV:
    if (!C2.getBoolValue())
      break;
    return C1.udiv(C2);
  case TargetOpcode::G_SDIV:
    if (!C2.getBoolValue())
      break;
    return C1.sdiv(C2);
  case TargetOpcode::G_UREM:
    if (!C2.getBoolValue())
      break;
    return C1.urem(C2);
  case TargetOpcode::G_SREM:
    if (!C2.getBoolValue())
      break;
    return C1.srem(C2);
  case TargetOpcode::G_SMIN:
    return APIntOps::smin(C1, C2);
  case TargetOpcode::G_SMAX:
    return APIntOps::smax(C1, C2);
  case TargetOpcode::G_UMIN:
    return APIntOps::umin(C1, C2);
  case TargetOpcode::G_UMAX:
    return APIntOps::umax(C1, C2);
  }

  return std::nullopt;
}

std::optional<APFloat>
llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                          const Register Op2, const MachineRegisterInfo &MRI) {
  const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
  if (!Op2Cst)
    return std::nullopt;

  const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
  if (!Op1Cst)
    return std::nullopt;

  APFloat C1 = Op1Cst->getValueAPF();
  const APFloat &C2 = Op2Cst->getValueAPF();
  switch (Opcode) {
  case TargetOpcode::G_FADD:
    C1.add(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FSUB:
    C1.subtract(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FMUL:
    C1.multiply(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FDIV:
    C1.divide(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FREM:
    C1.mod(C2);
    return C1;
  case TargetOpcode::G_FCOPYSIGN:
    C1.copySign(C2);
    return C1;
  case TargetOpcode::G_FMINNUM:
    return minnum(C1, C2);
  case TargetOpcode::G_FMAXNUM:
    return maxnum(C1, C2);
  case TargetOpcode::G_FMINIMUM:
    return minimum(C1, C2);
  case TargetOpcode::G_FMAXIMUM:
    return maximum(C1, C2);
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:
    // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
    // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
    // and currently there isn't a nice wrapper in APFloat for the version with
    // correct snan handling.
    break;
  default:
    break;
  }

  return std::nullopt;
}

SmallVector<APInt>
llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                              const Register Op2,
                              const MachineRegisterInfo &MRI) {
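  // Fold the operation element-wise over two constant G_BUILD_VECTORs; an
  // empty result vector signals that folding failed.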
  auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
  if (!SrcVec2)
    return SmallVector<APInt>();

  auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
  if (!SrcVec1)
    return SmallVector<APInt>();

  SmallVector<APInt> FoldedElements;
  for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
    auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
                                      SrcVec2->getSourceReg(Idx), MRI);
    if (!MaybeCst)
      return SmallVector<APInt>();
    FoldedElements.push_back(*MaybeCst);
  }
  return FoldedElements;
}

bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  const TargetMachine &TM = DefMI->getMF()->getTarget();
  if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
    return !FPVal->getValueAPF().isNaN() ||
           (SNaN && !FPVal->getValueAPF().isSignaling());
  }

  if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
    for (const auto &Op : DefMI->uses())
      if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
        return false;
    return true;
  }

  switch (DefMI->getOpcode()) {
  default:
    break;
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FREM:
  case TargetOpcode::G_FSIN:
  case TargetOpcode::G_FCOS:
  case TargetOpcode::G_FMA:
  case TargetOpcode::G_FMAD:
    if (SNaN)
      return true;

    // TODO: Need isKnownNeverInfinity
    return false;
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE: {
    if (SNaN)
      return true;
    // This can return a NaN if either operand is an sNaN, or if both operands
    // are NaN.
    return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
           (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
  }
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM: {
    // Only one needs to be known not-nan, since it will be returned if the
    // other ends up being one.
    return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
           isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
  }
  }

  if (SNaN) {
    // FP operations quiet signaling NaNs. For now, just handle the ones
    // inserted during legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}

Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
                                  const MachinePointerInfo &MPO) {
  auto PSV = dyn_cast_if_present<const PseudoSourceValue *>(MPO.V);
  if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
                           MPO.Offset);
  }

  if (const Value *V = dyn_cast_if_present<const Value *>(MPO.V)) {
    const Module *M = MF.getFunction().getParent();
    return V->getPointerAlignment(M->getDataLayout());
  }

  return Align(1);
}

Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
                                        const TargetInstrInfo &TII,
                                        MCRegister PhysReg,
                                        const TargetRegisterClass &RC,
                                        const DebugLoc &DL, LLT RegTy) {
  MachineBasicBlock &EntryMBB = MF.front();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
  if (LiveIn) {
    MachineInstr *Def = MRI.getVRegDef(LiveIn);
    if (Def) {
      // FIXME: Should the verifier check this is in the entry block?
      assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
      return LiveIn;
    }

    // It's possible the incoming argument register and its copy were added
    // during lowering but later deleted because they were or became dead.
    // If this happens, re-insert the copy.
  } else {
    // The live in register was not present, so add it.
    LiveIn = MF.addLiveIn(PhysReg, &RC);
    if (RegTy.isValid())
      MRI.setType(LiveIn, RegTy);
  }

  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
    .addReg(PhysReg);
  if (!EntryMBB.isLiveIn(PhysReg))
    EntryMBB.addLiveIn(PhysReg);
  return LiveIn;
}

std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode,
                                             const Register Op1, uint64_t Imm,
                                             const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG: {
      LLT Ty = MRI.getType(Op1);
      return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
    }
    }
  }
  return std::nullopt;
}

std::optional<APInt> llvm::ConstantFoldCastOp(unsigned Opcode, LLT DstTy,
                                              const Register Op0,
                                              const MachineRegisterInfo &MRI) {
  std::optional<APInt> Val = getIConstantVRegVal(Op0, MRI);
  if (!Val)
    return Val;

  const unsigned DstSize = DstTy.getScalarSizeInBits();

  switch (Opcode) {
  case TargetOpcode::G_SEXT:
    return Val->sext(DstSize);
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    // TODO: DAG considers target preference when constant folding any_extend.
    return Val->zext(DstSize);
  default:
    break;
  }

  llvm_unreachable("unexpected cast opcode to constant fold");
}

std::optional<APFloat>
llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
                             const MachineRegisterInfo &MRI) {
  assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
  if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
    APFloat DstVal(getFltSemanticForLLT(DstTy));
    DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
                            APFloat::rmNearestTiesToEven);
    return DstVal;
  }
  return std::nullopt;
}

std::optional<SmallVector<unsigned>>
llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
  LLT Ty = MRI.getType(Src);
  SmallVector<unsigned> FoldedCTLZs;
  auto tryFoldScalar = [&](Register R) -> std::optional<unsigned> {
    auto MaybeCst = getIConstantVRegVal(R, MRI);
    if (!MaybeCst)
      return std::nullopt;
    return MaybeCst->countl_zero();
  };
  if (Ty.isVector()) {
    // Try to constant fold each element.
    auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
    if (!BV)
      return std::nullopt;
    for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
      if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
        FoldedCTLZs.emplace_back(*MaybeFold);
        continue;
      }
      return std::nullopt;
    }
    return FoldedCTLZs;
  }
  if (auto MaybeCst = tryFoldScalar(Src)) {
    FoldedCTLZs.emplace_back(*MaybeCst);
    return FoldedCTLZs;
  }
  return std::nullopt;
}

bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
                                  GISelKnownBits *KB) {
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  if (!DefSrcReg)
    return false;

  const MachineInstr &MI = *DefSrcReg->MI;
  const LLT Ty = MRI.getType(Reg);

  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
    unsigned BitWidth = Ty.getScalarSizeInBits();
    const ConstantInt *CI = MI.getOperand(1).getCImm();
    return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
  }
  case TargetOpcode::G_SHL: {
    // A left-shift of a constant one will have exactly one bit set because
    // shifting the bit off the end is undefined.

    // TODO: Constant splat
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (*ConstLHS == 1)
        return true;
    }

    break;
  }
  case TargetOpcode::G_LSHR: {
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (ConstLHS->isSignMask())
        return true;
    }

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // TODO: Probably should have a recursion depth guard since you could have
    // bitcasted vector elements.
    for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
      if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB))
        return false;

    return true;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Only handle constants since we would need to know whether the number of
    // leading zeros is greater than the truncation amount.
    const unsigned BitWidth = Ty.getScalarSizeInBits();
    for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
      auto Const = getIConstantVRegVal(MO.getReg(), MRI);
      if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
        return false;
    }

    return true;
  }
  default:
    break;
  }

  if (!KB)
    return false;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  KnownBits Known = KB->getKnownBits(Reg);
  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}

void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}

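// E.g. (illustrative): getLCMType(<3 x s32>, <2 x s32>) == <6 x s32>, and
// getLCMType(s32, s64) == s64 since the target type already covers the LCM.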
LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
  if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
    return OrigTy;

  if (OrigTy.isVector() && TargetTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();
    LLT TargetElt = TargetTy.getElementType();

    // TODO: The docstring for this function says the intention is to use this
    // function to build MERGE/UNMERGE instructions. It won't be the case that
    // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
    // could implement getLCMType between the two in the future if there was a
    // need, but it is not worth it now as this function should not be used in
    // that way.
    assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
            (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
           "getLCMType not implemented between fixed and scalable vectors.");

    if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
      int GCDMinElts = std::gcd(OrigTy.getElementCount().getKnownMinValue(),
                                TargetTy.getElementCount().getKnownMinValue());
      // Prefer the original element type.
      ElementCount Mul = OrigTy.getElementCount().multiplyCoefficientBy(
          TargetTy.getElementCount().getKnownMinValue());
      return LLT::vector(Mul.divideCoefficientBy(GCDMinElts),
                         OrigTy.getElementType());
    }
    unsigned LCM = std::lcm(OrigTy.getSizeInBits().getKnownMinValue(),
                            TargetTy.getSizeInBits().getKnownMinValue());
    return LLT::vector(
        ElementCount::get(LCM / OrigElt.getSizeInBits(), OrigTy.isScalable()),
        OrigElt);
  }

  // One type is scalar, one type is vector.
  if (OrigTy.isVector() || TargetTy.isVector()) {
    LLT VecTy = OrigTy.isVector() ? OrigTy : TargetTy;
    LLT ScalarTy = OrigTy.isVector() ? TargetTy : OrigTy;
    LLT EltTy = VecTy.getElementType();
    LLT OrigEltTy = OrigTy.isVector() ? OrigTy.getElementType() : OrigTy;

    // Prefer scalar type from OrigTy.
    if (EltTy.getSizeInBits() == ScalarTy.getSizeInBits())
      return LLT::vector(VecTy.getElementCount(), OrigEltTy);

    // Different size scalars. Create vector with the same total size.
    // LCM will take fixed/scalable from VecTy.
    unsigned LCM = std::lcm(EltTy.getSizeInBits().getFixedValue() *
                                VecTy.getElementCount().getKnownMinValue(),
                            ScalarTy.getSizeInBits().getFixedValue());
    // Prefer type from OrigTy.
    return LLT::vector(ElementCount::get(LCM / OrigEltTy.getSizeInBits(),
                                         VecTy.getElementCount().isScalable()),
                       OrigEltTy);
  }

  // At this point, both types are scalars of different size.
  unsigned LCM = std::lcm(OrigTy.getSizeInBits().getFixedValue(),
                          TargetTy.getSizeInBits().getFixedValue());
  // Preserve pointer types.
  if (LCM == OrigTy.getSizeInBits())
    return OrigTy;
  if (LCM == TargetTy.getSizeInBits())
    return TargetTy;
  return LLT::scalar(LCM);
}

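// E.g. (illustrative): getCoverTy(<5 x s16>, <4 x s16>) == <8 x s16>, the
// smallest multiple of the target element count that covers OrigTy.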
LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
  if ((OrigTy.isScalableVector() && TargetTy.isFixedVector()) ||
      (OrigTy.isFixedVector() && TargetTy.isScalableVector()))
    llvm_unreachable(
        "getCoverTy not implemented between fixed and scalable vectors.");

  if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
      (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
    return getLCMType(OrigTy, TargetTy);

  unsigned OrigTyNumElts = OrigTy.getElementCount().getKnownMinValue();
  unsigned TargetTyNumElts = TargetTy.getElementCount().getKnownMinValue();
  if (OrigTyNumElts % TargetTyNumElts == 0)
    return OrigTy;

  unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
  return LLT::scalarOrVector(ElementCount::getFixed(NumElts),
                             OrigTy.getElementType());
}

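// E.g. (illustrative): getGCDType(<4 x s32>, <2 x s32>) == <2 x s32>, and
// getGCDType(<2 x s64>, s32) == s32.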
LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
  if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
    return OrigTy;

  if (OrigTy.isVector() && TargetTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();

    // TODO: The docstring for this function says the intention is to use this
    // function to build MERGE/UNMERGE instructions. It won't be the case that
    // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
    // could implement getGCDType between the two in the future if there was a
    // need, but it is not worth it now as this function should not be used in
    // that way.
    assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
            (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
           "getGCDType not implemented between fixed and scalable vectors.");

    unsigned GCD = std::gcd(OrigTy.getSizeInBits().getKnownMinValue(),
                            TargetTy.getSizeInBits().getKnownMinValue());
    if (GCD == OrigElt.getSizeInBits())
      return LLT::scalarOrVector(ElementCount::get(1, OrigTy.isScalable()),
                                 OrigElt);

    // Cannot produce original element type, but both have vscale in common.
    if (GCD < OrigElt.getSizeInBits())
      return LLT::scalarOrVector(ElementCount::get(1, OrigTy.isScalable()),
                                 GCD);

    return LLT::vector(
        ElementCount::get(GCD / OrigElt.getSizeInBits().getFixedValue(),
                          OrigTy.isScalable()),
        OrigElt);
  }

  // If one type is vector and the element size matches the scalar size, then
  // the gcd is the scalar type.
  if (OrigTy.isVector() &&
      OrigTy.getElementType().getSizeInBits() == TargetTy.getSizeInBits())
    return OrigTy.getElementType();
  if (TargetTy.isVector() &&
      TargetTy.getElementType().getSizeInBits() == OrigTy.getSizeInBits())
    return OrigTy;

  // At this point, both types are either scalars of different type or one is a
  // vector and one is a scalar. If both types are scalars, the GCD type is the
  // GCD between the two scalar sizes. If one is vector and one is scalar, then
  // the GCD type is the GCD between the scalar and the vector element size.
  LLT OrigScalar = OrigTy.getScalarType();
  LLT TargetScalar = TargetTy.getScalarType();
  unsigned GCD = std::gcd(OrigScalar.getSizeInBits().getFixedValue(),
                          TargetScalar.getSizeInBits().getFixedValue());
  return LLT::scalar(GCD);
}

std::optional<int> llvm::getSplatIndex(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });

  // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0 for better potential for callers to simplify.
  if (FirstDefinedIdx == Mask.end())
    return 0;

  // Make sure all remaining elements are either undef or the same
  // as the first non-undef value.
  int SplatValue = *FirstDefinedIdx;
  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return std::nullopt;

  return SplatValue;
}

static bool isBuildVectorOp(unsigned Opcode) {
  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
}

namespace {

std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
                                                const MachineRegisterInfo &MRI,
                                                bool AllowUndef) {
  MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
  if (!MI)
    return std::nullopt;

  bool isConcatVectorsOp = MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;
  if (!isBuildVectorOp(MI->getOpcode()) && !isConcatVectorsOp)
    return std::nullopt;

  std::optional<ValueAndVReg> SplatValAndReg;
  for (MachineOperand &Op : MI->uses()) {
    Register Element = Op.getReg();
    // If we have a G_CONCAT_VECTORS, we recursively look into the
    // vectors that we're concatenating to see if they're splats.
    auto ElementValAndReg =
        isConcatVectorsOp
            ? getAnyConstantSplat(Element, MRI, AllowUndef)
            : getAnyConstantVRegValWithLookThrough(Element, MRI, true, true);

    // If AllowUndef, treat undef as a value that will result in a constant
    // splat.
1267     if (!ElementValAndReg) {
1268       if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
1269         continue;
1270       return std::nullopt;
1271     }
1272 
1273     // Record splat value
1274     if (!SplatValAndReg)
1275       SplatValAndReg = ElementValAndReg;
1276 
1277     // Different constant than the one already recorded, not a constant splat.
1278     if (SplatValAndReg->Value != ElementValAndReg->Value)
1279       return std::nullopt;
1280   }
1281 
1282   return SplatValAndReg;
1283 }
1284 
1285 } // end anonymous namespace
1286 
1287 bool llvm::isBuildVectorConstantSplat(const Register Reg,
1288                                       const MachineRegisterInfo &MRI,
1289                                       int64_t SplatValue, bool AllowUndef) {
1290   if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
1291     return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
1292   return false;
1293 }
1294 
1295 bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
1296                                       const MachineRegisterInfo &MRI,
1297                                       int64_t SplatValue, bool AllowUndef) {
1298   return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1299                                     AllowUndef);
1300 }
1301 
1302 std::optional<APInt>
1303 llvm::getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI) {
1304   if (auto SplatValAndReg =
1305           getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) {
1306     if (std::optional<ValueAndVReg> ValAndVReg =
1307         getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI))
1308       return ValAndVReg->Value;
1309   }
1310 
1311   return std::nullopt;
1312 }
1313 
1314 std::optional<APInt>
1315 llvm::getIConstantSplatVal(const MachineInstr &MI,
1316                            const MachineRegisterInfo &MRI) {
1317   return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
1318 }
1319 
1320 std::optional<int64_t>
1321 llvm::getIConstantSplatSExtVal(const Register Reg,
1322                                const MachineRegisterInfo &MRI) {
1323   if (auto SplatValAndReg =
1324           getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false))
1325     return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
1326   return std::nullopt;
1327 }
1328 
1329 std::optional<int64_t>
1330 llvm::getIConstantSplatSExtVal(const MachineInstr &MI,
1331                                const MachineRegisterInfo &MRI) {
1332   return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
1333 }
1334 
1335 std::optional<FPValueAndVReg>
1336 llvm::getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI,
1337                         bool AllowUndef) {
1338   if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
1339     return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1340   return std::nullopt;
1341 }
1342 
1343 bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
1344                                  const MachineRegisterInfo &MRI,
1345                                  bool AllowUndef) {
1346   return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
1347 }
1348 
1349 bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
1350                                 const MachineRegisterInfo &MRI,
1351                                 bool AllowUndef) {
1352   return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
1353 }
1354 
1355 std::optional<RegOrConstant>
1356 llvm::getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI) {
1357   unsigned Opc = MI.getOpcode();
1358   if (!isBuildVectorOp(Opc))
1359     return std::nullopt;
1360   if (auto Splat = getIConstantSplatSExtVal(MI, MRI))
1361     return RegOrConstant(*Splat);
1362   auto Reg = MI.getOperand(1).getReg();
1363   if (any_of(drop_begin(MI.operands(), 2),
1364              [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
1365     return std::nullopt;
1366   return RegOrConstant(Reg);
1367 }

static bool isConstantScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI,
                             bool AllowFP = true,
                             bool AllowOpaqueConstants = true) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_IMPLICIT_DEF:
    return true;
  case TargetOpcode::G_FCONSTANT:
    return AllowFP;
  case TargetOpcode::G_GLOBAL_VALUE:
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_BLOCK_ADDR:
  case TargetOpcode::G_JUMP_TABLE:
    return AllowOpaqueConstants;
  default:
    return false;
  }
}

bool llvm::isConstantOrConstantVector(MachineInstr &MI,
                                      const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (getIConstantVRegValWithLookThrough(Def, MRI))
    return true;
  GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
  if (!BV)
    return false;
  for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
    if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
        getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
      continue;
    return false;
  }
  return true;
}
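
// Illustrative usage sketch (added commentary): a combine that must not
// introduce new constants can gate on this query, which accepts scalar
// integer constants and G_BUILD_VECTORs of constants/undefs; `SrcReg` is
// hypothetical:
//
//   if (isConstantOrConstantVector(*MRI.getVRegDef(SrcReg), MRI))
//     ... safe to fold; every lane is a known constant or undef ...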

bool llvm::isConstantOrConstantVector(const MachineInstr &MI,
                                      const MachineRegisterInfo &MRI,
                                      bool AllowFP, bool AllowOpaqueConstants) {
  if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
    return true;

  if (!isBuildVectorOp(MI.getOpcode()))
    return false;

  const unsigned NumOps = MI.getNumOperands();
  for (unsigned I = 1; I != NumOps; ++I) {
    const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
    if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
      return false;
  }

  return true;
}

std::optional<APInt>
llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
                                      const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
    return C->Value;
  auto MaybeCst = getIConstantSplatSExtVal(MI, MRI);
  if (!MaybeCst)
    return std::nullopt;
  const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
  return APInt(ScalarSize, *MaybeCst, /*isSigned=*/true);
}
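
// Illustrative usage sketch (added commentary): this helper collapses the
// scalar and splat cases into one APInt query, e.g. for a shift-amount
// range check; `AmtDef` and `ScalarSizeInBits` are hypothetical:
//
//   if (std::optional<APInt> Cst =
//           isConstantOrConstantSplatVector(*AmtDef, MRI))
//     if (Cst->ult(ScalarSizeInBits))
//       ... the (possibly splatted) shift amount is in range ...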

bool llvm::isNullOrNullSplat(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI, bool AllowUndefs) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_IMPLICIT_DEF:
    return AllowUndefs;
  case TargetOpcode::G_CONSTANT:
    return MI.getOperand(1).getCImm()->isNullValue();
  case TargetOpcode::G_FCONSTANT: {
    const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
    return FPImm->isZero() && !FPImm->isNegative();
  }
  default:
    if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
      return false;
    return isBuildVectorAllZeros(MI, MRI);
  }
}
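
// Illustrative usage sketch (added commentary): recognizing `G_SUB x, 0`
// with undef lanes tolerated; `RHSDef` is the hypothetical def of the
// second source operand:
//
//   if (isNullOrNullSplat(*RHSDef, MRI, /*AllowUndefs=*/true))
//     ... G_SUB x, 0 can be replaced by x ...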

bool llvm::isAllOnesOrAllOnesSplat(const MachineInstr &MI,
                                   const MachineRegisterInfo &MRI,
                                   bool AllowUndefs) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_IMPLICIT_DEF:
    return AllowUndefs;
  case TargetOpcode::G_CONSTANT:
    return MI.getOperand(1).getCImm()->isAllOnesValue();
  default:
    if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
      return false;
    return isBuildVectorAllOnes(MI, MRI);
  }
}

bool llvm::matchUnaryPredicate(
    const MachineRegisterInfo &MRI, Register Reg,
    std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
  const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    return Match(nullptr);

  // TODO: Also handle fconstant
  if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
    return Match(Def->getOperand(1).getCImm());

  if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
    return false;

  for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
    Register SrcElt = Def->getOperand(I).getReg();
    const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
    if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
        !Match(SrcDef->getOperand(1).getCImm()))
      return false;
  }

  return true;
}
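
// Illustrative usage sketch (added commentary): checking that every element
// of a (possibly splatted) divisor is a power of two, e.g. to turn G_UDIV
// into G_LSHR. Match sees a null Constant only for undef elements when
// AllowUndefs is set; `DivisorReg` is hypothetical:
//
//   bool AllPow2 =
//       matchUnaryPredicate(MRI, DivisorReg, [](const Constant *C) {
//         auto *CI = dyn_cast_or_null<ConstantInt>(C);
//         return CI && CI->getValue().isPowerOf2();
//       });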

bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                          bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
    return Val == 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

bool llvm::isConstFalseVal(const TargetLowering &TLI, int64_t Val,
                           bool IsVector, bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return ~Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == 0;
  }
  llvm_unreachable("Invalid boolean contents");
}

int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
                             bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
    return 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return -1;
  }
  llvm_unreachable("Invalid boolean contents");
}
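
// Illustrative usage sketch (added commentary): materializing the target's
// canonical "true" when a G_ICMP is known to be true, assuming a
// MachineIRBuilder `MIB`, the result register `Dst`, and its LLT `DstTy`:
//
//   int64_t TrueVal = getICmpTrueVal(TLI, DstTy.isVector(), /*IsFP=*/false);
//   MIB.buildConstant(Dst, TrueVal);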

bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
                            ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  const auto &F = MBB.getParent()->getFunction();
  return F.hasOptSize() || F.hasMinSize() ||
         llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
}
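
// Illustrative usage sketch (added commentary): gating a size-increasing
// transform, e.g. declining an unrolled expansion in favor of a compact
// lowering when size matters:
//
//   if (shouldOptForSize(MBB, PSI, BFI))
//     return false; // Keep the smaller code sequence.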

void llvm::saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
                            LostDebugLocObserver *LocObserver,
                            SmallInstListTy &DeadInstChain) {
  for (MachineOperand &Op : MI.uses()) {
    if (Op.isReg() && Op.getReg().isVirtual())
      DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
  }
  LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
  DeadInstChain.remove(&MI);
  MI.eraseFromParent();
  if (LocObserver)
    LocObserver->checkpoint(false);
}

void llvm::eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs,
                       MachineRegisterInfo &MRI,
                       LostDebugLocObserver *LocObserver) {
  SmallInstListTy DeadInstChain;
  for (MachineInstr *MI : DeadInstrs)
    saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);

  while (!DeadInstChain.empty()) {
    MachineInstr *Inst = DeadInstChain.pop_back_val();
    if (!isTriviallyDead(*Inst, MRI))
      continue;
    saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
  }
}
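
// Illustrative usage sketch (added commentary): after rewriting all uses of
// an instruction's def, erase it and let the worklist chase feeder
// instructions that just became dead; `OldReg`, `NewReg`, `DefMI`, and
// `LocObserver` are hypothetical:
//
//   MRI.replaceRegWith(OldReg, NewReg);
//   eraseInstrs({&DefMI}, MRI, &LocObserver);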

void llvm::eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LostDebugLocObserver *LocObserver) {
  return eraseInstrs({&MI}, MRI, LocObserver);
}

void llvm::salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI) {
  for (auto &Def : MI.defs()) {
    assert(Def.isReg() && "Must be a reg");

    SmallVector<MachineOperand *, 16> DbgUsers;
    for (auto &MOUse : MRI.use_operands(Def.getReg())) {
      MachineInstr *DbgValue = MOUse.getParent();
      // Ignore partially formed DBG_VALUEs.
      if (DbgValue->isNonListDebugValue() && DbgValue->getNumOperands() == 4) {
        DbgUsers.push_back(&MOUse);
      }
    }

    if (!DbgUsers.empty()) {
      salvageDebugInfoForDbgValue(MRI, MI, DbgUsers);
    }
  }
}
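
// Illustrative usage sketch (added commentary): salvage DBG_VALUE users
// before deleting an instruction so variable locations survive the rewrite;
// `DeadMI` is hypothetical:
//
//   salvageDebugInfo(MRI, *DeadMI);
//   DeadMI->eraseFromParent();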