//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <numeric>
#include <optional>

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;
using namespace MIPatternMatch;

Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, Register Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Reg.isVirtual() && "PhysReg not implemented");

  // Save the old register class to check whether
  // the change notifications will be required.
  // TODO: A better approach would be to pass
  // the observers to constrainRegToClass().
  auto *OldRegClass = MRI.getRegClassOrNull(Reg);
  Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible,
  // then create a copy between the new and the old register.
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    // FIXME: The copy needs to have the classes constrained for its operands.
    // Use operand's regbank to get the class for old register (Reg).
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changingInstr(*RegMO.getParent());
    }
    RegMO.setReg(ConstrainedReg);
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changedInstr(*RegMO.getParent());
    }
  } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      if (!RegMO.isDef()) {
        MachineInstr *RegDef = MRI.getVRegDef(Reg);
        Observer->changedInstr(*RegDef);
      }
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
    }
  }
  return ConstrainedReg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Reg.isVirtual() && "PhysReg not implemented");

  const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some target-independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: if the operand is a
  // use, we can skip constraining it, as the instruction defining the register
  // will constrain it.

  if (OpRC) {
    // Obtain the RC from the incoming regbank if it is a proper subclass.
    // Operands can have multiple regbanks for a superclass combining different
    // register types (e.g., AMDGPU's VGPR and AGPR). The regbank ambiguity
    // resolved by targets during regbankselect should not be overridden.
    if (const auto *SubRC = TRI.getCommonSubClass(
            OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
      OpRC = SubRC;

    OpRC = TRI.getAllocatableClass(OpRC);
  }

  if (!OpRC) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here may not be enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Note that to reproduce
    // the issue we likely need a destination pattern of a selection rule
    // producing such extra copies, not just an input GMIR with them, as every
    // existing target using selectImpl handles copies before calling it and
    // they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
                                  RegMO);
}

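// Typical use (an illustrative sketch, not code from this file): a target's
// InstructionSelector mutates a generic instruction into a target opcode and
// then calls this function to pin down the operand register classes, e.g.:
//   I.setDesc(TII.get(SomeTargetOpcode)); // SomeTargetOpcode is hypothetical
//   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);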
bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Reg.isPhysical())
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}

bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
                         MachineRegisterInfo &MRI) {
  // Give up if either DstReg or SrcReg is a physical register.
  if (DstReg.isPhysical() || SrcReg.isPhysical())
    return false;
  // Give up if the types don't match.
  if (MRI.getType(DstReg) != MRI.getType(SrcReg))
    return false;
  // Replace if either DstReg has no constraints or the register
  // constraints match.
  const auto &DstRBC = MRI.getRegClassOrRegBank(DstReg);
  if (!DstRBC || DstRBC == MRI.getRegClassOrRegBank(SrcReg))
    return true;

  // Otherwise match if the Src is already a regclass that is covered by the
  // Dst RegBank.
  return isa<const RegisterBank *>(DstRBC) && MRI.getRegClassOrNull(SrcReg) &&
         cast<const RegisterBank *>(DstRBC)->covers(
             *MRI.getRegClassOrNull(SrcReg));
}

bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // Instructions without side-effects are dead iff they only define dead regs.
  // This function is hot and this loop returns early in the common case,
  // so only perform additional checks before this if absolutely necessary.
  for (const auto &MO : MI.all_defs()) {
    Register Reg = MO.getReg();
    if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return MI.wouldBeTriviallyDead();
}

static void reportGISelDiagnostic(DiagnosticSeverity Severity,
                                  MachineFunction &MF,
                                  const TargetPassConfig &TPC,
                                  MachineOptimizationRemarkEmitter &MORE,
                                  MachineOptimizationRemarkMissed &R) {
  bool IsFatal = Severity == DS_Error &&
                 TPC.isGlobalISelAbortEnabled();
  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsFatal)
    report_fatal_error(Twine(R.getMsg()));
  else
    MORE.emit(R);
}

void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
  reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}

std::optional<APInt> llvm::getIConstantVRegVal(Register VReg,
                                               const MachineRegisterInfo &MRI) {
  std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
      VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return std::nullopt;
  return ValAndVReg->Value;
}

const APInt &llvm::getIConstantFromReg(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  MachineInstr *Const = MRI.getVRegDef(Reg);
  assert((Const && Const->getOpcode() == TargetOpcode::G_CONSTANT) &&
         "expected a G_CONSTANT on Reg");
  return Const->getOperand(1).getCImm()->getValue();
}

std::optional<int64_t>
llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
  std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
  if (Val && Val->getBitWidth() <= 64)
    return Val->getSExtValue();
  return std::nullopt;
}

namespace {

// This function is used in many places, and as such, it has some
// micro-optimizations to try to make it as fast as possible.
//
// - We use template arguments to avoid an indirect call caused by passing a
// function_ref/std::function
// - GetAPCstValue does not return std::optional<APInt> as that's expensive.
// Instead it returns true/false and places the result in a pre-constructed
// APInt.
//
// Please change this function carefully and benchmark your changes.
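//
// An illustrative example (not from the original comments): given
//   %c:_(s32) = G_CONSTANT i32 -1
//   %t:_(s16) = G_TRUNC %c
// looking through %t records (G_TRUNC, 16) while walking back to %c, folds
// the constant, and returns -1 truncated to 16 bits paired with %c's vreg.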
template <bool (*IsConstantOpcode)(const MachineInstr *),
          bool (*GetAPCstValue)(const MachineInstr *MI, APInt &)>
std::optional<ValueAndVReg>
getConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI,
                                  bool LookThroughInstrs = true,
                                  bool LookThroughAnyExt = false) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;

  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_ANYEXT:
      if (!LookThroughAnyExt)
        return std::nullopt;
      [[fallthrough]];
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (VReg.isPhysical())
        return std::nullopt;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return std::nullopt;
    }
  }
  if (!MI || !IsConstantOpcode(MI))
    return std::nullopt;

  APInt Val;
  if (!GetAPCstValue(MI, Val))
    return std::nullopt;
  for (auto &Pair : reverse(SeenOpcodes)) {
    switch (Pair.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(Pair.second);
      break;
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
      Val = Val.sext(Pair.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(Pair.second);
      break;
    }
  }

  return ValueAndVReg{std::move(Val), VReg};
}

bool isIConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_CONSTANT;
}

bool isFConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
}

bool isAnyConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  unsigned Opc = MI->getOpcode();
  return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
}

bool getCImmAsAPInt(const MachineInstr *MI, APInt &Result) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (!CstVal.isCImm())
    return false;
  Result = CstVal.getCImm()->getValue();
  return true;
}

bool getCImmOrFPImmAsAPInt(const MachineInstr *MI, APInt &Result) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    Result = CstVal.getCImm()->getValue();
  else if (CstVal.isFPImm())
    Result = CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  else
    return false;
  return true;
}

} // end anonymous namespace

std::optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  return getConstantVRegValWithLookThrough<isIConstant, getCImmAsAPInt>(
      VReg, MRI, LookThroughInstrs);
}

std::optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool LookThroughAnyExt) {
  return getConstantVRegValWithLookThrough<isAnyConstant,
                                           getCImmOrFPImmAsAPInt>(
      VReg, MRI, LookThroughInstrs, LookThroughAnyExt);
}

std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  auto Reg =
      getConstantVRegValWithLookThrough<isFConstant, getCImmOrFPImmAsAPInt>(
          VReg, MRI, LookThroughInstrs);
  if (!Reg)
    return std::nullopt;
  return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
                        Reg->VReg};
}

const ConstantFP *
llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

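// Note: this walks through COPY instructions and generic optimization hints
// (e.g. G_ASSERT_ZEXT), returning both the underlying def and the furthest
// traceable source register.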
std::optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
  Register DefSrcReg = Reg;
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return std::nullopt;
  unsigned Opc = DefMI->getOpcode();
  while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid())
      break;
    DefMI = MRI.getVRegDef(SrcReg);
    DefSrcReg = SrcReg;
    Opc = DefMI->getOpcode();
  }
  return DefinitionAndSourceRegister{DefMI, DefSrcReg};
}

MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->MI : nullptr;
}

Register llvm::getSrcRegIgnoringCopies(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->Reg : Register();
}

void llvm::extractParts(Register Reg, LLT Ty, int NumParts,
                        SmallVectorImpl<Register> &VRegs,
                        MachineIRBuilder &MIRBuilder,
                        MachineRegisterInfo &MRI) {
  for (int i = 0; i < NumParts; ++i)
    VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
  MIRBuilder.buildUnmerge(VRegs, Reg);
}

bool llvm::extractParts(Register Reg, LLT RegTy, LLT MainTy, LLT &LeftoverTy,
                        SmallVectorImpl<Register> &VRegs,
                        SmallVectorImpl<Register> &LeftoverRegs,
                        MachineIRBuilder &MIRBuilder,
                        MachineRegisterInfo &MRI) {
  assert(!LeftoverTy.isValid() && "this is an out argument");

  unsigned RegSize = RegTy.getSizeInBits();
  unsigned MainSize = MainTy.getSizeInBits();
  unsigned NumParts = RegSize / MainSize;
  unsigned LeftoverSize = RegSize - NumParts * MainSize;

  // Use an unmerge when possible.
  if (LeftoverSize == 0) {
    for (unsigned I = 0; I < NumParts; ++I)
      VRegs.push_back(MRI.createGenericVirtualRegister(MainTy));
    MIRBuilder.buildUnmerge(VRegs, Reg);
    return true;
  }

  // Try to use unmerge for irregular vector splits where possible.
  // For example, when splitting a <6 x i32> into <4 x i32> with a <2 x i32>
  // leftover, it becomes:
  //  <2 x i32> %2, <2 x i32> %3, <2 x i32> %4 = G_UNMERGE_VALUES <6 x i32> %1
  //  <4 x i32> %5 = G_CONCAT_VECTORS <2 x i32> %2, <2 x i32> %3
  if (RegTy.isVector() && MainTy.isVector()) {
    unsigned RegNumElts = RegTy.getNumElements();
    unsigned MainNumElts = MainTy.getNumElements();
    unsigned LeftoverNumElts = RegNumElts % MainNumElts;
    // If we can unmerge to LeftoverTy, do it.
    if (MainNumElts % LeftoverNumElts == 0 &&
        RegNumElts % LeftoverNumElts == 0 &&
        RegTy.getScalarSizeInBits() == MainTy.getScalarSizeInBits() &&
        LeftoverNumElts > 1) {
      LeftoverTy = LLT::fixed_vector(LeftoverNumElts, RegTy.getElementType());

      // Unmerge Reg into LeftoverTy vectors.
      SmallVector<Register, 4> UnmergeValues;
      extractParts(Reg, LeftoverTy, RegNumElts / LeftoverNumElts, UnmergeValues,
                   MIRBuilder, MRI);

      // Find how many LeftoverTy pieces make up one MainTy.
      unsigned LeftoverPerMain = MainNumElts / LeftoverNumElts;
      unsigned NumOfLeftoverVal =
          ((RegNumElts % MainNumElts) / LeftoverNumElts);

      // Create as many MainTy values as possible from the unmerged pieces.
      SmallVector<Register, 4> MergeValues;
      for (unsigned I = 0; I < UnmergeValues.size() - NumOfLeftoverVal; I++) {
        MergeValues.push_back(UnmergeValues[I]);
        if (MergeValues.size() == LeftoverPerMain) {
          VRegs.push_back(
              MIRBuilder.buildMergeLikeInstr(MainTy, MergeValues).getReg(0));
          MergeValues.clear();
        }
      }
      // Populate LeftoverRegs with the leftovers
      for (unsigned I = UnmergeValues.size() - NumOfLeftoverVal;
           I < UnmergeValues.size(); I++) {
        LeftoverRegs.push_back(UnmergeValues[I]);
      }
      return true;
    }
  }
  // Perform irregular split. Leftover is last element of RegPieces.
  if (MainTy.isVector()) {
    SmallVector<Register, 8> RegPieces;
    extractVectorParts(Reg, MainTy.getNumElements(), RegPieces, MIRBuilder,
                       MRI);
    for (unsigned i = 0; i < RegPieces.size() - 1; ++i)
      VRegs.push_back(RegPieces[i]);
    LeftoverRegs.push_back(RegPieces[RegPieces.size() - 1]);
    LeftoverTy = MRI.getType(LeftoverRegs[0]);
    return true;
  }

  LeftoverTy = LLT::scalar(LeftoverSize);
  // For irregular sizes, extract the individual parts.
  for (unsigned I = 0; I != NumParts; ++I) {
    Register NewReg = MRI.createGenericVirtualRegister(MainTy);
    VRegs.push_back(NewReg);
    MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
  }

  for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
       Offset += LeftoverSize) {
    Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
    LeftoverRegs.push_back(NewReg);
    MIRBuilder.buildExtract(NewReg, Reg, Offset);
  }

  return true;
}

void llvm::extractVectorParts(Register Reg, unsigned NumElts,
                              SmallVectorImpl<Register> &VRegs,
                              MachineIRBuilder &MIRBuilder,
                              MachineRegisterInfo &MRI) {
  LLT RegTy = MRI.getType(Reg);
  assert(RegTy.isVector() && "Expected a vector type");

  LLT EltTy = RegTy.getElementType();
  LLT NarrowTy = (NumElts == 1) ? EltTy : LLT::fixed_vector(NumElts, EltTy);
  unsigned RegNumElts = RegTy.getNumElements();
  unsigned LeftoverNumElts = RegNumElts % NumElts;
  unsigned NumNarrowTyPieces = RegNumElts / NumElts;

  // Perfect split without leftover
  if (LeftoverNumElts == 0)
    return extractParts(Reg, NarrowTy, NumNarrowTyPieces, VRegs, MIRBuilder,
                        MRI);

  // Irregular split. Provide direct access to all elements for the artifact
  // combiner by unmerging to individual elements, then build vectors with
  // NumElts elements each. The remaining element(s) form the leftover vector.
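  // An illustrative example: splitting <7 x s8> with NumElts = 2 unmerges to
  // seven s8 values, builds three <2 x s8> sub-vectors, and the single
  // remaining s8 element becomes the leftover.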
  SmallVector<Register, 8> Elts;
  extractParts(Reg, EltTy, RegNumElts, Elts, MIRBuilder, MRI);

  unsigned Offset = 0;
  // Requested sub-vectors of NarrowTy.
  for (unsigned i = 0; i < NumNarrowTyPieces; ++i, Offset += NumElts) {
    ArrayRef<Register> Pieces(&Elts[Offset], NumElts);
    VRegs.push_back(MIRBuilder.buildMergeLikeInstr(NarrowTy, Pieces).getReg(0));
  }

  // Leftover element(s).
  if (LeftoverNumElts == 1) {
    VRegs.push_back(Elts[Offset]);
  } else {
    LLT LeftoverTy = LLT::fixed_vector(LeftoverNumElts, EltTy);
    ArrayRef<Register> Pieces(&Elts[Offset], LeftoverNumElts);
    VRegs.push_back(
        MIRBuilder.buildMergeLikeInstr(LeftoverTy, Pieces).getReg(0));
  }
}

MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}

std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
                                             const Register Op1,
                                             const Register Op2,
                                             const MachineRegisterInfo &MRI) {
  auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
  if (!MaybeOp2Cst)
    return std::nullopt;

  auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
  if (!MaybeOp1Cst)
    return std::nullopt;

  const APInt &C1 = MaybeOp1Cst->Value;
  const APInt &C2 = MaybeOp2Cst->Value;
  switch (Opcode) {
  default:
    break;
  case TargetOpcode::G_ADD:
    return C1 + C2;
  case TargetOpcode::G_PTR_ADD:
    // Types can be of different width here.
    // Result needs to be the same width as C1, so trunc or sext C2.
    return C1 + C2.sextOrTrunc(C1.getBitWidth());
  case TargetOpcode::G_AND:
    return C1 & C2;
  case TargetOpcode::G_ASHR:
    return C1.ashr(C2);
  case TargetOpcode::G_LSHR:
    return C1.lshr(C2);
  case TargetOpcode::G_MUL:
    return C1 * C2;
  case TargetOpcode::G_OR:
    return C1 | C2;
  case TargetOpcode::G_SHL:
    return C1 << C2;
  case TargetOpcode::G_SUB:
    return C1 - C2;
  case TargetOpcode::G_XOR:
    return C1 ^ C2;
  case TargetOpcode::G_UDIV:
    if (!C2.getBoolValue())
      break;
    return C1.udiv(C2);
  case TargetOpcode::G_SDIV:
    if (!C2.getBoolValue())
      break;
    return C1.sdiv(C2);
  case TargetOpcode::G_UREM:
    if (!C2.getBoolValue())
      break;
    return C1.urem(C2);
  case TargetOpcode::G_SREM:
    if (!C2.getBoolValue())
      break;
    return C1.srem(C2);
  case TargetOpcode::G_SMIN:
    return APIntOps::smin(C1, C2);
  case TargetOpcode::G_SMAX:
    return APIntOps::smax(C1, C2);
  case TargetOpcode::G_UMIN:
    return APIntOps::umin(C1, C2);
  case TargetOpcode::G_UMAX:
    return APIntOps::umax(C1, C2);
  }

  return std::nullopt;
}

std::optional<APFloat>
llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                          const Register Op2, const MachineRegisterInfo &MRI) {
  const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
  if (!Op2Cst)
    return std::nullopt;

  const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
  if (!Op1Cst)
    return std::nullopt;

  APFloat C1 = Op1Cst->getValueAPF();
  const APFloat &C2 = Op2Cst->getValueAPF();
  switch (Opcode) {
  case TargetOpcode::G_FADD:
    C1.add(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FSUB:
    C1.subtract(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FMUL:
    C1.multiply(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FDIV:
    C1.divide(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FREM:
    C1.mod(C2);
    return C1;
  case TargetOpcode::G_FCOPYSIGN:
    C1.copySign(C2);
    return C1;
  case TargetOpcode::G_FMINNUM:
    return minnum(C1, C2);
  case TargetOpcode::G_FMAXNUM:
    return maxnum(C1, C2);
  case TargetOpcode::G_FMINIMUM:
    return minimum(C1, C2);
  case TargetOpcode::G_FMAXIMUM:
    return maximum(C1, C2);
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:
    // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
    // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
    // and currently there isn't a nice wrapper in APFloat for the version with
    // correct snan handling.
    break;
  default:
    break;
  }

  return std::nullopt;
}

SmallVector<APInt>
llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                              const Register Op2,
                              const MachineRegisterInfo &MRI) {
  auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
  if (!SrcVec2)
    return SmallVector<APInt>();

  auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
  if (!SrcVec1)
    return SmallVector<APInt>();

  SmallVector<APInt> FoldedElements;
  for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
    auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
                                      SrcVec2->getSourceReg(Idx), MRI);
    if (!MaybeCst)
      return SmallVector<APInt>();
    FoldedElements.push_back(*MaybeCst);
  }
  return FoldedElements;
}

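// Note: when SNaN is true, this only proves the value is never a signaling
// NaN; a quiet NaN may still be possible.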
bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  const TargetMachine &TM = DefMI->getMF()->getTarget();
  if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
    return !FPVal->getValueAPF().isNaN() ||
           (SNaN && !FPVal->getValueAPF().isSignaling());
  }

  if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
    for (const auto &Op : DefMI->uses())
      if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
        return false;
    return true;
  }

  switch (DefMI->getOpcode()) {
  default:
    break;
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FREM:
  case TargetOpcode::G_FSIN:
  case TargetOpcode::G_FCOS:
  case TargetOpcode::G_FTAN:
  case TargetOpcode::G_FACOS:
  case TargetOpcode::G_FASIN:
  case TargetOpcode::G_FATAN:
  case TargetOpcode::G_FATAN2:
  case TargetOpcode::G_FCOSH:
  case TargetOpcode::G_FSINH:
  case TargetOpcode::G_FTANH:
  case TargetOpcode::G_FMA:
  case TargetOpcode::G_FMAD:
    if (SNaN)
      return true;

    // TODO: Need isKnownNeverInfinity
    return false;
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE: {
    if (SNaN)
      return true;
    // This can return a NaN if either operand is an sNaN, or if both operands
    // are NaN.
    return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
           (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
  }
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM: {
    // Only one needs to be known not-nan, since it will be returned if the
    // other ends up being one.
    return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
           isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
  }
  }

  if (SNaN) {
    // FP operations quiet signaling NaNs. For now, just handle the ones
    // inserted during legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}

Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
                                  const MachinePointerInfo &MPO) {
  auto PSV = dyn_cast_if_present<const PseudoSourceValue *>(MPO.V);
  if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
                           MPO.Offset);
  }

  if (const Value *V = dyn_cast_if_present<const Value *>(MPO.V)) {
    const Module *M = MF.getFunction().getParent();
    return V->getPointerAlignment(M->getDataLayout());
  }

  return Align(1);
}

Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
                                        const TargetInstrInfo &TII,
                                        MCRegister PhysReg,
                                        const TargetRegisterClass &RC,
                                        const DebugLoc &DL, LLT RegTy) {
  MachineBasicBlock &EntryMBB = MF.front();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
  if (LiveIn) {
    MachineInstr *Def = MRI.getVRegDef(LiveIn);
    if (Def) {
      // FIXME: Should the verifier check this is in the entry block?
      assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
      return LiveIn;
    }

    // It's possible the incoming argument register and copy were added during
    // lowering, but later deleted because they were (or became) dead. If this
    // happens, re-insert the copy.
  } else {
    // The live-in register was not present, so add it.
    LiveIn = MF.addLiveIn(PhysReg, &RC);
    if (RegTy.isValid())
      MRI.setType(LiveIn, RegTy);
  }

  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
    .addReg(PhysReg);
  if (!EntryMBB.isLiveIn(PhysReg))
    EntryMBB.addLiveIn(PhysReg);
  return LiveIn;
}

std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode,
                                             const Register Op1, uint64_t Imm,
                                             const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG: {
      LLT Ty = MRI.getType(Op1);
      return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
    }
    }
  }
  return std::nullopt;
}

std::optional<APInt> llvm::ConstantFoldCastOp(unsigned Opcode, LLT DstTy,
                                              const Register Op0,
                                              const MachineRegisterInfo &MRI) {
  std::optional<APInt> Val = getIConstantVRegVal(Op0, MRI);
  if (!Val)
    return Val;

  const unsigned DstSize = DstTy.getScalarSizeInBits();

  switch (Opcode) {
  case TargetOpcode::G_SEXT:
    return Val->sext(DstSize);
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    // TODO: DAG considers target preference when constant folding any_extend.
    return Val->zext(DstSize);
  default:
    break;
  }

  llvm_unreachable("unexpected cast opcode to constant fold");
}

std::optional<APFloat>
llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
                             const MachineRegisterInfo &MRI) {
  assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
  if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
    APFloat DstVal(getFltSemanticForLLT(DstTy));
    DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
                            APFloat::rmNearestTiesToEven);
    return DstVal;
  }
  return std::nullopt;
}

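// CB computes the folded scalar answer for one constant element; a G_CTLZ
// caller, for instance, might pass something like
//   [](APInt V) { return V.countl_zero(); }
// (an illustrative lambda, not code from this file).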
std::optional<SmallVector<unsigned>>
llvm::ConstantFoldCountZeros(Register Src, const MachineRegisterInfo &MRI,
                             std::function<unsigned(APInt)> CB) {
  LLT Ty = MRI.getType(Src);
  SmallVector<unsigned> FoldedCTLZs;
  auto tryFoldScalar = [&](Register R) -> std::optional<unsigned> {
    auto MaybeCst = getIConstantVRegVal(R, MRI);
    if (!MaybeCst)
      return std::nullopt;
    return CB(*MaybeCst);
  };
  if (Ty.isVector()) {
    // Try to constant fold each element.
    auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
    if (!BV)
      return std::nullopt;
    for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
      if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
        FoldedCTLZs.emplace_back(*MaybeFold);
        continue;
      }
      return std::nullopt;
    }
    return FoldedCTLZs;
  }
  if (auto MaybeCst = tryFoldScalar(Src)) {
    FoldedCTLZs.emplace_back(*MaybeCst);
    return FoldedCTLZs;
  }
  return std::nullopt;
}

std::optional<SmallVector<APInt>>
llvm::ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2,
                       const MachineRegisterInfo &MRI) {
  LLT Ty = MRI.getType(Op1);
  if (Ty != MRI.getType(Op2))
    return std::nullopt;

  auto TryFoldScalar = [&MRI, Pred](Register LHS,
                                    Register RHS) -> std::optional<APInt> {
    auto LHSCst = getIConstantVRegVal(LHS, MRI);
    auto RHSCst = getIConstantVRegVal(RHS, MRI);
    if (!LHSCst || !RHSCst)
      return std::nullopt;

    switch (Pred) {
    case CmpInst::Predicate::ICMP_EQ:
      return APInt(/*numBits=*/1, LHSCst->eq(*RHSCst));
    case CmpInst::Predicate::ICMP_NE:
      return APInt(/*numBits=*/1, LHSCst->ne(*RHSCst));
    case CmpInst::Predicate::ICMP_UGT:
      return APInt(/*numBits=*/1, LHSCst->ugt(*RHSCst));
    case CmpInst::Predicate::ICMP_UGE:
      return APInt(/*numBits=*/1, LHSCst->uge(*RHSCst));
    case CmpInst::Predicate::ICMP_ULT:
      return APInt(/*numBits=*/1, LHSCst->ult(*RHSCst));
    case CmpInst::Predicate::ICMP_ULE:
      return APInt(/*numBits=*/1, LHSCst->ule(*RHSCst));
    case CmpInst::Predicate::ICMP_SGT:
      return APInt(/*numBits=*/1, LHSCst->sgt(*RHSCst));
    case CmpInst::Predicate::ICMP_SGE:
      return APInt(/*numBits=*/1, LHSCst->sge(*RHSCst));
    case CmpInst::Predicate::ICMP_SLT:
      return APInt(/*numBits=*/1, LHSCst->slt(*RHSCst));
    case CmpInst::Predicate::ICMP_SLE:
      return APInt(/*numBits=*/1, LHSCst->sle(*RHSCst));
    default:
      return std::nullopt;
    }
  };

  SmallVector<APInt> FoldedICmps;

  if (Ty.isVector()) {
    // Try to constant fold each element.
    auto *BV1 = getOpcodeDef<GBuildVector>(Op1, MRI);
    auto *BV2 = getOpcodeDef<GBuildVector>(Op2, MRI);
    if (!BV1 || !BV2)
      return std::nullopt;
    assert(BV1->getNumSources() == BV2->getNumSources() && "Invalid vectors");
    for (unsigned I = 0; I < BV1->getNumSources(); ++I) {
      if (auto MaybeFold =
              TryFoldScalar(BV1->getSourceReg(I), BV2->getSourceReg(I))) {
        FoldedICmps.emplace_back(*MaybeFold);
        continue;
      }
      return std::nullopt;
    }
    return FoldedICmps;
  }

  if (auto MaybeCst = TryFoldScalar(Op1, Op2)) {
    FoldedICmps.emplace_back(*MaybeCst);
    return FoldedICmps;
  }

  return std::nullopt;
}

bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
                                  GISelKnownBits *KB) {
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  if (!DefSrcReg)
    return false;

  const MachineInstr &MI = *DefSrcReg->MI;
  const LLT Ty = MRI.getType(Reg);

  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
    unsigned BitWidth = Ty.getScalarSizeInBits();
    const ConstantInt *CI = MI.getOperand(1).getCImm();
    return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
  }
  case TargetOpcode::G_SHL: {
    // A left-shift of a constant one will have exactly one bit set because
    // shifting the bit off the end is undefined.

    // TODO: Constant splat
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (*ConstLHS == 1)
        return true;
    }

    break;
  }
  case TargetOpcode::G_LSHR: {
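    // The sign mask has a single bit set, so a logical right shift by an
    // in-range amount leaves exactly one bit set (out-of-range shift amounts
    // are poison).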
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (ConstLHS->isSignMask())
        return true;
    }

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // TODO: Probably should have a recursion depth guard since you could have
    // bitcasted vector elements.
    for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
      if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB))
        return false;

    return true;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Only handle constants, since we would need to know if the number of
    // leading zeros is greater than the truncation amount.
    const unsigned BitWidth = Ty.getScalarSizeInBits();
    for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
      auto Const = getIConstantVRegVal(MO.getReg(), MRI);
      if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
        return false;
    }

    return true;
  }
  default:
    break;
  }

  if (!KB)
    return false;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  KnownBits Known = KB->getKnownBits(Reg);
  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}

void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}

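// A couple of illustrative results, derived from the rules below:
//   getLCMType(s16, s24) == s48
//   getLCMType(<3 x s32>, <4 x s32>) == <12 x s32>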
LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
  if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
    return OrigTy;

  if (OrigTy.isVector() && TargetTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();
    LLT TargetElt = TargetTy.getElementType();

    // TODO: The docstring for this function says the intention is to use this
    // function to build MERGE/UNMERGE instructions. It won't be the case that
    // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
    // could implement getLCMType between the two in the future if there was a
    // need, but it is not worth it now as this function should not be used in
    // that way.
    assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
            (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
           "getLCMType not implemented between fixed and scalable vectors.");

    if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
      int GCDMinElts = std::gcd(OrigTy.getElementCount().getKnownMinValue(),
                                TargetTy.getElementCount().getKnownMinValue());
      // Prefer the original element type.
      ElementCount Mul = OrigTy.getElementCount().multiplyCoefficientBy(
          TargetTy.getElementCount().getKnownMinValue());
      return LLT::vector(Mul.divideCoefficientBy(GCDMinElts),
                         OrigTy.getElementType());
    }
    unsigned LCM = std::lcm(OrigTy.getSizeInBits().getKnownMinValue(),
                            TargetTy.getSizeInBits().getKnownMinValue());
    return LLT::vector(
        ElementCount::get(LCM / OrigElt.getSizeInBits(), OrigTy.isScalable()),
        OrigElt);
  }

  // One type is scalar, one type is vector
  if (OrigTy.isVector() || TargetTy.isVector()) {
    LLT VecTy = OrigTy.isVector() ? OrigTy : TargetTy;
    LLT ScalarTy = OrigTy.isVector() ? TargetTy : OrigTy;
    LLT EltTy = VecTy.getElementType();
    LLT OrigEltTy = OrigTy.isVector() ? OrigTy.getElementType() : OrigTy;

    // Prefer scalar type from OrigTy.
    if (EltTy.getSizeInBits() == ScalarTy.getSizeInBits())
      return LLT::vector(VecTy.getElementCount(), OrigEltTy);

    // Different size scalars. Create vector with the same total size.
    // LCM will take fixed/scalable from VecTy.
    unsigned LCM = std::lcm(EltTy.getSizeInBits().getFixedValue() *
                                VecTy.getElementCount().getKnownMinValue(),
                            ScalarTy.getSizeInBits().getFixedValue());
    // Prefer type from OrigTy
    return LLT::vector(ElementCount::get(LCM / OrigEltTy.getSizeInBits(),
                                         VecTy.getElementCount().isScalable()),
                       OrigEltTy);
  }

  // At this point, both types are scalars of different size
  unsigned LCM = std::lcm(OrigTy.getSizeInBits().getFixedValue(),
                          TargetTy.getSizeInBits().getFixedValue());
  // Preserve pointer types.
  if (LCM == OrigTy.getSizeInBits())
    return OrigTy;
  if (LCM == TargetTy.getSizeInBits())
    return TargetTy;
  return LLT::scalar(LCM);
}

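// Illustrative: getCoverTy(<3 x s32>, <2 x s32>) == <4 x s32>, the smallest
// type made of whole <2 x s32> pieces that covers <3 x s32>.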
LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {

  if ((OrigTy.isScalableVector() && TargetTy.isFixedVector()) ||
      (OrigTy.isFixedVector() && TargetTy.isScalableVector()))
    llvm_unreachable(
        "getCoverTy not implemented between fixed and scalable vectors.");

  if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
      (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
    return getLCMType(OrigTy, TargetTy);

  unsigned OrigTyNumElts = OrigTy.getElementCount().getKnownMinValue();
  unsigned TargetTyNumElts = TargetTy.getElementCount().getKnownMinValue();
  if (OrigTyNumElts % TargetTyNumElts == 0)
    return OrigTy;

  unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
  return LLT::scalarOrVector(ElementCount::getFixed(NumElts),
                             OrigTy.getElementType());
}

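// Illustrative: getGCDType(<4 x s32>, <2 x s32>) == <2 x s32>, and
// getGCDType(s64, <2 x s16>) == s16 (the GCD of the scalar sizes).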
LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
  if (OrigTy.getSizeInBits() == TargetTy.getSizeInBits())
    return OrigTy;

  if (OrigTy.isVector() && TargetTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();

    // TODO: The docstring for this function says the intention is to use this
    // function to build MERGE/UNMERGE instructions. It won't be the case that
    // we generate a MERGE/UNMERGE between fixed and scalable vector types. We
    // could implement getGCDType between the two in the future if there was a
    // need, but it is not worth it now as this function should not be used in
    // that way.
    assert(((OrigTy.isScalableVector() && !TargetTy.isFixedVector()) ||
            (OrigTy.isFixedVector() && !TargetTy.isScalableVector())) &&
           "getGCDType not implemented between fixed and scalable vectors.");

    unsigned GCD = std::gcd(OrigTy.getSizeInBits().getKnownMinValue(),
                            TargetTy.getSizeInBits().getKnownMinValue());
    if (GCD == OrigElt.getSizeInBits())
      return LLT::scalarOrVector(ElementCount::get(1, OrigTy.isScalable()),
                                 OrigElt);

    // Cannot produce original element type, but both have vscale in common.
    if (GCD < OrigElt.getSizeInBits())
      return LLT::scalarOrVector(ElementCount::get(1, OrigTy.isScalable()),
                                 GCD);

    return LLT::vector(
        ElementCount::get(GCD / OrigElt.getSizeInBits().getFixedValue(),
                          OrigTy.isScalable()),
        OrigElt);
  }

  // If one type is vector and the element size matches the scalar size, then
  // the gcd is the scalar type.
  if (OrigTy.isVector() &&
      OrigTy.getElementType().getSizeInBits() == TargetTy.getSizeInBits())
    return OrigTy.getElementType();
  if (TargetTy.isVector() &&
      TargetTy.getElementType().getSizeInBits() == OrigTy.getSizeInBits())
    return OrigTy;

  // At this point, both types are either scalars of different type or one is a
  // vector and one is a scalar. If both types are scalars, the GCD type is the
  // GCD between the two scalar sizes. If one is vector and one is scalar, then
  // the GCD type is the GCD between the scalar and the vector element size.
  LLT OrigScalar = OrigTy.getScalarType();
  LLT TargetScalar = TargetTy.getScalarType();
  unsigned GCD = std::gcd(OrigScalar.getSizeInBits().getFixedValue(),
                          TargetScalar.getSizeInBits().getFixedValue());
  return LLT::scalar(GCD);
}

std::optional<int> llvm::getSplatIndex(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });

  // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0, which gives callers a better chance to simplify.
  if (FirstDefinedIdx == Mask.end())
    return 0;

  // Make sure all remaining elements are either undef or the same
  // as the first non-undef value.
  int SplatValue = *FirstDefinedIdx;
  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return std::nullopt;

  return SplatValue;
}

static bool isBuildVectorOp(unsigned Opcode) {
  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
}

namespace {

std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
                                                const MachineRegisterInfo &MRI,
                                                bool AllowUndef) {
  MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
  if (!MI)
    return std::nullopt;

  bool isConcatVectorsOp = MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;
  if (!isBuildVectorOp(MI->getOpcode()) && !isConcatVectorsOp)
    return std::nullopt;

  std::optional<ValueAndVReg> SplatValAndReg;
  for (MachineOperand &Op : MI->uses()) {
    Register Element = Op.getReg();
    // If we have a G_CONCAT_VECTORS, we recursively look into the
    // vectors that we're concatenating to see if they're splats.
    auto ElementValAndReg =
        isConcatVectorsOp
            ? getAnyConstantSplat(Element, MRI, AllowUndef)
            : getAnyConstantVRegValWithLookThrough(Element, MRI, true, true);

    // If AllowUndef, treat undef as a value that will result in a constant
    // splat.
1349     if (!ElementValAndReg) {
1350       if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
1351         continue;
1352       return std::nullopt;
1353     }
1354 
1355     // Record splat value
1356     if (!SplatValAndReg)
1357       SplatValAndReg = ElementValAndReg;
1358 
1359     // Different constant than the one already recorded, not a constant splat.
1360     if (SplatValAndReg->Value != ElementValAndReg->Value)
1361       return std::nullopt;
1362   }
1363 
1364   return SplatValAndReg;
1365 }
1366 
1367 } // end anonymous namespace
1368 
1369 bool llvm::isBuildVectorConstantSplat(const Register Reg,
1370                                       const MachineRegisterInfo &MRI,
1371                                       int64_t SplatValue, bool AllowUndef) {
1372   if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
1373     return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
1374   return false;
1375 }
1376 
1377 bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
1378                                       const MachineRegisterInfo &MRI,
1379                                       int64_t SplatValue, bool AllowUndef) {
1380   return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1381                                     AllowUndef);
1382 }
1383 
1384 std::optional<APInt>
1385 llvm::getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI) {
1386   if (auto SplatValAndReg =
1387           getAnyConstantSplat(Reg, MRI, /*AllowUndef=*/false)) {
1388     if (std::optional<ValueAndVReg> ValAndVReg =
1389         getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI))
1390       return ValAndVReg->Value;
1391   }
1392 
1393   return std::nullopt;
1394 }
1395 
1396 std::optional<APInt>
1397 llvm::getIConstantSplatVal(const MachineInstr &MI,
1398                            const MachineRegisterInfo &MRI) {
1399   return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
1400 }
1401 
1402 std::optional<int64_t>
1403 llvm::getIConstantSplatSExtVal(const Register Reg,
1404                                const MachineRegisterInfo &MRI) {
1405   if (auto SplatValAndReg =
1406           getAnyConstantSplat(Reg, MRI, /*AllowUndef=*/false))
1407     return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
1408   return std::nullopt;
1409 }
1410 
1411 std::optional<int64_t>
1412 llvm::getIConstantSplatSExtVal(const MachineInstr &MI,
1413                                const MachineRegisterInfo &MRI) {
1414   return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
1415 }
1416 
1417 std::optional<FPValueAndVReg>
1418 llvm::getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI,
1419                         bool AllowUndef) {
1420   if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
1421     return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1422   return std::nullopt;
1423 }
1424 
1425 bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
1426                                  const MachineRegisterInfo &MRI,
1427                                  bool AllowUndef) {
1428   return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
1429 }
1430 
1431 bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
1432                                 const MachineRegisterInfo &MRI,
1433                                 bool AllowUndef) {
1434   return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
1435 }
1436 
1437 std::optional<RegOrConstant>
1438 llvm::getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI) {
1439   unsigned Opc = MI.getOpcode();
1440   if (!isBuildVectorOp(Opc))
1441     return std::nullopt;
1442   if (auto Splat = getIConstantSplatSExtVal(MI, MRI))
1443     return RegOrConstant(*Splat);
1444   auto Reg = MI.getOperand(1).getReg();
1445   if (any_of(drop_begin(MI.operands(), 2),
1446              [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
1447     return std::nullopt;
1448   return RegOrConstant(Reg);
1449 }
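
// Illustrative: a build vector that repeats one (possibly non-constant)
// register, e.g.
//   %v:_(<2 x s64>) = G_BUILD_VECTOR %x, %x
// is reported as RegOrConstant(%x); a constant splat is instead reported by
// its sign-extended scalar value.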
1450 
1451 static bool isConstantScalar(const MachineInstr &MI,
1452                              const MachineRegisterInfo &MRI,
1453                              bool AllowFP = true,
1454                              bool AllowOpaqueConstants = true) {
1455   switch (MI.getOpcode()) {
1456   case TargetOpcode::G_CONSTANT:
1457   case TargetOpcode::G_IMPLICIT_DEF:
1458     return true;
1459   case TargetOpcode::G_FCONSTANT:
1460     return AllowFP;
1461   case TargetOpcode::G_GLOBAL_VALUE:
1462   case TargetOpcode::G_FRAME_INDEX:
1463   case TargetOpcode::G_BLOCK_ADDR:
1464   case TargetOpcode::G_JUMP_TABLE:
1465     return AllowOpaqueConstants;
1466   default:
1467     return false;
1468   }
1469 }
1470 
1471 bool llvm::isConstantOrConstantVector(MachineInstr &MI,
1472                                       const MachineRegisterInfo &MRI) {
1473   Register Def = MI.getOperand(0).getReg();
1474   if (getIConstantVRegValWithLookThrough(Def, MRI))
1475     return true;
1476   GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
1477   if (!BV)
1478     return false;
1479   for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1480     if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
1481         getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
1482       continue;
1483     return false;
1484   }
1485   return true;
1486 }
1487 
1488 bool llvm::isConstantOrConstantVector(const MachineInstr &MI,
1489                                       const MachineRegisterInfo &MRI,
1490                                       bool AllowFP, bool AllowOpaqueConstants) {
1491   if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
1492     return true;
1493 
1494   if (!isBuildVectorOp(MI.getOpcode()))
1495     return false;
1496 
1497   const unsigned NumOps = MI.getNumOperands();
1498   for (unsigned I = 1; I != NumOps; ++I) {
1499     const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
1500     if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
1501       return false;
1502   }
1503 
1504   return true;
1505 }
1506 
1507 std::optional<APInt>
1508 llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
1509                                       const MachineRegisterInfo &MRI) {
1510   Register Def = MI.getOperand(0).getReg();
1511   if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1512     return C->Value;
1513   auto MaybeCst = getIConstantSplatSExtVal(MI, MRI);
1514   if (!MaybeCst)
1515     return std::nullopt;
1516   const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
1517   return APInt(ScalarSize, *MaybeCst, /*isSigned=*/true);
1518 }
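
// Illustrative: for a <4 x s16> build vector splatting G_CONSTANT i16 -1,
// this returns APInt(16, 0xFFFF), i.e. the splat value sign-extended and
// presented at the scalar width of the destination type.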
1519 
1520 std::optional<APFloat>
1521 llvm::isConstantOrConstantSplatVectorFP(MachineInstr &MI,
1522                                         const MachineRegisterInfo &MRI) {
1523   Register Def = MI.getOperand(0).getReg();
1524   if (auto FpConst = getFConstantVRegValWithLookThrough(Def, MRI))
1525     return FpConst->Value;
1526   auto MaybeCstFP = getFConstantSplat(Def, MRI, /*AllowUndef=*/false);
1527   if (!MaybeCstFP)
1528     return std::nullopt;
1529   return MaybeCstFP->Value;
1530 }
1531 
1532 bool llvm::isNullOrNullSplat(const MachineInstr &MI,
1533                              const MachineRegisterInfo &MRI, bool AllowUndefs) {
1534   switch (MI.getOpcode()) {
1535   case TargetOpcode::G_IMPLICIT_DEF:
1536     return AllowUndefs;
1537   case TargetOpcode::G_CONSTANT:
1538     return MI.getOperand(1).getCImm()->isNullValue();
1539   case TargetOpcode::G_FCONSTANT: {
1540     const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
1541     return FPImm->isZero() && !FPImm->isNegative();
1542   }
1543   default:
1544     if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
1545       return false;
1546     return isBuildVectorAllZeros(MI, MRI);
1547   }
1548 }
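
// Note the floating-point case above: +0.0 (G_FCONSTANT float 0.0) is a null
// value, but -0.0 is rejected because it is not a positive zero.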
1549 
1550 bool llvm::isAllOnesOrAllOnesSplat(const MachineInstr &MI,
1551                                    const MachineRegisterInfo &MRI,
1552                                    bool AllowUndefs) {
1553   switch (MI.getOpcode()) {
1554   case TargetOpcode::G_IMPLICIT_DEF:
1555     return AllowUndefs;
1556   case TargetOpcode::G_CONSTANT:
1557     return MI.getOperand(1).getCImm()->isAllOnesValue();
1558   default:
1559     if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
1560       return false;
1561     return isBuildVectorAllOnes(MI, MRI);
1562   }
1563 }
1564 
1565 bool llvm::matchUnaryPredicate(
1566     const MachineRegisterInfo &MRI, Register Reg,
1567     std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
1568 
1569   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
1570   if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1571     return Match(nullptr);
1572 
1573   // TODO: Also handle fconstant
1574   if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1575     return Match(Def->getOperand(1).getCImm());
1576 
1577   if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
1578     return false;
1579 
1580   for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
1581     Register SrcElt = Def->getOperand(I).getReg();
1582     const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
1583     if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
1584       if (!Match(nullptr))
1585         return false;
1586       continue;
1587     }
1588 
1589     if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
1590         !Match(SrcDef->getOperand(1).getCImm()))
1591       return false;
1592   }
1593 
1594   return true;
1595 }
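
// A minimal usage sketch (illustrative names): check that every element is a
// power of two, accepting undef elements, for which the predicate is handed
// a null Constant:
//   bool AllPow2 = matchUnaryPredicate(
//       MRI, Reg,
//       [](const Constant *C) {
//         if (!C) // Undef element, permitted by AllowUndefs below.
//           return true;
//         auto *CI = dyn_cast<ConstantInt>(C);
//         return CI && CI->getValue().isPowerOf2();
//       },
//       /*AllowUndefs=*/true);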
1596 
1597 bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
1598                           bool IsFP) {
1599   switch (TLI.getBooleanContents(IsVector, IsFP)) {
1600   case TargetLowering::UndefinedBooleanContent:
1601     return Val & 0x1;
1602   case TargetLowering::ZeroOrOneBooleanContent:
1603     return Val == 1;
1604   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1605     return Val == -1;
1606   }
1607   llvm_unreachable("Invalid boolean contents");
1608 }
1609 
1610 bool llvm::isConstFalseVal(const TargetLowering &TLI, int64_t Val,
1611                            bool IsVector, bool IsFP) {
1612   switch (TLI.getBooleanContents(IsVector, IsFP)) {
1613   case TargetLowering::UndefinedBooleanContent:
1614     return ~Val & 0x1;
1615   case TargetLowering::ZeroOrOneBooleanContent:
1616   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1617     return Val == 0;
1618   }
1619   llvm_unreachable("Invalid boolean contents");
1620 }
1621 
1622 int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
1623                              bool IsFP) {
1624   switch (TLI.getBooleanContents(IsVector, IsFP)) {
1625   case TargetLowering::UndefinedBooleanContent:
1626   case TargetLowering::ZeroOrOneBooleanContent:
1627     return 1;
1628   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1629     return -1;
1630   }
1631   llvm_unreachable("Invalid boolean contents");
1632 }
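
// The three conventions handled by the helpers above, for reference:
//   UndefinedBooleanContent:         only bit 0 is meaningful.
//   ZeroOrOneBooleanContent:         true == 1, false == 0.
//   ZeroOrNegativeOneBooleanContent: true == -1 (all ones), false == 0.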
1633 
1634 void llvm::saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
1635                             LostDebugLocObserver *LocObserver,
1636                             SmallInstListTy &DeadInstChain) {
1637   for (MachineOperand &Op : MI.uses()) {
1638     if (Op.isReg() && Op.getReg().isVirtual())
1639       DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
1640   }
1641   LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
1642   DeadInstChain.remove(&MI);
1643   MI.eraseFromParent();
1644   if (LocObserver)
1645     LocObserver->checkpoint(false);
1646 }
1647 
1648 void llvm::eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs,
1649                        MachineRegisterInfo &MRI,
1650                        LostDebugLocObserver *LocObserver) {
1651   SmallInstListTy DeadInstChain;
1652   for (MachineInstr *MI : DeadInstrs)
1653     saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);
1654 
1655   while (!DeadInstChain.empty()) {
1656     MachineInstr *Inst = DeadInstChain.pop_back_val();
1657     if (!isTriviallyDead(*Inst, MRI))
1658       continue;
1659     saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
1660   }
1661 }
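
// Illustrative: erasing %3:_(s32) = G_ADD %1, %2 queues the instructions
// defining %1 and %2; each queued instruction is then erased in turn only if
// it has become trivially dead (no remaining uses and no side effects).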
1662 
1663 void llvm::eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
1664                       LostDebugLocObserver *LocObserver) {
1665   return eraseInstrs({&MI}, MRI, LocObserver);
1666 }
1667 
1668 void llvm::salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI) {
1669   for (auto &Def : MI.defs()) {
1670     assert(Def.isReg() && "Must be a reg");
1671 
1672     SmallVector<MachineOperand *, 16> DbgUsers;
1673     for (auto &MOUse : MRI.use_operands(Def.getReg())) {
1674       MachineInstr *DbgValue = MOUse.getParent();
1675       // Ignore partially formed DBG_VALUEs.
1676       if (DbgValue->isNonListDebugValue() && DbgValue->getNumOperands() == 4) {
1677         DbgUsers.push_back(&MOUse);
1678       }
1679     }
1680 
1681     if (!DbgUsers.empty()) {
1682       salvageDebugInfoForDbgValue(MRI, MI, DbgUsers);
1683     }
1684   }
1685 }
1686 
1687 bool llvm::isPreISelGenericFloatingPointOpcode(unsigned Opc) {
1688   switch (Opc) {
1689   case TargetOpcode::G_FABS:
1690   case TargetOpcode::G_FADD:
1691   case TargetOpcode::G_FCANONICALIZE:
1692   case TargetOpcode::G_FCEIL:
1693   case TargetOpcode::G_FCONSTANT:
1694   case TargetOpcode::G_FCOPYSIGN:
1695   case TargetOpcode::G_FCOS:
1696   case TargetOpcode::G_FDIV:
1697   case TargetOpcode::G_FEXP2:
1698   case TargetOpcode::G_FEXP:
1699   case TargetOpcode::G_FFLOOR:
1700   case TargetOpcode::G_FLOG10:
1701   case TargetOpcode::G_FLOG2:
1702   case TargetOpcode::G_FLOG:
1703   case TargetOpcode::G_FMA:
1704   case TargetOpcode::G_FMAD:
1705   case TargetOpcode::G_FMAXIMUM:
1706   case TargetOpcode::G_FMAXNUM:
1707   case TargetOpcode::G_FMAXNUM_IEEE:
1708   case TargetOpcode::G_FMINIMUM:
1709   case TargetOpcode::G_FMINNUM:
1710   case TargetOpcode::G_FMINNUM_IEEE:
1711   case TargetOpcode::G_FMUL:
1712   case TargetOpcode::G_FNEARBYINT:
1713   case TargetOpcode::G_FNEG:
1714   case TargetOpcode::G_FPEXT:
1715   case TargetOpcode::G_FPOW:
1716   case TargetOpcode::G_FPTRUNC:
1717   case TargetOpcode::G_FREM:
1718   case TargetOpcode::G_FRINT:
1719   case TargetOpcode::G_FSIN:
1720   case TargetOpcode::G_FTAN:
1721   case TargetOpcode::G_FACOS:
1722   case TargetOpcode::G_FASIN:
1723   case TargetOpcode::G_FATAN:
1724   case TargetOpcode::G_FATAN2:
1725   case TargetOpcode::G_FCOSH:
1726   case TargetOpcode::G_FSINH:
1727   case TargetOpcode::G_FTANH:
1728   case TargetOpcode::G_FSQRT:
1729   case TargetOpcode::G_FSUB:
1730   case TargetOpcode::G_INTRINSIC_ROUND:
1731   case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
1732   case TargetOpcode::G_INTRINSIC_TRUNC:
1733     return true;
1734   default:
1735     return false;
1736   }
1737 }
1738 
1739 /// Shifts return poison if the shift amount equals or exceeds the bitwidth.
1740 static bool shiftAmountKnownInRange(Register ShiftAmount,
1741                                     const MachineRegisterInfo &MRI) {
1742   LLT Ty = MRI.getType(ShiftAmount);
1743 
1744   if (Ty.isScalableVector())
1745     return false; // Can't tell, just return false to be safe
1746 
1747   if (Ty.isScalar()) {
1748     std::optional<ValueAndVReg> Val =
1749         getIConstantVRegValWithLookThrough(ShiftAmount, MRI);
1750     if (!Val)
1751       return false;
1752     return Val->Value.ult(Ty.getScalarSizeInBits());
1753   }
1754 
1755   GBuildVector *BV = getOpcodeDef<GBuildVector>(ShiftAmount, MRI);
1756   if (!BV)
1757     return false;
1758 
1759   unsigned Sources = BV->getNumSources();
1760   for (unsigned I = 0; I < Sources; ++I) {
1761     std::optional<ValueAndVReg> Val =
1762         getIConstantVRegValWithLookThrough(BV->getSourceReg(I), MRI);
1763     if (!Val)
1764       return false;
1765     if (!Val->Value.ult(Ty.getScalarSizeInBits()))
1766       return false;
1767   }
1768 
1769   return true;
1770 }
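
// For example, an s32 shift amount proven to be the constant 31 is known in
// range (31 < 32), while 35, or any amount that cannot be resolved to a
// constant, is not, since out-of-range shift amounts produce poison.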
1771 
1772 namespace {
1773 enum class UndefPoisonKind {
1774   PoisonOnly = (1 << 0),
1775   UndefOnly = (1 << 1),
1776   UndefOrPoison = PoisonOnly | UndefOnly,
1777 };
1778 } // end anonymous namespace
1779 
1780 static bool includesPoison(UndefPoisonKind Kind) {
1781   return (unsigned(Kind) & unsigned(UndefPoisonKind::PoisonOnly)) != 0;
1782 }
1783 
1784 static bool includesUndef(UndefPoisonKind Kind) {
1785   return (unsigned(Kind) & unsigned(UndefPoisonKind::UndefOnly)) != 0;
1786 }
1787 
1788 static bool canCreateUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI,
1789                                    bool ConsiderFlagsAndMetadata,
1790                                    UndefPoisonKind Kind) {
1791   MachineInstr *RegDef = MRI.getVRegDef(Reg);
1792 
1793   if (ConsiderFlagsAndMetadata && includesPoison(Kind))
1794     if (auto *GMI = dyn_cast<GenericMachineInstr>(RegDef))
1795       if (GMI->hasPoisonGeneratingFlags())
1796         return true;
1797 
1798   // Check whether opcode is a poison/undef-generating operation.
1799   switch (RegDef->getOpcode()) {
1800   case TargetOpcode::G_BUILD_VECTOR:
1801   case TargetOpcode::G_CONSTANT_FOLD_BARRIER:
1802     return false;
1803   case TargetOpcode::G_SHL:
1804   case TargetOpcode::G_ASHR:
1805   case TargetOpcode::G_LSHR:
1806     return includesPoison(Kind) &&
1807            !shiftAmountKnownInRange(RegDef->getOperand(2).getReg(), MRI);
1808   case TargetOpcode::G_FPTOSI:
1809   case TargetOpcode::G_FPTOUI:
1810     // fptosi/ui yields poison if the resulting value does not fit in the
1811     // destination type.
1812     return true;
1813   case TargetOpcode::G_CTLZ:
1814   case TargetOpcode::G_CTTZ:
1815   case TargetOpcode::G_ABS:
1816   case TargetOpcode::G_CTPOP:
1817   case TargetOpcode::G_BSWAP:
1818   case TargetOpcode::G_BITREVERSE:
1819   case TargetOpcode::G_FSHL:
1820   case TargetOpcode::G_FSHR:
1821   case TargetOpcode::G_SMAX:
1822   case TargetOpcode::G_SMIN:
1823   case TargetOpcode::G_UMAX:
1824   case TargetOpcode::G_UMIN:
1825   case TargetOpcode::G_PTRMASK:
1826   case TargetOpcode::G_SADDO:
1827   case TargetOpcode::G_SSUBO:
1828   case TargetOpcode::G_UADDO:
1829   case TargetOpcode::G_USUBO:
1830   case TargetOpcode::G_SMULO:
1831   case TargetOpcode::G_UMULO:
1832   case TargetOpcode::G_SADDSAT:
1833   case TargetOpcode::G_UADDSAT:
1834   case TargetOpcode::G_SSUBSAT:
1835   case TargetOpcode::G_USUBSAT:
1836     return false;
1837   case TargetOpcode::G_SSHLSAT:
1838   case TargetOpcode::G_USHLSAT:
1839     return includesPoison(Kind) &&
1840            !shiftAmountKnownInRange(RegDef->getOperand(2).getReg(), MRI);
1841   case TargetOpcode::G_INSERT_VECTOR_ELT: {
1842     GInsertVectorElement *Insert = cast<GInsertVectorElement>(RegDef);
1843     if (includesPoison(Kind)) {
1844       std::optional<ValueAndVReg> Index =
1845           getIConstantVRegValWithLookThrough(Insert->getIndexReg(), MRI);
1846       if (!Index)
1847         return true;
1848       LLT VecTy = MRI.getType(Insert->getVectorReg());
1849       return Index->Value.uge(VecTy.getElementCount().getKnownMinValue());
1850     }
1851     return false;
1852   }
1853   case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1854     GExtractVectorElement *Extract = cast<GExtractVectorElement>(RegDef);
1855     if (includesPoison(Kind)) {
1856       std::optional<ValueAndVReg> Index =
1857           getIConstantVRegValWithLookThrough(Extract->getIndexReg(), MRI);
1858       if (!Index)
1859         return true;
1860       LLT VecTy = MRI.getType(Extract->getVectorReg());
1861       return Index->Value.uge(VecTy.getElementCount().getKnownMinValue());
1862     }
1863     return false;
1864   }
1865   case TargetOpcode::G_SHUFFLE_VECTOR: {
1866     GShuffleVector *Shuffle = cast<GShuffleVector>(RegDef);
1867     ArrayRef<int> Mask = Shuffle->getMask();
1868     return includesPoison(Kind) && is_contained(Mask, -1);
1869   }
1870   case TargetOpcode::G_FNEG:
1871   case TargetOpcode::G_PHI:
1872   case TargetOpcode::G_SELECT:
1873   case TargetOpcode::G_UREM:
1874   case TargetOpcode::G_SREM:
1875   case TargetOpcode::G_FREEZE:
1876   case TargetOpcode::G_ICMP:
1877   case TargetOpcode::G_FCMP:
1878   case TargetOpcode::G_FADD:
1879   case TargetOpcode::G_FSUB:
1880   case TargetOpcode::G_FMUL:
1881   case TargetOpcode::G_FDIV:
1882   case TargetOpcode::G_FREM:
1883   case TargetOpcode::G_PTR_ADD:
1884     return false;
1885   default:
1886     return !isa<GCastOp>(RegDef) && !isa<GBinOp>(RegDef);
1887   }
1888 }
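
// e.g. a G_ADD with the nsw flag set can create poison (caught by the flags
// check at the top when ConsiderFlagsAndMetadata is true), while a plain
// G_ADD cannot: it falls through to the default case and is a GBinOp.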
1889 
1890 static bool isGuaranteedNotToBeUndefOrPoison(Register Reg,
1891                                              const MachineRegisterInfo &MRI,
1892                                              unsigned Depth,
1893                                              UndefPoisonKind Kind) {
1894   if (Depth >= MaxAnalysisRecursionDepth)
1895     return false;
1896 
1897   MachineInstr *RegDef = MRI.getVRegDef(Reg);
1898 
1899   switch (RegDef->getOpcode()) {
1900   case TargetOpcode::G_FREEZE:
1901     return true;
1902   case TargetOpcode::G_IMPLICIT_DEF:
1903     return !includesUndef(Kind);
1904   case TargetOpcode::G_CONSTANT:
1905   case TargetOpcode::G_FCONSTANT:
1906     return true;
1907   case TargetOpcode::G_BUILD_VECTOR: {
1908     GBuildVector *BV = cast<GBuildVector>(RegDef);
1909     unsigned NumSources = BV->getNumSources();
1910     for (unsigned I = 0; I < NumSources; ++I)
1911       if (!::isGuaranteedNotToBeUndefOrPoison(BV->getSourceReg(I), MRI,
1912                                               Depth + 1, Kind))
1913         return false;
1914     return true;
1915   }
1916   case TargetOpcode::G_PHI: {
1917     GPhi *Phi = cast<GPhi>(RegDef);
1918     unsigned NumIncoming = Phi->getNumIncomingValues();
1919     for (unsigned I = 0; I < NumIncoming; ++I)
1920       if (!::isGuaranteedNotToBeUndefOrPoison(Phi->getIncomingValue(I), MRI,
1921                                               Depth + 1, Kind))
1922         return false;
1923     return true;
1924   }
1925   default: {
1926     auto MOCheck = [&](const MachineOperand &MO) {
1927       if (!MO.isReg())
1928         return true;
1929       return ::isGuaranteedNotToBeUndefOrPoison(MO.getReg(), MRI, Depth + 1,
1930                                                 Kind);
1931     };
1932     return !::canCreateUndefOrPoison(Reg, MRI,
1933                                      /*ConsiderFlagsAndMetadata=*/true, Kind) &&
1934            all_of(RegDef->uses(), MOCheck);
1935   }
1936   }
1937 }
1938 
1939 bool llvm::canCreateUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI,
1940                                   bool ConsiderFlagsAndMetadata) {
1941   return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
1942                                   UndefPoisonKind::UndefOrPoison);
1943 }
1944 
1945 bool llvm::canCreatePoison(Register Reg, const MachineRegisterInfo &MRI,
1946                            bool ConsiderFlagsAndMetadata) {
1947   return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,
1948                                   UndefPoisonKind::PoisonOnly);
1949 }
1950 
1951 bool llvm::isGuaranteedNotToBeUndefOrPoison(Register Reg,
1952                                             const MachineRegisterInfo &MRI,
1953                                             unsigned Depth) {
1954   return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
1955                                             UndefPoisonKind::UndefOrPoison);
1956 }
1957 
1958 bool llvm::isGuaranteedNotToBePoison(Register Reg,
1959                                      const MachineRegisterInfo &MRI,
1960                                      unsigned Depth) {
1961   return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
1962                                             UndefPoisonKind::PoisonOnly);
1963 }
1964 
1965 bool llvm::isGuaranteedNotToBeUndef(Register Reg,
1966                                     const MachineRegisterInfo &MRI,
1967                                     unsigned Depth) {
1968   return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,
1969                                             UndefPoisonKind::UndefOnly);
1970 }
1971 
1972 Type *llvm::getTypeForLLT(LLT Ty, LLVMContext &C) {
1973   if (Ty.isVector())
1974     return VectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
1975                            Ty.getElementCount());
1976   return IntegerType::get(C, Ty.getSizeInBits());
1977 }
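
// e.g. LLT::fixed_vector(4, 32) maps to <4 x i32> and LLT::scalar(64) to
// i64. Note that pointer LLTs also map to integers of the same bit width
// here, since only the size is preserved.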
1978 
1979 APInt llvm::GIConstant::getScalarValue() const {
1980   assert(Kind == GIConstantKind::Scalar && "Expected scalar constant");
1981 
1982   return Value;
1983 }
1984 
1985 std::optional<GIConstant>
1986 llvm::GIConstant::getConstant(Register Const, const MachineRegisterInfo &MRI) {
1987   MachineInstr *Constant = getDefIgnoringCopies(Const, MRI);
1988 
1989   if (GSplatVector *Splat = dyn_cast<GSplatVector>(Constant)) {
1990     std::optional<ValueAndVReg> MayBeConstant =
1991         getIConstantVRegValWithLookThrough(Splat->getScalarReg(), MRI);
1992     if (!MayBeConstant)
1993       return std::nullopt;
1994     return GIConstant(MayBeConstant->Value, GIConstantKind::ScalableVector);
1995   }
1996 
1997   if (GBuildVector *Build = dyn_cast<GBuildVector>(Constant)) {
1998     SmallVector<APInt> Values;
1999     unsigned NumSources = Build->getNumSources();
2000     for (unsigned I = 0; I < NumSources; ++I) {
2001       Register SrcReg = Build->getSourceReg(I);
2002       std::optional<ValueAndVReg> MayBeConstant =
2003           getIConstantVRegValWithLookThrough(SrcReg, MRI);
2004       if (!MayBeConstant)
2005         return std::nullopt;
2006       Values.push_back(MayBeConstant->Value);
2007     }
2008     return GIConstant(Values);
2009   }
2010 
2011   std::optional<ValueAndVReg> MayBeConstant =
2012       getIConstantVRegValWithLookThrough(Const, MRI);
2013   if (!MayBeConstant)
2014     return std::nullopt;
2015 
2016   return GIConstant(MayBeConstant->Value, GIConstantKind::Scalar);
2017 }
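
// A minimal usage sketch (illustrative):
//   if (std::optional<GIConstant> C = GIConstant::getConstant(Reg, MRI))
//     ... // For a plain G_CONSTANT this is a GIConstantKind::Scalar value
//         // whose APInt is available via getScalarValue().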
2018 
2019 APFloat llvm::GFConstant::getScalarValue() const {
2020   assert(Kind == GFConstantKind::Scalar && "Expected scalar constant");
2021 
2022   return Values[0];
2023 }
2024 
2025 std::optional<GFConstant>
2026 llvm::GFConstant::getConstant(Register Const, const MachineRegisterInfo &MRI) {
2027   MachineInstr *Constant = getDefIgnoringCopies(Const, MRI);
2028 
2029   if (GSplatVector *Splat = dyn_cast<GSplatVector>(Constant)) {
2030     std::optional<FPValueAndVReg> MayBeConstant =
2031         getFConstantVRegValWithLookThrough(Splat->getScalarReg(), MRI);
2032     if (!MayBeConstant)
2033       return std::nullopt;
2034     return GFConstant(MayBeConstant->Value, GFConstantKind::ScalableVector);
2035   }
2036 
2037   if (GBuildVector *Build = dyn_cast<GBuildVector>(Constant)) {
2038     SmallVector<APFloat> Values;
2039     unsigned NumSources = Build->getNumSources();
2040     for (unsigned I = 0; I < NumSources; ++I) {
2041       Register SrcReg = Build->getSourceReg(I);
2042       std::optional<FPValueAndVReg> MayBeConstant =
2043           getFConstantVRegValWithLookThrough(SrcReg, MRI);
2044       if (!MayBeConstant)
2045         return std::nullopt;
2046       Values.push_back(MayBeConstant->Value);
2047     }
2048     return GFConstant(Values);
2049   }
2050 
2051   std::optional<FPValueAndVReg> MayBeConstant =
2052       getFConstantVRegValWithLookThrough(Const, MRI);
2053   if (!MayBeConstant)
2054     return std::nullopt;
2055 
2056   return GFConstant(MayBeConstant->Value, GFConstantKind::Scalar);
2057 }
2058