xref: /llvm-project/llvm/lib/CodeGen/GlobalISel/Utils.cpp (revision 67819a72c6ba39267effe8edfc1befddc3f3f2f9)
1 //===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file This file implements the utility functions used by the GlobalISel
9 /// pipeline.
10 //===----------------------------------------------------------------------===//
11 
12 #include "llvm/CodeGen/GlobalISel/Utils.h"
13 #include "llvm/ADT/APFloat.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/Optional.h"
16 #include "llvm/CodeGen/CodeGenCommonISel.h"
17 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
18 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
19 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
20 #include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
21 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
22 #include "llvm/CodeGen/MachineInstr.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/MachineSizeOpts.h"
27 #include "llvm/CodeGen/RegisterBankInfo.h"
28 #include "llvm/CodeGen/StackProtector.h"
29 #include "llvm/CodeGen/TargetInstrInfo.h"
30 #include "llvm/CodeGen/TargetLowering.h"
31 #include "llvm/CodeGen/TargetPassConfig.h"
32 #include "llvm/CodeGen/TargetRegisterInfo.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Transforms/Utils/SizeOpts.h"
36 #include <numeric>
37 #include <optional>
38 
39 #define DEBUG_TYPE "globalisel-utils"
40 
41 using namespace llvm;
42 using namespace MIPatternMatch;
43 
44 Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
45                                    const TargetInstrInfo &TII,
46                                    const RegisterBankInfo &RBI, Register Reg,
47                                    const TargetRegisterClass &RegClass) {
48   if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
49     return MRI.createVirtualRegister(&RegClass);
50 
51   return Reg;
52 }
53 
54 Register llvm::constrainOperandRegClass(
55     const MachineFunction &MF, const TargetRegisterInfo &TRI,
56     MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
57     const RegisterBankInfo &RBI, MachineInstr &InsertPt,
58     const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
59   Register Reg = RegMO.getReg();
60   // Assume physical registers are properly constrained.
61   assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
62 
63   // Save the old register class to check whether the change notifications
64   // will be required.
65   // TODO: A better approach would be to pass the observers to
66   // constrainRegToClass().
67   auto *OldRegClass = MRI.getRegClassOrNull(Reg);
68   Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
69   // If we created a new virtual register because the class is not compatible
70   // then create a copy between the new and the old register.
71   if (ConstrainedReg != Reg) {
72     MachineBasicBlock::iterator InsertIt(&InsertPt);
73     MachineBasicBlock &MBB = *InsertPt.getParent();
74     // FIXME: The copy needs to have the classes constrained for its operands.
75     // Use operand's regbank to get the class for old register (Reg).
76     if (RegMO.isUse()) {
77       BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
78               TII.get(TargetOpcode::COPY), ConstrainedReg)
79           .addReg(Reg);
80     } else {
81       assert(RegMO.isDef() && "Must be a definition");
82       BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
83               TII.get(TargetOpcode::COPY), Reg)
84           .addReg(ConstrainedReg);
85     }
86     if (GISelChangeObserver *Observer = MF.getObserver()) {
87       Observer->changingInstr(*RegMO.getParent());
88     }
89     RegMO.setReg(ConstrainedReg);
90     if (GISelChangeObserver *Observer = MF.getObserver()) {
91       Observer->changedInstr(*RegMO.getParent());
92     }
93   } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
94     if (GISelChangeObserver *Observer = MF.getObserver()) {
95       if (!RegMO.isDef()) {
96         MachineInstr *RegDef = MRI.getVRegDef(Reg);
97         Observer->changedInstr(*RegDef);
98       }
99       Observer->changingAllUsesOfReg(MRI, Reg);
100       Observer->finishedChangingAllUsesOfReg();
101     }
102   }
103   return ConstrainedReg;
104 }
105 
106 Register llvm::constrainOperandRegClass(
107     const MachineFunction &MF, const TargetRegisterInfo &TRI,
108     MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
109     const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
110     MachineOperand &RegMO, unsigned OpIdx) {
111   Register Reg = RegMO.getReg();
112   // Assume physical registers are properly constrained.
113   assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
114 
115   const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI, MF);
116   // Some of the target independent instructions, like COPY, may not impose any
117   // register class constraints on some of their operands: If it's a use, we can
118   // skip constraining as the instruction defining the register would constrain
119   // it.
120 
121   if (OpRC) {
122     // Obtain the RC from incoming regbank if it is a proper sub-class. Operands
123     // can have multiple regbanks for a superclass that combine different
124     // register types (E.g., AMDGPU's VGPR and AGPR). The regbank ambiguity
125     // resolved by targets during regbankselect should not be overridden.
126     if (const auto *SubRC = TRI.getCommonSubClass(
127             OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
128       OpRC = SubRC;
129 
130     OpRC = TRI.getAllocatableClass(OpRC);
131   }
132 
133   if (!OpRC) {
134     assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
135            "Register class constraint is required unless either the "
136            "instruction is target independent or the operand is a use");
137     // FIXME: Just bailing out like this here could be not enough, unless we
138     // expect the users of this function to do the right thing for PHIs and
139     // COPY:
140     //   v1 = COPY v0
141     //   v2 = COPY v1
142     // v1 here may end up not being constrained at all. Please notice that to
143     // reproduce the issue we likely need a destination pattern of a selection
144     // rule producing such extra copies, not just an input GMIR with them as
145     // every existing target using selectImpl handles copies before calling it
146     // and they never reach this function.
147     return Reg;
148   }
149   return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
150                                   RegMO);
151 }
152 
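// A minimal call-site sketch (the opcode and value names below are
// hypothetical): a target's InstructionSelector typically builds the selected
// instruction and then constrains all of its register operands in one call:
//   auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MyTarget::ADDrr), Dst)
//                  .addReg(Src0)
//                  .addReg(Src1);
//   constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);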
153 bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
154                                             const TargetInstrInfo &TII,
155                                             const TargetRegisterInfo &TRI,
156                                             const RegisterBankInfo &RBI) {
157   assert(!isPreISelGenericOpcode(I.getOpcode()) &&
158          "A selected instruction is expected");
159   MachineBasicBlock &MBB = *I.getParent();
160   MachineFunction &MF = *MBB.getParent();
161   MachineRegisterInfo &MRI = MF.getRegInfo();
162 
163   for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
164     MachineOperand &MO = I.getOperand(OpI);
165 
166     // There's nothing to be done on non-register operands.
167     if (!MO.isReg())
168       continue;
169 
170     LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
171     assert(MO.isReg() && "Unsupported non-reg operand");
172 
173     Register Reg = MO.getReg();
174     // Physical registers don't need to be constrained.
175     if (Register::isPhysicalRegister(Reg))
176       continue;
177 
178     // Register operands with a value of 0 (e.g. predicate operands) don't need
179     // to be constrained.
180     if (Reg == 0)
181       continue;
182 
183     // If the operand is a vreg, we should constrain its regclass, and only
184     // insert COPYs if that's impossible.
185     // constrainOperandRegClass does that for us.
186     constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
187 
188     // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
189     // done.
190     if (MO.isUse()) {
191       int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
192       if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
193         I.tieOperands(DefIdx, OpI);
194     }
195   }
196   return true;
197 }
198 
199 bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
200                          MachineRegisterInfo &MRI) {
201   // Give up if either DstReg or SrcReg is a physical register.
202   if (DstReg.isPhysical() || SrcReg.isPhysical())
203     return false;
204   // Give up if the types don't match.
205   if (MRI.getType(DstReg) != MRI.getType(SrcReg))
206     return false;
207   // Replace if either DstReg has no constraints or the register
208   // constraints match.
209   return !MRI.getRegClassOrRegBank(DstReg) ||
210          MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
211 }
212 
213 bool llvm::isTriviallyDead(const MachineInstr &MI,
214                            const MachineRegisterInfo &MRI) {
215   // FIXME: This logic is mostly duplicated with
216   // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
217   // MachineInstr::isLabel?
218 
219   // Don't delete frame allocation labels.
220   if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
221     return false;
222   // LIFETIME markers should be preserved even if they seem dead.
223   if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
224       MI.getOpcode() == TargetOpcode::LIFETIME_END)
225     return false;
226 
227   // If we can move an instruction, we can remove it.  Otherwise, it has
228   // a side-effect of some sort.
229   bool SawStore = false;
230   if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
231     return false;
232 
233   // Instructions without side-effects are dead iff they only define dead vregs.
234   for (const auto &MO : MI.operands()) {
235     if (!MO.isReg() || !MO.isDef())
236       continue;
237 
238     Register Reg = MO.getReg();
239     if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
240       return false;
241   }
242   return true;
243 }
244 
245 static void reportGISelDiagnostic(DiagnosticSeverity Severity,
246                                   MachineFunction &MF,
247                                   const TargetPassConfig &TPC,
248                                   MachineOptimizationRemarkEmitter &MORE,
249                                   MachineOptimizationRemarkMissed &R) {
250   bool IsFatal = Severity == DS_Error &&
251                  TPC.isGlobalISelAbortEnabled();
252   // Print the function name explicitly if we don't have a debug location (which
253   // makes the diagnostic less useful) or if we're going to emit a raw error.
254   if (!R.getLocation().isValid() || IsFatal)
255     R << (" (in function: " + MF.getName() + ")").str();
256 
257   if (IsFatal)
258     report_fatal_error(Twine(R.getMsg()));
259   else
260     MORE.emit(R);
261 }
262 
263 void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
264                               MachineOptimizationRemarkEmitter &MORE,
265                               MachineOptimizationRemarkMissed &R) {
266   reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
267 }
268 
269 void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
270                               MachineOptimizationRemarkEmitter &MORE,
271                               MachineOptimizationRemarkMissed &R) {
272   MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
273   reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
274 }
275 
276 void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
277                               MachineOptimizationRemarkEmitter &MORE,
278                               const char *PassName, StringRef Msg,
279                               const MachineInstr &MI) {
280   MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
281                                     MI.getDebugLoc(), MI.getParent());
282   R << Msg;
283   // Printing MI is expensive; only do it if expensive remarks are enabled.
284   if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
285     R << ": " << ore::MNV("Inst", MI);
286   reportGISelFailure(MF, TPC, MORE, R);
287 }
288 
289 std::optional<APInt> llvm::getIConstantVRegVal(Register VReg,
290                                                const MachineRegisterInfo &MRI) {
291   std::optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
292       VReg, MRI, /*LookThroughInstrs*/ false);
293   assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
294          "Value found while looking through instrs");
295   if (!ValAndVReg)
296     return std::nullopt;
297   return ValAndVReg->Value;
298 }
299 
300 std::optional<int64_t>
301 llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
302   std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
303   if (Val && Val->getBitWidth() <= 64)
304     return Val->getSExtValue();
305   return std::nullopt;
306 }
307 
308 namespace {
309 
310 typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
311 typedef std::function<std::optional<APInt>(const MachineInstr *MI)> GetAPCstFn;
312 
313 std::optional<ValueAndVReg> getConstantVRegValWithLookThrough(
314     Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
315     GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
316     bool LookThroughAnyExt = false) {
317   SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
318   MachineInstr *MI;
319 
320   while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
321          LookThroughInstrs) {
322     switch (MI->getOpcode()) {
323     case TargetOpcode::G_ANYEXT:
324       if (!LookThroughAnyExt)
325         return std::nullopt;
326       [[fallthrough]];
327     case TargetOpcode::G_TRUNC:
328     case TargetOpcode::G_SEXT:
329     case TargetOpcode::G_ZEXT:
330       SeenOpcodes.push_back(std::make_pair(
331           MI->getOpcode(),
332           MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
333       VReg = MI->getOperand(1).getReg();
334       break;
335     case TargetOpcode::COPY:
336       VReg = MI->getOperand(1).getReg();
337       if (Register::isPhysicalRegister(VReg))
338         return std::nullopt;
339       break;
340     case TargetOpcode::G_INTTOPTR:
341       VReg = MI->getOperand(1).getReg();
342       break;
343     default:
344       return std::nullopt;
345     }
346   }
347   if (!MI || !IsConstantOpcode(MI))
348     return std::nullopt;
349 
350   std::optional<APInt> MaybeVal = getAPCstValue(MI);
351   if (!MaybeVal)
352     return std::nullopt;
353   APInt &Val = *MaybeVal;
354   while (!SeenOpcodes.empty()) {
355     std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
356     switch (OpcodeAndSize.first) {
357     case TargetOpcode::G_TRUNC:
358       Val = Val.trunc(OpcodeAndSize.second);
359       break;
360     case TargetOpcode::G_ANYEXT:
361     case TargetOpcode::G_SEXT:
362       Val = Val.sext(OpcodeAndSize.second);
363       break;
364     case TargetOpcode::G_ZEXT:
365       Val = Val.zext(OpcodeAndSize.second);
366       break;
367     }
368   }
369 
370   return ValueAndVReg{Val, VReg};
371 }
372 
373 bool isIConstant(const MachineInstr *MI) {
374   if (!MI)
375     return false;
376   return MI->getOpcode() == TargetOpcode::G_CONSTANT;
377 }
378 
379 bool isFConstant(const MachineInstr *MI) {
380   if (!MI)
381     return false;
382   return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
383 }
384 
385 bool isAnyConstant(const MachineInstr *MI) {
386   if (!MI)
387     return false;
388   unsigned Opc = MI->getOpcode();
389   return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
390 }
391 
392 std::optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
393   const MachineOperand &CstVal = MI->getOperand(1);
394   if (CstVal.isCImm())
395     return CstVal.getCImm()->getValue();
396   return std::nullopt;
397 }
398 
399 std::optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
400   const MachineOperand &CstVal = MI->getOperand(1);
401   if (CstVal.isCImm())
402     return CstVal.getCImm()->getValue();
403   if (CstVal.isFPImm())
404     return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
405   return std::nullopt;
406 }
407 
408 } // end anonymous namespace
409 
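// For example, given the (illustrative) generic MIR
//   %c:_(s32) = G_CONSTANT i32 42
//   %t:_(s16) = G_TRUNC %c(s32)
// looking through %t yields the constant value 42 truncated to 16 bits,
// together with the vreg of the G_CONSTANT itself (%c).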
410 std::optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
411     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
412   return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
413                                            getCImmAsAPInt, LookThroughInstrs);
414 }
415 
416 std::optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
417     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
418     bool LookThroughAnyExt) {
419   return getConstantVRegValWithLookThrough(
420       VReg, MRI, isAnyConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs,
421       LookThroughAnyExt);
422 }
423 
424 std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
425     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
426   auto Reg = getConstantVRegValWithLookThrough(
427       VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
428   if (!Reg)
429     return std::nullopt;
430   return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
431                         Reg->VReg};
432 }
433 
434 const ConstantFP *
435 llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
436   MachineInstr *MI = MRI.getVRegDef(VReg);
437   if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
438     return nullptr;
439   return MI->getOperand(1).getFPImm();
440 }
441 
442 std::optional<DefinitionAndSourceRegister>
443 llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
444   Register DefSrcReg = Reg;
445   auto *DefMI = MRI.getVRegDef(Reg);
446   auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
447   if (!DstTy.isValid())
448     return std::nullopt;
449   unsigned Opc = DefMI->getOpcode();
450   while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
451     Register SrcReg = DefMI->getOperand(1).getReg();
452     auto SrcTy = MRI.getType(SrcReg);
453     if (!SrcTy.isValid())
454       break;
455     DefMI = MRI.getVRegDef(SrcReg);
456     DefSrcReg = SrcReg;
457     Opc = DefMI->getOpcode();
458   }
459   return DefinitionAndSourceRegister{DefMI, DefSrcReg};
460 }
461 
462 MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
463                                          const MachineRegisterInfo &MRI) {
464   std::optional<DefinitionAndSourceRegister> DefSrcReg =
465       getDefSrcRegIgnoringCopies(Reg, MRI);
466   return DefSrcReg ? DefSrcReg->MI : nullptr;
467 }
468 
469 Register llvm::getSrcRegIgnoringCopies(Register Reg,
470                                        const MachineRegisterInfo &MRI) {
471   std::optional<DefinitionAndSourceRegister> DefSrcReg =
472       getDefSrcRegIgnoringCopies(Reg, MRI);
473   return DefSrcReg ? DefSrcReg->Reg : Register();
474 }
475 
476 MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
477                                  const MachineRegisterInfo &MRI) {
478   MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
479   return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
480 }
481 
482 APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
483   if (Size == 32)
484     return APFloat(float(Val));
485   if (Size == 64)
486     return APFloat(Val);
487   if (Size != 16)
488     llvm_unreachable("Unsupported FPConstant size");
489   bool Ignored;
490   APFloat APF(Val);
491   APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
492   return APF;
493 }
494 
495 std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
496                                              const Register Op1,
497                                              const Register Op2,
498                                              const MachineRegisterInfo &MRI) {
499   auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
500   if (!MaybeOp2Cst)
501     return std::nullopt;
502 
503   auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
504   if (!MaybeOp1Cst)
505     return std::nullopt;
506 
507   const APInt &C1 = MaybeOp1Cst->Value;
508   const APInt &C2 = MaybeOp2Cst->Value;
509   switch (Opcode) {
510   default:
511     break;
512   case TargetOpcode::G_ADD:
513   case TargetOpcode::G_PTR_ADD:
514     return C1 + C2;
515   case TargetOpcode::G_AND:
516     return C1 & C2;
517   case TargetOpcode::G_ASHR:
518     return C1.ashr(C2);
519   case TargetOpcode::G_LSHR:
520     return C1.lshr(C2);
521   case TargetOpcode::G_MUL:
522     return C1 * C2;
523   case TargetOpcode::G_OR:
524     return C1 | C2;
525   case TargetOpcode::G_SHL:
526     return C1 << C2;
527   case TargetOpcode::G_SUB:
528     return C1 - C2;
529   case TargetOpcode::G_XOR:
530     return C1 ^ C2;
531   case TargetOpcode::G_UDIV:
532     if (!C2.getBoolValue())
533       break;
534     return C1.udiv(C2);
535   case TargetOpcode::G_SDIV:
536     if (!C2.getBoolValue())
537       break;
538     return C1.sdiv(C2);
539   case TargetOpcode::G_UREM:
540     if (!C2.getBoolValue())
541       break;
542     return C1.urem(C2);
543   case TargetOpcode::G_SREM:
544     if (!C2.getBoolValue())
545       break;
546     return C1.srem(C2);
547   case TargetOpcode::G_SMIN:
548     return APIntOps::smin(C1, C2);
549   case TargetOpcode::G_SMAX:
550     return APIntOps::smax(C1, C2);
551   case TargetOpcode::G_UMIN:
552     return APIntOps::umin(C1, C2);
553   case TargetOpcode::G_UMAX:
554     return APIntOps::umax(C1, C2);
555   }
556 
557   return std::nullopt;
558 }
559 
560 std::optional<APFloat>
561 llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
562                           const Register Op2, const MachineRegisterInfo &MRI) {
563   const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
564   if (!Op2Cst)
565     return std::nullopt;
566 
567   const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
568   if (!Op1Cst)
569     return std::nullopt;
570 
571   APFloat C1 = Op1Cst->getValueAPF();
572   const APFloat &C2 = Op2Cst->getValueAPF();
573   switch (Opcode) {
574   case TargetOpcode::G_FADD:
575     C1.add(C2, APFloat::rmNearestTiesToEven);
576     return C1;
577   case TargetOpcode::G_FSUB:
578     C1.subtract(C2, APFloat::rmNearestTiesToEven);
579     return C1;
580   case TargetOpcode::G_FMUL:
581     C1.multiply(C2, APFloat::rmNearestTiesToEven);
582     return C1;
583   case TargetOpcode::G_FDIV:
584     C1.divide(C2, APFloat::rmNearestTiesToEven);
585     return C1;
586   case TargetOpcode::G_FREM:
587     C1.mod(C2);
588     return C1;
589   case TargetOpcode::G_FCOPYSIGN:
590     C1.copySign(C2);
591     return C1;
592   case TargetOpcode::G_FMINNUM:
593     return minnum(C1, C2);
594   case TargetOpcode::G_FMAXNUM:
595     return maxnum(C1, C2);
596   case TargetOpcode::G_FMINIMUM:
597     return minimum(C1, C2);
598   case TargetOpcode::G_FMAXIMUM:
599     return maximum(C1, C2);
600   case TargetOpcode::G_FMINNUM_IEEE:
601   case TargetOpcode::G_FMAXNUM_IEEE:
602     // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
603     // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
604     // and currently there isn't a nice wrapper in APFloat for the version with
605     // correct snan handling.
606     break;
607   default:
608     break;
609   }
610 
611   return std::nullopt;
612 }
613 
614 SmallVector<APInt>
615 llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
616                               const Register Op2,
617                               const MachineRegisterInfo &MRI) {
618   auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
619   if (!SrcVec2)
620     return SmallVector<APInt>();
621 
622   auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
623   if (!SrcVec1)
624     return SmallVector<APInt>();
625 
626   SmallVector<APInt> FoldedElements;
627   for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
628     auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
629                                       SrcVec2->getSourceReg(Idx), MRI);
630     if (!MaybeCst)
631       return SmallVector<APInt>();
632     FoldedElements.push_back(*MaybeCst);
633   }
634   return FoldedElements;
635 }
636 
637 bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
638                            bool SNaN) {
639   const MachineInstr *DefMI = MRI.getVRegDef(Val);
640   if (!DefMI)
641     return false;
642 
643   const TargetMachine& TM = DefMI->getMF()->getTarget();
644   if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
645     return true;
646 
647   // If the value is a constant, we can obviously see if it is a NaN or not.
648   if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
649     return !FPVal->getValueAPF().isNaN() ||
650            (SNaN && !FPVal->getValueAPF().isSignaling());
651   }
652 
653   if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
654     for (const auto &Op : DefMI->uses())
655       if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
656         return false;
657     return true;
658   }
659 
660   switch (DefMI->getOpcode()) {
661   default:
662     break;
663   case TargetOpcode::G_FADD:
664   case TargetOpcode::G_FSUB:
665   case TargetOpcode::G_FMUL:
666   case TargetOpcode::G_FDIV:
667   case TargetOpcode::G_FREM:
668   case TargetOpcode::G_FSIN:
669   case TargetOpcode::G_FCOS:
670   case TargetOpcode::G_FMA:
671   case TargetOpcode::G_FMAD:
672     if (SNaN)
673       return true;
674 
675     // TODO: Need isKnownNeverInfinity
676     return false;
677   case TargetOpcode::G_FMINNUM_IEEE:
678   case TargetOpcode::G_FMAXNUM_IEEE: {
679     if (SNaN)
680       return true;
681     // This can return a NaN if either operand is an sNaN, or if both operands
682     // are NaN.
683     return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
684             isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
685            (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
686             isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
687   }
688   case TargetOpcode::G_FMINNUM:
689   case TargetOpcode::G_FMAXNUM: {
690     // Only one needs to be known not-NaN, since it will be returned if the
691     // other ends up being one.
692     return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
693            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
694   }
695   }
696 
697   if (SNaN) {
698     // FP operations quiet signaling NaNs. For now, just handle the ones
699     // inserted during legalization.
700     switch (DefMI->getOpcode()) {
701     case TargetOpcode::G_FPEXT:
702     case TargetOpcode::G_FPTRUNC:
703     case TargetOpcode::G_FCANONICALIZE:
704       return true;
705     default:
706       return false;
707     }
708   }
709 
710   return false;
711 }
712 
713 Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
714                                   const MachinePointerInfo &MPO) {
715   auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
716   if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
717     MachineFrameInfo &MFI = MF.getFrameInfo();
718     return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
719                            MPO.Offset);
720   }
721 
722   if (const Value *V = MPO.V.dyn_cast<const Value *>()) {
723     const Module *M = MF.getFunction().getParent();
724     return V->getPointerAlignment(M->getDataLayout());
725   }
726 
727   return Align(1);
728 }
729 
730 Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
731                                         const TargetInstrInfo &TII,
732                                         MCRegister PhysReg,
733                                         const TargetRegisterClass &RC,
734                                         const DebugLoc &DL, LLT RegTy) {
735   MachineBasicBlock &EntryMBB = MF.front();
736   MachineRegisterInfo &MRI = MF.getRegInfo();
737   Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
738   if (LiveIn) {
739     MachineInstr *Def = MRI.getVRegDef(LiveIn);
740     if (Def) {
741       // FIXME: Should the verifier check this is in the entry block?
742       assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
743       return LiveIn;
744     }
745 
746     // It's possible the incoming argument register and copy were added during
747     // lowering, but later deleted due to being/becoming dead. If this happens,
748     // re-insert the copy.
749   } else {
750     // The live in register was not present, so add it.
751     LiveIn = MF.addLiveIn(PhysReg, &RC);
752     if (RegTy.isValid())
753       MRI.setType(LiveIn, RegTy);
754   }
755 
756   BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
757     .addReg(PhysReg);
758   if (!EntryMBB.isLiveIn(PhysReg))
759     EntryMBB.addLiveIn(PhysReg);
760   return LiveIn;
761 }
762 
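// For example, folding G_SEXT_INREG of the s32 constant 0xFF with Imm = 8
// truncates it to the 8-bit value -1 and sign-extends that back to 32 bits,
// yielding 0xFFFFFFFF.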
763 std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode,
764                                              const Register Op1, uint64_t Imm,
765                                              const MachineRegisterInfo &MRI) {
766   auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
767   if (MaybeOp1Cst) {
768     switch (Opcode) {
769     default:
770       break;
771     case TargetOpcode::G_SEXT_INREG: {
772       LLT Ty = MRI.getType(Op1);
773       return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
774     }
775     }
776   }
777   return std::nullopt;
778 }
779 
780 std::optional<APFloat>
781 llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
782                              const MachineRegisterInfo &MRI) {
783   assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
784   if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
785     APFloat DstVal(getFltSemanticForLLT(DstTy));
786     DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
787                             APFloat::rmNearestTiesToEven);
788     return DstVal;
789   }
790   return std::nullopt;
791 }
792 
793 std::optional<SmallVector<unsigned>>
794 llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
795   LLT Ty = MRI.getType(Src);
796   SmallVector<unsigned> FoldedCTLZs;
797   auto tryFoldScalar = [&](Register R) -> std::optional<unsigned> {
798     auto MaybeCst = getIConstantVRegVal(R, MRI);
799     if (!MaybeCst)
800       return std::nullopt;
801     return MaybeCst->countLeadingZeros();
802   };
803   if (Ty.isVector()) {
804     // Try to constant fold each element.
805     auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
806     if (!BV)
807       return std::nullopt;
808     for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
809       if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
810         FoldedCTLZs.emplace_back(*MaybeFold);
811         continue;
812       }
813       return std::nullopt;
814     }
815     return FoldedCTLZs;
816   }
817   if (auto MaybeCst = tryFoldScalar(Src)) {
818     FoldedCTLZs.emplace_back(*MaybeCst);
819     return FoldedCTLZs;
820   }
821   return std::nullopt;
822 }
823 
824 bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
825                                   GISelKnownBits *KB) {
826   std::optional<DefinitionAndSourceRegister> DefSrcReg =
827       getDefSrcRegIgnoringCopies(Reg, MRI);
828   if (!DefSrcReg)
829     return false;
830 
831   const MachineInstr &MI = *DefSrcReg->MI;
832   const LLT Ty = MRI.getType(Reg);
833 
834   switch (MI.getOpcode()) {
835   case TargetOpcode::G_CONSTANT: {
836     unsigned BitWidth = Ty.getScalarSizeInBits();
837     const ConstantInt *CI = MI.getOperand(1).getCImm();
838     return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
839   }
840   case TargetOpcode::G_SHL: {
841     // A left-shift of a constant one will have exactly one bit set because
842     // shifting the bit off the end is undefined.
843 
844     // TODO: Constant splat
845     if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
846       if (*ConstLHS == 1)
847         return true;
848     }
849 
850     break;
851   }
852   case TargetOpcode::G_LSHR: {
853     if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
854       if (ConstLHS->isSignMask())
855         return true;
856     }
857 
858     break;
859   }
860   case TargetOpcode::G_BUILD_VECTOR: {
861     // TODO: Probably should have a recursion depth guard since you could have
862     // bitcasted vector elements.
863     for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
864       if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB))
865         return false;
866 
867     return true;
868   }
869   case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
870     // Only handle constants since we would need to know if the number of
871     // leading zeros is greater than the truncation amount.
872     const unsigned BitWidth = Ty.getScalarSizeInBits();
873     for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
874       auto Const = getIConstantVRegVal(MO.getReg(), MRI);
875       if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
876         return false;
877     }
878 
879     return true;
880   }
881   default:
882     break;
883   }
884 
885   if (!KB)
886     return false;
887 
888   // More could be done here, though the above checks are enough
889   // to handle some common cases.
890 
891   // Fall back to computeKnownBits to catch other known cases.
892   KnownBits Known = KB->getKnownBits(Reg);
893   return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
894 }
895 
896 void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
897   AU.addPreserved<StackProtector>();
898 }
899 
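// Illustrative results of the rules below:
//   getLCMType(s32, s64)             -> s64
//   getLCMType(<2 x s32>, <3 x s32>) -> <6 x s32>
//   getLCMType(p0, s32)              -> p0 (assuming a 64-bit p0, so the LCM
//                                           of the sizes is the pointer size)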
900 LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
901   const unsigned OrigSize = OrigTy.getSizeInBits();
902   const unsigned TargetSize = TargetTy.getSizeInBits();
903 
904   if (OrigSize == TargetSize)
905     return OrigTy;
906 
907   if (OrigTy.isVector()) {
908     const LLT OrigElt = OrigTy.getElementType();
909 
910     if (TargetTy.isVector()) {
911       const LLT TargetElt = TargetTy.getElementType();
912 
913       if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
914         int GCDElts =
915             std::gcd(OrigTy.getNumElements(), TargetTy.getNumElements());
916         // Prefer the original element type.
917         ElementCount Mul = OrigTy.getElementCount() * TargetTy.getNumElements();
918         return LLT::vector(Mul.divideCoefficientBy(GCDElts),
919                            OrigTy.getElementType());
920       }
921     } else {
922       if (OrigElt.getSizeInBits() == TargetSize)
923         return OrigTy;
924     }
925 
926     unsigned LCMSize = std::lcm(OrigSize, TargetSize);
927     return LLT::fixed_vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
928   }
929 
930   if (TargetTy.isVector()) {
931     unsigned LCMSize = std::lcm(OrigSize, TargetSize);
932     return LLT::fixed_vector(LCMSize / OrigSize, OrigTy);
933   }
934 
935   unsigned LCMSize = std::lcm(OrigSize, TargetSize);
936 
937   // Preserve pointer types.
938   if (LCMSize == OrigSize)
939     return OrigTy;
940   if (LCMSize == TargetSize)
941     return TargetTy;
942 
943   return LLT::scalar(LCMSize);
944 }
945 
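// For example, getCoverTy(<5 x s16>, <4 x s16>) -> <8 x s16>: the element
// count is rounded up to a multiple of the target's element count.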
946 LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
947   if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
948       (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
949     return getLCMType(OrigTy, TargetTy);
950 
951   unsigned OrigTyNumElts = OrigTy.getNumElements();
952   unsigned TargetTyNumElts = TargetTy.getNumElements();
953   if (OrigTyNumElts % TargetTyNumElts == 0)
954     return OrigTy;
955 
956   unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
957   return LLT::scalarOrVector(ElementCount::getFixed(NumElts),
958                              OrigTy.getElementType());
959 }
960 
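// Illustrative results of the rules below:
//   getGCDType(s64, s32)             -> s32
//   getGCDType(<4 x s32>, <2 x s32>) -> <2 x s32>
//   getGCDType(<3 x s32>, s64)       -> s32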
961 LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
962   const unsigned OrigSize = OrigTy.getSizeInBits();
963   const unsigned TargetSize = TargetTy.getSizeInBits();
964 
965   if (OrigSize == TargetSize)
966     return OrigTy;
967 
968   if (OrigTy.isVector()) {
969     LLT OrigElt = OrigTy.getElementType();
970     if (TargetTy.isVector()) {
971       LLT TargetElt = TargetTy.getElementType();
972       if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
973         int GCD = std::gcd(OrigTy.getNumElements(), TargetTy.getNumElements());
974         return LLT::scalarOrVector(ElementCount::getFixed(GCD), OrigElt);
975       }
976     } else {
977       // If the source is a vector of pointers, return a pointer element.
978       if (OrigElt.getSizeInBits() == TargetSize)
979         return OrigElt;
980     }
981 
982     unsigned GCD = std::gcd(OrigSize, TargetSize);
983     if (GCD == OrigElt.getSizeInBits())
984       return OrigElt;
985 
986     // If we can't produce the original element type, we have to use a smaller
987     // scalar.
988     if (GCD < OrigElt.getSizeInBits())
989       return LLT::scalar(GCD);
990     return LLT::fixed_vector(GCD / OrigElt.getSizeInBits(), OrigElt);
991   }
992 
993   if (TargetTy.isVector()) {
994     // Try to preserve the original element type.
995     LLT TargetElt = TargetTy.getElementType();
996     if (TargetElt.getSizeInBits() == OrigSize)
997       return OrigTy;
998   }
999 
1000   unsigned GCD = std::gcd(OrigSize, TargetSize);
1001   return LLT::scalar(GCD);
1002 }
1003 
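// For example, a shuffle mask of (2, -1, 2, 2) is a splat of index 2, while an
// all-undef mask is treated as a splat of index 0.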
1004 std::optional<int> llvm::getSplatIndex(MachineInstr &MI) {
1005   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
1006          "Only G_SHUFFLE_VECTOR can have a splat index!");
1007   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
1008   auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
1009 
1010   // If all elements are undefined, this shuffle can be considered a splat.
1011   // Return 0 to give callers a better chance to simplify.
1012   if (FirstDefinedIdx == Mask.end())
1013     return 0;
1014 
1015   // Make sure all remaining elements are either undef or the same
1016   // as the first non-undef value.
1017   int SplatValue = *FirstDefinedIdx;
1018   if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
1019              [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
1020     return std::nullopt;
1021 
1022   return SplatValue;
1023 }
1024 
1025 static bool isBuildVectorOp(unsigned Opcode) {
1026   return Opcode == TargetOpcode::G_BUILD_VECTOR ||
1027          Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
1028 }
1029 
1030 namespace {
1031 
1032 std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
1033                                                 const MachineRegisterInfo &MRI,
1034                                                 bool AllowUndef) {
1035   MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
1036   if (!MI)
1037     return std::nullopt;
1038 
1039   if (!isBuildVectorOp(MI->getOpcode()))
1040     return std::nullopt;
1041 
1042   std::optional<ValueAndVReg> SplatValAndReg;
1043   for (MachineOperand &Op : MI->uses()) {
1044     Register Element = Op.getReg();
1045     auto ElementValAndReg =
1046         getAnyConstantVRegValWithLookThrough(Element, MRI, true, true);
1047 
1048     // If AllowUndef, treat undef as a value that will result in a constant splat.
1049     if (!ElementValAndReg) {
1050       if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
1051         continue;
1052       return std::nullopt;
1053     }
1054 
1055     // Record splat value
1056     if (!SplatValAndReg)
1057       SplatValAndReg = ElementValAndReg;
1058 
1059     // Different constant than the one already recorded, not a constant splat.
1060     if (SplatValAndReg->Value != ElementValAndReg->Value)
1061       return std::nullopt;
1062   }
1063 
1064   return SplatValAndReg;
1065 }
1066 
1067 } // end anonymous namespace
1068 
1069 bool llvm::isBuildVectorConstantSplat(const Register Reg,
1070                                       const MachineRegisterInfo &MRI,
1071                                       int64_t SplatValue, bool AllowUndef) {
1072   if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
1073     return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
1074   return false;
1075 }
1076 
1077 bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
1078                                       const MachineRegisterInfo &MRI,
1079                                       int64_t SplatValue, bool AllowUndef) {
1080   return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
1081                                     AllowUndef);
1082 }
1083 
1084 std::optional<APInt>
1085 llvm::getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI) {
1086   if (auto SplatValAndReg =
1087           getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) {
1088     std::optional<ValueAndVReg> ValAndVReg =
1089         getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1090     return ValAndVReg->Value;
1091   }
1092 
1093   return std::nullopt;
1094 }
1095 
1096 std::optional<APInt>
1097 llvm::getIConstantSplatVal(const MachineInstr &MI,
1098                            const MachineRegisterInfo &MRI) {
1099   return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
1100 }
1101 
1102 std::optional<int64_t>
1103 llvm::getIConstantSplatSExtVal(const Register Reg,
1104                                const MachineRegisterInfo &MRI) {
1105   if (auto SplatValAndReg =
1106           getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false))
1107     return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
1108   return std::nullopt;
1109 }
1110 
1111 std::optional<int64_t>
1112 llvm::getIConstantSplatSExtVal(const MachineInstr &MI,
1113                                const MachineRegisterInfo &MRI) {
1114   return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
1115 }
1116 
1117 std::optional<FPValueAndVReg>
1118 llvm::getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI,
1119                         bool AllowUndef) {
1120   if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
1121     return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1122   return std::nullopt;
1123 }
1124 
1125 bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
1126                                  const MachineRegisterInfo &MRI,
1127                                  bool AllowUndef) {
1128   return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
1129 }
1130 
1131 bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
1132                                 const MachineRegisterInfo &MRI,
1133                                 bool AllowUndef) {
1134   return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
1135 }
1136 
1137 std::optional<RegOrConstant>
1138 llvm::getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI) {
1139   unsigned Opc = MI.getOpcode();
1140   if (!isBuildVectorOp(Opc))
1141     return std::nullopt;
1142   if (auto Splat = getIConstantSplatSExtVal(MI, MRI))
1143     return RegOrConstant(*Splat);
1144   auto Reg = MI.getOperand(1).getReg();
1145   if (any_of(make_range(MI.operands_begin() + 2, MI.operands_end()),
1146              [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
1147     return std::nullopt;
1148   return RegOrConstant(Reg);
1149 }
1150 
1151 static bool isConstantScalar(const MachineInstr &MI,
1152                              const MachineRegisterInfo &MRI,
1153                              bool AllowFP = true,
1154                              bool AllowOpaqueConstants = true) {
1155   switch (MI.getOpcode()) {
1156   case TargetOpcode::G_CONSTANT:
1157   case TargetOpcode::G_IMPLICIT_DEF:
1158     return true;
1159   case TargetOpcode::G_FCONSTANT:
1160     return AllowFP;
1161   case TargetOpcode::G_GLOBAL_VALUE:
1162   case TargetOpcode::G_FRAME_INDEX:
1163   case TargetOpcode::G_BLOCK_ADDR:
1164   case TargetOpcode::G_JUMP_TABLE:
1165     return AllowOpaqueConstants;
1166   default:
1167     return false;
1168   }
1169 }
1170 
1171 bool llvm::isConstantOrConstantVector(MachineInstr &MI,
1172                                       const MachineRegisterInfo &MRI) {
1173   Register Def = MI.getOperand(0).getReg();
1174   if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1175     return true;
1176   GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
1177   if (!BV)
1178     return false;
1179   for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1180     if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
1181         getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
1182       continue;
1183     return false;
1184   }
1185   return true;
1186 }
1187 
1188 bool llvm::isConstantOrConstantVector(const MachineInstr &MI,
1189                                       const MachineRegisterInfo &MRI,
1190                                       bool AllowFP, bool AllowOpaqueConstants) {
1191   if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
1192     return true;
1193 
1194   if (!isBuildVectorOp(MI.getOpcode()))
1195     return false;
1196 
1197   const unsigned NumOps = MI.getNumOperands();
1198   for (unsigned I = 1; I != NumOps; ++I) {
1199     const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
1200     if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
1201       return false;
1202   }
1203 
1204   return true;
1205 }
1206 
1207 std::optional<APInt>
1208 llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
1209                                       const MachineRegisterInfo &MRI) {
1210   Register Def = MI.getOperand(0).getReg();
1211   if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1212     return C->Value;
1213   auto MaybeCst = getIConstantSplatSExtVal(MI, MRI);
1214   if (!MaybeCst)
1215     return std::nullopt;
1216   const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
1217   return APInt(ScalarSize, *MaybeCst, true);
1218 }
1219 
1220 bool llvm::isNullOrNullSplat(const MachineInstr &MI,
1221                              const MachineRegisterInfo &MRI, bool AllowUndefs) {
1222   switch (MI.getOpcode()) {
1223   case TargetOpcode::G_IMPLICIT_DEF:
1224     return AllowUndefs;
1225   case TargetOpcode::G_CONSTANT:
1226     return MI.getOperand(1).getCImm()->isNullValue();
1227   case TargetOpcode::G_FCONSTANT: {
1228     const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
1229     return FPImm->isZero() && !FPImm->isNegative();
1230   }
1231   default:
1232     if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
1233       return false;
1234     return isBuildVectorAllZeros(MI, MRI);
1235   }
1236 }
1237 
1238 bool llvm::isAllOnesOrAllOnesSplat(const MachineInstr &MI,
1239                                    const MachineRegisterInfo &MRI,
1240                                    bool AllowUndefs) {
1241   switch (MI.getOpcode()) {
1242   case TargetOpcode::G_IMPLICIT_DEF:
1243     return AllowUndefs;
1244   case TargetOpcode::G_CONSTANT:
1245     return MI.getOperand(1).getCImm()->isAllOnesValue();
1246   default:
1247     if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
1248       return false;
1249     return isBuildVectorAllOnes(MI, MRI);
1250   }
1251 }
1252 
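// A minimal usage sketch: test whether Reg is a constant, or a build_vector of
// constants, in which every element is non-zero:
//   matchUnaryPredicate(MRI, Reg, [](const Constant *C) {
//     return C && !C->isNullValue();
//   });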
1253 bool llvm::matchUnaryPredicate(
1254     const MachineRegisterInfo &MRI, Register Reg,
1255     std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
1256 
1257   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
1258   if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1259     return Match(nullptr);
1260 
1261   // TODO: Also handle fconstant
1262   if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1263     return Match(Def->getOperand(1).getCImm());
1264 
1265   if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
1266     return false;
1267 
1268   for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
1269     Register SrcElt = Def->getOperand(I).getReg();
1270     const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
1271     if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
1272       if (!Match(nullptr))
1273         return false;
1274       continue;
1275     }
1276 
1277     if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
1278         !Match(SrcDef->getOperand(1).getCImm()))
1279       return false;
1280   }
1281 
1282   return true;
1283 }
1284 
1285 bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
1286                           bool IsFP) {
1287   switch (TLI.getBooleanContents(IsVector, IsFP)) {
1288   case TargetLowering::UndefinedBooleanContent:
1289     return Val & 0x1;
1290   case TargetLowering::ZeroOrOneBooleanContent:
1291     return Val == 1;
1292   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1293     return Val == -1;
1294   }
1295   llvm_unreachable("Invalid boolean contents");
1296 }
1297 
1298 bool llvm::isConstFalseVal(const TargetLowering &TLI, int64_t Val,
1299                            bool IsVector, bool IsFP) {
1300   switch (TLI.getBooleanContents(IsVector, IsFP)) {
1301   case TargetLowering::UndefinedBooleanContent:
1302     return ~Val & 0x1;
1303   case TargetLowering::ZeroOrOneBooleanContent:
1304   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1305     return Val == 0;
1306   }
1307   llvm_unreachable("Invalid boolean contents");
1308 }
1309 
1310 int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
1311                              bool IsFP) {
1312   switch (TLI.getBooleanContents(IsVector, IsFP)) {
1313   case TargetLowering::UndefinedBooleanContent:
1314   case TargetLowering::ZeroOrOneBooleanContent:
1315     return 1;
1316   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1317     return -1;
1318   }
1319   llvm_unreachable("Invalid boolean contents");
1320 }
1321 
1322 bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
1323                             ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
1324   const auto &F = MBB.getParent()->getFunction();
1325   return F.hasOptSize() || F.hasMinSize() ||
1326          llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
1327 }
1328 
1329 void llvm::saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
1330                             LostDebugLocObserver *LocObserver,
1331                             SmallInstListTy &DeadInstChain) {
1332   for (MachineOperand &Op : MI.uses()) {
1333     if (Op.isReg() && Op.getReg().isVirtual())
1334       DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
1335   }
1336   LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
1337   DeadInstChain.remove(&MI);
1338   MI.eraseFromParent();
1339   if (LocObserver)
1340     LocObserver->checkpoint(false);
1341 }
1342 
1343 void llvm::eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs,
1344                        MachineRegisterInfo &MRI,
1345                        LostDebugLocObserver *LocObserver) {
1346   SmallInstListTy DeadInstChain;
1347   for (MachineInstr *MI : DeadInstrs)
1348     saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);
1349 
1350   while (!DeadInstChain.empty()) {
1351     MachineInstr *Inst = DeadInstChain.pop_back_val();
1352     if (!isTriviallyDead(*Inst, MRI))
1353       continue;
1354     saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
1355   }
1356 }
1357 
1358 void llvm::eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
1359                       LostDebugLocObserver *LocObserver) {
1360   return eraseInstrs({&MI}, MRI, LocObserver);
1361 }
1362 
1363 void llvm::salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI) {
1364   for (auto &Def : MI.defs()) {
1365     assert(Def.isReg() && "Must be a reg");
1366 
1367     SmallVector<MachineOperand *, 16> DbgUsers;
1368     for (auto &MOUse : MRI.use_operands(Def.getReg())) {
1369       MachineInstr *DbgValue = MOUse.getParent();
1370       // Ignore partially formed DBG_VALUEs.
1371       if (DbgValue->isNonListDebugValue() && DbgValue->getNumOperands() == 4) {
1372         DbgUsers.push_back(&MOUse);
1373       }
1374     }
1375 
1376     if (!DbgUsers.empty()) {
1377       salvageDebugInfoForDbgValue(MRI, MI, DbgUsers);
1378     }
1379   }
1380 }
1381