1 //===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file This file implements the utility functions used by the GlobalISel
9 /// pipeline.
10 //===----------------------------------------------------------------------===//
11 
12 #include "llvm/CodeGen/GlobalISel/Utils.h"
13 #include "llvm/ADT/APFloat.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/Optional.h"
16 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
17 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
18 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
19 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
20 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
21 #include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
22 #include "llvm/CodeGen/MachineInstr.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
25 #include "llvm/CodeGen/MachineSizeOpts.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/StackProtector.h"
28 #include "llvm/CodeGen/TargetInstrInfo.h"
29 #include "llvm/CodeGen/TargetLowering.h"
30 #include "llvm/CodeGen/TargetPassConfig.h"
31 #include "llvm/CodeGen/TargetRegisterInfo.h"
32 #include "llvm/IR/Constants.h"
33 #include "llvm/Target/TargetMachine.h"
34 
35 #define DEBUG_TYPE "globalisel-utils"
36 
37 using namespace llvm;
38 using namespace MIPatternMatch;
39 
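// Constrain Reg to RegClass if possible; if the existing class or bank is
// incompatible, a fresh virtual register of RegClass is returned instead and
// the caller is expected to bridge the two registers with a COPY.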
40 Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
41                                    const TargetInstrInfo &TII,
42                                    const RegisterBankInfo &RBI, Register Reg,
43                                    const TargetRegisterClass &RegClass) {
44   if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
45     return MRI.createVirtualRegister(&RegClass);
46 
47   return Reg;
48 }
49 
50 Register llvm::constrainOperandRegClass(
51     const MachineFunction &MF, const TargetRegisterInfo &TRI,
52     MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
53     const RegisterBankInfo &RBI, MachineInstr &InsertPt,
54     const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
55   Register Reg = RegMO.getReg();
56   // Assume physical registers are properly constrained.
57   assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
58 
59   Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
60   // If we created a new virtual register because the class is not compatible
61   // then create a copy between the new and the old register.
62   if (ConstrainedReg != Reg) {
63     MachineBasicBlock::iterator InsertIt(&InsertPt);
64     MachineBasicBlock &MBB = *InsertPt.getParent();
65     if (RegMO.isUse()) {
66       BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
67               TII.get(TargetOpcode::COPY), ConstrainedReg)
68           .addReg(Reg);
69     } else {
70       assert(RegMO.isDef() && "Must be a definition");
71       BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
72               TII.get(TargetOpcode::COPY), Reg)
73           .addReg(ConstrainedReg);
74     }
75     if (GISelChangeObserver *Observer = MF.getObserver()) {
76       Observer->changingInstr(*RegMO.getParent());
77     }
78     RegMO.setReg(ConstrainedReg);
79     if (GISelChangeObserver *Observer = MF.getObserver()) {
80       Observer->changedInstr(*RegMO.getParent());
81     }
82   } else {
83     if (GISelChangeObserver *Observer = MF.getObserver()) {
84       if (!RegMO.isDef()) {
85         MachineInstr *RegDef = MRI.getVRegDef(Reg);
86         Observer->changedInstr(*RegDef);
87       }
88       Observer->changingAllUsesOfReg(MRI, Reg);
89       Observer->finishedChangingAllUsesOfReg();
90     }
91   }
92   return ConstrainedReg;
93 }
94 
95 Register llvm::constrainOperandRegClass(
96     const MachineFunction &MF, const TargetRegisterInfo &TRI,
97     MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
98     const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
99     MachineOperand &RegMO, unsigned OpIdx) {
100   Register Reg = RegMO.getReg();
101   // Assume physical registers are properly constrained.
102   assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");
103 
104   const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
105   // Some of the target independent instructions, like COPY, may not impose any
106   // register class constraints on some of their operands: if it's a use, we can
107   // skip constraining as the instruction defining the register would constrain
108   // it.
109 
110   // We can't constrain unallocatable register classes, because we can't create
111   // virtual registers for these classes, so we need to let targets handle this
112   // case.
113   if (RegClass && !RegClass->isAllocatable())
114     RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);
115 
116   if (!RegClass) {
117     assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
118            "Register class constraint is required unless either the "
119            "instruction is target independent or the operand is a use");
120     // FIXME: Just bailing out like this here may not be enough, unless we
121     // expect the users of this function to do the right thing for PHIs and
122     // COPY:
123     //   v1 = COPY v0
124     //   v2 = COPY v1
125     // v1 here may end up not being constrained at all. Note that to
126     // reproduce the issue we likely need a destination pattern of a selection
127     // rule producing such extra copies, not just an input GMIR with them as
128     // every existing target using selectImpl handles copies before calling it
129     // and they never reach this function.
130     return Reg;
131   }
132   return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *RegClass,
133                                   RegMO);
134 }
135 
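// Constrain every explicit register operand of an already-selected instruction
// to the class required by its MCInstrDesc (inserting COPYs where needed) and
// tie uses to defs as the descriptor dictates.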
136 bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
137                                             const TargetInstrInfo &TII,
138                                             const TargetRegisterInfo &TRI,
139                                             const RegisterBankInfo &RBI) {
140   assert(!isPreISelGenericOpcode(I.getOpcode()) &&
141          "A selected instruction is expected");
142   MachineBasicBlock &MBB = *I.getParent();
143   MachineFunction &MF = *MBB.getParent();
144   MachineRegisterInfo &MRI = MF.getRegInfo();
145 
146   for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
147     MachineOperand &MO = I.getOperand(OpI);
148 
149     // There's nothing to be done on non-register operands.
150     if (!MO.isReg())
151       continue;
152 
153     LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
154     assert(MO.isReg() && "Unsupported non-reg operand");
155 
156     Register Reg = MO.getReg();
157     // Physical registers don't need to be constrained.
158     if (Register::isPhysicalRegister(Reg))
159       continue;
160 
161     // Register operands with a value of 0 (e.g. predicate operands) don't need
162     // to be constrained.
163     if (Reg == 0)
164       continue;
165 
166     // If the operand is a vreg, we should constrain its regclass, and only
167     // insert COPYs if that's impossible.
168     // constrainOperandRegClass does that for us.
169     constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);
170 
171     // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
172     // done.
173     if (MO.isUse()) {
174       int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
175       if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
176         I.tieOperands(DefIdx, OpI);
177     }
178   }
179   return true;
180 }
181 
182 bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
183                          MachineRegisterInfo &MRI) {
184   // Give up if either DstReg or SrcReg is a physical register.
185   if (DstReg.isPhysical() || SrcReg.isPhysical())
186     return false;
187   // Give up if the types don't match.
188   if (MRI.getType(DstReg) != MRI.getType(SrcReg))
189     return false;
190   // Replace if either DstReg has no constraints or the register
191   // constraints match.
192   return !MRI.getRegClassOrRegBank(DstReg) ||
193          MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
194 }
195 
196 bool llvm::isTriviallyDead(const MachineInstr &MI,
197                            const MachineRegisterInfo &MRI) {
198   // FIXME: This logic is mostly duplicated with
199   // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
200   // MachineInstr::isLabel?
201 
202   // Don't delete frame allocation labels.
203   if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
204     return false;
205   // LIFETIME markers should be preserved even if they seem dead.
206   if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
207       MI.getOpcode() == TargetOpcode::LIFETIME_END)
208     return false;
209 
210   // If we can move an instruction, we can remove it.  Otherwise, it has
211   // a side-effect of some sort.
212   bool SawStore = false;
213   if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
214     return false;
215 
216   // Instructions without side-effects are dead iff they only define dead vregs.
217   for (auto &MO : MI.operands()) {
218     if (!MO.isReg() || !MO.isDef())
219       continue;
220 
221     Register Reg = MO.getReg();
222     if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
223       return false;
224   }
225   return true;
226 }
227 
228 static void reportGISelDiagnostic(DiagnosticSeverity Severity,
229                                   MachineFunction &MF,
230                                   const TargetPassConfig &TPC,
231                                   MachineOptimizationRemarkEmitter &MORE,
232                                   MachineOptimizationRemarkMissed &R) {
233   bool IsFatal = Severity == DS_Error &&
234                  TPC.isGlobalISelAbortEnabled();
235   // Print the function name explicitly if we don't have a debug location (which
236   // makes the diagnostic less useful) or if we're going to emit a raw error.
237   if (!R.getLocation().isValid() || IsFatal)
238     R << (" (in function: " + MF.getName() + ")").str();
239 
240   if (IsFatal)
241     report_fatal_error(Twine(R.getMsg()));
242   else
243     MORE.emit(R);
244 }
245 
246 void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
247                               MachineOptimizationRemarkEmitter &MORE,
248                               MachineOptimizationRemarkMissed &R) {
249   reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
250 }
251 
252 void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
253                               MachineOptimizationRemarkEmitter &MORE,
254                               MachineOptimizationRemarkMissed &R) {
255   MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
256   reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
257 }
258 
259 void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
260                               MachineOptimizationRemarkEmitter &MORE,
261                               const char *PassName, StringRef Msg,
262                               const MachineInstr &MI) {
263   MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
264                                     MI.getDebugLoc(), MI.getParent());
265   R << Msg;
266   // Printing MI is expensive; only do it if expensive remarks are enabled.
267   if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
268     R << ": " << ore::MNV("Inst", MI);
269   reportGISelFailure(MF, TPC, MORE, R);
270 }
271 
272 Optional<APInt> llvm::getIConstantVRegVal(Register VReg,
273                                           const MachineRegisterInfo &MRI) {
274   Optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
275       VReg, MRI, /*LookThroughInstrs*/ false);
276   assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
277          "Value found while looking through instrs");
278   if (!ValAndVReg)
279     return None;
280   return ValAndVReg->Value;
281 }
282 
283 Optional<int64_t>
284 llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
285   Optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
286   if (Val && Val->getBitWidth() <= 64)
287     return Val->getSExtValue();
288   return None;
289 }
290 
291 namespace {
292 
293 typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
294 typedef std::function<Optional<APInt>(const MachineInstr *MI)> GetAPCstFn;
295 
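// Walk from VReg through COPY, G_TRUNC, G_SEXT, G_ZEXT, (optionally) G_ANYEXT
// and G_INTTOPTR until a constant-defining instruction is found (as decided by
// IsConstantOpcode), then replay the recorded size changes on the constant.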
296 Optional<ValueAndVReg> getConstantVRegValWithLookThrough(
297     Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
298     GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
299     bool LookThroughAnyExt = false) {
300   SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
301   MachineInstr *MI;
302 
303   while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
304          LookThroughInstrs) {
305     switch (MI->getOpcode()) {
306     case TargetOpcode::G_ANYEXT:
307       if (!LookThroughAnyExt)
308         return None;
309       LLVM_FALLTHROUGH;
310     case TargetOpcode::G_TRUNC:
311     case TargetOpcode::G_SEXT:
312     case TargetOpcode::G_ZEXT:
313       SeenOpcodes.push_back(std::make_pair(
314           MI->getOpcode(),
315           MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
316       VReg = MI->getOperand(1).getReg();
317       break;
318     case TargetOpcode::COPY:
319       VReg = MI->getOperand(1).getReg();
320       if (Register::isPhysicalRegister(VReg))
321         return None;
322       break;
323     case TargetOpcode::G_INTTOPTR:
324       VReg = MI->getOperand(1).getReg();
325       break;
326     default:
327       return None;
328     }
329   }
330   if (!MI || !IsConstantOpcode(MI))
331     return None;
332 
333   Optional<APInt> MaybeVal = getAPCstValue(MI);
334   if (!MaybeVal)
335     return None;
336   APInt &Val = *MaybeVal;
337   while (!SeenOpcodes.empty()) {
338     std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
339     switch (OpcodeAndSize.first) {
340     case TargetOpcode::G_TRUNC:
341       Val = Val.trunc(OpcodeAndSize.second);
342       break;
343     case TargetOpcode::G_ANYEXT:
344     case TargetOpcode::G_SEXT:
345       Val = Val.sext(OpcodeAndSize.second);
346       break;
347     case TargetOpcode::G_ZEXT:
348       Val = Val.zext(OpcodeAndSize.second);
349       break;
350     }
351   }
352 
353   return ValueAndVReg{Val, VReg};
354 }
355 
356 bool isIConstant(const MachineInstr *MI) {
357   if (!MI)
358     return false;
359   return MI->getOpcode() == TargetOpcode::G_CONSTANT;
360 }
361 
362 bool isFConstant(const MachineInstr *MI) {
363   if (!MI)
364     return false;
365   return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
366 }
367 
368 bool isAnyConstant(const MachineInstr *MI) {
369   if (!MI)
370     return false;
371   unsigned Opc = MI->getOpcode();
372   return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
373 }
374 
375 Optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
376   const MachineOperand &CstVal = MI->getOperand(1);
377   if (CstVal.isCImm())
378     return CstVal.getCImm()->getValue();
379   return None;
380 }
381 
382 Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
383   const MachineOperand &CstVal = MI->getOperand(1);
384   if (CstVal.isCImm())
385     return CstVal.getCImm()->getValue();
386   if (CstVal.isFPImm())
387     return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
388   return None;
389 }
390 
391 } // end anonymous namespace
392 
393 Optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
394     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
395   return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
396                                            getCImmAsAPInt, LookThroughInstrs);
397 }
398 
399 Optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
400     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
401     bool LookThroughAnyExt) {
402   return getConstantVRegValWithLookThrough(
403       VReg, MRI, isAnyConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs,
404       LookThroughAnyExt);
405 }
406 
407 Optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
408     Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
409   auto Reg = getConstantVRegValWithLookThrough(
410       VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
411   if (!Reg)
412     return None;
413   return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
414                         Reg->VReg};
415 }
416 
417 const ConstantFP *
418 llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
419   MachineInstr *MI = MRI.getVRegDef(VReg);
420   if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
421     return nullptr;
422   return MI->getOperand(1).getFPImm();
423 }
424 
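// Look through COPYs and generic optimization hints (e.g. G_ASSERT_ZEXT) to
// find the underlying defining instruction and the source register feeding the
// last look-through, as long as the types involved stay valid.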
425 Optional<DefinitionAndSourceRegister>
426 llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
427   Register DefSrcReg = Reg;
428   auto *DefMI = MRI.getVRegDef(Reg);
429   auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
430   if (!DstTy.isValid())
431     return None;
432   unsigned Opc = DefMI->getOpcode();
433   while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
434     Register SrcReg = DefMI->getOperand(1).getReg();
435     auto SrcTy = MRI.getType(SrcReg);
436     if (!SrcTy.isValid())
437       break;
438     DefMI = MRI.getVRegDef(SrcReg);
439     DefSrcReg = SrcReg;
440     Opc = DefMI->getOpcode();
441   }
442   return DefinitionAndSourceRegister{DefMI, DefSrcReg};
443 }
444 
445 MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
446                                          const MachineRegisterInfo &MRI) {
447   Optional<DefinitionAndSourceRegister> DefSrcReg =
448       getDefSrcRegIgnoringCopies(Reg, MRI);
449   return DefSrcReg ? DefSrcReg->MI : nullptr;
450 }
451 
452 Register llvm::getSrcRegIgnoringCopies(Register Reg,
453                                        const MachineRegisterInfo &MRI) {
454   Optional<DefinitionAndSourceRegister> DefSrcReg =
455       getDefSrcRegIgnoringCopies(Reg, MRI);
456   return DefSrcReg ? DefSrcReg->Reg : Register();
457 }
458 
459 MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
460                                  const MachineRegisterInfo &MRI) {
461   MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
462   return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
463 }
464 
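// Build an APFloat of the requested bit width (16, 32 or 64) from a host
// double, rounding to IEEE half when Size is 16.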
465 APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
466   if (Size == 32)
467     return APFloat(float(Val));
468   if (Size == 64)
469     return APFloat(Val);
470   if (Size != 16)
471     llvm_unreachable("Unsupported FPConstant size");
472   bool Ignored;
473   APFloat APF(Val);
474   APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
475   return APF;
476 }
477 
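// Fold an integer binary operation over two constant virtual registers,
// returning None when either operand is not constant or the fold is undefined
// (e.g. division or remainder by zero).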
478 Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
479                                         const Register Op2,
480                                         const MachineRegisterInfo &MRI) {
481   auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
482   if (!MaybeOp2Cst)
483     return None;
484 
485   auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
486   if (!MaybeOp1Cst)
487     return None;
488 
489   const APInt &C1 = MaybeOp1Cst->Value;
490   const APInt &C2 = MaybeOp2Cst->Value;
491   switch (Opcode) {
492   default:
493     break;
494   case TargetOpcode::G_ADD:
495     return C1 + C2;
496   case TargetOpcode::G_AND:
497     return C1 & C2;
498   case TargetOpcode::G_ASHR:
499     return C1.ashr(C2);
500   case TargetOpcode::G_LSHR:
501     return C1.lshr(C2);
502   case TargetOpcode::G_MUL:
503     return C1 * C2;
504   case TargetOpcode::G_OR:
505     return C1 | C2;
506   case TargetOpcode::G_SHL:
507     return C1 << C2;
508   case TargetOpcode::G_SUB:
509     return C1 - C2;
510   case TargetOpcode::G_XOR:
511     return C1 ^ C2;
512   case TargetOpcode::G_UDIV:
513     if (!C2.getBoolValue())
514       break;
515     return C1.udiv(C2);
516   case TargetOpcode::G_SDIV:
517     if (!C2.getBoolValue())
518       break;
519     return C1.sdiv(C2);
520   case TargetOpcode::G_UREM:
521     if (!C2.getBoolValue())
522       break;
523     return C1.urem(C2);
524   case TargetOpcode::G_SREM:
525     if (!C2.getBoolValue())
526       break;
527     return C1.srem(C2);
528   }
529 
530   return None;
531 }
532 
533 Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
534                                             const Register Op2,
535                                             const MachineRegisterInfo &MRI) {
536   const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
537   if (!Op2Cst)
538     return None;
539 
540   const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
541   if (!Op1Cst)
542     return None;
543 
544   APFloat C1 = Op1Cst->getValueAPF();
545   const APFloat &C2 = Op2Cst->getValueAPF();
546   switch (Opcode) {
547   case TargetOpcode::G_FADD:
548     C1.add(C2, APFloat::rmNearestTiesToEven);
549     return C1;
550   case TargetOpcode::G_FSUB:
551     C1.subtract(C2, APFloat::rmNearestTiesToEven);
552     return C1;
553   case TargetOpcode::G_FMUL:
554     C1.multiply(C2, APFloat::rmNearestTiesToEven);
555     return C1;
556   case TargetOpcode::G_FDIV:
557     C1.divide(C2, APFloat::rmNearestTiesToEven);
558     return C1;
559   case TargetOpcode::G_FREM:
560     C1.mod(C2);
561     return C1;
562   case TargetOpcode::G_FCOPYSIGN:
563     C1.copySign(C2);
564     return C1;
565   case TargetOpcode::G_FMINNUM:
566     return minnum(C1, C2);
567   case TargetOpcode::G_FMAXNUM:
568     return maxnum(C1, C2);
569   case TargetOpcode::G_FMINIMUM:
570     return minimum(C1, C2);
571   case TargetOpcode::G_FMAXIMUM:
572     return maximum(C1, C2);
573   case TargetOpcode::G_FMINNUM_IEEE:
574   case TargetOpcode::G_FMAXNUM_IEEE:
575     // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
576     // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
577     // and currently there isn't a nice wrapper in APFloat for the version with
578     // correct snan handling.
579     break;
580   default:
581     break;
582   }
583 
584   return None;
585 }
586 
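// Element-wise fold of a binary operation over two G_BUILD_VECTOR constants;
// the folded elements and the resulting build_vector are emitted through MIB.
// Returns None if either input is not a build_vector or any element fails to
// fold.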
587 Optional<MachineInstr *>
588 llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
589                               const Register Op2,
590                               const MachineRegisterInfo &MRI,
591                               MachineIRBuilder &MIB) {
592   auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
593   if (!SrcVec1)
594     return None;
595   auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
596   if (!SrcVec2)
597     return None;
598 
599   const LLT EltTy = MRI.getType(SrcVec1->getSourceReg(0));
600 
601   SmallVector<Register, 16> FoldedElements;
602   for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
603     auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
604                                       SrcVec2->getSourceReg(Idx), MRI);
605     if (!MaybeCst)
606       return None;
607     auto FoldedCstReg = MIB.buildConstant(EltTy, *MaybeCst).getReg(0);
608     FoldedElements.emplace_back(FoldedCstReg);
609   }
610   // Create the new vector constant.
611   auto CstVec =
612       MIB.buildBuildVector(MRI.getType(SrcVec1->getReg(0)), FoldedElements);
613   return &*CstVec;
614 }
615 
616 bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
617                            bool SNaN) {
618   const MachineInstr *DefMI = MRI.getVRegDef(Val);
619   if (!DefMI)
620     return false;
621 
622   const TargetMachine& TM = DefMI->getMF()->getTarget();
623   if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
624     return true;
625 
626   // If the value is a constant, we can obviously see if it is a NaN or not.
627   if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
628     return !FPVal->getValueAPF().isNaN() ||
629            (SNaN && !FPVal->getValueAPF().isSignaling());
630   }
631 
632   if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
633     for (const auto &Op : DefMI->uses())
634       if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
635         return false;
636     return true;
637   }
638 
639   switch (DefMI->getOpcode()) {
640   default:
641     break;
642   case TargetOpcode::G_FMINNUM_IEEE:
643   case TargetOpcode::G_FMAXNUM_IEEE: {
644     if (SNaN)
645       return true;
646     // This can return a NaN if either operand is an sNaN, or if both operands
647     // are NaN.
648     return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
649             isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
650            (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
651             isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
652   }
653   case TargetOpcode::G_FMINNUM:
654   case TargetOpcode::G_FMAXNUM: {
655     // Only one needs to be known not-nan, since it will be returned if the
656     // other ends up being one.
657     return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
658            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
659   }
660   }
661 
662   if (SNaN) {
663     // FP operations quiet signaling NaNs. For now, just handle the ones
664     // inserted during legalization.
665     switch (DefMI->getOpcode()) {
666     case TargetOpcode::G_FPEXT:
667     case TargetOpcode::G_FPTRUNC:
668     case TargetOpcode::G_FCANONICALIZE:
669       return true;
670     default:
671       return false;
672     }
673   }
674 
675   return false;
676 }
677 
678 Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
679                                   const MachinePointerInfo &MPO) {
680   auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
681   if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
682     MachineFrameInfo &MFI = MF.getFrameInfo();
683     return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
684                            MPO.Offset);
685   }
686 
687   if (const Value *V = MPO.V.dyn_cast<const Value *>()) {
688     const Module *M = MF.getFunction().getParent();
689     return V->getPointerAlignment(M->getDataLayout());
690   }
691 
692   return Align(1);
693 }
694 
695 Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
696                                         const TargetInstrInfo &TII,
697                                         MCRegister PhysReg,
698                                         const TargetRegisterClass &RC,
699                                         LLT RegTy) {
700   DebugLoc DL; // FIXME: Is no location the right choice?
701   MachineBasicBlock &EntryMBB = MF.front();
702   MachineRegisterInfo &MRI = MF.getRegInfo();
703   Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
704   if (LiveIn) {
705     MachineInstr *Def = MRI.getVRegDef(LiveIn);
706     if (Def) {
707       // FIXME: Should the verifier check this is in the entry block?
708       assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
709       return LiveIn;
710     }
711 
712     // It's possible the incoming argument register and copy were added during
713     // lowering, but later deleted due to being/becoming dead. If this happens,
714     // re-insert the copy.
715   } else {
716     // The live in register was not present, so add it.
717     LiveIn = MF.addLiveIn(PhysReg, &RC);
718     if (RegTy.isValid())
719       MRI.setType(LiveIn, RegTy);
720   }
721 
722   BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
723     .addReg(PhysReg);
724   if (!EntryMBB.isLiveIn(PhysReg))
725     EntryMBB.addLiveIn(PhysReg);
726   return LiveIn;
727 }
728 
729 Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
730                                         uint64_t Imm,
731                                         const MachineRegisterInfo &MRI) {
732   auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
733   if (MaybeOp1Cst) {
734     switch (Opcode) {
735     default:
736       break;
737     case TargetOpcode::G_SEXT_INREG: {
738       LLT Ty = MRI.getType(Op1);
739       return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
740     }
741     }
742   }
743   return None;
744 }
745 
746 Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
747                                                Register Src,
748                                                const MachineRegisterInfo &MRI) {
749   assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
750   if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
751     APFloat DstVal(getFltSemanticForLLT(DstTy));
752     DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
753                             APFloat::rmNearestTiesToEven);
754     return DstVal;
755   }
756   return None;
757 }
758 
759 Optional<SmallVector<unsigned>>
760 llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
761   LLT Ty = MRI.getType(Src);
762   SmallVector<unsigned> FoldedCTLZs;
763   auto tryFoldScalar = [&](Register R) -> Optional<unsigned> {
764     auto MaybeCst = getIConstantVRegVal(R, MRI);
765     if (!MaybeCst)
766       return None;
767     return MaybeCst->countLeadingZeros();
768   };
769   if (Ty.isVector()) {
770     // Try to constant fold each element.
771     auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
772     if (!BV)
773       return None;
774     for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
775       if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
776         FoldedCTLZs.emplace_back(*MaybeFold);
777         continue;
778       }
779       return None;
780     }
781     return FoldedCTLZs;
782   }
783   if (auto MaybeCst = tryFoldScalar(Src)) {
784     FoldedCTLZs.emplace_back(*MaybeCst);
785     return FoldedCTLZs;
786   }
787   return None;
788 }
789 
790 bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
791                                   GISelKnownBits *KB) {
792   Optional<DefinitionAndSourceRegister> DefSrcReg =
793       getDefSrcRegIgnoringCopies(Reg, MRI);
794   if (!DefSrcReg)
795     return false;
796 
797   const MachineInstr &MI = *DefSrcReg->MI;
798   const LLT Ty = MRI.getType(Reg);
799 
800   switch (MI.getOpcode()) {
801   case TargetOpcode::G_CONSTANT: {
802     unsigned BitWidth = Ty.getScalarSizeInBits();
803     const ConstantInt *CI = MI.getOperand(1).getCImm();
804     return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
805   }
806   case TargetOpcode::G_SHL: {
807     // A left-shift of a constant one will have exactly one bit set because
808     // shifting the bit off the end is undefined.
809 
810     // TODO: Constant splat
811     if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
812       if (*ConstLHS == 1)
813         return true;
814     }
815 
816     break;
817   }
818   case TargetOpcode::G_LSHR: {
819     if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
820       if (ConstLHS->isSignMask())
821         return true;
822     }
823 
824     break;
825   }
826   case TargetOpcode::G_BUILD_VECTOR: {
827     // TODO: Probably should have a recursion depth guard since you could have
828     // bitcasted vector elements.
829     for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
830       if (!isKnownToBeAPowerOfTwo(MI.getOperand(I).getReg(), MRI, KB))
831         return false;
832     }
833 
834     return true;
835   }
836   case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
837     // Only handle constants since we would need to know if the number of
838     // leading zeros is greater than the truncation amount.
839     const unsigned BitWidth = Ty.getScalarSizeInBits();
840     for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
841       auto Const = getIConstantVRegVal(MI.getOperand(I).getReg(), MRI);
842       if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
843         return false;
844     }
845 
846     return true;
847   }
848   default:
849     break;
850   }
851 
852   if (!KB)
853     return false;
854 
855   // More could be done here, though the above checks are enough
856   // to handle some common cases.
857 
858   // Fall back to computeKnownBits to catch other known cases.
859   KnownBits Known = KB->getKnownBits(Reg);
860   return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
861 }
862 
863 void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
864   AU.addPreserved<StackProtector>();
865 }
866 
867 static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
868   unsigned Mul = OrigSize * TargetSize;
869   unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
870   return Mul / GCDSize;
871 }
872 
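// Return a type whose size is the least common multiple of the two inputs,
// preferring the original element type where possible. For example (derived
// from the rules below): getLCMType(<2 x s32>, <3 x s32>) gives <6 x s32> and
// getLCMType(s32, s64) gives s64.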
873 LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
874   const unsigned OrigSize = OrigTy.getSizeInBits();
875   const unsigned TargetSize = TargetTy.getSizeInBits();
876 
877   if (OrigSize == TargetSize)
878     return OrigTy;
879 
880   if (OrigTy.isVector()) {
881     const LLT OrigElt = OrigTy.getElementType();
882 
883     if (TargetTy.isVector()) {
884       const LLT TargetElt = TargetTy.getElementType();
885 
886       if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
887         int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
888                                             TargetTy.getNumElements());
889         // Prefer the original element type.
890         ElementCount Mul = OrigTy.getElementCount() * TargetTy.getNumElements();
891         return LLT::vector(Mul.divideCoefficientBy(GCDElts),
892                            OrigTy.getElementType());
893       }
894     } else {
895       if (OrigElt.getSizeInBits() == TargetSize)
896         return OrigTy;
897     }
898 
899     unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
900     return LLT::fixed_vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
901   }
902 
903   if (TargetTy.isVector()) {
904     unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
905     return LLT::fixed_vector(LCMSize / OrigSize, OrigTy);
906   }
907 
908   unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
909 
910   // Preserve pointer types.
911   if (LCMSize == OrigSize)
912     return OrigTy;
913   if (LCMSize == TargetSize)
914     return TargetTy;
915 
916   return LLT::scalar(LCMSize);
917 }
918 
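// Return a type whose size is the greatest common divisor of the two inputs,
// again trying to keep the original element type, e.g. getGCDType(<4 x s32>,
// <2 x s32>) gives <2 x s32> and getGCDType(s64, s32) gives s32.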
919 LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
920   const unsigned OrigSize = OrigTy.getSizeInBits();
921   const unsigned TargetSize = TargetTy.getSizeInBits();
922 
923   if (OrigSize == TargetSize)
924     return OrigTy;
925 
926   if (OrigTy.isVector()) {
927     LLT OrigElt = OrigTy.getElementType();
928     if (TargetTy.isVector()) {
929       LLT TargetElt = TargetTy.getElementType();
930       if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
931         int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
932                                         TargetTy.getNumElements());
933         return LLT::scalarOrVector(ElementCount::getFixed(GCD), OrigElt);
934       }
935     } else {
936       // If the source is a vector of pointers, return a pointer element.
937       if (OrigElt.getSizeInBits() == TargetSize)
938         return OrigElt;
939     }
940 
941     unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
942     if (GCD == OrigElt.getSizeInBits())
943       return OrigElt;
944 
945     // If we can't produce the original element type, we have to use a smaller
946     // scalar.
947     if (GCD < OrigElt.getSizeInBits())
948       return LLT::scalar(GCD);
949     return LLT::fixed_vector(GCD / OrigElt.getSizeInBits(), OrigElt);
950   }
951 
952   if (TargetTy.isVector()) {
953     // Try to preserve the original element type.
954     LLT TargetElt = TargetTy.getElementType();
955     if (TargetElt.getSizeInBits() == OrigSize)
956       return OrigTy;
957   }
958 
959   unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
960   return LLT::scalar(GCD);
961 }
962 
963 Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
964   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
965          "Only G_SHUFFLE_VECTOR can have a splat index!");
966   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
967   auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });
968 
969   // If all elements are undefined, this shuffle can be considered a splat.
970   // Return 0 so callers have the best chance of simplifying further.
971   if (FirstDefinedIdx == Mask.end())
972     return 0;
973 
974   // Make sure all remaining elements are either undef or the same
975   // as the first non-undef value.
976   int SplatValue = *FirstDefinedIdx;
977   if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
978              [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
979     return None;
980 
981   return SplatValue;
982 }
983 
984 static bool isBuildVectorOp(unsigned Opcode) {
985   return Opcode == TargetOpcode::G_BUILD_VECTOR ||
986          Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
987 }
988 
989 namespace {
990 
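// Find a G_BUILD_VECTOR (or G_BUILD_VECTOR_TRUNC) feeding VReg whose sources
// all evaluate to the same constant, optionally ignoring undef elements, and
// return that constant together with the vreg it was read from.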
991 Optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
992                                            const MachineRegisterInfo &MRI,
993                                            bool AllowUndef) {
994   MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
995   if (!MI)
996     return None;
997 
998   if (!isBuildVectorOp(MI->getOpcode()))
999     return None;
1000 
1001   Optional<ValueAndVReg> SplatValAndReg = None;
1002   for (MachineOperand &Op : MI->uses()) {
1003     Register Element = Op.getReg();
1004     auto ElementValAndReg =
1005         getAnyConstantVRegValWithLookThrough(Element, MRI, true, true);
1006 
1007     // If AllowUndef, treat undef as a value that will result in a constant splat.
1008     if (!ElementValAndReg) {
1009       if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
1010         continue;
1011       return None;
1012     }
1013 
1014     // Record the splat value.
1015     if (!SplatValAndReg)
1016       SplatValAndReg = ElementValAndReg;
1017 
1018     // Different constant than the one already recorded, not a constant splat.
1019     if (SplatValAndReg->Value != ElementValAndReg->Value)
1020       return None;
1021   }
1022 
1023   return SplatValAndReg;
1024 }
1025 
1026 bool isBuildVectorConstantSplat(const MachineInstr &MI,
1027                                 const MachineRegisterInfo &MRI,
1028                                 int64_t SplatValue, bool AllowUndef) {
1029   if (auto SplatValAndReg =
1030           getAnyConstantSplat(MI.getOperand(0).getReg(), MRI, AllowUndef))
1031     return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
1032   return false;
1033 }
1034 
1035 } // end anonymous namespace
1036 
1037 Optional<int64_t>
1038 llvm::getBuildVectorConstantSplat(const MachineInstr &MI,
1039                                   const MachineRegisterInfo &MRI) {
1040   if (auto SplatValAndReg =
1041           getAnyConstantSplat(MI.getOperand(0).getReg(), MRI, false))
1042     return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
1043   return None;
1044 }
1045 
1046 Optional<FPValueAndVReg> llvm::getFConstantSplat(Register VReg,
1047                                                  const MachineRegisterInfo &MRI,
1048                                                  bool AllowUndef) {
1049   if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
1050     return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
1051   return None;
1052 }
1053 
1054 bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
1055                                  const MachineRegisterInfo &MRI,
1056                                  bool AllowUndef) {
1057   return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
1058 }
1059 
1060 bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
1061                                 const MachineRegisterInfo &MRI,
1062                                 bool AllowUndef) {
1063   return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
1064 }
1065 
1066 Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI,
1067                                              const MachineRegisterInfo &MRI) {
1068   unsigned Opc = MI.getOpcode();
1069   if (!isBuildVectorOp(Opc))
1070     return None;
1071   if (auto Splat = getBuildVectorConstantSplat(MI, MRI))
1072     return RegOrConstant(*Splat);
1073   auto Reg = MI.getOperand(1).getReg();
1074   if (any_of(make_range(MI.operands_begin() + 2, MI.operands_end()),
1075              [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
1076     return None;
1077   return RegOrConstant(Reg);
1078 }
1079 
1080 bool llvm::isConstantOrConstantVector(MachineInstr &MI,
1081                                       const MachineRegisterInfo &MRI) {
1082   Register Def = MI.getOperand(0).getReg();
1083   if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1084     return true;
1085   GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
1086   if (!BV)
1087     return false;
1088   for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
1089     if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
1090         getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
1091       continue;
1092     return false;
1093   }
1094   return true;
1095 }
1096 
1097 Optional<APInt>
1098 llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
1099                                       const MachineRegisterInfo &MRI) {
1100   Register Def = MI.getOperand(0).getReg();
1101   if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
1102     return C->Value;
1103   auto MaybeCst = getBuildVectorConstantSplat(MI, MRI);
1104   if (!MaybeCst)
1105     return None;
1106   const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
1107   return APInt(ScalarSize, *MaybeCst, true);
1108 }
1109 
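// Test Match against the constant(s) feeding Reg: a G_CONSTANT is tested
// directly and a G_BUILD_VECTOR element-wise; when AllowUndefs is set,
// G_IMPLICIT_DEF (elements) are passed to Match as a null Constant.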
1110 bool llvm::matchUnaryPredicate(
1111     const MachineRegisterInfo &MRI, Register Reg,
1112     std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
1113 
1114   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
1115   if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1116     return Match(nullptr);
1117 
1118   // TODO: Also handle fconstant
1119   if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1120     return Match(Def->getOperand(1).getCImm());
1121 
1122   if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
1123     return false;
1124 
1125   for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
1126     Register SrcElt = Def->getOperand(I).getReg();
1127     const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
1128     if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
1129       if (!Match(nullptr))
1130         return false;
1131       continue;
1132     }
1133 
1134     if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
1135         !Match(SrcDef->getOperand(1).getCImm()))
1136       return false;
1137   }
1138 
1139   return true;
1140 }
1141 
1142 bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
1143                           bool IsFP) {
1144   switch (TLI.getBooleanContents(IsVector, IsFP)) {
1145   case TargetLowering::UndefinedBooleanContent:
1146     return Val & 0x1;
1147   case TargetLowering::ZeroOrOneBooleanContent:
1148     return Val == 1;
1149   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1150     return Val == -1;
1151   }
1152   llvm_unreachable("Invalid boolean contents");
1153 }
1154 
1155 int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
1156                              bool IsFP) {
1157   switch (TLI.getBooleanContents(IsVector, IsFP)) {
1158   case TargetLowering::UndefinedBooleanContent:
1159   case TargetLowering::ZeroOrOneBooleanContent:
1160     return 1;
1161   case TargetLowering::ZeroOrNegativeOneBooleanContent:
1162     return -1;
1163   }
1164   llvm_unreachable("Invalid boolean contents");
1165 }
1166 
1167 bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
1168                             ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
1169   const auto &F = MBB.getParent()->getFunction();
1170   return F.hasOptSize() || F.hasMinSize() ||
1171          llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
1172 }
1173 
1174 /// These artifacts generally don't have any debug users because they don't
1175 /// directly originate from IR instructions, but instead usually from
1176 /// legalization. Skipping the check for debug users improves compile time.
1177 /// Note that truncates or extends aren't included because they have IR
1178 /// counterparts which can have debug users after translation.
1179 static bool shouldSkipDbgValueFor(MachineInstr &MI) {
1180   switch (MI.getOpcode()) {
1181   case TargetOpcode::G_UNMERGE_VALUES:
1182   case TargetOpcode::G_MERGE_VALUES:
1183   case TargetOpcode::G_CONCAT_VECTORS:
1184   case TargetOpcode::G_BUILD_VECTOR:
1185   case TargetOpcode::G_EXTRACT:
1186   case TargetOpcode::G_INSERT:
1187     return true;
1188   default:
1189     return false;
1190   }
1191 }
1192 
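// Erase MI, remembering the defining instructions of its (virtual) register
// uses in DeadInstChain so that eraseInstrs can in turn delete anything that
// becomes trivially dead as a result.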
1193 void llvm::saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
1194                             LostDebugLocObserver *LocObserver,
1195                             SmallInstListTy &DeadInstChain) {
1196   for (MachineOperand &Op : MI.uses()) {
1197     if (Op.isReg() && Op.getReg().isVirtual())
1198       DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
1199   }
1200   LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
1201   DeadInstChain.remove(&MI);
1202   if (shouldSkipDbgValueFor(MI))
1203     MI.eraseFromParent();
1204   else
1205     MI.eraseFromParentAndMarkDBGValuesForRemoval();
1206   if (LocObserver)
1207     LocObserver->checkpoint(false);
1208 }
1209 
1210 void llvm::eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs,
1211                        MachineRegisterInfo &MRI,
1212                        LostDebugLocObserver *LocObserver) {
1213   SmallInstListTy DeadInstChain;
1214   for (MachineInstr *MI : DeadInstrs)
1215     saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);
1216 
1217   while (!DeadInstChain.empty()) {
1218     MachineInstr *Inst = DeadInstChain.pop_back_val();
1219     if (!isTriviallyDead(*Inst, MRI))
1220       continue;
1221     saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
1222   }
1223 }
1224 
1225 void llvm::eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
1226                       LostDebugLocObserver *LocObserver) {
1227   return eraseInstrs({&MI}, MRI, LocObserver);
1228 }
1229