//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;
using namespace MIPatternMatch;

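// Try to constrain Reg to the given RegClass. If that fails, create a fresh
// virtual register of the desired class instead; callers are expected to
// stitch the old and new registers together with a COPY.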
Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, Register Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible,
  // then create a copy between the new and the old register.
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    // FIXME: The copy needs to have the classes constrained for its operands.
    // Use the operand's regbank to get the class for the old register (Reg).
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changingInstr(*RegMO.getParent());
    }
    RegMO.setReg(ConstrainedReg);
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changedInstr(*RegMO.getParent());
    }
  } else {
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      if (!RegMO.isDef()) {
        MachineInstr *RegDef = MRI.getVRegDef(Reg);
        Observer->changedInstr(*RegDef);
      }
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
    }
  }
  return ConstrainedReg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target-independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: if it's a use, we
  // can skip constraining, as the instruction defining the register would
  // constrain it.

  if (OpRC) {
    // Obtain the RC from the incoming regbank if it is a proper sub-class.
    // Operands can have multiple regbanks for a superclass that combines
    // different register types (e.g., AMDGPU's VGPR and AGPR). The regbank
    // ambiguity resolved by targets during regbankselect should not be
    // overridden.
    if (const auto *SubRC = TRI.getCommonSubClass(
            OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
      OpRC = SubRC;

    OpRC = TRI.getAllocatableClass(OpRC);
  }

  if (!OpRC) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here could be not enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Please notice that to
    // reproduce the issue we likely need a destination pattern of a selection
    // rule producing such extra copies, not just an input GMIR with them as
    // every existing target using selectImpl handles copies before calling it
    // and they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
                                  RegMO);
}

bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Register::isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}

bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
                         MachineRegisterInfo &MRI) {
  // Give up if either DstReg or SrcReg is a physical register.
  if (DstReg.isPhysical() || SrcReg.isPhysical())
    return false;
  // Give up if the types don't match.
  if (MRI.getType(DstReg) != MRI.getType(SrcReg))
    return false;
  // Replace if either DstReg has no constraints or the register
  // constraints match.
  return !MRI.getRegClassOrRegBank(DstReg) ||
         MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
}

bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // FIXME: This logic is mostly duplicated with
  // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
  // MachineInstr::isLabel?

  // Don't delete frame allocation labels.
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;
  // LIFETIME markers should be preserved even if they seem dead.
  if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
      MI.getOpcode() == TargetOpcode::LIFETIME_END)
    return false;

  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead
  // vregs.
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;

    Register Reg = MO.getReg();
    if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}

static void reportGISelDiagnostic(DiagnosticSeverity Severity,
                                  MachineFunction &MF,
                                  const TargetPassConfig &TPC,
                                  MachineOptimizationRemarkEmitter &MORE,
                                  MachineOptimizationRemarkMissed &R) {
  bool IsFatal = Severity == DS_Error &&
                 TPC.isGlobalISelAbortEnabled();
  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsFatal)
    report_fatal_error(Twine(R.getMsg()));
  else
    MORE.emit(R);
}

void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
  reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}

Optional<APInt> llvm::getIConstantVRegVal(Register VReg,
                                          const MachineRegisterInfo &MRI) {
  Optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
      VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return None;
  return ValAndVReg->Value;
}

Optional<int64_t>
llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
  Optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
  if (Val && Val->getBitWidth() <= 64)
    return Val->getSExtValue();
  return None;
}

namespace {

typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
typedef std::function<Optional<APInt>(const MachineInstr *MI)> GetAPCstFn;

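// Look through a chain of instructions starting at \p VReg to find a constant
// definition: size-changing operations (G_TRUNC/G_SEXT/G_ZEXT, and G_ANYEXT if
// allowed) are recorded and re-applied to the constant once it is found, while
// COPY and G_INTTOPTR are simply looked through.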
Optional<ValueAndVReg> getConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
    GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
    bool LookThroughAnyExt = false) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;

  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_ANYEXT:
      if (!LookThroughAnyExt)
        return None;
      LLVM_FALLTHROUGH;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (Register::isPhysicalRegister(VReg))
        return None;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return None;
    }
  }
  if (!MI || !IsConstantOpcode(MI))
    return None;

  Optional<APInt> MaybeVal = getAPCstValue(MI);
  if (!MaybeVal)
    return None;
  APInt &Val = *MaybeVal;
  while (!SeenOpcodes.empty()) {
    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
    switch (OpcodeAndSize.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
      Val = Val.sext(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(OpcodeAndSize.second);
      break;
    }
  }

  return ValueAndVReg{Val, VReg};
}

bool isIConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_CONSTANT;
}

bool isFConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
}

bool isAnyConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  unsigned Opc = MI->getOpcode();
  return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
}

Optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    return CstVal.getCImm()->getValue();
  return None;
}

Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    return CstVal.getCImm()->getValue();
  if (CstVal.isFPImm())
    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  return None;
}

} // end anonymous namespace

Optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
                                           getCImmAsAPInt, LookThroughInstrs);
}

Optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool LookThroughAnyExt) {
  return getConstantVRegValWithLookThrough(
      VReg, MRI, isAnyConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs,
      LookThroughAnyExt);
}

Optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  auto Reg = getConstantVRegValWithLookThrough(
      VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
  if (!Reg)
    return None;
  return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
                        Reg->VReg};
}

const ConstantFP *
llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

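// Walk through COPYs (and generic optimization-hint instructions) starting at
// \p Reg until hitting a real defining instruction; return both that
// instruction and the register it ultimately reads from.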
Optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
  Register DefSrcReg = Reg;
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return None;
  unsigned Opc = DefMI->getOpcode();
  while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid())
      break;
    DefMI = MRI.getVRegDef(SrcReg);
    DefSrcReg = SrcReg;
    Opc = DefMI->getOpcode();
  }
  return DefinitionAndSourceRegister{DefMI, DefSrcReg};
}

MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->MI : nullptr;
}

Register llvm::getSrcRegIgnoringCopies(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->Reg : Register();
}

MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

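// Build an APFloat of the given bit width (16, 32 or 64) from a host double,
// rounding to nearest-even when narrowing to half precision.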
APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}

Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                        const Register Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
  if (!MaybeOp2Cst)
    return None;

  auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
  if (!MaybeOp1Cst)
    return None;

  const APInt &C1 = MaybeOp1Cst->Value;
  const APInt &C2 = MaybeOp2Cst->Value;
  switch (Opcode) {
  default:
    break;
  case TargetOpcode::G_ADD:
    return C1 + C2;
  case TargetOpcode::G_AND:
    return C1 & C2;
  case TargetOpcode::G_ASHR:
    return C1.ashr(C2);
  case TargetOpcode::G_LSHR:
    return C1.lshr(C2);
  case TargetOpcode::G_MUL:
    return C1 * C2;
  case TargetOpcode::G_OR:
    return C1 | C2;
  case TargetOpcode::G_SHL:
    return C1 << C2;
  case TargetOpcode::G_SUB:
    return C1 - C2;
  case TargetOpcode::G_XOR:
    return C1 ^ C2;
  case TargetOpcode::G_UDIV:
    if (!C2.getBoolValue())
      break;
    return C1.udiv(C2);
  case TargetOpcode::G_SDIV:
    if (!C2.getBoolValue())
      break;
    return C1.sdiv(C2);
  case TargetOpcode::G_UREM:
    if (!C2.getBoolValue())
      break;
    return C1.urem(C2);
  case TargetOpcode::G_SREM:
    if (!C2.getBoolValue())
      break;
    return C1.srem(C2);
  }

  return None;
}

Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                                            const Register Op2,
                                            const MachineRegisterInfo &MRI) {
  const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
  if (!Op2Cst)
    return None;

  const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
  if (!Op1Cst)
    return None;

  APFloat C1 = Op1Cst->getValueAPF();
  const APFloat &C2 = Op2Cst->getValueAPF();
  switch (Opcode) {
  case TargetOpcode::G_FADD:
    C1.add(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FSUB:
    C1.subtract(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FMUL:
    C1.multiply(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FDIV:
    C1.divide(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FREM:
    C1.mod(C2);
    return C1;
  case TargetOpcode::G_FCOPYSIGN:
    C1.copySign(C2);
    return C1;
  case TargetOpcode::G_FMINNUM:
    return minnum(C1, C2);
  case TargetOpcode::G_FMAXNUM:
    return maxnum(C1, C2);
  case TargetOpcode::G_FMINIMUM:
    return minimum(C1, C2);
  case TargetOpcode::G_FMAXIMUM:
    return maximum(C1, C2);
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:
    // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
    // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
    // and currently there isn't a nice wrapper in APFloat for the version with
    // correct snan handling.
    break;
  default:
    break;
  }

  return None;
}

Optional<MachineInstr *>
llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                              const Register Op2,
                              const MachineRegisterInfo &MRI,
                              MachineIRBuilder &MIB) {
  auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
  if (!SrcVec1)
    return None;
  auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
  if (!SrcVec2)
    return None;

  const LLT EltTy = MRI.getType(SrcVec1->getSourceReg(0));

  SmallVector<Register, 16> FoldedElements;
  for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
    auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
                                      SrcVec2->getSourceReg(Idx), MRI);
    if (!MaybeCst)
      return None;
    auto FoldedCstReg = MIB.buildConstant(EltTy, *MaybeCst).getReg(0);
    FoldedElements.emplace_back(FoldedCstReg);
  }
  // Create the new vector constant.
  auto CstVec =
      MIB.buildBuildVector(MRI.getType(SrcVec1->getReg(0)), FoldedElements);
  return &*CstVec;
}

bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  const TargetMachine &TM = DefMI->getMF()->getTarget();
  if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
    return !FPVal->getValueAPF().isNaN() ||
           (SNaN && !FPVal->getValueAPF().isSignaling());
  }

  if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
    for (const auto &Op : DefMI->uses())
      if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
        return false;
    return true;
  }

  switch (DefMI->getOpcode()) {
  default:
    break;
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE: {
    if (SNaN)
      return true;
    // This can return a NaN if either operand is an sNaN, or if both operands
    // are NaN.
    return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
           (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
  }
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM: {
    // Only one needs to be known not-nan, since it will be returned if the
    // other ends up being one.
    return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
           isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
  }
  }

  if (SNaN) {
    // FP operations quiet signaling NaNs. For now, just handle the ones
    // inserted during legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}

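// Infer the alignment of a memory access from its MachinePointerInfo: use the
// frame object's alignment (adjusted for the access offset) for fixed stack
// slots, the IR value's pointer alignment when one is available, and fall
// back to an alignment of 1 otherwise.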
Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
                                  const MachinePointerInfo &MPO) {
  auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
  if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
                           MPO.Offset);
  }

  if (const Value *V = MPO.V.dyn_cast<const Value *>()) {
    const Module *M = MF.getFunction().getParent();
    return V->getPointerAlignment(M->getDataLayout());
  }

  return Align(1);
}

Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
                                        const TargetInstrInfo &TII,
                                        MCRegister PhysReg,
                                        const TargetRegisterClass &RC,
                                        LLT RegTy) {
  DebugLoc DL; // FIXME: Is no location the right choice?
  MachineBasicBlock &EntryMBB = MF.front();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
  if (LiveIn) {
    MachineInstr *Def = MRI.getVRegDef(LiveIn);
    if (Def) {
      // FIXME: Should the verifier check this is in the entry block?
      assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
      return LiveIn;
    }

    // It's possible the incoming argument register and copy were added during
    // lowering, but later deleted due to being/becoming dead. If this happens,
    // re-insert the copy.
  } else {
    // The live-in register was not present, so add it.
    LiveIn = MF.addLiveIn(PhysReg, &RC);
    if (RegTy.isValid())
      MRI.setType(LiveIn, RegTy);
  }

  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
    .addReg(PhysReg);
  if (!EntryMBB.isLiveIn(PhysReg))
    EntryMBB.addLiveIn(PhysReg);
  return LiveIn;
}

Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                        uint64_t Imm,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG: {
      LLT Ty = MRI.getType(Op1);
      return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
    }
    }
  }
  return None;
}

Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
                                               Register Src,
                                               const MachineRegisterInfo &MRI) {
  assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
  if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
    APFloat DstVal(getFltSemanticForLLT(DstTy));
    DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
                            APFloat::rmNearestTiesToEven);
    return DstVal;
  }
  return None;
}

Optional<SmallVector<unsigned>>
llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
  LLT Ty = MRI.getType(Src);
  SmallVector<unsigned> FoldedCTLZs;
  auto tryFoldScalar = [&](Register R) -> Optional<unsigned> {
    auto MaybeCst = getIConstantVRegVal(R, MRI);
    if (!MaybeCst)
      return None;
    return MaybeCst->countLeadingZeros();
  };
  if (Ty.isVector()) {
    // Try to constant fold each element.
    auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
    if (!BV)
      return None;
    for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
      if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
        FoldedCTLZs.emplace_back(*MaybeFold);
        continue;
      }
      return None;
    }
    return FoldedCTLZs;
  }
  if (auto MaybeCst = tryFoldScalar(Src)) {
    FoldedCTLZs.emplace_back(*MaybeCst);
    return FoldedCTLZs;
  }
  return None;
}

bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
                                  GISelKnownBits *KB) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  if (!DefSrcReg)
    return false;

  const MachineInstr &MI = *DefSrcReg->MI;
  const LLT Ty = MRI.getType(Reg);

  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
    unsigned BitWidth = Ty.getScalarSizeInBits();
    const ConstantInt *CI = MI.getOperand(1).getCImm();
    return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
  }
  case TargetOpcode::G_SHL: {
    // A left-shift of a constant one will have exactly one bit set because
    // shifting the bit off the end is undefined.

    // TODO: Constant splat
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (*ConstLHS == 1)
        return true;
    }

    break;
  }
  case TargetOpcode::G_LSHR: {
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (ConstLHS->isSignMask())
        return true;
    }

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // TODO: Probably should have a recursion depth guard since you could have
    // bitcasted vector elements.
    for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
      if (!isKnownToBeAPowerOfTwo(MI.getOperand(I).getReg(), MRI, KB))
        return false;
    }

    return true;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Only handle constants, since we would need to know if the number of
    // leading zeros is greater than the truncation amount.
    const unsigned BitWidth = Ty.getScalarSizeInBits();
    for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
      auto Const = getIConstantVRegVal(MI.getOperand(I).getReg(), MRI);
      if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
        return false;
    }

    return true;
  }
  default:
    break;
  }

  if (!KB)
    return false;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  KnownBits Known = KB->getKnownBits(Reg);
  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}

void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}

static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
  unsigned Mul = OrigSize * TargetSize;
  unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
  return Mul / GCDSize;
}

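// Return a type whose bit width is the least common multiple of the two
// inputs' widths, preferring to keep the original element type. For example,
// getLCMType(<3 x s32>, <4 x s32>) == <12 x s32>, and
// getLCMType(s32, <2 x s64>) == <4 x s32>.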
LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    const LLT OrigElt = OrigTy.getElementType();

    if (TargetTy.isVector()) {
      const LLT TargetElt = TargetTy.getElementType();

      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
                                            TargetTy.getNumElements());
        // Prefer the original element type.
        ElementCount Mul = OrigTy.getElementCount() * TargetTy.getNumElements();
        return LLT::vector(Mul.divideCoefficientBy(GCDElts),
                           OrigTy.getElementType());
      }
    } else {
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigTy;
    }

    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::fixed_vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::fixed_vector(LCMSize / OrigSize, OrigTy);
  }

  unsigned LCMSize = getLCMSize(OrigSize, TargetSize);

  // Preserve pointer types.
  if (LCMSize == OrigSize)
    return OrigTy;
  if (LCMSize == TargetSize)
    return TargetTy;

  return LLT::scalar(LCMSize);
}

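// Return a type whose total size is the greatest common divisor of the two
// inputs' sizes, trying to keep the original element type. For example,
// getGCDType(<4 x s32>, <2 x s32>) == <2 x s32>, and
// getGCDType(<4 x s16>, s32) == <2 x s16>.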
LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();
    if (TargetTy.isVector()) {
      LLT TargetElt = TargetTy.getElementType();
      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
                                        TargetTy.getNumElements());
        return LLT::scalarOrVector(ElementCount::getFixed(GCD), OrigElt);
      }
    } else {
      // If the source is a vector of pointers, return a pointer element.
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigElt;
    }

    unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
    if (GCD == OrigElt.getSizeInBits())
      return OrigElt;

    // If we can't produce the original element type, we have to use a smaller
    // scalar.
    if (GCD < OrigElt.getSizeInBits())
      return LLT::scalar(GCD);
    return LLT::fixed_vector(GCD / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    // Try to preserve the original element type.
    LLT TargetElt = TargetTy.getElementType();
    if (TargetElt.getSizeInBits() == OrigSize)
      return OrigTy;
  }

  unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
  return LLT::scalar(GCD);
}

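// For example, a G_SHUFFLE_VECTOR mask of [2, -1, 2, 2] is a splat of index 2,
// while [0, 1] has no splat index; an all-undef mask is treated as a splat of
// index 0.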
Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });

  // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0 for better potential for callers to simplify.
  if (FirstDefinedIdx == Mask.end())
    return 0;

  // Make sure all remaining elements are either undef or the same
  // as the first non-undef value.
  int SplatValue = *FirstDefinedIdx;
  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return None;

  return SplatValue;
}

static bool isBuildVectorOp(unsigned Opcode) {
  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
}

namespace {

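// Return the constant value and register shared by every element of the
// G_BUILD_VECTOR (or G_BUILD_VECTOR_TRUNC) feeding \p VReg, or None if the
// elements disagree; undef elements are skipped when \p AllowUndef is set.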
Optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
                                           const MachineRegisterInfo &MRI,
                                           bool AllowUndef) {
  MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
  if (!MI)
    return None;

  if (!isBuildVectorOp(MI->getOpcode()))
    return None;

  Optional<ValueAndVReg> SplatValAndReg = None;
  for (MachineOperand &Op : MI->uses()) {
    Register Element = Op.getReg();
    auto ElementValAndReg =
        getAnyConstantVRegValWithLookThrough(Element, MRI, true, true);

    // If AllowUndef, treat undef as a value that will result in a constant
    // splat.
    if (!ElementValAndReg) {
      if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
        continue;
      return None;
    }

    // Record the splat value.
    if (!SplatValAndReg)
      SplatValAndReg = ElementValAndReg;

    // A different constant than the one already recorded means this is not a
    // constant splat.
    if (SplatValAndReg->Value != ElementValAndReg->Value)
      return None;
  }

  return SplatValAndReg;
}

bool isBuildVectorConstantSplat(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef) {
  if (auto SplatValAndReg =
          getAnyConstantSplat(MI.getOperand(0).getReg(), MRI, AllowUndef))
    return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
  return false;
}

} // end anonymous namespace

Optional<int64_t>
llvm::getBuildVectorConstantSplat(const MachineInstr &MI,
                                  const MachineRegisterInfo &MRI) {
  if (auto SplatValAndReg =
          getAnyConstantSplat(MI.getOperand(0).getReg(), MRI, false))
    return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
  return None;
}

Optional<FPValueAndVReg> llvm::getFConstantSplat(Register VReg,
                                                 const MachineRegisterInfo &MRI,
                                                 bool AllowUndef) {
  if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
    return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
  return None;
}

bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
                                 const MachineRegisterInfo &MRI,
                                 bool AllowUndef) {
  return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
}

bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                bool AllowUndef) {
  return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
}

Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI,
                                             const MachineRegisterInfo &MRI) {
  unsigned Opc = MI.getOpcode();
  if (!isBuildVectorOp(Opc))
    return None;
  if (auto Splat = getBuildVectorConstantSplat(MI, MRI))
    return RegOrConstant(*Splat);
  auto Reg = MI.getOperand(1).getReg();
  if (any_of(make_range(MI.operands_begin() + 2, MI.operands_end()),
             [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
    return None;
  return RegOrConstant(Reg);
}

bool llvm::isConstantOrConstantVector(MachineInstr &MI,
                                      const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
    return true;
  GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
  if (!BV)
    return false;
  for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
    if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
        getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
      continue;
    return false;
  }
  return true;
}

Optional<APInt>
llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
                                      const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
    return C->Value;
  auto MaybeCst = getBuildVectorConstantSplat(MI, MRI);
  if (!MaybeCst)
    return None;
  const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
  return APInt(ScalarSize, *MaybeCst, true);
}

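// Test a scalar G_CONSTANT, or every element of a G_BUILD_VECTOR, against
// \p Match; with \p AllowUndefs set, undef values are presented to the
// predicate as a null Constant pointer.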
bool llvm::matchUnaryPredicate(
    const MachineRegisterInfo &MRI, Register Reg,
    std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {

  const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    return Match(nullptr);

  // TODO: Also handle fconstant
  if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
    return Match(Def->getOperand(1).getCImm());

  if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
    return false;

  for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
    Register SrcElt = Def->getOperand(I).getReg();
    const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
    if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
        !Match(SrcDef->getOperand(1).getCImm()))
      return false;
  }

  return true;
}

bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                          bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
    return Val == 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
                             bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
    return 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
                            ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  const auto &F = MBB.getParent()->getFunction();
  return F.hasOptSize() || F.hasMinSize() ||
         llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
}

/// These artifacts generally don't have any debug users because they don't
/// directly originate from IR instructions, but instead usually from
/// legalization. Avoiding checking for debug users improves compile time.
/// Note that truncates or extends aren't included because they have IR
/// counterparts which can have debug users after translation.
static bool shouldSkipDbgValueFor(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_UNMERGE_VALUES:
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_CONCAT_VECTORS:
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_EXTRACT:
  case TargetOpcode::G_INSERT:
    return true;
  default:
    return false;
  }
}

void llvm::saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
                            LostDebugLocObserver *LocObserver,
                            SmallInstListTy &DeadInstChain) {
  for (MachineOperand &Op : MI.uses()) {
    if (Op.isReg() && Op.getReg().isVirtual())
      DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
  }
  LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
  DeadInstChain.remove(&MI);
  if (shouldSkipDbgValueFor(MI))
    MI.eraseFromParent();
  else
    MI.eraseFromParentAndMarkDBGValuesForRemoval();
  if (LocObserver)
    LocObserver->checkpoint(false);
}

void llvm::eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs,
                       MachineRegisterInfo &MRI,
                       LostDebugLocObserver *LocObserver) {
  SmallInstListTy DeadInstChain;
  for (MachineInstr *MI : DeadInstrs)
    saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);

  while (!DeadInstChain.empty()) {
    MachineInstr *Inst = DeadInstChain.pop_back_val();
    if (!isTriviallyDead(*Inst, MRI))
      continue;
    saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
  }
}

void llvm::eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LostDebugLocObserver *LocObserver) {
  return eraseInstrs({&MI}, MRI, LocObserver);
}