xref: /llvm-project/llvm/lib/CodeGen/GlobalISel/Utils.cpp (revision 122c649c982f1f4f1dc60dca2c5e1c9df86327ed)
//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;
using namespace MIPatternMatch;

Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, Register Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}
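
// Example (editor's note, not part of the original source): if Reg cannot be
// constrained to RegClass (say it already carries an incompatible class or
// bank), the caller receives a fresh vreg of RegClass and is expected to
// bridge the two with a COPY, as constrainOperandRegClass below does.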

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible,
  // then create a copy between the new and the old register.
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changingInstr(*RegMO.getParent());
    }
    RegMO.setReg(ConstrainedReg);
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changedInstr(*RegMO.getParent());
    }
  } else {
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      if (!RegMO.isDef()) {
        MachineInstr *RegDef = MRI.getVRegDef(Reg);
        Observer->changedInstr(*RegDef);
      }
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
    }
  }
  return ConstrainedReg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target-independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: if it's a use, we
  // can skip constraining as the instruction defining the register would
  // constrain it.

  // We can't constrain unallocatable register classes, because we can't create
  // virtual registers for these classes, so we need to let targets handle this
  // case.
  if (RegClass && !RegClass->isAllocatable())
    RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);

  if (!RegClass) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here may not be enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Note that to reproduce
    // the issue we likely need a destination pattern of a selection rule
    // producing such extra copies, not just an input GMIR with them, as every
    // existing target using selectImpl handles copies before calling it and
    // they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *RegClass,
                                  RegMO);
}

bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Register::isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}
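
// Example (editor's sketch, not part of the original source): a target's
// InstructionSelector typically calls this right after rewriting a generic
// instruction to a target one, e.g.
//   I.setDesc(TII.get(MyTarget::ADDrr)); // MyTarget::ADDrr is hypothetical
//   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);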

bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
                         MachineRegisterInfo &MRI) {
  // Give up if either DstReg or SrcReg is a physical register.
  if (DstReg.isPhysical() || SrcReg.isPhysical())
    return false;
  // Give up if the types don't match.
  if (MRI.getType(DstReg) != MRI.getType(SrcReg))
    return false;
  // Replace if either DstReg has no constraints or the register
  // constraints match.
  return !MRI.getRegClassOrRegBank(DstReg) ||
         MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
}

bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // FIXME: This logic is mostly duplicated with
  // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
  // MachineInstr::isLabel?

  // Don't delete frame allocation labels.
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;

  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead vregs.
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;

    Register Reg = MO.getReg();
    if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}

static void reportGISelDiagnostic(DiagnosticSeverity Severity,
                                  MachineFunction &MF,
                                  const TargetPassConfig &TPC,
                                  MachineOptimizationRemarkEmitter &MORE,
                                  MachineOptimizationRemarkMissed &R) {
  bool IsFatal = Severity == DS_Error &&
                 TPC.isGlobalISelAbortEnabled();
  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsFatal)
    report_fatal_error(R.getMsg());
  else
    MORE.emit(R);
}

void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
  reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}

Optional<APInt> llvm::getConstantVRegVal(Register VReg,
                                         const MachineRegisterInfo &MRI) {
  Optional<ValueAndVReg> ValAndVReg =
      getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return None;
  return ValAndVReg->Value;
}
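
// Example (editor's sketch): with MIR like "%0:_(s32) = G_CONSTANT i32 42",
// getConstantVRegVal(%0, MRI) yields APInt(32, 42); for any other defining
// opcode it yields None, since no look-through is requested here.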

Optional<int64_t> llvm::getConstantVRegSExtVal(Register VReg,
                                               const MachineRegisterInfo &MRI) {
  Optional<APInt> Val = getConstantVRegVal(VReg, MRI);
  if (Val && Val->getBitWidth() <= 64)
    return Val->getSExtValue();
  return None;
}

Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool HandleFConstant, bool LookThroughAnyExt) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;
  auto IsConstantOpcode = [HandleFConstant](unsigned Opcode) {
    return Opcode == TargetOpcode::G_CONSTANT ||
           (HandleFConstant && Opcode == TargetOpcode::G_FCONSTANT);
  };
  auto GetImmediateValue = [HandleFConstant,
                            &MRI](const MachineInstr &MI) -> Optional<APInt> {
    const MachineOperand &CstVal = MI.getOperand(1);
    if (!CstVal.isImm() && !CstVal.isCImm() &&
        (!HandleFConstant || !CstVal.isFPImm()))
      return None;
    if (!CstVal.isFPImm()) {
      unsigned BitWidth =
          MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
      APInt Val = CstVal.isImm() ? APInt(BitWidth, CstVal.getImm())
                                 : CstVal.getCImm()->getValue();
      assert(Val.getBitWidth() == BitWidth &&
             "Value bitwidth doesn't match definition type");
      return Val;
    }
    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  };
  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI->getOpcode()) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_ANYEXT:
      if (!LookThroughAnyExt)
        return None;
      LLVM_FALLTHROUGH;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (Register::isPhysicalRegister(VReg))
        return None;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return None;
    }
  }
  if (!MI || !IsConstantOpcode(MI->getOpcode()))
    return None;

  Optional<APInt> MaybeVal = GetImmediateValue(*MI);
  if (!MaybeVal)
    return None;
  APInt &Val = *MaybeVal;
  while (!SeenOpcodes.empty()) {
    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
    switch (OpcodeAndSize.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
      Val = Val.sext(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(OpcodeAndSize.second);
      break;
    }
  }

  return ValueAndVReg{Val, VReg};
}
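
// Example (editor's sketch): given
//   %0:_(s32) = G_CONSTANT i32 -1
//   %1:_(s64) = G_SEXT %0
// getConstantVRegValWithLookThrough(%1, MRI) walks through the G_SEXT,
// re-applies it to the found constant, and returns the 64-bit all-ones value
// together with %0 as the VReg that actually defines the constant.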

const ConstantFP *
llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

Optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
  Register DefSrcReg = Reg;
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return None;
  unsigned Opc = DefMI->getOpcode();
  while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid())
      break;
    DefMI = MRI.getVRegDef(SrcReg);
    DefSrcReg = SrcReg;
    Opc = DefMI->getOpcode();
  }
  return DefinitionAndSourceRegister{DefMI, DefSrcReg};
}

MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->MI : nullptr;
}

Register llvm::getSrcRegIgnoringCopies(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->Reg : Register();
}

MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}
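
// Example (editor's note): getAPFloatFromSize(1.0, 32) builds a
// single-precision APFloat, 64 keeps the double as-is, and 16 rounds the
// double to IEEE half; any other size trips the llvm_unreachable above.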

Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                        const Register Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
  if (!MaybeOp2Cst)
    return None;

  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  if (!MaybeOp1Cst)
    return None;

  const APInt &C1 = *MaybeOp1Cst;
  const APInt &C2 = *MaybeOp2Cst;
  switch (Opcode) {
  default:
    break;
  case TargetOpcode::G_ADD:
    return C1 + C2;
  case TargetOpcode::G_AND:
    return C1 & C2;
  case TargetOpcode::G_ASHR:
    return C1.ashr(C2);
  case TargetOpcode::G_LSHR:
    return C1.lshr(C2);
  case TargetOpcode::G_MUL:
    return C1 * C2;
  case TargetOpcode::G_OR:
    return C1 | C2;
  case TargetOpcode::G_SHL:
    return C1 << C2;
  case TargetOpcode::G_SUB:
    return C1 - C2;
  case TargetOpcode::G_XOR:
    return C1 ^ C2;
  case TargetOpcode::G_UDIV:
    if (!C2.getBoolValue())
      break;
    return C1.udiv(C2);
  case TargetOpcode::G_SDIV:
    if (!C2.getBoolValue())
      break;
    return C1.sdiv(C2);
  case TargetOpcode::G_UREM:
    if (!C2.getBoolValue())
      break;
    return C1.urem(C2);
  case TargetOpcode::G_SREM:
    if (!C2.getBoolValue())
      break;
    return C1.srem(C2);
  }

  return None;
}
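
// Example (editor's sketch): with
//   %0:_(s32) = G_CONSTANT i32 6
//   %1:_(s32) = G_CONSTANT i32 3
// ConstantFoldBinOp(TargetOpcode::G_MUL, %0, %1, MRI) folds to APInt(32, 18),
// while the division/remainder opcodes with a zero %1 fall through and
// return None instead of dividing by zero.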

bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  const TargetMachine &TM = DefMI->getMF()->getTarget();
  if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
    return !FPVal->getValueAPF().isNaN() ||
           (SNaN && !FPVal->getValueAPF().isSignaling());
  }

  if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
    for (const auto &Op : DefMI->uses())
      if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
        return false;
    return true;
  }

  if (SNaN) {
    // These FP operations quiet signaling NaNs. For now, just handle the ones
    // inserted during legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}
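
// Example (editor's note): a value defined by G_FCANONICALIZE may still be a
// quiet NaN, so isKnownNeverNaN(V, MRI) can fail while the weaker
// signaling-NaN-only query isKnownNeverNaN(V, MRI, /*SNaN=*/true) succeeds.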

Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
                                  const MachinePointerInfo &MPO) {
  auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
  if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
                           MPO.Offset);
  }

  return Align(1);
}

Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
                                        const TargetInstrInfo &TII,
                                        MCRegister PhysReg,
                                        const TargetRegisterClass &RC,
                                        LLT RegTy) {
  DebugLoc DL; // FIXME: Is no location the right choice?
  MachineBasicBlock &EntryMBB = MF.front();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
  if (LiveIn) {
    MachineInstr *Def = MRI.getVRegDef(LiveIn);
    if (Def) {
      // FIXME: Should the verifier check this is in the entry block?
      assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
      return LiveIn;
    }

    // It's possible the incoming argument register and copy were added during
    // lowering, but later deleted due to being/becoming dead. If this happens,
    // re-insert the copy.
  } else {
    // The live-in register was not present, so add it.
    LiveIn = MF.addLiveIn(PhysReg, &RC);
    if (RegTy.isValid())
      MRI.setType(LiveIn, RegTy);
  }

  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
    .addReg(PhysReg);
  if (!EntryMBB.isLiveIn(PhysReg))
    EntryMBB.addLiveIn(PhysReg);
  return LiveIn;
}

Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                        uint64_t Imm,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG: {
      LLT Ty = MRI.getType(Op1);
      return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
    }
    }
  }
  return None;
}
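
// Example (editor's sketch): for "%0:_(s32) = G_CONSTANT i32 255" and Imm = 8,
// ConstantFoldExtOp(TargetOpcode::G_SEXT_INREG, %0, 8, MRI) truncates to the
// low 8 bits and sign-extends back to 32 bits, folding to -1.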

bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
                                  GISelKnownBits *KB) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  if (!DefSrcReg)
    return false;

  const MachineInstr &MI = *DefSrcReg->MI;
  const LLT Ty = MRI.getType(Reg);

  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
    unsigned BitWidth = Ty.getScalarSizeInBits();
    const ConstantInt *CI = MI.getOperand(1).getCImm();
    return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
  }
  case TargetOpcode::G_SHL: {
    // A left-shift of a constant one will have exactly one bit set because
    // shifting the bit off the end is undefined.

    // TODO: Constant splat
    if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (*ConstLHS == 1)
        return true;
    }

    break;
  }
  case TargetOpcode::G_LSHR: {
    if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (ConstLHS->isSignMask())
        return true;
    }

    break;
  }
  default:
    break;
  }

  // TODO: Are all operands of a build vector constant powers of two?
  if (!KB)
    return false;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  KnownBits Known = KB->getKnownBits(Reg);
  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}
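
// Example (editor's note): "%p:_(s32) = G_SHL %one, %n" with %one defined as
// G_CONSTANT i32 1 is recognized structurally; anything else falls back to
// known-bits, which answers true only when at most one bit can be set and at
// least one bit must be set, i.e. the popcount is provably 1.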

void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}

static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
  unsigned Mul = OrigSize * TargetSize;
  unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
  return Mul / GCDSize;
}

LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    const LLT OrigElt = OrigTy.getElementType();

    if (TargetTy.isVector()) {
      const LLT TargetElt = TargetTy.getElementType();

      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
                                            TargetTy.getNumElements());
        // Prefer the original element type.
        int Mul = OrigTy.getNumElements() * TargetTy.getNumElements();
        return LLT::vector(Mul / GCDElts, OrigTy.getElementType());
      }
    } else {
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigTy;
    }

    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::vector(LCMSize / OrigSize, OrigTy);
  }

  unsigned LCMSize = getLCMSize(OrigSize, TargetSize);

  // Preserve pointer types.
  if (LCMSize == OrigSize)
    return OrigTy;
  if (LCMSize == TargetSize)
    return TargetTy;

  return LLT::scalar(LCMSize);
}
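
// Examples (editor's sketch of expected results): getLCMType(v3s32, s64)
// yields v6s32, since lcm(96, 64) = 192 bits is expressed as a vector of the
// original s32 elements; getLCMType(s16, p0) on a target with 64-bit pointers
// yields p0 itself, because the LCM size equals the target size and pointer
// types are preserved.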

LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();
    if (TargetTy.isVector()) {
      LLT TargetElt = TargetTy.getElementType();
      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
                                        TargetTy.getNumElements());
        return LLT::scalarOrVector(GCD, OrigElt);
      }
    } else {
      // If the source is a vector of pointers, return a pointer element.
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigElt;
    }

    unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
    if (GCD == OrigElt.getSizeInBits())
      return OrigElt;

    // If we can't produce the original element type, we have to use a smaller
    // scalar.
    if (GCD < OrigElt.getSizeInBits())
      return LLT::scalar(GCD);
    return LLT::vector(GCD / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    // Try to preserve the original element type.
    LLT TargetElt = TargetTy.getElementType();
    if (TargetElt.getSizeInBits() == OrigSize)
      return OrigTy;
  }

  unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
  return LLT::scalar(GCD);
}
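
// Examples (editor's sketch of expected results): getGCDType(v4s32, v2s32)
// yields v2s32 (the gcd of the element counts over the shared s32 element),
// and getGCDType(v3s32, s64) yields s32, since gcd(96, 64) = 32 matches the
// original element size.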

Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });

  // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0 to give callers the best chance to simplify.
  if (FirstDefinedIdx == Mask.end())
    return 0;

  // Make sure all remaining elements are either undef or the same
  // as the first non-undef value.
  int SplatValue = *FirstDefinedIdx;
  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return None;

  return SplatValue;
}
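
// Example (editor's note): a G_SHUFFLE_VECTOR with mask <1, -1, 1, 1> reports
// a splat index of 1 (undef lanes are ignored), while <0, 1, 0, 0> returns
// None because two different source lanes are referenced.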

static bool isBuildVectorOp(unsigned Opcode) {
  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
}

// TODO: Handle mixed undef elements.
static bool isBuildVectorConstantSplat(const MachineInstr &MI,
                                       const MachineRegisterInfo &MRI,
                                       int64_t SplatValue) {
  if (!isBuildVectorOp(MI.getOpcode()))
    return false;

  const unsigned NumOps = MI.getNumOperands();
  for (unsigned I = 1; I != NumOps; ++I) {
    Register Element = MI.getOperand(I).getReg();
    if (!mi_match(Element, MRI, m_SpecificICst(SplatValue)))
      return false;
  }

  return true;
}

Optional<int64_t>
llvm::getBuildVectorConstantSplat(const MachineInstr &MI,
                                  const MachineRegisterInfo &MRI) {
  if (!isBuildVectorOp(MI.getOpcode()))
    return None;

  const unsigned NumOps = MI.getNumOperands();
  Optional<int64_t> Scalar;
  for (unsigned I = 1; I != NumOps; ++I) {
    Register Element = MI.getOperand(I).getReg();
    int64_t ElementValue;
    if (!mi_match(Element, MRI, m_ICst(ElementValue)))
      return None;
    if (!Scalar)
      Scalar = ElementValue;
    else if (*Scalar != ElementValue)
      return None;
  }

  return Scalar;
}

bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
                                 const MachineRegisterInfo &MRI) {
  return isBuildVectorConstantSplat(MI, MRI, 0);
}

bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI) {
  return isBuildVectorConstantSplat(MI, MRI, -1);
}
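
// Example (editor's note): "%v:_(<4 x s32>) = G_BUILD_VECTOR %c, %c, %c, %c"
// with %c defined as G_CONSTANT i32 0 satisfies isBuildVectorAllZeros. Every
// lane must currently match the constant, so a single undef lane defeats the
// splat helpers (see the TODO above isBuildVectorConstantSplat).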

bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                          bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
    return Val == 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
                             bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
    return 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return -1;
  }
  llvm_unreachable("Invalid boolean contents");
}
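
// Example (editor's note): on a target reporting
// ZeroOrNegativeOneBooleanContent for vectors,
// getICmpTrueVal(TLI, /*IsVector=*/true, /*IsFP=*/false) returns -1, matching
// the all-ones lanes a vector compare produces on such targets.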