//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;
using namespace MIPatternMatch;

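// Try to constrain Reg to the given RegClass. On failure, create a fresh
// virtual register of RegClass instead; the caller is then expected to
// insert a COPY between the old and the new register.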
Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, Register Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}

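// Constrain the register used by RegMO to RegClass, inserting a COPY to or
// from a fresh virtual register when the existing register cannot simply be
// constrained in place.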
Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, const MachineOperand &RegMO) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible
  // then create a copy between the new and the old register.
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
  } else {
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      if (!RegMO.isDef()) {
        MachineInstr *RegDef = MRI.getVRegDef(Reg);
        Observer->changedInstr(*RegDef);
      }
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
    }
  }
  return ConstrainedReg;
}

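// Overload that looks the register class up from the operand's position in
// the MCInstrDesc, falling back to a target query for unallocatable classes.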
Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    const MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: If it's a use, we
  // can skip constraining as the instruction defining the register would
  // constrain it.

  // We can't constrain unallocatable register classes, because we can't create
  // virtual registers for these classes, so we need to let targets handle this
  // case.
  if (RegClass && !RegClass->isAllocatable())
    RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);

  if (!RegClass) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here may not be enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Note that to reproduce
    // the issue we likely need a destination pattern of a selection rule
    // producing such extra copies, not just an input GMIR with them as every
    // existing target using selectImpl handles copies before calling it and
    // they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *RegClass,
                                  RegMO);
}

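// Constrain every explicit register operand of an already-selected
// instruction to the class required by its MCInstrDesc, and establish any
// operand ties the descriptor demands.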
bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Register::isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(),
                                       MO, OpI));

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}

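// Check whether every use of DstReg could be rewritten to use SrcReg
// without violating type or register-class/bank constraints.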
bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
                         MachineRegisterInfo &MRI) {
  // Give up if either DstReg or SrcReg is a physical register.
  if (DstReg.isPhysical() || SrcReg.isPhysical())
    return false;
  // Give up if the types don't match.
  if (MRI.getType(DstReg) != MRI.getType(SrcReg))
    return false;
  // Replace if either DstReg has no constraints or the register
  // constraints match.
  return !MRI.getRegClassOrRegBank(DstReg) ||
         MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
}

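// An instruction is trivially dead if it has no side effects and every
// register it defines is a virtual register with no remaining non-debug uses.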
bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // FIXME: This logic is mostly duplicated with
  // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
  // MachineInstr::isLabel?

  // Don't delete frame allocation labels.
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;

  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead
  // vregs.
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;

    Register Reg = MO.getReg();
    if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}

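// Report a diagnostic through the MachineOptimizationRemarkEmitter, upgrading
// it to a fatal error when aborting on GlobalISel failure is enabled.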
static void reportGISelDiagnostic(DiagnosticSeverity Severity,
                                  MachineFunction &MF,
                                  const TargetPassConfig &TPC,
                                  MachineOptimizationRemarkEmitter &MORE,
                                  MachineOptimizationRemarkMissed &R) {
  bool IsFatal = Severity == DS_Error &&
                 TPC.isGlobalISelAbortEnabled();
  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsFatal)
    report_fatal_error(R.getMsg());
  else
    MORE.emit(R);
}

void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
  reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}

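// Return the APInt defined by VReg if it is produced directly by a
// G_CONSTANT, without looking through copies or extensions.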
Optional<APInt> llvm::getConstantVRegVal(Register VReg,
                                         const MachineRegisterInfo &MRI) {
  Optional<ValueAndVReg> ValAndVReg =
      getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return None;
  return ValAndVReg->Value;
}

Optional<int64_t> llvm::getConstantVRegSExtVal(Register VReg,
                                               const MachineRegisterInfo &MRI) {
  Optional<APInt> Val = getConstantVRegVal(VReg, MRI);
  if (Val && Val->getBitWidth() <= 64)
    return Val->getSExtValue();
  return None;
}

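// Walk backwards from VReg through COPY, G_TRUNC, G_SEXT, G_ZEXT and
// G_INTTOPTR until a G_CONSTANT (or, optionally, a G_FCONSTANT) is found,
// then replay the recorded truncations/extensions on the constant so the
// returned value matches the type of the original VReg.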
Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool HandleFConstant) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;
  auto IsConstantOpcode = [HandleFConstant](unsigned Opcode) {
    return Opcode == TargetOpcode::G_CONSTANT ||
           (HandleFConstant && Opcode == TargetOpcode::G_FCONSTANT);
  };
  auto GetImmediateValue = [HandleFConstant,
                            &MRI](const MachineInstr &MI) -> Optional<APInt> {
    const MachineOperand &CstVal = MI.getOperand(1);
    if (!CstVal.isImm() && !CstVal.isCImm() &&
        (!HandleFConstant || !CstVal.isFPImm()))
      return None;
    if (!CstVal.isFPImm()) {
      unsigned BitWidth =
          MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
      APInt Val = CstVal.isImm() ? APInt(BitWidth, CstVal.getImm())
                                 : CstVal.getCImm()->getValue();
      assert(Val.getBitWidth() == BitWidth &&
             "Value bitwidth doesn't match definition type");
      return Val;
    }
    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  };
  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI->getOpcode()) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (Register::isPhysicalRegister(VReg))
        return None;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return None;
    }
  }
  if (!MI || !IsConstantOpcode(MI->getOpcode()))
    return None;

  Optional<APInt> MaybeVal = GetImmediateValue(*MI);
  if (!MaybeVal)
    return None;
  APInt &Val = *MaybeVal;
  while (!SeenOpcodes.empty()) {
    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
    switch (OpcodeAndSize.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_SEXT:
      Val = Val.sext(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(OpcodeAndSize.second);
      break;
    }
  }

  return ValueAndVReg{Val, VReg};
}

const ConstantFP *
llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

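// Follow a chain of COPYs back to the non-copy instruction that ultimately
// produces Reg, returning both that instruction and the register it defines.
// Copies from sources without a valid LLT (e.g. physical registers) stop the
// walk.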
Optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
  Register DefSrcReg = Reg;
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return None;
  while (DefMI->getOpcode() == TargetOpcode::COPY) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid())
      break;
    DefMI = MRI.getVRegDef(SrcReg);
    DefSrcReg = SrcReg;
  }
  return DefinitionAndSourceRegister{DefMI, DefSrcReg};
}

MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->MI : nullptr;
}

Register llvm::getSrcRegIgnoringCopies(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->Reg : Register();
}

MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

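// Build an APFloat of the requested bit width (16, 32 or 64) from a double,
// rounding to the nearest representable half-precision value for Size == 16.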
APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}

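// Fold a binary operation over two directly-defined G_CONSTANT operands,
// returning None if either operand is not constant or the fold would be
// undefined (e.g. division or remainder by zero).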
Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                        const Register Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
  if (!MaybeOp2Cst)
    return None;

  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  if (!MaybeOp1Cst)
    return None;

  const APInt &C1 = *MaybeOp1Cst;
  const APInt &C2 = *MaybeOp2Cst;
  switch (Opcode) {
  default:
    break;
  case TargetOpcode::G_ADD:
    return C1 + C2;
  case TargetOpcode::G_AND:
    return C1 & C2;
  case TargetOpcode::G_ASHR:
    return C1.ashr(C2);
  case TargetOpcode::G_LSHR:
    return C1.lshr(C2);
  case TargetOpcode::G_MUL:
    return C1 * C2;
  case TargetOpcode::G_OR:
    return C1 | C2;
  case TargetOpcode::G_SHL:
    return C1 << C2;
  case TargetOpcode::G_SUB:
    return C1 - C2;
  case TargetOpcode::G_XOR:
    return C1 ^ C2;
  case TargetOpcode::G_UDIV:
    if (!C2.getBoolValue())
      break;
    return C1.udiv(C2);
  case TargetOpcode::G_SDIV:
    if (!C2.getBoolValue())
      break;
    return C1.sdiv(C2);
  case TargetOpcode::G_UREM:
    if (!C2.getBoolValue())
      break;
    return C1.urem(C2);
  case TargetOpcode::G_SREM:
    if (!C2.getBoolValue())
      break;
    return C1.srem(C2);
  }

  return None;
}

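// Conservatively determine whether Val can never be a NaN (or, if SNaN is
// set, never a signaling NaN), based on instruction flags, the global
// no-NaNs FP option, and a small set of NaN-quieting opcodes.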
bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  const TargetMachine& TM = DefMI->getMF()->getTarget();
  if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
    return true;

  if (SNaN) {
    // FP operations quiet signaling NaNs. For now, just handle the ones
    // inserted during legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}

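// Derive an alignment for a memory access from its MachinePointerInfo. Only
// fixed stack slots currently yield anything better than the trivial
// Align(1).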
Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
                                  const MachinePointerInfo &MPO) {
  auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
  if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
                           MPO.Offset);
  }

  return Align(1);
}

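// Return the virtual register holding the live-in value of PhysReg, creating
// the live-in and the entry-block COPY from the physical register if needed.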
Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
                                        const TargetInstrInfo &TII,
                                        MCRegister PhysReg,
                                        const TargetRegisterClass &RC,
                                        LLT RegTy) {
  DebugLoc DL; // FIXME: Is no location the right choice?
  MachineBasicBlock &EntryMBB = MF.front();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
  if (LiveIn) {
    MachineInstr *Def = MRI.getVRegDef(LiveIn);
    if (Def) {
      // FIXME: Should the verifier check this is in the entry block?
      assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
      return LiveIn;
    }

    // It's possible the incoming argument register and copy were added during
    // lowering, but later deleted due to being/becoming dead. If this happens,
    // re-insert the copy.
  } else {
    // The live in register was not present, so add it.
    LiveIn = MF.addLiveIn(PhysReg, &RC);
    if (RegTy.isValid())
      MRI.setType(LiveIn, RegTy);
  }

  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
    .addReg(PhysReg);
  if (!EntryMBB.isLiveIn(PhysReg))
    EntryMBB.addLiveIn(PhysReg);
  return LiveIn;
}

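// Fold an extension-style operation with an immediate parameter; currently
// only G_SEXT_INREG over a constant operand is handled.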
Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                        uint64_t Imm,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG: {
      LLT Ty = MRI.getType(Op1);
      return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
    }
    }
  }
  return None;
}

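// Test whether Reg is known to hold a power of two, first via simple pattern
// checks on its (copy-stripped) definition and then, if available, via
// known-bits analysis.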
bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
                                  GISelKnownBits *KB) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  if (!DefSrcReg)
    return false;

  const MachineInstr &MI = *DefSrcReg->MI;
  const LLT Ty = MRI.getType(Reg);

  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
    unsigned BitWidth = Ty.getScalarSizeInBits();
    const ConstantInt *CI = MI.getOperand(1).getCImm();
    return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
  }
  case TargetOpcode::G_SHL: {
    // A left-shift of a constant one will have exactly one bit set because
    // shifting the bit off the end is undefined.

    // TODO: Constant splat
    if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (*ConstLHS == 1)
        return true;
    }

    break;
  }
  case TargetOpcode::G_LSHR: {
    if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (ConstLHS->isSignMask())
        return true;
    }

    break;
  }
  default:
    break;
  }

  // TODO: Are all operands of a build vector constant powers of two?
  if (!KB)
    return false;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  KnownBits Known = KB->getKnownBits(Reg);
  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}

void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}

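// Least common multiple of two sizes in bits, using
// lcm(a, b) = a * b / gcd(a, b); e.g. getLCMSize(48, 32) == 96.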
static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
  unsigned Mul = OrigSize * TargetSize;
  unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
  return Mul / GCDSize;
}

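// Return a type whose size is the least common multiple of the two given
// types' sizes, preferring to keep OrigTy's element type; e.g.
// getLCMType(<2 x s32>, <3 x s32>) == <6 x s32> and
// getLCMType(s16, s32) == s32.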
LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    const LLT OrigElt = OrigTy.getElementType();

    if (TargetTy.isVector()) {
      const LLT TargetElt = TargetTy.getElementType();

      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
                                            TargetTy.getNumElements());
        // Prefer the original element type.
        int Mul = OrigTy.getNumElements() * TargetTy.getNumElements();
        return LLT::vector(Mul / GCDElts, OrigTy.getElementType());
      }
    } else {
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigTy;
    }

    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::vector(LCMSize / OrigSize, OrigTy);
  }

  unsigned LCMSize = getLCMSize(OrigSize, TargetSize);

  // Preserve pointer types.
  if (LCMSize == OrigSize)
    return OrigTy;
  if (LCMSize == TargetSize)
    return TargetTy;

  return LLT::scalar(LCMSize);
}

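// Return a type whose size is the greatest common divisor of the two given
// types' sizes, again preferring OrigTy's element type; e.g.
// getGCDType(<4 x s32>, <2 x s32>) == <2 x s32> and
// getGCDType(s32, s16) == s16.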
LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();
    if (TargetTy.isVector()) {
      LLT TargetElt = TargetTy.getElementType();
      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
                                        TargetTy.getNumElements());
        return LLT::scalarOrVector(GCD, OrigElt);
      }
    } else {
      // If the source is a vector of pointers, return a pointer element.
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigElt;
    }

    unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
    if (GCD == OrigElt.getSizeInBits())
      return OrigElt;

    // If we can't produce the original element type, we have to use a smaller
    // scalar.
    if (GCD < OrigElt.getSizeInBits())
      return LLT::scalar(GCD);
    return LLT::vector(GCD / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    // Try to preserve the original element type.
    LLT TargetElt = TargetTy.getElementType();
    if (TargetElt.getSizeInBits() == OrigSize)
      return OrigTy;
  }

  unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
  return LLT::scalar(GCD);
}

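// If MI's shuffle mask is a splat (all defined elements equal), return the
// splatted source index; an all-undef mask is treated as a splat of 0.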
Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });

  // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0 for better potential for callers to simplify.
  if (FirstDefinedIdx == Mask.end())
    return 0;

  // Make sure all remaining elements are either undef or the same
  // as the first non-undef value.
  int SplatValue = *FirstDefinedIdx;
  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return None;

  return SplatValue;
}

static bool isBuildVectorOp(unsigned Opcode) {
  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
}

// TODO: Handle mixed undef elements.
static bool isBuildVectorConstantSplat(const MachineInstr &MI,
                                       const MachineRegisterInfo &MRI,
                                       int64_t SplatValue) {
  if (!isBuildVectorOp(MI.getOpcode()))
    return false;

  const unsigned NumOps = MI.getNumOperands();
  for (unsigned I = 1; I != NumOps; ++I) {
    Register Element = MI.getOperand(I).getReg();
    if (!mi_match(Element, MRI, m_SpecificICst(SplatValue)))
      return false;
  }

  return true;
}

Optional<int64_t>
llvm::getBuildVectorConstantSplat(const MachineInstr &MI,
                                  const MachineRegisterInfo &MRI) {
  if (!isBuildVectorOp(MI.getOpcode()))
    return None;

  const unsigned NumOps = MI.getNumOperands();
  Optional<int64_t> Scalar;
  for (unsigned I = 1; I != NumOps; ++I) {
    Register Element = MI.getOperand(I).getReg();
    int64_t ElementValue;
    if (!mi_match(Element, MRI, m_ICst(ElementValue)))
      return None;
    if (!Scalar)
      Scalar = ElementValue;
    else if (*Scalar != ElementValue)
      return None;
  }

  return Scalar;
}

bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
                                 const MachineRegisterInfo &MRI) {
  return isBuildVectorConstantSplat(MI, MRI, 0);
}

bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI) {
  return isBuildVectorConstantSplat(MI, MRI, -1);
}

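// Interpret Val as a "true" value according to the target's boolean contents
// for the given vector-ness and FP-ness of the compared type.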
bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                          bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
    return Val == 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
                             bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
    return 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return -1;
  }
  llvm_unreachable("Invalid boolean contents");
}
805