//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;
using namespace MIPatternMatch;

Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, Register Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class was not compatible,
  // insert a copy between the new and the old register.
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changingInstr(*RegMO.getParent());
    }
    RegMO.setReg(ConstrainedReg);
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changedInstr(*RegMO.getParent());
    }
  } else {
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      if (!RegMO.isDef()) {
        MachineInstr *RegDef = MRI.getVRegDef(Reg);
        Observer->changedInstr(*RegDef);
      }
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
    }
  }
  return ConstrainedReg;
}
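
// Illustration of the def case above (hypothetical MIR; the register class is
// invented for the example). Constraining the def of
//   %0:_(s32) = TARGET_OP ...
// to a class that %0 cannot satisfy rewrites the code as
//   %1:gpr32 = TARGET_OP ...
//   %0:_(s32) = COPY %1
// with the COPY inserted just after TARGET_OP, and returns %1.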

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target-independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: if it's a use, we
  // can skip constraining as the instruction defining the register would
  // constrain it.

  // We can't constrain unallocatable register classes, because we can't create
  // virtual registers for these classes, so we need to let targets handle this
  // case.
  if (RegClass && !RegClass->isAllocatable())
    RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);

  if (!RegClass) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here could be not enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Note that to reproduce
    // the issue we likely need a destination pattern of a selection rule
    // producing such extra copies, not just an input GMIR with them, as every
    // existing target using selectImpl handles copies before calling it and
    // they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *RegClass,
                                  RegMO);
}

bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Register::isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}
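
// Sketch of a typical call site (hypothetical; each selector passes its own
// TII/TRI/RBI): after emitting a target instruction I, an InstructionSelector
// commonly finishes with
//   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
// so that every explicit vreg operand of I ends up with a register class.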

bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
                         MachineRegisterInfo &MRI) {
  // Give up if either DstReg or SrcReg is a physical register.
  if (DstReg.isPhysical() || SrcReg.isPhysical())
    return false;
  // Give up if the types don't match.
  if (MRI.getType(DstReg) != MRI.getType(SrcReg))
    return false;
  // Replace if either DstReg has no constraints or the register
  // constraints match.
  return !MRI.getRegClassOrRegBank(DstReg) ||
         MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
}

bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // FIXME: This logic is mostly duplicated with
  // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
  // MachineInstr::isLabel?

  // Don't delete frame allocation labels.
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;
  // LIFETIME markers should be preserved even if they seem dead.
  if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
      MI.getOpcode() == TargetOpcode::LIFETIME_END)
    return false;

  // If we can move an instruction, we can remove it.  Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead
  // vregs.
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;

    Register Reg = MO.getReg();
    if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}
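
// For illustration (hypothetical MIR): assuming %2 has no non-debug uses, the
// G_ADD below is trivially dead, while the store never is:
//   %2:_(s32) = G_ADD %0, %1        ; side-effect free, result unused
//   G_STORE %0(s32), %3(p0)         ; writes memory, so not trivially dead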

static void reportGISelDiagnostic(DiagnosticSeverity Severity,
                                  MachineFunction &MF,
                                  const TargetPassConfig &TPC,
                                  MachineOptimizationRemarkEmitter &MORE,
                                  MachineOptimizationRemarkMissed &R) {
  bool IsFatal = Severity == DS_Error && TPC.isGlobalISelAbortEnabled();
  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsFatal)
    report_fatal_error(R.getMsg());
  else
    MORE.emit(R);
}

void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
  reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}

Optional<APInt> llvm::getIConstantVRegVal(Register VReg,
                                          const MachineRegisterInfo &MRI) {
  Optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
      VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return None;
  return ValAndVReg->Value;
}

Optional<int64_t>
llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
  Optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
  if (Val && Val->getBitWidth() <= 64)
    return Val->getSExtValue();
  return None;
}
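
// Example (hypothetical MIR): for
//   %0:_(s8) = G_CONSTANT i8 -1
// getIConstantVRegVal returns the 8-bit APInt 0xFF, and
// getIConstantVRegSExtVal returns the int64_t -1; constants wider than 64
// bits make the latter return None.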

namespace {

typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
typedef std::function<Optional<APInt>(const MachineInstr *MI)> GetAPCstFn;

Optional<ValueAndVReg> getConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
    GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
    bool LookThroughAnyExt = false) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;

  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_ANYEXT:
      if (!LookThroughAnyExt)
        return None;
      LLVM_FALLTHROUGH;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (Register::isPhysicalRegister(VReg))
        return None;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return None;
    }
  }
  if (!MI || !IsConstantOpcode(MI))
    return None;

  Optional<APInt> MaybeVal = getAPCstValue(MI);
  if (!MaybeVal)
    return None;
  APInt &Val = *MaybeVal;
  while (!SeenOpcodes.empty()) {
    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
    switch (OpcodeAndSize.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
      Val = Val.sext(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(OpcodeAndSize.second);
      break;
    }
  }

  return ValueAndVReg{Val, VReg};
}
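
// Look-through sketch (hypothetical MIR): starting from %2 in
//   %0:_(s32) = G_CONSTANT i32 42
//   %1:_(s64) = G_SEXT %0
//   %2:_(s64) = COPY %1
// the walk above records (G_SEXT, 64), reaches the G_CONSTANT, and then
// replays the extension, returning {APInt(64, 42), %0}.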

bool isIConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_CONSTANT;
}

bool isFConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
}

bool isAnyConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  unsigned Opc = MI->getOpcode();
  return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
}

Optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    return CstVal.getCImm()->getValue();
  return None;
}

Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    return CstVal.getCImm()->getValue();
  if (CstVal.isFPImm())
    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  return None;
}

} // end anonymous namespace

Optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
                                           getCImmAsAPInt, LookThroughInstrs);
}

Optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool LookThroughAnyExt) {
  return getConstantVRegValWithLookThrough(
      VReg, MRI, isAnyConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs,
      LookThroughAnyExt);
}

Optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  auto Reg = getConstantVRegValWithLookThrough(
      VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
  if (!Reg)
    return None;
  return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
                        Reg->VReg};
}

const ConstantFP *
llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

Optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
  Register DefSrcReg = Reg;
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return None;
  unsigned Opc = DefMI->getOpcode();
  while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid())
      break;
    DefMI = MRI.getVRegDef(SrcReg);
    DefSrcReg = SrcReg;
    Opc = DefMI->getOpcode();
  }
  return DefinitionAndSourceRegister{DefMI, DefSrcReg};
}

MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->MI : nullptr;
}

Register llvm::getSrcRegIgnoringCopies(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->Reg : Register();
}

MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}
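
// For example, getAPFloatFromSize(1.0, 16) builds a double-precision APFloat
// and converts it to IEEEhalf; any size other than 16, 32, or 64 hits the
// unreachable above.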

Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                        const Register Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
  if (!MaybeOp2Cst)
    return None;

  auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
  if (!MaybeOp1Cst)
    return None;

  const APInt &C1 = MaybeOp1Cst->Value;
  const APInt &C2 = MaybeOp2Cst->Value;
  switch (Opcode) {
  default:
    break;
  case TargetOpcode::G_ADD:
    return C1 + C2;
  case TargetOpcode::G_AND:
    return C1 & C2;
  case TargetOpcode::G_ASHR:
    return C1.ashr(C2);
  case TargetOpcode::G_LSHR:
    return C1.lshr(C2);
  case TargetOpcode::G_MUL:
    return C1 * C2;
  case TargetOpcode::G_OR:
    return C1 | C2;
  case TargetOpcode::G_SHL:
    return C1 << C2;
  case TargetOpcode::G_SUB:
    return C1 - C2;
  case TargetOpcode::G_XOR:
    return C1 ^ C2;
  case TargetOpcode::G_UDIV:
    if (!C2.getBoolValue())
      break;
    return C1.udiv(C2);
  case TargetOpcode::G_SDIV:
    if (!C2.getBoolValue())
      break;
    return C1.sdiv(C2);
  case TargetOpcode::G_UREM:
    if (!C2.getBoolValue())
      break;
    return C1.urem(C2);
  case TargetOpcode::G_SREM:
    if (!C2.getBoolValue())
      break;
    return C1.srem(C2);
  }

  return None;
}
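
// Folding sketch (hypothetical MIR): given
//   %0:_(s32) = G_CONSTANT i32 6
//   %1:_(s32) = G_CONSTANT i32 2
// ConstantFoldBinOp(TargetOpcode::G_MUL, %0, %1, MRI) yields APInt(32, 12),
// while the division/remainder cases above return None for a zero divisor.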

Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                                            const Register Op2,
                                            const MachineRegisterInfo &MRI) {
  const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
  if (!Op2Cst)
    return None;

  const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
  if (!Op1Cst)
    return None;

  APFloat C1 = Op1Cst->getValueAPF();
  const APFloat &C2 = Op2Cst->getValueAPF();
  switch (Opcode) {
  case TargetOpcode::G_FADD:
    C1.add(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FSUB:
    C1.subtract(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FMUL:
    C1.multiply(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FDIV:
    C1.divide(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FREM:
    C1.mod(C2);
    return C1;
  case TargetOpcode::G_FCOPYSIGN:
    C1.copySign(C2);
    return C1;
  case TargetOpcode::G_FMINNUM:
    return minnum(C1, C2);
  case TargetOpcode::G_FMAXNUM:
    return maxnum(C1, C2);
  case TargetOpcode::G_FMINIMUM:
    return minimum(C1, C2);
  case TargetOpcode::G_FMAXIMUM:
    return maximum(C1, C2);
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:
    // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
    // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
    // and currently there isn't a nice wrapper in APFloat for the version with
    // correct snan handling.
    break;
  default:
    break;
  }

  return None;
}

bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  const TargetMachine &TM = DefMI->getMF()->getTarget();
  if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
    return !FPVal->getValueAPF().isNaN() ||
           (SNaN && !FPVal->getValueAPF().isSignaling());
  }

  if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
    for (const auto &Op : DefMI->uses())
      if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
        return false;
    return true;
  }

  switch (DefMI->getOpcode()) {
  default:
    break;
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE: {
    if (SNaN)
      return true;
    // This can return a NaN if either operand is an sNaN, or if both operands
    // are NaN.
    return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
           (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
  }
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM: {
    // Only one needs to be known not-nan, since it will be returned if the
    // other ends up being one.
    return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
           isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
  }
  }

  if (SNaN) {
    // FP operations quiet sNaNs. For now, just handle the ones inserted during
    // legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}
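
// Example (hypothetical MIR): %1 below is known never to be a signaling NaN
// because G_FCANONICALIZE quiets its input,
//   %1:_(s32) = G_FCANONICALIZE %0
// so isKnownNeverNaN(%1, MRI, /*SNaN=*/true) returns true, while the plain
// query still returns false since a quiet NaN can pass through.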

Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
                                  const MachinePointerInfo &MPO) {
  auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
  if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
                           MPO.Offset);
  }

  if (const Value *V = MPO.V.dyn_cast<const Value *>()) {
    const Module *M = MF.getFunction().getParent();
    return V->getPointerAlignment(M->getDataLayout());
  }

  return Align(1);
}

Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
                                        const TargetInstrInfo &TII,
                                        MCRegister PhysReg,
                                        const TargetRegisterClass &RC,
                                        LLT RegTy) {
  DebugLoc DL; // FIXME: Is no location the right choice?
  MachineBasicBlock &EntryMBB = MF.front();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
  if (LiveIn) {
    MachineInstr *Def = MRI.getVRegDef(LiveIn);
    if (Def) {
      // FIXME: Should the verifier check this is in the entry block?
      assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
      return LiveIn;
    }

    // It's possible that the incoming argument register and copy were added
    // during lowering, but later deleted due to being/becoming dead. If this
    // happens, re-insert the copy.
  } else {
    // The live in register was not present, so add it.
    LiveIn = MF.addLiveIn(PhysReg, &RC);
    if (RegTy.isValid())
      MRI.setType(LiveIn, RegTy);
  }

  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
      .addReg(PhysReg);
  if (!EntryMBB.isLiveIn(PhysReg))
    EntryMBB.addLiveIn(PhysReg);
  return LiveIn;
}
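
// Usage sketch (hypothetical target code): call lowering that wants the
// incoming argument in, say, AArch64::X0 as a virtual register would do
//   Register VReg = getFunctionLiveInPhysReg(
//       MF, TII, AArch64::X0, AArch64::GPR64RegClass, LLT::scalar(64));
// and get back the destination of the entry-block COPY, created on demand.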

Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                        uint64_t Imm,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG: {
      LLT Ty = MRI.getType(Op1);
      return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
    }
    }
  }
  return None;
}

Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
                                               Register Src,
                                               const MachineRegisterInfo &MRI) {
  assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
  if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
    APFloat DstVal(getFltSemanticForLLT(DstTy));
    DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
                            APFloat::rmNearestTiesToEven);
    return DstVal;
  }
  return None;
}

bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
                                  GISelKnownBits *KB) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  if (!DefSrcReg)
    return false;

  const MachineInstr &MI = *DefSrcReg->MI;
  const LLT Ty = MRI.getType(Reg);

  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
    unsigned BitWidth = Ty.getScalarSizeInBits();
    const ConstantInt *CI = MI.getOperand(1).getCImm();
    return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
  }
  case TargetOpcode::G_SHL: {
    // A left-shift of a constant one will have exactly one bit set because
    // shifting the bit off the end is undefined.

    // TODO: Constant splat
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (*ConstLHS == 1)
        return true;
    }

    break;
  }
  case TargetOpcode::G_LSHR: {
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (ConstLHS->isSignMask())
        return true;
    }

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // TODO: Probably should have a recursion depth guard since you could have
    // bitcasted vector elements.
    for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
      if (!isKnownToBeAPowerOfTwo(MI.getOperand(I).getReg(), MRI, KB))
        return false;
    }

    return true;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Only handle constants since we would need to know if the number of
    // leading zeros is greater than the truncation amount.
    const unsigned BitWidth = Ty.getScalarSizeInBits();
    for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
      auto Const = getIConstantVRegVal(MI.getOperand(I).getReg(), MRI);
      if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
        return false;
    }

    return true;
  }
  default:
    break;
  }

  if (!KB)
    return false;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  KnownBits Known = KB->getKnownBits(Reg);
  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}
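
// Examples (hypothetical MIR) recognized without consulting KnownBits:
//   %c:_(s32) = G_CONSTANT i32 16     ; constant power of two
//   %one:_(s32) = G_CONSTANT i32 1
//   %s:_(s32) = G_SHL %one, %n        ; 1 << n has exactly one bit set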

void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}

static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
  unsigned Mul = OrigSize * TargetSize;
  unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
  return Mul / GCDSize;
}

LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    const LLT OrigElt = OrigTy.getElementType();

    if (TargetTy.isVector()) {
      const LLT TargetElt = TargetTy.getElementType();

      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
                                            TargetTy.getNumElements());
        // Prefer the original element type.
        ElementCount Mul = OrigTy.getElementCount() * TargetTy.getNumElements();
        return LLT::vector(Mul.divideCoefficientBy(GCDElts),
                           OrigTy.getElementType());
      }
    } else {
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigTy;
    }

    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::fixed_vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::fixed_vector(LCMSize / OrigSize, OrigTy);
  }

  unsigned LCMSize = getLCMSize(OrigSize, TargetSize);

  // Preserve pointer types.
  if (LCMSize == OrigSize)
    return OrigTy;
  if (LCMSize == TargetSize)
    return TargetTy;

  return LLT::scalar(LCMSize);
}
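
// Worked examples, in the usual LLT notation:
//   getLCMType(s32, s64)       -> s64        (LCM of the bit widths)
//   getLCMType(<2 x s32>, s64) -> <2 x s32>  (sizes already match at 64 bits)
//   getLCMType(s32, <2 x s64>) -> <4 x s32>  (128 bits in the original scalar)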

LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();
    if (TargetTy.isVector()) {
      LLT TargetElt = TargetTy.getElementType();
      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
                                        TargetTy.getNumElements());
        return LLT::scalarOrVector(ElementCount::getFixed(GCD), OrigElt);
      }
    } else {
      // If the source is a vector of pointers, return a pointer element.
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigElt;
    }

    unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
    if (GCD == OrigElt.getSizeInBits())
      return OrigElt;

    // If we can't produce the original element type, we have to use a smaller
    // scalar.
    if (GCD < OrigElt.getSizeInBits())
      return LLT::scalar(GCD);
    return LLT::fixed_vector(GCD / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    // Try to preserve the original element type.
    LLT TargetElt = TargetTy.getElementType();
    if (TargetElt.getSizeInBits() == OrigSize)
      return OrigTy;
  }

  unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
  return LLT::scalar(GCD);
}
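
// Worked examples:
//   getGCDType(<4 x s32>, <2 x s32>) -> <2 x s32>  (GCD of the element counts)
//   getGCDType(<2 x s64>, s32)       -> s32        (GCD 32 < 64-bit elements)
//   getGCDType(s32, <2 x s64>)       -> s32        (GCD of the total sizes)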

Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });

  // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0 for better potential for callers to simplify.
  if (FirstDefinedIdx == Mask.end())
    return 0;

  // Make sure all remaining elements are either undef or the same
  // as the first non-undef value.
  int SplatValue = *FirstDefinedIdx;
  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return None;

  return SplatValue;
}
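
// For example, the mask <1, 1, undef, 1> yields a splat index of 1, the mask
// <0, 1, 1, 1> yields None, and an all-undef mask yields 0.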

static bool isBuildVectorOp(unsigned Opcode) {
  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
}

// TODO: Handle mixed undef elements.
static bool isBuildVectorConstantSplat(const MachineInstr &MI,
                                       const MachineRegisterInfo &MRI,
                                       int64_t SplatValue) {
  if (!isBuildVectorOp(MI.getOpcode()))
    return false;

  const unsigned NumOps = MI.getNumOperands();
  for (unsigned I = 1; I != NumOps; ++I) {
    Register Element = MI.getOperand(I).getReg();
    if (!mi_match(Element, MRI, m_SpecificICst(SplatValue)))
      return false;
  }

  return true;
}

Optional<int64_t>
llvm::getBuildVectorConstantSplat(const MachineInstr &MI,
                                  const MachineRegisterInfo &MRI) {
  if (!isBuildVectorOp(MI.getOpcode()))
    return None;

  const unsigned NumOps = MI.getNumOperands();
  Optional<int64_t> Scalar;
  for (unsigned I = 1; I != NumOps; ++I) {
    Register Element = MI.getOperand(I).getReg();
    int64_t ElementValue;
    if (!mi_match(Element, MRI, m_ICst(ElementValue)))
      return None;
    if (!Scalar)
      Scalar = ElementValue;
    else if (*Scalar != ElementValue)
      return None;
  }

  return Scalar;
}

bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
                                 const MachineRegisterInfo &MRI) {
  return isBuildVectorConstantSplat(MI, MRI, 0);
}

bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI) {
  return isBuildVectorConstantSplat(MI, MRI, -1);
}

Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI,
                                             const MachineRegisterInfo &MRI) {
  unsigned Opc = MI.getOpcode();
  if (!isBuildVectorOp(Opc))
    return None;
  if (auto Splat = getBuildVectorConstantSplat(MI, MRI))
    return RegOrConstant(*Splat);
  auto Reg = MI.getOperand(1).getReg();
  if (any_of(make_range(MI.operands_begin() + 2, MI.operands_end()),
             [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
    return None;
  return RegOrConstant(Reg);
}
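
// Example (hypothetical MIR): for
//   %v:_(<4 x s32>) = G_BUILD_VECTOR %x, %x, %x, %x
// this returns RegOrConstant(%x); if every source were instead a
// G_CONSTANT i32 7, it would return RegOrConstant(7) via the splat above.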

bool llvm::matchUnaryPredicate(
    const MachineRegisterInfo &MRI, Register Reg,
    std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {
  const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    return Match(nullptr);

  // TODO: Also handle fconstant
  if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
    return Match(Def->getOperand(1).getCImm());

  if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
    return false;

  for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
    Register SrcElt = Def->getOperand(I).getReg();
    const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
    if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
        !Match(SrcDef->getOperand(1).getCImm()))
      return false;
  }

  return true;
}

bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                          bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
    return Val == 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == -1;
  }
  llvm_unreachable("Invalid boolean contents");
}
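
// For example, under ZeroOrNegativeOneBooleanContent (common for vector
// booleans) only Val == -1 counts as true, so a value of 1 that another
// target would accept is rejected there.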

int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
                             bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
    return 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
                            ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  const auto &F = MBB.getParent()->getFunction();
  return F.hasOptSize() || F.hasMinSize() ||
         llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
}
1053