//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CheckedArithmetic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

using namespace llvm;

static cl::opt<bool> DisableI2pP2iOpt(
    "disable-i2p-p2i-opt", cl::init(false),
    cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));

//===----------------------------------------------------------------------===//
//                            AllocaInst Class
//===----------------------------------------------------------------------===//

std::optional<TypeSize>
AllocaInst::getAllocationSize(const DataLayout &DL) const {
  TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
  if (isArrayAllocation()) {
    auto *C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return std::nullopt;
    assert(!Size.isScalable() && "Array elements cannot have a scalable size");
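    // If the element count times the element size overflows, the total size
    // cannot be represented; give up and return std::nullopt.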
    auto CheckedProd =
        checkedMulUnsigned(Size.getKnownMinValue(), C->getZExtValue());
    if (!CheckedProd)
      return std::nullopt;
    return TypeSize::getFixed(*CheckedProd);
  }
  return Size;
}

std::optional<TypeSize>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  std::optional<TypeSize> Size = getAllocationSize(DL);
  if (!Size)
    return std::nullopt;
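  // Convert the byte size to bits, again guarding against overflow.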
  auto CheckedProd = checkedMulUnsigned(Size->getKnownMinValue(),
                                        static_cast<TypeSize::ScalarTy>(8));
  if (!CheckedProd)
    return std::nullopt;
  return TypeSize::get(*CheckedProd, Size->isScalable());
}

//===----------------------------------------------------------------------===//
//                              SelectInst Class
//===----------------------------------------------------------------------===//

/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getElementCount() != VT->getElementCount())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}

//===----------------------------------------------------------------------===//
//                               PHINode Class
//===----------------------------------------------------------------------===//

PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, AllocMarker),
      ReservedSpace(PN.getNumOperands()) {
  NumUserOperands = PN.getNumOperands();
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
  SubclassOptionalData = PN.SubclassOptionalData;
}

// removeIncomingValue - Remove an incoming value.  This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase.  However,
  // clients might not expect this to happen.  The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  copyIncomingBlocks(drop_begin(blocks(), Idx + 1), Idx);

  // Nuke the last value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(PoisonValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}

void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
                                    bool DeletePHIIfEmpty) {
  SmallDenseSet<unsigned> RemoveIndices;
  for (unsigned Idx = 0; Idx < getNumIncomingValues(); ++Idx)
    if (Predicate(Idx))
      RemoveIndices.insert(Idx);

  if (RemoveIndices.empty())
    return;

  // Remove operands.
  auto NewOpEnd = remove_if(operands(), [&](Use &U) {
    return RemoveIndices.contains(U.getOperandNo());
  });
  for (Use &U : make_range(NewOpEnd, op_end()))
    U.set(nullptr);

  // Remove incoming blocks.
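  // std::remove_if shifts the surviving blocks down in place; the stale tail
  // entries are dropped when the operand count is shrunk below.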
  (void)std::remove_if(const_cast<block_iterator>(block_begin()),
                 const_cast<block_iterator>(block_end()), [&](BasicBlock *&BB) {
                   return RemoveIndices.contains(&BB - block_begin());
                 });

  setNumHungOffUseOperands(getNumOperands() - RemoveIndices.size());

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(PoisonValue::get(getType()));
    eraseFromParent();
  }
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation. This grows the number of ops by 1.5 times.
///
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2;      // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}

/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  if (ConstantValue == this)
    return PoisonValue::get(getType());
  return ConstantValue;
}

/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}

//===----------------------------------------------------------------------===//
//                       LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, AllocMarker, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, AllocMarker),
      ReservedSpace(LP.getNumOperands()) {
  NumUserOperands = LP.getNumOperands();
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       InsertPosition InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation. This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
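  // Roughly double the reserved space so that repeated clause additions are
  // amortized.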
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}

//===----------------------------------------------------------------------===//
//                        CallBase Implementation
//===----------------------------------------------------------------------===//

CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
                           InsertPosition InsertPt) {
  switch (CB->getOpcode()) {
  case Instruction::Call:
    return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
  case Instruction::Invoke:
    return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
  case Instruction::CallBr:
    return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
  default:
    llvm_unreachable("Unknown CallBase sub-class!");
  }
}

CallBase *CallBase::Create(CallBase *CI, OperandBundleDef OpB,
                           InsertPosition InsertPt) {
  SmallVector<OperandBundleDef, 2> OpDefs;
  for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
    auto ChildOB = CI->getOperandBundleAt(i);
    if (ChildOB.getTagName() != OpB.getTag())
      OpDefs.emplace_back(ChildOB);
  }
  OpDefs.emplace_back(OpB);
  return CallBase::Create(CI, OpDefs, InsertPt);
}

Function *CallBase::getCaller() { return getParent()->getParent(); }

unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
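  // A callbr carries one block operand per indirect destination plus the
  // default (fallthrough) destination.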
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}

bool CallBase::isIndirectCall() const {
  const Value *V = getCalledOperand();
  if (isa<Function>(V) || isa<Constant>(V))
    return false;
  return !isInlineAsm();
}

/// Tests if this call site must be tail call optimized. Only a CallInst can
/// be tail call optimized.
bool CallBase::isMustTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isMustTailCall();
  return false;
}

/// Tests if this call site is marked as a tail call.
bool CallBase::isTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isTailCall();
  return false;
}

Intrinsic::ID CallBase::getIntrinsicID() const {
  if (auto *F = getCalledFunction())
    return F->getIntrinsicID();
  return Intrinsic::not_intrinsic;
}

FPClassTest CallBase::getRetNoFPClass() const {
  FPClassTest Mask = Attrs.getRetNoFPClass();

  if (const Function *F = getCalledFunction())
    Mask |= F->getAttributes().getRetNoFPClass();
  return Mask;
}

FPClassTest CallBase::getParamNoFPClass(unsigned i) const {
  FPClassTest Mask = Attrs.getParamNoFPClass(i);

  if (const Function *F = getCalledFunction())
    Mask |= F->getAttributes().getParamNoFPClass(i);
  return Mask;
}

std::optional<ConstantRange> CallBase::getRange() const {
  const Attribute RangeAttr = getRetAttr(llvm::Attribute::Range);
  if (RangeAttr.isValid())
    return RangeAttr.getRange();
  return std::nullopt;
}

bool CallBase::isReturnNonNull() const {
  if (hasRetAttr(Attribute::NonNull))
    return true;

  if (getRetDereferenceableBytes() > 0 &&
      !NullPointerIsDefined(getCaller(), getType()->getPointerAddressSpace()))
    return true;

  return false;
}

Value *CallBase::getArgOperandWithAttribute(Attribute::AttrKind Kind) const {
  unsigned Index;

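  // hasAttrSomewhere() reports an attribute-list index, which is offset from
  // the argument number by AttributeList::FirstArgIndex.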
  if (Attrs.hasAttrSomewhere(Kind, &Index))
    return getArgOperand(Index - AttributeList::FirstArgIndex);
  if (const Function *F = getCalledFunction())
    if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
      return getArgOperand(Index - AttributeList::FirstArgIndex);

  return nullptr;
}

/// Determine whether the argument or parameter has the given attribute.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < arg_size() && "Param index out of bounds!");

  if (Attrs.hasParamAttr(ArgNo, Kind))
    return true;

  const Function *F = getCalledFunction();
  if (!F)
    return false;

  if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
    return false;

  // Take into account mod/ref by operand bundles.
  switch (Kind) {
  case Attribute::ReadNone:
    return !hasReadingOperandBundles() && !hasClobberingOperandBundles();
  case Attribute::ReadOnly:
    return !hasClobberingOperandBundles();
  case Attribute::WriteOnly:
    return !hasReadingOperandBundles();
  default:
    return true;
  }
}

bool CallBase::paramHasNonNullAttr(unsigned ArgNo,
                                   bool AllowUndefOrPoison) const {
  assert(getArgOperand(ArgNo)->getType()->isPointerTy() &&
         "Argument must be a pointer");
  if (paramHasAttr(ArgNo, Attribute::NonNull) &&
      (AllowUndefOrPoison || paramHasAttr(ArgNo, Attribute::NoUndef)))
    return true;

  if (getParamDereferenceableBytes(ArgNo) > 0 &&
      !NullPointerIsDefined(
          getCaller(),
          getArgOperand(ArgNo)->getType()->getPointerAddressSpace()))
    return true;

  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().hasFnAttr(Kind);

  return false;
}

template <typename AK>
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
  if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
    // getMemoryEffects() correctly combines memory effects from the call-site,
    // operand bundles and function.
    assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
  }

  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().getFnAttr(Kind);

  return Attribute();
}

template Attribute
CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;

template <typename AK>
Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
                                                 AK Kind) const {
  Value *V = getCalledOperand();

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().getParamAttr(ArgNo, Kind);

  return Attribute();
}
template Attribute
CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
                                       Attribute::AttrKind Kind) const;
template Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
                                                          StringRef Kind) const;

void CallBase::getOperandBundlesAsDefs(
    SmallVectorImpl<OperandBundleDef> &Defs) const {
  for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
    Defs.emplace_back(getOperandBundleAt(i));
}

CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}

CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
  /// When there aren't many bundles, we do a simple linear search.
  /// Otherwise fall back to a binary search that uses the fact that bundles
  /// usually have a similar number of arguments to converge faster.
  if (bundle_op_info_end() - bundle_op_info_begin() < 8) {
    for (auto &BOI : bundle_op_infos())
      if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
        return BOI;

    llvm_unreachable("Did not find operand bundle for operand!");
  }

  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
  assert(bundle_op_info_end() - bundle_op_info_begin() > 0 &&
         OpIdx < std::prev(bundle_op_info_end())->End &&
         "The Idx isn't in the operand bundle");

  /// We need fractional precision below; to avoid floating point we use an
  /// integral value scaled by this constant.
  constexpr unsigned NumberScaling = 1024;

  bundle_op_iterator Begin = bundle_op_info_begin();
  bundle_op_iterator End = bundle_op_info_end();
  bundle_op_iterator Current = Begin;

  while (Begin != End) {
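    // Estimate the (scaled) average number of operands per bundle in the
    // remaining range, then jump to the bundle that would contain OpIdx if
    // the operands were spread evenly across the bundles.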
    unsigned ScaledOperandPerBundle =
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
                       ScaledOperandPerBundle);
    if (Current >= End)
      Current = std::prev(End);
    assert(Current < End && Current >= Begin &&
           "the operand bundle doesn't cover every value in the range");
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
      break;
    if (OpIdx >= Current->End)
      Begin = Current + 1;
    else
      End = Current;
  }

  assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
         "the operand bundle doesn't cover every value in the range");
  return *Current;
}

CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
                                     OperandBundleDef OB,
                                     InsertPosition InsertPt) {
  if (CB->getOperandBundle(ID))
    return CB;

  SmallVector<OperandBundleDef, 1> Bundles;
  CB->getOperandBundlesAsDefs(Bundles);
  Bundles.push_back(OB);
  return Create(CB, Bundles, InsertPt);
}

CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
                                        InsertPosition InsertPt) {
  SmallVector<OperandBundleDef, 1> Bundles;
  bool CreateNew = false;

  for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
    auto Bundle = CB->getOperandBundleAt(I);
    if (Bundle.getTagID() == ID) {
      CreateNew = true;
      continue;
    }
    Bundles.emplace_back(Bundle);
  }

  return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
}

bool CallBase::hasReadingOperandBundles() const {
  // Implementation note: this is a conservative implementation of operand
  // bundle semantics, where *any* non-assume operand bundle (other than
  // ptrauth) forces a callsite to be at least readonly.
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}

bool CallBase::hasClobberingOperandBundles() const {
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
              LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}

MemoryEffects CallBase::getMemoryEffects() const {
  MemoryEffects ME = getAttributes().getMemoryEffects();
  if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
    MemoryEffects FnME = Fn->getMemoryEffects();
    if (hasOperandBundles()) {
      // TODO: Add a method to get memory effects for operand bundles instead.
      if (hasReadingOperandBundles())
        FnME |= MemoryEffects::readOnly();
      if (hasClobberingOperandBundles())
        FnME |= MemoryEffects::writeOnly();
    }
    ME &= FnME;
  }
  return ME;
}
void CallBase::setMemoryEffects(MemoryEffects ME) {
  addFnAttr(Attribute::getWithMemoryEffects(getContext(), ME));
}

/// Determine if the function does not access memory.
bool CallBase::doesNotAccessMemory() const {
  return getMemoryEffects().doesNotAccessMemory();
}
void CallBase::setDoesNotAccessMemory() {
  setMemoryEffects(MemoryEffects::none());
}

/// Determine if the function does not access or only reads memory.
bool CallBase::onlyReadsMemory() const {
  return getMemoryEffects().onlyReadsMemory();
}
void CallBase::setOnlyReadsMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::readOnly());
}

/// Determine if the function does not access or only writes memory.
bool CallBase::onlyWritesMemory() const {
  return getMemoryEffects().onlyWritesMemory();
}
void CallBase::setOnlyWritesMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::writeOnly());
}

/// Determine if the call can access memory only using pointers based on its
/// arguments.
bool CallBase::onlyAccessesArgMemory() const {
  return getMemoryEffects().onlyAccessesArgPointees();
}
void CallBase::setOnlyAccessesArgMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::argMemOnly());
}

/// Determine if the function may only access memory that is
/// inaccessible from the IR.
bool CallBase::onlyAccessesInaccessibleMemory() const {
  return getMemoryEffects().onlyAccessesInaccessibleMem();
}
void CallBase::setOnlyAccessesInaccessibleMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::inaccessibleMemOnly());
}

/// Determine if the function may only access memory that is
/// either inaccessible from the IR or pointed to by its arguments.
bool CallBase::onlyAccessesInaccessibleMemOrArgMem() const {
  return getMemoryEffects().onlyAccessesInaccessibleOrArgMem();
}
void CallBase::setOnlyAccessesInaccessibleMemOrArgMem() {
  setMemoryEffects(getMemoryEffects() &
                   MemoryEffects::inaccessibleOrArgMemOnly());
}

CaptureInfo CallBase::getCaptureInfo(unsigned OpNo) const {
  if (OpNo < arg_size()) {
    // If the argument is passed byval, the callee does not have access to the
    // original pointer and thus cannot capture it.
    if (isByValArgument(OpNo))
      return CaptureInfo::none();

    CaptureInfo CI = getParamAttributes(OpNo).getCaptureInfo();
    if (auto *Fn = dyn_cast<Function>(getCalledOperand()))
      CI &= Fn->getAttributes().getParamAttrs(OpNo).getCaptureInfo();
    return CI;
  }

  // deopt operand bundles are captures(none)
  auto &BOI = getBundleOpInfoForOperand(OpNo);
  auto OBU = operandBundleFromBundleOpInfo(BOI);
  return OBU.isDeoptOperandBundle() ? CaptureInfo::none() : CaptureInfo::all();
}

//===----------------------------------------------------------------------===//
//                        CallInst Implementation
//===----------------------------------------------------------------------===//

void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setCalledOperand(Func);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   AllocInfo AllocInfo, InsertPosition InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call, AllocInfo,
               InsertBefore) {
  init(Ty, Func, Name);
}

CallInst::CallInst(const CallInst &CI, AllocInfo AllocInfo)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call, AllocInfo) {
  assert(getNumOperands() == CI.getNumOperands() &&
         "Wrong number of operands allocated");
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           InsertPosition InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
                                 Args, OpB, CI->getName(), InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

// Update the profile weight for a call instruction by scaling it using the
// ratio S/T. The meaning of the "branch_weights" metadata for a call
// instruction is transferred to represent the call count.
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }
  scaleProfData(*this, S, T);
}

//===----------------------------------------------------------------------===//
//                        InvokeInst Implementation
//===----------------------------------------------------------------------===//

void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert(getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}

InvokeInst::InvokeInst(const InvokeInst &II, AllocInfo AllocInfo)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke, AllocInfo) {
  assert(getNumOperands() == II.getNumOperands() &&
         "Wrong number of operands allocated");
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               InsertPosition InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(
      II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
      II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHIIt());
}

void InvokeInst::updateProfWeight(uint64_t S, uint64_t T) {
  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }
  scaleProfData(*this, S, T);
}

//===----------------------------------------------------------------------===//
//                        CallBrInst Implementation
//===----------------------------------------------------------------------===//

void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert(getNumOperands() == ComputeNumOperands(Args.size(),
                                                IndirectDests.size(),
                                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  std::copy(Args.begin(), Args.end(), op_begin());
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, IndirectDests[i]);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}

CallBrInst::CallBrInst(const CallBrInst &CBI, AllocInfo AllocInfo)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               AllocInfo) {
  assert(getNumOperands() == CBI.getNumOperands() &&
         "Wrong number of operands allocated");
  setCallingConv(CBI.getCallingConv());
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}

CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
                               InsertPosition InsertPt) {
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());

  auto *NewCBI = CallBrInst::Create(
      CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
      CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
  NewCBI->setCallingConv(CBI->getCallingConv());
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  NewCBI->setAttributes(CBI->getAttributes());
  NewCBI->setDebugLoc(CBI->getDebugLoc());
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  return NewCBI;
}

//===----------------------------------------------------------------------===//
//                        ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI, AllocInfo AllocInfo)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  AllocInfo) {
  assert(getNumOperands() == RI.getNumOperands() &&
         "Wrong number of operands allocated");
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, AllocInfo AllocInfo,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret, AllocInfo,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

//===----------------------------------------------------------------------===//
//                        ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  AllocMarker) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  AllocMarker, InsertBefore) {
  Op<0>() = Exn;
}

//===----------------------------------------------------------------------===//
//                        CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI,
                                     AllocInfo AllocInfo)
    : Instruction(CRI.getType(), Instruction::CleanupRet, AllocInfo) {
  assert(getNumOperands() == CRI.getNumOperands() &&
         "Wrong number of operands allocated");
  setSubclassData<Instruction::OpaqueField>(
      CRI.getSubclassData<Instruction::OpaqueField>());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setSubclassData<UnwindDestField>(true);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     AllocInfo AllocInfo,
                                     InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet, AllocInfo, InsertBefore) {
  init(CleanupPad, UnwindBB);
}

//===----------------------------------------------------------------------===//
//                        CatchReturnInst Implementation
//===----------------------------------------------------------------------===//
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  AllocMarker) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  AllocMarker, InsertBefore) {
  init(CatchPad, BB);
}

//===----------------------------------------------------------------------===//
//                       CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, AllocMarker,
                  InsertBefore) {
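  // One extra operand slot is needed for the parent pad, and another for the
  // unwind destination if present.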
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, AllocMarker) {
  NumUserOperands = CSI.NumUserOperands;
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
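  // init() already installed operand 0 (the parent pad), so copy only the
  // remaining operands: the unwind destination, if any, and the handlers.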
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}

//===----------------------------------------------------------------------===//
//                        FuncletPadInst Implementation
//===----------------------------------------------------------------------===//
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI, AllocInfo AllocInfo)
    : Instruction(FPI.getType(), FPI.getOpcode(), AllocInfo) {
  assert(getNumOperands() == FPI.getNumOperands() &&
         "Wrong number of operands allocated");
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, AllocInfo AllocInfo,
                               const Twine &NameStr,
                               InsertPosition InsertBefore)
    : Instruction(ParentPad->getType(), Op, AllocInfo, InsertBefore) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
//                      UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable,
                  AllocMarker, InsertBefore) {}

//===----------------------------------------------------------------------===//
//                        BranchInst Implementation
//===----------------------------------------------------------------------===//

void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

BranchInst::BranchInst(BasicBlock *IfTrue, AllocInfo AllocInfo,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       AllocInfo AllocInfo, InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  AllocInfo, InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(const BranchInst &BI, AllocInfo AllocInfo)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  AllocInfo) {
  assert(getNumOperands() == BI.getNumOperands() &&
         "Wrong number of operands allocated");
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
  SubclassOptionalData = BI.SubclassOptionalData;
}

void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}

//===----------------------------------------------------------------------===//
//                        AllocaInst Implementation
//===----------------------------------------------------------------------===//

static Value *getAISize(LLVMContext &Context, Value *Amt) {
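  // A null array size means a single element is allocated.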
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos) {
  assert(Pos.isValid() &&
         "Insertion position cannot be null when alignment not provided!");
  BasicBlock *BB = Pos.getBasicBlock();
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getDataLayout();
  return DL.getPrefTypeAlign(Ty);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, InsertPosition InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       InsertPosition InsertBefore)
    : UnaryInstruction(PointerType::get(Ty->getContext(), AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

bool AllocaInst::isArrayAllocation() const {
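  // Any array size other than the literal constant 1 (including non-constant
  // sizes) makes this an array allocation.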
1262   if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
1263     return !CI->isOne();
1264   return true;
1265 }
1266 
1267 /// isStaticAlloca - Return true if this alloca is in the entry block of the
1268 /// function and is a constant size.  If so, the code generator will fold it
1269 /// into the prolog/epilog code, so it is basically free.
1270 bool AllocaInst::isStaticAlloca() const {
1271   // Must be constant size.
1272   if (!isa<ConstantInt>(getArraySize())) return false;
1273 
1274   // Must be in the entry block.
1275   const BasicBlock *Parent = getParent();
1276   return Parent->isEntryBlock() && !isUsedWithInAlloca();
1277 }
1278 
1279 //===----------------------------------------------------------------------===//
1280 //                           LoadInst Implementation
1281 //===----------------------------------------------------------------------===//
1282 
1283 void LoadInst::AssertOK() {
1284   assert(getOperand(0)->getType()->isPointerTy() &&
1285          "Ptr must have pointer type.");
1286 }
1287 
1288 static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos) {
1289   assert(Pos.isValid() &&
1290          "Insertion position cannot be null when alignment not provided!");
1291   BasicBlock *BB = Pos.getBasicBlock();
1292   assert(BB->getParent() &&
1293          "BB must be in a Function when alignment not provided!");
1294   const DataLayout &DL = BB->getDataLayout();
1295   return DL.getABITypeAlign(Ty);
1296 }
1297 
1298 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
1299                    InsertPosition InsertBef)
1300     : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1301 
1302 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1303                    InsertPosition InsertBef)
1304     : LoadInst(Ty, Ptr, Name, isVolatile,
1305                computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1306 
1307 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1308                    Align Align, InsertPosition InsertBef)
1309     : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1310                SyncScope::System, InsertBef) {}
1311 
1312 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1313                    Align Align, AtomicOrdering Order, SyncScope::ID SSID,
1314                    InsertPosition InsertBef)
1315     : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1316   setVolatile(isVolatile);
1317   setAlignment(Align);
1318   setAtomic(Order, SSID);
1319   AssertOK();
1320   setName(Name);
1321 }
1322 
1323 //===----------------------------------------------------------------------===//
1324 //                           StoreInst Implementation
1325 //===----------------------------------------------------------------------===//
1326 
1327 void StoreInst::AssertOK() {
1328   assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1329   assert(getOperand(1)->getType()->isPointerTy() &&
1330          "Ptr must have pointer type!");
1331 }
1332 
1333 StoreInst::StoreInst(Value *val, Value *addr, InsertPosition InsertBefore)
1334     : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1335 
1336 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1337                      InsertPosition InsertBefore)
1338     : StoreInst(val, addr, isVolatile,
1339                 computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
1340                 InsertBefore) {}
1341 
1342 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1343                      InsertPosition InsertBefore)
1344     : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1345                 SyncScope::System, InsertBefore) {}
1346 
1347 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1348                      AtomicOrdering Order, SyncScope::ID SSID,
1349                      InsertPosition InsertBefore)
1350     : Instruction(Type::getVoidTy(val->getContext()), Store, AllocMarker,
1351                   InsertBefore) {
1352   Op<0>() = val;
1353   Op<1>() = addr;
1354   setVolatile(isVolatile);
1355   setAlignment(Align);
1356   setAtomic(Order, SSID);
1357   AssertOK();
1358 }
1359 
1360 //===----------------------------------------------------------------------===//
1361 //                       AtomicCmpXchgInst Implementation
1362 //===----------------------------------------------------------------------===//
1363 
1364 void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1365                              Align Alignment, AtomicOrdering SuccessOrdering,
1366                              AtomicOrdering FailureOrdering,
1367                              SyncScope::ID SSID) {
1368   Op<0>() = Ptr;
1369   Op<1>() = Cmp;
1370   Op<2>() = NewVal;
1371   setSuccessOrdering(SuccessOrdering);
1372   setFailureOrdering(FailureOrdering);
1373   setSyncScopeID(SSID);
1374   setAlignment(Alignment);
1375 
1376   assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1377          "All operands must be non-null!");
1378   assert(getOperand(0)->getType()->isPointerTy() &&
1379          "Ptr must have pointer type!");
1380   assert(getOperand(1)->getType() == getOperand(2)->getType() &&
1381          "Cmp type and NewVal type must be same!");
1382 }
1383 
1384 AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1385                                      Align Alignment,
1386                                      AtomicOrdering SuccessOrdering,
1387                                      AtomicOrdering FailureOrdering,
1388                                      SyncScope::ID SSID,
1389                                      InsertPosition InsertBefore)
1390     : Instruction(
1391           StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1392           AtomicCmpXchg, AllocMarker, InsertBefore) {
1393   Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1394 }
1395 
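// The result of a cmpxchg is a two-element struct: element 0 is the value
// loaded from memory and element 1 is an i1 success flag. For example:
//   %res    = cmpxchg ptr %p, i32 %expected, i32 %new acq_rel monotonic
//   %loaded = extractvalue { i32, i1 } %res, 0
//   %ok     = extractvalue { i32, i1 } %res, 1
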
1396 //===----------------------------------------------------------------------===//
1397 //                       AtomicRMWInst Implementation
1398 //===----------------------------------------------------------------------===//
1399 
1400 void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1401                          Align Alignment, AtomicOrdering Ordering,
1402                          SyncScope::ID SSID) {
1403   assert(Ordering != AtomicOrdering::NotAtomic &&
1404          "atomicrmw instructions can only be atomic.");
1405   assert(Ordering != AtomicOrdering::Unordered &&
1406          "atomicrmw instructions cannot be unordered.");
1407   Op<0>() = Ptr;
1408   Op<1>() = Val;
1409   setOperation(Operation);
1410   setOrdering(Ordering);
1411   setSyncScopeID(SSID);
1412   setAlignment(Alignment);
1413 
1414   assert(getOperand(0) && getOperand(1) && "All operands must be non-null!");
1415   assert(getOperand(0)->getType()->isPointerTy() &&
1416          "Ptr must have pointer type!");
1417   assert(Ordering != AtomicOrdering::NotAtomic &&
1418          "AtomicRMW instructions must be atomic!");
1419 }
1420 
1421 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1422                              Align Alignment, AtomicOrdering Ordering,
1423                              SyncScope::ID SSID, InsertPosition InsertBefore)
1424     : Instruction(Val->getType(), AtomicRMW, AllocMarker, InsertBefore) {
1425   Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1426 }
1427 
1428 StringRef AtomicRMWInst::getOperationName(BinOp Op) {
1429   switch (Op) {
1430   case AtomicRMWInst::Xchg:
1431     return "xchg";
1432   case AtomicRMWInst::Add:
1433     return "add";
1434   case AtomicRMWInst::Sub:
1435     return "sub";
1436   case AtomicRMWInst::And:
1437     return "and";
1438   case AtomicRMWInst::Nand:
1439     return "nand";
1440   case AtomicRMWInst::Or:
1441     return "or";
1442   case AtomicRMWInst::Xor:
1443     return "xor";
1444   case AtomicRMWInst::Max:
1445     return "max";
1446   case AtomicRMWInst::Min:
1447     return "min";
1448   case AtomicRMWInst::UMax:
1449     return "umax";
1450   case AtomicRMWInst::UMin:
1451     return "umin";
1452   case AtomicRMWInst::FAdd:
1453     return "fadd";
1454   case AtomicRMWInst::FSub:
1455     return "fsub";
1456   case AtomicRMWInst::FMax:
1457     return "fmax";
1458   case AtomicRMWInst::FMin:
1459     return "fmin";
1460   case AtomicRMWInst::UIncWrap:
1461     return "uinc_wrap";
1462   case AtomicRMWInst::UDecWrap:
1463     return "udec_wrap";
1464   case AtomicRMWInst::USubCond:
1465     return "usub_cond";
1466   case AtomicRMWInst::USubSat:
1467     return "usub_sat";
1468   case AtomicRMWInst::BAD_BINOP:
1469     return "<invalid operation>";
1470   }
1471 
1472   llvm_unreachable("invalid atomicrmw operation");
1473 }
1474 
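// These names match the operation keywords used in textual IR, e.g.
// getOperationName(AtomicRMWInst::UIncWrap) returns "uinc_wrap" as in:
//   %old = atomicrmw uinc_wrap ptr %p, i32 %bound seq_cst
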
1475 //===----------------------------------------------------------------------===//
1476 //                       FenceInst Implementation
1477 //===----------------------------------------------------------------------===//
1478 
1479 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1480                      SyncScope::ID SSID, InsertPosition InsertBefore)
1481     : Instruction(Type::getVoidTy(C), Fence, AllocMarker, InsertBefore) {
1482   setOrdering(Ordering);
1483   setSyncScopeID(SSID);
1484 }
1485 
1486 //===----------------------------------------------------------------------===//
1487 //                       GetElementPtrInst Implementation
1488 //===----------------------------------------------------------------------===//
1489 
1490 void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1491                              const Twine &Name) {
1492   assert(getNumOperands() == 1 + IdxList.size() &&
1493          "NumOperands not initialized?");
1494   Op<0>() = Ptr;
1495   llvm::copy(IdxList, op_begin() + 1);
1496   setName(Name);
1497 }
1498 
1499 GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI,
1500                                      AllocInfo AllocInfo)
1501     : Instruction(GEPI.getType(), GetElementPtr, AllocInfo),
1502       SourceElementType(GEPI.SourceElementType),
1503       ResultElementType(GEPI.ResultElementType) {
1504   assert(getNumOperands() == GEPI.getNumOperands() &&
1505          "Wrong number of operands allocated");
1506   std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
1507   SubclassOptionalData = GEPI.SubclassOptionalData;
1508 }
1509 
1510 Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) {
1511   if (auto *Struct = dyn_cast<StructType>(Ty)) {
1512     if (!Struct->indexValid(Idx))
1513       return nullptr;
1514     return Struct->getTypeAtIndex(Idx);
1515   }
1516   if (!Idx->getType()->isIntOrIntVectorTy())
1517     return nullptr;
1518   if (auto *Array = dyn_cast<ArrayType>(Ty))
1519     return Array->getElementType();
1520   if (auto *Vector = dyn_cast<VectorType>(Ty))
1521     return Vector->getElementType();
1522   return nullptr;
1523 }
1524 
1525 Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) {
1526   if (auto *Struct = dyn_cast<StructType>(Ty)) {
1527     if (Idx >= Struct->getNumElements())
1528       return nullptr;
1529     return Struct->getElementType(Idx);
1530   }
1531   if (auto *Array = dyn_cast<ArrayType>(Ty))
1532     return Array->getElementType();
1533   if (auto *Vector = dyn_cast<VectorType>(Ty))
1534     return Vector->getElementType();
1535   return nullptr;
1536 }
1537 
1538 template <typename IndexTy>
1539 static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
1540   if (IdxList.empty())
1541     return Ty;
1542   for (IndexTy V : IdxList.slice(1)) {
1543     Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
1544     if (!Ty)
1545       return Ty;
1546   }
1547   return Ty;
1548 }
1549 
1550 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
1551   return getIndexedTypeInternal(Ty, IdxList);
1552 }
1553 
1554 Type *GetElementPtrInst::getIndexedType(Type *Ty,
1555                                         ArrayRef<Constant *> IdxList) {
1556   return getIndexedTypeInternal(Ty, IdxList);
1557 }
1558 
1559 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
1560   return getIndexedTypeInternal(Ty, IdxList);
1561 }
1562 
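// Note that getIndexedTypeInternal skips the first index: it only steps the
// base pointer and never changes the indexed type. For example, with source
// element type { i32, [4 x i8] } and indices (0, 1, 2), index 1 selects the
// [4 x i8] field and index 2 selects an i8, so the indexed type is i8.
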
1563 /// hasAllZeroIndices - Return true if all of the indices of this GEP are
1564 /// zeros.  If so, the result pointer and the first operand have the same
1565 /// value, just potentially different types.
1566 bool GetElementPtrInst::hasAllZeroIndices() const {
1567   for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1568     if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
1569       if (!CI->isZero()) return false;
1570     } else {
1571       return false;
1572     }
1573   }
1574   return true;
1575 }
1576 
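// For example, "%q = getelementptr { i32, i32 }, ptr %p, i64 0, i32 0" has
// all zero indices, so %q is the same address as %p.
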
1577 /// hasAllConstantIndices - Return true if all of the indices of this GEP are
1578 /// constant integers.  If so, the result pointer and the first operand have
1579 /// a constant offset between them.
1580 bool GetElementPtrInst::hasAllConstantIndices() const {
1581   for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1582     if (!isa<ConstantInt>(getOperand(i)))
1583       return false;
1584   }
1585   return true;
1586 }
1587 
1588 void GetElementPtrInst::setNoWrapFlags(GEPNoWrapFlags NW) {
1589   SubclassOptionalData = NW.getRaw();
1590 }
1591 
1592 void GetElementPtrInst::setIsInBounds(bool B) {
1593   GEPNoWrapFlags NW = cast<GEPOperator>(this)->getNoWrapFlags();
1594   if (B)
1595     NW |= GEPNoWrapFlags::inBounds();
1596   else
1597     NW = NW.withoutInBounds();
1598   setNoWrapFlags(NW);
1599 }
1600 
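// Note that "inbounds" implies "nusw" in the LangRef, and the flag helpers
// mirror that: GEPNoWrapFlags::inBounds() includes the nusw bit, while
// withoutInBounds() clears only the inbounds bit, so dropping inbounds here
// preserves an existing nusw flag.
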
1601 GEPNoWrapFlags GetElementPtrInst::getNoWrapFlags() const {
1602   return cast<GEPOperator>(this)->getNoWrapFlags();
1603 }
1604 
1605 bool GetElementPtrInst::isInBounds() const {
1606   return cast<GEPOperator>(this)->isInBounds();
1607 }
1608 
1609 bool GetElementPtrInst::hasNoUnsignedSignedWrap() const {
1610   return cast<GEPOperator>(this)->hasNoUnsignedSignedWrap();
1611 }
1612 
1613 bool GetElementPtrInst::hasNoUnsignedWrap() const {
1614   return cast<GEPOperator>(this)->hasNoUnsignedWrap();
1615 }
1616 
1617 bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
1618                                                  APInt &Offset) const {
1619   // Delegate to the generic GEPOperator implementation.
1620   return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
1621 }
1622 
1623 bool GetElementPtrInst::collectOffset(
1624     const DataLayout &DL, unsigned BitWidth,
1625     SmallMapVector<Value *, APInt, 4> &VariableOffsets,
1626     APInt &ConstantOffset) const {
1627   // Delegate to the generic GEPOperator implementation.
1628   return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
1629                                                 ConstantOffset);
1630 }
1631 
1632 //===----------------------------------------------------------------------===//
1633 //                           ExtractElementInst Implementation
1634 //===----------------------------------------------------------------------===//
1635 
1636 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1637                                        const Twine &Name,
1638                                        InsertPosition InsertBef)
1639     : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1640                   ExtractElement, AllocMarker, InsertBef) {
1641   assert(isValidOperands(Val, Index) &&
1642          "Invalid extractelement instruction operands!");
1643   Op<0>() = Val;
1644   Op<1>() = Index;
1645   setName(Name);
1646 }
1647 
1648 bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1649   if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1650     return false;
1651   return true;
1652 }
1653 
1654 //===----------------------------------------------------------------------===//
1655 //                           InsertElementInst Implementation
1656 //===----------------------------------------------------------------------===//
1657 
1658 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1659                                      const Twine &Name,
1660                                      InsertPosition InsertBef)
1661     : Instruction(Vec->getType(), InsertElement, AllocMarker, InsertBef) {
1662   assert(isValidOperands(Vec, Elt, Index) &&
1663          "Invalid insertelement instruction operands!");
1664   Op<0>() = Vec;
1665   Op<1>() = Elt;
1666   Op<2>() = Index;
1667   setName(Name);
1668 }
1669 
1670 bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
1671                                         const Value *Index) {
1672   if (!Vec->getType()->isVectorTy())
1673     return false; // First operand of insertelement must be vector type.
1674 
1675   if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
1676     return false; // Second operand must match the vector's element type.
1677 
1678   if (!Index->getType()->isIntegerTy())
1679     return false; // Third operand of insertelement must be an integer.
1680   return true;
1681 }
1682 
1683 //===----------------------------------------------------------------------===//
1684 //                      ShuffleVectorInst Implementation
1685 //===----------------------------------------------------------------------===//
1686 
1687 static Value *createPlaceholderForShuffleVector(Value *V) {
1688   assert(V && "Cannot create placeholder of nullptr V");
1689   return PoisonValue::get(V->getType());
1690 }
1691 
1692 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
1693                                      InsertPosition InsertBefore)
1694     : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
1695                         InsertBefore) {}
1696 
1697 ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
1698                                      const Twine &Name,
1699                                      InsertPosition InsertBefore)
1700     : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
1701                         InsertBefore) {}
1702 
1703 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1704                                      const Twine &Name,
1705                                      InsertPosition InsertBefore)
1706     : Instruction(
1707           VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1708                           cast<VectorType>(Mask->getType())->getElementCount()),
1709           ShuffleVector, AllocMarker, InsertBefore) {
1710   assert(isValidOperands(V1, V2, Mask) &&
1711          "Invalid shuffle vector instruction operands!");
1712 
1713   Op<0>() = V1;
1714   Op<1>() = V2;
1715   SmallVector<int, 16> MaskArr;
1716   getShuffleMask(cast<Constant>(Mask), MaskArr);
1717   setShuffleMask(MaskArr);
1718   setName(Name);
1719 }
1720 
1721 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
1722                                      const Twine &Name,
1723                                      InsertPosition InsertBefore)
1724     : Instruction(
1725           VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1726                           Mask.size(), isa<ScalableVectorType>(V1->getType())),
1727           ShuffleVector, AllocMarker, InsertBefore) {
1728   assert(isValidOperands(V1, V2, Mask) &&
1729          "Invalid shuffle vector instruction operands!");
1730   Op<0>() = V1;
1731   Op<1>() = V2;
1732   setShuffleMask(Mask);
1733   setName(Name);
1734 }
1735 
1736 void ShuffleVectorInst::commute() {
1737   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
1738   int NumMaskElts = ShuffleMask.size();
1739   SmallVector<int, 16> NewMask(NumMaskElts);
1740   for (int i = 0; i != NumMaskElts; ++i) {
1741     int MaskElt = getMaskValue(i);
1742     if (MaskElt == PoisonMaskElem) {
1743       NewMask[i] = PoisonMaskElem;
1744       continue;
1745     }
1746     assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
1747     MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
1748     NewMask[i] = MaskElt;
1749   }
1750   setShuffleMask(NewMask);
1751   Op<0>().swap(Op<1>());
1752 }
1753 
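// For example, commuting "shufflevector <2 x i32> %a, <2 x i32> %b, <0, 3>"
// swaps %a and %b and remaps the mask to <2, 1>, selecting the same lanes.
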
1754 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
1755                                         ArrayRef<int> Mask) {
1756   // V1 and V2 must be vectors of the same type.
1757   if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
1758     return false;
1759 
1760   // Make sure the mask elements make sense.
1761   int V1Size =
1762       cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
1763   for (int Elem : Mask)
1764     if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
1765       return false;
1766 
1767   if (isa<ScalableVectorType>(V1->getType()))
1768     if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
1769       return false;
1770 
1771   return true;
1772 }
1773 
1774 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
1775                                         const Value *Mask) {
1776   // V1 and V2 must be vectors of the same type.
1777   if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
1778     return false;
1779 
1780   // Mask must be vector of i32, and must be the same kind of vector as the
1781   // input vectors.
1782   auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
1783   if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
1784       isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
1785     return false;
1786 
1787   // Check to see if Mask is valid.
1788   if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
1789     return true;
1790 
1791   // NOTE: Through vector ConstantInt we have the potential to support more
1792   // than just zero splat masks but that requires a LangRef change.
1793   if (isa<ScalableVectorType>(MaskTy))
1794     return false;
1795 
1796   unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
1797 
1798   if (const auto *CI = dyn_cast<ConstantInt>(Mask))
1799     return !CI->uge(V1Size * 2);
1800 
1801   if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
1802     for (Value *Op : MV->operands()) {
1803       if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1804         if (CI->uge(V1Size*2))
1805           return false;
1806       } else if (!isa<UndefValue>(Op)) {
1807         return false;
1808       }
1809     }
1810     return true;
1811   }
1812 
1813   if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1814     for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
1815          i != e; ++i)
1816       if (CDS->getElementAsInteger(i) >= V1Size*2)
1817         return false;
1818     return true;
1819   }
1820 
1821   return false;
1822 }
1823 
1824 void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
1825                                        SmallVectorImpl<int> &Result) {
1826   ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
1827 
1828   if (isa<ConstantAggregateZero>(Mask)) {
1829     Result.resize(EC.getKnownMinValue(), 0);
1830     return;
1831   }
1832 
1833   Result.reserve(EC.getKnownMinValue());
1834 
1835   if (EC.isScalable()) {
1836     assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
1837            "Scalable vector shuffle mask must be undef or zeroinitializer");
1838     int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
1839     for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
1840       Result.emplace_back(MaskVal);
1841     return;
1842   }
1843 
1844   unsigned NumElts = EC.getKnownMinValue();
1845 
1846   if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1847     for (unsigned i = 0; i != NumElts; ++i)
1848       Result.push_back(CDS->getElementAsInteger(i));
1849     return;
1850   }
1851   for (unsigned i = 0; i != NumElts; ++i) {
1852     Constant *C = Mask->getAggregateElement(i);
1853     Result.push_back(isa<UndefValue>(C) ? -1 :
1854                      cast<ConstantInt>(C)->getZExtValue());
1855   }
1856 }
1857 
1858 void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {
1859   ShuffleMask.assign(Mask.begin(), Mask.end());
1860   ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
1861 }
1862 
1863 Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
1864                                                           Type *ResultTy) {
1865   Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
1866   if (isa<ScalableVectorType>(ResultTy)) {
1867     assert(all_equal(Mask) && "Unexpected shuffle");
1868     Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
1869     if (Mask[0] == 0)
1870       return Constant::getNullValue(VecTy);
1871     return PoisonValue::get(VecTy);
1872   }
1873   SmallVector<Constant *, 16> MaskConst;
1874   for (int Elem : Mask) {
1875     if (Elem == PoisonMaskElem)
1876       MaskConst.push_back(PoisonValue::get(Int32Ty));
1877     else
1878       MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
1879   }
1880   return ConstantVector::get(MaskConst);
1881 }
1882 
1883 static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1884   assert(!Mask.empty() && "Shuffle mask must contain elements");
1885   bool UsesLHS = false;
1886   bool UsesRHS = false;
1887   for (int I : Mask) {
1888     if (I == -1)
1889       continue;
1890     assert(I >= 0 && I < (NumOpElts * 2) &&
1891            "Out-of-bounds shuffle mask element");
1892     UsesLHS |= (I < NumOpElts);
1893     UsesRHS |= (I >= NumOpElts);
1894     if (UsesLHS && UsesRHS)
1895       return false;
1896   }
1897   // Degenerate case: a completely undef mask means neither source is used.
1898   return UsesLHS || UsesRHS;
1899 }
1900 
1901 bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts) {
1902   // We don't have vector operand size information, so assume operands are the
1903   // same size as the mask.
1904   return isSingleSourceMaskImpl(Mask, NumSrcElts);
1905 }
1906 
1907 static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1908   if (!isSingleSourceMaskImpl(Mask, NumOpElts))
1909     return false;
1910   for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1911     if (Mask[i] == -1)
1912       continue;
1913     if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1914       return false;
1915   }
1916   return true;
1917 }
1918 
1919 bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask, int NumSrcElts) {
1920   if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1921     return false;
1922   // We don't have vector operand size information, so assume operands are the
1923   // same size as the mask.
1924   return isIdentityMaskImpl(Mask, NumSrcElts);
1925 }
1926 
1927 bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask, int NumSrcElts) {
1928   if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1929     return false;
1930   if (!isSingleSourceMask(Mask, NumSrcElts))
1931     return false;
1932 
1933   // The number of elements in the mask must be at least 2.
1934   if (NumSrcElts < 2)
1935     return false;
1936 
1937   for (int I = 0, E = Mask.size(); I < E; ++I) {
1938     if (Mask[I] == -1)
1939       continue;
1940     if (Mask[I] != (NumSrcElts - 1 - I) &&
1941         Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
1942       return false;
1943   }
1944   return true;
1945 }
1946 
1947 bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts) {
1948   if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1949     return false;
1950   if (!isSingleSourceMask(Mask, NumSrcElts))
1951     return false;
1952   for (int I = 0, E = Mask.size(); I < E; ++I) {
1953     if (Mask[I] == -1)
1954       continue;
1955     if (Mask[I] != 0 && Mask[I] != NumSrcElts)
1956       return false;
1957   }
1958   return true;
1959 }
1960 
1961 bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask, int NumSrcElts) {
1962   if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1963     return false;
1964   // Select is differentiated from identity. It requires using both sources.
1965   if (isSingleSourceMask(Mask, NumSrcElts))
1966     return false;
1967   for (int I = 0, E = Mask.size(); I < E; ++I) {
1968     if (Mask[I] == -1)
1969       continue;
1970     if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
1971       return false;
1972   }
1973   return true;
1974 }
1975 
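// For example, with 4-element sources, <0, 5, 2, 7> is a select mask: each
// result lane I comes from lane I of one source or the other, analogous to a
// per-lane select between the two inputs.
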
1976 bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask, int NumSrcElts) {
1977   // Example masks that will return true:
1978   // v1 = <a, b, c, d>
1979   // v2 = <e, f, g, h>
1980   // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
1981   // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
1982 
1983   if (Mask.size() != static_cast<unsigned>(NumSrcElts))
1984     return false;
1985   // 1. The number of elements in the mask must be a power-of-2 and at least 2.
1986   int Sz = Mask.size();
1987   if (Sz < 2 || !isPowerOf2_32(Sz))
1988     return false;
1989 
1990   // 2. The first element of the mask must be either a 0 or a 1.
1991   if (Mask[0] != 0 && Mask[0] != 1)
1992     return false;
1993 
1994   // 3. The difference between the first 2 elements must be equal to the
1995   // number of elements in the mask.
1996   if ((Mask[1] - Mask[0]) != NumSrcElts)
1997     return false;
1998 
1999   // 4. The difference between consecutive even-numbered and odd-numbered
2000   // elements must be equal to 2.
2001   for (int I = 2; I < Sz; ++I) {
2002     int MaskEltVal = Mask[I];
2003     if (MaskEltVal == -1)
2004       return false;
2005     int MaskEltPrevVal = Mask[I - 2];
2006     if (MaskEltVal - MaskEltPrevVal != 2)
2007       return false;
2008   }
2009   return true;
2010 }
2011 
2012 bool ShuffleVectorInst::isSpliceMask(ArrayRef<int> Mask, int NumSrcElts,
2013                                      int &Index) {
2014   if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2015     return false;
2016   // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2017   int StartIndex = -1;
2018   for (int I = 0, E = Mask.size(); I != E; ++I) {
2019     int MaskEltVal = Mask[I];
2020     if (MaskEltVal == -1)
2021       continue;
2022 
2023     if (StartIndex == -1) {
2024       // Don't support a StartIndex that begins in the second input, or if the
2025       // first non-undef index would access below the StartIndex.
2026       if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
2027         return false;
2028 
2029       StartIndex = MaskEltVal - I;
2030       continue;
2031     }
2032 
2033     // Splice is sequential starting from StartIndex.
2034     if (MaskEltVal != (StartIndex + I))
2035       return false;
2036   }
2037 
2038   if (StartIndex == -1)
2039     return false;
2040 
2041   // NOTE: This accepts StartIndex == 0 (COPY).
2042   Index = StartIndex;
2043   return true;
2044 }
2045 
2046 bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
2047                                                int NumSrcElts, int &Index) {
2048   // Must extract from a single source.
2049   if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
2050     return false;
2051 
2052   // Must be smaller (else this is an Identity shuffle).
2053   if (NumSrcElts <= (int)Mask.size())
2054     return false;
2055 
2056   // Find start of extraction, accounting that we may start with an UNDEF.
2057   int SubIndex = -1;
2058   for (int i = 0, e = Mask.size(); i != e; ++i) {
2059     int M = Mask[i];
2060     if (M < 0)
2061       continue;
2062     int Offset = (M % NumSrcElts) - i;
2063     if (0 <= SubIndex && SubIndex != Offset)
2064       return false;
2065     SubIndex = Offset;
2066   }
2067 
2068   if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2069     Index = SubIndex;
2070     return true;
2071   }
2072   return false;
2073 }
2074 
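// For example, with 8-element sources, the mask <2, 3, 4, 5> extracts the
// 4-element subvector of the first source starting at Index == 2.
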
2075 bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,
2076                                               int NumSrcElts, int &NumSubElts,
2077                                               int &Index) {
2078   int NumMaskElts = Mask.size();
2079 
2080   // Don't try to match if we're shuffling to a smaller size.
2081   if (NumMaskElts < NumSrcElts)
2082     return false;
2083 
2084   // TODO: We don't recognize self-insertion/widening.
2085   if (isSingleSourceMaskImpl(Mask, NumSrcElts))
2086     return false;
2087 
2088   // Determine which mask elements are attributed to which source.
2089   APInt UndefElts = APInt::getZero(NumMaskElts);
2090   APInt Src0Elts = APInt::getZero(NumMaskElts);
2091   APInt Src1Elts = APInt::getZero(NumMaskElts);
2092   bool Src0Identity = true;
2093   bool Src1Identity = true;
2094 
2095   for (int i = 0; i != NumMaskElts; ++i) {
2096     int M = Mask[i];
2097     if (M < 0) {
2098       UndefElts.setBit(i);
2099       continue;
2100     }
2101     if (M < NumSrcElts) {
2102       Src0Elts.setBit(i);
2103       Src0Identity &= (M == i);
2104       continue;
2105     }
2106     Src1Elts.setBit(i);
2107     Src1Identity &= (M == (i + NumSrcElts));
2108   }
2109   assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2110          "unknown shuffle elements");
2111   assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
2112          "2-source shuffle not found");
2113 
2114   // Determine lo/hi span ranges.
2115   // TODO: How should we handle undefs at the start of subvector insertions?
2116   int Src0Lo = Src0Elts.countr_zero();
2117   int Src1Lo = Src1Elts.countr_zero();
2118   int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
2119   int Src1Hi = NumMaskElts - Src1Elts.countl_zero();
2120 
2121   // If src0 is in place, see if the src1 elements are in place within their
2122   // own span.
2123   if (Src0Identity) {
2124     int NumSub1Elts = Src1Hi - Src1Lo;
2125     ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
2126     if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
2127       NumSubElts = NumSub1Elts;
2128       Index = Src1Lo;
2129       return true;
2130     }
2131   }
2132 
2133   // If src1 is in place, see if the src0 elements are in place within their
2134   // own span.
2135   if (Src1Identity) {
2136     int NumSub0Elts = Src0Hi - Src0Lo;
2137     ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
2138     if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
2139       NumSubElts = NumSub0Elts;
2140       Index = Src0Lo;
2141       return true;
2142     }
2143   }
2144 
2145   return false;
2146 }
2147 
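// For example, with 4-element sources, the mask <0, 4, 5, 3> inserts the
// first two elements of the second source into the first source at offset 1
// (NumSubElts == 2, Index == 1).
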
2148 bool ShuffleVectorInst::isIdentityWithPadding() const {
2149   // FIXME: Not currently possible to express a shuffle mask for a scalable
2150   // vector for this case.
2151   if (isa<ScalableVectorType>(getType()))
2152     return false;
2153 
2154   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2155   int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2156   if (NumMaskElts <= NumOpElts)
2157     return false;
2158 
2159   // The first part of the mask must choose elements from exactly 1 source op.
2160   ArrayRef<int> Mask = getShuffleMask();
2161   if (!isIdentityMaskImpl(Mask, NumOpElts))
2162     return false;
2163 
2164   // All extending must be with undef elements.
2165   for (int i = NumOpElts; i < NumMaskElts; ++i)
2166     if (Mask[i] != -1)
2167       return false;
2168 
2169   return true;
2170 }
2171 
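// For example, "shufflevector v1, v2 <0, 1, undef, undef>" on 2-element
// operands widens v1 to 4 elements, padding the extra lanes with undef.
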
2172 bool ShuffleVectorInst::isIdentityWithExtract() const {
2173   // FIXME: Not currently possible to express a shuffle mask for a scalable
2174   // vector for this case.
2175   if (isa<ScalableVectorType>(getType()))
2176     return false;
2177 
2178   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2179   int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2180   if (NumMaskElts >= NumOpElts)
2181     return false;
2182 
2183   return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2184 }
2185 
2186 bool ShuffleVectorInst::isConcat() const {
2187   // Vector concatenation is differentiated from identity with padding.
2188   if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
2189     return false;
2190 
2191   // FIXME: Not currently possible to express a shuffle mask for a scalable
2192   // vector for this case.
2193   if (isa<ScalableVectorType>(getType()))
2194     return false;
2195 
2196   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2197   int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2198   if (NumMaskElts != NumOpElts * 2)
2199     return false;
2200 
2201   // Use the mask length rather than the operands' vector lengths here. We
2202   // already know that the shuffle returns a vector twice as long as the inputs,
2203   // and neither input is an undef vector. If the mask picks consecutive
2204   // elements from both inputs, then this is a concatenation of the inputs.
2205   return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2206 }
2207 
2208 static bool isReplicationMaskWithParams(ArrayRef<int> Mask,
2209                                         int ReplicationFactor, int VF) {
2210   assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2211          "Unexpected mask size.");
2212 
2213   for (int CurrElt : seq(VF)) {
2214     ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2215     assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2216            "Run out of mask?");
2217     Mask = Mask.drop_front(ReplicationFactor);
2218     if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2219           return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2220         }))
2221       return false;
2222   }
2223   assert(Mask.empty() && "Did not consume the whole mask?");
2224 
2225   return true;
2226 }
2227 
2228 bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,
2229                                           int &ReplicationFactor, int &VF) {
2230   // undef-less case is trivial.
2231   if (!llvm::is_contained(Mask, PoisonMaskElem)) {
2232     ReplicationFactor =
2233         Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2234     if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2235       return false;
2236     VF = Mask.size() / ReplicationFactor;
2237     return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2238   }
2239 
2240   // However, if the mask contains undefs, we have to enumerate possible tuples
2241   // and pick one. There are bounds on replication factor: [1, mask size]
2242   // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2243   // Additionally, mask size is a replication factor multiplied by vector size,
2244   // which further significantly reduces the search space.
2245 
2246   // Before doing that, let's perform basic correctness checking first.
2247   int Largest = -1;
2248   for (int MaskElt : Mask) {
2249     if (MaskElt == PoisonMaskElem)
2250       continue;
2251     // Elements must be in non-decreasing order.
2252     if (MaskElt < Largest)
2253       return false;
2254     Largest = std::max(Largest, MaskElt);
2255   }
2256 
2257   // Prefer larger replication factor if all else equal.
2258   for (int PossibleReplicationFactor :
2259        reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2260     if (Mask.size() % PossibleReplicationFactor != 0)
2261       continue;
2262     int PossibleVF = Mask.size() / PossibleReplicationFactor;
2263     if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2264                                      PossibleVF))
2265       continue;
2266     ReplicationFactor = PossibleReplicationFactor;
2267     VF = PossibleVF;
2268     return true;
2269   }
2270 
2271   return false;
2272 }
2273 
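// For example, <0, 0, 1, 1, 2, 2> is a replication mask with
// ReplicationFactor == 2 and VF == 3. Poison elements may stand in for any
// expected value, so <0, poison, 1, 1> also matches with ReplicationFactor
// == 2 and VF == 2.
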
2274 bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2275                                           int &VF) const {
2276   // Not possible to express a shuffle mask for a scalable vector for this
2277   // case.
2278   if (isa<ScalableVectorType>(getType()))
2279     return false;
2280 
2281   VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2282   if (ShuffleMask.size() % VF != 0)
2283     return false;
2284   ReplicationFactor = ShuffleMask.size() / VF;
2285 
2286   return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2287 }
2288 
2289 bool ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF) {
2290   if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2291       Mask.size() % VF != 0)
2292     return false;
2293   for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2294     ArrayRef<int> SubMask = Mask.slice(K, VF);
2295     if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
2296       continue;
2297     SmallBitVector Used(VF, false);
2298     for (int Idx : SubMask) {
2299       if (Idx != PoisonMaskElem && Idx < VF)
2300         Used.set(Idx);
2301     }
2302     if (!Used.all())
2303       return false;
2304   }
2305   return true;
2306 }
2307 
2308 /// Return true if this shuffle mask is a one-use-single-source mask.
2309 bool ShuffleVectorInst::isOneUseSingleSourceMask(int VF) const {
2310   // Not possible to express a shuffle mask for a scalable vector for this
2311   // case.
2312   if (isa<ScalableVectorType>(getType()))
2313     return false;
2314   if (!isSingleSourceMask(ShuffleMask, VF))
2315     return false;
2316 
2317   return isOneUseSingleSourceMask(ShuffleMask, VF);
2318 }
2319 
2320 bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2321   FixedVectorType *OpTy = dyn_cast<FixedVectorType>(getOperand(0)->getType());
2322   // shuffle_vector can only interleave fixed length vectors - for scalable
2323   // vectors, see the @llvm.vector.interleave2 intrinsic
2324   if (!OpTy)
2325     return false;
2326   unsigned OpNumElts = OpTy->getNumElements();
2327 
2328   return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2329 }
2330 
2331 bool ShuffleVectorInst::isInterleaveMask(
2332     ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
2333     SmallVectorImpl<unsigned> &StartIndexes) {
2334   unsigned NumElts = Mask.size();
2335   if (NumElts % Factor)
2336     return false;
2337 
2338   unsigned LaneLen = NumElts / Factor;
2339   if (!isPowerOf2_32(LaneLen))
2340     return false;
2341 
2342   StartIndexes.resize(Factor);
2343 
2344   // Check whether each element matches the general interleaved rule, e.g.
2345   // <x, y, z, x+1, y+1, z+1, ...> for Factor == 3, ignoring undef elements
2346   // as long as defined elements match. Outer loop processes the factors.
2347   unsigned I = 0, J;
2348   for (; I < Factor; I++) {
2349     unsigned SavedLaneValue;
2350     unsigned SavedNoUndefs = 0;
2351 
2352     // Inner loop processes consecutive accesses (x, x+1... in the example)
2353     for (J = 0; J < LaneLen - 1; J++) {
2354       // Lane computes x's position in the Mask
2355       unsigned Lane = J * Factor + I;
2356       unsigned NextLane = Lane + Factor;
2357       int LaneValue = Mask[Lane];
2358       int NextLaneValue = Mask[NextLane];
2359 
2360       // If both are defined, values must be sequential
2361       if (LaneValue >= 0 && NextLaneValue >= 0 &&
2362           LaneValue + 1 != NextLaneValue)
2363         break;
2364 
2365       // If the next value is undef, save the current one as reference
2366       if (LaneValue >= 0 && NextLaneValue < 0) {
2367         SavedLaneValue = LaneValue;
2368         SavedNoUndefs = 1;
2369       }
2370 
2371       // Undefs are allowed, but defined elements must still be consecutive:
2372       // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
2373       // Verify this by storing the last non-undef followed by an undef
2374       // Check that following non-undef masks are incremented with the
2375       // corresponding distance.
2376       if (SavedNoUndefs > 0 && LaneValue < 0) {
2377         SavedNoUndefs++;
2378         if (NextLaneValue >= 0 &&
2379             SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
2380           break;
2381       }
2382     }
2383 
2384     if (J < LaneLen - 1)
2385       return false;
2386 
2387     int StartMask = 0;
2388     if (Mask[I] >= 0) {
2389       // The first element of this lane (J == 0) is defined; use it.
2390       StartMask = Mask[I];
2391     } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
2392       // StartMask defined by the last value in lane
2393       StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
2394     } else if (SavedNoUndefs > 0) {
2395       // StartMask derived from the last non-undef value saved in the J loop
2396       StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
2397     }
2398     // else StartMask remains set to 0, i.e. all elements are undefs
2399 
2400     if (StartMask < 0)
2401       return false;
2402     // We must stay within the vectors; this case can happen with undefs.
2403     if (StartMask + LaneLen > NumInputElts)
2404       return false;
2405 
2406     StartIndexes[I] = StartMask;
2407   }
2408 
2409   return true;
2410 }
2411 
2412 /// Check if the mask is a DE-interleave mask of the given factor
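// For example, <0, 4, 1, 5, 2, 6, 3, 7> interleaves two 4-element sources
// with Factor == 2 (NumInputElts == 8), yielding StartIndexes == {0, 4}.
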
2413 /// \p Factor like:
2414 ///     <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
2415 bool ShuffleVectorInst::isDeInterleaveMaskOfFactor(ArrayRef<int> Mask,
2416                                                    unsigned Factor,
2417                                                    unsigned &Index) {
2418   // Check all potential start indices from 0 to (Factor - 1).
2419   for (unsigned Idx = 0; Idx < Factor; Idx++) {
2420     unsigned I = 0;
2421 
2422     // Check that elements are in ascending order by Factor. Ignore undef
2423     // elements.
2424     for (; I < Mask.size(); I++)
2425       if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
2426         break;
2427 
2428     if (I == Mask.size()) {
2429       Index = Idx;
2430       return true;
2431     }
2432   }
2433 
2434   return false;
2435 }
2436 
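// For example, <1, 3, 5, 7> de-interleaves the odd lanes of an 8-element
// input with Factor == 2, setting Index to 1.
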
2437 /// Try to lower a vector shuffle as a bit rotation.
2438 ///
2439 /// Look for a repeated rotation pattern in each sub group.
2440 /// Returns an element-wise left bit rotation amount or -1 if failed.
2441 static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
2442   int NumElts = Mask.size();
2443   assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
2444 
2445   int RotateAmt = -1;
2446   for (int i = 0; i != NumElts; i += NumSubElts) {
2447     for (int j = 0; j != NumSubElts; ++j) {
2448       int M = Mask[i + j];
2449       if (M < 0)
2450         continue;
2451       if (M < i || M >= i + NumSubElts)
2452         return -1;
2453       int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
2454       if (0 <= RotateAmt && Offset != RotateAmt)
2455         return -1;
2456       RotateAmt = Offset;
2457     }
2458   }
2459   return RotateAmt;
2460 }
2461 
2462 bool ShuffleVectorInst::isBitRotateMask(
2463     ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
2464     unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
2465   for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
2466     int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
2467     if (EltRotateAmt < 0)
2468       continue;
2469     RotateAmt = EltRotateAmt * EltSizeInBits;
2470     return true;
2471   }
2472 
2473   return false;
2474 }
2475 
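// For example, with EltSizeInBits == 8, the mask <1, 0, 3, 2, 5, 4, 7, 6>
// swaps the bytes within each 16-bit group, i.e. a rotate by 8 bits
// (NumSubElts == 2, RotateAmt == 8).
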
2476 //===----------------------------------------------------------------------===//
2477 //                             InsertValueInst Class
2478 //===----------------------------------------------------------------------===//
2479 
2480 void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2481                            const Twine &Name) {
2482   assert(getNumOperands() == 2 && "NumOperands not initialized?");
2483 
2484   // There's no fundamental reason why we require at least one index
2485   // (historically, the old iterator-based interface made zero indices
2486   // awkward; see getelementptr's init routine). But there's no present
2487   // need to support it.
2488   assert(!Idxs.empty() && "InsertValueInst must have at least one index");
2489 
2490   assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
2491          Val->getType() && "Inserted value must match indexed type!");
2492   Op<0>() = Agg;
2493   Op<1>() = Val;
2494 
2495   Indices.append(Idxs.begin(), Idxs.end());
2496   setName(Name);
2497 }
2498 
2499 InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
2500     : Instruction(IVI.getType(), InsertValue, AllocMarker),
2501       Indices(IVI.Indices) {
2502   Op<0>() = IVI.getOperand(0);
2503   Op<1>() = IVI.getOperand(1);
2504   SubclassOptionalData = IVI.SubclassOptionalData;
2505 }
2506 
2507 //===----------------------------------------------------------------------===//
2508 //                             ExtractValueInst Class
2509 //===----------------------------------------------------------------------===//
2510 
2511 void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2512   assert(getNumOperands() == 1 && "NumOperands not initialized?");
2513 
2514   // There's no fundamental reason why we require at least one index.
2515   // But there's no present need to support it.
2516   assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2517 
2518   Indices.append(Idxs.begin(), Idxs.end());
2519   setName(Name);
2520 }
2521 
2522 ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
2523     : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0),
2524                        (BasicBlock *)nullptr),
2525       Indices(EVI.Indices) {
2526   SubclassOptionalData = EVI.SubclassOptionalData;
2527 }
2528 
2529 // getIndexedType - Returns the type of the element that would be extracted
2530 // with an extractvalue instruction with the specified parameters.
2531 //
2532 // A null type is returned if the indices are invalid for the specified
2533 // aggregate type.
2534 //
2535 Type *ExtractValueInst::getIndexedType(Type *Agg,
2536                                        ArrayRef<unsigned> Idxs) {
2537   for (unsigned Index : Idxs) {
2538     // We can't use CompositeType::indexValid(Index) here.
2539     // indexValid() always returns true for arrays because getelementptr allows
2540     // out-of-bounds indices. Since we don't allow those for extractvalue and
2541     // insertvalue we need to check array indexing manually.
2542     // Since the only other types we can index into are struct types it's just
2543     // as easy to check those manually as well.
2544     if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2545       if (Index >= AT->getNumElements())
2546         return nullptr;
2547       Agg = AT->getElementType();
2548     } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
2549       if (Index >= ST->getNumElements())
2550         return nullptr;
2551       Agg = ST->getElementType(Index);
2552     } else {
2553       // Not a valid type to index into.
2554       return nullptr;
2555     }
2556   }
2557   return Agg;
2558 }
2559 
2560 //===----------------------------------------------------------------------===//
2561 //                             UnaryOperator Class
2562 //===----------------------------------------------------------------------===//
2563 
2564 UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
2565                              const Twine &Name, InsertPosition InsertBefore)
2566     : UnaryInstruction(Ty, iType, S, InsertBefore) {
2567   Op<0>() = S;
2568   setName(Name);
2569   AssertOK();
2570 }
2571 
2572 UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, const Twine &Name,
2573                                      InsertPosition InsertBefore) {
2574   return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
2575 }
2576 
2577 void UnaryOperator::AssertOK() {
2578   Value *LHS = getOperand(0);
2579   (void)LHS; // Silence warnings.
2580 #ifndef NDEBUG
2581   switch (getOpcode()) {
2582   case FNeg:
2583     assert(getType() == LHS->getType() &&
2584            "Unary operation should return same type as operand!");
2585     assert(getType()->isFPOrFPVectorTy() &&
2586            "Tried to create a floating-point operation on a "
2587            "non-floating-point type!");
2588     break;
2589   default: llvm_unreachable("Invalid opcode provided");
2590   }
2591 #endif
2592 }
2593 
2594 //===----------------------------------------------------------------------===//
2595 //                             BinaryOperator Class
2596 //===----------------------------------------------------------------------===//
2597 
2598 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
2599                                const Twine &Name, InsertPosition InsertBefore)
2600     : Instruction(Ty, iType, AllocMarker, InsertBefore) {
2601   Op<0>() = S1;
2602   Op<1>() = S2;
2603   setName(Name);
2604   AssertOK();
2605 }
2606 
2607 void BinaryOperator::AssertOK() {
2608   Value *LHS = getOperand(0), *RHS = getOperand(1);
2609   (void)LHS; (void)RHS; // Silence warnings.
2610   assert(LHS->getType() == RHS->getType() &&
2611          "Binary operator operand types must match!");
2612 #ifndef NDEBUG
2613   switch (getOpcode()) {
2614   case Add: case Sub:
2615   case Mul:
2616     assert(getType() == LHS->getType() &&
2617            "Arithmetic operation should return same type as operands!");
2618     assert(getType()->isIntOrIntVectorTy() &&
2619            "Tried to create an integer operation on a non-integer type!");
2620     break;
2621   case FAdd: case FSub:
2622   case FMul:
2623     assert(getType() == LHS->getType() &&
2624            "Arithmetic operation should return same type as operands!");
2625     assert(getType()->isFPOrFPVectorTy() &&
2626            "Tried to create a floating-point operation on a "
2627            "non-floating-point type!");
2628     break;
2629   case UDiv:
2630   case SDiv:
2631     assert(getType() == LHS->getType() &&
2632            "Arithmetic operation should return same type as operands!");
2633     assert(getType()->isIntOrIntVectorTy() &&
2634            "Incorrect operand type (not integer) for S/UDIV");
2635     break;
2636   case FDiv:
2637     assert(getType() == LHS->getType() &&
2638            "Arithmetic operation should return same type as operands!");
2639     assert(getType()->isFPOrFPVectorTy() &&
2640            "Incorrect operand type (not floating point) for FDIV");
2641     break;
2642   case URem:
2643   case SRem:
2644     assert(getType() == LHS->getType() &&
2645            "Arithmetic operation should return same type as operands!");
2646     assert(getType()->isIntOrIntVectorTy() &&
2647            "Incorrect operand type (not integer) for S/UREM");
2648     break;
2649   case FRem:
2650     assert(getType() == LHS->getType() &&
2651            "Arithmetic operation should return same type as operands!");
2652     assert(getType()->isFPOrFPVectorTy() &&
2653            "Incorrect operand type (not floating point) for FREM");
2654     break;
2655   case Shl:
2656   case LShr:
2657   case AShr:
2658     assert(getType() == LHS->getType() &&
2659            "Shift operation should return same type as operands!");
2660     assert(getType()->isIntOrIntVectorTy() &&
2661            "Tried to create a shift operation on a non-integral type!");
2662     break;
2663   case And: case Or:
2664   case Xor:
2665     assert(getType() == LHS->getType() &&
2666            "Logical operation should return same type as operands!");
2667     assert(getType()->isIntOrIntVectorTy() &&
2668            "Tried to create a logical operation on a non-integral type!");
2669     break;
2670   default: llvm_unreachable("Invalid opcode provided");
2671   }
2672 #endif
2673 }
2674 
2675 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
2676                                        const Twine &Name,
2677                                        InsertPosition InsertBefore) {
2678   assert(S1->getType() == S2->getType() &&
2679          "Cannot create binary operator with two operands of differing type!");
2680   return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
2681 }
2682 
2683 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
2684                                           InsertPosition InsertBefore) {
2685   Value *Zero = ConstantInt::get(Op->getType(), 0);
2686   return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
2687                             InsertBefore);
2688 }
2689 
2690 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
2691                                              InsertPosition InsertBefore) {
2692   Value *Zero = ConstantInt::get(Op->getType(), 0);
2693   return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
2694 }
2695 
2696 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
2697                                           InsertPosition InsertBefore) {
2698   Constant *C = Constant::getAllOnesValue(Op->getType());
2699   return new BinaryOperator(Instruction::Xor, Op, C,
2700                             Op->getType(), Name, InsertBefore);
2701 }
2702 
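// LLVM IR has no dedicated neg/not instructions, so CreateNeg materializes
// "sub 0, X" and CreateNot materializes "xor X, -1". For example,
//   BinaryOperator::CreateNeg(X, "neg", InsertPt)
// builds the same instruction as
//   BinaryOperator::Create(Instruction::Sub, Zero, X, "neg", InsertPt)
// with Zero = ConstantInt::get(X->getType(), 0).
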
2703 // Exchange the two operands of this instruction. This is safe to use on any
2704 // binary instruction and does not modify the semantics of the instruction.
2705 // Returns true (without swapping) if the instruction is not commutative and
2706 // the operands therefore cannot be exchanged.
2707 bool BinaryOperator::swapOperands() {
2708   if (!isCommutative())
2709     return true; // Can't commute operands
2710   Op<0>().swap(Op<1>());
2711   return false;
2712 }
2713 
2714 //===----------------------------------------------------------------------===//
2715 //                             FPMathOperator Class
2716 //===----------------------------------------------------------------------===//
2717 
2718 float FPMathOperator::getFPAccuracy() const {
2719   const MDNode *MD =
2720       cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
2721   if (!MD)
2722     return 0.0;
2723   ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
2724   return Accuracy->getValueAPF().convertToFloat();
2725 }
2726 
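// The accuracy comes from !fpmath metadata, e.g.:
//   %d = fdiv float %a, %b, !fpmath !0
//   !0 = !{ float 2.5 }   ; maximum relative error of 2.5 ULPs
// When no !fpmath is attached, 0.0 is returned.
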
2727 //===----------------------------------------------------------------------===//
2728 //                                CastInst Class
2729 //===----------------------------------------------------------------------===//
2730 
2731 // Just determine if this cast only deals with integral->integral conversion.
2732 bool CastInst::isIntegerCast() const {
2733   switch (getOpcode()) {
2734     default: return false;
2735     case Instruction::ZExt:
2736     case Instruction::SExt:
2737     case Instruction::Trunc:
2738       return true;
2739     case Instruction::BitCast:
2740       return getOperand(0)->getType()->isIntegerTy() &&
2741         getType()->isIntegerTy();
2742   }
2743 }
2744 
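// Note that ptrtoint and inttoptr are deliberately excluded even though one
// side of each is an integer; only trunc/zext/sext and integer-to-integer
// bitcasts count as integer casts here.
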
2745 /// This function determines if the CastInst does not require any bits to be
2746 /// changed in order to effect the cast. Essentially, it identifies cases where
2747 /// no code gen is necessary for the cast, hence the name no-op cast.  For
2748 /// example, the following are all no-op casts:
2749 /// # bitcast i32* %x to i8*
2750 /// # bitcast <2 x i32> %x to <4 x i16>
2751 /// # ptrtoint i32* %x to i32     ; on 32-bit platforms only
2752 /// Determine if the described cast is a no-op.
2753 bool CastInst::isNoopCast(Instruction::CastOps Opcode,
2754                           Type *SrcTy,
2755                           Type *DestTy,
2756                           const DataLayout &DL) {
2757   assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
2758   switch (Opcode) {
2759     default: llvm_unreachable("Invalid CastOp");
2760     case Instruction::Trunc:
2761     case Instruction::ZExt:
2762     case Instruction::SExt:
2763     case Instruction::FPTrunc:
2764     case Instruction::FPExt:
2765     case Instruction::UIToFP:
2766     case Instruction::SIToFP:
2767     case Instruction::FPToUI:
2768     case Instruction::FPToSI:
2769     case Instruction::AddrSpaceCast:
2770       // TODO: Target information may give a more accurate answer here.
2771       return false;
2772     case Instruction::BitCast:
2773       return true;  // BitCast never modifies bits.
2774     case Instruction::PtrToInt:
2775       return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
2776              DestTy->getScalarSizeInBits();
2777     case Instruction::IntToPtr:
2778       return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
2779              SrcTy->getScalarSizeInBits();
2780   }
2781 }
2782 
2783 bool CastInst::isNoopCast(const DataLayout &DL) const {
2784   return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
2785 }
2786 
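// For example, on a target with 64-bit pointers, "ptrtoint ptr %p to i64" is
// a no-op (no bits change), while "ptrtoint ptr %p to i32" is not, since it
// truncates the pointer value.
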
2787 /// This function determines if a pair of casts can be eliminated and what
2788 /// opcode should be used in the elimination. This assumes that there are two
2789 /// instructions like this:
2790 /// *  %F = firstOpcode SrcTy %x to MidTy
2791 /// *  %S = secondOpcode MidTy %F to DstTy
2792 /// The function returns a resultOpcode so these two casts can be replaced with:
2793 /// *  %Replacement = resultOpcode %SrcTy %x to DstTy
2794 /// If no such cast is permitted, the function returns 0.
2795 unsigned CastInst::isEliminableCastPair(
2796   Instruction::CastOps firstOp, Instruction::CastOps secondOp,
2797   Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
2798   Type *DstIntPtrTy) {
2799   // Define the 169 possibilities for these two cast instructions. The values
2800   // in this matrix determine what to do in a given situation and select the
2801   // case in the switch below.  The rows correspond to firstOp, the columns
2802   // correspond to secondOp.  In looking at the table below, keep in mind
2803   // the following cast properties:
2804   //
2805   //          Size Compare       Source               Destination
2806   // Operator  Src ? Size   Type       Sign         Type       Sign
2807   // -------- ------------ -------------------   ---------------------
2808   // TRUNC         >       Integer      Any        Integral     Any
2809   // ZEXT          <       Integral   Unsigned     Integer      Any
2810   // SEXT          <       Integral    Signed      Integer      Any
2811   // FPTOUI       n/a      FloatPt      n/a        Integral   Unsigned
2812   // FPTOSI       n/a      FloatPt      n/a        Integral    Signed
2813   // UITOFP       n/a      Integral   Unsigned     FloatPt      n/a
2814   // SITOFP       n/a      Integral    Signed      FloatPt      n/a
2815   // FPTRUNC       >       FloatPt      n/a        FloatPt      n/a
2816   // FPEXT         <       FloatPt      n/a        FloatPt      n/a
2817   // PTRTOINT     n/a      Pointer      n/a        Integral   Unsigned
2818   // INTTOPTR     n/a      Integral   Unsigned     Pointer      n/a
2819   // BITCAST       =       FirstClass   n/a       FirstClass    n/a
2820   // ADDRSPCST    n/a      Pointer      n/a        Pointer      n/a
2821   //
2822   // NOTE: some transforms are safe, but we consider them to be unprofitable.
2823   // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
2824   // into "fptoui double to i64", but this loses information about the range
2825   // of the produced value (we no longer know the top-part is all zeros).
2826   // Further, this conversion is often much more expensive on typical hardware,
2827   // and causes issues when building libgcc.  We disallow fptosi+sext for the
2828   // same reason.
2829   const unsigned numCastOps =
2830     Instruction::CastOpsEnd - Instruction::CastOpsBegin;
2831   static const uint8_t CastResults[numCastOps][numCastOps] = {
2832     // T        F  F  U  S  F  F  P  I  B  A  -+
2833     // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
2834     // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
2835     // N  X  X  U  S  F  F  N  X  N  2  V  V   |
2836     // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
2837     {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc         -+
2838     {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt           |
2839     {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt           |
2840     {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI         |
2841     {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI         |
2842     { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP         +- firstOp
2843     { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP         |
2844     { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc        |
2845     { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt          |
2846     {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt       |
2847     { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr       |
2848     {  5, 5, 5, 0, 0, 5, 5, 0, 0,16, 5, 1,14}, // BitCast        |
2849     {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
2850   };
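  // Worked example of reading the table (illustrative only): to fold
  //   %m = zext i8 %x to i32
  //   %r = trunc i32 %m to i16
  // look up row ZExt, column Trunc, which selects case 8 below. Since the
  // source is narrower than the destination (8 < 16 bits), the pair is
  // replaced by a single "zext i8 %x to i16".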
2851 
2852   // TODO: This logic could be encoded into the table above and handled in the
2853   // switch below.
2854   // If either of the casts is a bitcast from scalar to vector, disallow the
2855   // merging. However, a pair of bitcasts is always allowed.
2856   bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
2857   bool IsSecondBitcast = (secondOp == Instruction::BitCast);
2858   bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
2859 
2860   // Check if any of the casts convert scalars <-> vectors.
2861   if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
2862       (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
2863     if (!AreBothBitcasts)
2864       return 0;
2865 
2866   int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
2867                             [secondOp-Instruction::CastOpsBegin];
2868   switch (ElimCase) {
2869     case 0:
2870       // Categorically disallowed.
2871       return 0;
2872     case 1:
2873       // Allowed, use first cast's opcode.
2874       return firstOp;
2875     case 2:
2876       // Allowed, use second cast's opcode.
2877       return secondOp;
2878     case 3:
2879       // No-op cast in second op implies firstOp as long as the DestTy
2880       // is integer and we are not converting between a vector and a
2881       // non-vector type.
2882       if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
2883         return firstOp;
2884       return 0;
2885     case 4:
2886       // No-op cast in second op implies firstOp as long as the DestTy
2887       // matches MidTy.
2888       if (DstTy == MidTy)
2889         return firstOp;
2890       return 0;
2891     case 5:
2892       // No-op cast in first op implies secondOp as long as the SrcTy
2893       // is an integer.
2894       if (SrcTy->isIntegerTy())
2895         return secondOp;
2896       return 0;
2897     case 7: {
2898       // Bail out if the inttoptr/ptrtoint roundtrip optimization is disabled.
2899       if (DisableI2pP2iOpt)
2900         return 0;
2901 
2902       // Cannot simplify if address spaces are different!
2903       if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2904         return 0;
2905 
2906       unsigned MidSize = MidTy->getScalarSizeInBits();
2907       // We can still fold this without knowing the actual sizes as long as we
2908       // know that the intermediate pointer is the largest possible
2909       // pointer size.
2910       // FIXME: Is this always true?
2911       if (MidSize == 64)
2912         return Instruction::BitCast;
2913 
2914       // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
2915       if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
2916         return 0;
2917       unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
2918       if (MidSize >= PtrSize)
2919         return Instruction::BitCast;
2920       return 0;
2921     }
2922     case 8: {
2923       // ext, trunc -> bitcast,    if the SrcTy and DstTy are the same
2924       // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
2925       // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
2926       unsigned SrcSize = SrcTy->getScalarSizeInBits();
2927       unsigned DstSize = DstTy->getScalarSizeInBits();
2928       if (SrcTy == DstTy)
2929         return Instruction::BitCast;
2930       if (SrcSize < DstSize)
2931         return firstOp;
2932       if (SrcSize > DstSize)
2933         return secondOp;
2934       return 0;
2935     }
2936     case 9:
2937       // zext, sext -> zext, because sext can't sign extend after zext
2938       return Instruction::ZExt;
2939     case 11: {
2940       // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
2941       if (!MidIntPtrTy)
2942         return 0;
2943       unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
2944       unsigned SrcSize = SrcTy->getScalarSizeInBits();
2945       unsigned DstSize = DstTy->getScalarSizeInBits();
2946       if (SrcSize <= PtrSize && SrcSize == DstSize)
2947         return Instruction::BitCast;
2948       return 0;
2949     }
2950     case 12:
2951       // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
2952       // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
2953       if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2954         return Instruction::AddrSpaceCast;
2955       return Instruction::BitCast;
2956     case 13:
2957       // FIXME: this state can be merged with (1), but the following assert
2958       // is useful to check the correctness of the sequence due to the semantic
2959       // change of bitcast.
2960       assert(
2961         SrcTy->isPtrOrPtrVectorTy() &&
2962         MidTy->isPtrOrPtrVectorTy() &&
2963         DstTy->isPtrOrPtrVectorTy() &&
2964         SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
2965         MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
2966         "Illegal addrspacecast, bitcast sequence!");
2967       // Allowed, use first cast's opcode
2968       return firstOp;
2969     case 14:
2970       // bitcast, addrspacecast -> addrspacecast
2971       return Instruction::AddrSpaceCast;
2972     case 15:
2973       // FIXME: this state can be merged with (1), but the following assert
2974       // is useful to check the correctness of the sequence due to the semantic
2975       // change of bitcast.
2976       assert(
2977         SrcTy->isIntOrIntVectorTy() &&
2978         MidTy->isPtrOrPtrVectorTy() &&
2979         DstTy->isPtrOrPtrVectorTy() &&
2980         MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
2981         "Illegal inttoptr, bitcast sequence!");
2982       // Allowed, use first cast's opcode
2983       return firstOp;
2984     case 16:
2985       // FIXME: this state can be merged with (2), but the following assert
2986       // is useful to check the correctness of the sequence due to the semantic
2987       // change of bitcast.
2988       assert(
2989         SrcTy->isPtrOrPtrVectorTy() &&
2990         MidTy->isPtrOrPtrVectorTy() &&
2991         DstTy->isIntOrIntVectorTy() &&
2992         SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
2993         "Illegal bitcast, ptrtoint sequence!");
2994       // Allowed, use second cast's opcode
2995       return secondOp;
2996     case 17:
2997       // (sitofp (zext x)) -> (uitofp x)
2998       return Instruction::UIToFP;
2999     case 99:
3000       // Cast combination can't happen (error in input). This is for all cases
3001       // where the MidTy is not the same for the two cast instructions.
3002       llvm_unreachable("Invalid Cast Combination");
3003     default:
3004       llvm_unreachable("Error in CastResults table!!!");
3005   }
3006 }
3007 
3008 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
3009                            const Twine &Name, InsertPosition InsertBefore) {
3010   assert(castIsValid(op, S, Ty) && "Invalid cast!");
3011   // Construct and return the appropriate CastInst subclass
3012   switch (op) {
3013   case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
3014   case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
3015   case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
3016   case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertBefore);
3017   case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);
3018   case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertBefore);
3019   case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertBefore);
3020   case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertBefore);
3021   case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertBefore);
3022   case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertBefore);
3023   case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertBefore);
3024   case BitCast:
3025     return new BitCastInst(S, Ty, Name, InsertBefore);
3026   case AddrSpaceCast:
3027     return new AddrSpaceCastInst(S, Ty, Name, InsertBefore);
3028   default:
3029     llvm_unreachable("Invalid opcode provided");
3030   }
3031 }
3032 
3033 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name,
3034                                         InsertPosition InsertBefore) {
3035   if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3036     return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3037   return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3038 }
3039 
3040 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name,
3041                                         InsertPosition InsertBefore) {
3042   if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3043     return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3044   return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3045 }
3046 
3047 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name,
3048                                          InsertPosition InsertBefore) {
3049   if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3050     return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3051   return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3052 }
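// A quick sketch of how these three helpers behave (hypothetical types):
// CreateZExtOrBitCast from i32 to i64 emits a zext and CreateSExtOrBitCast
// a sext, while CreateTruncOrBitCast from i32 to i16 emits a trunc. When
// the scalar widths already match, each helper degenerates to a bitcast.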
3053 
3054 /// Create a BitCast, PtrToInt, or AddrSpaceCast cast instruction.
3055 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty, const Twine &Name,
3056                                       InsertPosition InsertBefore) {
3057   assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3058   assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3059          "Invalid cast");
3060   assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3061   assert((!Ty->isVectorTy() ||
3062           cast<VectorType>(Ty)->getElementCount() ==
3063               cast<VectorType>(S->getType())->getElementCount()) &&
3064          "Invalid cast");
3065 
3066   if (Ty->isIntOrIntVectorTy())
3067     return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3068 
3069   return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3070 }
3071 
3072 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
3073     Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore) {
3074   assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3075   assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3076 
3077   if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3078     return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3079 
3080   return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3081 }
3082 
3083 CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
3084                                            const Twine &Name,
3085                                            InsertPosition InsertBefore) {
3086   if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3087     return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3088   if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3089     return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3090 
3091   return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3092 }
3093 
3094 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty, bool isSigned,
3095                                       const Twine &Name,
3096                                       InsertPosition InsertBefore) {
3097   assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3098          "Invalid integer cast");
3099   unsigned SrcBits = C->getType()->getScalarSizeInBits();
3100   unsigned DstBits = Ty->getScalarSizeInBits();
3101   Instruction::CastOps opcode =
3102     (SrcBits == DstBits ? Instruction::BitCast :
3103      (SrcBits > DstBits ? Instruction::Trunc :
3104       (isSigned ? Instruction::SExt : Instruction::ZExt)));
3105   return Create(opcode, C, Ty, Name, InsertBefore);
3106 }
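// For instance (hypothetical values): CreateIntegerCast from i64 to i32
// picks Trunc regardless of isSigned, i32 to i64 picks SExt or ZExt
// depending on isSigned, and i32 to i32 degenerates to a BitCast.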
3107 
3108 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty, const Twine &Name,
3109                                  InsertPosition InsertBefore) {
3110   assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3111          "Invalid cast");
3112   unsigned SrcBits = C->getType()->getScalarSizeInBits();
3113   unsigned DstBits = Ty->getScalarSizeInBits();
3114   assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
3115   Instruction::CastOps opcode =
3116     (SrcBits == DstBits ? Instruction::BitCast :
3117      (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3118   return Create(opcode, C, Ty, Name, InsertBefore);
3119 }
3120 
3121 bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3122   if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3123     return false;
3124 
3125   if (SrcTy == DestTy)
3126     return true;
3127 
3128   if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3129     if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3130       if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3131         // An element by element cast. Valid if casting the elements is valid.
3132         SrcTy = SrcVecTy->getElementType();
3133         DestTy = DestVecTy->getElementType();
3134       }
3135     }
3136   }
3137 
3138   if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3139     if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3140       return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3141     }
3142   }
3143 
3144   TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
3145   TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3146 
3147   // We could still have vectors of pointers if the number of elements doesn't
3148   // match.
3149   if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3150     return false;
3151 
3152   if (SrcBits != DestBits)
3153     return false;
3154 
3155   return true;
3156 }
3157 
3158 bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
3159                                           const DataLayout &DL) {
3160   // ptrtoint and inttoptr are not allowed on non-integral pointers
3161   if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3162     if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3163       return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3164               !DL.isNonIntegralPointerType(PtrTy));
3165   if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3166     if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3167       return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3168               !DL.isNonIntegralPointerType(PtrTy));
3169 
3170   return isBitCastable(SrcTy, DestTy);
3171 }
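// For example (assuming a DataLayout with 64-bit integral pointers in the
// relevant address space): i64 <-> ptr qualifies here even though it is not
// a bitcast, while i32 <-> ptr does not, because the integer width must
// match the pointer width exactly.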
3172 
3173 // Provide a way to get a "cast" where the cast opcode is inferred from the
3174 // types and size of the operand. This essentially parallels the logic in
3175 // the castIsValid function below.  The following invariant should hold:
3176 //   castIsValid(getCastOpcode(Val, Ty), Val, Ty)
3177 // must not assert. In other words, this produces a "correct" casting opcode
3178 // for the arguments passed to it.
3179 Instruction::CastOps
3180 CastInst::getCastOpcode(
3181   const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3182   Type *SrcTy = Src->getType();
3183 
3184   assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3185          "Only first class types are castable!");
3186 
3187   if (SrcTy == DestTy)
3188     return BitCast;
3189 
3190   // FIXME: Check address space sizes here
3191   if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3192     if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3193       if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3194         // An element by element cast.  Find the appropriate opcode based on the
3195         // element types.
3196         SrcTy = SrcVecTy->getElementType();
3197         DestTy = DestVecTy->getElementType();
3198       }
3199 
3200   // Get the bit sizes; we'll need these.
3201   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
3202   unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3203 
3204   // Run through the possibilities ...
3205   if (DestTy->isIntegerTy()) {                      // Casting to integral
3206     if (SrcTy->isIntegerTy()) {                     // Casting from integral
3207       if (DestBits < SrcBits)
3208         return Trunc;                               // int -> smaller int
3209       else if (DestBits > SrcBits) {                // it's an extension
3210         if (SrcIsSigned)
3211           return SExt;                              // signed -> SEXT
3212         else
3213           return ZExt;                              // unsigned -> ZEXT
3214       } else {
3215         return BitCast;                             // Same size, No-op cast
3216       }
3217     } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
3218       if (DestIsSigned)
3219         return FPToSI;                              // FP -> sint
3220       else
3221         return FPToUI;                              // FP -> uint
3222     } else if (SrcTy->isVectorTy()) {
3223       assert(DestBits == SrcBits &&
3224              "Casting vector to integer of different width");
3225       return BitCast;                             // Same size, no-op cast
3226     } else {
3227       assert(SrcTy->isPointerTy() &&
3228              "Casting from a value that is not first-class type");
3229       return PtrToInt;                              // ptr -> int
3230     }
3231   } else if (DestTy->isFloatingPointTy()) {         // Casting to floating pt
3232     if (SrcTy->isIntegerTy()) {                     // Casting from integral
3233       if (SrcIsSigned)
3234         return SIToFP;                              // sint -> FP
3235       else
3236         return UIToFP;                              // uint -> FP
3237     } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
3238       if (DestBits < SrcBits) {
3239         return FPTrunc;                             // FP -> smaller FP
3240       } else if (DestBits > SrcBits) {
3241         return FPExt;                               // FP -> larger FP
3242       } else  {
3243         return BitCast;                             // same size, no-op cast
3244       }
3245     } else if (SrcTy->isVectorTy()) {
3246       assert(DestBits == SrcBits &&
3247              "Casting vector to floating point of different width");
3248       return BitCast;                             // same size, no-op cast
3249     }
3250     llvm_unreachable("Casting pointer or non-first class to float");
3251   } else if (DestTy->isVectorTy()) {
3252     assert(DestBits == SrcBits &&
3253            "Illegal cast to vector (wrong type or size)");
3254     return BitCast;
3255   } else if (DestTy->isPointerTy()) {
3256     if (SrcTy->isPointerTy()) {
3257       if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3258         return AddrSpaceCast;
3259       return BitCast;                               // ptr -> ptr
3260     } else if (SrcTy->isIntegerTy()) {
3261       return IntToPtr;                              // int -> ptr
3262     }
3263     llvm_unreachable("Casting pointer to other than pointer or int");
3264   }
3265   llvm_unreachable("Casting to type that is not first-class");
3266 }
3267 
3268 //===----------------------------------------------------------------------===//
3269 //                    CastInst SubClass Constructors
3270 //===----------------------------------------------------------------------===//
3271 
3272 /// Check that the construction parameters for a CastInst are correct. This
3273 /// could be broken out into the separate constructors but it is useful to have
3274 /// it in one place and to eliminate the redundant code for getting the sizes
3275 /// of the types involved.
3276 bool
3277 CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {
3278   if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3279       SrcTy->isAggregateType() || DstTy->isAggregateType())
3280     return false;
3281 
3282   // Get the size of the types in bits, and note whether we are dealing
3283   // with vector types; we'll need this later.
3284   bool SrcIsVec = isa<VectorType>(SrcTy);
3285   bool DstIsVec = isa<VectorType>(DstTy);
3286   unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
3287   unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
3288 
3289   // If these are vector types, get the lengths of the vectors (using zero for
3290   // scalar types means that checking that vector lengths match also checks that
3291   // scalars are not being converted to vectors or vectors to scalars).
3292   ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3293                                 : ElementCount::getFixed(0);
3294   ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3295                                 : ElementCount::getFixed(0);
3296 
3297   // Switch on the opcode provided
3298   switch (op) {
3299   default: return false; // This is an input error
3300   case Instruction::Trunc:
3301     return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3302            SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3303   case Instruction::ZExt:
3304     return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3305            SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3306   case Instruction::SExt:
3307     return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3308            SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3309   case Instruction::FPTrunc:
3310     return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3311            SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3312   case Instruction::FPExt:
3313     return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3314            SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3315   case Instruction::UIToFP:
3316   case Instruction::SIToFP:
3317     return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3318            SrcEC == DstEC;
3319   case Instruction::FPToUI:
3320   case Instruction::FPToSI:
3321     return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3322            SrcEC == DstEC;
3323   case Instruction::PtrToInt:
3324     if (SrcEC != DstEC)
3325       return false;
3326     return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3327   case Instruction::IntToPtr:
3328     if (SrcEC != DstEC)
3329       return false;
3330     return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3331   case Instruction::BitCast: {
3332     PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3333     PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3334 
3335     // BitCast implies a no-op cast of type only. No bits change.
3336     // However, you can't cast pointers to anything but pointers.
3337     if (!SrcPtrTy != !DstPtrTy)
3338       return false;
3339 
3340     // For non-pointer cases, the cast is okay if the source and destination bit
3341     // widths are identical.
3342     if (!SrcPtrTy)
3343       return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3344 
3345     // If both are pointers then the address spaces must match.
3346     if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3347       return false;
3348 
3349     // A vector of pointers must have the same number of elements.
3350     if (SrcIsVec && DstIsVec)
3351       return SrcEC == DstEC;
3352     if (SrcIsVec)
3353       return SrcEC == ElementCount::getFixed(1);
3354     if (DstIsVec)
3355       return DstEC == ElementCount::getFixed(1);
3356 
3357     return true;
3358   }
3359   case Instruction::AddrSpaceCast: {
3360     PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3361     if (!SrcPtrTy)
3362       return false;
3363 
3364     PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3365     if (!DstPtrTy)
3366       return false;
3367 
3368     if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3369       return false;
3370 
3371     return SrcEC == DstEC;
3372   }
3373   }
3374 }
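// A few illustrative queries (hypothetical types): Trunc from i32 to i64 is
// rejected because the destination is wider; ZExt from <2 x i8> to
// <4 x i16> is rejected because the element counts differ; BitCast from
// <2 x i32> to i64 is accepted because both sides occupy 64 bits.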
3375 
3376 TruncInst::TruncInst(Value *S, Type *Ty, const Twine &Name,
3377                      InsertPosition InsertBefore)
3378     : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3379   assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3380 }
3381 
3382 ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name,
3383                    InsertPosition InsertBefore)
3384     : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3385   assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3386 }
3387 
3388 SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name,
3389                    InsertPosition InsertBefore)
3390     : CastInst(Ty, SExt, S, Name, InsertBefore) {
3391   assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3392 }
3393 
3394 FPTruncInst::FPTruncInst(Value *S, Type *Ty, const Twine &Name,
3395                          InsertPosition InsertBefore)
3396     : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3397   assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3398 }
3399 
3400 FPExtInst::FPExtInst(Value *S, Type *Ty, const Twine &Name,
3401                      InsertPosition InsertBefore)
3402     : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3403   assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3404 }
3405 
3406 UIToFPInst::UIToFPInst(Value *S, Type *Ty, const Twine &Name,
3407                        InsertPosition InsertBefore)
3408     : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3409   assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3410 }
3411 
3412 SIToFPInst::SIToFPInst(Value *S, Type *Ty, const Twine &Name,
3413                        InsertPosition InsertBefore)
3414     : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3415   assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3416 }
3417 
3418 FPToUIInst::FPToUIInst(Value *S, Type *Ty, const Twine &Name,
3419                        InsertPosition InsertBefore)
3420     : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3421   assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3422 }
3423 
3424 FPToSIInst::FPToSIInst(Value *S, Type *Ty, const Twine &Name,
3425                        InsertPosition InsertBefore)
3426     : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3427   assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3428 }
3429 
3430 PtrToIntInst::PtrToIntInst(Value *S, Type *Ty, const Twine &Name,
3431                            InsertPosition InsertBefore)
3432     : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3433   assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3434 }
3435 
3436 IntToPtrInst::IntToPtrInst(Value *S, Type *Ty, const Twine &Name,
3437                            InsertPosition InsertBefore)
3438     : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3439   assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3440 }
3441 
3442 BitCastInst::BitCastInst(Value *S, Type *Ty, const Twine &Name,
3443                          InsertPosition InsertBefore)
3444     : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3445   assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3446 }
3447 
3448 AddrSpaceCastInst::AddrSpaceCastInst(Value *S, Type *Ty, const Twine &Name,
3449                                      InsertPosition InsertBefore)
3450     : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3451   assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3452 }
3453 
3454 //===----------------------------------------------------------------------===//
3455 //                               CmpInst Classes
3456 //===----------------------------------------------------------------------===//
3457 
3458 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
3459                  Value *RHS, const Twine &Name, InsertPosition InsertBefore,
3460                  Instruction *FlagsSource)
3461     : Instruction(ty, op, AllocMarker, InsertBefore) {
3462   Op<0>() = LHS;
3463   Op<1>() = RHS;
3464   setPredicate((Predicate)predicate);
3465   setName(Name);
3466   if (FlagsSource)
3467     copyIRFlags(FlagsSource);
3468 }
3469 
3470 CmpInst *CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
3471                          const Twine &Name, InsertPosition InsertBefore) {
3472   if (Op == Instruction::ICmp) {
3473     if (InsertBefore.isValid())
3474       return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3475                           S1, S2, Name);
3476     else
3477       return new ICmpInst(CmpInst::Predicate(predicate),
3478                           S1, S2, Name);
3479   }
3480 
3481   if (InsertBefore.isValid())
3482     return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3483                         S1, S2, Name);
3484   else
3485     return new FCmpInst(CmpInst::Predicate(predicate),
3486                         S1, S2, Name);
3487 }
3488 
3489 CmpInst *CmpInst::CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1,
3490                                         Value *S2,
3491                                         const Instruction *FlagsSource,
3492                                         const Twine &Name,
3493                                         InsertPosition InsertBefore) {
3494   CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);
3495   Inst->copyIRFlags(FlagsSource);
3496   return Inst;
3497 }
3498 
3499 void CmpInst::swapOperands() {
3500   if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3501     IC->swapOperands();
3502   else
3503     cast<FCmpInst>(this)->swapOperands();
3504 }
3505 
3506 bool CmpInst::isCommutative() const {
3507   if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3508     return IC->isCommutative();
3509   return cast<FCmpInst>(this)->isCommutative();
3510 }
3511 
3512 bool CmpInst::isEquality(Predicate P) {
3513   if (ICmpInst::isIntPredicate(P))
3514     return ICmpInst::isEquality(P);
3515   if (FCmpInst::isFPPredicate(P))
3516     return FCmpInst::isEquality(P);
3517   llvm_unreachable("Unsupported predicate kind");
3518 }
3519 
3520 // Returns true if either operand of CmpInst is a provably non-zero
3521 // floating-point constant.
3522 static bool hasNonZeroFPOperands(const CmpInst *Cmp) {
3523   auto *LHS = dyn_cast<Constant>(Cmp->getOperand(0));
3524   auto *RHS = dyn_cast<Constant>(Cmp->getOperand(1));
3525   if (auto *Const = LHS ? LHS : RHS) {
3526     using namespace llvm::PatternMatch;
3527     return match(Const, m_NonZeroNotDenormalFP());
3528   }
3529   return false;
3530 }
3531 
3532 // Floating-point equality is not an equivalence when comparing +0.0 with
3533 // -0.0, when comparing NaN with another value, or when flushing
3534 // denormals-to-zero.
3535 bool CmpInst::isEquivalence(bool Invert) const {
3536   switch (Invert ? getInversePredicate() : getPredicate()) {
3537   case CmpInst::Predicate::ICMP_EQ:
3538     return true;
3539   case CmpInst::Predicate::FCMP_UEQ:
3540     if (!hasNoNaNs())
3541       return false;
3542     [[fallthrough]];
3543   case CmpInst::Predicate::FCMP_OEQ:
3544     return hasNonZeroFPOperands(this);
3545   default:
3546     return false;
3547   }
3548 }
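// Illustrative counterexample (hypothetical IR): "fcmp oeq float %x, %y" is
// true for %x = +0.0 and %y = -0.0 even though the two values are
// observably different (1.0/+0.0 is +inf while 1.0/-0.0 is -inf), so oeq
// alone cannot justify substituting one operand for the other.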
3549 
3550 CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
3551   switch (pred) {
3552     default: llvm_unreachable("Unknown cmp predicate!");
3553     case ICMP_EQ: return ICMP_NE;
3554     case ICMP_NE: return ICMP_EQ;
3555     case ICMP_UGT: return ICMP_ULE;
3556     case ICMP_ULT: return ICMP_UGE;
3557     case ICMP_UGE: return ICMP_ULT;
3558     case ICMP_ULE: return ICMP_UGT;
3559     case ICMP_SGT: return ICMP_SLE;
3560     case ICMP_SLT: return ICMP_SGE;
3561     case ICMP_SGE: return ICMP_SLT;
3562     case ICMP_SLE: return ICMP_SGT;
3563 
3564     case FCMP_OEQ: return FCMP_UNE;
3565     case FCMP_ONE: return FCMP_UEQ;
3566     case FCMP_OGT: return FCMP_ULE;
3567     case FCMP_OLT: return FCMP_UGE;
3568     case FCMP_OGE: return FCMP_ULT;
3569     case FCMP_OLE: return FCMP_UGT;
3570     case FCMP_UEQ: return FCMP_ONE;
3571     case FCMP_UNE: return FCMP_OEQ;
3572     case FCMP_UGT: return FCMP_OLE;
3573     case FCMP_ULT: return FCMP_OGE;
3574     case FCMP_UGE: return FCMP_OLT;
3575     case FCMP_ULE: return FCMP_OGT;
3576     case FCMP_ORD: return FCMP_UNO;
3577     case FCMP_UNO: return FCMP_ORD;
3578     case FCMP_TRUE: return FCMP_FALSE;
3579     case FCMP_FALSE: return FCMP_TRUE;
3580   }
3581 }
3582 
3583 StringRef CmpInst::getPredicateName(Predicate Pred) {
3584   switch (Pred) {
3585   default:                   return "unknown";
3586   case FCmpInst::FCMP_FALSE: return "false";
3587   case FCmpInst::FCMP_OEQ:   return "oeq";
3588   case FCmpInst::FCMP_OGT:   return "ogt";
3589   case FCmpInst::FCMP_OGE:   return "oge";
3590   case FCmpInst::FCMP_OLT:   return "olt";
3591   case FCmpInst::FCMP_OLE:   return "ole";
3592   case FCmpInst::FCMP_ONE:   return "one";
3593   case FCmpInst::FCMP_ORD:   return "ord";
3594   case FCmpInst::FCMP_UNO:   return "uno";
3595   case FCmpInst::FCMP_UEQ:   return "ueq";
3596   case FCmpInst::FCMP_UGT:   return "ugt";
3597   case FCmpInst::FCMP_UGE:   return "uge";
3598   case FCmpInst::FCMP_ULT:   return "ult";
3599   case FCmpInst::FCMP_ULE:   return "ule";
3600   case FCmpInst::FCMP_UNE:   return "une";
3601   case FCmpInst::FCMP_TRUE:  return "true";
3602   case ICmpInst::ICMP_EQ:    return "eq";
3603   case ICmpInst::ICMP_NE:    return "ne";
3604   case ICmpInst::ICMP_SGT:   return "sgt";
3605   case ICmpInst::ICMP_SGE:   return "sge";
3606   case ICmpInst::ICMP_SLT:   return "slt";
3607   case ICmpInst::ICMP_SLE:   return "sle";
3608   case ICmpInst::ICMP_UGT:   return "ugt";
3609   case ICmpInst::ICMP_UGE:   return "uge";
3610   case ICmpInst::ICMP_ULT:   return "ult";
3611   case ICmpInst::ICMP_ULE:   return "ule";
3612   }
3613 }
3614 
3615 raw_ostream &llvm::operator<<(raw_ostream &OS, CmpInst::Predicate Pred) {
3616   OS << CmpInst::getPredicateName(Pred);
3617   return OS;
3618 }
3619 
3620 ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
3621   switch (pred) {
3622     default: llvm_unreachable("Unknown icmp predicate!");
3623     case ICMP_EQ: case ICMP_NE:
3624     case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3625        return pred;
3626     case ICMP_UGT: return ICMP_SGT;
3627     case ICMP_ULT: return ICMP_SLT;
3628     case ICMP_UGE: return ICMP_SGE;
3629     case ICMP_ULE: return ICMP_SLE;
3630   }
3631 }
3632 
3633 ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
3634   switch (pred) {
3635     default: llvm_unreachable("Unknown icmp predicate!");
3636     case ICMP_EQ: case ICMP_NE:
3637     case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3638        return pred;
3639     case ICMP_SGT: return ICMP_UGT;
3640     case ICMP_SLT: return ICMP_ULT;
3641     case ICMP_SGE: return ICMP_UGE;
3642     case ICMP_SLE: return ICMP_ULE;
3643   }
3644 }
3645 
3646 CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
3647   switch (pred) {
3648     default: llvm_unreachable("Unknown cmp predicate!");
3649     case ICMP_EQ: case ICMP_NE:
3650       return pred;
3651     case ICMP_SGT: return ICMP_SLT;
3652     case ICMP_SLT: return ICMP_SGT;
3653     case ICMP_SGE: return ICMP_SLE;
3654     case ICMP_SLE: return ICMP_SGE;
3655     case ICMP_UGT: return ICMP_ULT;
3656     case ICMP_ULT: return ICMP_UGT;
3657     case ICMP_UGE: return ICMP_ULE;
3658     case ICMP_ULE: return ICMP_UGE;
3659 
3660     case FCMP_FALSE: case FCMP_TRUE:
3661     case FCMP_OEQ: case FCMP_ONE:
3662     case FCMP_UEQ: case FCMP_UNE:
3663     case FCMP_ORD: case FCMP_UNO:
3664       return pred;
3665     case FCMP_OGT: return FCMP_OLT;
3666     case FCMP_OLT: return FCMP_OGT;
3667     case FCMP_OGE: return FCMP_OLE;
3668     case FCMP_OLE: return FCMP_OGE;
3669     case FCMP_UGT: return FCMP_ULT;
3670     case FCMP_ULT: return FCMP_UGT;
3671     case FCMP_UGE: return FCMP_ULE;
3672     case FCMP_ULE: return FCMP_UGE;
3673   }
3674 }
3675 
3676 bool CmpInst::isNonStrictPredicate(Predicate pred) {
3677   switch (pred) {
3678   case ICMP_SGE:
3679   case ICMP_SLE:
3680   case ICMP_UGE:
3681   case ICMP_ULE:
3682   case FCMP_OGE:
3683   case FCMP_OLE:
3684   case FCMP_UGE:
3685   case FCMP_ULE:
3686     return true;
3687   default:
3688     return false;
3689   }
3690 }
3691 
3692 bool CmpInst::isStrictPredicate(Predicate pred) {
3693   switch (pred) {
3694   case ICMP_SGT:
3695   case ICMP_SLT:
3696   case ICMP_UGT:
3697   case ICMP_ULT:
3698   case FCMP_OGT:
3699   case FCMP_OLT:
3700   case FCMP_UGT:
3701   case FCMP_ULT:
3702     return true;
3703   default:
3704     return false;
3705   }
3706 }
3707 
3708 CmpInst::Predicate CmpInst::getStrictPredicate(Predicate pred) {
3709   switch (pred) {
3710   case ICMP_SGE:
3711     return ICMP_SGT;
3712   case ICMP_SLE:
3713     return ICMP_SLT;
3714   case ICMP_UGE:
3715     return ICMP_UGT;
3716   case ICMP_ULE:
3717     return ICMP_ULT;
3718   case FCMP_OGE:
3719     return FCMP_OGT;
3720   case FCMP_OLE:
3721     return FCMP_OLT;
3722   case FCMP_UGE:
3723     return FCMP_UGT;
3724   case FCMP_ULE:
3725     return FCMP_ULT;
3726   default:
3727     return pred;
3728   }
3729 }
3730 
3731 CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
3732   switch (pred) {
3733   case ICMP_SGT:
3734     return ICMP_SGE;
3735   case ICMP_SLT:
3736     return ICMP_SLE;
3737   case ICMP_UGT:
3738     return ICMP_UGE;
3739   case ICMP_ULT:
3740     return ICMP_ULE;
3741   case FCMP_OGT:
3742     return FCMP_OGE;
3743   case FCMP_OLT:
3744     return FCMP_OLE;
3745   case FCMP_UGT:
3746     return FCMP_UGE;
3747   case FCMP_ULT:
3748     return FCMP_ULE;
3749   default:
3750     return pred;
3751   }
3752 }
3753 
3754 CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
3755   assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
3756 
3757   if (isStrictPredicate(pred))
3758     return getNonStrictPredicate(pred);
3759   if (isNonStrictPredicate(pred))
3760     return getStrictPredicate(pred);
3761 
3762   llvm_unreachable("Unknown predicate!");
3763 }
3764 
3765 bool CmpInst::isUnsigned(Predicate predicate) {
3766   switch (predicate) {
3767     default: return false;
3768     case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
3769     case ICmpInst::ICMP_UGE: return true;
3770   }
3771 }
3772 
3773 bool CmpInst::isSigned(Predicate predicate) {
3774   switch (predicate) {
3775     default: return false;
3776     case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
3777     case ICmpInst::ICMP_SGE: return true;
3778   }
3779 }
3780 
3781 bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
3782                        ICmpInst::Predicate Pred) {
3783   assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
3784   switch (Pred) {
3785   case ICmpInst::Predicate::ICMP_EQ:
3786     return LHS.eq(RHS);
3787   case ICmpInst::Predicate::ICMP_NE:
3788     return LHS.ne(RHS);
3789   case ICmpInst::Predicate::ICMP_UGT:
3790     return LHS.ugt(RHS);
3791   case ICmpInst::Predicate::ICMP_UGE:
3792     return LHS.uge(RHS);
3793   case ICmpInst::Predicate::ICMP_ULT:
3794     return LHS.ult(RHS);
3795   case ICmpInst::Predicate::ICMP_ULE:
3796     return LHS.ule(RHS);
3797   case ICmpInst::Predicate::ICMP_SGT:
3798     return LHS.sgt(RHS);
3799   case ICmpInst::Predicate::ICMP_SGE:
3800     return LHS.sge(RHS);
3801   case ICmpInst::Predicate::ICMP_SLT:
3802     return LHS.slt(RHS);
3803   case ICmpInst::Predicate::ICMP_SLE:
3804     return LHS.sle(RHS);
3805   default:
3806     llvm_unreachable("Unexpected non-integer predicate.");
3807   };
3808 }
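// For example (a sketch with made-up values): the 8-bit pattern 0xC8 reads
// as 200 unsigned but -56 signed, so the signed and unsigned predicates
// disagree:
//   APInt A(8, 200), B(8, 100);
//   ICmpInst::compare(A, B, ICmpInst::ICMP_UGT); // true:  200 >u 100
//   ICmpInst::compare(A, B, ICmpInst::ICMP_SGT); // false: -56 <s 100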
3809 
3810 bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
3811                        FCmpInst::Predicate Pred) {
3812   APFloat::cmpResult R = LHS.compare(RHS);
3813   switch (Pred) {
3814   default:
3815     llvm_unreachable("Invalid FCmp Predicate");
3816   case FCmpInst::FCMP_FALSE:
3817     return false;
3818   case FCmpInst::FCMP_TRUE:
3819     return true;
3820   case FCmpInst::FCMP_UNO:
3821     return R == APFloat::cmpUnordered;
3822   case FCmpInst::FCMP_ORD:
3823     return R != APFloat::cmpUnordered;
3824   case FCmpInst::FCMP_UEQ:
3825     return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
3826   case FCmpInst::FCMP_OEQ:
3827     return R == APFloat::cmpEqual;
3828   case FCmpInst::FCMP_UNE:
3829     return R != APFloat::cmpEqual;
3830   case FCmpInst::FCMP_ONE:
3831     return R == APFloat::cmpLessThan || R == APFloat::cmpGreaterThan;
3832   case FCmpInst::FCMP_ULT:
3833     return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
3834   case FCmpInst::FCMP_OLT:
3835     return R == APFloat::cmpLessThan;
3836   case FCmpInst::FCMP_UGT:
3837     return R == APFloat::cmpUnordered || R == APFloat::cmpGreaterThan;
3838   case FCmpInst::FCMP_OGT:
3839     return R == APFloat::cmpGreaterThan;
3840   case FCmpInst::FCMP_ULE:
3841     return R != APFloat::cmpGreaterThan;
3842   case FCmpInst::FCMP_OLE:
3843     return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
3844   case FCmpInst::FCMP_UGE:
3845     return R != APFloat::cmpLessThan;
3846   case FCmpInst::FCMP_OGE:
3847     return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
3848   }
3849 }
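// For example (a sketch): a NaN operand makes every ordered predicate false
// and every unordered predicate true:
//   APFloat NaN = APFloat::getNaN(APFloat::IEEEsingle());
//   APFloat One(1.0f);
//   FCmpInst::compare(NaN, One, FCmpInst::FCMP_OLT); // false (unordered)
//   FCmpInst::compare(NaN, One, FCmpInst::FCMP_ULT); // true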
3850 
3851 std::optional<bool> ICmpInst::compare(const KnownBits &LHS,
3852                                       const KnownBits &RHS,
3853                                       ICmpInst::Predicate Pred) {
3854   switch (Pred) {
3855   case ICmpInst::ICMP_EQ:
3856     return KnownBits::eq(LHS, RHS);
3857   case ICmpInst::ICMP_NE:
3858     return KnownBits::ne(LHS, RHS);
3859   case ICmpInst::ICMP_UGE:
3860     return KnownBits::uge(LHS, RHS);
3861   case ICmpInst::ICMP_UGT:
3862     return KnownBits::ugt(LHS, RHS);
3863   case ICmpInst::ICMP_ULE:
3864     return KnownBits::ule(LHS, RHS);
3865   case ICmpInst::ICMP_ULT:
3866     return KnownBits::ult(LHS, RHS);
3867   case ICmpInst::ICMP_SGE:
3868     return KnownBits::sge(LHS, RHS);
3869   case ICmpInst::ICMP_SGT:
3870     return KnownBits::sgt(LHS, RHS);
3871   case ICmpInst::ICMP_SLE:
3872     return KnownBits::sle(LHS, RHS);
3873   case ICmpInst::ICMP_SLT:
3874     return KnownBits::slt(LHS, RHS);
3875   default:
3876     llvm_unreachable("Unexpected non-integer predicate.");
3877   }
3878 }
3879 
3880 CmpInst::Predicate ICmpInst::getFlippedSignednessPredicate(Predicate pred) {
3881   if (CmpInst::isEquality(pred))
3882     return pred;
3883   if (isSigned(pred))
3884     return getUnsignedPredicate(pred);
3885   if (isUnsigned(pred))
3886     return getSignedPredicate(pred);
3887 
3888   llvm_unreachable("Unknown predicate!");
3889 }
3890 
3891 bool CmpInst::isOrdered(Predicate predicate) {
3892   switch (predicate) {
3893     default: return false;
3894     case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
3895     case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
3896     case FCmpInst::FCMP_ORD: return true;
3897   }
3898 }
3899 
3900 bool CmpInst::isUnordered(Predicate predicate) {
3901   switch (predicate) {
3902     default: return false;
3903     case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
3904     case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
3905     case FCmpInst::FCMP_UNO: return true;
3906   }
3907 }
3908 
3909 bool CmpInst::isTrueWhenEqual(Predicate predicate) {
3910   switch(predicate) {
3911     default: return false;
3912     case ICMP_EQ:   case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3913     case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3914   }
3915 }
3916 
3917 bool CmpInst::isFalseWhenEqual(Predicate predicate) {
3918   switch(predicate) {
3919   case ICMP_NE:    case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3920   case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3921   default: return false;
3922   }
3923 }
3924 
3925 static bool isImpliedTrueByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2) {
3926   // If the predicates match, then we know the first condition implies the
3927   // second is true.
3928   if (CmpPredicate::getMatching(Pred1, Pred2))
3929     return true;
3930 
3931   if (Pred1.hasSameSign() && CmpInst::isSigned(Pred2))
3932     Pred1 = ICmpInst::getFlippedSignednessPredicate(Pred1);
3933   else if (Pred2.hasSameSign() && CmpInst::isSigned(Pred1))
3934     Pred2 = ICmpInst::getFlippedSignednessPredicate(Pred2);
3935 
3936   switch (Pred1) {
3937   default:
3938     break;
3939   case CmpInst::ICMP_EQ:
3940     // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
3941     return Pred2 == CmpInst::ICMP_UGE || Pred2 == CmpInst::ICMP_ULE ||
3942            Pred2 == CmpInst::ICMP_SGE || Pred2 == CmpInst::ICMP_SLE;
3943   case CmpInst::ICMP_UGT: // A >u B implies A != B and A >=u B are true.
3944     return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_UGE;
3945   case CmpInst::ICMP_ULT: // A <u B implies A != B and A <=u B are true.
3946     return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_ULE;
3947   case CmpInst::ICMP_SGT: // A >s B implies A != B and A >=s B are true.
3948     return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SGE;
3949   case CmpInst::ICMP_SLT: // A <s B implies A != B and A <=s B are true.
3950     return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SLE;
3951   }
3952   return false;
3953 }
3954 
3955 static bool isImpliedFalseByMatchingCmp(CmpPredicate Pred1,
3956                                         CmpPredicate Pred2) {
3957   return isImpliedTrueByMatchingCmp(Pred1,
3958                                     ICmpInst::getInverseCmpPredicate(Pred2));
3959 }
3960 
3961 std::optional<bool> ICmpInst::isImpliedByMatchingCmp(CmpPredicate Pred1,
3962                                                      CmpPredicate Pred2) {
3963   if (isImpliedTrueByMatchingCmp(Pred1, Pred2))
3964     return true;
3965   if (isImpliedFalseByMatchingCmp(Pred1, Pred2))
3966     return false;
3967   return std::nullopt;
3968 }
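// For instance: isImpliedByMatchingCmp(ICMP_UGT, ICMP_NE) returns true
// (A >u B forces A != B), isImpliedByMatchingCmp(ICMP_UGT, ICMP_ULE)
// returns false (A >u B refutes A <=u B), and an unrelated pair such as
// (ICMP_UGT, ICMP_SGT) returns std::nullopt.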
3969 
3970 //===----------------------------------------------------------------------===//
3971 //                       CmpPredicate Implementation
3972 //===----------------------------------------------------------------------===//
3973 
3974 std::optional<CmpPredicate> CmpPredicate::getMatching(CmpPredicate A,
3975                                                       CmpPredicate B) {
3976   if (A.Pred == B.Pred)
3977     return A.HasSameSign == B.HasSameSign ? A : CmpPredicate(A.Pred);
3978   if (CmpInst::isFPPredicate(A) || CmpInst::isFPPredicate(B))
3979     return {};
3980   if (A.HasSameSign &&
3981       A.Pred == ICmpInst::getFlippedSignednessPredicate(B.Pred))
3982     return B.Pred;
3983   if (B.HasSameSign &&
3984       B.Pred == ICmpInst::getFlippedSignednessPredicate(A.Pred))
3985     return A.Pred;
3986   return {};
3987 }
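// For example (illustrative): getMatching(ult with samesign, slt) yields
// slt, because samesign licenses flipping the unsigned predicate to its
// signed form; getMatching(plain ult, slt) yields nothing.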
3988 
3989 CmpInst::Predicate CmpPredicate::getPreferredSignedPredicate() const {
3990   return HasSameSign ? ICmpInst::getSignedPredicate(Pred) : Pred;
3991 }
3992 
3993 CmpPredicate CmpPredicate::get(const CmpInst *Cmp) {
3994   if (auto *ICI = dyn_cast<ICmpInst>(Cmp))
3995     return ICI->getCmpPredicate();
3996   return Cmp->getPredicate();
3997 }
3998 
3999 CmpPredicate CmpPredicate::getSwapped(CmpPredicate P) {
4000   return {CmpInst::getSwappedPredicate(P), P.hasSameSign()};
4001 }
4002 
4003 CmpPredicate CmpPredicate::getSwapped(const CmpInst *Cmp) {
4004   return getSwapped(get(Cmp));
4005 }
4006 
4007 //===----------------------------------------------------------------------===//
4008 //                        SwitchInst Implementation
4009 //===----------------------------------------------------------------------===//
4010 
4011 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
4012   assert(Value && Default && NumReserved);
4013   ReservedSpace = NumReserved;
4014   setNumHungOffUseOperands(2);
4015   allocHungoffUses(ReservedSpace);
4016 
4017   Op<0>() = Value;
4018   Op<1>() = Default;
4019 }
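// The resulting hung-off operand layout is, schematically: operand 0 holds
// the condition value, operand 1 the default destination, operands 2 and 3
// the first case's value and destination, operands 4 and 5 the second's,
// and so on. addCase() and removeCase() below rely on this layout.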
4020 
4021 /// SwitchInst ctor - Create a new switch instruction, specifying a value to
4022 /// switch on and a default destination.  The number of additional cases can
4023 /// be specified here to make memory allocation more efficient.  This
4024 /// constructor can also autoinsert before another instruction.
4025 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
4026                        InsertPosition InsertBefore)
4027     : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
4028                   AllocMarker, InsertBefore) {
4029   init(Value, Default, 2+NumCases*2);
4030 }
4031 
4032 SwitchInst::SwitchInst(const SwitchInst &SI)
4033     : Instruction(SI.getType(), Instruction::Switch, AllocMarker) {
4034   init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
4035   setNumHungOffUseOperands(SI.getNumOperands());
4036   Use *OL = getOperandList();
4037   const Use *InOL = SI.getOperandList();
4038   for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
4039     OL[i] = InOL[i];
4040     OL[i+1] = InOL[i+1];
4041   }
4042   SubclassOptionalData = SI.SubclassOptionalData;
4043 }
4044 
4045 /// addCase - Add an entry to the switch instruction...
4046 ///
4047 void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
4048   unsigned NewCaseIdx = getNumCases();
4049   unsigned OpNo = getNumOperands();
4050   if (OpNo+2 > ReservedSpace)
4051     growOperands();  // Get more space!
4052   // Initialize some new operands.
4053   assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
4054   setNumHungOffUseOperands(OpNo+2);
4055   CaseHandle Case(this, NewCaseIdx);
4056   Case.setValue(OnVal);
4057   Case.setSuccessor(Dest);
4058 }
4059 
4060 /// removeCase - This method removes the specified case and its successor
4061 /// from the switch instruction.
4062 SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
4063   unsigned idx = I->getCaseIndex();
4064 
4065   assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
4066 
4067   unsigned NumOps = getNumOperands();
4068   Use *OL = getOperandList();
4069 
4070   // Overwrite this case with the end of the list.
4071   if (2 + (idx + 1) * 2 != NumOps) {
4072     OL[2 + idx * 2] = OL[NumOps - 2];
4073     OL[2 + idx * 2 + 1] = OL[NumOps - 1];
4074   }
4075 
4076   // Nuke the last value.
4077   OL[NumOps-2].set(nullptr);
4078   OL[NumOps-2+1].set(nullptr);
4079   setNumHungOffUseOperands(NumOps-2);
4080 
4081   return CaseIt(this, idx);
4082 }
4083 
4084 /// growOperands - This grows the operand list in response to a push_back
4085 /// style of operation.  It triples the number of operands.
4086 ///
4087 void SwitchInst::growOperands() {
4088   unsigned e = getNumOperands();
4089   unsigned NumOps = e*3;
4090 
4091   ReservedSpace = NumOps;
4092   growHungoffUses(ReservedSpace);
4093 }
4094 
4095 MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
4096   assert(Changed && "called only if metadata has changed");
4097 
4098   if (!Weights)
4099     return nullptr;
4100 
4101   assert(SI.getNumSuccessors() == Weights->size() &&
4102          "num of prof branch_weights must accord with num of successors");
4103 
4104   bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });
4105 
4106   if (AllZeroes || Weights->size() < 2)
4107     return nullptr;
4108 
4109   return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
4110 }
4111 
4112 void SwitchInstProfUpdateWrapper::init() {
4113   MDNode *ProfileData = getBranchWeightMDNode(SI);
4114   if (!ProfileData)
4115     return;
4116 
4117   if (getNumBranchWeights(*ProfileData) != SI.getNumSuccessors()) {
4118     llvm_unreachable("number of prof branch_weights metadata operands does "
4119                      "not correspond to number of successors");
4120   }
4121 
4122   SmallVector<uint32_t, 8> Weights;
4123   if (!extractBranchWeights(ProfileData, Weights))
4124     return;
4125   this->Weights = std::move(Weights);
4126 }

SwitchInst::CaseIt
SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
  if (Weights) {
    assert(SI.getNumSuccessors() == Weights->size() &&
           "number of prof branch_weights must match number of successors");
    Changed = true;
    // Copy the last case's weight into the slot of the removed one and shrink.
    // This mirrors the way SwitchInst::removeCase(CaseIt) moves the last case
    // into the removed slot.
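    // Weights are indexed by successor number and successor 0 is the default
    // destination, hence the +1 when mapping a case index to its weight.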
    (*Weights)[I->getCaseIndex() + 1] = Weights->back();
    Weights->pop_back();
  }
  return SI.removeCase(I);
}

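// A minimal usage sketch for the wrapper (SI, OnVal, and DestBB are
// illustrative names, not defined in this file):
//
//   SwitchInstProfUpdateWrapper SIW(*SI);
//   SIW.addCase(OnVal, DestBB, /*W=*/10); // Keeps branch weights in step.
//
// Any updated weights are written back to the !prof metadata when the wrapper
// is destroyed.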
void SwitchInstProfUpdateWrapper::addCase(
    ConstantInt *OnVal, BasicBlock *Dest,
    SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  SI.addCase(OnVal, Dest);

  if (!Weights && W && *W) {
    Changed = true;
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
    (*Weights)[SI.getNumSuccessors() - 1] = *W;
  } else if (Weights) {
    Changed = true;
    Weights->push_back(W.value_or(0));
  }
  if (Weights)
    assert(SI.getNumSuccessors() == Weights->size() &&
           "number of prof branch_weights must match number of successors");
}

Instruction::InstListType::iterator
SwitchInstProfUpdateWrapper::eraseFromParent() {
  // Instruction is erased. Mark as unchanged so the destructor does not touch
  // it.
  Changed = false;
  if (Weights)
    Weights->resize(0);
  return SI.eraseFromParent();
}

SwitchInstProfUpdateWrapper::CaseWeightOpt
SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {
  if (!Weights)
    return std::nullopt;
  return (*Weights)[idx];
}

void SwitchInstProfUpdateWrapper::setSuccessorWeight(
    unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  if (!W)
    return;

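  // Setting a zero weight when no weights are tracked yet is a no-op; weight
  // storage is only materialized for a nonzero weight.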
  if (!Weights && *W)
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);

  if (Weights) {
    auto &OldW = (*Weights)[idx];
    if (*W != OldW) {
      Changed = true;
      OldW = *W;
    }
  }
}

SwitchInstProfUpdateWrapper::CaseWeightOpt
SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI,
                                                unsigned idx) {
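  // Operand 0 of a branch_weights node is the !"branch_weights" string, so
  // the weight of successor idx lives at operand idx + 1.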
  if (MDNode *ProfileData = getBranchWeightMDNode(SI))
    if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
      return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
          ->getValue()
          .getZExtValue();

  return std::nullopt;
}

//===----------------------------------------------------------------------===//
//                        IndirectBrInst Implementation
//===----------------------------------------------------------------------===//

void IndirectBrInst::init(Value *Address, unsigned NumDests) {
  assert(Address && Address->getType()->isPointerTy() &&
         "Address of indirectbr must be a pointer");
  ReservedSpace = 1+NumDests;
  setNumHungOffUseOperands(1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Address;
}


/// growOperands - This grows the operand list in response to a push_back
/// style of operation, doubling the number of operands.
///
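/// For example, an indirectbr with an address operand and three destinations
/// has four operands, so growing from that state reserves space for eight.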
void IndirectBrInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*2;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, AllocMarker, InsertBefore) {
  init(Address, NumCases);
}

IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
    : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
                  AllocMarker) {
  NumUserOperands = IBI.NumUserOperands;
  allocHungoffUses(IBI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = IBI.getOperandList();
  for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
    OL[i] = InOL[i];
  SubclassOptionalData = IBI.SubclassOptionalData;
}

/// addDestination - Add a destination.
///
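/// A minimal usage sketch (TargetBB and InsertPt are illustrative names, not
/// defined in this file):
/// \code
///   IndirectBrInst *IBI =
///       IndirectBrInst::Create(BlockAddress::get(TargetBB), 1, InsertPt);
///   IBI->addDestination(TargetBB); // Every feasible target must be listed.
/// \endcode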
void IndirectBrInst::addDestination(BasicBlock *DestBB) {
  unsigned OpNo = getNumOperands();
  if (OpNo+1 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+1);
  getOperandList()[OpNo] = DestBB;
}

/// removeDestination - This method removes the specified successor from the
/// indirectbr instruction.
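/// Destination order is not preserved: the last destination is moved into the
/// removed slot.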
void IndirectBrInst::removeDestination(unsigned idx) {
  assert(idx < getNumOperands()-1 && "Successor index out of range!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Replace this value with the last one.
  OL[idx+1] = OL[NumOps-1];

  // Nuke the last value.
  OL[NumOps-1].set(nullptr);
  setNumHungOffUseOperands(NumOps-1);
}

//===----------------------------------------------------------------------===//
//                            FreezeInst Implementation
//===----------------------------------------------------------------------===//

FreezeInst::FreezeInst(Value *S, const Twine &Name, InsertPosition InsertBefore)
    : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
  setName(Name);
}

//===----------------------------------------------------------------------===//
//                           cloneImpl() implementations
//===----------------------------------------------------------------------===//

// Define these methods here so vtables don't get emitted into every translation
// unit that uses these classes.
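//
// The public entry point is Instruction::clone(), which dispatches to these
// cloneImpl() methods and then copies metadata and other per-instruction
// state onto the fresh, detached copy.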

GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) GetElementPtrInst(*this, AllocMarker);
}

UnaryOperator *UnaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>());
}

BinaryOperator *BinaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>(), Op<1>());
}

FCmpInst *FCmpInst::cloneImpl() const {
  return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
}

ICmpInst *ICmpInst::cloneImpl() const {
  return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
}

ExtractValueInst *ExtractValueInst::cloneImpl() const {
  return new ExtractValueInst(*this);
}

InsertValueInst *InsertValueInst::cloneImpl() const {
  return new InsertValueInst(*this);
}

AllocaInst *AllocaInst::cloneImpl() const {
  AllocaInst *Result = new AllocaInst(getAllocatedType(), getAddressSpace(),
                                      getOperand(0), getAlign());
  Result->setUsedWithInAlloca(isUsedWithInAlloca());
  Result->setSwiftError(isSwiftError());
  return Result;
}

LoadInst *LoadInst::cloneImpl() const {
  return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
                      getAlign(), getOrdering(), getSyncScopeID());
}

StoreInst *StoreInst::cloneImpl() const {
  return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
                       getOrdering(), getSyncScopeID());
}

AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
  AtomicCmpXchgInst *Result = new AtomicCmpXchgInst(
      getOperand(0), getOperand(1), getOperand(2), getAlign(),
      getSuccessOrdering(), getFailureOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  Result->setWeak(isWeak());
  return Result;
}

AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
  AtomicRMWInst *Result =
      new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
                        getAlign(), getOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  return Result;
}

FenceInst *FenceInst::cloneImpl() const {
  return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}

TruncInst *TruncInst::cloneImpl() const {
  return new TruncInst(getOperand(0), getType());
}

ZExtInst *ZExtInst::cloneImpl() const {
  return new ZExtInst(getOperand(0), getType());
}

SExtInst *SExtInst::cloneImpl() const {
  return new SExtInst(getOperand(0), getType());
}

FPTruncInst *FPTruncInst::cloneImpl() const {
  return new FPTruncInst(getOperand(0), getType());
}

FPExtInst *FPExtInst::cloneImpl() const {
  return new FPExtInst(getOperand(0), getType());
}

UIToFPInst *UIToFPInst::cloneImpl() const {
  return new UIToFPInst(getOperand(0), getType());
}

SIToFPInst *SIToFPInst::cloneImpl() const {
  return new SIToFPInst(getOperand(0), getType());
}

FPToUIInst *FPToUIInst::cloneImpl() const {
  return new FPToUIInst(getOperand(0), getType());
}

FPToSIInst *FPToSIInst::cloneImpl() const {
  return new FPToSIInst(getOperand(0), getType());
}

PtrToIntInst *PtrToIntInst::cloneImpl() const {
  return new PtrToIntInst(getOperand(0), getType());
}

IntToPtrInst *IntToPtrInst::cloneImpl() const {
  return new IntToPtrInst(getOperand(0), getType());
}

BitCastInst *BitCastInst::cloneImpl() const {
  return new BitCastInst(getOperand(0), getType());
}

AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
  return new AddrSpaceCastInst(getOperand(0), getType());
}

CallInst *CallInst::cloneImpl() const {
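  // Operand bundles are co-allocated with the instruction, so the clone must
  // reserve space for both the operands and the bundle descriptors.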
  if (hasOperandBundles()) {
    IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
        getNumOperands(),
        getNumOperandBundles() * unsigned(sizeof(BundleOpInfo))};
    return new (AllocMarker) CallInst(*this, AllocMarker);
  }
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) CallInst(*this, AllocMarker);
}

SelectInst *SelectInst::cloneImpl() const {
  return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

VAArgInst *VAArgInst::cloneImpl() const {
  return new VAArgInst(getOperand(0), getType());
}

ExtractElementInst *ExtractElementInst::cloneImpl() const {
  return ExtractElementInst::Create(getOperand(0), getOperand(1));
}

InsertElementInst *InsertElementInst::cloneImpl() const {
  return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
  return new ShuffleVectorInst(getOperand(0), getOperand(1), getShuffleMask());
}

PHINode *PHINode::cloneImpl() const { return new (AllocMarker) PHINode(*this); }

LandingPadInst *LandingPadInst::cloneImpl() const {
  return new LandingPadInst(*this);
}

ReturnInst *ReturnInst::cloneImpl() const {
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) ReturnInst(*this, AllocMarker);
}

BranchInst *BranchInst::cloneImpl() const {
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) BranchInst(*this, AllocMarker);
}

SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }

IndirectBrInst *IndirectBrInst::cloneImpl() const {
  return new IndirectBrInst(*this);
}

InvokeInst *InvokeInst::cloneImpl() const {
  if (hasOperandBundles()) {
    IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
        getNumOperands(),
        getNumOperandBundles() * unsigned(sizeof(BundleOpInfo))};
    return new (AllocMarker) InvokeInst(*this, AllocMarker);
  }
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) InvokeInst(*this, AllocMarker);
}

CallBrInst *CallBrInst::cloneImpl() const {
  if (hasOperandBundles()) {
    IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
        getNumOperands(),
        getNumOperandBundles() * unsigned(sizeof(BundleOpInfo))};
    return new (AllocMarker) CallBrInst(*this, AllocMarker);
  }
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) CallBrInst(*this, AllocMarker);
}

ResumeInst *ResumeInst::cloneImpl() const {
  return new (AllocMarker) ResumeInst(*this);
}

CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) CleanupReturnInst(*this, AllocMarker);
}

CatchReturnInst *CatchReturnInst::cloneImpl() const {
  return new (AllocMarker) CatchReturnInst(*this);
}

CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
  return new CatchSwitchInst(*this);
}

FuncletPadInst *FuncletPadInst::cloneImpl() const {
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) FuncletPadInst(*this, AllocMarker);
}

UnreachableInst *UnreachableInst::cloneImpl() const {
  LLVMContext &Context = getContext();
  return new UnreachableInst(Context);
}

FreezeInst *FreezeInst::cloneImpl() const {
  return new FreezeInst(getOperand(0));
}