xref: /llvm-project/llvm/lib/IR/Instruction.cpp (revision 79499f010d2bfe809187a9a5f042d4e4ee1f1bcc)
1 //===-- Instruction.cpp - Implement the Instruction class -----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the Instruction class for the IR library.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/IR/Instruction.h"
14 #include "llvm/ADT/DenseSet.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/IR/AttributeMask.h"
17 #include "llvm/IR/Attributes.h"
18 #include "llvm/IR/Constants.h"
19 #include "llvm/IR/InstrTypes.h"
20 #include "llvm/IR/Instructions.h"
21 #include "llvm/IR/IntrinsicInst.h"
22 #include "llvm/IR/Intrinsics.h"
23 #include "llvm/IR/LLVMContext.h"
24 #include "llvm/IR/MemoryModelRelaxationAnnotations.h"
25 #include "llvm/IR/Module.h"
26 #include "llvm/IR/Operator.h"
27 #include "llvm/IR/ProfDataUtils.h"
28 #include "llvm/IR/Type.h"
29 using namespace llvm;
30 
// Convert a nullable Instruction* insertion point into an iterator-based
// InsertPosition. A null pointer produces an invalid sentinel iterator,
// meaning "do not insert anywhere".
InsertPosition::InsertPosition(Instruction *InsertBefore)
    : InsertAt(InsertBefore ? InsertBefore->getIterator()
                            : InstListType::iterator()) {}
// Convert a nullable BasicBlock* into an insertion position at the end of
// that block; null again yields the invalid sentinel iterator.
InsertPosition::InsertPosition(BasicBlock *InsertAtEnd)
    : InsertAt(InsertAtEnd ? InsertAtEnd->end() : InstListType::iterator()) {}
36 
// Construct an instruction of opcode `it` with result type `ty`, optionally
// inserting it into a block. Insertion only happens when InsertBefore carries
// a valid iterator; an invalid iterator leaves the instruction detached.
Instruction::Instruction(Type *ty, unsigned it, AllocInfo AllocInfo,
                         InsertPosition InsertBefore)
    : User(ty, Value::InstructionVal + it, AllocInfo) {
  // When called with an iterator, there must be a block to insert into.
  if (InstListType::iterator InsertIt = InsertBefore; InsertIt.isValid()) {
    BasicBlock *BB = InsertIt.getNodeParent();
    assert(BB && "Instruction to insert before is not in a basic block!");
    insertInto(BB, InsertBefore);
  }
}
47 
// Destructor. The instruction must already be unlinked from its block; any
// metadata uses are redirected to undef and DIAssignID bookkeeping is cleared.
Instruction::~Instruction() {
  assert(!getParent() && "Instruction still linked in the program!");

  // Replace any extant metadata uses of this instruction with undef to
  // preserve debug info accuracy. Some alternatives include:
  // - Treat Instruction like any other Value, and point its extant metadata
  //   uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
  //   trivially dead (i.e. fair game for deletion in many passes), leading to
  //   stale dbg.values being in effect for too long.
  // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
  //   correct. OTOH results in wasted work in some common cases (e.g. when all
  //   instructions in a BasicBlock are deleted).
  if (isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));

  // Explicitly remove DIAssignID metadata to clear up ID -> Instruction(s)
  // mapping in LLVMContext.
  setMetadata(LLVMContext::MD_DIAssignID, nullptr);
}
67 
68 const Module *Instruction::getModule() const {
69   return getParent()->getModule();
70 }
71 
72 const Function *Instruction::getFunction() const {
73   return getParent()->getParent();
74 }
75 
76 const DataLayout &Instruction::getDataLayout() const {
77   return getModule()->getDataLayout();
78 }
79 
80 void Instruction::removeFromParent() {
81   // Perform any debug-info maintenence required.
82   handleMarkerRemoval();
83 
84   getParent()->getInstList().remove(getIterator());
85 }
86 
87 void Instruction::handleMarkerRemoval() {
88   if (!getParent()->IsNewDbgInfoFormat || !DebugMarker)
89     return;
90 
91   DebugMarker->removeMarker();
92 }
93 
94 BasicBlock::iterator Instruction::eraseFromParent() {
95   handleMarkerRemoval();
96   return getParent()->getInstList().erase(getIterator());
97 }
98 
99 void Instruction::insertBefore(Instruction *InsertPos) {
100   insertBefore(InsertPos->getIterator());
101 }
102 
103 /// Insert an unlinked instruction into a basic block immediately before the
104 /// specified instruction.
105 void Instruction::insertBefore(BasicBlock::iterator InsertPos) {
106   insertBefore(*InsertPos->getParent(), InsertPos);
107 }
108 
109 /// Insert an unlinked instruction into a basic block immediately after the
110 /// specified instruction.
111 void Instruction::insertAfter(Instruction *InsertPos) {
112   BasicBlock *DestParent = InsertPos->getParent();
113 
114   DestParent->getInstList().insertAfter(InsertPos->getIterator(), this);
115 }
116 
117 void Instruction::insertAfter(BasicBlock::iterator InsertPos) {
118   BasicBlock *DestParent = InsertPos->getParent();
119 
120   DestParent->getInstList().insertAfter(InsertPos, this);
121 }
122 
// Insert this detached instruction into ParentBB at position It, returning an
// iterator to the newly-placed instruction.
BasicBlock::iterator Instruction::insertInto(BasicBlock *ParentBB,
                                             BasicBlock::iterator It) {
  assert(getParent() == nullptr && "Expected detached instruction");
  assert((It == ParentBB->end() || It->getParent() == ParentBB) &&
         "It not in ParentBB");
  insertBefore(*ParentBB, It);
  return getIterator();
}
131 
132 extern cl::opt<bool> UseNewDbgInfoFormat;
133 
// Core insertion routine: link "this" into BB ahead of InsertPos, then keep
// the block's debug records (DbgRecords / DbgMarkers) consistent with the new
// instruction order.
void Instruction::insertBefore(BasicBlock &BB,
                               InstListType::iterator InsertPos) {
  // An unlinked instruction must not already carry a debug-info marker.
  assert(!DebugMarker);

  BB.getInstList().insert(InsertPos, this);

  // No record maintenance needed in the old (intrinsic-based) debug mode.
  if (!BB.IsNewDbgInfoFormat)
    return;

  // We've inserted "this": if InsertAtHead is set then it comes before any
  // DbgVariableRecords attached to InsertPos. But if it's not set, then any
  // DbgRecords should now come before "this".
  bool InsertAtHead = InsertPos.getHeadBit();
  if (!InsertAtHead) {
    DbgMarker *SrcMarker = BB.getMarker(InsertPos);
    if (SrcMarker && !SrcMarker->empty()) {
      // If this assertion fires, the calling code is about to insert a PHI
      // after debug-records, which would form a sequence like:
      //     %0 = PHI
      //     #dbg_value
      //     %1 = PHI
      // Which is de-normalised and undesired -- hence the assertion. To avoid
      // this, you must insert at that position using an iterator, and it must
      // be acquired by calling getFirstNonPHIIt / begin or similar methods on
      // the block. This will signal to this behind-the-scenes debug-info
      // maintenance code that you intend the PHI to be ahead of everything,
      // including any debug-info.
      assert(!isa<PHINode>(this) && "Inserting PHI after debug-records!");
      adoptDbgRecords(&BB, InsertPos, false);
    }
  }

  // If we're inserting a terminator, check if we need to flush out
  // TrailingDbgRecords. Inserting instructions at the end of an incomplete
  // block is handled by the code block above.
  if (isTerminator())
    getParent()->flushTerminatorDbgRecords();
}
172 
173 /// Unlink this instruction from its current basic block and insert it into the
174 /// basic block that MovePos lives in, right before MovePos.
175 void Instruction::moveBefore(Instruction *MovePos) {
176   moveBeforeImpl(*MovePos->getParent(), MovePos->getIterator(), false);
177 }
178 
179 void Instruction::moveBefore(BasicBlock::iterator MovePos) {
180   moveBeforeImpl(*MovePos->getParent(), MovePos, false);
181 }
182 
183 void Instruction::moveBeforePreserving(Instruction *MovePos) {
184   moveBeforeImpl(*MovePos->getParent(), MovePos->getIterator(), true);
185 }
186 
187 void Instruction::moveBeforePreserving(BasicBlock::iterator MovePos) {
188   moveBeforeImpl(*MovePos->getParent(), MovePos, true);
189 }
190 
191 void Instruction::moveAfter(Instruction *MovePos) {
192   auto NextIt = std::next(MovePos->getIterator());
193   // We want this instruction to be moved to before NextIt in the instruction
194   // list, but before NextIt's debug value range.
195   NextIt.setHeadBit(true);
196   moveBeforeImpl(*MovePos->getParent(), NextIt, false);
197 }
198 
199 void Instruction::moveAfterPreserving(Instruction *MovePos) {
200   auto NextIt = std::next(MovePos->getIterator());
201   // We want this instruction and its debug range to be moved to before NextIt
202   // in the instruction list, but before NextIt's debug value range.
203   NextIt.setHeadBit(true);
204   moveBeforeImpl(*MovePos->getParent(), NextIt, true);
205 }
206 
207 void Instruction::moveBefore(BasicBlock &BB, InstListType::iterator I) {
208   moveBeforeImpl(BB, I, false);
209 }
210 
211 void Instruction::moveBeforePreserving(BasicBlock &BB,
212                                        InstListType::iterator I) {
213   moveBeforeImpl(BB, I, true);
214 }
215 
// Shared implementation behind all moveBefore/moveAfter variants: move "this"
// so it sits immediately before iterator I in BB. When Preserve is true, any
// DbgRecords attached to "this" travel with it; otherwise they are detached,
// and records attached at the destination may be adopted instead.
void Instruction::moveBeforeImpl(BasicBlock &BB, InstListType::iterator I,
                              bool Preserve) {
  assert(I == BB.end() || I->getParent() == &BB);
  // Head bit set means the caller wants "this" placed ahead of any DbgRecords
  // attached at position I.
  bool InsertAtHead = I.getHeadBit();

  // If we've been given the "Preserve" flag, then just move the DbgRecords with
  // the instruction, no more special handling needed.
  if (BB.IsNewDbgInfoFormat && DebugMarker && !Preserve) {
    if (I != this->getIterator() || InsertAtHead) {
      // "this" is definitely moving in the list, or it's moving ahead of its
      // attached DbgVariableRecords. Detach any existing DbgRecords.
      handleMarkerRemoval();
    }
  }

  // Move this single instruction. Use the list splice method directly, not
  // the block splicer, which will do more debug-info things.
  BB.getInstList().splice(I, getParent()->getInstList(), getIterator());

  if (BB.IsNewDbgInfoFormat && !Preserve) {
    DbgMarker *NextMarker = getParent()->getNextMarker(this);

    // If we're inserting at point I, and not in front of the DbgRecords
    // attached there, then we should absorb the DbgRecords attached to I.
    if (!InsertAtHead && NextMarker && !NextMarker->empty()) {
      adoptDbgRecords(&BB, I, false);
    }
  }

  // Terminators may need to pull in any records trailing at the end of the
  // block now that the block is complete.
  if (isTerminator())
    getParent()->flushTerminatorDbgRecords();
}
248 
// Clone DbgRecords from From's marker onto this instruction's marker,
// optionally starting at FromHere and optionally placing the clones at the
// head of the destination marker. Returns the range of newly-cloned records,
// or an empty range when From has no marker at all.
iterator_range<DbgRecord::self_iterator> Instruction::cloneDebugInfoFrom(
    const Instruction *From, std::optional<DbgRecord::self_iterator> FromHere,
    bool InsertAtHead) {
  if (!From->DebugMarker)
    return DbgMarker::getEmptyDbgRecordRange();

  // Both blocks must be using the new debug-record representation.
  assert(getParent()->IsNewDbgInfoFormat);
  assert(getParent()->IsNewDbgInfoFormat ==
         From->getParent()->IsNewDbgInfoFormat);

  // Lazily create a destination marker to clone into.
  if (!DebugMarker)
    getParent()->createMarker(this);

  return DebugMarker->cloneDebugInfoFrom(From->DebugMarker, FromHere,
                                         InsertAtHead);
}
265 
266 std::optional<DbgRecord::self_iterator>
267 Instruction::getDbgReinsertionPosition() {
268   // Is there a marker on the next instruction?
269   DbgMarker *NextMarker = getParent()->getNextMarker(this);
270   if (!NextMarker)
271     return std::nullopt;
272 
273   // Are there any DbgRecords in the next marker?
274   if (NextMarker->StoredDbgRecords.empty())
275     return std::nullopt;
276 
277   return NextMarker->StoredDbgRecords.begin();
278 }
279 
280 bool Instruction::hasDbgRecords() const { return !getDbgRecordRange().empty(); }
281 
// Take ownership of the DbgRecords attached at position It in BB, either by
// absorbing them into this instruction's marker or (as an optimisation) by
// adopting the source marker wholesale. Also ensures no empty trailing marker
// is left at the end of BB.
void Instruction::adoptDbgRecords(BasicBlock *BB, BasicBlock::iterator It,
                                  bool InsertAtHead) {
  DbgMarker *SrcMarker = BB->getMarker(It);
  // Helper: if we took records from the trailing marker off the end of the
  // block, destroy that (now-empty) marker so it doesn't misleadingly linger.
  auto ReleaseTrailingDbgRecords = [BB, It, SrcMarker]() {
    if (BB->end() == It) {
      SrcMarker->eraseFromParent();
      BB->deleteTrailingDbgRecords();
    }
  };

  // Nothing to adopt: just make sure no empty trailing marker remains.
  if (!SrcMarker || SrcMarker->StoredDbgRecords.empty()) {
    ReleaseTrailingDbgRecords();
    return;
  }

  // If we have DbgMarkers attached to this instruction, we have to honour the
  // ordering of DbgRecords between this and the other marker. Fall back to just
  // absorbing from the source.
  if (DebugMarker || It == BB->end()) {
    // Ensure we _do_ have a marker.
    getParent()->createMarker(this);
    DebugMarker->absorbDebugValues(*SrcMarker, InsertAtHead);

    // Having transferred everything out of SrcMarker, we _could_ clean it up
    // and free the marker now. However, that's a lot of heap-accounting for a
    // small amount of memory with a good chance of re-use. Leave it for the
    // moment. It will be released when the Instruction is freed in the worst
    // case.
    // However: if we transferred from a trailing marker off the end of the
    // block, it's important to not leave the empty marker trailing. It will
    // give a misleading impression that some debug records have been left
    // trailing.
    ReleaseTrailingDbgRecords();
  } else {
    // Optimisation: we're transferring all the DbgRecords from the source
    // marker onto this empty location: just adopt the other instruction's
    // marker.
    DebugMarker = SrcMarker;
    DebugMarker->MarkedInstr = this;
    It->DebugMarker = nullptr;
  }
}
324 
325 void Instruction::dropDbgRecords() {
326   if (DebugMarker)
327     DebugMarker->dropDbgRecords();
328 }
329 
// Drop the single debug record DVR from this instruction's marker.
// NOTE(review): unlike dropDbgRecords(), this dereferences DebugMarker
// unconditionally — presumably callers may only pass records attached to this
// instruction (so a marker must exist); confirm at call sites.
void Instruction::dropOneDbgRecord(DbgRecord *DVR) {
  DebugMarker->dropOneDbgRecord(DVR);
}
333 
// Return true if this instruction appears strictly before Other in their
// shared parent block. Lazily renumbers the block's instructions when the
// cached ordering has been invalidated; const_cast is needed because
// renumbering mutates the block even though this query is logically const.
bool Instruction::comesBefore(const Instruction *Other) const {
  assert(getParent() && Other->getParent() &&
         "instructions without BB parents have no order");
  assert(getParent() == Other->getParent() &&
         "cross-BB instruction order comparison");
  if (!getParent()->isInstrOrderValid())
    const_cast<BasicBlock *>(getParent())->renumberInstructions();
  return Order < Other->Order;
}
343 
// Return the earliest position where an instruction using this value could
// legally be inserted, or std::nullopt when no single dominating insertion
// point exists (callbr) or no legal point exists (catchswitch blocks).
std::optional<BasicBlock::iterator> Instruction::getInsertionPointAfterDef() {
  assert(!getType()->isVoidTy() && "Instruction must define result");
  BasicBlock *InsertBB;
  BasicBlock::iterator InsertPt;
  if (auto *PN = dyn_cast<PHINode>(this)) {
    // PHIs: insert after the whole PHI group, past any EH pads.
    InsertBB = PN->getParent();
    InsertPt = InsertBB->getFirstInsertionPt();
  } else if (auto *II = dyn_cast<InvokeInst>(this)) {
    // Invoke results are only usable on the normal edge.
    InsertBB = II->getNormalDest();
    InsertPt = InsertBB->getFirstInsertionPt();
  } else if (isa<CallBrInst>(this)) {
    // Def is available in multiple successors, there's no single dominating
    // insertion point.
    return std::nullopt;
  } else {
    assert(!isTerminator() && "Only invoke/callbr terminators return value");
    InsertBB = getParent();
    InsertPt = std::next(getIterator());
    // Any instruction inserted immediately after "this" will come before any
    // debug-info records take effect -- thus, set the head bit indicating that
    // to debug-info-transfer code.
    InsertPt.setHeadBit(true);
  }

  // catchswitch blocks don't have any legal insertion point (because they
  // are both an exception pad and a terminator).
  if (InsertPt == InsertBB->end())
    return std::nullopt;
  return InsertPt;
}
374 
375 bool Instruction::isOnlyUserOfAnyOperand() {
376   return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
377 }
378 
379 void Instruction::setHasNoUnsignedWrap(bool b) {
380   if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
381     Inst->setHasNoUnsignedWrap(b);
382   else
383     cast<TruncInst>(this)->setHasNoUnsignedWrap(b);
384 }
385 
386 void Instruction::setHasNoSignedWrap(bool b) {
387   if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
388     Inst->setHasNoSignedWrap(b);
389   else
390     cast<TruncInst>(this)->setHasNoSignedWrap(b);
391 }
392 
393 void Instruction::setIsExact(bool b) {
394   cast<PossiblyExactOperator>(this)->setIsExact(b);
395 }
396 
397 void Instruction::setNonNeg(bool b) {
398   assert(isa<PossiblyNonNegInst>(this) && "Must be zext/uitofp");
399   SubclassOptionalData = (SubclassOptionalData & ~PossiblyNonNegInst::NonNeg) |
400                          (b * PossiblyNonNegInst::NonNeg);
401 }
402 
403 bool Instruction::hasNoUnsignedWrap() const {
404   if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
405     return Inst->hasNoUnsignedWrap();
406 
407   return cast<TruncInst>(this)->hasNoUnsignedWrap();
408 }
409 
410 bool Instruction::hasNoSignedWrap() const {
411   if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
412     return Inst->hasNoSignedWrap();
413 
414   return cast<TruncInst>(this)->hasNoSignedWrap();
415 }
416 
417 bool Instruction::hasNonNeg() const {
418   assert(isa<PossiblyNonNegInst>(this) && "Must be zext/uitofp");
419   return (SubclassOptionalData & PossiblyNonNegInst::NonNeg) != 0;
420 }
421 
422 bool Instruction::hasPoisonGeneratingFlags() const {
423   return cast<Operator>(this)->hasPoisonGeneratingFlags();
424 }
425 
// Clear every flag on this instruction that can turn a well-defined input
// into poison: nuw/nsw, exact, disjoint, GEP wrap flags, nneg, samesign, and
// the nnan/ninf fast-math flags.
void Instruction::dropPoisonGeneratingFlags() {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    cast<PossiblyExactOperator>(this)->setIsExact(false);
    break;

  case Instruction::Or:
    cast<PossiblyDisjointInst>(this)->setIsDisjoint(false);
    break;

  case Instruction::GetElementPtr:
    cast<GetElementPtrInst>(this)->setNoWrapFlags(GEPNoWrapFlags::none());
    break;

  case Instruction::UIToFP:
  case Instruction::ZExt:
    setNonNeg(false);
    break;

  case Instruction::Trunc:
    cast<TruncInst>(this)->setHasNoUnsignedWrap(false);
    cast<TruncInst>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::ICmp:
    cast<ICmpInst>(this)->setSameSign(false);
    break;
  }

  // nnan and ninf are the poison-generating fast-math flags; the value-safety
  // flags (reassoc etc.) are left untouched.
  if (isa<FPMathOperator>(this)) {
    setHasNoNaNs(false);
    setHasNoInfs(false);
  }

  assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
}
473 
474 bool Instruction::hasPoisonGeneratingMetadata() const {
475   return any_of(Metadata::PoisonGeneratingIDs,
476                 [this](unsigned ID) { return hasMetadata(ID); });
477 }
478 
479 bool Instruction::hasNonDebugLocLoopMetadata() const {
480   // If there is no loop metadata at all, we also don't have
481   // non-debug loop metadata, obviously.
482   if (!hasMetadata(LLVMContext::MD_loop))
483     return false;
484 
485   // If we do have loop metadata, retrieve it.
486   MDNode *LoopMD = getMetadata(LLVMContext::MD_loop);
487 
488   // Check if the existing operands are debug locations. This loop
489   // should terminate after at most three iterations. Skip
490   // the first item because it is a self-reference.
491   for (const MDOperand &Op : llvm::drop_begin(LoopMD->operands())) {
492     // check for debug location type by attempting a cast.
493     if (!dyn_cast<DILocation>(Op)) {
494       return true;
495     }
496   }
497 
498   // If we get here, then all we have is debug locations in the loop metadata.
499   return false;
500 }
501 
502 void Instruction::dropPoisonGeneratingMetadata() {
503   for (unsigned ID : Metadata::PoisonGeneratingIDs)
504     eraseMetadata(ID);
505 }
506 
507 bool Instruction::hasPoisonGeneratingReturnAttributes() const {
508   if (const auto *CB = dyn_cast<CallBase>(this)) {
509     AttributeSet RetAttrs = CB->getAttributes().getRetAttrs();
510     return RetAttrs.hasAttribute(Attribute::Range) ||
511            RetAttrs.hasAttribute(Attribute::Alignment) ||
512            RetAttrs.hasAttribute(Attribute::NonNull);
513   }
514   return false;
515 }
516 
517 void Instruction::dropPoisonGeneratingReturnAttributes() {
518   if (auto *CB = dyn_cast<CallBase>(this)) {
519     AttributeMask AM;
520     AM.addAttribute(Attribute::Range);
521     AM.addAttribute(Attribute::Alignment);
522     AM.addAttribute(Attribute::NonNull);
523     CB->removeRetAttrs(AM);
524   }
525   assert(!hasPoisonGeneratingReturnAttributes() && "must be kept in sync");
526 }
527 
// Drop all metadata kinds not listed in KnownIDs and, for calls, any
// parameter/return attributes whose violation is immediate UB.
void Instruction::dropUBImplyingAttrsAndUnknownMetadata(
    ArrayRef<unsigned> KnownIDs) {
  dropUnknownNonDebugMetadata(KnownIDs);
  auto *CB = dyn_cast<CallBase>(this);
  if (!CB)
    return;
  // For call instructions, we also need to drop parameter and return attributes
  // that can cause UB if the call is moved to a location where the
  // attribute is not valid.
  AttributeList AL = CB->getAttributes();
  if (AL.isEmpty())
    return;
  AttributeMask UBImplyingAttributes =
      AttributeFuncs::getUBImplyingAttributes();
  for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
    CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
  CB->removeRetAttrs(UBImplyingAttributes);
}
546 
547 void Instruction::dropUBImplyingAttrsAndMetadata() {
548   // !annotation metadata does not impact semantics.
549   // !range, !nonnull and !align produce poison, so they are safe to speculate.
550   // !noundef and various AA metadata must be dropped, as it generally produces
551   // immediate undefined behavior.
552   unsigned KnownIDs[] = {LLVMContext::MD_annotation, LLVMContext::MD_range,
553                          LLVMContext::MD_nonnull, LLVMContext::MD_align};
554   dropUBImplyingAttrsAndUnknownMetadata(KnownIDs);
555 }
556 
557 bool Instruction::isExact() const {
558   return cast<PossiblyExactOperator>(this)->isExact();
559 }
560 
561 void Instruction::setFast(bool B) {
562   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
563   cast<FPMathOperator>(this)->setFast(B);
564 }
565 
566 void Instruction::setHasAllowReassoc(bool B) {
567   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
568   cast<FPMathOperator>(this)->setHasAllowReassoc(B);
569 }
570 
571 void Instruction::setHasNoNaNs(bool B) {
572   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
573   cast<FPMathOperator>(this)->setHasNoNaNs(B);
574 }
575 
576 void Instruction::setHasNoInfs(bool B) {
577   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
578   cast<FPMathOperator>(this)->setHasNoInfs(B);
579 }
580 
581 void Instruction::setHasNoSignedZeros(bool B) {
582   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
583   cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
584 }
585 
586 void Instruction::setHasAllowReciprocal(bool B) {
587   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
588   cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
589 }
590 
591 void Instruction::setHasAllowContract(bool B) {
592   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
593   cast<FPMathOperator>(this)->setHasAllowContract(B);
594 }
595 
596 void Instruction::setHasApproxFunc(bool B) {
597   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
598   cast<FPMathOperator>(this)->setHasApproxFunc(B);
599 }
600 
601 void Instruction::setFastMathFlags(FastMathFlags FMF) {
602   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
603   cast<FPMathOperator>(this)->setFastMathFlags(FMF);
604 }
605 
606 void Instruction::copyFastMathFlags(FastMathFlags FMF) {
607   assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
608   cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
609 }
610 
611 bool Instruction::isFast() const {
612   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
613   return cast<FPMathOperator>(this)->isFast();
614 }
615 
616 bool Instruction::hasAllowReassoc() const {
617   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
618   return cast<FPMathOperator>(this)->hasAllowReassoc();
619 }
620 
621 bool Instruction::hasNoNaNs() const {
622   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
623   return cast<FPMathOperator>(this)->hasNoNaNs();
624 }
625 
626 bool Instruction::hasNoInfs() const {
627   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
628   return cast<FPMathOperator>(this)->hasNoInfs();
629 }
630 
631 bool Instruction::hasNoSignedZeros() const {
632   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
633   return cast<FPMathOperator>(this)->hasNoSignedZeros();
634 }
635 
636 bool Instruction::hasAllowReciprocal() const {
637   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
638   return cast<FPMathOperator>(this)->hasAllowReciprocal();
639 }
640 
641 bool Instruction::hasAllowContract() const {
642   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
643   return cast<FPMathOperator>(this)->hasAllowContract();
644 }
645 
646 bool Instruction::hasApproxFunc() const {
647   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
648   return cast<FPMathOperator>(this)->hasApproxFunc();
649 }
650 
651 FastMathFlags Instruction::getFastMathFlags() const {
652   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
653   return cast<FPMathOperator>(this)->getFastMathFlags();
654 }
655 
656 void Instruction::copyFastMathFlags(const Instruction *I) {
657   copyFastMathFlags(I->getFastMathFlags());
658 }
659 
// Copy every optional IR flag (wrap, exact, disjoint, fast-math, GEP wrap,
// nneg, samesign) from V onto this instruction, but only where both sides
// support the flag. GEP wrap flags are unioned with existing ones rather than
// replaced. Setting IncludeWrapFlags to false skips nuw/nsw for overflowing
// binary ops (trunc wrap flags are still copied).
void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
  // Copy the wrapping flags.
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  // Trunc carries nuw/nsw too; copy them between truncs.
  if (auto *TI = dyn_cast<TruncInst>(V)) {
    if (isa<TruncInst>(this)) {
      setHasNoSignedWrap(TI->hasNoSignedWrap());
      setHasNoUnsignedWrap(TI->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(V))
    if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(this))
      DestPD->setIsDisjoint(SrcPD->isDisjoint());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  // GEP wrap flags are merged (union), not overwritten.
  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setNoWrapFlags(SrcGEP->getNoWrapFlags() |
                              DestGEP->getNoWrapFlags());

  if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
    if (isa<PossiblyNonNegInst>(this))
      setNonNeg(NNI->hasNonNeg());

  if (auto *SrcICmp = dyn_cast<ICmpInst>(V))
    if (auto *DestICmp = dyn_cast<ICmpInst>(this))
      DestICmp->setSameSign(SrcICmp->hasSameSign());
}
703 
// Intersect this instruction's optional IR flags with those of V: each flag
// remains set only if both instructions have it. Used when merging or
// combining two instructions into one.
void Instruction::andIRFlags(const Value *V) {
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (isa<OverflowingBinaryOperator>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
    }
  }

  // Trunc carries nuw/nsw too; intersect them between truncs.
  if (auto *TI = dyn_cast<TruncInst>(V)) {
    if (isa<TruncInst>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && TI->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && TI->hasNoUnsignedWrap());
    }
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(isExact() && PE->isExact());

  if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(V))
    if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(this))
      DestPD->setIsDisjoint(DestPD->isDisjoint() && SrcPD->isDisjoint());

  // Fast-math flags are intersected bitwise.
  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    if (isa<FPMathOperator>(this)) {
      FastMathFlags FM = getFastMathFlags();
      FM &= FP->getFastMathFlags();
      copyFastMathFlags(FM);
    }
  }

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setNoWrapFlags(SrcGEP->getNoWrapFlags() &
                              DestGEP->getNoWrapFlags());

  if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
    if (isa<PossiblyNonNegInst>(this))
      setNonNeg(hasNonNeg() && NNI->hasNonNeg());

  if (auto *SrcICmp = dyn_cast<ICmpInst>(V))
    if (auto *DestICmp = dyn_cast<ICmpInst>(this))
      DestICmp->setSameSign(DestICmp->hasSameSign() && SrcICmp->hasSameSign());
}
748 
/// Return the textual IR-assembly name for the given opcode, or
/// "<Invalid operator> " for values that are not valid opcodes.
const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret:    return "ret";
  case Br:     return "br";
  case Switch: return "switch";
  case IndirectBr: return "indirectbr";
  case Invoke: return "invoke";
  case Resume: return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet: return "cleanupret";
  case CatchRet: return "catchret";
  case CatchPad: return "catchpad";
  case CatchSwitch: return "catchswitch";
  case CallBr: return "callbr";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add: return "add";
  case FAdd: return "fadd";
  case Sub: return "sub";
  case FSub: return "fsub";
  case Mul: return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca:        return "alloca";
  case Load:          return "load";
  case Store:         return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW:     return "atomicrmw";
  case Fence:         return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc:         return "trunc";
  case ZExt:          return "zext";
  case SExt:          return "sext";
  case FPTrunc:       return "fptrunc";
  case FPExt:         return "fpext";
  case FPToUI:        return "fptoui";
  case FPToSI:        return "fptosi";
  case UIToFP:        return "uitofp";
  case SIToFP:        return "sitofp";
  case IntToPtr:      return "inttoptr";
  case PtrToInt:      return "ptrtoint";
  case BitCast:       return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp:           return "icmp";
  case FCmp:           return "fcmp";
  case PHI:            return "phi";
  case Select:         return "select";
  case Call:           return "call";
  case Shl:            return "shl";
  case LShr:           return "lshr";
  case AShr:           return "ashr";
  case VAArg:          return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement:  return "insertelement";
  case ShuffleVector:  return "shufflevector";
  case ExtractValue:   return "extractvalue";
  case InsertValue:    return "insertvalue";
  case LandingPad:     return "landingpad";
  case CleanupPad:     return "cleanuppad";
  case Freeze:         return "freeze";

  default: return "<Invalid operator> ";
  }
}
833 
/// This must be kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/IPO/MergeFunctions.cpp.
///
/// Compares the instruction-specific ("special") state of this instruction
/// against \p I2 — e.g. alignment, volatility, orderings, predicates, call
/// attributes — but NOT operands or types, which callers compare separately.
/// \p IgnoreAlignment relaxes the alignment comparison for alloca/load/store.
/// \p IntersectAttrs accepts call attribute lists whose intersection is
/// well-defined instead of requiring exact equality.
bool Instruction::hasSameSpecialState(const Instruction *I2,
                                      bool IgnoreAlignment,
                                      bool IntersectAttrs) const {
  auto I1 = this;
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  // Attribute lists match either byte-for-byte or, under IntersectAttrs,
  // whenever intersectWith() yields a valid (non-empty) result.
  auto CheckAttrsSame = [IntersectAttrs](const CallBase *CB0,
                                         const CallBase *CB1) {
    return IntersectAttrs
               ? CB0->getAttributes()
                     .intersectWith(CB0->getContext(), CB1->getAttributes())
                     .has_value()
               : CB0->getAttributes() == CB1->getAttributes();
  };

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  // For the three call-like instructions, compare calling convention,
  // attributes, and operand bundle schemas; CallInst additionally compares
  // the tail-call marker.
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CheckAttrsSame(CI, cast<CallInst>(I2)) &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CheckAttrsSame(CI, cast<InvokeInst>(I2)) &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CheckAttrsSame(CI, cast<CallBrInst>(I2)) &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I1))
    return GEP->getSourceElementType() ==
           cast<GetElementPtrInst>(I2)->getSourceElementType();

  // Instructions with no extra state (e.g. binary operators) compare equal.
  return true;
}
913 
914 bool Instruction::isIdenticalTo(const Instruction *I) const {
915   return isIdenticalToWhenDefined(I) &&
916          SubclassOptionalData == I->SubclassOptionalData;
917 }
918 
919 bool Instruction::isIdenticalToWhenDefined(const Instruction *I,
920                                            bool IntersectAttrs) const {
921   if (getOpcode() != I->getOpcode() ||
922       getNumOperands() != I->getNumOperands() || getType() != I->getType())
923     return false;
924 
925   // If both instructions have no operands, they are identical.
926   if (getNumOperands() == 0 && I->getNumOperands() == 0)
927     return this->hasSameSpecialState(I, /*IgnoreAlignment=*/false,
928                                      IntersectAttrs);
929 
930   // We have two instructions of identical opcode and #operands.  Check to see
931   // if all operands are the same.
932   if (!std::equal(op_begin(), op_end(), I->op_begin()))
933     return false;
934 
935   // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
936   if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
937     const PHINode *otherPHI = cast<PHINode>(I);
938     return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
939                       otherPHI->block_begin());
940   }
941 
942   return this->hasSameSpecialState(I, /*IgnoreAlignment=*/false,
943                                    IntersectAttrs);
944 }
945 
946 // Keep this in sync with FunctionComparator::cmpOperations in
947 // lib/Transforms/IPO/MergeFunctions.cpp.
948 bool Instruction::isSameOperationAs(const Instruction *I,
949                                     unsigned flags) const {
950   bool IgnoreAlignment = flags & CompareIgnoringAlignment;
951   bool UseScalarTypes = flags & CompareUsingScalarTypes;
952   bool IntersectAttrs = flags & CompareUsingIntersectedAttrs;
953 
954   if (getOpcode() != I->getOpcode() ||
955       getNumOperands() != I->getNumOperands() ||
956       (UseScalarTypes ?
957        getType()->getScalarType() != I->getType()->getScalarType() :
958        getType() != I->getType()))
959     return false;
960 
961   // We have two instructions of identical opcode and #operands.  Check to see
962   // if all operands are the same type
963   for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
964     if (UseScalarTypes ?
965         getOperand(i)->getType()->getScalarType() !=
966           I->getOperand(i)->getType()->getScalarType() :
967         getOperand(i)->getType() != I->getOperand(i)->getType())
968       return false;
969 
970   return this->hasSameSpecialState(I, IgnoreAlignment, IntersectAttrs);
971 }
972 
973 bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
974   for (const Use &U : uses()) {
975     // PHI nodes uses values in the corresponding predecessor block.  For other
976     // instructions, just check to see whether the parent of the use matches up.
977     const Instruction *I = cast<Instruction>(U.getUser());
978     const PHINode *PN = dyn_cast<PHINode>(I);
979     if (!PN) {
980       if (I->getParent() != BB)
981         return true;
982       continue;
983     }
984 
985     if (PN->getIncomingBlock(U) != BB)
986       return true;
987   }
988   return false;
989 }
990 
991 bool Instruction::mayReadFromMemory() const {
992   switch (getOpcode()) {
993   default: return false;
994   case Instruction::VAArg:
995   case Instruction::Load:
996   case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
997   case Instruction::AtomicCmpXchg:
998   case Instruction::AtomicRMW:
999   case Instruction::CatchPad:
1000   case Instruction::CatchRet:
1001     return true;
1002   case Instruction::Call:
1003   case Instruction::Invoke:
1004   case Instruction::CallBr:
1005     return !cast<CallBase>(this)->onlyWritesMemory();
1006   case Instruction::Store:
1007     return !cast<StoreInst>(this)->isUnordered();
1008   }
1009 }
1010 
1011 bool Instruction::mayWriteToMemory() const {
1012   switch (getOpcode()) {
1013   default: return false;
1014   case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
1015   case Instruction::Store:
1016   case Instruction::VAArg:
1017   case Instruction::AtomicCmpXchg:
1018   case Instruction::AtomicRMW:
1019   case Instruction::CatchPad:
1020   case Instruction::CatchRet:
1021     return true;
1022   case Instruction::Call:
1023   case Instruction::Invoke:
1024   case Instruction::CallBr:
1025     return !cast<CallBase>(this)->onlyReadsMemory();
1026   case Instruction::Load:
1027     return !cast<LoadInst>(this)->isUnordered();
1028   }
1029 }
1030 
1031 bool Instruction::isAtomic() const {
1032   switch (getOpcode()) {
1033   default:
1034     return false;
1035   case Instruction::AtomicCmpXchg:
1036   case Instruction::AtomicRMW:
1037   case Instruction::Fence:
1038     return true;
1039   case Instruction::Load:
1040     return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
1041   case Instruction::Store:
1042     return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
1043   }
1044 }
1045 
1046 bool Instruction::hasAtomicLoad() const {
1047   assert(isAtomic());
1048   switch (getOpcode()) {
1049   default:
1050     return false;
1051   case Instruction::AtomicCmpXchg:
1052   case Instruction::AtomicRMW:
1053   case Instruction::Load:
1054     return true;
1055   }
1056 }
1057 
1058 bool Instruction::hasAtomicStore() const {
1059   assert(isAtomic());
1060   switch (getOpcode()) {
1061   default:
1062     return false;
1063   case Instruction::AtomicCmpXchg:
1064   case Instruction::AtomicRMW:
1065   case Instruction::Store:
1066     return true;
1067   }
1068 }
1069 
1070 bool Instruction::isVolatile() const {
1071   switch (getOpcode()) {
1072   default:
1073     return false;
1074   case Instruction::AtomicRMW:
1075     return cast<AtomicRMWInst>(this)->isVolatile();
1076   case Instruction::Store:
1077     return cast<StoreInst>(this)->isVolatile();
1078   case Instruction::Load:
1079     return cast<LoadInst>(this)->isVolatile();
1080   case Instruction::AtomicCmpXchg:
1081     return cast<AtomicCmpXchgInst>(this)->isVolatile();
1082   case Instruction::Call:
1083   case Instruction::Invoke:
1084     // There are a very limited number of intrinsics with volatile flags.
1085     if (auto *II = dyn_cast<IntrinsicInst>(this)) {
1086       if (auto *MI = dyn_cast<MemIntrinsic>(II))
1087         return MI->isVolatile();
1088       switch (II->getIntrinsicID()) {
1089       default: break;
1090       case Intrinsic::matrix_column_major_load:
1091         return cast<ConstantInt>(II->getArgOperand(2))->isOne();
1092       case Intrinsic::matrix_column_major_store:
1093         return cast<ConstantInt>(II->getArgOperand(3))->isOne();
1094       }
1095     }
1096     return false;
1097   }
1098 }
1099 
// Return the type of the value this instruction loads from or stores to
// memory, or nullptr when it does not access memory or the accessed type
// cannot be determined here.
Type *Instruction::getAccessType() const {
  switch (getOpcode()) {
  case Instruction::Store:
    return cast<StoreInst>(this)->getValueOperand()->getType();
  case Instruction::Load:
  case Instruction::AtomicRMW:
    // For loads and atomicrmw the accessed type is the result type.
    return getType();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->getNewValOperand()->getType();
  case Instruction::Call:
  case Instruction::Invoke:
    // Only specific memory intrinsics expose an access type; any other
    // call (and any unlisted intrinsic) falls through to return nullptr.
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(this)) {
      switch (II->getIntrinsicID()) {
      // Load-like intrinsics: the accessed type is the result type.
      case Intrinsic::masked_load:
      case Intrinsic::masked_gather:
      case Intrinsic::masked_expandload:
      case Intrinsic::vp_load:
      case Intrinsic::vp_gather:
      case Intrinsic::experimental_vp_strided_load:
        return II->getType();
      // Store-like intrinsics: the accessed type is the stored value's
      // type (operand 0).
      case Intrinsic::masked_store:
      case Intrinsic::masked_scatter:
      case Intrinsic::masked_compressstore:
      case Intrinsic::vp_store:
      case Intrinsic::vp_scatter:
      case Intrinsic::experimental_vp_strided_store:
        return II->getOperand(0)->getType();
      default:
        break;
      }
    }
  }

  return nullptr;
}
1135 
1136 static bool canUnwindPastLandingPad(const LandingPadInst *LP,
1137                                     bool IncludePhaseOneUnwind) {
1138   // Because phase one unwinding skips cleanup landingpads, we effectively
1139   // unwind past this frame, and callers need to have valid unwind info.
1140   if (LP->isCleanup())
1141     return IncludePhaseOneUnwind;
1142 
1143   for (unsigned I = 0; I < LP->getNumClauses(); ++I) {
1144     Constant *Clause = LP->getClause(I);
1145     // catch ptr null catches all exceptions.
1146     if (LP->isCatch(I) && isa<ConstantPointerNull>(Clause))
1147       return false;
1148     // filter [0 x ptr] catches all exceptions.
1149     if (LP->isFilter(I) && Clause->getType()->getArrayNumElements() == 0)
1150       return false;
1151   }
1152 
1153   // May catch only some subset of exceptions, in which case other exceptions
1154   // will continue unwinding.
1155   return true;
1156 }
1157 
// Conservatively determine whether this instruction can raise an exception
// that propagates out of it. With \p IncludePhaseOneUnwind, also account
// for phase-one (search-phase) unwinding, which skips cleanup-only pads.
bool Instruction::mayThrow(bool IncludePhaseOneUnwind) const {
  switch (getOpcode()) {
  case Instruction::Call:
    // A call throws unless marked nounwind.
    return !cast<CallInst>(this)->doesNotThrow();
  case Instruction::CleanupRet:
    // Only throws further if it unwinds out of the current function.
    return cast<CleanupReturnInst>(this)->unwindsToCaller();
  case Instruction::CatchSwitch:
    return cast<CatchSwitchInst>(this)->unwindsToCaller();
  case Instruction::Resume:
    // resume always continues unwinding.
    return true;
  case Instruction::Invoke: {
    // Landingpads themselves don't unwind -- however, an invoke of a skipped
    // landingpad may continue unwinding.
    BasicBlock *UnwindDest = cast<InvokeInst>(this)->getUnwindDest();
    BasicBlock::iterator Pad = UnwindDest->getFirstNonPHIIt();
    if (auto *LP = dyn_cast<LandingPadInst>(Pad))
      return canUnwindPastLandingPad(LP, IncludePhaseOneUnwind);
    return false;
  }
  case Instruction::CleanupPad:
    // Treat the same as cleanup landingpad.
    return IncludePhaseOneUnwind;
  default:
    return false;
  }
}
1184 
1185 bool Instruction::mayHaveSideEffects() const {
1186   return mayWriteToMemory() || mayThrow() || !willReturn();
1187 }
1188 
1189 bool Instruction::isSafeToRemove() const {
1190   return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
1191          !this->isTerminator() && !this->isEHPad();
1192 }
1193 
1194 bool Instruction::willReturn() const {
1195   // Volatile store isn't guaranteed to return; see LangRef.
1196   if (auto *SI = dyn_cast<StoreInst>(this))
1197     return !SI->isVolatile();
1198 
1199   if (const auto *CB = dyn_cast<CallBase>(this))
1200     return CB->hasFnAttr(Attribute::WillReturn);
1201   return true;
1202 }
1203 
1204 bool Instruction::isLifetimeStartOrEnd() const {
1205   auto *II = dyn_cast<IntrinsicInst>(this);
1206   if (!II)
1207     return false;
1208   Intrinsic::ID ID = II->getIntrinsicID();
1209   return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
1210 }
1211 
1212 bool Instruction::isLaunderOrStripInvariantGroup() const {
1213   auto *II = dyn_cast<IntrinsicInst>(this);
1214   if (!II)
1215     return false;
1216   Intrinsic::ID ID = II->getIntrinsicID();
1217   return ID == Intrinsic::launder_invariant_group ||
1218          ID == Intrinsic::strip_invariant_group;
1219 }
1220 
1221 bool Instruction::isDebugOrPseudoInst() const {
1222   return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
1223 }
1224 
1225 const Instruction *
1226 Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
1227   for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
1228     if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
1229       return I;
1230   return nullptr;
1231 }
1232 
1233 const Instruction *
1234 Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
1235   for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
1236     if (!isa<DbgInfoIntrinsic>(I) &&
1237         !(SkipPseudoOp && isa<PseudoProbeInst>(I)) &&
1238         !(isa<IntrinsicInst>(I) &&
1239           cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fake_use))
1240       return I;
1241   return nullptr;
1242 }
1243 
1244 const DebugLoc &Instruction::getStableDebugLoc() const {
1245   if (isa<DbgInfoIntrinsic>(this))
1246     if (const Instruction *Next = getNextNonDebugInstruction())
1247       return Next->getDebugLoc();
1248   return getDebugLoc();
1249 }
1250 
1251 bool Instruction::isAssociative() const {
1252   if (auto *II = dyn_cast<IntrinsicInst>(this))
1253     return II->isAssociative();
1254   unsigned Opcode = getOpcode();
1255   if (isAssociative(Opcode))
1256     return true;
1257 
1258   switch (Opcode) {
1259   case FMul:
1260   case FAdd:
1261     return cast<FPMathOperator>(this)->hasAllowReassoc() &&
1262            cast<FPMathOperator>(this)->hasNoSignedZeros();
1263   default:
1264     return false;
1265   }
1266 }
1267 
1268 bool Instruction::isCommutative() const {
1269   if (auto *II = dyn_cast<IntrinsicInst>(this))
1270     return II->isCommutative();
1271   // TODO: Should allow icmp/fcmp?
1272   return isCommutative(getOpcode());
1273 }
1274 
// Dispatch to the concrete terminator class (generated from
// Instruction.def); calling this on a non-terminator is a programming error.
unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
1286 
// Dispatch to the concrete terminator class (generated from
// Instruction.def) to fetch successor \p idx; calling this on a
// non-terminator is a programming error.
BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
1298 
// Dispatch to the concrete terminator class (generated from
// Instruction.def) to replace successor \p idx with \p B; calling this on
// a non-terminator is a programming error.
void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
1310 
1311 void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
1312   for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
1313        Idx != NumSuccessors; ++Idx)
1314     if (getSuccessor(Idx) == OldBB)
1315       setSuccessor(Idx, NewBB);
1316 }
1317 
// Fallback cloneImpl(); clone() below requires every concrete instruction
// class to provide its own override, so reaching this is a bug.
Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}
1321 
1322 void Instruction::swapProfMetadata() {
1323   MDNode *ProfileData = getBranchWeightMDNode(*this);
1324   if (!ProfileData)
1325     return;
1326   unsigned FirstIdx = getBranchWeightOffset(ProfileData);
1327   if (ProfileData->getNumOperands() != 2 + FirstIdx)
1328     return;
1329 
1330   unsigned SecondIdx = FirstIdx + 1;
1331   SmallVector<Metadata *, 4> Ops;
1332   // If there are more weights past the second, we can't swap them
1333   if (ProfileData->getNumOperands() > SecondIdx + 1)
1334     return;
1335   for (unsigned Idx = 0; Idx < FirstIdx; ++Idx) {
1336     Ops.push_back(ProfileData->getOperand(Idx));
1337   }
1338   // Switch the order of the weights
1339   Ops.push_back(ProfileData->getOperand(SecondIdx));
1340   Ops.push_back(ProfileData->getOperand(FirstIdx));
1341   setMetadata(LLVMContext::MD_prof,
1342               MDNode::get(ProfileData->getContext(), Ops));
1343 }
1344 
1345 void Instruction::copyMetadata(const Instruction &SrcInst,
1346                                ArrayRef<unsigned> WL) {
1347   if (!SrcInst.hasMetadata())
1348     return;
1349 
1350   SmallDenseSet<unsigned, 4> WLS(WL.begin(), WL.end());
1351 
1352   // Otherwise, enumerate and copy over metadata from the old instruction to the
1353   // new one.
1354   SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
1355   SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
1356   for (const auto &MD : TheMDs) {
1357     if (WL.empty() || WLS.count(MD.first))
1358       setMetadata(MD.first, MD.second);
1359   }
1360   if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
1361     setDebugLoc(SrcInst.getDebugLoc());
1362 }
1363 
// Create a copy of this instruction (not inserted anywhere), dispatching to
// the concrete subclass' cloneImpl() by opcode via Instruction.def.
Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  // Carry over the optional flags and all metadata (including the debug
  // location) from the original instruction.
  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}
1381