//===-- IntrinsicInst.cpp - Intrinsic Instruction Wrappers ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements methods that make it really easy to deal with intrinsic
// functions.
//
// All intrinsic function calls are instances of the call instruction, so these
// are all subclasses of the CallInst class.  Note that none of these classes
// has state or virtual methods, which is an important part of this gross/neat
// hack working.
//
// In some cases, arguments to intrinsics need to be generic and are defined as
// type pointer to empty struct { }*.  To access the real item of interest the
// cast instruction needs to be stripped away.
//
//===----------------------------------------------------------------------===//
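
// A minimal usage sketch (illustrative): because these wrappers add no state
// or virtual methods, a plain dyn_cast on an Instruction is enough to reach
// their accessors, for example:
//
//   if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
//     for (Value *V : DVI->location_ops())
//       (void)V; // visit each SSA value the debug intrinsic refers to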

#include "llvm/IR/IntrinsicInst.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"

#include "llvm/Support/raw_ostream.h"
using namespace llvm;

//===----------------------------------------------------------------------===//
/// DbgVariableIntrinsic - This is the common base class for debug info
/// intrinsics for variables.
///

iterator_range<DbgVariableIntrinsic::location_op_iterator>
DbgVariableIntrinsic::location_ops() const {
  auto *MD = getRawLocation();
  assert(MD && "First operand of DbgVariableIntrinsic should be non-null.");

  // If operand is ValueAsMetadata, return a range over just that operand.
  if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
    return {location_op_iterator(VAM), location_op_iterator(VAM + 1)};
  }
  // If operand is DIArgList, return a range over its args.
  if (auto *AL = dyn_cast<DIArgList>(MD))
    return {location_op_iterator(AL->args_begin()),
            location_op_iterator(AL->args_end())};
  // Operand must be an empty metadata tuple, so return empty iterator.
  return {location_op_iterator(static_cast<ValueAsMetadata *>(nullptr)),
          location_op_iterator(static_cast<ValueAsMetadata *>(nullptr))};
}

Value *DbgVariableIntrinsic::getVariableLocationOp(unsigned OpIdx) const {
  auto *MD = getRawLocation();
  assert(MD && "First operand of DbgVariableIntrinsic should be non-null.");
  if (auto *AL = dyn_cast<DIArgList>(MD))
    return AL->getArgs()[OpIdx]->getValue();
  if (isa<MDNode>(MD))
    return nullptr;
  assert(
      isa<ValueAsMetadata>(MD) &&
      "Attempted to get location operand from DbgVariableIntrinsic with none.");
  auto *V = cast<ValueAsMetadata>(MD);
  assert(OpIdx == 0 && "Operand Index must be 0 for a debug intrinsic with a "
                       "single location operand.");
  return V->getValue();
}

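// Convert a location Value into the ValueAsMetadata form used inside a
// DIArgList: unwrap a MetadataAsValue if one is already present, otherwise
// wrap the Value directly.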
static ValueAsMetadata *getAsMetadata(Value *V) {
  return isa<MetadataAsValue>(V) ? dyn_cast<ValueAsMetadata>(
                                       cast<MetadataAsValue>(V)->getMetadata())
                                 : ValueAsMetadata::get(V);
}

void DbgVariableIntrinsic::replaceVariableLocationOp(Value *OldValue,
                                                     Value *NewValue) {
  assert(NewValue && "Values must be non-null");
  auto Locations = location_ops();
  auto OldIt = find(Locations, OldValue);
  assert(OldIt != Locations.end() && "OldValue must be a current location");
  if (!hasArgList()) {
    Value *NewOperand = isa<MetadataAsValue>(NewValue)
                            ? NewValue
                            : MetadataAsValue::get(
                                  getContext(), ValueAsMetadata::get(NewValue));
    return setArgOperand(0, NewOperand);
  }
  SmallVector<ValueAsMetadata *, 4> MDs;
  ValueAsMetadata *NewOperand = getAsMetadata(NewValue);
  for (auto *VMD : Locations)
    MDs.push_back(VMD == *OldIt ? NewOperand : getAsMetadata(VMD));
  setArgOperand(
      0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
}

void DbgVariableIntrinsic::replaceVariableLocationOp(unsigned OpIdx,
                                                     Value *NewValue) {
  assert(OpIdx < getNumVariableLocationOps() && "Invalid Operand Index");
  if (!hasArgList()) {
    Value *NewOperand = isa<MetadataAsValue>(NewValue)
                            ? NewValue
                            : MetadataAsValue::get(
                                  getContext(), ValueAsMetadata::get(NewValue));
    return setArgOperand(0, NewOperand);
  }
  SmallVector<ValueAsMetadata *, 4> MDs;
  ValueAsMetadata *NewOperand = getAsMetadata(NewValue);
  for (unsigned Idx = 0; Idx < getNumVariableLocationOps(); ++Idx)
    MDs.push_back(Idx == OpIdx ? NewOperand
                               : getAsMetadata(getVariableLocationOp(Idx)));
  setArgOperand(
      0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
}

void DbgVariableIntrinsic::addVariableLocationOps(ArrayRef<Value *> NewValues,
                                                  DIExpression *NewExpr) {
  assert(NewExpr->hasAllLocationOps(getNumVariableLocationOps() +
                                    NewValues.size()) &&
         "NewExpr for debug variable intrinsic does not reference every "
         "location operand.");
  assert(!is_contained(NewValues, nullptr) && "New values must be non-null");
  setArgOperand(2, MetadataAsValue::get(getContext(), NewExpr));
  SmallVector<ValueAsMetadata *, 4> MDs;
  for (auto *VMD : location_ops())
    MDs.push_back(getAsMetadata(VMD));
  for (auto *VMD : NewValues)
    MDs.push_back(getAsMetadata(VMD));
  setArgOperand(
      0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
}

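// Return the size in bits of the fragment this intrinsic describes: the
// expression's fragment size if one is present, otherwise the size of the
// whole variable.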
Optional<uint64_t> DbgVariableIntrinsic::getFragmentSizeInBits() const {
  if (auto Fragment = getExpression()->getFragmentInfo())
    return Fragment->SizeInBits;
  return getVariable()->getSizeInBits();
}

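// Binary-search the sorted NameTable for Name and return the matching index,
// or -1 if there is none. An entry also matches when Name merely extends it
// with a further "."-separated suffix, which is how mangled/overloaded
// intrinsic names resolve to their base table entry.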
int llvm::Intrinsic::lookupLLVMIntrinsicByName(ArrayRef<const char *> NameTable,
                                               StringRef Name) {
  assert(Name.startswith("llvm."));

  // Do successive binary searches of the dotted name components. For
  // "llvm.gc.experimental.statepoint.p1i8.p1i32", we will find the range of
  // intrinsics starting with "llvm.gc", then "llvm.gc.experimental", then
  // "llvm.gc.experimental.statepoint", and then we will stop as the range is
  // size 1. During the search, we can skip the prefix that we already know is
  // identical. By using strncmp we consider names with differing suffixes to
  // be part of the equal range.
  size_t CmpEnd = 4; // Skip the "llvm" component.
  const char *const *Low = NameTable.begin();
  const char *const *High = NameTable.end();
  const char *const *LastLow = Low;
  while (CmpEnd < Name.size() && High - Low > 0) {
    size_t CmpStart = CmpEnd;
    CmpEnd = Name.find('.', CmpStart + 1);
    CmpEnd = CmpEnd == StringRef::npos ? Name.size() : CmpEnd;
    auto Cmp = [CmpStart, CmpEnd](const char *LHS, const char *RHS) {
      return strncmp(LHS + CmpStart, RHS + CmpStart, CmpEnd - CmpStart) < 0;
    };
    LastLow = Low;
    std::tie(Low, High) = std::equal_range(Low, High, Name.data(), Cmp);
  }
  if (High - Low > 0)
    LastLow = Low;

  if (LastLow == NameTable.end())
    return -1;
  StringRef NameFound = *LastLow;
  if (Name == NameFound ||
      (Name.startswith(NameFound) && Name[NameFound.size()] == '.'))
    return LastLow - NameTable.begin();
  return -1;
}

Value *InstrProfIncrementInst::getStep() const {
  if (InstrProfIncrementInstStep::classof(this)) {
    return const_cast<Value *>(getArgOperand(4));
  }
  const Module *M = getModule();
  LLVMContext &Context = M->getContext();
  return ConstantInt::get(Type::getInt64Ty(Context), 1);
}

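// Constrained FP intrinsics carry their rounding mode and exception behavior
// as trailing metadata-string operands, e.g. (illustrative IR):
//
//   call double @llvm.experimental.constrained.fadd.f64(
//            double %a, double %b,
//            metadata !"round.dynamic", metadata !"fpexcept.strict")
//
// The accessors below parse those strings and return None if the expected
// metadata operand is missing or malformed.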
Optional<RoundingMode> ConstrainedFPIntrinsic::getRoundingMode() const {
  unsigned NumOperands = arg_size();
  Metadata *MD = nullptr;
  auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 2));
  if (MAV)
    MD = MAV->getMetadata();
  if (!MD || !isa<MDString>(MD))
    return None;
  return convertStrToRoundingMode(cast<MDString>(MD)->getString());
}

Optional<fp::ExceptionBehavior>
ConstrainedFPIntrinsic::getExceptionBehavior() const {
  unsigned NumOperands = arg_size();
  Metadata *MD = nullptr;
  auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 1));
  if (MAV)
    MD = MAV->getMetadata();
  if (!MD || !isa<MDString>(MD))
    return None;
  return convertStrToExceptionBehavior(cast<MDString>(MD)->getString());
}

bool ConstrainedFPIntrinsic::isDefaultFPEnvironment() const {
  Optional<fp::ExceptionBehavior> Except = getExceptionBehavior();
  if (Except) {
    if (Except.getValue() != fp::ebIgnore)
      return false;
  }

  Optional<RoundingMode> Rounding = getRoundingMode();
  if (Rounding) {
    if (Rounding.getValue() != RoundingMode::NearestTiesToEven)
      return false;
  }

  return true;
}

FCmpInst::Predicate ConstrainedFPCmpIntrinsic::getPredicate() const {
  Metadata *MD = cast<MetadataAsValue>(getArgOperand(2))->getMetadata();
  if (!MD || !isa<MDString>(MD))
    return FCmpInst::BAD_FCMP_PREDICATE;
  return StringSwitch<FCmpInst::Predicate>(cast<MDString>(MD)->getString())
      .Case("oeq", FCmpInst::FCMP_OEQ)
      .Case("ogt", FCmpInst::FCMP_OGT)
      .Case("oge", FCmpInst::FCMP_OGE)
      .Case("olt", FCmpInst::FCMP_OLT)
      .Case("ole", FCmpInst::FCMP_OLE)
      .Case("one", FCmpInst::FCMP_ONE)
      .Case("ord", FCmpInst::FCMP_ORD)
      .Case("uno", FCmpInst::FCMP_UNO)
      .Case("ueq", FCmpInst::FCMP_UEQ)
      .Case("ugt", FCmpInst::FCMP_UGT)
      .Case("uge", FCmpInst::FCMP_UGE)
      .Case("ult", FCmpInst::FCMP_ULT)
      .Case("ule", FCmpInst::FCMP_ULE)
      .Case("une", FCmpInst::FCMP_UNE)
      .Default(FCmpInst::BAD_FCMP_PREDICATE);
}

bool ConstrainedFPIntrinsic::isUnaryOp() const {
  switch (getIntrinsicID()) {
  default:
    return false;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return NARG == 1;
#include "llvm/IR/ConstrainedOps.def"
  }
}

bool ConstrainedFPIntrinsic::isTernaryOp() const {
  switch (getIntrinsicID()) {
  default:
    return false;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return NARG == 3;
#include "llvm/IR/ConstrainedOps.def"
  }
}

bool ConstrainedFPIntrinsic::classof(const IntrinsicInst *I) {
  switch (I->getIntrinsicID()) {
#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC)                        \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    return true;
  default:
    return false;
  }
}

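// Vector-predicated (VP) intrinsics take an explicit mask and an explicit
// vector length (EVL) as trailing parameters, e.g. (illustrative IR):
//
//   call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %a, <4 x i32> %b,
//                                     <4 x i1> %mask, i32 %evl)
//
// The helpers below locate those parameters using the tables in
// llvm/IR/VPIntrinsics.def.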
ElementCount VPIntrinsic::getStaticVectorLength() const {
  auto GetVectorLengthOfType = [](const Type *T) -> ElementCount {
    const auto *VT = cast<VectorType>(T);
    auto ElemCount = VT->getElementCount();
    return ElemCount;
  };

  Value *VPMask = getMaskParam();
  assert(VPMask && "No mask param?");
  return GetVectorLengthOfType(VPMask->getType());
}

Value *VPIntrinsic::getMaskParam() const {
  if (auto MaskPos = getMaskParamPos(getIntrinsicID()))
    return getArgOperand(MaskPos.getValue());
  return nullptr;
}

void VPIntrinsic::setMaskParam(Value *NewMask) {
  auto MaskPos = getMaskParamPos(getIntrinsicID());
  setArgOperand(*MaskPos, NewMask);
}

Value *VPIntrinsic::getVectorLengthParam() const {
  if (auto EVLPos = getVectorLengthParamPos(getIntrinsicID()))
    return getArgOperand(EVLPos.getValue());
  return nullptr;
}

void VPIntrinsic::setVectorLengthParam(Value *NewEVL) {
  auto EVLPos = getVectorLengthParamPos(getIntrinsicID());
  setArgOperand(*EVLPos, NewEVL);
}

Optional<unsigned> VPIntrinsic::getMaskParamPos(Intrinsic::ID IntrinsicID) {
  switch (IntrinsicID) {
  default:
    return None;

#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return MASKPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

Optional<unsigned>
VPIntrinsic::getVectorLengthParamPos(Intrinsic::ID IntrinsicID) {
  switch (IntrinsicID) {
  default:
    return None;

#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return VLENPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

/// \return The alignment of the pointer used by this load/store/gather or
/// scatter.
MaybeAlign VPIntrinsic::getPointerAlignment() const {
  Optional<unsigned> PtrParamOpt = getMemoryPointerParamPos(getIntrinsicID());
  assert(PtrParamOpt.hasValue() && "no pointer argument!");
  return getParamAlign(PtrParamOpt.getValue());
}

/// \return The pointer operand of this load, store, gather or scatter.
Value *VPIntrinsic::getMemoryPointerParam() const {
  if (auto PtrParamOpt = getMemoryPointerParamPos(getIntrinsicID()))
    return getArgOperand(PtrParamOpt.getValue());
  return nullptr;
}

Optional<unsigned> VPIntrinsic::getMemoryPointerParamPos(Intrinsic::ID VPID) {
  switch (VPID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_MEMOP(POINTERPOS, ...) return POINTERPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return None;
}

/// \return The data (payload) operand of this store or scatter.
Value *VPIntrinsic::getMemoryDataParam() const {
  auto DataParamOpt = getMemoryDataParamPos(getIntrinsicID());
  if (!DataParamOpt.hasValue())
    return nullptr;
  return getArgOperand(DataParamOpt.getValue());
}

Optional<unsigned> VPIntrinsic::getMemoryDataParamPos(Intrinsic::ID VPID) {
  switch (VPID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_MEMOP(POINTERPOS, DATAPOS) return DATAPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return None;
}

bool VPIntrinsic::isVPIntrinsic(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return true;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}

// Equivalent non-predicated opcode
Optional<unsigned> VPIntrinsic::getFunctionalOpcodeForVP(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_FUNCTIONAL_OPC(OPC) return Instruction::OPC;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return None;
}

Intrinsic::ID VPIntrinsic::getForOpcode(unsigned IROPC) {
  switch (IROPC) {
  default:
    break;

#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) break;
#define VP_PROPERTY_FUNCTIONAL_OPC(OPC) case Instruction::OPC:
#define END_REGISTER_VP_INTRINSIC(VPID) return Intrinsic::VPID;
#include "llvm/IR/VPIntrinsics.def"
  }
  return Intrinsic::not_intrinsic;
}

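// The EVL parameter can be ignored when it is statically known to cover every
// lane of the operation. For example (illustrative), an EVL of "i32 4" on a
// <4 x i32> operation masks nothing off, and for a <vscale x 4 x i32>
// operation an EVL of "mul (vscale, 4)" does likewise.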
bool VPIntrinsic::canIgnoreVectorLengthParam() const {
  using namespace PatternMatch;

  ElementCount EC = getStaticVectorLength();

  // No vlen param - no lanes masked-off by it.
  auto *VLParam = getVectorLengthParam();
  if (!VLParam)
    return true;

  // Note that the VP intrinsic causes undefined behavior if the Explicit
  // Vector Length parameter is strictly greater than the number of vector
  // elements of the operation. This function returns true when this is
  // detected statically in the IR.

  // Check whether "W == vscale * EC.getKnownMinValue()"
  if (EC.isScalable()) {
    // Retrieve the DataLayout from the parent module.
    const auto *ParMod = this->getModule();
    if (!ParMod)
      return false;
    const auto &DL = ParMod->getDataLayout();

    // Compare vscale patterns
    uint64_t VScaleFactor;
    if (match(VLParam, m_c_Mul(m_ConstantInt(VScaleFactor), m_VScale(DL))))
      return VScaleFactor >= EC.getKnownMinValue();
    return (EC.getKnownMinValue() == 1) && match(VLParam, m_VScale(DL));
  }

  // Standard fixed-width SIMD operation.
  const auto *VLConst = dyn_cast<ConstantInt>(VLParam);
  if (!VLConst)
    return false;

  uint64_t VLNum = VLConst->getZExtValue();
  if (VLNum >= EC.getKnownMinValue())
    return true;

  return false;
}

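// A minimal usage sketch (illustrative; VecTy and Args are assumed to be in
// scope): request the declaration that matches an intended call's operands,
// then build the call against it.
//
//   Function *F = VPIntrinsic::getDeclarationForParams(
//       &M, Intrinsic::vp_add, /*ReturnType=*/VecTy, Args);
//   CallInst *VPAdd = CallInst::Create(F, Args);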
Function *VPIntrinsic::getDeclarationForParams(Module *M, Intrinsic::ID VPID,
                                               Type *ReturnType,
                                               ArrayRef<Value *> Params) {
  assert(isVPIntrinsic(VPID) && "not a VP intrinsic");
  Function *VPFunc;
  switch (VPID) {
  default: {
    Type *OverloadTy = Params[0]->getType();
    if (VPReductionIntrinsic::isVPReduction(VPID))
      OverloadTy =
          Params[*VPReductionIntrinsic::getVectorParamPos(VPID)]->getType();

    VPFunc = Intrinsic::getDeclaration(M, VPID, OverloadTy);
    break;
  }
  case Intrinsic::vp_select:
    VPFunc = Intrinsic::getDeclaration(M, VPID, {Params[1]->getType()});
    break;
  case Intrinsic::vp_load:
    VPFunc = Intrinsic::getDeclaration(
        M, VPID, {ReturnType, Params[0]->getType()});
    break;
  case Intrinsic::vp_gather:
    VPFunc = Intrinsic::getDeclaration(
        M, VPID, {ReturnType, Params[0]->getType()});
    break;
  case Intrinsic::vp_store:
    VPFunc = Intrinsic::getDeclaration(
        M, VPID, {Params[0]->getType(), Params[1]->getType()});
    break;
  case Intrinsic::vp_scatter:
    VPFunc = Intrinsic::getDeclaration(
        M, VPID, {Params[0]->getType(), Params[1]->getType()});
    break;
  }
  assert(VPFunc && "Could not declare VP intrinsic");
  return VPFunc;
}

bool VPReductionIntrinsic::isVPReduction(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_REDUCTION(STARTPOS, ...) return true;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}

unsigned VPReductionIntrinsic::getVectorParamPos() const {
  return *VPReductionIntrinsic::getVectorParamPos(getIntrinsicID());
}

unsigned VPReductionIntrinsic::getStartParamPos() const {
  return *VPReductionIntrinsic::getStartParamPos(getIntrinsicID());
}

Optional<unsigned> VPReductionIntrinsic::getVectorParamPos(Intrinsic::ID ID) {
  switch (ID) {
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_REDUCTION(STARTPOS, VECTORPOS) return VECTORPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  default:
    break;
  }
  return None;
}

Optional<unsigned> VPReductionIntrinsic::getStartParamPos(Intrinsic::ID ID) {
  switch (ID) {
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_REDUCTION(STARTPOS, VECTORPOS) return STARTPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
  default:
    break;
  }
  return None;
}

Instruction::BinaryOps BinaryOpIntrinsic::getBinaryOp() const {
  switch (getIntrinsicID()) {
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_sat:
  case Intrinsic::sadd_sat:
    return Instruction::Add;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_sat:
  case Intrinsic::ssub_sat:
    return Instruction::Sub;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    return Instruction::Mul;
  default:
    llvm_unreachable("Invalid intrinsic");
  }
}

bool BinaryOpIntrinsic::isSigned() const {
  switch (getIntrinsicID()) {
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
    return true;
  default:
    return false;
  }
}

unsigned BinaryOpIntrinsic::getNoWrapKind() const {
  if (isSigned())
    return OverflowingBinaryOperator::NoSignedWrap;
  else
    return OverflowingBinaryOperator::NoUnsignedWrap;
}

const GCStatepointInst *GCProjectionInst::getStatepoint() const {
  const Value *Token = getArgOperand(0);

  // This handles both relocates for call statepoints and relocates on the
  // normal path of an invoke statepoint.
  if (!isa<LandingPadInst>(Token))
    return cast<GCStatepointInst>(Token);

  // This relocate is on the exceptional path of an invoke statepoint.
  const BasicBlock *InvokeBB =
    cast<Instruction>(Token)->getParent()->getUniquePredecessor();

  assert(InvokeBB && "safepoints should have unique landingpads");
  assert(InvokeBB->getTerminator() &&
         "safepoint block should be well formed");

  return cast<GCStatepointInst>(InvokeBB->getTerminator());
}

Value *GCRelocateInst::getBasePtr() const {
  if (auto Opt = getStatepoint()->getOperandBundle(LLVMContext::OB_gc_live))
    return *(Opt->Inputs.begin() + getBasePtrIndex());
  return *(getStatepoint()->arg_begin() + getBasePtrIndex());
}

Value *GCRelocateInst::getDerivedPtr() const {
  if (auto Opt = getStatepoint()->getOperandBundle(LLVMContext::OB_gc_live))
    return *(Opt->Inputs.begin() + getDerivedPtrIndex());
  return *(getStatepoint()->arg_begin() + getDerivedPtrIndex());
}
623