//===-- Operator.cpp - Implement the LLVM operators ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the non-inline methods for the LLVM Operator classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Operator.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"

#include "ConstantsContext.h"

namespace llvm {
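// Return true if this operator carries any flag that can generate poison:
// nuw/nsw on overflowing binary ops and trunc, exact on division and right
// shifts, disjoint on or, inbounds/inrange on GEPs, nneg on zext/uitofp, and
// nnan/ninf on FP math operators.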
bool Operator::hasPoisonGeneratingFlags() const {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl: {
    auto *OBO = cast<OverflowingBinaryOperator>(this);
    return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
  }
  case Instruction::Trunc: {
    if (auto *TI = dyn_cast<TruncInst>(this))
      return TI->hasNoUnsignedWrap() || TI->hasNoSignedWrap();
    return false;
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    return cast<PossiblyExactOperator>(this)->isExact();
  case Instruction::Or:
    return cast<PossiblyDisjointInst>(this)->isDisjoint();
  case Instruction::GetElementPtr: {
    auto *GEP = cast<GEPOperator>(this);
    // Note: inrange is only present on constant-expression GEPs.
    return GEP->isInBounds() || GEP->getInRange() != std::nullopt;
  }
  case Instruction::UIToFP:
  case Instruction::ZExt:
    if (auto *NNI = dyn_cast<PossiblyNonNegInst>(this))
      return NNI->hasNonNeg();
    return false;
  default:
    if (const auto *FP = dyn_cast<FPMathOperator>(this))
      return FP->hasNoNaNs() || FP->hasNoInfs();
    return false;
  }
}

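// In addition to the flags above, also check for poison-generating return
// attributes and metadata (e.g. !range, !nonnull) on the underlying
// instruction, if there is one.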
bool Operator::hasPoisonGeneratingAnnotations() const {
  if (hasPoisonGeneratingFlags())
    return true;
  auto *I = dyn_cast<Instruction>(this);
  return I && (I->hasPoisonGeneratingReturnAttributes() ||
               I->hasPoisonGeneratingMetadata());
}

Type *GEPOperator::getSourceElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getSourceElementType();
  return cast<GetElementPtrConstantExpr>(this)->getSourceElementType();
}

Type *GEPOperator::getResultElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getResultElementType();
  return cast<GetElementPtrConstantExpr>(this)->getResultElementType();
}

std::optional<ConstantRange> GEPOperator::getInRange() const {
  if (auto *CE = dyn_cast<GetElementPtrConstantExpr>(this))
    return CE->getInRange();
  return std::nullopt;
}

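// Compute the maximum alignment that this GEP is guaranteed to preserve.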
Align GEPOperator::getMaxPreservedAlignment(const DataLayout &DL) const {
  // Compute the worst possible offset for every level of the GEP and
  // accumulate the minimum alignment into Result.

  Align Result = Align(llvm::Value::MaximumAlignment);
  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    uint64_t Offset;
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());

    if (StructType *STy = GTI.getStructTypeOrNull()) {
      // Struct indices are always constant, so OpC is non-null here.
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset = SL->getElementOffset(OpC->getZExtValue());
    } else {
      assert(GTI.isSequential() && "should be sequential");
      // If the index isn't known, use 1: among all possible indices, it gives
      // the worst (smallest) alignment for the offset.
      const uint64_t ElemCount = OpC ? OpC->getZExtValue() : 1;
      Offset = GTI.getSequentialElementStride(DL) * ElemCount;
    }
    Result = Align(MinAlign(Offset, Result.value()));
  }
  return Result;
}

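// Accumulate the constant byte offset of this GEP into Offset, delegating to
// the static overload below with this GEP's source element type and indices.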
bool GEPOperator::accumulateConstantOffset(
    const DataLayout &DL, APInt &Offset,
    function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
  assert(Offset.getBitWidth() ==
             DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");
  SmallVector<const Value *> Index(llvm::drop_begin(operand_values()));
  return GEPOperator::accumulateConstantOffset(getSourceElementType(), Index,
                                               DL, Offset, ExternalAnalysis);
}

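// Walk the indices over SourceType and accumulate the total constant byte
// offset into Offset. Returns false on a non-constant index that
// ExternalAnalysis cannot evaluate, on a scalable type indexed by a non-zero
// constant, or when an analysis-derived offset overflows.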
bool GEPOperator::accumulateConstantOffset(
    Type *SourceType, ArrayRef<const Value *> Index, const DataLayout &DL,
    APInt &Offset, function_ref<bool(Value &, APInt &)> ExternalAnalysis) {
  // Fast path for canonical getelementptr i8 form.
  if (SourceType->isIntegerTy(8) && !ExternalAnalysis) {
    if (auto *CI = dyn_cast<ConstantInt>(Index.front())) {
      Offset += CI->getValue().sextOrTrunc(Offset.getBitWidth());
      return true;
    }
    return false;
  }

  bool UsedExternalAnalysis = false;
  // Scale Index by Size and add the product to Offset, checking for signed
  // overflow when the index came from an external analysis.
  auto AccumulateOffset = [&](APInt Index, uint64_t Size) -> bool {
    Index = Index.sextOrTrunc(Offset.getBitWidth());
    APInt IndexedSize = APInt(Offset.getBitWidth(), Size);
    // For array or vector indices, scale the index by the size of the type.
    if (!UsedExternalAnalysis) {
      Offset += Index * IndexedSize;
    } else {
      // An external analysis can return a result higher/lower than the value
      // it represents, so we need to detect overflow/underflow.
      bool Overflow = false;
      APInt OffsetPlus = Index.smul_ov(IndexedSize, Overflow);
      if (Overflow)
        return false;
      Offset = Offset.sadd_ov(OffsetPlus, Overflow);
      if (Overflow)
        return false;
    }
    return true;
  };
  auto begin = generic_gep_type_iterator<decltype(Index.begin())>::begin(
      SourceType, Index.begin());
  auto end = generic_gep_type_iterator<decltype(Index.end())>::end(Index.end());
  for (auto GTI = begin, GTE = end; GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = GTI.getIndexedType()->isScalableTy();

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    if (auto ConstOffset = dyn_cast<ConstantInt>(V)) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero (vscale * n * 0 =
      // 0), bail out.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        if (!AccumulateOffset(
                APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx)),
                1))
          return false;
        continue;
      }
      if (!AccumulateOffset(ConstOffset->getValue(),
                            GTI.getSequentialElementStride(DL)))
        return false;
      continue;
    }

    // The operand is not constant; check whether an external analysis was
    // provided. An external analysis is not applicable to a struct type.
    if (!ExternalAnalysis || STy || ScalableType)
      return false;
    APInt AnalysisIndex;
    if (!ExternalAnalysis(*V, AnalysisIndex))
      return false;
    UsedExternalAnalysis = true;
    if (!AccumulateOffset(AnalysisIndex, GTI.getSequentialElementStride(DL)))
      return false;
  }
  return true;
}

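// Decompose this GEP into a constant byte offset plus per-variable byte
// strides. For example, 'getelementptr i32, ptr %p, i64 %i' (with a 64-bit
// index width) yields ConstantOffset = 0 and VariableOffsets = {%i -> 4},
// assuming a DataLayout where i32 occupies 4 bytes.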
bool GEPOperator::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    MapVector<Value *, APInt> &VariableOffsets,
    APInt &ConstantOffset) const {
  assert(BitWidth == DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");

  auto CollectConstantOffset = [&](APInt Index, uint64_t Size) {
    Index = Index.sextOrTrunc(BitWidth);
    APInt IndexedSize = APInt(BitWidth, Size);
    ConstantOffset += Index * IndexedSize;
  };

  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = GTI.getIndexedType()->isScalableTy();

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    if (auto ConstOffset = dyn_cast<ConstantInt>(V)) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero (vscale * n * 0 =
      // 0), bail out.
      // TODO: If the runtime value is accessible at any point before DWARF
      // emission, then we could potentially keep a forward reference to it
      // in the debug value to be filled in later.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        CollectConstantOffset(APInt(BitWidth, SL->getElementOffset(ElementIdx)),
                              1);
        continue;
      }
      CollectConstantOffset(ConstOffset->getValue(),
                            GTI.getSequentialElementStride(DL));
      continue;
    }

    if (STy || ScalableType)
      return false;
    APInt IndexedSize = APInt(BitWidth, GTI.getSequentialElementStride(DL));
    // Insert an initial offset of 0 for V iff none exists already, then
    // increment the offset by IndexedSize.
    if (!IndexedSize.isZero()) {
      auto *It = VariableOffsets.insert({V, APInt(BitWidth, 0)}).first;
      It->second += IndexedSize;
    }
  }
  return true;
}

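// Print the flags in their textual IR form, emitting the single 'fast'
// keyword when all of them are set.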
void FastMathFlags::print(raw_ostream &O) const {
  if (all())
    O << " fast";
  else {
    if (allowReassoc())
      O << " reassoc";
    if (noNaNs())
      O << " nnan";
    if (noInfs())
      O << " ninf";
    if (noSignedZeros())
      O << " nsz";
    if (allowReciprocal())
      O << " arcp";
    if (allowContract())
      O << " contract";
    if (approxFunc())
      O << " afn";
  }
}
} // namespace llvm