//===-- Operator.cpp - Implement the LLVM operators -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the non-inline methods for the LLVM Operator classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Operator.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"

#include "ConstantsContext.h"

namespace llvm {
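// Returns true when this operator carries a flag that can turn an otherwise
// well-defined result into poison, e.g. `add nsw i32 %a, %b` yields poison on
// signed overflow, while a flagless `add` simply wraps.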
bool Operator::hasPoisonGeneratingFlags() const {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl: {
    auto *OBO = cast<OverflowingBinaryOperator>(this);
    return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
  }
  case Instruction::Trunc: {
    if (auto *TI = dyn_cast<TruncInst>(this))
      return TI->hasNoUnsignedWrap() || TI->hasNoSignedWrap();
    return false;
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    return cast<PossiblyExactOperator>(this)->isExact();
  case Instruction::Or:
    return cast<PossiblyDisjointInst>(this)->isDisjoint();
  case Instruction::GetElementPtr: {
    auto *GEP = cast<GEPOperator>(this);
    // Note: inrange exists only on constant-expression GEPs.
    return GEP->isInBounds() || GEP->getInRange() != std::nullopt;
  }
  case Instruction::ZExt:
    if (auto *NNI = dyn_cast<PossiblyNonNegInst>(this))
      return NNI->hasNonNeg();
    return false;
  default:
    if (const auto *FP = dyn_cast<FPMathOperator>(this))
      return FP->hasNoNaNs() || FP->hasNoInfs();
    return false;
  }
}

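// Poison-generating metadata (e.g. !range, !nonnull, !align) exists only on
// Instructions, never on ConstantExprs, hence the dyn_cast below.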
bool Operator::hasPoisonGeneratingFlagsOrMetadata() const {
  if (hasPoisonGeneratingFlags())
    return true;
  auto *I = dyn_cast<Instruction>(this);
  return I && I->hasPoisonGeneratingMetadata();
}

Type *GEPOperator::getSourceElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getSourceElementType();
  return cast<GetElementPtrConstantExpr>(this)->getSourceElementType();
}

Type *GEPOperator::getResultElementType() const {
  if (auto *I = dyn_cast<GetElementPtrInst>(this))
    return I->getResultElementType();
  return cast<GetElementPtrConstantExpr>(this)->getResultElementType();
}

std::optional<ConstantRange> GEPOperator::getInRange() const {
  if (auto *CE = dyn_cast<GetElementPtrConstantExpr>(this))
    return CE->getInRange();
  return std::nullopt;
}

Align GEPOperator::getMaxPreservedAlignment(const DataLayout &DL) const {
  // Compute the worst possible offset for every level of the GEP and
  // accumulate the minimum alignment into Result.
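  // E.g. a struct field at byte offset 4 followed by an unknown index into an
  // i16 array (stride 2, worst-case count 1) gives MinAlign(4, 2) = 2.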

  Align Result = Align(llvm::Value::MaximumAlignment);
  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    uint64_t Offset;
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());

    if (StructType *STy = GTI.getStructTypeOrNull()) {
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset = SL->getElementOffset(OpC->getZExtValue());
    } else {
      assert(GTI.isSequential() && "should be sequential");
      // If the index isn't known, use 1: it is the index that gives the
      // worst-case alignment for the offset.
      const uint64_t ElemCount = OpC ? OpC->getZExtValue() : 1;
      Offset = GTI.getSequentialElementStride(DL) * ElemCount;
    }
    Result = Align(MinAlign(Offset, Result.value()));
  }
  return Result;
}

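// A minimal usage sketch (the names `GEP` and `DL` here are illustrative):
//   APInt Offset(DL.getIndexSizeInBits(GEP.getPointerAddressSpace()), 0);
//   if (GEP.accumulateConstantOffset(DL, Offset))
//     ; // Offset now holds the GEP's total constant byte offset.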
bool GEPOperator::accumulateConstantOffset(
    const DataLayout &DL, APInt &Offset,
    function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
  assert(Offset.getBitWidth() ==
             DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");
  SmallVector<const Value *> Index(llvm::drop_begin(operand_values()));
  return GEPOperator::accumulateConstantOffset(getSourceElementType(), Index,
                                               DL, Offset, ExternalAnalysis);
}

bool GEPOperator::accumulateConstantOffset(
    Type *SourceType, ArrayRef<const Value *> Index, const DataLayout &DL,
    APInt &Offset, function_ref<bool(Value &, APInt &)> ExternalAnalysis) {
  // Fast path for canonical getelementptr i8 form.
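  // E.g. `getelementptr i8, ptr %p, i64 -4` simply adds -4 to Offset.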
  if (SourceType->isIntegerTy(8) && !ExternalAnalysis) {
    if (auto *CI = dyn_cast<ConstantInt>(Index.front())) {
      Offset += CI->getValue().sextOrTrunc(Offset.getBitWidth());
      return true;
    }
    return false;
  }

  bool UsedExternalAnalysis = false;
  auto AccumulateOffset = [&](APInt Index, uint64_t Size) -> bool {
    Index = Index.sextOrTrunc(Offset.getBitWidth());
    APInt IndexedSize = APInt(Offset.getBitWidth(), Size);
    // For array or vector indices, scale the index by the size of the type.
    if (!UsedExternalAnalysis) {
      Offset += Index * IndexedSize;
    } else {
      // External Analysis can return a result higher/lower than the value
      // represents. We need to detect overflow/underflow.
      bool Overflow = false;
      APInt OffsetPlus = Index.smul_ov(IndexedSize, Overflow);
      if (Overflow)
        return false;
      Offset = Offset.sadd_ov(OffsetPlus, Overflow);
      if (Overflow)
        return false;
    }
    return true;
  };
  auto begin = generic_gep_type_iterator<decltype(Index.begin())>::begin(
      SourceType, Index.begin());
  auto end = generic_gep_type_iterator<decltype(Index.end())>::end(Index.end());
  for (auto GTI = begin, GTE = end; GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = GTI.getIndexedType()->isScalableTy();

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    if (auto ConstOffset = dyn_cast<ConstantInt>(V)) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero (vscale * n * 0
      // == 0), bail out.
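      // E.g. an index over <vscale x 4 x i32> advances by vscale * 16 bytes,
      // which is not a compile-time constant.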
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        if (!AccumulateOffset(
                APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx)),
                1))
          return false;
        continue;
      }
      if (!AccumulateOffset(ConstOffset->getValue(),
                            GTI.getSequentialElementStride(DL)))
        return false;
      continue;
    }

    // The operand is not constant; check whether an external analysis was
    // provided. External analysis is not applicable to a struct type.
    if (!ExternalAnalysis || STy || ScalableType)
      return false;
    APInt AnalysisIndex;
    if (!ExternalAnalysis(*V, AnalysisIndex))
      return false;
    UsedExternalAnalysis = true;
    if (!AccumulateOffset(AnalysisIndex, GTI.getSequentialElementStride(DL)))
      return false;
  }
  return true;
}

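// Splits this GEP into a constant byte offset plus scaled variable indices;
// e.g. `getelementptr [8 x i32], ptr %p, i64 0, i64 %i` yields
// ConstantOffset == 0 and VariableOffsets == { %i -> 4 }.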
bool GEPOperator::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    MapVector<Value *, APInt> &VariableOffsets,
    APInt &ConstantOffset) const {
  assert(BitWidth == DL.getIndexSizeInBits(getPointerAddressSpace()) &&
         "The offset bit width does not match DL specification.");

  auto CollectConstantOffset = [&](APInt Index, uint64_t Size) {
    Index = Index.sextOrTrunc(BitWidth);
    APInt IndexedSize = APInt(BitWidth, Size);
    ConstantOffset += Index * IndexedSize;
  };

  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
       GTI != GTE; ++GTI) {
    // Scalable vectors are multiplied by a runtime constant.
    bool ScalableType = GTI.getIndexedType()->isScalableTy();

    Value *V = GTI.getOperand();
    StructType *STy = GTI.getStructTypeOrNull();
    // Handle ConstantInt if possible.
    if (auto ConstOffset = dyn_cast<ConstantInt>(V)) {
      if (ConstOffset->isZero())
        continue;
      // If the type is scalable and the constant is not zero (vscale * n * 0
      // == 0), bail out.
      // TODO: If the runtime value is accessible at any point before DWARF
      // emission, then we could potentially keep a forward reference to it
      // in the debug value to be filled in later.
      if (ScalableType)
        return false;
      // Handle a struct index, which adds its field offset to the pointer.
      if (STy) {
        unsigned ElementIdx = ConstOffset->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        // Element offset is in bytes.
        CollectConstantOffset(APInt(BitWidth, SL->getElementOffset(ElementIdx)),
                              1);
        continue;
      }
      CollectConstantOffset(ConstOffset->getValue(),
                            GTI.getSequentialElementStride(DL));
      continue;
    }

    if (STy || ScalableType)
      return false;
    APInt IndexedSize = APInt(BitWidth, GTI.getSequentialElementStride(DL));
    // Insert an initial offset of 0 for V iff none exists already, then
    // increment the offset by IndexedSize.
    if (!IndexedSize.isZero()) {
      auto *It = VariableOffsets.insert({V, APInt(BitWidth, 0)}).first;
      It->second += IndexedSize;
    }
  }
  return true;
}

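// The flags print in LLVM IR order, e.g. nnan + ninf prints as " nnan ninf";
// the full set collapses to the single " fast" shorthand.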
void FastMathFlags::print(raw_ostream &O) const {
  if (all())
    O << " fast";
  else {
    if (allowReassoc())
      O << " reassoc";
    if (noNaNs())
      O << " nnan";
    if (noInfs())
      O << " ninf";
    if (noSignedZeros())
      O << " nsz";
    if (allowReciprocal())
      O << " arcp";
    if (allowContract())
      O << " contract";
    if (approxFunc())
      O << " afn";
  }
}
} // namespace llvm