xref: /llvm-project/llvm/lib/Analysis/Local.cpp (revision 3ccbd68480d0b8f777502300024725b9fc4ed81f)
//===- Local.cpp - Functions to perform local transformations -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions performs various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Utils/Local.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include <limits>

using namespace llvm;

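/// Emit, via \p Builder, the instruction sequence that computes the byte
/// offset \p GEP adds to its base pointer, as a value of the GEP's index
/// type.  Constant struct indices contribute their field offsets; all other
/// indices are scaled by the allocation size of the type they index into.
/// When the GEP is inbounds and \p NoAssumptions is false, the emitted adds
/// and muls are marked "nsw", since inbounds addressing cannot overflow in a
/// signed sense.  Returns the zero of the index type if no index contributes
/// an offset.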
Value *llvm::emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL,
                           User *GEP, bool NoAssumptions) {
  GEPOperator *GEPOp = cast<GEPOperator>(GEP);
  Type *IntIdxTy = DL.getIndexType(GEP->getType());
  Value *Result = nullptr;

  // If the GEP is inbounds, we know that none of the addressing operations will
  // overflow in a signed sense.
  bool isInBounds = GEPOp->isInBounds() && !NoAssumptions;
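  // Fold each partial offset into the running total, marking the add "nsw"
  // when the GEP is inbounds.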
  auto AddOffset = [&](Value *Offset) {
    if (Result)
      Result = Builder->CreateAdd(Result, Offset, GEP->getName() + ".offs",
                                  false /*NUW*/, isInBounds /*NSW*/);
    else
      Result = Offset;
  };

  // Build a mask for high order bits.
  unsigned IntPtrWidth = IntIdxTy->getScalarType()->getIntegerBitWidth();
  uint64_t PtrSizeMask =
      std::numeric_limits<uint64_t>::max() >> (64 - IntPtrWidth);

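  // Walk the GEP indices in parallel with the types they index into,
  // accumulating each index's contribution into Result.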
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
       ++i, ++GTI) {
    Value *Op = *i;
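    // Allocation size of the type this index steps over, truncated to the
    // index width.  For scalable types this is only the known-minimum size;
    // the vscale factor is applied when the index is scaled below.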
    TypeSize TSize = DL.getTypeAllocSize(GTI.getIndexedType());
    uint64_t Size = TSize.getKnownMinValue() & PtrSizeMask;
    if (Constant *OpC = dyn_cast<Constant>(Op)) {
      if (OpC->isZeroValue())
        continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        uint64_t OpValue = OpC->getUniqueInteger().getZExtValue();
        Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
        if (!Size)
          continue;

        AddOffset(ConstantInt::get(IntIdxTy, Size));
        continue;
      }
    }

    // Splat the index if needed.
    if (IntIdxTy->isVectorTy() && !Op->getType()->isVectorTy())
      Op = Builder->CreateVectorSplat(
          cast<FixedVectorType>(IntIdxTy)->getNumElements(), Op);

    // Convert to correct type.
    if (Op->getType() != IntIdxTy)
      Op = Builder->CreateIntCast(Op, IntIdxTy, true, Op->getName() + ".c");
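    // Scale the index by the element size.  For a scalable size, ScaleC holds
    // the known minimum and is multiplied by vscale to get the runtime size.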
    if (Size != 1 || TSize.isScalable()) {
      // We'll let instcombine(mul) convert this to a shl if possible.
      auto *ScaleC = ConstantInt::get(IntIdxTy, Size);
      Value *Scale =
          !TSize.isScalable() ? ScaleC : Builder->CreateVScale(ScaleC);
      Op = Builder->CreateMul(Op, Scale, GEP->getName() + ".idx", false /*NUW*/,
                              isInBounds /*NSW*/);
    }
    AddOffset(Op);
  }
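  // If no index contributed an offset, the overall offset is zero.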
  return Result ? Result : Constant::getNullValue(IntIdxTy);
}