//===- SeparateConstOffsetFromGEP.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loop unrolling may create many similar GEPs for array accesses.
// e.g., a 2-level loop
//
// float a[32][32]; // global variable
//
// for (int i = 0; i < 2; ++i) {
//   for (int j = 0; j < 2; ++j) {
//     ...
//     ... = a[x + i][y + j];
//     ...
//   }
// }
//
// will probably be unrolled to:
//
// gep %a, 0, %x, %y; load
// gep %a, 0, %x, %y + 1; load
// gep %a, 0, %x + 1, %y; load
// gep %a, 0, %x + 1, %y + 1; load
//
// LLVM's GVN does not use partial redundancy elimination yet, and is thus
// unable to reuse (gep %a, 0, %x, %y). As a result, this misoptimization incurs
// significant slowdown on targets with limited addressing modes. For instance,
// because the PTX target does not support the reg+reg addressing mode, the
// NVPTX backend emits PTX code that literally computes the pointer address of
// each GEP, wasting tons of registers. It emits the following PTX for the
// first load and similar PTX for other loads.
//
// mov.u32         %r1, %x;
// mov.u32         %r2, %y;
// mul.wide.u32    %rl2, %r1, 128;
// mov.u64         %rl3, a;
// add.s64         %rl4, %rl3, %rl2;
// mul.wide.u32    %rl5, %r2, 4;
// add.s64         %rl6, %rl4, %rl5;
// ld.global.f32   %f1, [%rl6];
//
// To reduce the register pressure, the optimization implemented in this file
// merges the common part of a group of GEPs, so we can compute each pointer
// address by adding a simple offset to the common part, saving many registers.
//
// It works by splitting each GEP into a variadic base and a constant offset.
// The variadic base can be computed once and reused by multiple GEPs, and the
// constant offsets can be nicely folded into the reg+immediate addressing mode
// (supported by most targets) without using any extra register.
//
// For instance, we transform the four GEPs and four loads in the above example
// into:
//
// base = gep a, 0, x, y
// load base
// load base + 1  * sizeof(float)
// load base + 32 * sizeof(float)
// load base + 33 * sizeof(float)
//
// Given the transformed IR, a backend that supports the reg+immediate
// addressing mode can easily fold the pointer arithmetic into the loads. For
// example, the NVPTX backend folds it into the ld.global.f32 instructions,
// and the resultant PTX uses far fewer registers.
//
// mov.u32         %r1, %x;
// mov.u32         %r2, %y;
// mul.wide.u32    %rl2, %r1, 128;
// mov.u64         %rl3, a;
// add.s64         %rl4, %rl3, %rl2;
// mul.wide.u32    %rl5, %r2, 4;
// add.s64         %rl6, %rl4, %rl5;
// ld.global.f32   %f1, [%rl6]; // so far the same as unoptimized PTX
// ld.global.f32   %f2, [%rl6+4]; // much better
// ld.global.f32   %f3, [%rl6+128]; // much better
// ld.global.f32   %f4, [%rl6+132]; // much better
//
// Another improvement enabled by the LowerGEP flag is to lower a GEP with
// multiple indices to either multiple GEPs with a single index or arithmetic
// operations (depending on whether the target uses alias analysis in codegen).
// Such a transformation can have the following benefits:
// (1) It can always extract constants in the indices of structure types.
// (2) After such lowering, there are more optimization opportunities such as
//     CSE, LICM and CGP.
//
// E.g., the following GEPs have multiple indices:
//  BB1:
//    %p = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j1, i32 3
//    load %p
//    ...
//  BB2:
//    %p2 = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j2, i32 2
//    load %p2
//    ...
//
// We cannot CSE the common part related to index "i64 %i" while the GEPs
// remain in this multi-index form. Lowering GEPs makes such CSE possible.
// If the target does not use alias analysis in codegen, this pass will
// lower a GEP with multiple indices into arithmetic operations:
//  BB1:
//    %1 = ptrtoint [10 x %struct]* %ptr to i64    ; CSE opportunity
//    %2 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//    %3 = add i64 %1, %2                          ; CSE opportunity
//    %4 = mul i64 %j1, length_of_struct
//    %5 = add i64 %3, %4
//    %6 = add i64 %5, struct_field_3              ; Constant offset
//    %p = inttoptr i64 %6 to i32*
//    load %p
//    ...
//  BB2:
//    %7 = ptrtoint [10 x %struct]* %ptr to i64    ; CSE opportunity
//    %8 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//    %9 = add i64 %7, %8                          ; CSE opportunity
//    %10 = mul i64 %j2, length_of_struct
//    %11 = add i64 %9, %10
//    %12 = add i64 %11, struct_field_2            ; Constant offset
//    %p2 = inttoptr i64 %12 to i32*
//    load %p2
//    ...
//
// If the target uses alias analysis in codegen, this pass will lower a GEP
// with multiple indices into multiple GEPs with a single index:
//  BB1:
//    %1 = bitcast [10 x %struct]* %ptr to i8*     ; CSE opportunity
//    %2 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//    %3 = getelementptr i8* %1, i64 %2            ; CSE opportunity
//    %4 = mul i64 %j1, length_of_struct
//    %5 = getelementptr i8* %3, i64 %4
//    %6 = getelementptr i8* %5, struct_field_3    ; Constant offset
//    %p = bitcast i8* %6 to i32*
//    load %p
//    ...
//  BB2:
//    %7 = bitcast [10 x %struct]* %ptr to i8*     ; CSE opportunity
//    %8 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//    %9 = getelementptr i8* %7, i64 %8            ; CSE opportunity
//    %10 = mul i64 %j2, length_of_struct
//    %11 = getelementptr i8* %9, i64 %10
//    %12 = getelementptr i8* %11, struct_field_2  ; Constant offset
//    %p2 = bitcast i8* %12 to i32*
//    load %p2
//    ...
//
// Lowering GEPs can also benefit other passes such as LICM and CGP.
// LICM (Loop Invariant Code Motion) cannot hoist/sink a GEP with multiple
// indices if any of the indices is loop-variant. If we lower such a GEP into
// invariant parts and variant parts, LICM can hoist/sink the invariant parts.
// CGP (CodeGen Prepare) tries to sink address calculations that match the
// target's addressing modes. A GEP with multiple indices may not match and
// will not be sunk. If we lower such a GEP into smaller parts, CGP may sink
// some of them, and we end up with better addressing modes. The LICM case is
// sketched below.
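//
// For illustration (a hypothetical loop, not taken from an actual test case),
// suppose %i is loop-invariant inside a loop while %j is not. After lowering
//
//    %p = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j, i32 3
//
// into single-index GEPs as above, the %i-dependent part
//
//    %1 = bitcast [10 x %struct]* %ptr to i8*
//    %2 = mul i64 %i, length_of_10xstruct
//    %3 = getelementptr i8* %1, i64 %2            ; loop-invariant
//
// can be hoisted out of the loop by LICM, and only the %j-dependent GEPs
// remain inside.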
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Scalar.h"
#include <cassert>
#include <cstdint>
#include <string>

using namespace llvm;
using namespace llvm::PatternMatch;

static cl::opt<bool> DisableSeparateConstOffsetFromGEP(
    "disable-separate-const-offset-from-gep", cl::init(false),
    cl::desc("Do not separate the constant offset from a GEP instruction"),
    cl::Hidden);

// Setting this flag may cause false positives when the input module already
// contains dead instructions. Therefore, we set it only in unit tests that
// are free of dead code.
static cl::opt<bool>
    VerifyNoDeadCode("reassociate-geps-verify-no-dead-code", cl::init(false),
                     cl::desc("Verify this pass produces no dead code"),
                     cl::Hidden);

namespace {

/// A helper class for separating a constant offset from a GEP index.
///
/// In real programs, a GEP index may be more complicated than a simple addition
/// of something and a constant integer that can be trivially split. For
/// example, to split ((a << 3) | 5) + b, we need to search deeper for the
/// constant offset, so that we can separate the index into (a << 3) + b and 5.
///
/// Therefore, this class looks into the expression that computes a given GEP
/// index, and tries to find a constant integer that can be hoisted to the
/// outermost level of the expression as an addition. Not every constant in an
/// expression can jump out. E.g., we cannot transform (b * (a + 5)) to (b * a +
/// 5); nor can we transform (3 * (a + 5)) to (3 * a + 5), although in this
/// case -instcombine probably already optimized (3 * (a + 5)) to (3 * a + 15).
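///
/// For illustration: given the index "a + (b + 5)", Extract returns the new
/// index "a + b", and Find returns 5.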
class ConstantOffsetExtractor {
public:
  /// Extracts a constant offset from the given GEP index. It returns the
  /// new index representing the remainder (equal to the original index minus
  /// the constant offset), or nullptr if we cannot extract a constant offset.
  /// \p Idx The given GEP index
  /// \p GEP The given GEP
  /// \p UserChainTail Outputs the tail of UserChain so that we can
  ///                  garbage-collect unused instructions in UserChain.
  static Value *Extract(Value *Idx, GetElementPtrInst *GEP,
                        User *&UserChainTail, const DominatorTree *DT);

  /// Looks for a constant offset from the given GEP index without extracting
  /// it. It returns the numeric value of the extracted constant offset (0 if
  /// it fails). The meanings of the arguments are the same as in Extract.
  static int64_t Find(Value *Idx, GetElementPtrInst *GEP,
                      const DominatorTree *DT);

private:
  ConstantOffsetExtractor(Instruction *InsertionPt, const DominatorTree *DT)
      : IP(InsertionPt), DL(InsertionPt->getModule()->getDataLayout()), DT(DT) {
  }

  /// Searches the expression that computes V for a non-zero constant C s.t.
  /// V can be reassociated into the form V' + C. If the search succeeds,
  /// returns C and updates UserChain as a def-use chain from C to V;
  /// otherwise, UserChain is empty.
  ///
  /// \p V            The given expression
  /// \p SignExtended Whether V will be sign-extended in the computation of the
  ///                 GEP index
  /// \p ZeroExtended Whether V will be zero-extended in the computation of the
  ///                 GEP index
  /// \p NonNegative  Whether V is guaranteed to be non-negative. For example,
  ///                 an index of an inbounds GEP is guaranteed to be
  ///                 non-negative. Leveraging this, we can better split
  ///                 inbounds GEPs.
  APInt find(Value *V, bool SignExtended, bool ZeroExtended, bool NonNegative);

  /// A helper function to look into both operands of a binary operator.
  APInt findInEitherOperand(BinaryOperator *BO, bool SignExtended,
                            bool ZeroExtended);

  /// After finding the constant offset C from the GEP index I, we build a new
  /// index I' s.t. I' + C = I. This function builds and returns the new
  /// index I' according to UserChain produced by function "find".
  ///
  /// The building conceptually takes two steps:
  /// 1) iteratively distribute s/zext towards the leaves of the expression tree
  /// that computes I
  /// 2) reassociate the expression tree to the form I' + C.
  ///
  /// For example, to extract the 5 from sext(a + (b + 5)), we first distribute
  /// sext to a, b and 5 so that we have
  ///   sext(a) + (sext(b) + 5).
  /// Then, we reassociate it to
  ///   (sext(a) + sext(b)) + 5.
  /// Given this form, we know I' is sext(a) + sext(b).
  Value *rebuildWithoutConstOffset();

  /// After the first step of rebuilding the GEP index without the constant
  /// offset, distribute s/zext to the operands of all operators in UserChain.
  /// e.g., zext(sext(a + (b + 5))) (assuming no overflow) =>
  /// zext(sext(a)) + (zext(sext(b)) + zext(sext(5))).
  ///
  /// The function also updates UserChain to point to new subexpressions after
  /// distributing s/zext. e.g., the old UserChain of the above example is
  /// 5 -> b + 5 -> a + (b + 5) -> sext(...) -> zext(sext(...)),
  /// and the new UserChain is
  /// zext(sext(5)) -> zext(sext(b)) + zext(sext(5)) ->
  ///   zext(sext(a)) + (zext(sext(b)) + zext(sext(5))).
  ///
  /// \p ChainIndex The index to UserChain. ChainIndex is initially
  ///               UserChain.size() - 1, and is decremented during
  ///               the recursion.
  Value *distributeExtsAndCloneChain(unsigned ChainIndex);

  /// Reassociates the GEP index to the form I' + C and returns I'.
  Value *removeConstOffset(unsigned ChainIndex);

  /// A helper function to apply ExtInsts, a list of s/zext, to value V.
  /// e.g., if ExtInsts = [sext i32 to i64, zext i16 to i32], this function
  /// returns "sext i32 (zext i16 V to i32) to i64".
  Value *applyExts(Value *V);

  /// A helper function that returns whether we can trace into the operands
  /// of binary operator BO for a constant offset.
  ///
  /// \p SignExtended Whether BO is surrounded by sext
  /// \p ZeroExtended Whether BO is surrounded by zext
  /// \p NonNegative  Whether BO is known to be non-negative, e.g., an index
  ///                 of an inbounds GEP.
  bool CanTraceInto(bool SignExtended, bool ZeroExtended, BinaryOperator *BO,
                    bool NonNegative);
  /// The path from the constant offset to the old GEP index. e.g., if the GEP
  /// index is "a * b + (c + 5)", then after running function find, UserChain[0]
  /// will be the constant 5, UserChain[1] will be the subexpression "c + 5",
  /// and UserChain[2] will be the entire expression "a * b + (c + 5)".
  ///
  /// This path helps to rebuild the new GEP index.
  SmallVector<User *, 8> UserChain;

  /// A data structure used in rebuildWithoutConstOffset. Contains all
  /// sext/zext instructions along UserChain.
  SmallVector<CastInst *, 16> ExtInsts;

  /// Insertion position of cloned instructions.
  Instruction *IP;

  const DataLayout &DL;
  const DominatorTree *DT;
};

/// A pass that tries to split every GEP in the function into a variadic
/// base and a constant offset. It is a FunctionPass because searching for the
/// constant offset may inspect other basic blocks.
class SeparateConstOffsetFromGEP : public FunctionPass {
public:
  static char ID;

  SeparateConstOffsetFromGEP(bool LowerGEP = false)
      : FunctionPass(ID), LowerGEP(LowerGEP) {
    initializeSeparateConstOffsetFromGEPPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.setPreservesCFG();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }

  bool doInitialization(Module &M) override {
    DL = &M.getDataLayout();
    return false;
  }

  bool runOnFunction(Function &F) override;

private:
  /// Tries to split the given GEP into a variadic base and a constant offset,
  /// and returns true if the splitting succeeds.
  bool splitGEP(GetElementPtrInst *GEP);

  /// Lowers a GEP with multiple indices into multiple GEPs with a single index.
  /// Function splitGEP already split the original GEP into a variadic part and
  /// a constant offset (i.e., AccumulativeByteOffset). This function lowers the
  /// variadic part into a set of GEPs with a single index and applies
  /// AccumulativeByteOffset to it.
  /// \p Variadic                  The variadic part of the original GEP.
  /// \p AccumulativeByteOffset    The constant offset.
  void lowerToSingleIndexGEPs(GetElementPtrInst *Variadic,
                              int64_t AccumulativeByteOffset);

  /// Lowers a GEP with multiple indices into ptrtoint+arithmetics+inttoptr
  /// form. Function splitGEP already split the original GEP into a variadic
  /// part and a constant offset (i.e., AccumulativeByteOffset). This function
  /// lowers the variadic part into a set of arithmetic operations and applies
  /// AccumulativeByteOffset to it.
  /// \p Variadic                  The variadic part of the original GEP.
  /// \p AccumulativeByteOffset    The constant offset.
  void lowerToArithmetics(GetElementPtrInst *Variadic,
                          int64_t AccumulativeByteOffset);

  /// Finds the constant offset within each index and accumulates them. If
  /// LowerGEP is true, it searches the indices of both sequential and struct
  /// types; otherwise, it searches only sequential indices. The output
  /// parameter NeedsExtraction indicates whether we successfully found a
  /// non-zero constant offset.
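  ///
  /// As a hypothetical example (not taken from a test case), for
  ///   gep [10 x [20 x i32]]* %a, i64 0, i64 (x + 1), i64 (y + 2)
  /// this function accumulates
  ///   1 * sizeof([20 x i32]) + 2 * sizeof(i32) = 1 * 80 + 2 * 4 = 88.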
  int64_t accumulateByteOffset(GetElementPtrInst *GEP, bool &NeedsExtraction);

  /// Canonicalize array indices to pointer-size integers. This helps to
  /// simplify the logic of splitting a GEP. For example, if a + b is a
  /// pointer-size integer, we have
  ///   gep base, a + b = gep (gep base, a), b
  /// However, this equality may not hold if the size of a + b is smaller than
  /// the pointer size, because LLVM conceptually sign-extends GEP indices to
  /// pointer size before computing the address
  /// (http://llvm.org/docs/LangRef.html#id181).
  ///
  /// This canonicalization is very likely already done in clang and
  /// instcombine. Therefore, the program will probably remain the same.
  ///
  /// Returns true if the module changes.
  ///
  /// Verified in @i32_add in split-gep.ll
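  ///
  /// For illustration (hypothetical IR, assuming 64-bit pointers), the i32
  /// index of
  ///   %p = getelementptr i32, i32* %base, i32 %i
  /// is canonicalized to
  ///   %idxprom = sext i32 %i to i64
  ///   %p = getelementptr i32, i32* %base, i64 %idxprom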
  bool canonicalizeArrayIndicesToPointerSize(GetElementPtrInst *GEP);

  /// Optimize sext(a)+sext(b) to sext(a+b) when a+b can't sign overflow.
  /// SeparateConstOffsetFromGEP distributes a sext to leaves before extracting
  /// the constant offset. After extraction, it becomes desirable to reunite the
  /// distributed sexts. For example,
  ///
  ///                              &a[sext(i +nsw (j +nsw 5))]
  ///   => distribute              &a[sext(i) +nsw (sext(j) +nsw 5)]
  ///   => constant extraction     &a[sext(i) + sext(j)] + 5
  ///   => reunion                 &a[sext(i +nsw j)] + 5
  bool reuniteExts(Function &F);

  /// A helper that reunites sexts in an instruction.
  bool reuniteExts(Instruction *I);

  /// Find the closest dominator of <Dominatee> that is equivalent to <Key>.
  Instruction *findClosestMatchingDominator(const SCEV *Key,
                                            Instruction *Dominatee);
  /// Verify F is free of dead code.
  void verifyNoDeadCode(Function &F);

  /// Returns true if v has more than one use inside loop L.
  bool hasMoreThanOneUseInLoop(Value *v, Loop *L);

  // Swaps the index operands of the two GEPs.
  void swapGEPOperand(GetElementPtrInst *First, GetElementPtrInst *Second);

  // Checks whether it is safe to swap the operands of the two GEPs.
  bool isLegalToSwapOperand(GetElementPtrInst *First, GetElementPtrInst *Second,
                            Loop *CurLoop);

  const DataLayout *DL = nullptr;
  DominatorTree *DT = nullptr;
  ScalarEvolution *SE;

  LoopInfo *LI;
  TargetLibraryInfo *TLI;

  /// Whether to lower a GEP with multiple indices into arithmetic operations
  /// or multiple GEPs with a single index.
  bool LowerGEP;

  DenseMap<const SCEV *, SmallVector<Instruction *, 2>> DominatingExprs;
};

} // end anonymous namespace

char SeparateConstOffsetFromGEP::ID = 0;

INITIALIZE_PASS_BEGIN(
    SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
    "Split GEPs to a variadic base and a constant offset for better CSE", false,
    false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(
    SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
    "Split GEPs to a variadic base and a constant offset for better CSE", false,
    false)

FunctionPass *llvm::createSeparateConstOffsetFromGEPPass(bool LowerGEP) {
  return new SeparateConstOffsetFromGEP(LowerGEP);
}

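// A minimal usage sketch (illustrative, not code from this file): a target
// typically schedules this pass from its codegen pipeline setup, optionally
// with GEP lowering enabled, e.g.
//
//   addPass(createSeparateConstOffsetFromGEPPass(/*LowerGEP=*/true));
//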
bool ConstantOffsetExtractor::CanTraceInto(bool SignExtended,
                                           bool ZeroExtended,
                                           BinaryOperator *BO,
                                           bool NonNegative) {
  // We only consider ADD, SUB and OR, because a non-zero constant found in
  // expressions composed of these operations can be easily hoisted as a
  // constant offset by reassociation.
  if (BO->getOpcode() != Instruction::Add &&
      BO->getOpcode() != Instruction::Sub &&
      BO->getOpcode() != Instruction::Or) {
    return false;
  }

  Value *LHS = BO->getOperand(0), *RHS = BO->getOperand(1);
  // Do not trace into "or" unless it is equivalent to "add". If LHS and RHS
  // don't have common bits, (LHS | RHS) is equivalent to (LHS + RHS).
  // FIXME: this does not appear to be covered by any tests
  //        (with x86/aarch64 backends at least)
  if (BO->getOpcode() == Instruction::Or &&
      !haveNoCommonBitsSet(LHS, RHS, DL, nullptr, BO, DT))
    return false;

  // In addition, tracing into BO requires that its surrounding s/zext (if
  // any) is distributable to both operands.
  //
  // Suppose BO = A op B.
  //  SignExtended | ZeroExtended | Distributable?
  // --------------+--------------+----------------------------------
  //       0       |      0       | true because no s/zext exists
  //       0       |      1       | zext(BO) == zext(A) op zext(B)
  //       1       |      0       | sext(BO) == sext(A) op sext(B)
  //       1       |      1       | zext(sext(BO)) ==
  //               |              |     zext(sext(A)) op zext(sext(B))
  if (BO->getOpcode() == Instruction::Add && !ZeroExtended && NonNegative) {
    // If a + b >= 0 and (a >= 0 or b >= 0), then
    //   sext(a + b) = sext(a) + sext(b)
    // even if the addition is not marked nsw.
    //
    // Leveraging this invariant, we can trace into an sext'ed inbounds GEP
    // index if the constant offset is non-negative.
    //
    // Verified in @sext_add in split-gep.ll.
    if (ConstantInt *ConstLHS = dyn_cast<ConstantInt>(LHS)) {
      if (!ConstLHS->isNegative())
        return true;
    }
    if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(RHS)) {
      if (!ConstRHS->isNegative())
        return true;
    }
  }

  // sext (add/sub nsw A, B) == add/sub nsw (sext A), (sext B)
  // zext (add/sub nuw A, B) == add/sub nuw (zext A), (zext B)
  if (BO->getOpcode() == Instruction::Add ||
      BO->getOpcode() == Instruction::Sub) {
    if (SignExtended && !BO->hasNoSignedWrap())
      return false;
    if (ZeroExtended && !BO->hasNoUnsignedWrap())
      return false;
  }

  return true;
}

APInt ConstantOffsetExtractor::findInEitherOperand(BinaryOperator *BO,
                                                   bool SignExtended,
                                                   bool ZeroExtended) {
  // BO being non-negative does not shed light on whether its operands are
  // non-negative. Clear the NonNegative flag here.
  APInt ConstantOffset = find(BO->getOperand(0), SignExtended, ZeroExtended,
                              /* NonNegative */ false);
  // If we found a constant offset in the left operand, stop and return that.
  // This shortcut might cause us to miss opportunities to combine the
  // constant offsets in both operands, e.g., (a + 4) + (b + 5) => (a + b) + 9.
  // However, such cases are probably already handled by -instcombine,
  // given this pass runs after the standard optimizations.
  if (ConstantOffset != 0) return ConstantOffset;
  ConstantOffset = find(BO->getOperand(1), SignExtended, ZeroExtended,
                        /* NonNegative */ false);
  // If BO is a sub operator, negate the constant offset found in the right
  // operand.
  if (BO->getOpcode() == Instruction::Sub)
    ConstantOffset = -ConstantOffset;
  return ConstantOffset;
}

APInt ConstantOffsetExtractor::find(Value *V, bool SignExtended,
                                    bool ZeroExtended, bool NonNegative) {
  // TODO(jingyue): We could trace into integer/pointer casts, such as
  // inttoptr, ptrtoint, bitcast, and addrspacecast. We choose to handle only
  // integers because it gives good enough results for our benchmarks.
  unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();

  // We cannot do much with Values that are not a User, such as an Argument.
  User *U = dyn_cast<User>(V);
  if (U == nullptr) return APInt(BitWidth, 0);

  APInt ConstantOffset(BitWidth, 0);
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // Hooray, we found it!
    ConstantOffset = CI->getValue();
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V)) {
    // Trace into subexpressions for more hoisting opportunities.
    if (CanTraceInto(SignExtended, ZeroExtended, BO, NonNegative))
      ConstantOffset = findInEitherOperand(BO, SignExtended, ZeroExtended);
  } else if (isa<SExtInst>(V)) {
    ConstantOffset = find(U->getOperand(0), /* SignExtended */ true,
                          ZeroExtended, NonNegative).sext(BitWidth);
  } else if (isa<ZExtInst>(V)) {
    // As an optimization, we can clear the SignExtended flag because
    // sext(zext(a)) = zext(a). Verified in @sext_zext in split-gep.ll.
    //
    // Clear the NonNegative flag, because zext(a) >= 0 does not imply a >= 0.
    ConstantOffset =
        find(U->getOperand(0), /* SignExtended */ false,
             /* ZeroExtended */ true, /* NonNegative */ false).zext(BitWidth);
  }

  // If we found a non-zero constant offset, add it to the path for
  // rebuildWithoutConstOffset. Zero is a valid constant offset, but doesn't
  // help this optimization.
  if (ConstantOffset != 0)
    UserChain.push_back(U);
  return ConstantOffset;
}

Value *ConstantOffsetExtractor::applyExts(Value *V) {
  Value *Current = V;
  // ExtInsts is built in the use-def order. Therefore, we apply them to V
  // in reverse order.
  for (auto I = ExtInsts.rbegin(), E = ExtInsts.rend(); I != E; ++I) {
    if (Constant *C = dyn_cast<Constant>(Current)) {
      // If Current is a constant, apply s/zext using ConstantExpr::getCast.
      // ConstantExpr::getCast emits a ConstantInt if C is a ConstantInt.
      Current = ConstantExpr::getCast((*I)->getOpcode(), C, (*I)->getType());
    } else {
      Instruction *Ext = (*I)->clone();
      Ext->setOperand(0, Current);
      Ext->insertBefore(IP);
      Current = Ext;
    }
  }
  return Current;
}

Value *ConstantOffsetExtractor::rebuildWithoutConstOffset() {
  distributeExtsAndCloneChain(UserChain.size() - 1);
  // Remove all nullptrs (used to be s/zext) from UserChain.
  unsigned NewSize = 0;
  for (User *I : UserChain) {
    if (I != nullptr) {
      UserChain[NewSize] = I;
      NewSize++;
    }
  }
  UserChain.resize(NewSize);
  return removeConstOffset(UserChain.size() - 1);
}

Value *
ConstantOffsetExtractor::distributeExtsAndCloneChain(unsigned ChainIndex) {
  User *U = UserChain[ChainIndex];
  if (ChainIndex == 0) {
    assert(isa<ConstantInt>(U));
    // If U is a ConstantInt, applyExts will return a ConstantInt as well.
    return UserChain[ChainIndex] = cast<ConstantInt>(applyExts(U));
  }

  if (CastInst *Cast = dyn_cast<CastInst>(U)) {
    assert((isa<SExtInst>(Cast) || isa<ZExtInst>(Cast)) &&
           "We only traced into two types of CastInst: sext and zext");
    ExtInsts.push_back(Cast);
    UserChain[ChainIndex] = nullptr;
    return distributeExtsAndCloneChain(ChainIndex - 1);
  }

  // Function find only traces into BinaryOperator and CastInst.
  BinaryOperator *BO = cast<BinaryOperator>(U);
  // OpNo = which operand of BO is UserChain[ChainIndex - 1]
  unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
  Value *TheOther = applyExts(BO->getOperand(1 - OpNo));
  Value *NextInChain = distributeExtsAndCloneChain(ChainIndex - 1);

  BinaryOperator *NewBO = nullptr;
  if (OpNo == 0) {
    NewBO = BinaryOperator::Create(BO->getOpcode(), NextInChain, TheOther,
                                   BO->getName(), IP);
  } else {
    NewBO = BinaryOperator::Create(BO->getOpcode(), TheOther, NextInChain,
                                   BO->getName(), IP);
  }
  return UserChain[ChainIndex] = NewBO;
}

Value *ConstantOffsetExtractor::removeConstOffset(unsigned ChainIndex) {
  if (ChainIndex == 0) {
    assert(isa<ConstantInt>(UserChain[ChainIndex]));
    return ConstantInt::getNullValue(UserChain[ChainIndex]->getType());
  }

  BinaryOperator *BO = cast<BinaryOperator>(UserChain[ChainIndex]);
  assert(BO->getNumUses() <= 1 &&
         "distributeExtsAndCloneChain clones each BinaryOperator in "
         "UserChain, so no one should be used more than "
         "once");

  unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
  assert(BO->getOperand(OpNo) == UserChain[ChainIndex - 1]);
  Value *NextInChain = removeConstOffset(ChainIndex - 1);
  Value *TheOther = BO->getOperand(1 - OpNo);

  // If NextInChain is 0 and not the LHS of a sub, we can simplify the
  // sub-expression to be just TheOther.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(NextInChain)) {
    if (CI->isZero() && !(BO->getOpcode() == Instruction::Sub && OpNo == 0))
      return TheOther;
  }

  BinaryOperator::BinaryOps NewOp = BO->getOpcode();
  if (BO->getOpcode() == Instruction::Or) {
    // Rebuild "or" as "add", because "or" may be invalid for the new
    // expression.
    //
    // For instance, given
    //   a | (b + 5) where a and b + 5 have no common bits,
    // we can extract 5 as the constant offset.
    //
    // However, reusing the "or" in the new index would give us
    //   (a | b) + 5
    // which does not equal a | (b + 5).
    //
    // Replacing the "or" with "add" is fine, because
    //   a | (b + 5) = a + (b + 5) = (a + b) + 5
    NewOp = Instruction::Add;
  }

  BinaryOperator *NewBO;
  if (OpNo == 0) {
    NewBO = BinaryOperator::Create(NewOp, NextInChain, TheOther, "", IP);
  } else {
    NewBO = BinaryOperator::Create(NewOp, TheOther, NextInChain, "", IP);
  }
  NewBO->takeName(BO);
  return NewBO;
}

Value *ConstantOffsetExtractor::Extract(Value *Idx, GetElementPtrInst *GEP,
                                        User *&UserChainTail,
                                        const DominatorTree *DT) {
  ConstantOffsetExtractor Extractor(GEP, DT);
  // Find a non-zero constant offset first.
  APInt ConstantOffset =
      Extractor.find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
                     GEP->isInBounds());
  if (ConstantOffset == 0) {
    UserChainTail = nullptr;
    return nullptr;
  }
  // Separate the constant offset from the GEP index.
  Value *IdxWithoutConstOffset = Extractor.rebuildWithoutConstOffset();
  UserChainTail = Extractor.UserChain.back();
  return IdxWithoutConstOffset;
}

int64_t ConstantOffsetExtractor::Find(Value *Idx, GetElementPtrInst *GEP,
                                      const DominatorTree *DT) {
  // If Idx is an index of an inbounds GEP, Idx is guaranteed to be
  // non-negative.
  return ConstantOffsetExtractor(GEP, DT)
      .find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
            GEP->isInBounds())
      .getSExtValue();
}

bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToPointerSize(
    GetElementPtrInst *GEP) {
  bool Changed = false;
  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
       I != E; ++I, ++GTI) {
    // Skip struct member indices which must be i32.
    if (GTI.isSequential()) {
      if ((*I)->getType() != IntPtrTy) {
        *I = CastInst::CreateIntegerCast(*I, IntPtrTy, true, "idxprom", GEP);
        Changed = true;
      }
    }
  }
  return Changed;
}

int64_t
SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
                                                 bool &NeedsExtraction) {
  NeedsExtraction = false;
  int64_t AccumulativeByteOffset = 0;
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      // Try to extract a constant offset from this GEP index.
      int64_t ConstantOffset =
          ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP, DT);
      if (ConstantOffset != 0) {
        NeedsExtraction = true;
        // A GEP may have multiple indices.  We accumulate the extracted
        // constant offset into a byte offset, and later offset the remainder
        // of the original GEP with this byte offset.
        AccumulativeByteOffset +=
            ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
      }
    } else if (LowerGEP) {
      StructType *StTy = GTI.getStructType();
      uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue();
      // Skip field 0 as the offset is always 0.
      if (Field != 0) {
        NeedsExtraction = true;
        AccumulativeByteOffset +=
            DL->getStructLayout(StTy)->getElementOffset(Field);
      }
    }
  }
  return AccumulativeByteOffset;
}

void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
    GetElementPtrInst *Variadic, int64_t AccumulativeByteOffset) {
  IRBuilder<> Builder(Variadic);
  Type *IntPtrTy = DL->getIntPtrType(Variadic->getType());

  Type *I8PtrTy =
      Builder.getInt8PtrTy(Variadic->getType()->getPointerAddressSpace());
  Value *ResultPtr = Variadic->getOperand(0);
  Loop *L = LI->getLoopFor(Variadic->getParent());
  // The base is a swap candidate only if it is loop invariant and has no more
  // than one use inside the loop.
  bool isSwapCandidate =
      L && L->isLoopInvariant(ResultPtr) &&
      !hasMoreThanOneUseInLoop(ResultPtr, L);
  Value *FirstResult = nullptr;

  if (ResultPtr->getType() != I8PtrTy)
    ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);

  gep_type_iterator GTI = gep_type_begin(*Variadic);
  // Create an ugly GEP for each sequential index. We don't create GEPs for
  // structure indices, as they are accumulated in the constant offset index.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      Value *Idx = Variadic->getOperand(I);
      // Skip zero indices.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
        if (CI->isZero())
          continue;

      APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(),
                                DL->getTypeAllocSize(GTI.getIndexedType()));
      // Scale the index by element size.
      if (ElementSize != 1) {
        if (ElementSize.isPowerOf2()) {
          Idx = Builder.CreateShl(
              Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2()));
        } else {
          Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize));
        }
      }
      // Create an ugly GEP with a single index for each index.
      ResultPtr =
          Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Idx, "uglygep");
      if (FirstResult == nullptr)
        FirstResult = ResultPtr;
    }
  }

  // Create a GEP with the constant offset index.
  if (AccumulativeByteOffset != 0) {
    Value *Offset = ConstantInt::get(IntPtrTy, AccumulativeByteOffset);
    ResultPtr =
        Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Offset, "uglygep");
  } else
    isSwapCandidate = false;

  // If we created a GEP with a constant index, and the base is loop invariant,
  // then we swap the first GEP with the constant-offset GEP, so that LICM can
  // later hoist the constant-offset GEP out of the loop.
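  // For illustration (hypothetical IR): with a loop-invariant base %p and a
  // loop-variant index %o, swapping the index operands turns
  //   %uglygep  = getelementptr i8, i8* %p, i64 %o
  //   %uglygep2 = getelementptr i8, i8* %uglygep, i64 42
  // into
  //   %uglygep  = getelementptr i8, i8* %p, i64 42   ; loop invariant
  //   %uglygep2 = getelementptr i8, i8* %uglygep, i64 %o
  // so the first GEP can be hoisted out of the loop.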
  GetElementPtrInst *FirstGEP = dyn_cast_or_null<GetElementPtrInst>(FirstResult);
  GetElementPtrInst *SecondGEP = dyn_cast_or_null<GetElementPtrInst>(ResultPtr);
  if (isSwapCandidate && isLegalToSwapOperand(FirstGEP, SecondGEP, L))
    swapGEPOperand(FirstGEP, SecondGEP);

  if (ResultPtr->getType() != Variadic->getType())
    ResultPtr = Builder.CreateBitCast(ResultPtr, Variadic->getType());

  Variadic->replaceAllUsesWith(ResultPtr);
  Variadic->eraseFromParent();
}

void
SeparateConstOffsetFromGEP::lowerToArithmetics(GetElementPtrInst *Variadic,
                                               int64_t AccumulativeByteOffset) {
  IRBuilder<> Builder(Variadic);
  Type *IntPtrTy = DL->getIntPtrType(Variadic->getType());

  Value *ResultPtr = Builder.CreatePtrToInt(Variadic->getOperand(0), IntPtrTy);
  gep_type_iterator GTI = gep_type_begin(*Variadic);
  // Create ADD/SHL/MUL arithmetic operations for each sequential index. We
  // don't create arithmetic for structure indices, as they are accumulated
  // in the constant offset index.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      Value *Idx = Variadic->getOperand(I);
      // Skip zero indices.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
        if (CI->isZero())
          continue;

      APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(),
                                DL->getTypeAllocSize(GTI.getIndexedType()));
      // Scale the index by element size.
      if (ElementSize != 1) {
        if (ElementSize.isPowerOf2()) {
          Idx = Builder.CreateShl(
              Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2()));
        } else {
          Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize));
        }
      }
      // Create an ADD for each index.
      ResultPtr = Builder.CreateAdd(ResultPtr, Idx);
    }
  }

  // Create an ADD for the constant offset index.
  if (AccumulativeByteOffset != 0) {
    ResultPtr = Builder.CreateAdd(
        ResultPtr, ConstantInt::get(IntPtrTy, AccumulativeByteOffset));
  }

  ResultPtr = Builder.CreateIntToPtr(ResultPtr, Variadic->getType());
  Variadic->replaceAllUsesWith(ResultPtr);
  Variadic->eraseFromParent();
}

bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
  // Skip vector GEPs.
  if (GEP->getType()->isVectorTy())
    return false;

  // The backend can already nicely handle the case where all indices are
  // constant.
  if (GEP->hasAllConstantIndices())
    return false;

  bool Changed = canonicalizeArrayIndicesToPointerSize(GEP);

  bool NeedsExtraction;
  int64_t AccumulativeByteOffset = accumulateByteOffset(GEP, NeedsExtraction);

  if (!NeedsExtraction)
    return Changed;

  TargetTransformInfo &TTI =
      getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*GEP->getFunction());

  // If LowerGEP is disabled, before really splitting the GEP, check whether
  // the backend supports the addressing mode we are about to produce. If not,
  // this splitting probably won't be beneficial.
  // If LowerGEP is enabled, even if the extracted constant offset cannot match
  // the addressing mode, we can still optimize the other lowered parts of the
  // variadic indices. Therefore, we don't check for addressing modes in that
  // case.
  if (!LowerGEP) {
    unsigned AddrSpace = GEP->getPointerAddressSpace();
    if (!TTI.isLegalAddressingMode(GEP->getResultElementType(),
                                   /*BaseGV=*/nullptr, AccumulativeByteOffset,
                                   /*HasBaseReg=*/true, /*Scale=*/0,
                                   AddrSpace)) {
      return Changed;
    }
  }

  // Remove the constant offset in each sequential index. The resultant GEP
  // computes the variadic base.
  // Notice that we don't remove struct field indices here. If LowerGEP is
  // disabled, a structure index is not accumulated and we still use the old
  // one. If LowerGEP is enabled, a structure index is accumulated in the
  // constant offset. lowerToSingleIndexGEPs or lowerToArithmetics will later
  // handle the constant offset and won't need a new structure index.
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      // Splits this GEP index into a variadic part and a constant offset, and
      // uses the variadic part as the new index.
      Value *OldIdx = GEP->getOperand(I);
      User *UserChainTail;
      Value *NewIdx =
          ConstantOffsetExtractor::Extract(OldIdx, GEP, UserChainTail, DT);
      if (NewIdx != nullptr) {
        // Switches to the index with the constant offset removed.
        GEP->setOperand(I, NewIdx);
        // After switching to the new index, we can garbage-collect UserChain
        // and the old index if they are not used.
        RecursivelyDeleteTriviallyDeadInstructions(UserChainTail);
        RecursivelyDeleteTriviallyDeadInstructions(OldIdx);
      }
    }
  }

  // Clear the inbounds attribute because the new index may be off-bound.
  // e.g.,
  //
  //   b     = add i64 a, 5
  //   addr  = gep inbounds float, float* p, i64 b
  //
  // is transformed to:
  //
  //   addr2 = gep float, float* p, i64 a ; inbounds removed
  //   addr  = gep inbounds float, float* addr2, i64 5
  //
  // If a is -4, although the old index b is in bounds, the new index a is
  // off-bound. http://llvm.org/docs/LangRef.html#id181 says "if the
  // inbounds keyword is not present, the offsets are added to the base
  // address with silently-wrapping two's complement arithmetic".
  // Therefore, the final code will be semantically equivalent.
  //
  // TODO(jingyue): do some range analysis to keep as many inbounds as
  // possible. GEPs with inbounds are more friendly to alias analysis.
  bool GEPWasInBounds = GEP->isInBounds();
  GEP->setIsInBounds(false);

  // Lowers a GEP to either GEPs with a single index or arithmetic operations.
  if (LowerGEP) {
    // As currently BasicAA does not analyze ptrtoint/inttoptr, do not lower to
    // arithmetic operations if the target uses alias analysis in codegen.
    if (TTI.useAA())
      lowerToSingleIndexGEPs(GEP, AccumulativeByteOffset);
    else
      lowerToArithmetics(GEP, AccumulativeByteOffset);
    return true;
  }

  // No need to create another GEP if the accumulative byte offset is 0.
  if (AccumulativeByteOffset == 0)
    return true;

  // Offsets the base with the accumulative byte offset.
  //
  //   %gep                        ; the base
  //   ... %gep ...
  //
  // => add the offset
  //
  //   %gep2                       ; clone of %gep
  //   %new.gep = gep %gep2, <offset / sizeof(*%gep)>
  //   %gep                        ; will be removed
  //   ... %gep ...
  //
  // => replace all uses of %gep with %new.gep and remove %gep
  //
  //   %gep2                       ; clone of %gep
  //   %new.gep = gep %gep2, <offset / sizeof(*%gep)>
  //   ... %new.gep ...
  //
  // If AccumulativeByteOffset is not a multiple of sizeof(*%gep), we emit an
  // uglygep (http://llvm.org/docs/GetElementPtr.html#what-s-an-uglygep):
  // bitcast %gep2 to i8*, add the offset, and bitcast the result back to the
  // type of %gep.
  //
  //   %gep2                       ; clone of %gep
  //   %0       = bitcast %gep2 to i8*
  //   %uglygep = gep %0, <offset>
  //   %new.gep = bitcast %uglygep to <type of %gep>
  //   ... %new.gep ...
  Instruction *NewGEP = GEP->clone();
  NewGEP->insertBefore(GEP);

  // Per the ANSI C standard, signed / unsigned = unsigned and signed %
  // unsigned = unsigned. Therefore, we cast ElementTypeSizeOfGEP to signed so
  // that the division and modulo below, which involve the signed
  // AccumulativeByteOffset, are done in signed arithmetic.
  int64_t ElementTypeSizeOfGEP = static_cast<int64_t>(
      DL->getTypeAllocSize(GEP->getResultElementType()));
  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
  if (AccumulativeByteOffset % ElementTypeSizeOfGEP == 0) {
    // Very likely. As long as %gep is naturally aligned, the byte offset we
    // extracted should be a multiple of sizeof(*%gep).
    int64_t Index = AccumulativeByteOffset / ElementTypeSizeOfGEP;
    NewGEP = GetElementPtrInst::Create(GEP->getResultElementType(), NewGEP,
                                       ConstantInt::get(IntPtrTy, Index, true),
                                       GEP->getName(), GEP);
    NewGEP->copyMetadata(*GEP);
    // Inherit the inbounds attribute of the original GEP.
    cast<GetElementPtrInst>(NewGEP)->setIsInBounds(GEPWasInBounds);
  } else {
    // Unlikely but possible. For example,
    // #pragma pack(1)
    // struct S {
    //   int a[3];
    //   int64 b[8];
    // };
    // #pragma pack()
    //
    // Suppose the gep before extraction is &s[i + 1].b[j + 3]. After
    // extraction, it becomes &s[i].b[j] and AccumulativeByteOffset is
    // sizeof(S) + 3 * sizeof(int64) = 100, which is not a multiple of
    // sizeof(int64).
    //
    // Emit an uglygep in this case.
    Type *I8PtrTy = Type::getInt8PtrTy(GEP->getContext(),
                                       GEP->getPointerAddressSpace());
    NewGEP = new BitCastInst(NewGEP, I8PtrTy, "", GEP);
    NewGEP = GetElementPtrInst::Create(
        Type::getInt8Ty(GEP->getContext()), NewGEP,
        ConstantInt::get(IntPtrTy, AccumulativeByteOffset, true), "uglygep",
        GEP);
    NewGEP->copyMetadata(*GEP);
    // Inherit the inbounds attribute of the original GEP.
    cast<GetElementPtrInst>(NewGEP)->setIsInBounds(GEPWasInBounds);
    if (GEP->getType() != I8PtrTy)
      NewGEP = new BitCastInst(NewGEP, GEP->getType(), GEP->getName(), GEP);
  }

  GEP->replaceAllUsesWith(NewGEP);
  GEP->eraseFromParent();

  return true;
}

bool SeparateConstOffsetFromGEP::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (DisableSeparateConstOffsetFromGEP)
    return false;

  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  bool Changed = false;
  for (BasicBlock &B : F) {
    for (BasicBlock::iterator I = B.begin(), IE = B.end(); I != IE;)
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I++))
        Changed |= splitGEP(GEP);
    // No need to split GEP ConstantExprs because all their indices are
    // constant already.
  }

  Changed |= reuniteExts(F);

  if (VerifyNoDeadCode)
    verifyNoDeadCode(F);

  return Changed;
}

Instruction *SeparateConstOffsetFromGEP::findClosestMatchingDominator(
    const SCEV *Key, Instruction *Dominatee) {
  auto Pos = DominatingExprs.find(Key);
  if (Pos == DominatingExprs.end())
    return nullptr;

  auto &Candidates = Pos->second;
  // Because we process the basic blocks in pre-order of the dominator tree, a
  // candidate that doesn't dominate the current instruction won't dominate any
  // future instruction either. Therefore, we pop it out of the stack. This
  // optimization makes the algorithm O(n).
  while (!Candidates.empty()) {
    Instruction *Candidate = Candidates.back();
    if (DT->dominates(Candidate, Dominatee))
      return Candidate;
    Candidates.pop_back();
  }
  return nullptr;
}

bool SeparateConstOffsetFromGEP::reuniteExts(Instruction *I) {
  if (!SE->isSCEVable(I->getType()))
    return false;

  //   Dom: LHS+RHS
  //   I: sext(LHS)+sext(RHS)
  // If Dom can't sign overflow and Dom dominates I, optimize I to sext(Dom).
  // TODO: handle zext
  Value *LHS = nullptr, *RHS = nullptr;
  if (match(I, m_Add(m_SExt(m_Value(LHS)), m_SExt(m_Value(RHS)))) ||
      match(I, m_Sub(m_SExt(m_Value(LHS)), m_SExt(m_Value(RHS))))) {
    if (LHS->getType() == RHS->getType()) {
      const SCEV *Key =
          SE->getAddExpr(SE->getUnknown(LHS), SE->getUnknown(RHS));
      if (auto *Dom = findClosestMatchingDominator(Key, I)) {
        Instruction *NewSExt = new SExtInst(Dom, I->getType(), "", I);
        NewSExt->takeName(I);
        I->replaceAllUsesWith(NewSExt);
        RecursivelyDeleteTriviallyDeadInstructions(I);
        return true;
      }
    }
  }

  // Add I to DominatingExprs if it's an add/sub that can't sign overflow.
  if (match(I, m_NSWAdd(m_Value(LHS), m_Value(RHS))) ||
      match(I, m_NSWSub(m_Value(LHS), m_Value(RHS)))) {
    if (programUndefinedIfFullPoison(I)) {
      const SCEV *Key =
          SE->getAddExpr(SE->getUnknown(LHS), SE->getUnknown(RHS));
      DominatingExprs[Key].push_back(I);
    }
  }
  return false;
}

bool SeparateConstOffsetFromGEP::reuniteExts(Function &F) {
  bool Changed = false;
  DominatingExprs.clear();
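  // Walk the dominator tree in depth-first (pre-order) fashion, so that a
  // dominating expression is always recorded in DominatingExprs before any
  // instruction it dominates is visited (see findClosestMatchingDominator).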
  for (const auto Node : depth_first(DT)) {
    BasicBlock *BB = Node->getBlock();
    for (auto I = BB->begin(); I != BB->end(); ) {
      Instruction *Cur = &*I++;
      Changed |= reuniteExts(Cur);
    }
  }
  return Changed;
}

void SeparateConstOffsetFromGEP::verifyNoDeadCode(Function &F) {
  for (BasicBlock &B : F) {
    for (Instruction &I : B) {
      if (isInstructionTriviallyDead(&I)) {
        std::string ErrMessage;
        raw_string_ostream RSO(ErrMessage);
        RSO << "Dead instruction detected!\n" << I << "\n";
        llvm_unreachable(RSO.str().c_str());
      }
    }
  }
}

bool SeparateConstOffsetFromGEP::isLegalToSwapOperand(
    GetElementPtrInst *FirstGEP, GetElementPtrInst *SecondGEP, Loop *CurLoop) {
  if (!FirstGEP || !FirstGEP->hasOneUse())
    return false;

  if (!SecondGEP || FirstGEP->getParent() != SecondGEP->getParent())
    return false;

  if (FirstGEP == SecondGEP)
    return false;

  unsigned FirstNum = FirstGEP->getNumOperands();
  unsigned SecondNum = SecondGEP->getNumOperands();
  // Give up if the number of operands is not 2.
  if (FirstNum != SecondNum || FirstNum != 2)
    return false;

  Value *FirstBase = FirstGEP->getOperand(0);
  Value *SecondBase = SecondGEP->getOperand(0);
  Value *FirstOffset = FirstGEP->getOperand(1);
  // Give up if the index of the first GEP is loop invariant.
  if (CurLoop->isLoopInvariant(FirstOffset))
    return false;

  // Give up if the bases don't have the same type.
  if (FirstBase->getType() != SecondBase->getType())
    return false;

  Instruction *FirstOffsetDef = dyn_cast<Instruction>(FirstOffset);

  // Check if the second operand of the first GEP has a constant coefficient.
  // For example, in the following code, we won't gain anything by hoisting
  // the second GEP out, because the second GEP can be folded away.
  //   %scevgep.sum.ur159 = add i64 %idxprom48.ur, 256
  //   %67 = shl i64 %scevgep.sum.ur159, 2
  //   %uglygep160 = getelementptr i8* %65, i64 %67
  //   %uglygep161 = getelementptr i8* %uglygep160, i64 -1024

  // Skip a constant shift instruction, which may have been generated by
  // splitting GEPs.
  if (FirstOffsetDef && FirstOffsetDef->isShift() &&
      isa<ConstantInt>(FirstOffsetDef->getOperand(1)))
    FirstOffsetDef = dyn_cast<Instruction>(FirstOffsetDef->getOperand(0));

  // Give up if FirstOffsetDef is an Add or Sub with a constant, because the
  // swap may not be profitable at all due to constant folding.
  if (FirstOffsetDef)
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FirstOffsetDef)) {
      unsigned opc = BO->getOpcode();
      if ((opc == Instruction::Add || opc == Instruction::Sub) &&
          (isa<ConstantInt>(BO->getOperand(0)) ||
           isa<ConstantInt>(BO->getOperand(1))))
        return false;
    }
  return true;
}

bool SeparateConstOffsetFromGEP::hasMoreThanOneUseInLoop(Value *V, Loop *L) {
  int UsesInLoop = 0;
  for (User *U : V->users()) {
    if (Instruction *User = dyn_cast<Instruction>(U))
      if (L->contains(User))
        if (++UsesInLoop > 1)
          return true;
  }
  return false;
}

void SeparateConstOffsetFromGEP::swapGEPOperand(GetElementPtrInst *First,
                                                GetElementPtrInst *Second) {
  Value *Offset1 = First->getOperand(1);
  Value *Offset2 = Second->getOperand(1);
  First->setOperand(1, Offset2);
  Second->setOperand(1, Offset1);

  // We changed p+o+c to p+c+o; p+c may no longer be in bounds.
  const DataLayout &DAL = First->getModule()->getDataLayout();
  APInt Offset(DAL.getIndexSizeInBits(
                   cast<PointerType>(First->getType())->getAddressSpace()),
               0);
  Value *NewBase =
      First->stripAndAccumulateInBoundsConstantOffsets(DAL, Offset);
  uint64_t ObjectSize;
  if (!getObjectSize(NewBase, ObjectSize, DAL, TLI) ||
      Offset.ugt(ObjectSize)) {
    First->setIsInBounds(false);
    Second->setIsInBounds(false);
  } else
    First->setIsInBounds(true);
}
1309