xref: /llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp (revision 6ab26eab4f1e06f2da7b3183c55666ad57f8866e)
1 //===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass munges the code in the input function to better prepare it for
10 // SelectionDAG-based code generation. This works around limitations in its
11 // basic-block-at-a-time approach. It should eventually be removed.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/CodeGen/CodeGenPrepare.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/MapVector.h"
20 #include "llvm/ADT/PointerIntPair.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/Analysis/BlockFrequencyInfo.h"
26 #include "llvm/Analysis/BranchProbabilityInfo.h"
27 #include "llvm/Analysis/InstructionSimplify.h"
28 #include "llvm/Analysis/LoopInfo.h"
29 #include "llvm/Analysis/ProfileSummaryInfo.h"
30 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
31 #include "llvm/Analysis/TargetLibraryInfo.h"
32 #include "llvm/Analysis/TargetTransformInfo.h"
33 #include "llvm/Analysis/ValueTracking.h"
34 #include "llvm/Analysis/VectorUtils.h"
35 #include "llvm/CodeGen/Analysis.h"
36 #include "llvm/CodeGen/BasicBlockSectionsProfileReader.h"
37 #include "llvm/CodeGen/ISDOpcodes.h"
38 #include "llvm/CodeGen/SelectionDAGNodes.h"
39 #include "llvm/CodeGen/TargetLowering.h"
40 #include "llvm/CodeGen/TargetPassConfig.h"
41 #include "llvm/CodeGen/TargetSubtargetInfo.h"
42 #include "llvm/CodeGen/ValueTypes.h"
43 #include "llvm/CodeGenTypes/MachineValueType.h"
44 #include "llvm/Config/llvm-config.h"
45 #include "llvm/IR/Argument.h"
46 #include "llvm/IR/Attributes.h"
47 #include "llvm/IR/BasicBlock.h"
48 #include "llvm/IR/Constant.h"
49 #include "llvm/IR/Constants.h"
50 #include "llvm/IR/DataLayout.h"
51 #include "llvm/IR/DebugInfo.h"
52 #include "llvm/IR/DerivedTypes.h"
53 #include "llvm/IR/Dominators.h"
54 #include "llvm/IR/Function.h"
55 #include "llvm/IR/GetElementPtrTypeIterator.h"
56 #include "llvm/IR/GlobalValue.h"
57 #include "llvm/IR/GlobalVariable.h"
58 #include "llvm/IR/IRBuilder.h"
59 #include "llvm/IR/InlineAsm.h"
60 #include "llvm/IR/InstrTypes.h"
61 #include "llvm/IR/Instruction.h"
62 #include "llvm/IR/Instructions.h"
63 #include "llvm/IR/IntrinsicInst.h"
64 #include "llvm/IR/Intrinsics.h"
65 #include "llvm/IR/IntrinsicsAArch64.h"
66 #include "llvm/IR/LLVMContext.h"
67 #include "llvm/IR/MDBuilder.h"
68 #include "llvm/IR/Module.h"
69 #include "llvm/IR/Operator.h"
70 #include "llvm/IR/PatternMatch.h"
71 #include "llvm/IR/ProfDataUtils.h"
72 #include "llvm/IR/Statepoint.h"
73 #include "llvm/IR/Type.h"
74 #include "llvm/IR/Use.h"
75 #include "llvm/IR/User.h"
76 #include "llvm/IR/Value.h"
77 #include "llvm/IR/ValueHandle.h"
78 #include "llvm/IR/ValueMap.h"
79 #include "llvm/InitializePasses.h"
80 #include "llvm/Pass.h"
81 #include "llvm/Support/BlockFrequency.h"
82 #include "llvm/Support/BranchProbability.h"
83 #include "llvm/Support/Casting.h"
84 #include "llvm/Support/CommandLine.h"
85 #include "llvm/Support/Compiler.h"
86 #include "llvm/Support/Debug.h"
87 #include "llvm/Support/ErrorHandling.h"
88 #include "llvm/Support/MathExtras.h"
89 #include "llvm/Support/raw_ostream.h"
90 #include "llvm/Target/TargetMachine.h"
91 #include "llvm/Target/TargetOptions.h"
92 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
93 #include "llvm/Transforms/Utils/BypassSlowDivision.h"
94 #include "llvm/Transforms/Utils/Local.h"
95 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
96 #include "llvm/Transforms/Utils/SizeOpts.h"
97 #include <algorithm>
98 #include <cassert>
99 #include <cstdint>
100 #include <iterator>
101 #include <limits>
102 #include <memory>
103 #include <optional>
104 #include <utility>
105 #include <vector>
106 
107 using namespace llvm;
108 using namespace llvm::PatternMatch;
109 
110 #define DEBUG_TYPE "codegenprepare"
111 
112 STATISTIC(NumBlocksElim, "Number of blocks eliminated");
113 STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
114 STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
115 STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
116                       "sunken Cmps");
117 STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
118                        "of sunken Casts");
119 STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
120                           "computations were sunk");
121 STATISTIC(NumMemoryInstsPhiCreated,
122           "Number of phis created when address "
123           "computations were sunk to memory instructions");
124 STATISTIC(NumMemoryInstsSelectCreated,
125           "Number of selects created when address "
126           "computations were sunk to memory instructions");
127 STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
128 STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
129 STATISTIC(NumAndsAdded,
130           "Number of and mask instructions added to form ext loads");
131 STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
132 STATISTIC(NumRetsDup, "Number of return instructions duplicated");
133 STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
134 STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
135 STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
136 
137 static cl::opt<bool> DisableBranchOpts(
138     "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
139     cl::desc("Disable branch optimizations in CodeGenPrepare"));
140 
141 static cl::opt<bool>
142     DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
143                   cl::desc("Disable GC optimizations in CodeGenPrepare"));
144 
145 static cl::opt<bool>
146     DisableSelectToBranch("disable-cgp-select2branch", cl::Hidden,
147                           cl::init(false),
148                           cl::desc("Disable select to branch conversion."));
149 
150 static cl::opt<bool>
151     AddrSinkUsingGEPs("addr-sink-using-gep", cl::Hidden, cl::init(true),
152                       cl::desc("Address sinking in CGP using GEPs."));
153 
154 static cl::opt<bool>
155     EnableAndCmpSinking("enable-andcmp-sinking", cl::Hidden, cl::init(true),
156                         cl::desc("Enable sinking and/cmp into branches."));
157 
158 static cl::opt<bool> DisableStoreExtract(
159     "disable-cgp-store-extract", cl::Hidden, cl::init(false),
160     cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));
161 
162 static cl::opt<bool> StressStoreExtract(
163     "stress-cgp-store-extract", cl::Hidden, cl::init(false),
164     cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));
165 
166 static cl::opt<bool> DisableExtLdPromotion(
167     "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
168     cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
169              "CodeGenPrepare"));
170 
171 static cl::opt<bool> StressExtLdPromotion(
172     "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
173     cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
174              "optimization in CodeGenPrepare"));
175 
176 static cl::opt<bool> DisablePreheaderProtect(
177     "disable-preheader-prot", cl::Hidden, cl::init(false),
178     cl::desc("Disable protection against removing loop preheaders"));
179 
180 static cl::opt<bool> ProfileGuidedSectionPrefix(
181     "profile-guided-section-prefix", cl::Hidden, cl::init(true),
182     cl::desc("Use profile info to add section prefix for hot/cold functions"));
183 
184 static cl::opt<bool> ProfileUnknownInSpecialSection(
185     "profile-unknown-in-special-section", cl::Hidden,
186     cl::desc("In a profiling mode like sampleFDO, if a function doesn't have "
187              "a profile, we cannot tell for sure that the function is cold, "
188              "because it may be a function newly added without ever being "
189              "sampled. With this flag enabled, the compiler can put such "
190              "profile-unknown functions into a special section, so the "
191              "runtime system can choose to handle them differently from the "
192              ".text section, to save RAM for example."));
193 
194 static cl::opt<bool> BBSectionsGuidedSectionPrefix(
195     "bbsections-guided-section-prefix", cl::Hidden, cl::init(true),
196     cl::desc("Use the basic-block-sections profile to determine the text "
197              "section prefix for hot functions. Functions with "
198              "basic-block-sections profile will be placed in `.text.hot` "
199              "regardless of their FDO profile info. Other functions won't be "
200              "impacted, i.e., their prefixes will be decided by FDO/sampleFDO "
201              "profiles."));
202 
203 static cl::opt<uint64_t> FreqRatioToSkipMerge(
204     "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
205     cl::desc("Skip merging empty blocks if (frequency of empty block) / "
206              "(frequency of destination block) is greater than this ratio"));
207 
208 static cl::opt<bool> ForceSplitStore(
209     "force-split-store", cl::Hidden, cl::init(false),
210     cl::desc("Force store splitting no matter what the target query says."));
211 
212 static cl::opt<bool> EnableTypePromotionMerge(
213     "cgp-type-promotion-merge", cl::Hidden,
214     cl::desc("Enable merging of redundant sexts when one dominates"
215              " the other."),
216     cl::init(true));
217 
218 static cl::opt<bool> DisableComplexAddrModes(
219     "disable-complex-addr-modes", cl::Hidden, cl::init(false),
220     cl::desc("Disables combining addressing modes with different parts "
221              "in optimizeMemoryInst."));
222 
223 static cl::opt<bool>
224     AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
225                     cl::desc("Allow creation of Phis in Address sinking."));
226 
227 static cl::opt<bool> AddrSinkNewSelects(
228     "addr-sink-new-select", cl::Hidden, cl::init(true),
229     cl::desc("Allow creation of selects in Address sinking."));
230 
231 static cl::opt<bool> AddrSinkCombineBaseReg(
232     "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
233     cl::desc("Allow combining of BaseReg field in Address sinking."));
234 
235 static cl::opt<bool> AddrSinkCombineBaseGV(
236     "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
237     cl::desc("Allow combining of BaseGV field in Address sinking."));
238 
239 static cl::opt<bool> AddrSinkCombineBaseOffs(
240     "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
241     cl::desc("Allow combining of BaseOffs field in Address sinking."));
242 
243 static cl::opt<bool> AddrSinkCombineScaledReg(
244     "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
245     cl::desc("Allow combining of ScaledReg field in Address sinking."));
246 
247 static cl::opt<bool>
248     EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
249                          cl::init(true),
250                          cl::desc("Enable splitting large offset of GEP."));
251 
252 static cl::opt<bool> EnableICMP_EQToICMP_ST(
253     "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
254     cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));
255 
256 static cl::opt<bool>
257     VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
258                      cl::desc("Enable BFI update verification for "
259                               "CodeGenPrepare."));
260 
261 static cl::opt<bool>
262     OptimizePhiTypes("cgp-optimize-phi-types", cl::Hidden, cl::init(true),
263                      cl::desc("Enable converting phi types in CodeGenPrepare"));
264 
265 static cl::opt<unsigned>
266     HugeFuncThresholdInCGPP("cgpp-huge-func", cl::init(10000), cl::Hidden,
267                             cl::desc("Minimum number of BBs for a function to be considered huge."));
268 
269 static cl::opt<unsigned>
270     MaxAddressUsersToScan("cgp-max-address-users-to-scan", cl::init(100),
271                           cl::Hidden,
272                           cl::desc("Max number of address users to look at"));
273 
274 static cl::opt<bool>
275     DisableDeletePHIs("disable-cgp-delete-phis", cl::Hidden, cl::init(false),
276                       cl::desc("Disable elimination of dead PHI nodes."));
277 
278 namespace {
279 
280 enum ExtType {
281   ZeroExtension, // Zero extension has been seen.
282   SignExtension, // Sign extension has been seen.
283   BothExtension  // This extension type is used if we saw sext after
284                  // ZeroExtension had been set, or if we saw zext after
285                  // SignExtension had been set. It makes the type
286                  // information of a promoted instruction invalid.
287 };
288 
289 enum ModifyDT {
290   NotModifyDT, // Do not modify any dominator tree.
291   ModifyBBDT,  // Modify the basic block dominator tree.
292   ModifyInstDT // Modify the dominance of instructions within a basic block.
293                // This usually means we moved/deleted/inserted an instruction
294                // in a basic block, so we should re-iterate the instructions
295                // in that basic block.
296 };
297 
298 using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
299 using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
300 using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
301 using SExts = SmallVector<Instruction *, 16>;
302 using ValueToSExts = MapVector<Value *, SExts>;
303 
304 class TypePromotionTransaction;
305 
306 class CodeGenPrepare {
307   friend class CodeGenPrepareLegacyPass;
308   const TargetMachine *TM = nullptr;
309   const TargetSubtargetInfo *SubtargetInfo = nullptr;
310   const TargetLowering *TLI = nullptr;
311   const TargetRegisterInfo *TRI = nullptr;
312   const TargetTransformInfo *TTI = nullptr;
313   const BasicBlockSectionsProfileReader *BBSectionsProfileReader = nullptr;
314   const TargetLibraryInfo *TLInfo = nullptr;
315   LoopInfo *LI = nullptr;
316   std::unique_ptr<BlockFrequencyInfo> BFI;
317   std::unique_ptr<BranchProbabilityInfo> BPI;
318   ProfileSummaryInfo *PSI = nullptr;
319 
320   /// As we scan instructions optimizing them, this is the next instruction
321   /// to optimize. Transforms that can invalidate this should update it.
322   BasicBlock::iterator CurInstIterator;
323 
324   /// Keeps track of non-local addresses that have been sunk into a block.
325   /// This allows us to avoid inserting duplicate code for blocks with
326   /// multiple load/stores of the same address. The usage of WeakTrackingVH
327   /// enables SunkAddrs to be treated as a cache whose entries can be
328   /// invalidated if a sunken address computation has been erased.
329   ValueMap<Value *, WeakTrackingVH> SunkAddrs;
330 
331   /// Keeps track of all instructions inserted for the current function.
332   SetOfInstrs InsertedInsts;
333 
334   /// Keeps track of the original types of promoted instructions, before
335   /// their promotion, for the current function.
336   InstrToOrigTy PromotedInsts;
337 
338   /// Keep track of instructions removed during promotion.
339   SetOfInstrs RemovedInsts;
340 
341   /// Keep track of sext chains based on their initial value.
342   DenseMap<Value *, Instruction *> SeenChainsForSExt;
343 
344   /// Keep track of GEPs accessing the same data structures such as structs or
345   /// arrays that are candidates to be split later because of their large
346   /// size.
347   MapVector<AssertingVH<Value>,
348             SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
349       LargeOffsetGEPMap;
350 
351   /// Keep track of new GEP bases after splitting GEPs with large offsets.
352   SmallSet<AssertingVH<Value>, 2> NewGEPBases;
353 
354   /// Map large-offset GEPs to their serial numbers.
355   DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;
356 
357   /// Keep track of promoted SExts.
358   ValueToSExts ValToSExtendedUses;
359 
360   /// True if the function has the OptSize attribute.
361   bool OptSize;
362 
363   /// DataLayout for the Function being processed.
364   const DataLayout *DL = nullptr;
365 
366   /// Building the dominator tree can be expensive, so we only build it
367   /// lazily and update it when required.
368   std::unique_ptr<DominatorTree> DT;
369 
370 public:
371   CodeGenPrepare(){};
372   CodeGenPrepare(const TargetMachine *TM) : TM(TM){};
373   /// If we encounter a huge function, we need to limit the build time.
374   bool IsHugeFunc = false;
375 
376   /// FreshBBs is like a worklist; it collects the updated BBs which need
377   /// to be optimized again.
378   /// Note: To limit build time in this pass, whenever a BB is updated we
379   /// need to insert it into FreshBBs for huge functions.
380   SmallSet<BasicBlock *, 32> FreshBBs;
381 
382   void releaseMemory() {
383     // Clear per function information.
384     InsertedInsts.clear();
385     PromotedInsts.clear();
386     FreshBBs.clear();
387     BPI.reset();
388     BFI.reset();
389   }
390 
391   bool run(Function &F, FunctionAnalysisManager &AM);
392 
393 private:
394   template <typename F>
395   void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
396     // Substituting can cause recursive simplifications, which can invalidate
397     // our iterator.  Use a WeakTrackingVH to hold onto it in case this
398     // happens.
399     Value *CurValue = &*CurInstIterator;
400     WeakTrackingVH IterHandle(CurValue);
401 
402     f();
403 
404     // If the iterator instruction was recursively deleted, start over at the
405     // start of the block.
406     if (IterHandle != CurValue) {
407       CurInstIterator = BB->begin();
408       SunkAddrs.clear();
409     }
410   }
411 
412   // Get the DominatorTree, building if necessary.
413   DominatorTree &getDT(Function &F) {
414     if (!DT)
415       DT = std::make_unique<DominatorTree>(F);
416     return *DT;
417   }
418 
419   void removeAllAssertingVHReferences(Value *V);
420   bool eliminateAssumptions(Function &F);
421   bool eliminateFallThrough(Function &F, DominatorTree *DT = nullptr);
422   bool eliminateMostlyEmptyBlocks(Function &F);
423   BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
424   bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
425   void eliminateMostlyEmptyBlock(BasicBlock *BB);
426   bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
427                                      bool isPreheader);
428   bool makeBitReverse(Instruction &I);
429   bool optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT);
430   bool optimizeInst(Instruction *I, ModifyDT &ModifiedDT);
431   bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, Type *AccessTy,
432                           unsigned AddrSpace);
433   bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
434   bool optimizeInlineAsmInst(CallInst *CS);
435   bool optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT);
436   bool optimizeExt(Instruction *&I);
437   bool optimizeExtUses(Instruction *I);
438   bool optimizeLoadExt(LoadInst *Load);
439   bool optimizeShiftInst(BinaryOperator *BO);
440   bool optimizeFunnelShift(IntrinsicInst *Fsh);
441   bool optimizeSelectInst(SelectInst *SI);
442   bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
443   bool optimizeSwitchType(SwitchInst *SI);
444   bool optimizeSwitchPhiConstants(SwitchInst *SI);
445   bool optimizeSwitchInst(SwitchInst *SI);
446   bool optimizeExtractElementInst(Instruction *Inst);
447   bool dupRetToEnableTailCallOpts(BasicBlock *BB, ModifyDT &ModifiedDT);
448   bool fixupDbgValue(Instruction *I);
449   bool fixupDbgVariableRecord(DbgVariableRecord &I);
450   bool fixupDbgVariableRecordsOnInst(Instruction &I);
451   bool placeDbgValues(Function &F);
452   bool placePseudoProbes(Function &F);
453   bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
454                     LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
455   bool tryToPromoteExts(TypePromotionTransaction &TPT,
456                         const SmallVectorImpl<Instruction *> &Exts,
457                         SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
458                         unsigned CreatedInstsCost = 0);
459   bool mergeSExts(Function &F);
460   bool splitLargeGEPOffsets();
461   bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited,
462                        SmallPtrSetImpl<Instruction *> &DeletedInstrs);
463   bool optimizePhiTypes(Function &F);
464   bool performAddressTypePromotion(
465       Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
466       bool HasPromoted, TypePromotionTransaction &TPT,
467       SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
468   bool splitBranchCondition(Function &F, ModifyDT &ModifiedDT);
469   bool simplifyOffsetableRelocate(GCStatepointInst &I);
470 
471   bool tryToSinkFreeOperands(Instruction *I);
472   bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0, Value *Arg1,
473                                    CmpInst *Cmp, Intrinsic::ID IID);
474   bool optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT);
475   bool optimizeURem(Instruction *Rem);
476   bool combineToUSubWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
477   bool combineToUAddWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
478   void verifyBFIUpdates(Function &F);
479   bool _run(Function &F);
480 };
481 
482 class CodeGenPrepareLegacyPass : public FunctionPass {
483 public:
484   static char ID; // Pass identification, replacement for typeid
485 
486   CodeGenPrepareLegacyPass() : FunctionPass(ID) {
487     initializeCodeGenPrepareLegacyPassPass(*PassRegistry::getPassRegistry());
488   }
489 
490   bool runOnFunction(Function &F) override;
491 
492   StringRef getPassName() const override { return "CodeGen Prepare"; }
493 
494   void getAnalysisUsage(AnalysisUsage &AU) const override {
495     // FIXME: When we can selectively preserve passes, preserve the domtree.
496     AU.addRequired<ProfileSummaryInfoWrapperPass>();
497     AU.addRequired<TargetLibraryInfoWrapperPass>();
498     AU.addRequired<TargetPassConfig>();
499     AU.addRequired<TargetTransformInfoWrapperPass>();
500     AU.addRequired<LoopInfoWrapperPass>();
501     AU.addUsedIfAvailable<BasicBlockSectionsProfileReaderWrapperPass>();
502   }
503 };
504 
505 } // end anonymous namespace
506 
507 char CodeGenPrepareLegacyPass::ID = 0;
508 
509 bool CodeGenPrepareLegacyPass::runOnFunction(Function &F) {
510   if (skipFunction(F))
511     return false;
512   auto TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
513   CodeGenPrepare CGP(TM);
514   CGP.DL = &F.getDataLayout();
515   CGP.SubtargetInfo = TM->getSubtargetImpl(F);
516   CGP.TLI = CGP.SubtargetInfo->getTargetLowering();
517   CGP.TRI = CGP.SubtargetInfo->getRegisterInfo();
518   CGP.TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
519   CGP.TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
520   CGP.LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
521   CGP.BPI.reset(new BranchProbabilityInfo(F, *CGP.LI));
522   CGP.BFI.reset(new BlockFrequencyInfo(F, *CGP.BPI, *CGP.LI));
523   CGP.PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
524   auto BBSPRWP =
525       getAnalysisIfAvailable<BasicBlockSectionsProfileReaderWrapperPass>();
526   CGP.BBSectionsProfileReader = BBSPRWP ? &BBSPRWP->getBBSPR() : nullptr;
527 
528   return CGP._run(F);
529 }
530 
531 INITIALIZE_PASS_BEGIN(CodeGenPrepareLegacyPass, DEBUG_TYPE,
532                       "Optimize for code generation", false, false)
533 INITIALIZE_PASS_DEPENDENCY(BasicBlockSectionsProfileReaderWrapperPass)
534 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
535 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
536 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
537 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
538 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
539 INITIALIZE_PASS_END(CodeGenPrepareLegacyPass, DEBUG_TYPE,
540                     "Optimize for code generation", false, false)
541 
542 FunctionPass *llvm::createCodeGenPrepareLegacyPass() {
543   return new CodeGenPrepareLegacyPass();
544 }
545 
546 PreservedAnalyses CodeGenPreparePass::run(Function &F,
547                                           FunctionAnalysisManager &AM) {
548   CodeGenPrepare CGP(TM);
549 
550   bool Changed = CGP.run(F, AM);
551   if (!Changed)
552     return PreservedAnalyses::all();
553 
554   PreservedAnalyses PA;
555   PA.preserve<TargetLibraryAnalysis>();
556   PA.preserve<TargetIRAnalysis>();
557   PA.preserve<LoopAnalysis>();
558   return PA;
559 }
560 
561 bool CodeGenPrepare::run(Function &F, FunctionAnalysisManager &AM) {
562   DL = &F.getDataLayout();
563   SubtargetInfo = TM->getSubtargetImpl(F);
564   TLI = SubtargetInfo->getTargetLowering();
565   TRI = SubtargetInfo->getRegisterInfo();
566   TLInfo = &AM.getResult<TargetLibraryAnalysis>(F);
567   TTI = &AM.getResult<TargetIRAnalysis>(F);
568   LI = &AM.getResult<LoopAnalysis>(F);
569   BPI.reset(new BranchProbabilityInfo(F, *LI));
570   BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
571   auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
572   PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
573   BBSectionsProfileReader =
574       AM.getCachedResult<BasicBlockSectionsProfileReaderAnalysis>(F);
575   return _run(F);
576 }
577 
578 bool CodeGenPrepare::_run(Function &F) {
579   bool EverMadeChange = false;
580 
581   OptSize = F.hasOptSize();
582   // Use the basic-block-sections profile to promote hot functions to .text.hot
583   // if requested.
584   if (BBSectionsGuidedSectionPrefix && BBSectionsProfileReader &&
585       BBSectionsProfileReader->isFunctionHot(F.getName())) {
586     F.setSectionPrefix("hot");
587   } else if (ProfileGuidedSectionPrefix) {
588     // The hot attribute overrides profile-count-based hotness, while
589     // profile-count-based hotness overrides the cold attribute.
590     // This is conservative behavior.
591     if (F.hasFnAttribute(Attribute::Hot) ||
592         PSI->isFunctionHotInCallGraph(&F, *BFI))
593       F.setSectionPrefix("hot");
594     // If PSI shows this function is not hot, we place the function into the
595     // unlikely section if (1) PSI shows this is a cold function, or
596     // (2) the function has the cold attribute.
597     else if (PSI->isFunctionColdInCallGraph(&F, *BFI) ||
598              F.hasFnAttribute(Attribute::Cold))
599       F.setSectionPrefix("unlikely");
600     else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() &&
601              PSI->isFunctionHotnessUnknown(F))
602       F.setSectionPrefix("unknown");
603   }
604 
605   /// This optimization identifies DIV instructions that can be
606   /// profitably bypassed and carried out with a shorter, faster divide.
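  // For example (illustrative IR sketch; the actual guarded code emitted by
  // bypassSlowDivision depends on the target's reported bypass widths):
  //
  //   %res = udiv i64 %a, %b
  //
  // may be rewritten to first check whether both operands fit in 32 bits and,
  // if so, take a fast path using the narrower divide:
  //
  //   %a32 = trunc i64 %a to i32
  //   %b32 = trunc i64 %b to i32
  //   %d32 = udiv i32 %a32, %b32
  //   %res.fast = zext i32 %d32 to i64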
607   if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
608     const DenseMap<unsigned int, unsigned int> &BypassWidths =
609         TLI->getBypassSlowDivWidths();
610     BasicBlock *BB = &*F.begin();
611     while (BB != nullptr) {
612       // bypassSlowDivision may create new BBs, but we don't want to reapply the
613       // optimization to those blocks.
614       BasicBlock *Next = BB->getNextNode();
615       if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
616         EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
617       BB = Next;
618     }
619   }
620 
621   // Get rid of @llvm.assume builtins before attempting to eliminate empty
622   // blocks, since there might be blocks that only contain @llvm.assume calls
623   // (plus arguments that we can get rid of).
624   EverMadeChange |= eliminateAssumptions(F);
625 
626   // Eliminate blocks that contain only PHI nodes and an
627   // unconditional branch.
628   EverMadeChange |= eliminateMostlyEmptyBlocks(F);
629 
630   ModifyDT ModifiedDT = ModifyDT::NotModifyDT;
631   if (!DisableBranchOpts)
632     EverMadeChange |= splitBranchCondition(F, ModifiedDT);
633 
634   // Split some critical edges where one of the sources is an indirect branch,
635   // to help generate sane code for PHIs involving such edges.
636   EverMadeChange |=
637       SplitIndirectBrCriticalEdges(F, /*IgnoreBlocksWithoutPHI=*/true);
638 
639   // If we are optimizing a huge function, we need to consider the build time
640   // because the basic algorithm's complexity is near O(N!).
641   IsHugeFunc = F.size() > HugeFuncThresholdInCGPP;
642 
643   // Transformations above may invalidate dominator tree and/or loop info.
644   DT.reset();
645   LI->releaseMemory();
646   LI->analyze(getDT(F));
647 
648   bool MadeChange = true;
649   bool FuncIterated = false;
650   while (MadeChange) {
651     MadeChange = false;
652 
653     for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
654       if (FuncIterated && !FreshBBs.contains(&BB))
655         continue;
656 
657       ModifyDT ModifiedDTOnIteration = ModifyDT::NotModifyDT;
658       bool Changed = optimizeBlock(BB, ModifiedDTOnIteration);
659 
660       if (ModifiedDTOnIteration == ModifyDT::ModifyBBDT)
661         DT.reset();
662 
663       MadeChange |= Changed;
664       if (IsHugeFunc) {
665         // If the BB is updated, it may still have a chance to be optimized.
666         // This usually happens during sink optimization.
667         // For example:
668         //
669         // bb0:
670         // %and = and i32 %a, 4
671         // %cmp = icmp eq i32 %and, 0
672         //
673         // If %cmp is sunk to another BB, %and will have a chance to sink too.
674         if (Changed)
675           FreshBBs.insert(&BB);
676         else if (FuncIterated)
677           FreshBBs.erase(&BB);
678       } else {
679         // For small/normal functions, we restart BB iteration if the dominator
680         // tree of the Function was changed.
681         if (ModifiedDTOnIteration != ModifyDT::NotModifyDT)
682           break;
683       }
684     }
685     // We have iterated over all BBs of the function (only matters for huge functions).
686     FuncIterated = IsHugeFunc;
687 
688     if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
689       MadeChange |= mergeSExts(F);
690     if (!LargeOffsetGEPMap.empty())
691       MadeChange |= splitLargeGEPOffsets();
692     MadeChange |= optimizePhiTypes(F);
693 
694     if (MadeChange)
695       eliminateFallThrough(F, DT.get());
696 
697 #ifndef NDEBUG
698     if (MadeChange && VerifyLoopInfo)
699       LI->verify(getDT(F));
700 #endif
701 
702     // Really free removed instructions during promotion.
703     for (Instruction *I : RemovedInsts)
704       I->deleteValue();
705 
706     EverMadeChange |= MadeChange;
707     SeenChainsForSExt.clear();
708     ValToSExtendedUses.clear();
709     RemovedInsts.clear();
710     LargeOffsetGEPMap.clear();
711     LargeOffsetGEPID.clear();
712   }
713 
714   NewGEPBases.clear();
715   SunkAddrs.clear();
716 
717   if (!DisableBranchOpts) {
718     MadeChange = false;
719     // Use a set vector to get deterministic iteration order. The order the
720     // blocks are removed may affect whether or not PHI nodes in successors
721     // are removed.
722     SmallSetVector<BasicBlock *, 8> WorkList;
723     for (BasicBlock &BB : F) {
724       SmallVector<BasicBlock *, 2> Successors(successors(&BB));
725       MadeChange |= ConstantFoldTerminator(&BB, true);
726       if (!MadeChange)
727         continue;
728 
729       for (BasicBlock *Succ : Successors)
730         if (pred_empty(Succ))
731           WorkList.insert(Succ);
732     }
733 
734     // Delete the dead blocks and any of their dead successors.
735     MadeChange |= !WorkList.empty();
736     while (!WorkList.empty()) {
737       BasicBlock *BB = WorkList.pop_back_val();
738       SmallVector<BasicBlock *, 2> Successors(successors(BB));
739 
740       DeleteDeadBlock(BB);
741 
742       for (BasicBlock *Succ : Successors)
743         if (pred_empty(Succ))
744           WorkList.insert(Succ);
745     }
746 
747     // Merge pairs of basic blocks with unconditional branches, connected by
748     // a single edge.
749     if (EverMadeChange || MadeChange)
750       MadeChange |= eliminateFallThrough(F);
751 
752     EverMadeChange |= MadeChange;
753   }
754 
755   if (!DisableGCOpts) {
756     SmallVector<GCStatepointInst *, 2> Statepoints;
757     for (BasicBlock &BB : F)
758       for (Instruction &I : BB)
759         if (auto *SP = dyn_cast<GCStatepointInst>(&I))
760           Statepoints.push_back(SP);
761     for (auto &I : Statepoints)
762       EverMadeChange |= simplifyOffsetableRelocate(*I);
763   }
764 
765   // Do this last to clean up use-before-def scenarios introduced by other
766   // preparatory transforms.
767   EverMadeChange |= placeDbgValues(F);
768   EverMadeChange |= placePseudoProbes(F);
769 
770 #ifndef NDEBUG
771   if (VerifyBFIUpdates)
772     verifyBFIUpdates(F);
773 #endif
774 
775   return EverMadeChange;
776 }
777 
778 bool CodeGenPrepare::eliminateAssumptions(Function &F) {
779   bool MadeChange = false;
780   for (BasicBlock &BB : F) {
781     CurInstIterator = BB.begin();
782     while (CurInstIterator != BB.end()) {
783       Instruction *I = &*(CurInstIterator++);
784       if (auto *Assume = dyn_cast<AssumeInst>(I)) {
785         MadeChange = true;
786         Value *Operand = Assume->getOperand(0);
787         Assume->eraseFromParent();
788 
789         resetIteratorIfInvalidatedWhileCalling(&BB, [&]() {
790           RecursivelyDeleteTriviallyDeadInstructions(Operand, TLInfo, nullptr);
791         });
792       }
793     }
794   }
795   return MadeChange;
796 }
797 
798 /// An instruction is about to be deleted, so remove all references to it in our
799 /// GEP-tracking data structures.
800 void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
801   LargeOffsetGEPMap.erase(V);
802   NewGEPBases.erase(V);
803 
804   auto GEP = dyn_cast<GetElementPtrInst>(V);
805   if (!GEP)
806     return;
807 
808   LargeOffsetGEPID.erase(GEP);
809 
810   auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
811   if (VecI == LargeOffsetGEPMap.end())
812     return;
813 
814   auto &GEPVector = VecI->second;
815   llvm::erase_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });
816 
817   if (GEPVector.empty())
818     LargeOffsetGEPMap.erase(VecI);
819 }
820 
821 // Verify BFI has been updated correctly by recomputing BFI and comparing them.
822 void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
823   DominatorTree NewDT(F);
824   LoopInfo NewLI(NewDT);
825   BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
826   BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
827   NewBFI.verifyMatch(*BFI);
828 }
829 
830 /// Merge basic blocks which are connected by a single edge, where one of the
831 /// basic blocks has a single successor pointing to the other basic block,
832 /// which has a single predecessor.
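// For example (illustrative sketch; block and value names are made up), this
// turns:
//
//   entry:
//     br label %next
//
//   next:                                     ; preds = %entry (only)
//     %v = add i32 %a, %b
//     ret i32 %v
//
// into a single block:
//
//   entry:
//     %v = add i32 %a, %b
//     ret i32 %v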
833 bool CodeGenPrepare::eliminateFallThrough(Function &F, DominatorTree *DT) {
834   bool Changed = false;
835   // Scan all of the blocks in the function, except for the entry block.
836   // Use a temporary array to avoid iterator being invalidated when
837   // deleting blocks.
838   SmallVector<WeakTrackingVH, 16> Blocks;
839   for (auto &Block : llvm::drop_begin(F))
840     Blocks.push_back(&Block);
841 
842   SmallSet<WeakTrackingVH, 16> Preds;
843   for (auto &Block : Blocks) {
844     auto *BB = cast_or_null<BasicBlock>(Block);
845     if (!BB)
846       continue;
847     // If the destination block has a single pred, then this is a trivial
848     // edge, just collapse it.
849     BasicBlock *SinglePred = BB->getSinglePredecessor();
850 
851     // Don't merge if BB's address is taken.
852     if (!SinglePred || SinglePred == BB || BB->hasAddressTaken())
853       continue;
854 
855     // Make an effort to skip unreachable blocks.
856     if (DT && !DT->isReachableFromEntry(BB))
857       continue;
858 
859     BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
860     if (Term && !Term->isConditional()) {
861       Changed = true;
862       LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");
863 
864       // Merge BB into SinglePred and delete it.
865       MergeBlockIntoPredecessor(BB, /* DTU */ nullptr, LI, /* MSSAU */ nullptr,
866                                 /* MemDep */ nullptr,
867                                 /* PredecessorWithTwoSuccessors */ false, DT);
868       Preds.insert(SinglePred);
869 
870       if (IsHugeFunc) {
871         // Update FreshBBs to optimize the merged BB.
872         FreshBBs.insert(SinglePred);
873         FreshBBs.erase(BB);
874       }
875     }
876   }
877 
878   // (Repeatedly) merging blocks into their predecessors can create redundant
879   // debug intrinsics.
880   for (const auto &Pred : Preds)
881     if (auto *BB = cast_or_null<BasicBlock>(Pred))
882       RemoveRedundantDbgInstrs(BB);
883 
884   return Changed;
885 }
886 
887 /// Find a destination block from BB if BB is a mergeable empty block.
888 BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
889   // If this block doesn't end with an uncond branch, ignore it.
890   BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
891   if (!BI || !BI->isUnconditional())
892     return nullptr;
893 
894   // If the instruction before the branch (skipping debug info) isn't a phi
895   // node, then other stuff is happening here.
896   BasicBlock::iterator BBI = BI->getIterator();
897   if (BBI != BB->begin()) {
898     --BBI;
899     while (isa<DbgInfoIntrinsic>(BBI)) {
900       if (BBI == BB->begin())
901         break;
902       --BBI;
903     }
904     if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
905       return nullptr;
906   }
907 
908   // Do not break infinite loops.
909   BasicBlock *DestBB = BI->getSuccessor(0);
910   if (DestBB == BB)
911     return nullptr;
912 
913   if (!canMergeBlocks(BB, DestBB))
914     DestBB = nullptr;
915 
916   return DestBB;
917 }
918 
919 /// Eliminate blocks that contain only PHI nodes, debug info directives, and an
920 /// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
921 /// edges in ways that are non-optimal for isel. Start by eliminating these
922 /// blocks so we can split them the way we want them.
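// A typical candidate looks like this (illustrative sketch):
//
//   critedge:                                 ; preds = %then
//     %p = phi i32 [ %x, %then ]
//     br label %merge
//
// i.e. a block containing only PHI nodes (and possibly debug intrinsics)
// followed by an unconditional branch, which can be folded into %merge.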
923 bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
924   SmallPtrSet<BasicBlock *, 16> Preheaders;
925   SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
926   while (!LoopList.empty()) {
927     Loop *L = LoopList.pop_back_val();
928     llvm::append_range(LoopList, *L);
929     if (BasicBlock *Preheader = L->getLoopPreheader())
930       Preheaders.insert(Preheader);
931   }
932 
933   bool MadeChange = false;
934   // Copy blocks into a temporary array to avoid iterator invalidation issues
935   // as we remove them.
936   // Note that this intentionally skips the entry block.
937   SmallVector<WeakTrackingVH, 16> Blocks;
938   for (auto &Block : llvm::drop_begin(F)) {
939     // Delete phi nodes that could block deleting other empty blocks.
940     if (!DisableDeletePHIs)
941       MadeChange |= DeleteDeadPHIs(&Block, TLInfo);
942     Blocks.push_back(&Block);
943   }
944 
945   for (auto &Block : Blocks) {
946     BasicBlock *BB = cast_or_null<BasicBlock>(Block);
947     if (!BB)
948       continue;
949     BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
950     if (!DestBB ||
951         !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
952       continue;
953 
954     eliminateMostlyEmptyBlock(BB);
955     MadeChange = true;
956   }
957   return MadeChange;
958 }
959 
960 bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
961                                                    BasicBlock *DestBB,
962                                                    bool isPreheader) {
963   // Do not delete loop preheaders if doing so would create a critical edge.
964   // Loop preheaders can be good locations to spill registers. If the
965   // preheader is deleted and we create a critical edge, registers may be
966   // spilled in the loop body instead.
967   if (!DisablePreheaderProtect && isPreheader &&
968       !(BB->getSinglePredecessor() &&
969         BB->getSinglePredecessor()->getSingleSuccessor()))
970     return false;
971 
972   // Skip merging if the block's successor is also a successor to any callbr
973   // that leads to this block.
974   // FIXME: Is this really needed? Is this a correctness issue?
975   for (BasicBlock *Pred : predecessors(BB)) {
976     if (isa<CallBrInst>(Pred->getTerminator()) &&
977         llvm::is_contained(successors(Pred), DestBB))
978       return false;
979   }
980 
981   // Try to skip merging if the unique predecessor of BB is terminated by a
982   // switch or indirect branch instruction, and BB is used as an incoming block
983   // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel
984   // to add COPY instructions in the predecessor of BB instead of BB (if it is
985   // not merged). Note that the critical edge created by merging such blocks
986   // won't be split in MachineSink because the jump table is not analyzable. By
987   // keeping such an empty block (BB), ISel will place COPY instructions in BB,
988   // not in the predecessor of BB.
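  // For example (illustrative sketch): if Pred ends in 'switch i32 %x' with BB
  // as one destination, BB contains only 'br label %DestBB', and DestBB starts
  // with 'phi i32 [ %v, %BB ], ...', then keeping BB gives ISel a block on the
  // Pred->DestBB path in which to place the COPY for %v; if BB were merged
  // away, the COPY would be placed in Pred instead.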
989   BasicBlock *Pred = BB->getUniquePredecessor();
990   if (!Pred || !(isa<SwitchInst>(Pred->getTerminator()) ||
991                  isa<IndirectBrInst>(Pred->getTerminator())))
992     return true;
993 
994   if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
995     return true;
996 
997   // We use a simple cost heuristic which determines that skipping merging is
998   // profitable if the cost of skipping merging is less than the cost of
999   // merging: Cost(skipping merging) < Cost(merging BB), where the
1000   // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
1001   // the Cost(merging BB) is Freq(Pred) * Cost(Copy).
1002   // Assuming Cost(Copy) == Cost(Branch), we could simplify it to:
1003   //   Freq(Pred) / Freq(BB) > 2.
1004   // Note that if there are multiple empty blocks sharing the same incoming
1005   // value for the PHIs in DestBB, we consider them together. In such a
1006   // case, Cost(merging BB) will be the sum of their frequencies.
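  // Worked example (hypothetical frequencies, default ratio of 2): with
  // Freq(Pred) = 300 and Freq(BB) = 100, Freq(Pred) / Freq(BB) = 3 > 2, so we
  // return false and keep BB, letting ISel place the COPYs there; with
  // Freq(Pred) = 150 we return true and allow BB to be merged into DestBB.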
1007 
1008   if (!isa<PHINode>(DestBB->begin()))
1009     return true;
1010 
1011   SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;
1012 
1013   // Find all other incoming blocks from which incoming values of all PHIs in
1014   // DestBB are the same as the ones from BB.
1015   for (BasicBlock *DestBBPred : predecessors(DestBB)) {
1016     if (DestBBPred == BB)
1017       continue;
1018 
1019     if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
1020           return DestPN.getIncomingValueForBlock(BB) ==
1021                  DestPN.getIncomingValueForBlock(DestBBPred);
1022         }))
1023       SameIncomingValueBBs.insert(DestBBPred);
1024   }
1025 
1026   // See if all of BB's incoming values are the same as the value from Pred. In
1027   // this case, there is no reason to skip merging because COPYs are expected to
1028   // be placed in Pred already.
1029   if (SameIncomingValueBBs.count(Pred))
1030     return true;
1031 
1032   BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
1033   BlockFrequency BBFreq = BFI->getBlockFreq(BB);
1034 
1035   for (auto *SameValueBB : SameIncomingValueBBs)
1036     if (SameValueBB->getUniquePredecessor() == Pred &&
1037         DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
1038       BBFreq += BFI->getBlockFreq(SameValueBB);
1039 
1040   std::optional<BlockFrequency> Limit = BBFreq.mul(FreqRatioToSkipMerge);
1041   return !Limit || PredFreq <= *Limit;
1042 }
1043 
1044 /// Return true if we can merge BB into DestBB if there is a single
1045 /// unconditional branch between them, and BB contains no other non-phi
1046 /// instructions.
1047 bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
1048                                     const BasicBlock *DestBB) const {
1049   // We only want to eliminate blocks whose phi nodes are used by phi nodes in
1050   // the successor.  If there are more complex conditions (e.g. preheaders),
1051   // don't mess around with them.
1052   for (const PHINode &PN : BB->phis()) {
1053     for (const User *U : PN.users()) {
1054       const Instruction *UI = cast<Instruction>(U);
1055       if (UI->getParent() != DestBB || !isa<PHINode>(UI))
1056         return false;
1057       // If the user is inside DestBB and it is a PHINode, then check the
1058       // incoming value. If the incoming value is not from BB, then this is
1059       // a complex condition (e.g. preheaders) that we want to avoid here.
1060       if (UI->getParent() == DestBB) {
1061         if (const PHINode *UPN = dyn_cast<PHINode>(UI))
1062           for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
1063             Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
1064             if (Insn && Insn->getParent() == BB &&
1065                 Insn->getParent() != UPN->getIncomingBlock(I))
1066               return false;
1067           }
1068       }
1069     }
1070   }
1071 
1072   // If BB and DestBB contain any common predecessors, then the phi nodes in BB
1073   // and DestBB may have conflicting incoming values for the block.  If so, we
1074   // can't merge the block.
1075   const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
1076   if (!DestBBPN)
1077     return true; // no conflict.
1078 
1079   // Collect the preds of BB.
1080   SmallPtrSet<const BasicBlock *, 16> BBPreds;
1081   if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
1082     // It is faster to get preds from a PHI than with pred_iterator.
1083     for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
1084       BBPreds.insert(BBPN->getIncomingBlock(i));
1085   } else {
1086     BBPreds.insert(pred_begin(BB), pred_end(BB));
1087   }
1088 
1089   // Walk the preds of DestBB.
1090   for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
1091     BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
1092     if (BBPreds.count(Pred)) { // Common predecessor?
1093       for (const PHINode &PN : DestBB->phis()) {
1094         const Value *V1 = PN.getIncomingValueForBlock(Pred);
1095         const Value *V2 = PN.getIncomingValueForBlock(BB);
1096 
1097         // If V2 is a phi node in BB, look up what the mapped value will be.
1098         if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
1099           if (V2PN->getParent() == BB)
1100             V2 = V2PN->getIncomingValueForBlock(Pred);
1101 
1102         // If there is a conflict, bail out.
1103         if (V1 != V2)
1104           return false;
1105       }
1106     }
1107   }
1108 
1109   return true;
1110 }
1111 
1112 /// Replace all old uses with new ones, and push the updated BBs into FreshBBs.
1113 static void replaceAllUsesWith(Value *Old, Value *New,
1114                                SmallSet<BasicBlock *, 32> &FreshBBs,
1115                                bool IsHuge) {
1116   auto *OldI = dyn_cast<Instruction>(Old);
1117   if (OldI) {
1118     for (Value::user_iterator UI = OldI->user_begin(), E = OldI->user_end();
1119          UI != E; ++UI) {
1120       Instruction *User = cast<Instruction>(*UI);
1121       if (IsHuge)
1122         FreshBBs.insert(User->getParent());
1123     }
1124   }
1125   Old->replaceAllUsesWith(New);
1126 }
1127 
1128 /// Eliminate a basic block that has only phi's and an unconditional branch in
1129 /// it.
1130 void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
1131   BranchInst *BI = cast<BranchInst>(BB->getTerminator());
1132   BasicBlock *DestBB = BI->getSuccessor(0);
1133 
1134   LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
1135                     << *BB << *DestBB);
1136 
1137   // If the destination block has a single pred, then this is a trivial edge,
1138   // just collapse it.
1139   if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
1140     if (SinglePred != DestBB) {
1141       assert(SinglePred == BB &&
1142              "Single predecessor not the same as predecessor");
1143       // Merge DestBB into SinglePred/BB and delete it.
1144       MergeBlockIntoPredecessor(DestBB);
1145       // Note: BB(=SinglePred) will not be deleted on this path.
1146       // DestBB(=its single successor) is the one that was deleted.
1147       LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");
1148 
1149       if (IsHugeFunc) {
1150         // Update FreshBBs to optimize the merged BB.
1151         FreshBBs.insert(SinglePred);
1152         FreshBBs.erase(DestBB);
1153       }
1154       return;
1155     }
1156   }
1157 
1158   // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
1159   // to handle the new incoming edges it is about to have.
1160   for (PHINode &PN : DestBB->phis()) {
1161     // Remove the incoming value for BB, and remember it.
1162     Value *InVal = PN.removeIncomingValue(BB, false);
1163 
1164     // Two options: either the InVal is a phi node defined in BB or it is some
1165     // value that dominates BB.
1166     PHINode *InValPhi = dyn_cast<PHINode>(InVal);
1167     if (InValPhi && InValPhi->getParent() == BB) {
1168       // Add all of the input values of the input PHI as inputs of this phi.
1169       for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
1170         PN.addIncoming(InValPhi->getIncomingValue(i),
1171                        InValPhi->getIncomingBlock(i));
1172     } else {
1173       // Otherwise, add one instance of the dominating value for each edge that
1174       // we will be adding.
1175       if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
1176         for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
1177           PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
1178       } else {
1179         for (BasicBlock *Pred : predecessors(BB))
1180           PN.addIncoming(InVal, Pred);
1181       }
1182     }
1183   }
1184 
1185   // Preserve loop Metadata.
1186   if (BI->hasMetadata(LLVMContext::MD_loop)) {
1187     for (auto *Pred : predecessors(BB))
1188       Pred->getTerminator()->copyMetadata(*BI, LLVMContext::MD_loop);
1189   }
1190 
1191   // The PHIs are now updated, change everything that refers to BB to use
1192   // DestBB and remove BB.
1193   BB->replaceAllUsesWith(DestBB);
1194   BB->eraseFromParent();
1195   ++NumBlocksElim;
1196 
1197   LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
1198 }
1199 
1200 // Computes a map of base pointer relocation instructions to corresponding
1201 // derived pointer relocation instructions given a vector of all relocate calls
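// For example (hypothetical statepoint indices): relocates with
// (base, derived) index pairs (4, 4), (4, 5) and (4, 6) produce
//   RelocateInstMap[relocate(4, 4)] = { relocate(4, 5), relocate(4, 6) }
// where relocate(4, 4) is the base relocation and the other two are derived.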
1202 static void computeBaseDerivedRelocateMap(
1203     const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
1204     MapVector<GCRelocateInst *, SmallVector<GCRelocateInst *, 0>>
1205         &RelocateInstMap) {
1206   // Collect information in two maps: one primarily for locating the base object
1207   // while filling the second map; the second map is the final structure holding
1208   // a mapping between Base and corresponding Derived relocate calls
1209   MapVector<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
1210   for (auto *ThisRelocate : AllRelocateCalls) {
1211     auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
1212                             ThisRelocate->getDerivedPtrIndex());
1213     RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
1214   }
1215   for (auto &Item : RelocateIdxMap) {
1216     std::pair<unsigned, unsigned> Key = Item.first;
1217     if (Key.first == Key.second)
1218       // Base relocation: nothing to insert
1219       continue;
1220 
1221     GCRelocateInst *I = Item.second;
1222     auto BaseKey = std::make_pair(Key.first, Key.first);
1223 
1224     // We're iterating over RelocateIdxMap so we cannot modify it.
1225     auto MaybeBase = RelocateIdxMap.find(BaseKey);
1226     if (MaybeBase == RelocateIdxMap.end())
1227       // TODO: We might want to insert a new base object relocate and gep off
1228       // that, if there are enough derived object relocates.
1229       continue;
1230 
1231     RelocateInstMap[MaybeBase->second].push_back(I);
1232   }
1233 }
1234 
1235 // Accepts a GEP and extracts the operands into a vector provided they're all
1236 // small integer constants
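// For example (illustrative): for 'gep %base, i32 0, i32 15' this collects
// OffsetV = {0, 15} and returns true, whereas a non-constant index or a
// constant index larger than 20 makes it return false.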
1237 static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
1238                                           SmallVectorImpl<Value *> &OffsetV) {
1239   for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
1240     // Only accept small constant integer operands
1241     auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
1242     if (!Op || Op->getZExtValue() > 20)
1243       return false;
1244   }
1245 
1246   for (unsigned i = 1; i < GEP->getNumOperands(); i++)
1247     OffsetV.push_back(GEP->getOperand(i));
1248   return true;
1249 }
1250 
1251 // Takes a RelocatedBase (base pointer relocation instruction) and Targets to
1252 // replace, computes a replacement, and applies it.
1253 static bool
1254 simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
1255                           const SmallVectorImpl<GCRelocateInst *> &Targets) {
1256   bool MadeChange = false;
1257   // We must ensure that the relocation of the derived pointer is defined after
1258   // the relocation of the base pointer. If we find a relocation corresponding
1259   // to this base that is defined earlier than the base's own relocation, we
1260   // move the base's relocation right before the one we found. We only consider
1261   // relocations in the same basic block as the base's relocation; relocations
1262   // from other basic blocks are skipped by the optimization and ignored.
1263   for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
1264        &*R != RelocatedBase; ++R)
1265     if (auto *RI = dyn_cast<GCRelocateInst>(R))
1266       if (RI->getStatepoint() == RelocatedBase->getStatepoint())
1267         if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
1268           RelocatedBase->moveBefore(RI);
1269           MadeChange = true;
1270           break;
1271         }
1272 
1273   for (GCRelocateInst *ToReplace : Targets) {
1274     assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
1275            "Not relocating a derived object of the original base object");
1276     if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
1277       // A duplicate relocate call. TODO: coalesce duplicates.
1278       continue;
1279     }
1280 
1281     if (RelocatedBase->getParent() != ToReplace->getParent()) {
1282       // Base and derived relocates are in different basic blocks.
1283       // In this case the transform is only valid when the base dominates the
1284       // derived relocate. However, it would be too expensive to check dominance
1285       // for each such relocate, so we skip the whole transformation.
1286       continue;
1287     }
1288 
1289     Value *Base = ToReplace->getBasePtr();
1290     auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
1291     if (!Derived || Derived->getPointerOperand() != Base)
1292       continue;
1293 
1294     SmallVector<Value *, 2> OffsetV;
1295     if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
1296       continue;
1297 
1298     // Create a Builder and replace the target callsite with a gep
1299     assert(RelocatedBase->getNextNode() &&
1300            "Should always have one since it's not a terminator");
1301 
1302     // Insert after RelocatedBase
1303     IRBuilder<> Builder(RelocatedBase->getNextNode());
1304     Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());
1305 
1306     // If gc_relocate does not match the actual type, cast it to the right type.
1307     // In theory, there must be a bitcast after gc_relocate if the type does not
1308     // match, and we should reuse it to get the derived pointer. But there
1309     // could be cases like this:
1310     // bb1:
1311     //  ...
1312     //  %g1 = call coldcc i8 addrspace(1)*
1313     //  @llvm.experimental.gc.relocate.p1i8(...) br label %merge
1314     //
1315     // bb2:
1316     //  ...
1317     //  %g2 = call coldcc i8 addrspace(1)*
1318     //  @llvm.experimental.gc.relocate.p1i8(...) br label %merge
1319     //
1320     // merge:
1321     //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
1322     //  %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
1323     //
1324     // In this case, we cannot find the bitcast anymore, so we insert a new
1325     // bitcast whether or not there is already one. In this way, we can handle
1326     // all cases, and the extra bitcast should be optimized away in later
1327     // passes.
1328     Value *ActualRelocatedBase = RelocatedBase;
1329     if (RelocatedBase->getType() != Base->getType()) {
1330       ActualRelocatedBase =
1331           Builder.CreateBitCast(RelocatedBase, Base->getType());
1332     }
1333     Value *Replacement =
1334         Builder.CreateGEP(Derived->getSourceElementType(), ActualRelocatedBase,
1335                           ArrayRef(OffsetV));
1336     Replacement->takeName(ToReplace);
1337     // If the newly generated derived pointer's type does not match the original
1338     // derived pointer's type, cast the new derived pointer to match it. Same
1339     // reasoning as above.
1340     Value *ActualReplacement = Replacement;
1341     if (Replacement->getType() != ToReplace->getType()) {
1342       ActualReplacement =
1343           Builder.CreateBitCast(Replacement, ToReplace->getType());
1344     }
1345     ToReplace->replaceAllUsesWith(ActualReplacement);
1346     ToReplace->eraseFromParent();
1347 
1348     MadeChange = true;
1349   }
1350   return MadeChange;
1351 }
1352 
1353 // Turns this:
1354 //
1355 // %base = ...
1356 // %ptr = gep %base + 15
1357 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1358 // %base' = relocate(%tok, i32 4, i32 4)
1359 // %ptr' = relocate(%tok, i32 4, i32 5)
1360 // %val = load %ptr'
1361 //
1362 // into this:
1363 //
1364 // %base = ...
1365 // %ptr = gep %base + 15
1366 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1367 // %base' = gc.relocate(%tok, i32 4, i32 4)
1368 // %ptr' = gep %base' + 15
1369 // %val = load %ptr'
1370 bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) {
1371   bool MadeChange = false;
1372   SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
1373   for (auto *U : I.users())
1374     if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
1375       // Collect all the relocate calls associated with a statepoint
1376       AllRelocateCalls.push_back(Relocate);
1377 
1378   // We need at least one base pointer relocation + one derived pointer
1379   // relocation for this transform to apply.
1380   if (AllRelocateCalls.size() < 2)
1381     return false;
1382 
1383   // RelocateInstMap is a mapping from the base relocate instruction to the
1384   // corresponding derived relocate instructions
1385   MapVector<GCRelocateInst *, SmallVector<GCRelocateInst *, 0>> RelocateInstMap;
1386   computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
1387   if (RelocateInstMap.empty())
1388     return false;
1389 
1390   for (auto &Item : RelocateInstMap)
1391     // Item.first is the RelocatedBase to offset against
1392     // Item.second is the vector of Targets to replace
1393     MadeChange |= simplifyRelocatesOffABase(Item.first, Item.second);
1394   return MadeChange;
1395 }
1396 
1397 /// Sink the specified cast instruction into its user blocks.
1398 static bool SinkCast(CastInst *CI) {
1399   BasicBlock *DefBB = CI->getParent();
1400 
1401   /// InsertedCasts - Only insert a cast in each block once.
1402   DenseMap<BasicBlock *, CastInst *> InsertedCasts;
1403 
1404   bool MadeChange = false;
1405   for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
1406        UI != E;) {
1407     Use &TheUse = UI.getUse();
1408     Instruction *User = cast<Instruction>(*UI);
1409 
1410     // Figure out which BB this cast is used in.  For PHI's this is the
1411     // appropriate predecessor block.
1412     BasicBlock *UserBB = User->getParent();
1413     if (PHINode *PN = dyn_cast<PHINode>(User)) {
1414       UserBB = PN->getIncomingBlock(TheUse);
1415     }
1416 
1417     // Preincrement use iterator so we don't invalidate it.
1418     ++UI;
1419 
1420     // The first insertion point of a block containing an EH pad is after the
1421     // pad.  If the pad is the user, we cannot sink the cast past the pad.
1422     if (User->isEHPad())
1423       continue;
1424 
1425     // If the block selected to receive the cast is an EH pad that does not
1426     // allow non-PHI instructions before the terminator, we can't sink the
1427     // cast.
1428     if (UserBB->getTerminator()->isEHPad())
1429       continue;
1430 
1431     // If this user is in the same block as the cast, don't change the cast.
1432     if (UserBB == DefBB)
1433       continue;
1434 
1435     // If we have already inserted a cast into this block, use it.
1436     CastInst *&InsertedCast = InsertedCasts[UserBB];
1437 
1438     if (!InsertedCast) {
1439       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1440       assert(InsertPt != UserBB->end());
1441       InsertedCast = cast<CastInst>(CI->clone());
1442       InsertedCast->insertBefore(*UserBB, InsertPt);
1443     }
1444 
1445     // Replace a use of the cast with a use of the new cast.
1446     TheUse = InsertedCast;
1447     MadeChange = true;
1448     ++NumCastUses;
1449   }
1450 
1451   // If we removed all uses, nuke the cast.
1452   if (CI->use_empty()) {
1453     salvageDebugInfo(*CI);
1454     CI->eraseFromParent();
1455     MadeChange = true;
1456   }
1457 
1458   return MadeChange;
1459 }
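// For illustration, a sketch of what SinkCast does (block and value names are
// invented for the example):
//
//   DefBB:
//     %c = zext i32 %x to i64
//     br i1 %cond, label %UseBB, label %OtherBB
//   UseBB:
//     use(%c)
//
// becomes
//
//   DefBB:
//     br i1 %cond, label %UseBB, label %OtherBB
//   UseBB:
//     %c.sunk = zext i32 %x to i64
//     use(%c.sunk)
//
// so the cast lives in the block that uses it and no value has to stay live
// across the edge just to carry the cast result.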
1460 
1461 /// If the specified cast instruction is a noop copy (e.g. it's casting from
1462 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
1463 /// reduce the number of virtual registers that must be created and coalesced.
1464 ///
1465 /// Return true if any changes are made.
1466 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
1467                                        const DataLayout &DL) {
1468   // Sink only "cheap" (or nop) address-space casts.  This is a weaker condition
1469   // than sinking only nop casts, but is helpful on some platforms.
1470   if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
1471     if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
1472                                  ASC->getDestAddressSpace()))
1473       return false;
1474   }
1475 
1476   // If this is a noop copy,
1477   EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
1478   EVT DstVT = TLI.getValueType(DL, CI->getType());
1479 
1480   // Bail out if this is an fp<->int conversion.
1481   if (SrcVT.isInteger() != DstVT.isInteger())
1482     return false;
1483 
1484   // If this is an extension, it will be a zero or sign extension, which
1485   // isn't a noop.
1486   if (SrcVT.bitsLT(DstVT))
1487     return false;
1488 
1489   // If these values will be promoted, find out what they will be promoted
1490   // to.  This helps us consider truncates on PPC as noop copies when they
1491   // are.
1492   if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
1493       TargetLowering::TypePromoteInteger)
1494     SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
1495   if (TLI.getTypeAction(CI->getContext(), DstVT) ==
1496       TargetLowering::TypePromoteInteger)
1497     DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
1498 
1499   // If, after promotion, these are the same types, this is a noop copy.
1500   if (SrcVT != DstVT)
1501     return false;
1502 
1503   return SinkCast(CI);
1504 }
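// For illustration, a hypothetical example of a "noop copy" in the sense used
// above: on a target that promotes i8 to i32, a truncate such as
//
//   %t = trunc i32 %x to i8
//
// is a plain register copy after type promotion, so it is worth sinking into its
// user blocks via SinkCast rather than keeping its result live across blocks.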
1505 
1506 // Match a simple increment by constant operation.  Note that if a sub is
1507 // matched, the step is negated (as if the step had been canonicalized to
1508 // an add, even though we leave the instruction alone.)
1509 static bool matchIncrement(const Instruction *IVInc, Instruction *&LHS,
1510                            Constant *&Step) {
1511   if (match(IVInc, m_Add(m_Instruction(LHS), m_Constant(Step))) ||
1512       match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::uadd_with_overflow>(
1513                        m_Instruction(LHS), m_Constant(Step)))))
1514     return true;
1515   if (match(IVInc, m_Sub(m_Instruction(LHS), m_Constant(Step))) ||
1516       match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
1517                        m_Instruction(LHS), m_Constant(Step))))) {
1518     Step = ConstantExpr::getNeg(Step);
1519     return true;
1520   }
1521   return false;
1522 }
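// For illustration, increments the matcher above recognizes (names invented for
// the example; the match is on the instruction producing the new value):
//
//   %iv.next = add i32 %iv, 1              ; LHS = %iv, Step = 1
//   %iv.next = sub i32 %iv, 2              ; LHS = %iv, Step = -2 (negated)
//   %pair    = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %iv, i32 4)
//   %iv.next = extractvalue { i32, i1 } %pair, 0   ; LHS = %iv, Step = 4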
1523 
1524 /// If the given \p PN is an induction variable with value IVInc coming from the
1525 /// backedge, and on each iteration it gets increased by Step, return pair
1526 /// <IVInc, Step>. Otherwise, return std::nullopt.
1527 static std::optional<std::pair<Instruction *, Constant *>>
1528 getIVIncrement(const PHINode *PN, const LoopInfo *LI) {
1529   const Loop *L = LI->getLoopFor(PN->getParent());
1530   if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch())
1531     return std::nullopt;
1532   auto *IVInc =
1533       dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch()));
1534   if (!IVInc || LI->getLoopFor(IVInc->getParent()) != L)
1535     return std::nullopt;
1536   Instruction *LHS = nullptr;
1537   Constant *Step = nullptr;
1538   if (matchIncrement(IVInc, LHS, Step) && LHS == PN)
1539     return std::make_pair(IVInc, Step);
1540   return std::nullopt;
1541 }
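// For illustration, the loop shape getIVIncrement expects (a sketch with
// invented names):
//
//   header:
//     %iv = phi i32 [ 0, %preheader ], [ %iv.next, %latch ]
//     ...
//   latch:
//     %iv.next = add nuw i32 %iv, 1
//     br i1 %done, label %exit, label %header
//
// Here the returned pair would be <%iv.next, 1>.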
1542 
1543 static bool isIVIncrement(const Value *V, const LoopInfo *LI) {
1544   auto *I = dyn_cast<Instruction>(V);
1545   if (!I)
1546     return false;
1547   Instruction *LHS = nullptr;
1548   Constant *Step = nullptr;
1549   if (!matchIncrement(I, LHS, Step))
1550     return false;
1551   if (auto *PN = dyn_cast<PHINode>(LHS))
1552     if (auto IVInc = getIVIncrement(PN, LI))
1553       return IVInc->first == I;
1554   return false;
1555 }
1556 
1557 bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO,
1558                                                  Value *Arg0, Value *Arg1,
1559                                                  CmpInst *Cmp,
1560                                                  Intrinsic::ID IID) {
1561   auto IsReplacableIVIncrement = [this, &Cmp](BinaryOperator *BO) {
1562     if (!isIVIncrement(BO, LI))
1563       return false;
1564     const Loop *L = LI->getLoopFor(BO->getParent());
1565     assert(L && "L should not be null after isIVIncrement()");
1566     // Do not risk moving the increment into a child loop.
1567     if (LI->getLoopFor(Cmp->getParent()) != L)
1568       return false;
1569 
1570     // Finally, we need to ensure that the insert point will dominate all
1571     // existing uses of the increment.
1572 
1573     auto &DT = getDT(*BO->getParent()->getParent());
1574     if (DT.dominates(Cmp->getParent(), BO->getParent()))
1575       // If we're moving up the dom tree, all uses are trivially dominated.
1576       // (This is the common case for code produced by LSR.)
1577       return true;
1578 
1579     // Otherwise, special case the single use in the phi recurrence.
1580     return BO->hasOneUse() && DT.dominates(Cmp->getParent(), L->getLoopLatch());
1581   };
1582   if (BO->getParent() != Cmp->getParent() && !IsReplacableIVIncrement(BO)) {
1583     // We used to use a dominator tree here to allow multi-block optimization.
1584     // But that was problematic because:
1585     // 1. It could cause a perf regression by hoisting the math op into the
1586     //    critical path.
1587     // 2. It could cause a perf regression by creating a value that was live
1588     //    across multiple blocks and increasing register pressure.
1589     // 3. Use of a dominator tree could cause large compile-time regression.
1590     //    This is because we recompute the DT on every change in the main CGP
1591     //    run-loop. The recomputing is probably unnecessary in many cases, so if
1592     //    that was fixed, using a DT here would be ok.
1593     //
1594     // There is one important particular case we still want to handle: if BO is
1595     // the IV increment. Important properties that make it profitable:
1596     // - We can speculate IV increment anywhere in the loop (as long as the
1597     //   indvar Phi is its only user);
1598     // - Upon computing Cmp, we effectively compute something equivalent to the
1599     //   IV increment (even though it is expressed differently in the IR). So
1600     //   moving it up to the cmp point does not really increase register pressure.
1601     return false;
1602   }
1603 
1604   // We allow matching the canonical IR (add X, C) back to (usubo X, -C).
1605   if (BO->getOpcode() == Instruction::Add &&
1606       IID == Intrinsic::usub_with_overflow) {
1607     assert(isa<Constant>(Arg1) && "Unexpected input for usubo");
1608     Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1));
1609   }
1610 
1611   // Insert at the first instruction of the pair.
1612   Instruction *InsertPt = nullptr;
1613   for (Instruction &Iter : *Cmp->getParent()) {
1614     // If BO is an XOR, it is not guaranteed that it comes after both inputs to
1615     // the overflow intrinsic are defined.
1616     if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) {
1617       InsertPt = &Iter;
1618       break;
1619     }
1620   }
1621   assert(InsertPt != nullptr && "Parent block did not contain cmp or binop");
1622 
1623   IRBuilder<> Builder(InsertPt);
1624   Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1);
1625   if (BO->getOpcode() != Instruction::Xor) {
1626     Value *Math = Builder.CreateExtractValue(MathOV, 0, "math");
1627     replaceAllUsesWith(BO, Math, FreshBBs, IsHugeFunc);
1628   } else
1629     assert(BO->hasOneUse() &&
1630            "Patterns with XOr should use the BO only in the compare");
1631   Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov");
1632   replaceAllUsesWith(Cmp, OV, FreshBBs, IsHugeFunc);
1633   Cmp->eraseFromParent();
1634   BO->eraseFromParent();
1635   return true;
1636 }
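// For illustration, a sketch of the rewrite performed above for the uaddo case
// (invented names, and assuming the target asked for the overflow form):
//
//   %add = add i32 %a, %b
//   %ov  = icmp ult i32 %add, %a
//
// becomes
//
//   %pair = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
//   %math = extractvalue { i32, i1 } %pair, 0
//   %ov   = extractvalue { i32, i1 } %pair, 1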
1637 
1638 /// Match special-case patterns that check for unsigned add overflow.
1639 static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp,
1640                                                    BinaryOperator *&Add) {
1641   // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val)
1642   // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero)
1643   Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1644 
1645   // We are not expecting non-canonical/degenerate code. Just bail out.
1646   if (isa<Constant>(A))
1647     return false;
1648 
1649   ICmpInst::Predicate Pred = Cmp->getPredicate();
1650   if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes()))
1651     B = ConstantInt::get(B->getType(), 1);
1652   else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt()))
1653     B = Constant::getAllOnesValue(B->getType());
1654   else
1655     return false;
1656 
1657   // Check the users of the variable operand of the compare looking for an add
1658   // with the adjusted constant.
1659   for (User *U : A->users()) {
1660     if (match(U, m_Add(m_Specific(A), m_Specific(B)))) {
1661       Add = cast<BinaryOperator>(U);
1662       return true;
1663     }
1664   }
1665   return false;
1666 }
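// For illustration, the two edge-case patterns described above, written out in
// IR (names invented for the example):
//
//   %add = add i32 %a, 1
//   %ov  = icmp eq i32 %a, -1      ; overflows iff %a is the max unsigned value
//
//   %add = add i32 %a, -1
//   %ov  = icmp ne i32 %a, 0       ; overflows iff %a is non-zero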
1667 
1668 /// Try to combine the compare into a call to the llvm.uadd.with.overflow
1669 /// intrinsic. Return true if any changes were made.
1670 bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp,
1671                                                ModifyDT &ModifiedDT) {
1672   bool EdgeCase = false;
1673   Value *A, *B;
1674   BinaryOperator *Add;
1675   if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) {
1676     if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add))
1677       return false;
1678     // Set A and B in case we match matchUAddWithOverflowConstantEdgeCases.
1679     A = Add->getOperand(0);
1680     B = Add->getOperand(1);
1681     EdgeCase = true;
1682   }
1683 
1684   if (!TLI->shouldFormOverflowOp(ISD::UADDO,
1685                                  TLI->getValueType(*DL, Add->getType()),
1686                                  Add->hasNUsesOrMore(EdgeCase ? 1 : 2)))
1687     return false;
1688 
1689   // We don't want to move around uses of condition values this late, so we
1690   // check if it is legal to create the call to the intrinsic in the basic
1691   // block containing the icmp.
1692   if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse())
1693     return false;
1694 
1695   if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp,
1696                                    Intrinsic::uadd_with_overflow))
1697     return false;
1698 
1699   // Reset callers - do not crash by iterating over a dead instruction.
1700   ModifiedDT = ModifyDT::ModifyInstDT;
1701   return true;
1702 }
1703 
1704 bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp,
1705                                                ModifyDT &ModifiedDT) {
1706   // We are not expecting non-canonical/degenerate code. Just bail out.
1707   Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1708   if (isa<Constant>(A) && isa<Constant>(B))
1709     return false;
1710 
1711   // Convert (A u> B) to (A u< B) to simplify pattern matching.
1712   ICmpInst::Predicate Pred = Cmp->getPredicate();
1713   if (Pred == ICmpInst::ICMP_UGT) {
1714     std::swap(A, B);
1715     Pred = ICmpInst::ICMP_ULT;
1716   }
1717   // Convert special-case: (A == 0) is the same as (A u< 1).
1718   if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) {
1719     B = ConstantInt::get(B->getType(), 1);
1720     Pred = ICmpInst::ICMP_ULT;
1721   }
1722   // Convert special-case: (A != 0) is the same as (0 u< A).
1723   if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) {
1724     std::swap(A, B);
1725     Pred = ICmpInst::ICMP_ULT;
1726   }
1727   if (Pred != ICmpInst::ICMP_ULT)
1728     return false;
1729 
1730   // Walk the users of a variable operand of a compare looking for a subtract or
1731   // add with that same operand. Also match the 2nd operand of the compare to
1732   // the add/sub, but that may be a negated constant operand of an add.
1733   Value *CmpVariableOperand = isa<Constant>(A) ? B : A;
1734   BinaryOperator *Sub = nullptr;
1735   for (User *U : CmpVariableOperand->users()) {
1736     // A - B, A u< B --> usubo(A, B)
1737     if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) {
1738       Sub = cast<BinaryOperator>(U);
1739       break;
1740     }
1741 
1742     // A + (-C), A u< C (canonicalized form of (sub A, C))
1743     const APInt *CmpC, *AddC;
1744     if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) &&
1745         match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) {
1746       Sub = cast<BinaryOperator>(U);
1747       break;
1748     }
1749   }
1750   if (!Sub)
1751     return false;
1752 
1753   if (!TLI->shouldFormOverflowOp(ISD::USUBO,
1754                                  TLI->getValueType(*DL, Sub->getType()),
1755                                  Sub->hasNUsesOrMore(1)))
1756     return false;
1757 
1758   if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1),
1759                                    Cmp, Intrinsic::usub_with_overflow))
1760     return false;
1761 
1762   // Reset callers - do not crash by iterating over a dead instruction.
1763   ModifiedDT = ModifyDT::ModifyInstDT;
1764   return true;
1765 }
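// For illustration, a sketch of what combineToUSubWithOverflow matches (invented
// names, assuming the target reports USUBO as profitable):
//
//   %sub = sub i32 %a, %b
//   %ov  = icmp ult i32 %a, %b
//
// becomes
//
//   %pair = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
//   %sub  = extractvalue { i32, i1 } %pair, 0
//   %ov   = extractvalue { i32, i1 } %pair, 1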
1766 
1767 /// Sink the given CmpInst into user blocks to reduce the number of virtual
1768 /// registers that must be created and coalesced. This is a clear win except on
1769 /// targets with multiple condition code registers (PowerPC), where it might
1770 /// lose; some adjustment may be wanted there.
1771 ///
1772 /// Return true if any changes are made.
1773 static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
1774   if (TLI.hasMultipleConditionRegisters())
1775     return false;
1776 
1777   // Avoid sinking soft-FP comparisons, since this can move them into a loop.
1778   if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
1779     return false;
1780 
1781   // Only insert a cmp in each block once.
1782   DenseMap<BasicBlock *, CmpInst *> InsertedCmps;
1783 
1784   bool MadeChange = false;
1785   for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
1786        UI != E;) {
1787     Use &TheUse = UI.getUse();
1788     Instruction *User = cast<Instruction>(*UI);
1789 
1790     // Preincrement use iterator so we don't invalidate it.
1791     ++UI;
1792 
1793     // Don't bother for PHI nodes.
1794     if (isa<PHINode>(User))
1795       continue;
1796 
1797     // Figure out which BB this cmp is used in.
1798     BasicBlock *UserBB = User->getParent();
1799     BasicBlock *DefBB = Cmp->getParent();
1800 
1801     // If this user is in the same block as the cmp, don't change the cmp.
1802     if (UserBB == DefBB)
1803       continue;
1804 
1805     // If we have already inserted a cmp into this block, use it.
1806     CmpInst *&InsertedCmp = InsertedCmps[UserBB];
1807 
1808     if (!InsertedCmp) {
1809       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1810       assert(InsertPt != UserBB->end());
1811       InsertedCmp = CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(),
1812                                     Cmp->getOperand(0), Cmp->getOperand(1), "");
1813       InsertedCmp->insertBefore(*UserBB, InsertPt);
1814       // Propagate the debug info.
1815       InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
1816     }
1817 
1818     // Replace a use of the cmp with a use of the new cmp.
1819     TheUse = InsertedCmp;
1820     MadeChange = true;
1821     ++NumCmpUses;
1822   }
1823 
1824   // If we removed all uses, nuke the cmp.
1825   if (Cmp->use_empty()) {
1826     Cmp->eraseFromParent();
1827     MadeChange = true;
1828   }
1829 
1830   return MadeChange;
1831 }
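// For illustration, sinkCmpExpression mirrors SinkCast above: a compare whose
// user lives in another block is re-created next to that user, e.g. (sketch
// with invented names)
//
//   DefBB:   %c = icmp slt i32 %x, %y
//   UseBB:   br i1 %c, label %t, label %f
//
// becomes
//
//   UseBB:   %c.sunk = icmp slt i32 %x, %y
//            br i1 %c.sunk, label %t, label %f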
1832 
1833 /// For pattern like:
1834 ///
1835 ///   DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
1836 ///   ...
1837 /// DomBB:
1838 ///   ...
1839 ///   br DomCond, TrueBB, CmpBB
1840 /// CmpBB: (with DomBB being the single predecessor)
1841 ///   ...
1842 ///   Cmp = icmp eq CmpOp0, CmpOp1
1843 ///   ...
1844 ///
1845 /// This would use two comparisons on targets where the lowering of icmp
1846 /// sgt/slt differs from the lowering of icmp eq (PowerPC). This function tries
1847 /// to convert 'Cmp = icmp eq CmpOp0, CmpOp1' to 'Cmp = icmp slt/sgt CmpOp0,
1848 /// CmpOp1'. After that, DomCond and Cmp can use the same comparison, saving
1849 /// one comparison.
1850 ///
1851 /// Return true if any changes are made.
1852 static bool foldICmpWithDominatingICmp(CmpInst *Cmp,
1853                                        const TargetLowering &TLI) {
1854   if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp())
1855     return false;
1856 
1857   ICmpInst::Predicate Pred = Cmp->getPredicate();
1858   if (Pred != ICmpInst::ICMP_EQ)
1859     return false;
1860 
1861   // If icmp eq has users other than BranchInst and SelectInst, converting it to
1862   // icmp slt/sgt would introduce more redundant LLVM IR.
1863   for (User *U : Cmp->users()) {
1864     if (isa<BranchInst>(U))
1865       continue;
1866     if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp)
1867       continue;
1868     return false;
1869   }
1870 
1871   // This is a cheap/incomplete check for dominance - just match a single
1872   // predecessor with a conditional branch.
1873   BasicBlock *CmpBB = Cmp->getParent();
1874   BasicBlock *DomBB = CmpBB->getSinglePredecessor();
1875   if (!DomBB)
1876     return false;
1877 
1878   // We want to ensure that the only way control gets to the comparison of
1879   // interest is that a less/greater than comparison on the same operands is
1880   // false.
1881   Value *DomCond;
1882   BasicBlock *TrueBB, *FalseBB;
1883   if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
1884     return false;
1885   if (CmpBB != FalseBB)
1886     return false;
1887 
1888   Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1);
1889   ICmpInst::Predicate DomPred;
1890   if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1))))
1891     return false;
1892   if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT)
1893     return false;
1894 
1895   // Convert the equality comparison to the opposite of the dominating
1896   // comparison and swap the direction for all branch/select users.
1897   // We have conceptually converted:
1898   // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>;
1899   // to
1900   // Res = (a < b) ? <LT_RES> : (a > b)  ? <GT_RES> : <EQ_RES>;
1901   // And similarly for branches.
1902   for (User *U : Cmp->users()) {
1903     if (auto *BI = dyn_cast<BranchInst>(U)) {
1904       assert(BI->isConditional() && "Must be conditional");
1905       BI->swapSuccessors();
1906       continue;
1907     }
1908     if (auto *SI = dyn_cast<SelectInst>(U)) {
1909       // Swap operands
1910       SI->swapValues();
1911       SI->swapProfMetadata();
1912       continue;
1913     }
1914     llvm_unreachable("Must be a branch or a select");
1915   }
1916   Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred));
1917   return true;
1918 }
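// For illustration, a sketch of the rewrite above (invented names):
//
//   DomBB:
//     %dc = icmp slt i32 %a, %b
//     br i1 %dc, label %TrueBB, label %CmpBB
//   CmpBB:
//     %c = icmp eq i32 %a, %b
//     br i1 %c, label %EqBB, label %NeBB
//
// becomes (the equality compare takes the swapped dominating predicate and the
// branch successors are swapped to preserve behavior):
//
//   CmpBB:
//     %c = icmp sgt i32 %a, %b
//     br i1 %c, label %NeBB, label %EqBB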
1919 
1920 /// Many architectures use the same instruction for both subtract and cmp. Try
1921 /// to swap cmp operands to match subtract operations to allow for CSE.
1922 static bool swapICmpOperandsToExposeCSEOpportunities(CmpInst *Cmp) {
1923   Value *Op0 = Cmp->getOperand(0);
1924   Value *Op1 = Cmp->getOperand(1);
1925   if (!Op0->getType()->isIntegerTy() || isa<Constant>(Op0) ||
1926       isa<Constant>(Op1) || Op0 == Op1)
1927     return false;
1928 
1929   // If a subtract already has the same operands as a compare, swapping would be
1930   // bad. If a subtract has the same operands as a compare but in reverse order,
1931   // then swapping is good.
1932   int GoodToSwap = 0;
1933   unsigned NumInspected = 0;
1934   for (const User *U : Op0->users()) {
1935     // Avoid walking many users.
1936     if (++NumInspected > 128)
1937       return false;
1938     if (match(U, m_Sub(m_Specific(Op1), m_Specific(Op0))))
1939       GoodToSwap++;
1940     else if (match(U, m_Sub(m_Specific(Op0), m_Specific(Op1))))
1941       GoodToSwap--;
1942   }
1943 
1944   if (GoodToSwap > 0) {
1945     Cmp->swapOperands();
1946     return true;
1947   }
1948   return false;
1949 }
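// For illustration, the CSE opportunity the swap above exposes (a sketch with
// invented names):
//
//   %d = sub i32 %b, %a
//   %c = icmp ult i32 %a, %b
//
// Swapping the compare to "icmp ugt i32 %b, %a" lets a target that implements
// compare as a subtract reuse the existing "sub %b, %a".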
1950 
1951 static bool foldFCmpToFPClassTest(CmpInst *Cmp, const TargetLowering &TLI,
1952                                   const DataLayout &DL) {
1953   FCmpInst *FCmp = dyn_cast<FCmpInst>(Cmp);
1954   if (!FCmp)
1955     return false;
1956 
1957   // Don't fold if the target offers free fabs and the predicate is legal.
1958   EVT VT = TLI.getValueType(DL, Cmp->getOperand(0)->getType());
1959   if (TLI.isFAbsFree(VT) &&
1960       TLI.isCondCodeLegal(getFCmpCondCode(FCmp->getPredicate()),
1961                           VT.getSimpleVT()))
1962     return false;
1963 
1964   // Reverse the canonicalization if it is a FP class test
1965   auto ShouldReverseTransform = [](FPClassTest ClassTest) {
1966     return ClassTest == fcInf || ClassTest == (fcInf | fcNan);
1967   };
1968   auto [ClassVal, ClassTest] =
1969       fcmpToClassTest(FCmp->getPredicate(), *FCmp->getParent()->getParent(),
1970                       FCmp->getOperand(0), FCmp->getOperand(1));
1971   if (!ClassVal)
1972     return false;
1973 
1974   if (!ShouldReverseTransform(ClassTest) && !ShouldReverseTransform(~ClassTest))
1975     return false;
1976 
1977   IRBuilder<> Builder(Cmp);
1978   Value *IsFPClass = Builder.createIsFPClass(ClassVal, ClassTest);
1979   Cmp->replaceAllUsesWith(IsFPClass);
1980   RecursivelyDeleteTriviallyDeadInstructions(Cmp);
1981   return true;
1982 }
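// For illustration, the kind of reversal performed above (a sketch; 516 is the
// FPClassTest mask for fcInf under the current encoding):
//
//   %fabs = call double @llvm.fabs.f64(double %x)
//   %cmp  = fcmp oeq double %fabs, 0x7FF0000000000000
//
// becomes
//
//   %cmp  = call i1 @llvm.is.fpclass.f64(double %x, i32 516)   ; fcInf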
1983 
1984 static bool isRemOfLoopIncrementWithLoopInvariant(Instruction *Rem,
1985                                                   const LoopInfo *LI,
1986                                                   Value *&RemAmtOut,
1987                                                   PHINode *&LoopIncrPNOut) {
1988   Value *Incr, *RemAmt;
1989   // NB: If RemAmt is a power of 2 it *should* have been transformed by now.
1990   if (!match(Rem, m_URem(m_Value(Incr), m_Value(RemAmt))))
1991     return false;
1992 
1993   // Find out loop increment PHI.
1994   auto *PN = dyn_cast<PHINode>(Incr);
1995   if (!PN)
1996     return false;
1997 
1998   // This isn't strictly necessary; what we really need is one increment and any
1999   // number of initial values, all of which are the same.
2000   if (PN->getNumIncomingValues() != 2)
2001     return false;
2002 
2003   // Only trivially analyzable loops.
2004   Loop *L = LI->getLoopFor(PN->getParent());
2005   if (!L || !L->getLoopPreheader() || !L->getLoopLatch())
2006     return false;
2007 
2008   // Require that the remainder is in the loop.
2009   if (!L->contains(Rem))
2010     return false;
2011 
2012   // Only works if the remainder amount is loop invariant.
2013   if (!L->isLoopInvariant(RemAmt))
2014     return false;
2015 
2016   // Is the PHI a loop increment?
2017   auto LoopIncrInfo = getIVIncrement(PN, LI);
2018   if (!LoopIncrInfo)
2019     return false;
2020 
2021   // We need remainder_amount % increment_amount to be zero. Increment of one
2022   // satisfies that without any special logic and is overwhelmingly the common
2023   // case.
2024   if (!match(LoopIncrInfo->second, m_One()))
2025     return false;
2026 
2027   // Need the increment to not overflow.
2028   if (!match(LoopIncrInfo->first, m_c_NUWAdd(m_Specific(PN), m_Value())))
2029     return false;
2030 
2031   // Set output variables.
2032   RemAmtOut = RemAmt;
2033   LoopIncrPNOut = PN;
2034 
2035   return true;
2036 }
2037 
2038 // Try to transform:
2039 //
2040 // for(i = Start; i < End; ++i)
2041 //    Rem = (i nuw+ IncrLoopInvariant) u% RemAmtLoopInvariant;
2042 //
2043 // ->
2044 //
2045 // Rem = (Start nuw+ IncrLoopInvariant) % RemAmtLoopInvariant;
2046 // for(i = Start; i < End; ++i, ++rem)
2047 //    Rem = rem == RemAmtLoopInvariant ? 0 : Rem;
2048 //
2049 // Currently only implemented for `IncrLoopInvariant` being zero.
2050 static bool foldURemOfLoopIncrement(Instruction *Rem, const DataLayout *DL,
2051                                     const LoopInfo *LI,
2052                                     SmallSet<BasicBlock *, 32> &FreshBBs,
2053                                     bool IsHuge) {
2054   Value *RemAmt;
2055   PHINode *LoopIncrPN;
2056   if (!isRemOfLoopIncrementWithLoopInvariant(Rem, LI, RemAmt, LoopIncrPN))
2057     return false;
2058 
2059   // Only non-constant remainder as the extra IV is probably not profitable
2060   // in that case.
2061   //
2062   // Potential TODO(1): `urem` of a const ends up as `mul` + `shift` + `add`. If
2063   // we can rule out register pressure and ensure this `urem` is executed each
2064   // iteration, it's probably profitable to handle the const case as well.
2065   //
2066   // Potential TODO(2): Should we have a check for how "nested" this remainder
2067   // operation is? The new code runs every iteration so if the remainder is
2068   // guarded behind unlikely conditions this might not be worth it.
2069   if (match(RemAmt, m_ImmConstant()))
2070     return false;
2071 
2072   Loop *L = LI->getLoopFor(LoopIncrPN->getParent());
2073   Value *Start = LoopIncrPN->getIncomingValueForBlock(L->getLoopPreheader());
2074   // If we can't fully optimize out the `rem`, skip this transform.
2075   Start = simplifyURemInst(Start, RemAmt, *DL);
2076   if (!Start)
2077     return false;
2078 
2079   // Create new remainder with induction variable.
2080   Type *Ty = Rem->getType();
2081   IRBuilder<> Builder(Rem->getContext());
2082 
2083   Builder.SetInsertPoint(LoopIncrPN);
2084   PHINode *NewRem = Builder.CreatePHI(Ty, 2);
2085 
2086   Builder.SetInsertPoint(cast<Instruction>(
2087       LoopIncrPN->getIncomingValueForBlock(L->getLoopLatch())));
2088   // `(add (urem x, y), 1)` is always nuw.
2089   Value *RemAdd = Builder.CreateNUWAdd(NewRem, ConstantInt::get(Ty, 1));
2090   Value *RemCmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, RemAdd, RemAmt);
2091   Value *RemSel =
2092       Builder.CreateSelect(RemCmp, Constant::getNullValue(Ty), RemAdd);
2093 
2094   NewRem->addIncoming(Start, L->getLoopPreheader());
2095   NewRem->addIncoming(RemSel, L->getLoopLatch());
2096 
2097   // Insert all touched BBs.
2098   FreshBBs.insert(LoopIncrPN->getParent());
2099   FreshBBs.insert(L->getLoopLatch());
2100   FreshBBs.insert(Rem->getParent());
2101 
2102   replaceAllUsesWith(Rem, NewRem, FreshBBs, IsHuge);
2103   Rem->eraseFromParent();
2104   return true;
2105 }
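// For illustration, an IR-level sketch of the rewrite above for a loop counting
// from 0 with step 1 (invented names; Start simplifies to 0 here, so the new
// remainder PHI starts at 0):
//
//   loop:
//     %i   = phi i32 [ 0, %preheader ], [ %i.next, %latch ]
//     %rem = urem i32 %i, %n
//     ...
//
// becomes
//
//   loop:
//     %i   = phi i32 [ 0, %preheader ], [ %i.next, %latch ]
//     %rem = phi i32 [ 0, %preheader ], [ %rem.sel, %latch ]
//     ...
//   latch:
//     %rem.add = add nuw i32 %rem, 1
//     %rem.eq  = icmp eq i32 %rem.add, %n
//     %rem.sel = select i1 %rem.eq, i32 0, i32 %rem.add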
2106 
2107 bool CodeGenPrepare::optimizeURem(Instruction *Rem) {
2108   if (foldURemOfLoopIncrement(Rem, DL, LI, FreshBBs, IsHugeFunc))
2109     return true;
2110   return false;
2111 }
2112 
2113 /// Some targets have better codegen for `ctpop(X) u< 2` than `ctpop(X) == 1`.
2114 /// This function converts `ctpop(X) == 1` into `ctpop(X) u< 2` and
2115 /// `ctpop(X) != 1` into `ctpop(X) u> 1` if the result cannot be zero.
2116 static bool adjustIsPower2Test(CmpInst *Cmp, const TargetLowering &TLI,
2117                                const TargetTransformInfo &TTI,
2118                                const DataLayout &DL) {
2119   ICmpInst::Predicate Pred;
2120   if (!match(Cmp, m_ICmp(Pred, m_Intrinsic<Intrinsic::ctpop>(), m_One())))
2121     return false;
2122   if (!ICmpInst::isEquality(Pred))
2123     return false;
2124   auto *II = cast<IntrinsicInst>(Cmp->getOperand(0));
2125 
2126   if (isKnownNonZero(II, DL)) {
2127     if (Pred == ICmpInst::ICMP_EQ) {
2128       Cmp->setOperand(1, ConstantInt::get(II->getType(), 2));
2129       Cmp->setPredicate(ICmpInst::ICMP_ULT);
2130     } else {
2131       Cmp->setPredicate(ICmpInst::ICMP_UGT);
2132     }
2133     return true;
2134   }
2135   return false;
2136 }
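// For illustration, the adjustment above when ctpop's operand is known non-zero
// (so the result is at least 1; names invented for the example):
//
//   %pop = call i32 @llvm.ctpop.i32(i32 %x)
//   %c   = icmp eq i32 %pop, 1     -->   %c = icmp ult i32 %pop, 2
//   %c   = icmp ne i32 %pop, 1     -->   %c = icmp ugt i32 %pop, 1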
2137 
2138 bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT) {
2139   if (sinkCmpExpression(Cmp, *TLI))
2140     return true;
2141 
2142   if (combineToUAddWithOverflow(Cmp, ModifiedDT))
2143     return true;
2144 
2145   if (combineToUSubWithOverflow(Cmp, ModifiedDT))
2146     return true;
2147 
2148   if (foldICmpWithDominatingICmp(Cmp, *TLI))
2149     return true;
2150 
2151   if (swapICmpOperandsToExposeCSEOpportunities(Cmp))
2152     return true;
2153 
2154   if (foldFCmpToFPClassTest(Cmp, *TLI, *DL))
2155     return true;
2156 
2157   if (adjustIsPower2Test(Cmp, *TLI, *TTI, *DL))
2158     return true;
2159 
2160   return false;
2161 }
2162 
2163 /// Duplicate and sink the given 'and' instruction into user blocks where it is
2164 /// used in a compare to allow isel to generate better code for targets where
2165 /// this operation can be combined.
2166 ///
2167 /// Return true if any changes are made.
2168 static bool sinkAndCmp0Expression(Instruction *AndI, const TargetLowering &TLI,
2169                                   SetOfInstrs &InsertedInsts) {
2170   // Double-check that we're not trying to optimize an instruction that was
2171   // already optimized by some other part of this pass.
2172   assert(!InsertedInsts.count(AndI) &&
2173          "Attempting to optimize already optimized and instruction");
2174   (void)InsertedInsts;
2175 
2176   // Nothing to do for single use in same basic block.
2177   if (AndI->hasOneUse() &&
2178       AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
2179     return false;
2180 
2181   // Try to avoid cases where sinking/duplicating is likely to increase register
2182   // pressure.
2183   if (!isa<ConstantInt>(AndI->getOperand(0)) &&
2184       !isa<ConstantInt>(AndI->getOperand(1)) &&
2185       AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
2186     return false;
2187 
2188   for (auto *U : AndI->users()) {
2189     Instruction *User = cast<Instruction>(U);
2190 
2191     // Only sink 'and' feeding icmp with 0.
2192     if (!isa<ICmpInst>(User))
2193       return false;
2194 
2195     auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
2196     if (!CmpC || !CmpC->isZero())
2197       return false;
2198   }
2199 
2200   if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
2201     return false;
2202 
2203   LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
2204   LLVM_DEBUG(AndI->getParent()->dump());
2205 
2206   // Push the 'and' into the same block as the icmp 0.  There should only be
2207   // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
2208   // others, so we don't need to keep track of which BBs we insert into.
2209   for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
2210        UI != E;) {
2211     Use &TheUse = UI.getUse();
2212     Instruction *User = cast<Instruction>(*UI);
2213 
2214     // Preincrement use iterator so we don't invalidate it.
2215     ++UI;
2216 
2217     LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");
2218 
2219     // Keep the 'and' in the same place if the use is already in the same block.
2220     Instruction *InsertPt =
2221         User->getParent() == AndI->getParent() ? AndI : User;
2222     Instruction *InsertedAnd = BinaryOperator::Create(
2223         Instruction::And, AndI->getOperand(0), AndI->getOperand(1), "",
2224         InsertPt->getIterator());
2225     // Propagate the debug info.
2226     InsertedAnd->setDebugLoc(AndI->getDebugLoc());
2227 
2228     // Replace a use of the 'and' with a use of the new 'and'.
2229     TheUse = InsertedAnd;
2230     ++NumAndUses;
2231     LLVM_DEBUG(User->getParent()->dump());
2232   }
2233 
2234   // We removed all uses, nuke the and.
2235   AndI->eraseFromParent();
2236   return true;
2237 }
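// For illustration, the shape sinkAndCmp0Expression handles (a sketch with
// invented names): an 'and' whose only users are "icmp eq ..., 0" in other
// blocks is duplicated next to each such compare,
//
//   DefBB:   %m = and i64 %x, 3
//   UseBB:   %z = icmp eq i64 %m, 0
//
// becomes
//
//   UseBB:   %m.sunk = and i64 %x, 3
//            %z      = icmp eq i64 %m.sunk, 0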
2238 
2239 /// Check if the candidates could be combined with a shift instruction, which
2240 /// includes:
2241 /// 1. Truncate instruction
2242 /// 2. And instruction and the imm is a mask of the low bits:
2243 /// imm & (imm+1) == 0
2244 static bool isExtractBitsCandidateUse(Instruction *User) {
2245   if (!isa<TruncInst>(User)) {
2246     if (User->getOpcode() != Instruction::And ||
2247         !isa<ConstantInt>(User->getOperand(1)))
2248       return false;
2249 
2250     const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();
2251 
2252     if ((Cimm & (Cimm + 1)).getBoolValue())
2253       return false;
2254   }
2255   return true;
2256 }
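// For illustration, users the predicate above treats as bit-extract candidates
// (a sketch):
//
//   %t = trunc i64 %shifted to i16     ; always a candidate
//   %m = and i64 %shifted, 255         ; candidate: 255 & 256 == 0 (low-bit mask)
//   %n = and i64 %shifted, 48          ; not a candidate: 48 & 49 != 0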
2257 
2258 /// Sink both shift and truncate instruction to the use of truncate's BB.
2259 static bool
2260 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
2261                      DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
2262                      const TargetLowering &TLI, const DataLayout &DL) {
2263   BasicBlock *UserBB = User->getParent();
2264   DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
2265   auto *TruncI = cast<TruncInst>(User);
2266   bool MadeChange = false;
2267 
2268   for (Value::user_iterator TruncUI = TruncI->user_begin(),
2269                             TruncE = TruncI->user_end();
2270        TruncUI != TruncE;) {
2271 
2272     Use &TruncTheUse = TruncUI.getUse();
2273     Instruction *TruncUser = cast<Instruction>(*TruncUI);
2274     // Preincrement use iterator so we don't invalidate it.
2275 
2276     ++TruncUI;
2277 
2278     int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
2279     if (!ISDOpcode)
2280       continue;
2281 
2282     // If the use is actually a legal node, there will not be an
2283     // implicit truncate.
2284     // FIXME: always querying the result type is just an
2285     // approximation; some nodes' legality is determined by the
2286     // operand or other means. There's no good way to find out though.
2287     if (TLI.isOperationLegalOrCustom(
2288             ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
2289       continue;
2290 
2291     // Don't bother for PHI nodes.
2292     if (isa<PHINode>(TruncUser))
2293       continue;
2294 
2295     BasicBlock *TruncUserBB = TruncUser->getParent();
2296 
2297     if (UserBB == TruncUserBB)
2298       continue;
2299 
2300     BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
2301     CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
2302 
2303     if (!InsertedShift && !InsertedTrunc) {
2304       BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
2305       assert(InsertPt != TruncUserBB->end());
2306       // Sink the shift
2307       if (ShiftI->getOpcode() == Instruction::AShr)
2308         InsertedShift =
2309             BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "");
2310       else
2311         InsertedShift =
2312             BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "");
2313       InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
2314       InsertedShift->insertBefore(*TruncUserBB, InsertPt);
2315 
2316       // Sink the trunc
2317       BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
2318       TruncInsertPt++;
2319       // It will go ahead of any debug-info.
2320       TruncInsertPt.setHeadBit(true);
2321       assert(TruncInsertPt != TruncUserBB->end());
2322 
2323       InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
2324                                        TruncI->getType(), "");
2325       InsertedTrunc->insertBefore(*TruncUserBB, TruncInsertPt);
2326       InsertedTrunc->setDebugLoc(TruncI->getDebugLoc());
2327 
2328       MadeChange = true;
2329 
2330       TruncTheUse = InsertedTrunc;
2331     }
2332   }
2333   return MadeChange;
2334 }
2335 
2336 /// Sink the shift *right* instruction into user blocks if the uses could
2337 /// potentially be combined with this shift instruction and generate BitExtract
2338 /// instruction. It will only be applied if the architecture supports BitExtract
2339 /// instruction. Here is an example:
2340 /// BB1:
2341 ///   %x.extract.shift = lshr i64 %arg1, 32
2342 /// BB2:
2343 ///   %x.extract.trunc = trunc i64 %x.extract.shift to i16
2344 /// ==>
2345 ///
2346 /// BB2:
2347 ///   %x.extract.shift.1 = lshr i64 %arg1, 32
2348 ///   %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
2349 ///
2350 /// CodeGen will recognize the pattern in BB2 and generate BitExtract
2351 /// instruction.
2352 /// Return true if any changes are made.
2353 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
2354                                 const TargetLowering &TLI,
2355                                 const DataLayout &DL) {
2356   BasicBlock *DefBB = ShiftI->getParent();
2357 
2358   /// Only insert instructions in each block once.
2359   DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
2360 
2361   bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));
2362 
2363   bool MadeChange = false;
2364   for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
2365        UI != E;) {
2366     Use &TheUse = UI.getUse();
2367     Instruction *User = cast<Instruction>(*UI);
2368     // Preincrement use iterator so we don't invalidate it.
2369     ++UI;
2370 
2371     // Don't bother for PHI nodes.
2372     if (isa<PHINode>(User))
2373       continue;
2374 
2375     if (!isExtractBitsCandidateUse(User))
2376       continue;
2377 
2378     BasicBlock *UserBB = User->getParent();
2379 
2380     if (UserBB == DefBB) {
2381       // If the shift and truncate instructions are in the same BB, the use of
2382       // the truncate (TruncUse) may still introduce another truncate if it is
2383       // not legal. In this case, we would like to sink both the shift and the
2384       // truncate instruction to the BB of TruncUse.
2385       // for example:
2386       // BB1:
2387       // i64 shift.result = lshr i64 opnd, imm
2388       // trunc.result = trunc shift.result to i16
2389       //
2390       // BB2:
2391       //   ----> We will have an implicit truncate here if the architecture does
2392       //   not have i16 compare.
2393       // cmp i16 trunc.result, opnd2
2394       //
2395       if (isa<TruncInst>(User) &&
2396           shiftIsLegal
2397           // If the type of the truncate is legal, no truncate will be
2398           // introduced in other basic blocks.
2399           && (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
2400         MadeChange =
2401             SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);
2402 
2403       continue;
2404     }
2405     // If we have already inserted a shift into this block, use it.
2406     BinaryOperator *&InsertedShift = InsertedShifts[UserBB];
2407 
2408     if (!InsertedShift) {
2409       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
2410       assert(InsertPt != UserBB->end());
2411 
2412       if (ShiftI->getOpcode() == Instruction::AShr)
2413         InsertedShift =
2414             BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "");
2415       else
2416         InsertedShift =
2417             BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "");
2418       InsertedShift->insertBefore(*UserBB, InsertPt);
2419       InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
2420 
2421       MadeChange = true;
2422     }
2423 
2424     // Replace a use of the shift with a use of the new shift.
2425     TheUse = InsertedShift;
2426   }
2427 
2428   // If we removed all uses, or there are none, nuke the shift.
2429   if (ShiftI->use_empty()) {
2430     salvageDebugInfo(*ShiftI);
2431     ShiftI->eraseFromParent();
2432     MadeChange = true;
2433   }
2434 
2435   return MadeChange;
2436 }
2437 
2438 /// If counting leading or trailing zeros is an expensive operation and a zero
2439 /// input is defined, add a check for zero to avoid calling the intrinsic.
2440 ///
2441 /// We want to transform:
2442 ///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
2443 ///
2444 /// into:
2445 ///   entry:
2446 ///     %cmpz = icmp eq i64 %A, 0
2447 ///     br i1 %cmpz, label %cond.end, label %cond.false
2448 ///   cond.false:
2449 ///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
2450 ///     br label %cond.end
2451 ///   cond.end:
2452 ///     %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
2453 ///
2454 /// If the transform is performed, return true and set ModifiedDT to true.
2455 static bool despeculateCountZeros(IntrinsicInst *CountZeros,
2456                                   LoopInfo &LI,
2457                                   const TargetLowering *TLI,
2458                                   const DataLayout *DL, ModifyDT &ModifiedDT,
2459                                   SmallSet<BasicBlock *, 32> &FreshBBs,
2460                                   bool IsHugeFunc) {
2461   // If a zero input is undefined, it doesn't make sense to despeculate that.
2462   if (match(CountZeros->getOperand(1), m_One()))
2463     return false;
2464 
2465   // If it's cheap to speculate, there's nothing to do.
2466   Type *Ty = CountZeros->getType();
2467   auto IntrinsicID = CountZeros->getIntrinsicID();
2468   if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz(Ty)) ||
2469       (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz(Ty)))
2470     return false;
2471 
2472   // Only handle legal scalar cases. Anything else requires too much work.
2473   unsigned SizeInBits = Ty->getScalarSizeInBits();
2474   if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
2475     return false;
2476 
2477   // Bail if the value is never zero.
2478   Use &Op = CountZeros->getOperandUse(0);
2479   if (isKnownNonZero(Op, *DL))
2480     return false;
2481 
2482   // The intrinsic will be sunk behind a compare against zero and branch.
2483   BasicBlock *StartBlock = CountZeros->getParent();
2484   BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
2485   if (IsHugeFunc)
2486     FreshBBs.insert(CallBlock);
2487 
2488   // Create another block after the count zero intrinsic. A PHI will be added
2489   // in this block to select the result of the intrinsic or the bit-width
2490   // constant if the input to the intrinsic is zero.
2491   BasicBlock::iterator SplitPt = std::next(BasicBlock::iterator(CountZeros));
2492   // Any debug-info after CountZeros should not be included.
2493   SplitPt.setHeadBit(true);
2494   BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
2495   if (IsHugeFunc)
2496     FreshBBs.insert(EndBlock);
2497 
2498   // Update the LoopInfo. The new blocks are in the same loop as the start
2499   // block.
2500   if (Loop *L = LI.getLoopFor(StartBlock)) {
2501     L->addBasicBlockToLoop(CallBlock, LI);
2502     L->addBasicBlockToLoop(EndBlock, LI);
2503   }
2504 
2505   // Set up a builder to create a compare, conditional branch, and PHI.
2506   IRBuilder<> Builder(CountZeros->getContext());
2507   Builder.SetInsertPoint(StartBlock->getTerminator());
2508   Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());
2509 
2510   // Replace the unconditional branch that was created by the first split with
2511   // a compare against zero and a conditional branch.
2512   Value *Zero = Constant::getNullValue(Ty);
2513   // Avoid introducing branch on poison. This also replaces the ctz operand.
2514   if (!isGuaranteedNotToBeUndefOrPoison(Op))
2515     Op = Builder.CreateFreeze(Op, Op->getName() + ".fr");
2516   Value *Cmp = Builder.CreateICmpEQ(Op, Zero, "cmpz");
2517   Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
2518   StartBlock->getTerminator()->eraseFromParent();
2519 
2520   // Create a PHI in the end block to select either the output of the intrinsic
2521   // or the bit width of the operand.
2522   Builder.SetInsertPoint(EndBlock, EndBlock->begin());
2523   PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
2524   replaceAllUsesWith(CountZeros, PN, FreshBBs, IsHugeFunc);
2525   Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
2526   PN->addIncoming(BitWidth, StartBlock);
2527   PN->addIncoming(CountZeros, CallBlock);
2528 
2529   // We are explicitly handling the zero case, so we can set the intrinsic's
2530   // undefined zero argument to 'true'. This will also prevent reprocessing the
2531   // intrinsic; we only despeculate when a zero input is defined.
2532   CountZeros->setArgOperand(1, Builder.getTrue());
2533   ModifiedDT = ModifyDT::ModifyBBDT;
2534   return true;
2535 }
2536 
2537 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) {
2538   BasicBlock *BB = CI->getParent();
2539 
2540   // Lower inline assembly if we can.
2541   // If we found an inline asm expression, and if the target knows how to
2542   // lower it to normal LLVM code, do so now.
2543   if (CI->isInlineAsm()) {
2544     if (TLI->ExpandInlineAsm(CI)) {
2545       // Avoid invalidating the iterator.
2546       CurInstIterator = BB->begin();
2547       // Avoid processing instructions out of order, which could cause
2548       // reuse before a value is defined.
2549       SunkAddrs.clear();
2550       return true;
2551     }
2552     // Sink address computing for memory operands into the block.
2553     if (optimizeInlineAsmInst(CI))
2554       return true;
2555   }
2556 
2557   // Align the pointer arguments to this call if the target thinks it's a good
2558   // idea
2559   unsigned MinSize;
2560   Align PrefAlign;
2561   if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
2562     for (auto &Arg : CI->args()) {
2563       // We want to align both objects whose address is used directly and
2564       // objects whose address is used in casts and GEPs, though it only makes
2565       // sense for GEPs if the offset is a multiple of the desired alignment and
2566       // if size - offset meets the size threshold.
2567       if (!Arg->getType()->isPointerTy())
2568         continue;
2569       APInt Offset(DL->getIndexSizeInBits(
2570                        cast<PointerType>(Arg->getType())->getAddressSpace()),
2571                    0);
2572       Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
2573       uint64_t Offset2 = Offset.getLimitedValue();
2574       if (!isAligned(PrefAlign, Offset2))
2575         continue;
2576       AllocaInst *AI;
2577       if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlign() < PrefAlign &&
2578           DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
2579         AI->setAlignment(PrefAlign);
2580       // Global variables can only be aligned if they are defined in this
2581       // object (i.e. they are uniquely initialized in this object), and
2582       // over-aligning global variables that have an explicit section is
2583       // forbidden.
2584       GlobalVariable *GV;
2585       if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
2586           GV->getPointerAlignment(*DL) < PrefAlign &&
2587           DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2)
2588         GV->setAlignment(PrefAlign);
2589     }
2590   }
2591   // If this is a memcpy (or similar) then we may be able to improve the
2592   // alignment.
2593   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
2594     Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
2595     MaybeAlign MIDestAlign = MI->getDestAlign();
2596     if (!MIDestAlign || DestAlign > *MIDestAlign)
2597       MI->setDestAlignment(DestAlign);
2598     if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
2599       MaybeAlign MTISrcAlign = MTI->getSourceAlign();
2600       Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
2601       if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
2602         MTI->setSourceAlignment(SrcAlign);
2603     }
2604   }
2605 
2606   // If we have a cold call site, try to sink addressing computation into the
2607   // cold block.  This interacts with our handling for loads and stores to
2608   // ensure that we can fold all uses of a potential addressing computation
2609   // into their uses.  TODO: generalize this to work over profiling data
2610   if (CI->hasFnAttr(Attribute::Cold) &&
2611       !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
2612     for (auto &Arg : CI->args()) {
2613       if (!Arg->getType()->isPointerTy())
2614         continue;
2615       unsigned AS = Arg->getType()->getPointerAddressSpace();
2616       if (optimizeMemoryInst(CI, Arg, Arg->getType(), AS))
2617         return true;
2618     }
2619 
2620   IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
2621   if (II) {
2622     switch (II->getIntrinsicID()) {
2623     default:
2624       break;
2625     case Intrinsic::assume:
2626       llvm_unreachable("llvm.assume should have been removed already");
2627     case Intrinsic::allow_runtime_check:
2628     case Intrinsic::allow_ubsan_check:
2629     case Intrinsic::experimental_widenable_condition: {
2630       // Give up on future widening opportunities so that we can fold away dead
2631       // paths and merge blocks before going into block-local instruction
2632       // selection.
2633       if (II->use_empty()) {
2634         II->eraseFromParent();
2635         return true;
2636       }
2637       Constant *RetVal = ConstantInt::getTrue(II->getContext());
2638       resetIteratorIfInvalidatedWhileCalling(BB, [&]() {
2639         replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
2640       });
2641       return true;
2642     }
2643     case Intrinsic::objectsize:
2644       llvm_unreachable("llvm.objectsize.* should have been lowered already");
2645     case Intrinsic::is_constant:
2646       llvm_unreachable("llvm.is.constant.* should have been lowered already");
2647     case Intrinsic::aarch64_stlxr:
2648     case Intrinsic::aarch64_stxr: {
2649       ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
2650       if (!ExtVal || !ExtVal->hasOneUse() ||
2651           ExtVal->getParent() == CI->getParent())
2652         return false;
2653       // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
2654       ExtVal->moveBefore(CI);
2655       // Mark this instruction as "inserted by CGP", so that other
2656       // optimizations don't touch it.
2657       InsertedInsts.insert(ExtVal);
2658       return true;
2659     }
2660 
2661     case Intrinsic::launder_invariant_group:
2662     case Intrinsic::strip_invariant_group: {
2663       Value *ArgVal = II->getArgOperand(0);
2664       auto it = LargeOffsetGEPMap.find(II);
2665       if (it != LargeOffsetGEPMap.end()) {
2666         // Merge entries in LargeOffsetGEPMap to reflect the RAUW.
2667         // Make sure not to have to deal with iterator invalidation
2668         // after possibly adding ArgVal to LargeOffsetGEPMap.
2669         auto GEPs = std::move(it->second);
2670         LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end());
2671         LargeOffsetGEPMap.erase(II);
2672       }
2673 
2674       replaceAllUsesWith(II, ArgVal, FreshBBs, IsHugeFunc);
2675       II->eraseFromParent();
2676       return true;
2677     }
2678     case Intrinsic::cttz:
2679     case Intrinsic::ctlz:
2680       // If counting zeros is expensive, try to avoid it.
2681       return despeculateCountZeros(II, *LI, TLI, DL, ModifiedDT, FreshBBs,
2682                                    IsHugeFunc);
2683     case Intrinsic::fshl:
2684     case Intrinsic::fshr:
2685       return optimizeFunnelShift(II);
2686     case Intrinsic::dbg_assign:
2687     case Intrinsic::dbg_value:
2688       return fixupDbgValue(II);
2689     case Intrinsic::masked_gather:
2690       return optimizeGatherScatterInst(II, II->getArgOperand(0));
2691     case Intrinsic::masked_scatter:
2692       return optimizeGatherScatterInst(II, II->getArgOperand(1));
2693     }
2694 
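         // For target intrinsics that access memory, ask the target which operands
         // are pointers and try to sink those address computations as well.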
2695     SmallVector<Value *, 2> PtrOps;
2696     Type *AccessTy;
2697     if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
2698       while (!PtrOps.empty()) {
2699         Value *PtrVal = PtrOps.pop_back_val();
2700         unsigned AS = PtrVal->getType()->getPointerAddressSpace();
2701         if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
2702           return true;
2703       }
2704   }
2705 
2706   // From here on out we're working with named functions.
2707   auto *Callee = CI->getCalledFunction();
2708   if (!Callee)
2709     return false;
2710 
2711   // Lower all default uses of _chk calls.  This is very similar
2712   // to what InstCombineCalls does, but here we are only lowering calls
2713   // to fortified library functions (e.g. __memcpy_chk) that have the default
2714   // "don't know" as the objectsize.  Anything else should be left alone.
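       // For example, a call like __memcpy_chk(dst, src, n, -1), where the object
       // size argument is the unknown value -1, is lowered to a plain memcpy.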
2715   FortifiedLibCallSimplifier Simplifier(TLInfo, true);
2716   IRBuilder<> Builder(CI);
2717   if (Value *V = Simplifier.optimizeCall(CI, Builder)) {
2718     replaceAllUsesWith(CI, V, FreshBBs, IsHugeFunc);
2719     CI->eraseFromParent();
2720     return true;
2721   }
2722 
2723   // SCCP may have propagated, among other things, C++ static variables across
2724   // calls. If this happens to be the case, we may want to undo it in order to
2725   // avoid redundant pointer computation of the constant, as the function
2726   // returning the constant needs to be executed anyway.
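       // For example, if @getGlobal() always returns @g and SCCP has rewritten
       // later uses of the call result to refer to @g directly, uses of @g that
       // follow the call in the same block can be redirected back to the call
       // result (the names here are illustrative).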
2727   auto GetUniformReturnValue = [](const Function *F) -> GlobalVariable * {
2728     if (!F->getReturnType()->isPointerTy())
2729       return nullptr;
2730 
2731     GlobalVariable *UniformValue = nullptr;
2732     for (auto &BB : *F) {
2733       if (auto *RI = dyn_cast<ReturnInst>(BB.getTerminator())) {
2734         if (auto *V = dyn_cast<GlobalVariable>(RI->getReturnValue())) {
2735           if (!UniformValue)
2736             UniformValue = V;
2737           else if (V != UniformValue)
2738             return nullptr;
2739         } else {
2740           return nullptr;
2741         }
2742       }
2743     }
2744 
2745     return UniformValue;
2746   };
2747 
2748   if (Callee->hasExactDefinition()) {
2749     if (GlobalVariable *RV = GetUniformReturnValue(Callee)) {
2750       bool MadeChange = false;
2751       for (Use &U : make_early_inc_range(RV->uses())) {
2752         auto *I = dyn_cast<Instruction>(U.getUser());
2753         if (!I || I->getParent() != CI->getParent()) {
2754           // Limit to the same basic block to avoid extending the call-site live
2755           // range, which otherwise could increase register pressure.
2756           continue;
2757         }
2758         if (CI->comesBefore(I)) {
2759           U.set(CI);
2760           MadeChange = true;
2761         }
2762       }
2763 
2764       return MadeChange;
2765     }
2766   }
2767 
2768   return false;
2769 }
2770 
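     /// Return true if \p CI is a call to a memory intrinsic (memset, memcpy,
     /// memmove) or to a library function (strcpy, strncpy, strcat, strncat)
     /// whose destination is its first argument; such calls may be emitted as
     /// tail calls even though their own return value is unused.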
2771 static bool isIntrinsicOrLFToBeTailCalled(const TargetLibraryInfo *TLInfo,
2772                                           const CallInst *CI) {
2773   assert(CI && CI->use_empty());
2774 
2775   if (const auto *II = dyn_cast<IntrinsicInst>(CI))
2776     switch (II->getIntrinsicID()) {
2777     case Intrinsic::memset:
2778     case Intrinsic::memcpy:
2779     case Intrinsic::memmove:
2780       return true;
2781     default:
2782       return false;
2783     }
2784 
2785   LibFunc LF;
2786   Function *Callee = CI->getCalledFunction();
2787   if (Callee && TLInfo && TLInfo->getLibFunc(*Callee, LF))
2788     switch (LF) {
2789     case LibFunc_strcpy:
2790     case LibFunc_strncpy:
2791     case LibFunc_strcat:
2792     case LibFunc_strncat:
2793       return true;
2794     default:
2795       return false;
2796     }
2797 
2798   return false;
2799 }
2800 
2801 /// Look for opportunities to duplicate return instructions to the predecessor
2802 /// to enable tail call optimizations. The case it is currently looking for is
2803 /// the following one. Known intrinsics or library functions that may be tail
2804 /// called are taken into account as well.
2805 /// @code
2806 /// bb0:
2807 ///   %tmp0 = tail call i32 @f0()
2808 ///   br label %return
2809 /// bb1:
2810 ///   %tmp1 = tail call i32 @f1()
2811 ///   br label %return
2812 /// bb2:
2813 ///   %tmp2 = tail call i32 @f2()
2814 ///   br label %return
2815 /// return:
2816 ///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
2817 ///   ret i32 %retval
2818 /// @endcode
2819 ///
2820 /// =>
2821 ///
2822 /// @code
2823 /// bb0:
2824 ///   %tmp0 = tail call i32 @f0()
2825 ///   ret i32 %tmp0
2826 /// bb1:
2827 ///   %tmp1 = tail call i32 @f1()
2828 ///   ret i32 %tmp1
2829 /// bb2:
2830 ///   %tmp2 = tail call i32 @f2()
2831 ///   ret i32 %tmp2
2832 /// @endcode
2833 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB,
2834                                                 ModifyDT &ModifiedDT) {
2835   if (!BB->getTerminator())
2836     return false;
2837 
2838   ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
2839   if (!RetI)
2840     return false;
2841 
2842   assert(LI->getLoopFor(BB) == nullptr && "A return block cannot be in a loop");
2843 
2844   PHINode *PN = nullptr;
2845   ExtractValueInst *EVI = nullptr;
2846   BitCastInst *BCI = nullptr;
2847   Value *V = RetI->getReturnValue();
2848   if (V) {
2849     BCI = dyn_cast<BitCastInst>(V);
2850     if (BCI)
2851       V = BCI->getOperand(0);
2852 
2853     EVI = dyn_cast<ExtractValueInst>(V);
2854     if (EVI) {
2855       V = EVI->getOperand(0);
2856       if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; }))
2857         return false;
2858     }
2859 
2860     PN = dyn_cast<PHINode>(V);
2861   }
2862 
2863   if (PN && PN->getParent() != BB)
2864     return false;
2865 
2866   auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) {
2867     const BitCastInst *BC = dyn_cast<BitCastInst>(Inst);
2868     if (BC && BC->hasOneUse())
2869       Inst = BC->user_back();
2870 
2871     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
2872       return II->getIntrinsicID() == Intrinsic::lifetime_end;
2873     return false;
2874   };
2875 
2876   SmallVector<const IntrinsicInst *, 4> FakeUses;
2877 
2878   auto isFakeUse = [&FakeUses](const Instruction *Inst) {
2879     if (auto *II = dyn_cast<IntrinsicInst>(Inst);
2880         II && II->getIntrinsicID() == Intrinsic::fake_use) {
2881       // Record the instruction so it can be preserved when the exit block is
2882       // removed. Fake uses whose operand is the result of a PHI node are not
2883       // recorded, so they are neither preserved nor copied into the return
2884       // blocks.
2885       // FIXME: If we do want to copy the fake use into the return blocks, we
2886       // have to figure out which of the PHI node operands to use for each
2887       // copy.
2888       if (!isa<PHINode>(II->getOperand(0))) {
2889         FakeUses.push_back(II);
2890       }
2891       return true;
2892     }
2893 
2894     return false;
2895   };
2896 
2897   // Make sure there are no instructions between the first instruction
2898   // and return.
2899   const Instruction *BI = BB->getFirstNonPHI();
2900   // Skip over debug info, pseudo probes, lifetime ends, fake uses and the bitcast.
2901   while (isa<DbgInfoIntrinsic>(BI) || BI == BCI || BI == EVI ||
2902          isa<PseudoProbeInst>(BI) || isLifetimeEndOrBitCastFor(BI) ||
2903          isFakeUse(BI))
2904     BI = BI->getNextNode();
2905   if (BI != RetI)
2906     return false;
2907 
2908   /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
2909   /// call.
2910   const Function *F = BB->getParent();
2911   SmallVector<BasicBlock *, 4> TailCallBBs;
2912   // Record the call instructions so we can insert any fake uses
2913   // that need to be preserved before them.
2914   SmallVector<CallInst *, 4> CallInsts;
2915   if (PN) {
2916     for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
2917       // Look through bitcasts.
2918       Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts();
2919       CallInst *CI = dyn_cast<CallInst>(IncomingVal);
2920       BasicBlock *PredBB = PN->getIncomingBlock(I);
2921       // Make sure the phi value is indeed produced by the tail call.
2922       if (CI && CI->hasOneUse() && CI->getParent() == PredBB &&
2923           TLI->mayBeEmittedAsTailCall(CI) &&
2924           attributesPermitTailCall(F, CI, RetI, *TLI)) {
2925         TailCallBBs.push_back(PredBB);
2926         CallInsts.push_back(CI);
2927       } else {
2928         // Consider the cases in which the phi value is indirectly produced by
2929         // the tail call, for example when encountering memset(), memmove(),
2930         // strcpy(), whose return value may have been optimized out. In such
2931         // cases, the value needs to be the first function argument.
2932         //
2933         // bb0:
2934         //   tail call void @llvm.memset.p0.i64(ptr %0, i8 0, i64 %1)
2935         //   br label %return
2936         // return:
2937         //   %phi = phi ptr [ %0, %bb0 ], [ %2, %entry ]
2938         if (PredBB && PredBB->getSingleSuccessor() == BB)
2939           CI = dyn_cast_or_null<CallInst>(
2940               PredBB->getTerminator()->getPrevNonDebugInstruction(true));
2941 
2942         if (CI && CI->use_empty() &&
2943             isIntrinsicOrLFToBeTailCalled(TLInfo, CI) &&
2944             IncomingVal == CI->getArgOperand(0) &&
2945             TLI->mayBeEmittedAsTailCall(CI) &&
2946             attributesPermitTailCall(F, CI, RetI, *TLI)) {
2947           TailCallBBs.push_back(PredBB);
2948           CallInsts.push_back(CI);
2949         }
2950       }
2951     }
2952   } else {
2953     SmallPtrSet<BasicBlock *, 4> VisitedBBs;
2954     for (BasicBlock *Pred : predecessors(BB)) {
2955       if (!VisitedBBs.insert(Pred).second)
2956         continue;
2957       if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) {
2958         CallInst *CI = dyn_cast<CallInst>(I);
2959         if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
2960             attributesPermitTailCall(F, CI, RetI, *TLI)) {
2961           // Either we return void or the return value must be the first
2962           // argument of a known intrinsic or library function.
2963           if (!V || isa<UndefValue>(V) ||
2964               (isIntrinsicOrLFToBeTailCalled(TLInfo, CI) &&
2965                V == CI->getArgOperand(0))) {
2966             TailCallBBs.push_back(Pred);
2967             CallInsts.push_back(CI);
2968           }
2969         }
2970       }
2971     }
2972   }
2973 
2974   bool Changed = false;
2975   for (auto const &TailCallBB : TailCallBBs) {
2976     // Make sure the call instruction is followed by an unconditional branch to
2977     // the return block.
2978     BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator());
2979     if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
2980       continue;
2981 
2982     // Duplicate the return into TailCallBB.
2983     (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB);
2984     assert(!VerifyBFIUpdates ||
2985            BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB));
2986     BFI->setBlockFreq(BB,
2987                       (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)));
2988     ModifiedDT = ModifyDT::ModifyBBDT;
2989     Changed = true;
2990     ++NumRetsDup;
2991   }
2992 
2993   // If we eliminated all predecessors of the block, delete the block now.
2994   if (Changed && !BB->hasAddressTaken() && pred_empty(BB)) {
2995     // Copy the fake uses found in the original return block to all blocks
2996     // that contain tail calls.
2997     for (auto *CI : CallInsts) {
2998       for (auto const *FakeUse : FakeUses) {
2999         auto *ClonedInst = FakeUse->clone();
3000         ClonedInst->insertBefore(CI);
3001       }
3002     }
3003     BB->eraseFromParent();
3004   }
3005 
3006   return Changed;
3007 }
3008 
3009 //===----------------------------------------------------------------------===//
3010 // Memory Optimization
3011 //===----------------------------------------------------------------------===//
3012 
3013 namespace {
3014 
3015 /// This is an extended version of TargetLowering::AddrMode
3016 /// which holds actual Value*'s for register values.
3017 struct ExtAddrMode : public TargetLowering::AddrMode {
3018   Value *BaseReg = nullptr;
3019   Value *ScaledReg = nullptr;
3020   Value *OriginalValue = nullptr;
3021   bool InBounds = true;
3022 
3023   enum FieldName {
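       // The enumerators form a bitmask: compare() ORs together the fields that
       // differ and uses popcount to detect when more than one field differs.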
3024     NoField = 0x00,
3025     BaseRegField = 0x01,
3026     BaseGVField = 0x02,
3027     BaseOffsField = 0x04,
3028     ScaledRegField = 0x08,
3029     ScaleField = 0x10,
3030     MultipleFields = 0xff
3031   };
3032 
3033   ExtAddrMode() = default;
3034 
3035   void print(raw_ostream &OS) const;
3036   void dump() const;
3037 
3038   FieldName compare(const ExtAddrMode &other) {
3039     // First check that the types are the same on each field, as differing types
3040     // are something we can't cope with later on.
3041     if (BaseReg && other.BaseReg &&
3042         BaseReg->getType() != other.BaseReg->getType())
3043       return MultipleFields;
3044     if (BaseGV && other.BaseGV && BaseGV->getType() != other.BaseGV->getType())
3045       return MultipleFields;
3046     if (ScaledReg && other.ScaledReg &&
3047         ScaledReg->getType() != other.ScaledReg->getType())
3048       return MultipleFields;
3049 
3050     // Conservatively reject 'inbounds' mismatches.
3051     if (InBounds != other.InBounds)
3052       return MultipleFields;
3053 
3054     // Check each field to see if it differs.
3055     unsigned Result = NoField;
3056     if (BaseReg != other.BaseReg)
3057       Result |= BaseRegField;
3058     if (BaseGV != other.BaseGV)
3059       Result |= BaseGVField;
3060     if (BaseOffs != other.BaseOffs)
3061       Result |= BaseOffsField;
3062     if (ScaledReg != other.ScaledReg)
3063       Result |= ScaledRegField;
3064     // Don't count 0 as being a different scale, because that actually means
3065     // unscaled (which will already be counted by having no ScaledReg).
3066     if (Scale && other.Scale && Scale != other.Scale)
3067       Result |= ScaleField;
3068 
3069     if (llvm::popcount(Result) > 1)
3070       return MultipleFields;
3071     else
3072       return static_cast<FieldName>(Result);
3073   }
3074 
3075   // An AddrMode is trivial if it involves no calculation i.e. it is just a base
3076   // with no offset.
3077   bool isTrivial() {
3078     // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is
3079     // trivial if at most one of these terms is nonzero, except that BaseGV and
3080     // BaseReg both being zero actually means a null pointer value, which we
3081     // consider to be 'non-zero' here.
3082     return !BaseOffs && !Scale && !(BaseGV && BaseReg);
3083   }
3084 
3085   Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) {
3086     switch (Field) {
3087     default:
3088       return nullptr;
3089     case BaseRegField:
3090       return BaseReg;
3091     case BaseGVField:
3092       return BaseGV;
3093     case ScaledRegField:
3094       return ScaledReg;
3095     case BaseOffsField:
3096       return ConstantInt::get(IntPtrTy, BaseOffs);
3097     }
3098   }
3099 
3100   void SetCombinedField(FieldName Field, Value *V,
3101                         const SmallVectorImpl<ExtAddrMode> &AddrModes) {
3102     switch (Field) {
3103     default:
3104       llvm_unreachable("Unhandled fields are expected to be rejected earlier");
3105       break;
3106     case ExtAddrMode::BaseRegField:
3107       BaseReg = V;
3108       break;
3109     case ExtAddrMode::BaseGVField:
3110       // A combined BaseGV is an Instruction, not a GlobalValue, so it goes
3111       // in the BaseReg field.
3112       assert(BaseReg == nullptr);
3113       BaseReg = V;
3114       BaseGV = nullptr;
3115       break;
3116     case ExtAddrMode::ScaledRegField:
3117       ScaledReg = V;
3118       // If we have a mix of scaled and unscaled addrmodes then we want scale
3119       // to be the scale and not zero.
3120       if (!Scale)
3121         for (const ExtAddrMode &AM : AddrModes)
3122           if (AM.Scale) {
3123             Scale = AM.Scale;
3124             break;
3125           }
3126       break;
3127     case ExtAddrMode::BaseOffsField:
3128       // The offset is no longer a constant, so it goes in ScaledReg with a
3129       // scale of 1.
3130       assert(ScaledReg == nullptr);
3131       ScaledReg = V;
3132       Scale = 1;
3133       BaseOffs = 0;
3134       break;
3135     }
3136   }
3137 };
3138 
3139 #ifndef NDEBUG
3140 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
3141   AM.print(OS);
3142   return OS;
3143 }
3144 #endif
3145 
3146 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3147 void ExtAddrMode::print(raw_ostream &OS) const {
3148   bool NeedPlus = false;
3149   OS << "[";
3150   if (InBounds)
3151     OS << "inbounds ";
3152   if (BaseGV) {
3153     OS << "GV:";
3154     BaseGV->printAsOperand(OS, /*PrintType=*/false);
3155     NeedPlus = true;
3156   }
3157 
3158   if (BaseOffs) {
3159     OS << (NeedPlus ? " + " : "") << BaseOffs;
3160     NeedPlus = true;
3161   }
3162 
3163   if (BaseReg) {
3164     OS << (NeedPlus ? " + " : "") << "Base:";
3165     BaseReg->printAsOperand(OS, /*PrintType=*/false);
3166     NeedPlus = true;
3167   }
3168   if (Scale) {
3169     OS << (NeedPlus ? " + " : "") << Scale << "*";
3170     ScaledReg->printAsOperand(OS, /*PrintType=*/false);
3171   }
3172 
3173   OS << ']';
3174 }
3175 
3176 LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
3177   print(dbgs());
3178   dbgs() << '\n';
3179 }
3180 #endif
3181 
3182 } // end anonymous namespace
3183 
3184 namespace {
3185 
3186 /// This class provides transaction based operation on the IR.
3187 /// Every change made through this class is recorded in the internal state and
3188 /// can be undone (rollback) until commit is called.
3189 /// CGP does not check if instructions could be speculatively executed when
3190 /// moved. Preserving the original location would pessimize the debugging
3191 /// experience, as well as negatively impact the quality of sample PGO.
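     /// Typical usage (illustrative):
     /// @code
     ///   TypePromotionTransaction TPT(RemovedInsts);
     ///   auto RestorePt = TPT.getRestorationPoint();
     ///   TPT.mutateType(Inst, NewTy); // speculative change
     ///   if (!Profitable)
     ///     TPT.rollback(RestorePt);   // undo everything since RestorePt
     ///   else
     ///     TPT.commit();              // keep all recorded changes
     /// @endcode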
3192 class TypePromotionTransaction {
3193   /// This represents the common interface of the individual transaction.
3194   /// Each class implements the logic for doing one specific modification on
3195   /// the IR via the TypePromotionTransaction.
3196   class TypePromotionAction {
3197   protected:
3198     /// The Instruction modified.
3199     Instruction *Inst;
3200 
3201   public:
3202     /// Constructor of the action.
3203     /// The constructor performs the related action on the IR.
3204     TypePromotionAction(Instruction *Inst) : Inst(Inst) {}
3205 
3206     virtual ~TypePromotionAction() = default;
3207 
3208     /// Undo the modification done by this action.
3209     /// When this method is called, the IR must be in the same state as it was
3210     /// before this action was applied.
3211     /// \pre Undoing the action works if and only if the IR is in the exact same
3212     /// state as it was directly after this action was applied.
3213     virtual void undo() = 0;
3214 
3215     /// Commit every change made by this action.
3216     /// When the results of the action on the IR are to be kept, it is important
3217     /// to call this function, otherwise hidden information may be kept forever.
3218     virtual void commit() {
3219       // Nothing to be done, this action is not doing anything.
3220     }
3221   };
3222 
3223   /// Utility to remember the position of an instruction.
3224   class InsertionHandler {
3225     /// Position of an instruction.
3226     /// Either an instruction:
3227     /// - Is the first in a basic block: BB is used.
3228     /// - Has a previous instruction: PrevInst is used.
3229     union {
3230       Instruction *PrevInst;
3231       BasicBlock *BB;
3232     } Point;
3233     std::optional<DbgRecord::self_iterator> BeforeDbgRecord = std::nullopt;
3234 
3235     /// Remember whether or not the instruction had a previous instruction.
3236     bool HasPrevInstruction;
3237 
3238   public:
3239     /// Record the position of \p Inst.
3240     InsertionHandler(Instruction *Inst) {
3241       HasPrevInstruction = (Inst != &*(Inst->getParent()->begin()));
3242       BasicBlock *BB = Inst->getParent();
3243 
3244       // Record where we would have to re-insert the instruction in the sequence
3245       // of DbgRecords, if we ended up reinserting.
3246       if (BB->IsNewDbgInfoFormat)
3247         BeforeDbgRecord = Inst->getDbgReinsertionPosition();
3248 
3249       if (HasPrevInstruction) {
3250         Point.PrevInst = &*std::prev(Inst->getIterator());
3251       } else {
3252         Point.BB = BB;
3253       }
3254     }
3255 
3256     /// Insert \p Inst at the recorded position.
3257     void insert(Instruction *Inst) {
3258       if (HasPrevInstruction) {
3259         if (Inst->getParent())
3260           Inst->removeFromParent();
3261         Inst->insertAfter(&*Point.PrevInst);
3262       } else {
3263         BasicBlock::iterator Position = Point.BB->getFirstInsertionPt();
3264         if (Inst->getParent())
3265           Inst->moveBefore(*Point.BB, Position);
3266         else
3267           Inst->insertBefore(*Point.BB, Position);
3268       }
3269 
3270       Inst->getParent()->reinsertInstInDbgRecords(Inst, BeforeDbgRecord);
3271     }
3272   };
3273 
3274   /// Move an instruction before another.
3275   class InstructionMoveBefore : public TypePromotionAction {
3276     /// Original position of the instruction.
3277     InsertionHandler Position;
3278 
3279   public:
3280     /// Move \p Inst before \p Before.
3281     InstructionMoveBefore(Instruction *Inst, Instruction *Before)
3282         : TypePromotionAction(Inst), Position(Inst) {
3283       LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before
3284                         << "\n");
3285       Inst->moveBefore(Before);
3286     }
3287 
3288     /// Move the instruction back to its original position.
3289     void undo() override {
3290       LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
3291       Position.insert(Inst);
3292     }
3293   };
3294 
3295   /// Set the operand of an instruction with a new value.
3296   class OperandSetter : public TypePromotionAction {
3297     /// Original operand of the instruction.
3298     Value *Origin;
3299 
3300     /// Index of the modified operand.
3301     unsigned Idx;
3302 
3303   public:
3304     /// Set \p Idx operand of \p Inst with \p NewVal.
3305     OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
3306         : TypePromotionAction(Inst), Idx(Idx) {
3307       LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
3308                         << "for:" << *Inst << "\n"
3309                         << "with:" << *NewVal << "\n");
3310       Origin = Inst->getOperand(Idx);
3311       Inst->setOperand(Idx, NewVal);
3312     }
3313 
3314     /// Restore the original value of the instruction.
3315     void undo() override {
3316       LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
3317                         << "for: " << *Inst << "\n"
3318                         << "with: " << *Origin << "\n");
3319       Inst->setOperand(Idx, Origin);
3320     }
3321   };
3322 
3323   /// Hide the operands of an instruction.
3324     /// Pretend that this instruction is not using any of its operands.
3325   class OperandsHider : public TypePromotionAction {
3326     /// The list of original operands.
3327     SmallVector<Value *, 4> OriginalValues;
3328 
3329   public:
3330     /// Remove \p Inst from the uses of the operands of \p Inst.
3331     OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
3332       LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
3333       unsigned NumOpnds = Inst->getNumOperands();
3334       OriginalValues.reserve(NumOpnds);
3335       for (unsigned It = 0; It < NumOpnds; ++It) {
3336         // Save the current operand.
3337         Value *Val = Inst->getOperand(It);
3338         OriginalValues.push_back(Val);
3339         // Set a dummy one.
3340         // We could use OperandSetter here, but that would imply an overhead
3341         // that we are not willing to pay.
3342         Inst->setOperand(It, PoisonValue::get(Val->getType()));
3343       }
3344     }
3345 
3346     /// Restore the original list of uses.
3347     void undo() override {
3348       LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
3349       for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
3350         Inst->setOperand(It, OriginalValues[It]);
3351     }
3352   };
3353 
3354   /// Build a truncate instruction.
3355   class TruncBuilder : public TypePromotionAction {
3356     Value *Val;
3357 
3358   public:
3359     /// Build a truncate instruction of \p Opnd producing a \p Ty
3360     /// result.
3361     /// trunc Opnd to Ty.
3362     TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
3363       IRBuilder<> Builder(Opnd);
3364       Builder.SetCurrentDebugLocation(DebugLoc());
3365       Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
3366       LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
3367     }
3368 
3369     /// Get the built value.
3370     Value *getBuiltValue() { return Val; }
3371 
3372     /// Remove the built instruction.
3373     void undo() override {
3374       LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
3375       if (Instruction *IVal = dyn_cast<Instruction>(Val))
3376         IVal->eraseFromParent();
3377     }
3378   };
3379 
3380   /// Build a sign extension instruction.
3381   class SExtBuilder : public TypePromotionAction {
3382     Value *Val;
3383 
3384   public:
3385     /// Build a sign extension instruction of \p Opnd producing a \p Ty
3386     /// result.
3387     /// sext Opnd to Ty.
3388     SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
3389         : TypePromotionAction(InsertPt) {
3390       IRBuilder<> Builder(InsertPt);
3391       Val = Builder.CreateSExt(Opnd, Ty, "promoted");
3392       LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
3393     }
3394 
3395     /// Get the built value.
3396     Value *getBuiltValue() { return Val; }
3397 
3398     /// Remove the built instruction.
3399     void undo() override {
3400       LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
3401       if (Instruction *IVal = dyn_cast<Instruction>(Val))
3402         IVal->eraseFromParent();
3403     }
3404   };
3405 
3406   /// Build a zero extension instruction.
3407   class ZExtBuilder : public TypePromotionAction {
3408     Value *Val;
3409 
3410   public:
3411     /// Build a zero extension instruction of \p Opnd producing a \p Ty
3412     /// result.
3413     /// zext Opnd to Ty.
3414     ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
3415         : TypePromotionAction(InsertPt) {
3416       IRBuilder<> Builder(InsertPt);
3417       Builder.SetCurrentDebugLocation(DebugLoc());
3418       Val = Builder.CreateZExt(Opnd, Ty, "promoted");
3419       LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
3420     }
3421 
3422     /// Get the built value.
3423     Value *getBuiltValue() { return Val; }
3424 
3425     /// Remove the built instruction.
3426     void undo() override {
3427       LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
3428       if (Instruction *IVal = dyn_cast<Instruction>(Val))
3429         IVal->eraseFromParent();
3430     }
3431   };
3432 
3433   /// Mutate an instruction to another type.
3434   class TypeMutator : public TypePromotionAction {
3435     /// Record the original type.
3436     Type *OrigTy;
3437 
3438   public:
3439     /// Mutate the type of \p Inst into \p NewTy.
3440     TypeMutator(Instruction *Inst, Type *NewTy)
3441         : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
3442       LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
3443                         << "\n");
3444       Inst->mutateType(NewTy);
3445     }
3446 
3447     /// Mutate the instruction back to its original type.
3448     void undo() override {
3449       LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
3450                         << "\n");
3451       Inst->mutateType(OrigTy);
3452     }
3453   };
3454 
3455   /// Replace the uses of an instruction by another instruction.
3456   class UsesReplacer : public TypePromotionAction {
3457     /// Helper structure to keep track of the replaced uses.
3458     struct InstructionAndIdx {
3459       /// The instruction using the original instruction.
3460       Instruction *Inst;
3461 
3462       /// The operand index at which the original instruction is used by Inst.
3463       unsigned Idx;
3464 
3465       InstructionAndIdx(Instruction *Inst, unsigned Idx)
3466           : Inst(Inst), Idx(Idx) {}
3467     };
3468 
3469     /// Keep track of the original uses (pair Instruction, Index).
3470     SmallVector<InstructionAndIdx, 4> OriginalUses;
3471     /// Keep track of the debug users.
3472     SmallVector<DbgValueInst *, 1> DbgValues;
3473     /// And non-instruction debug-users too.
3474     SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;
3475 
3476     /// Keep track of the new value so that we can undo it by replacing
3477     /// instances of the new value with the original value.
3478     Value *New;
3479 
3480     using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator;
3481 
3482   public:
3483     /// Replace all the uses of \p Inst with \p New.
3484     UsesReplacer(Instruction *Inst, Value *New)
3485         : TypePromotionAction(Inst), New(New) {
3486       LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
3487                         << "\n");
3488       // Record the original uses.
3489       for (Use &U : Inst->uses()) {
3490         Instruction *UserI = cast<Instruction>(U.getUser());
3491         OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
3492       }
3493       // Record the debug uses separately. They are not in the instruction's
3494       // use list, but they are replaced by RAUW.
3495       findDbgValues(DbgValues, Inst, &DbgVariableRecords);
3496 
3497       // Now, we can replace the uses.
3498       Inst->replaceAllUsesWith(New);
3499     }
3500 
3501     /// Reassign the original uses of Inst to Inst.
3502     void undo() override {
3503       LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
3504       for (InstructionAndIdx &Use : OriginalUses)
3505         Use.Inst->setOperand(Use.Idx, Inst);
3506       // RAUW has replaced all original uses with references to the new value,
3507       // including the debug uses. Since we are undoing the replacements,
3508       // the original debug uses must also be reinstated to maintain the
3509       // correctness and utility of debug value instructions.
3510       for (auto *DVI : DbgValues)
3511         DVI->replaceVariableLocationOp(New, Inst);
3512       // Similar story with DbgVariableRecords, the non-instruction
3513       // representation of dbg.values.
3514       for (DbgVariableRecord *DVR : DbgVariableRecords)
3515         DVR->replaceVariableLocationOp(New, Inst);
3516     }
3517   };
3518 
3519   /// Remove an instruction from the IR.
3520   class InstructionRemover : public TypePromotionAction {
3521     /// Original position of the instruction.
3522     InsertionHandler Inserter;
3523 
3524     /// Helper structure to hide all the links to the instruction. In other
3525     /// words, this helps to pretend that the instruction was removed.
3526     OperandsHider Hider;
3527 
3528     /// Keep track of the uses replaced, if any.
3529     UsesReplacer *Replacer = nullptr;
3530 
3531     /// Keep track of instructions removed.
3532     SetOfInstrs &RemovedInsts;
3533 
3534   public:
3535     /// Remove all references to \p Inst and optionally replace all its
3536     /// uses with \p New.
3537     /// \p RemovedInsts Keep track of the instructions removed by this Action.
3538     /// \pre If !Inst->use_empty(), then New != nullptr
3539     InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
3540                        Value *New = nullptr)
3541         : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
3542           RemovedInsts(RemovedInsts) {
3543       if (New)
3544         Replacer = new UsesReplacer(Inst, New);
3545       LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
3546       RemovedInsts.insert(Inst);
3547       /// The instructions removed here will be freed after completing
3548       /// optimizeBlock() for all blocks as we need to keep track of the
3549       /// removed instructions during promotion.
3550       Inst->removeFromParent();
3551     }
3552 
3553     ~InstructionRemover() override { delete Replacer; }
3554 
3555     InstructionRemover &operator=(const InstructionRemover &other) = delete;
3556     InstructionRemover(const InstructionRemover &other) = delete;
3557 
3558     /// Resurrect the instruction and reassign it to the proper uses if a
3559     /// new value was provided when building this action.
3560     void undo() override {
3561       LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
3562       Inserter.insert(Inst);
3563       if (Replacer)
3564         Replacer->undo();
3565       Hider.undo();
3566       RemovedInsts.erase(Inst);
3567     }
3568   };
3569 
3570 public:
3571   /// Restoration point.
3572   /// The restoration point is a pointer to an action instead of an iterator
3573   /// because the iterator may be invalidated but not the pointer.
3574   using ConstRestorationPt = const TypePromotionAction *;
3575 
3576   TypePromotionTransaction(SetOfInstrs &RemovedInsts)
3577       : RemovedInsts(RemovedInsts) {}
3578 
3579   /// Commit every change made in that transaction. Return true if any change
3580   /// happened.
3581   bool commit();
3582 
3583   /// Undo all the changes made after the given point.
3584   void rollback(ConstRestorationPt Point);
3585 
3586   /// Get the current restoration point.
3587   ConstRestorationPt getRestorationPoint() const;
3588 
3589   /// \name API for IR modification with state keeping to support rollback.
3590   /// @{
3591   /// Same as Instruction::setOperand.
3592   void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
3593 
3594   /// Same as Instruction::eraseFromParent.
3595   void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
3596 
3597   /// Same as Value::replaceAllUsesWith.
3598   void replaceAllUsesWith(Instruction *Inst, Value *New);
3599 
3600   /// Same as Value::mutateType.
3601   void mutateType(Instruction *Inst, Type *NewTy);
3602 
3603   /// Same as IRBuilder::createTrunc.
3604   Value *createTrunc(Instruction *Opnd, Type *Ty);
3605 
3606   /// Same as IRBuilder::createSExt.
3607   Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
3608 
3609   /// Same as IRBuilder::createZExt.
3610   Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
3611 
3612 private:
3613   /// The ordered list of actions made so far.
3614   SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
3615 
3616   using CommitPt =
3617       SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator;
3618 
3619   SetOfInstrs &RemovedInsts;
3620 };
3621 
3622 } // end anonymous namespace
3623 
3624 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
3625                                           Value *NewVal) {
3626   Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>(
3627       Inst, Idx, NewVal));
3628 }
3629 
3630 void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
3631                                                 Value *NewVal) {
3632   Actions.push_back(
3633       std::make_unique<TypePromotionTransaction::InstructionRemover>(
3634           Inst, RemovedInsts, NewVal));
3635 }
3636 
3637 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
3638                                                   Value *New) {
3639   Actions.push_back(
3640       std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
3641 }
3642 
3643 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
3644   Actions.push_back(
3645       std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
3646 }
3647 
3648 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, Type *Ty) {
3649   std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
3650   Value *Val = Ptr->getBuiltValue();
3651   Actions.push_back(std::move(Ptr));
3652   return Val;
3653 }
3654 
3655 Value *TypePromotionTransaction::createSExt(Instruction *Inst, Value *Opnd,
3656                                             Type *Ty) {
3657   std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
3658   Value *Val = Ptr->getBuiltValue();
3659   Actions.push_back(std::move(Ptr));
3660   return Val;
3661 }
3662 
3663 Value *TypePromotionTransaction::createZExt(Instruction *Inst, Value *Opnd,
3664                                             Type *Ty) {
3665   std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
3666   Value *Val = Ptr->getBuiltValue();
3667   Actions.push_back(std::move(Ptr));
3668   return Val;
3669 }
3670 
3671 TypePromotionTransaction::ConstRestorationPt
3672 TypePromotionTransaction::getRestorationPoint() const {
3673   return !Actions.empty() ? Actions.back().get() : nullptr;
3674 }
3675 
3676 bool TypePromotionTransaction::commit() {
3677   for (std::unique_ptr<TypePromotionAction> &Action : Actions)
3678     Action->commit();
3679   bool Modified = !Actions.empty();
3680   Actions.clear();
3681   return Modified;
3682 }
3683 
3684 void TypePromotionTransaction::rollback(
3685     TypePromotionTransaction::ConstRestorationPt Point) {
3686   while (!Actions.empty() && Point != Actions.back().get()) {
3687     std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
3688     Curr->undo();
3689   }
3690 }
3691 
3692 namespace {
3693 
3694 /// A helper class for matching addressing modes.
3695 ///
3696 /// This encapsulates the logic for matching the target-legal addressing modes.
3697 class AddressingModeMatcher {
3698   SmallVectorImpl<Instruction *> &AddrModeInsts;
3699   const TargetLowering &TLI;
3700   const TargetRegisterInfo &TRI;
3701   const DataLayout &DL;
3702   const LoopInfo &LI;
3703   const std::function<const DominatorTree &()> getDTFn;
3704 
3705   /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
3706   /// the memory instruction that we're computing this address for.
3707   Type *AccessTy;
3708   unsigned AddrSpace;
3709   Instruction *MemoryInst;
3710 
3711   /// This is the addressing mode that we're building up. This is
3712   /// part of the return value of this addressing mode matching stuff.
3713   ExtAddrMode &AddrMode;
3714 
3715   /// The instructions inserted by other CodeGenPrepare optimizations.
3716   const SetOfInstrs &InsertedInsts;
3717 
3718   /// A map from the instructions to their type before promotion.
3719   InstrToOrigTy &PromotedInsts;
3720 
3721   /// The ongoing transaction where every action should be registered.
3722   TypePromotionTransaction &TPT;
3723 
3724   // A GEP which has too large offset to be folded into the addressing mode.
3725   std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP;
3726 
3727   /// This is set to true when we should not do profitability checks.
3728   /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
3729   bool IgnoreProfitability;
3730 
3731   /// True if we are optimizing for size.
3732   bool OptSize = false;
3733 
3734   ProfileSummaryInfo *PSI;
3735   BlockFrequencyInfo *BFI;
3736 
3737   AddressingModeMatcher(
3738       SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI,
3739       const TargetRegisterInfo &TRI, const LoopInfo &LI,
3740       const std::function<const DominatorTree &()> getDTFn, Type *AT,
3741       unsigned AS, Instruction *MI, ExtAddrMode &AM,
3742       const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts,
3743       TypePromotionTransaction &TPT,
3744       std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3745       bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
3746       : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
3747         DL(MI->getDataLayout()), LI(LI), getDTFn(getDTFn),
3748         AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM),
3749         InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT),
3750         LargeOffsetGEP(LargeOffsetGEP), OptSize(OptSize), PSI(PSI), BFI(BFI) {
3751     IgnoreProfitability = false;
3752   }
3753 
3754 public:
3755   /// Find the maximal addressing mode that a load/store of V can fold,
3756   /// given an access type of AccessTy.  This returns a list of involved
3757   /// instructions in AddrModeInsts.
3758   /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
3759   /// optimizations.
3760   /// \p PromotedInsts maps the instructions to their type before promotion.
3761   /// \p TPT The ongoing transaction where every action should be registered.
3762   static ExtAddrMode
3763   Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst,
3764         SmallVectorImpl<Instruction *> &AddrModeInsts,
3765         const TargetLowering &TLI, const LoopInfo &LI,
3766         const std::function<const DominatorTree &()> getDTFn,
3767         const TargetRegisterInfo &TRI, const SetOfInstrs &InsertedInsts,
3768         InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT,
3769         std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3770         bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
3771     ExtAddrMode Result;
3772 
3773     bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, LI, getDTFn,
3774                                          AccessTy, AS, MemoryInst, Result,
3775                                          InsertedInsts, PromotedInsts, TPT,
3776                                          LargeOffsetGEP, OptSize, PSI, BFI)
3777                        .matchAddr(V, 0);
3778     (void)Success;
3779     assert(Success && "Couldn't select *anything*?");
3780     return Result;
3781   }
3782 
3783 private:
3784   bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
3785   bool matchAddr(Value *Addr, unsigned Depth);
3786   bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth,
3787                           bool *MovedAway = nullptr);
3788   bool isProfitableToFoldIntoAddressingMode(Instruction *I,
3789                                             ExtAddrMode &AMBefore,
3790                                             ExtAddrMode &AMAfter);
3791   bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
3792   bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
3793                              Value *PromotedOperand) const;
3794 };
3795 
3796 class PhiNodeSet;
3797 
3798 /// An iterator for PhiNodeSet.
3799 class PhiNodeSetIterator {
3800   PhiNodeSet *const Set;
3801   size_t CurrentIndex = 0;
3802 
3803 public:
3804   /// The constructor. Start should point to either a valid element, or be equal
3805   /// to the size of the underlying SmallVector of the PhiNodeSet.
3806   PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start);
3807   PHINode *operator*() const;
3808   PhiNodeSetIterator &operator++();
3809   bool operator==(const PhiNodeSetIterator &RHS) const;
3810   bool operator!=(const PhiNodeSetIterator &RHS) const;
3811 };
3812 
3813 /// Keeps a set of PHINodes.
3814 ///
3815 /// This is a minimal set implementation for a specific use case:
3816 /// It is very fast when there are very few elements, but also provides good
3817 /// performance when there are many. It is similar to SmallPtrSet, but also
3818 /// provides iteration by insertion order, which is deterministic and stable
3819 /// across runs. It is also similar to SmallSetVector, but provides removing
3820 /// elements in O(1) time. This is achieved by not actually removing the element
3821 /// from the underlying vector, so it comes at the cost of using more memory,
3822 /// but that is fine, since PhiNodeSets are used as short-lived objects.
3823 class PhiNodeSet {
3824   friend class PhiNodeSetIterator;
3825 
3826   using MapType = SmallDenseMap<PHINode *, size_t, 32>;
3827   using iterator = PhiNodeSetIterator;
3828 
3829   /// Keeps the elements in the order of their insertion in the underlying
3830   /// vector. To achieve constant time removal, it never deletes any element.
3831   SmallVector<PHINode *, 32> NodeList;
3832 
3833   /// Keeps the elements in the underlying set implementation. This (and not the
3834   /// NodeList defined above) is the source of truth on whether an element
3835   /// is actually in the collection.
3836   MapType NodeMap;
3837 
3838   /// Points to the first valid (not deleted) element when the set is not empty
3839   /// and the value is not zero. Equals the size of the underlying vector
3840   /// when the set is empty. When the value is 0, as in the beginning, the
3841   /// first element may or may not be valid.
3842   size_t FirstValidElement = 0;
3843 
3844 public:
3845   /// Inserts a new element to the collection.
3846   /// \returns true if the element is actually added, i.e. was not in the
3847   /// collection before the operation.
3848   bool insert(PHINode *Ptr) {
3849     if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) {
3850       NodeList.push_back(Ptr);
3851       return true;
3852     }
3853     return false;
3854   }
3855 
3856   /// Removes the element from the collection.
3857   /// \returns whether the element is actually removed, i.e. was in the
3858   /// collection before the operation.
3859   bool erase(PHINode *Ptr) {
3860     if (NodeMap.erase(Ptr)) {
3861       SkipRemovedElements(FirstValidElement);
3862       return true;
3863     }
3864     return false;
3865   }
3866 
3867   /// Removes all elements and clears the collection.
3868   void clear() {
3869     NodeMap.clear();
3870     NodeList.clear();
3871     FirstValidElement = 0;
3872   }
3873 
3874   /// \returns an iterator that will iterate the elements in the order of
3875   /// insertion.
3876   iterator begin() {
3877     if (FirstValidElement == 0)
3878       SkipRemovedElements(FirstValidElement);
3879     return PhiNodeSetIterator(this, FirstValidElement);
3880   }
3881 
3882   /// \returns an iterator that points to the end of the collection.
3883   iterator end() { return PhiNodeSetIterator(this, NodeList.size()); }
3884 
3885   /// Returns the number of elements in the collection.
3886   size_t size() const { return NodeMap.size(); }
3887 
3888   /// \returns 1 if the given element is in the collection, and 0 otherwise.
3889   size_t count(PHINode *Ptr) const { return NodeMap.count(Ptr); }
3890 
3891 private:
3892   /// Updates the CurrentIndex so that it will point to a valid element.
3893   ///
3894   /// If the element of NodeList at CurrentIndex is valid, it does not
3895   /// change it. If there are no more valid elements, it updates CurrentIndex
3896   /// to point to the end of the NodeList.
3897   void SkipRemovedElements(size_t &CurrentIndex) {
3898     while (CurrentIndex < NodeList.size()) {
3899       auto it = NodeMap.find(NodeList[CurrentIndex]);
3900       // If the element has been deleted and added again later, NodeMap will
3901       // point to a different index, so CurrentIndex will still be invalid.
3902       if (it != NodeMap.end() && it->second == CurrentIndex)
3903         break;
3904       ++CurrentIndex;
3905     }
3906   }
3907 };
3908 
3909 PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start)
3910     : Set(Set), CurrentIndex(Start) {}
3911 
3912 PHINode *PhiNodeSetIterator::operator*() const {
3913   assert(CurrentIndex < Set->NodeList.size() &&
3914          "PhiNodeSet access out of range");
3915   return Set->NodeList[CurrentIndex];
3916 }
3917 
3918 PhiNodeSetIterator &PhiNodeSetIterator::operator++() {
3919   assert(CurrentIndex < Set->NodeList.size() &&
3920          "PhiNodeSet access out of range");
3921   ++CurrentIndex;
3922   Set->SkipRemovedElements(CurrentIndex);
3923   return *this;
3924 }
3925 
3926 bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const {
3927   return CurrentIndex == RHS.CurrentIndex;
3928 }
3929 
3930 bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const {
3931   return !((*this) == RHS);
3932 }
3933 
3934 /// Keep track of the simplification of Phi nodes.
3935 /// Accept the set of all phi nodes and erase a phi node from this set
3936 /// if it is simplified.
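     /// It also keeps track of the Phi and Select nodes created while combining
     /// addressing modes so that they can all be destroyed if the combination
     /// ultimately fails (see destroyNewNodes).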
3937 class SimplificationTracker {
3938   DenseMap<Value *, Value *> Storage;
3939   const SimplifyQuery &SQ;
3940   // Tracks newly created Phi nodes. The elements are iterated by insertion
3941   // order.
3942   PhiNodeSet AllPhiNodes;
3943   // Tracks newly created Select nodes.
3944   SmallPtrSet<SelectInst *, 32> AllSelectNodes;
3945 
3946 public:
3947   SimplificationTracker(const SimplifyQuery &sq) : SQ(sq) {}
3948 
3949   Value *Get(Value *V) {
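         // Follow the chain of recorded replacements until we reach a value that
         // has not itself been replaced.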
3950     do {
3951       auto SV = Storage.find(V);
3952       if (SV == Storage.end())
3953         return V;
3954       V = SV->second;
3955     } while (true);
3956   }
3957 
3958   Value *Simplify(Value *Val) {
3959     SmallVector<Value *, 32> WorkList;
3960     SmallPtrSet<Value *, 32> Visited;
3961     WorkList.push_back(Val);
3962     while (!WorkList.empty()) {
3963       auto *P = WorkList.pop_back_val();
3964       if (!Visited.insert(P).second)
3965         continue;
3966       if (auto *PI = dyn_cast<Instruction>(P))
3967         if (Value *V = simplifyInstruction(cast<Instruction>(PI), SQ)) {
3968           for (auto *U : PI->users())
3969             WorkList.push_back(cast<Value>(U));
3970           Put(PI, V);
3971           PI->replaceAllUsesWith(V);
3972           if (auto *PHI = dyn_cast<PHINode>(PI))
3973             AllPhiNodes.erase(PHI);
3974           if (auto *Select = dyn_cast<SelectInst>(PI))
3975             AllSelectNodes.erase(Select);
3976           PI->eraseFromParent();
3977         }
3978     }
3979     return Get(Val);
3980   }
3981 
3982   void Put(Value *From, Value *To) { Storage.insert({From, To}); }
3983 
3984   void ReplacePhi(PHINode *From, PHINode *To) {
3985     Value *OldReplacement = Get(From);
3986     while (OldReplacement != From) {
3987       From = To;
3988       To = dyn_cast<PHINode>(OldReplacement);
3989       OldReplacement = Get(From);
3990     }
3991     assert(To && Get(To) == To && "Replacement PHI node is already replaced.");
3992     Put(From, To);
3993     From->replaceAllUsesWith(To);
3994     AllPhiNodes.erase(From);
3995     From->eraseFromParent();
3996   }
3997 
3998   PhiNodeSet &newPhiNodes() { return AllPhiNodes; }
3999 
4000   void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); }
4001 
4002   void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); }
4003 
4004   unsigned countNewPhiNodes() const { return AllPhiNodes.size(); }
4005 
4006   unsigned countNewSelectNodes() const { return AllSelectNodes.size(); }
4007 
4008   void destroyNewNodes(Type *CommonType) {
4009     // For safe erasing, replace the uses with dummy value first.
4010     auto *Dummy = PoisonValue::get(CommonType);
4011     for (auto *I : AllPhiNodes) {
4012       I->replaceAllUsesWith(Dummy);
4013       I->eraseFromParent();
4014     }
4015     AllPhiNodes.clear();
4016     for (auto *I : AllSelectNodes) {
4017       I->replaceAllUsesWith(Dummy);
4018       I->eraseFromParent();
4019     }
4020     AllSelectNodes.clear();
4021   }
4022 };
4023 
4024 /// A helper class for combining addressing modes.
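     /// It collects the ExtAddrModes matched for the different values an address
     /// may be based on (reached through phis and selects); when they differ in
     /// exactly one field, the differing values are merged through new Phi/Select
     /// nodes so that a single addressing mode can be used.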
4025 class AddressingModeCombiner {
4026   typedef DenseMap<Value *, Value *> FoldAddrToValueMapping;
4027   typedef std::pair<PHINode *, PHINode *> PHIPair;
4028 
4029 private:
4030   /// The addressing modes we've collected.
4031   SmallVector<ExtAddrMode, 16> AddrModes;
4032 
4033   /// The field in which the AddrModes differ, when we have more than one.
4034   ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField;
4035 
4036   /// Are the AddrModes that we have all just equal to their original values?
4037   bool AllAddrModesTrivial = true;
4038 
4039   /// Common Type for all different fields in addressing modes.
4040   Type *CommonType = nullptr;
4041 
4042   /// SimplifyQuery for simplifyInstruction utility.
4043   const SimplifyQuery &SQ;
4044 
4045   /// Original Address.
4046   Value *Original;
4047 
4048   /// Common value among addresses
4049   Value *CommonValue = nullptr;
4050 
4051 public:
4052   AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue)
4053       : SQ(_SQ), Original(OriginalValue) {}
4054 
4055   ~AddressingModeCombiner() { eraseCommonValueIfDead(); }
4056 
4057   /// Get the combined AddrMode
4058   const ExtAddrMode &getAddrMode() const { return AddrModes[0]; }
4059 
4060   /// Add a new AddrMode if it's compatible with the AddrModes we already
4061   /// have.
4062   /// \return True iff we succeeded in doing so.
4063   bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
4064     // Take note of whether we have any non-trivial AddrModes: we need to detect
4065     // when all AddrModes are trivial, as then we would introduce a phi or select
4066     // which just duplicates what's already there.
4067     AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial();
4068 
4069     // If this is the first addrmode then everything is fine.
4070     if (AddrModes.empty()) {
4071       AddrModes.emplace_back(NewAddrMode);
4072       return true;
4073     }
4074 
4075     // Figure out how different this is from the other address modes, which we
4076     // can do just by comparing against the first one given that we only care
4077     // about the cumulative difference.
4078     ExtAddrMode::FieldName ThisDifferentField =
4079         AddrModes[0].compare(NewAddrMode);
4080     if (DifferentField == ExtAddrMode::NoField)
4081       DifferentField = ThisDifferentField;
4082     else if (DifferentField != ThisDifferentField)
4083       DifferentField = ExtAddrMode::MultipleFields;
4084 
4085     // If NewAddrMode differs in more than one dimension we cannot handle it.
4086     bool CanHandle = DifferentField != ExtAddrMode::MultipleFields;
4087 
4088     // If Scale Field is different then we reject.
4089     CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField;
4090 
4091     // We also must reject the case when the base offset is different and the
4092     // scale reg is not null; we cannot handle this case because the merge of
4093     // the different offsets would have to be used as the ScaleReg.
4094     CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField ||
4095                               !NewAddrMode.ScaledReg);
4096 
4097     // We also must reject the case when the GV is different and a BaseReg is
4098     // installed, because we want to use the base reg as a merge of GV values.
4099     CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField ||
4100                               !NewAddrMode.HasBaseReg);
4101 
4102     // Even if NewAddrMode is the same we still need to collect it because the
4103     // original value is different, and later we will need all original values
4104     // as anchors when finding the common Phi node.
4105     if (CanHandle)
4106       AddrModes.emplace_back(NewAddrMode);
4107     else
4108       AddrModes.clear();
4109 
4110     return CanHandle;
4111   }
4112 
4113   /// Combine the addressing modes we've collected into a single
4114   /// addressing mode.
4115   /// \return True iff we successfully combined them or we only had one so
4116   /// didn't need to combine them anyway.
4117   bool combineAddrModes() {
4118     // If we have no AddrModes then they can't be combined.
4119     if (AddrModes.size() == 0)
4120       return false;
4121 
4122     // A single AddrMode can trivially be combined.
4123     if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField)
4124       return true;
4125 
4126     // If the AddrModes we collected are all just equal to the value they are
4127     // derived from then combining them wouldn't do anything useful.
4128     if (AllAddrModesTrivial)
4129       return false;
4130 
4131     if (!addrModeCombiningAllowed())
4132       return false;
4133 
4134     // Build a map from each original address value to the value of the
4135     // field in which the collected AddrModes differ.
4136     // Bail out if there is no common type.
4137     FoldAddrToValueMapping Map;
4138     if (!initializeMap(Map))
4139       return false;
4140 
4141     CommonValue = findCommon(Map);
4142     if (CommonValue)
4143       AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes);
4144     return CommonValue != nullptr;
4145   }
4146 
4147 private:
4148   /// `CommonValue` may be a placeholder inserted by us.
4149   /// If the placeholder is not used, we should remove this dead instruction.
4150   void eraseCommonValueIfDead() {
4151     if (CommonValue && CommonValue->getNumUses() == 0)
4152       if (Instruction *CommonInst = dyn_cast<Instruction>(CommonValue))
4153         CommonInst->eraseFromParent();
4154   }
4155 
4156   /// Initialize Map with anchor values. For each address seen we record
4157   /// the value of the field in which it differs from the others.
4158   /// At the same time we find a common type for the differing fields that
4159   /// we will use to create new Phi/Select nodes; keep it in CommonType.
4160   /// Return false if no common type is found.
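  /// Illustrative example (matching the one shown before findCommon below):
  /// if the collected modes differ only in the base register, with original
  /// addresses %p1 and %p2 whose bases are %b1 and %b2, this fills
  ///   Map = { %p1 -> %b1, %p2 -> %b2 }
  /// and CommonType becomes the common type of %b1 and %b2.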
4161   bool initializeMap(FoldAddrToValueMapping &Map) {
4162     // Keep track of keys where the value is null. We will need to replace it
4163     // with constant null when we know the common type.
4164     SmallVector<Value *, 2> NullValue;
4165     Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType());
4166     for (auto &AM : AddrModes) {
4167       Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy);
4168       if (DV) {
4169         auto *Type = DV->getType();
4170         if (CommonType && CommonType != Type)
4171           return false;
4172         CommonType = Type;
4173         Map[AM.OriginalValue] = DV;
4174       } else {
4175         NullValue.push_back(AM.OriginalValue);
4176       }
4177     }
4178     assert(CommonType && "At least one non-null value must be!");
4179     for (auto *V : NullValue)
4180       Map[V] = Constant::getNullValue(CommonType);
4181     return true;
4182   }
4183 
4184   /// We have a mapping from a value A to another value B, where B was a
4185   /// field in the addressing mode represented by A. We also have an original
4186   /// value C representing the address we start with. Traversing from C
4187   /// through phis and selects we ended up at the A's in the map. This utility
4188   /// function tries to find a value V which is a field in addressing mode C
4189   /// such that traversing from V through phi nodes and selects we end up at
4190   /// the corresponding values B in the map, creating Phi/Selects if needed.
4191   // The simple example looks as follows:
4192   // BB1:
4193   //   p1 = b1 + 40
4194   //   br cond BB2, BB3
4195   // BB2:
4196   //   p2 = b2 + 40
4197   //   br BB3
4198   // BB3:
4199   //   p = phi [p1, BB1], [p2, BB2]
4200   //   v = load p
4201   // Map is
4202   //   p1 -> b1
4203   //   p2 -> b2
4204   // Request is
4205   //   p -> ?
4206   // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3.
4207   Value *findCommon(FoldAddrToValueMapping &Map) {
4208     // Tracks the simplification of newly created phi nodes. We need this
4209     // tracker because we add newly created Phi nodes to the map, and
4210     // simplification of Phi nodes is recursive, so a Phi node may be
4211     // simplified after we have added it to the map. In practice this
4212     // simplification is possible only if the original phis/selects have not
4213     // been simplified yet.
4214     // Using this tracker we can find the current value for a map entry.
4215     SimplificationTracker ST(SQ);
4216 
4217     // First step, DFS to create PHI nodes for all intermediate blocks.
4218     // Also fill traverse order for the second step.
4219     SmallVector<Value *, 32> TraverseOrder;
4220     InsertPlaceholders(Map, TraverseOrder, ST);
4221 
4222     // Second Step, fill new nodes by merged values and simplify if possible.
4223     FillPlaceholders(Map, TraverseOrder, ST);
4224 
4225     if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) {
4226       ST.destroyNewNodes(CommonType);
4227       return nullptr;
4228     }
4229 
4230     // Now we'd like to match the new Phi nodes to existing ones.
4231     unsigned PhiNotMatchedCount = 0;
4232     if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
4233       ST.destroyNewNodes(CommonType);
4234       return nullptr;
4235     }
4236 
4237     auto *Result = ST.Get(Map.find(Original)->second);
4238     if (Result) {
4239       NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount;
4240       NumMemoryInstsSelectCreated += ST.countNewSelectNodes();
4241     }
4242     return Result;
4243   }
4244 
4245   /// Try to match PHI node to Candidate.
4246   /// Matcher tracks the matched Phi nodes.
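  /// Illustrative example (hypothetical IR): a phi we created, such as
  ///   %sunk_phi = phi i64 [ %b1, %BB1 ], [ %b2, %BB2 ]
  /// matches an existing
  ///   %old = phi i64 [ %b1, %BB1 ], [ %b2, %BB2 ]
  /// directly; if some incoming values are themselves phis we created, those
  /// pairs are pushed onto the worklist and must match as well.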
4247   bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
4248                     SmallSetVector<PHIPair, 8> &Matcher,
4249                     PhiNodeSet &PhiNodesToMatch) {
4250     SmallVector<PHIPair, 8> WorkList;
4251     Matcher.insert({PHI, Candidate});
4252     SmallSet<PHINode *, 8> MatchedPHIs;
4253     MatchedPHIs.insert(PHI);
4254     WorkList.push_back({PHI, Candidate});
4255     SmallSet<PHIPair, 8> Visited;
4256     while (!WorkList.empty()) {
4257       auto Item = WorkList.pop_back_val();
4258       if (!Visited.insert(Item).second)
4259         continue;
4260       // We iterate over all incoming values of the Phis to compare them.
4261       // If the values differ, both of them are Phis, the first one is a
4262       // Phi we added (subject to matching), and both live in the same basic
4263       // block, then our pair can still match provided those Phis match. So
4264       // we state that they match and add the pair to the worklist to verify.
4265       for (auto *B : Item.first->blocks()) {
4266         Value *FirstValue = Item.first->getIncomingValueForBlock(B);
4267         Value *SecondValue = Item.second->getIncomingValueForBlock(B);
4268         if (FirstValue == SecondValue)
4269           continue;
4270 
4271         PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue);
4272         PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue);
4273 
4274         // If one of them is not a Phi, or
4275         // the first one is not a Phi node from the set we'd like to match, or
4276         // the Phi nodes live in different basic blocks, then
4277         // we will not be able to match.
4278         if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) ||
4279             FirstPhi->getParent() != SecondPhi->getParent())
4280           return false;
4281 
4282         // If we already matched them then continue.
4283         if (Matcher.count({FirstPhi, SecondPhi}))
4284           continue;
4285         // So the values are different and do not match yet, so we need them
4286         // to match. (But we register no more than one match per PHI node, so
4287         // that we won't later try to replace them twice.)
4288         if (MatchedPHIs.insert(FirstPhi).second)
4289           Matcher.insert({FirstPhi, SecondPhi});
4290         // But we must still check it.
4291         WorkList.push_back({FirstPhi, SecondPhi});
4292       }
4293     }
4294     return true;
4295   }
4296 
4297   /// For the given set of PHI nodes (in the SimplificationTracker) try
4298   /// to find their equivalents.
4299   /// Returns false if this matching fails and creating new Phis is disabled.
4300   bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes,
4301                    unsigned &PhiNotMatchedCount) {
4302     // Matched and PhiNodesToMatch iterate their elements in a deterministic
4303     // order, so the replacements (ReplacePhi) are also done in a deterministic
4304     // order.
4305     SmallSetVector<PHIPair, 8> Matched;
4306     SmallPtrSet<PHINode *, 8> WillNotMatch;
4307     PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes();
4308     while (PhiNodesToMatch.size()) {
4309       PHINode *PHI = *PhiNodesToMatch.begin();
4310 
4311       // Seed WillNotMatch with PHI itself; if nothing matches, it won't match.
4312       WillNotMatch.clear();
4313       WillNotMatch.insert(PHI);
4314 
4315       // Traverse all Phis until we find an equivalent one or fail to do so.
4316       bool IsMatched = false;
4317       for (auto &P : PHI->getParent()->phis()) {
4318         // Skip new Phi nodes.
4319         if (PhiNodesToMatch.count(&P))
4320           continue;
4321         if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch)))
4322           break;
4323         // If it does not match, collect all Phi nodes from the matcher:
4324         // if we end up with no match, then all these Phi nodes will not
4325         // match later either.
4326         for (auto M : Matched)
4327           WillNotMatch.insert(M.first);
4328         Matched.clear();
4329       }
4330       if (IsMatched) {
4331         // Replace all matched values and erase them.
4332         for (auto MV : Matched)
4333           ST.ReplacePhi(MV.first, MV.second);
4334         Matched.clear();
4335         continue;
4336       }
4337       // If we are not allowed to create new nodes then bail out.
4338       if (!AllowNewPhiNodes)
4339         return false;
4340       // Just remove all seen values in matcher. They will not match anything.
4341       PhiNotMatchedCount += WillNotMatch.size();
4342       for (auto *P : WillNotMatch)
4343         PhiNodesToMatch.erase(P);
4344     }
4345     return true;
4346   }
4347   /// Fill the placeholders with values from predecessors and simplify them.
4348   void FillPlaceholders(FoldAddrToValueMapping &Map,
4349                         SmallVectorImpl<Value *> &TraverseOrder,
4350                         SimplificationTracker &ST) {
4351     while (!TraverseOrder.empty()) {
4352       Value *Current = TraverseOrder.pop_back_val();
4353       assert(Map.contains(Current) && "No node to fill!!!");
4354       Value *V = Map[Current];
4355 
4356       if (SelectInst *Select = dyn_cast<SelectInst>(V)) {
4357         // CurrentValue also must be Select.
4358         auto *CurrentSelect = cast<SelectInst>(Current);
4359         auto *TrueValue = CurrentSelect->getTrueValue();
4360         assert(Map.contains(TrueValue) && "No True Value!");
4361         Select->setTrueValue(ST.Get(Map[TrueValue]));
4362         auto *FalseValue = CurrentSelect->getFalseValue();
4363         assert(Map.contains(FalseValue) && "No False Value!");
4364         Select->setFalseValue(ST.Get(Map[FalseValue]));
4365       } else {
4366         // Must be a Phi node then.
4367         auto *PHI = cast<PHINode>(V);
4368         // Fill the Phi node with values from predecessors.
4369         for (auto *B : predecessors(PHI->getParent())) {
4370           Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B);
4371           assert(Map.contains(PV) && "No predecessor Value!");
4372           PHI->addIncoming(ST.Get(Map[PV]), B);
4373         }
4374       }
4375       Map[Current] = ST.Simplify(V);
4376     }
4377   }
4378 
4379   /// Starting from the original value, recursively iterates over the def-use
4380   /// chain up to the known ending values represented in the map. For each
4381   /// traversed phi/select it inserts a placeholder Phi or Select.
4382   /// Reports all newly created Phi/Select nodes by adding them to the set.
4383   /// Also reports the order in which the values have been traversed.
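  /// Continuing the example shown before findCommon (illustrative): for the
  /// request "p -> ?" this creates an empty "sunk_phi" placeholder in BB3
  /// (select placeholders get poison dummies for their true/false values),
  /// records p in the traversal order, and FillPlaceholders later fills the
  /// placeholder in as phi [ %b1, %BB1 ], [ %b2, %BB2 ].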
4384   void InsertPlaceholders(FoldAddrToValueMapping &Map,
4385                           SmallVectorImpl<Value *> &TraverseOrder,
4386                           SimplificationTracker &ST) {
4387     SmallVector<Value *, 32> Worklist;
4388     assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) &&
4389            "Address must be a Phi or Select node");
4390     auto *Dummy = PoisonValue::get(CommonType);
4391     Worklist.push_back(Original);
4392     while (!Worklist.empty()) {
4393       Value *Current = Worklist.pop_back_val();
4394       // If it is already visited or it is an ending value then skip it.
4395       if (Map.contains(Current))
4396         continue;
4397       TraverseOrder.push_back(Current);
4398 
4399       // CurrentValue must be a Phi node or select. All others must be covered
4400       // by anchors.
4401       if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) {
4402         // Is it OK to get metadata from OrigSelect?!
4403         // Create a Select placeholder with dummy value.
4404         SelectInst *Select =
4405             SelectInst::Create(CurrentSelect->getCondition(), Dummy, Dummy,
4406                                CurrentSelect->getName(),
4407                                CurrentSelect->getIterator(), CurrentSelect);
4408         Map[Current] = Select;
4409         ST.insertNewSelect(Select);
4410         // We are interested in True and False values.
4411         Worklist.push_back(CurrentSelect->getTrueValue());
4412         Worklist.push_back(CurrentSelect->getFalseValue());
4413       } else {
4414         // It must be a Phi node then.
4415         PHINode *CurrentPhi = cast<PHINode>(Current);
4416         unsigned PredCount = CurrentPhi->getNumIncomingValues();
4417         PHINode *PHI =
4418             PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi->getIterator());
4419         Map[Current] = PHI;
4420         ST.insertNewPhi(PHI);
4421         append_range(Worklist, CurrentPhi->incoming_values());
4422       }
4423     }
4424   }
4425 
4426   bool addrModeCombiningAllowed() {
4427     if (DisableComplexAddrModes)
4428       return false;
4429     switch (DifferentField) {
4430     default:
4431       return false;
4432     case ExtAddrMode::BaseRegField:
4433       return AddrSinkCombineBaseReg;
4434     case ExtAddrMode::BaseGVField:
4435       return AddrSinkCombineBaseGV;
4436     case ExtAddrMode::BaseOffsField:
4437       return AddrSinkCombineBaseOffs;
4438     case ExtAddrMode::ScaledRegField:
4439       return AddrSinkCombineScaledReg;
4440     }
4441   }
4442 };
4443 } // end anonymous namespace
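
// Illustrative sketch (not part of the pass): roughly how a caller such as
// the memory-instruction optimization in this file might drive the combiner
// above; the loop and the name AddressValues are placeholders, not code
// taken from this file.
//
//   AddressingModeCombiner AddrModes(SQ, Addr);
//   for (Value *V : AddressValues) {
//     ExtAddrMode AM = /* match V with AddressingModeMatcher */;
//     if (!AddrModes.addNewAddrMode(AM))
//       break; // the modes diverge in more than one field
//   }
//   if (AddrModes.combineAddrModes())
//     /* sink the combined AddrModes.getAddrMode() next to the memory inst */;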
4444 
4445 /// Try adding ScaleReg*Scale to the current addressing mode.
4446 /// Return true and update AddrMode if this addr mode is legal for the target,
4447 /// false if not.
4448 bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
4449                                              unsigned Depth) {
4450   // If Scale is 1, then this is the same as adding ScaleReg to the addressing
4451   // mode.  Just process that directly.
4452   if (Scale == 1)
4453     return matchAddr(ScaleReg, Depth);
4454 
4455   // If the scale is 0, it takes nothing to add this.
4456   if (Scale == 0)
4457     return true;
4458 
4459   // If we already have a scale of this value, we can add to it, otherwise, we
4460   // need an available scale field.
4461   if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
4462     return false;
4463 
4464   ExtAddrMode TestAddrMode = AddrMode;
4465 
4466   // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
4467   // [A+B + A*7] -> [B+A*8].
4468   TestAddrMode.Scale += Scale;
4469   TestAddrMode.ScaledReg = ScaleReg;
4470 
4471   // If the new address isn't legal, bail out.
4472   if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
4473     return false;
4474 
4475   // It was legal, so commit it.
4476   AddrMode = TestAddrMode;
4477 
4478   // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
4479   // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
4480   // X*Scale + C*Scale to addr mode. If we find an available IV increment, do
4481   // not go any further: we can reuse it and cannot eliminate it.
4482   ConstantInt *CI = nullptr;
4483   Value *AddLHS = nullptr;
4484   if (isa<Instruction>(ScaleReg) && // not a constant expr.
4485       match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) &&
4486       !isIVIncrement(ScaleReg, &LI) && CI->getValue().isSignedIntN(64)) {
4487     TestAddrMode.InBounds = false;
4488     TestAddrMode.ScaledReg = AddLHS;
4489     TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale;
4490 
4491     // If this addressing mode is legal, commit it and remember that we folded
4492     // this instruction.
4493     if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
4494       AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
4495       AddrMode = TestAddrMode;
4496       return true;
4497     }
4498     // Restore status quo.
4499     TestAddrMode = AddrMode;
4500   }
4501 
4502   // If this is an add recurrence with a constant step, return the increment
4503   // instruction and the canonicalized step.
4504   auto GetConstantStep =
4505       [this](const Value *V) -> std::optional<std::pair<Instruction *, APInt>> {
4506     auto *PN = dyn_cast<PHINode>(V);
4507     if (!PN)
4508       return std::nullopt;
4509     auto IVInc = getIVIncrement(PN, &LI);
4510     if (!IVInc)
4511       return std::nullopt;
4512     // TODO: The result of the intrinsics above is two's complement. However,
4513     // when the IV increment is expressed as add or sub, iv.next is potentially
4514     // a poison value. If it has nuw or nsw flags, we need to make sure these
4515     // flags are inferrable at the point of the memory instruction; otherwise
4516     // we would be replacing a well-defined two's complement computation with
4517     // poison. To avoid the complex analysis needed to prove this, we currently reject such cases.
4518     if (auto *OIVInc = dyn_cast<OverflowingBinaryOperator>(IVInc->first))
4519       if (OIVInc->hasNoSignedWrap() || OIVInc->hasNoUnsignedWrap())
4520         return std::nullopt;
4521     if (auto *ConstantStep = dyn_cast<ConstantInt>(IVInc->second))
4522       return std::make_pair(IVInc->first, ConstantStep->getValue());
4523     return std::nullopt;
4524   };
4525 
4526   // Try to account for the following special case:
4527   // 1. ScaleReg is an induction variable;
4528   // 2. We use it with non-zero offset;
4529   // 3. IV's increment is available at the point of memory instruction.
4530   //
4531   // In this case, we may reuse the IV increment instead of the IV Phi to
4532   // achieve the following advantages:
4533   // 1. If the IV step matches the offset, we no longer need the offset;
4534   // 2. Even if they don't match, we reduce the overlap of the live ranges of
4535   //    the IV and the IV increment, which potentially leads to better
4536   //    register assignment.
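  //
  // A hypothetical worked example: if the matched mode so far is
  // [%base + 1 * %iv + 4] and the increment is %iv.next = add i64 %iv, 4
  // (step 4, dominating the memory instruction), then %iv.next == %iv + 4,
  // so we may instead select [%base + 1 * %iv.next] and drop the offset.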
4537   if (AddrMode.BaseOffs) {
4538     if (auto IVStep = GetConstantStep(ScaleReg)) {
4539       Instruction *IVInc = IVStep->first;
4540       // The following assert is important to ensure a lack of infinite loops.
4541       // This transform is (intentionally) the inverse of the one just above.
4542       // If they don't agree on the definition of an increment, we'd alternate
4543       // back and forth indefinitely.
4544       assert(isIVIncrement(IVInc, &LI) && "implied by GetConstantStep");
4545       APInt Step = IVStep->second;
4546       APInt Offset = Step * AddrMode.Scale;
4547       if (Offset.isSignedIntN(64)) {
4548         TestAddrMode.InBounds = false;
4549         TestAddrMode.ScaledReg = IVInc;
4550         TestAddrMode.BaseOffs -= Offset.getLimitedValue();
4551         // If this addressing mode is legal, commit it.
4552         // (Note that we defer the (expensive) domtree base legality check
4553         // to the very last possible point.)
4554         if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace) &&
4555             getDTFn().dominates(IVInc, MemoryInst)) {
4556           AddrModeInsts.push_back(cast<Instruction>(IVInc));
4557           AddrMode = TestAddrMode;
4558           return true;
4559         }
4560         // Restore status quo.
4561         TestAddrMode = AddrMode;
4562       }
4563     }
4564   }
4565 
4566   // Otherwise, just return what we have.
4567   return true;
4568 }
4569 
4570 /// This is a little filter, which returns true if an addressing computation
4571 /// involving I might be folded into a load/store accessing it.
4572 /// This doesn't need to be perfect, but needs to accept at least
4573 /// the set of instructions that MatchOperationAddr can.
4574 static bool MightBeFoldableInst(Instruction *I) {
4575   switch (I->getOpcode()) {
4576   case Instruction::BitCast:
4577   case Instruction::AddrSpaceCast:
4578     // Don't touch identity bitcasts.
4579     if (I->getType() == I->getOperand(0)->getType())
4580       return false;
4581     return I->getType()->isIntOrPtrTy();
4582   case Instruction::PtrToInt:
4583     // PtrToInt is always a noop, as we know that the int type is pointer sized.
4584     return true;
4585   case Instruction::IntToPtr:
4586     // We know the input is intptr_t, so this is foldable.
4587     return true;
4588   case Instruction::Add:
4589     return true;
4590   case Instruction::Mul:
4591   case Instruction::Shl:
4592     // Can only handle X*C and X << C.
4593     return isa<ConstantInt>(I->getOperand(1));
4594   case Instruction::GetElementPtr:
4595     return true;
4596   default:
4597     return false;
4598   }
4599 }
4600 
4601 /// Check whether or not \p Val is a legal instruction for \p TLI.
4602 /// \note \p Val is assumed to be the product of some type promotion.
4603 /// Therefore if \p Val has an undefined state in \p TLI, this is assumed
4604 /// to be legal, as the non-promoted value would have had the same state.
4605 static bool isPromotedInstructionLegal(const TargetLowering &TLI,
4606                                        const DataLayout &DL, Value *Val) {
4607   Instruction *PromotedInst = dyn_cast<Instruction>(Val);
4608   if (!PromotedInst)
4609     return false;
4610   int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
4611   // If the ISDOpcode is undefined, it was undefined before the promotion.
4612   if (!ISDOpcode)
4613     return true;
4614   // Otherwise, check if the promoted instruction is legal or not.
4615   return TLI.isOperationLegalOrCustom(
4616       ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
4617 }
4618 
4619 namespace {
4620 
4621 /// Helper class to perform type promotion.
4622 class TypePromotionHelper {
4623   /// Utility function to add a promoted instruction \p ExtOpnd to
4624   /// \p PromotedInsts and record the type of extension we have seen.
4625   static void addPromotedInst(InstrToOrigTy &PromotedInsts,
4626                               Instruction *ExtOpnd, bool IsSExt) {
4627     ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
4628     InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd);
4629     if (It != PromotedInsts.end()) {
4630       // If the new extension is the same as the original, the information in
4631       // PromotedInsts[ExtOpnd] is still correct.
4632       if (It->second.getInt() == ExtTy)
4633         return;
4634 
4635       // Now that the new extension is different from the old extension, we
4636       // invalidate the type information by setting the extension type to
4637       // BothExtension.
4638       ExtTy = BothExtension;
4639     }
4640     PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy);
4641   }
4642 
4643   /// Utility function to query the original type of instruction \p Opnd
4644   /// with a matched extension type. If the extension doesn't match, we
4645   /// cannot use the information we had on the original type.
4646   /// BothExtension doesn't match any extension type.
4647   static const Type *getOrigType(const InstrToOrigTy &PromotedInsts,
4648                                  Instruction *Opnd, bool IsSExt) {
4649     ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
4650     InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
4651     if (It != PromotedInsts.end() && It->second.getInt() == ExtTy)
4652       return It->second.getPointer();
4653     return nullptr;
4654   }
4655 
4656   /// Utility function to check whether or not a sign or zero extension
4657   /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
4658   /// either using the operands of \p Inst or promoting \p Inst.
4659   /// The type of the extension is defined by \p IsSExt.
4660   /// In other words, check if:
4661   /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
4662   /// #1 Promotion applies:
4663   /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
4664   /// #2 Operand reuses:
4665   /// ext opnd1 to ConsideredExtType.
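  /// Illustrative example (hypothetical IR): for
  ///   %a = add nsw i16 %x, 1
  ///   %e = sext i16 %a to i32
  /// case #1 applies: the nsw flag lets the sext be moved through the add,
  /// turning %e into "add nsw i32 (sext i16 %x to i32), 1".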
4666   /// \p PromotedInsts maps the instructions to their type before promotion.
4667   static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
4668                             const InstrToOrigTy &PromotedInsts, bool IsSExt);
4669 
4670   /// Utility function to determine if \p OpIdx should be promoted when
4671   /// promoting \p Inst.
4672   static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
4673     return !(isa<SelectInst>(Inst) && OpIdx == 0);
4674   }
4675 
4676   /// Utility function to promote the operand of \p Ext when this
4677   /// operand is a promotable trunc or sext or zext.
4678   /// \p PromotedInsts maps the instructions to their type before promotion.
4679   /// \p CreatedInstsCost[out] contains the cost of all instructions
4680   /// created to promote the operand of Ext.
4681   /// Newly added extensions are inserted in \p Exts.
4682   /// Newly added truncates are inserted in \p Truncs.
4683   /// Should never be called directly.
4684   /// \return The promoted value which is used instead of Ext.
4685   static Value *promoteOperandForTruncAndAnyExt(
4686       Instruction *Ext, TypePromotionTransaction &TPT,
4687       InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4688       SmallVectorImpl<Instruction *> *Exts,
4689       SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);
4690 
4691   /// Utility function to promote the operand of \p Ext when this
4692   /// operand is promotable and is not a supported trunc or sext.
4693   /// \p PromotedInsts maps the instructions to their type before promotion.
4694   /// \p CreatedInstsCost[out] contains the cost of all the instructions
4695   /// created to promote the operand of Ext.
4696   /// Newly added extensions are inserted in \p Exts.
4697   /// Newly added truncates are inserted in \p Truncs.
4698   /// Should never be called directly.
4699   /// \return The promoted value which is used instead of Ext.
4700   static Value *promoteOperandForOther(Instruction *Ext,
4701                                        TypePromotionTransaction &TPT,
4702                                        InstrToOrigTy &PromotedInsts,
4703                                        unsigned &CreatedInstsCost,
4704                                        SmallVectorImpl<Instruction *> *Exts,
4705                                        SmallVectorImpl<Instruction *> *Truncs,
4706                                        const TargetLowering &TLI, bool IsSExt);
4707 
4708   /// \see promoteOperandForOther.
4709   static Value *signExtendOperandForOther(
4710       Instruction *Ext, TypePromotionTransaction &TPT,
4711       InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4712       SmallVectorImpl<Instruction *> *Exts,
4713       SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4714     return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
4715                                   Exts, Truncs, TLI, true);
4716   }
4717 
4718   /// \see promoteOperandForOther.
4719   static Value *zeroExtendOperandForOther(
4720       Instruction *Ext, TypePromotionTransaction &TPT,
4721       InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4722       SmallVectorImpl<Instruction *> *Exts,
4723       SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4724     return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
4725                                   Exts, Truncs, TLI, false);
4726   }
4727 
4728 public:
4729   /// Type for the utility function that promotes the operand of Ext.
4730   using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT,
4731                             InstrToOrigTy &PromotedInsts,
4732                             unsigned &CreatedInstsCost,
4733                             SmallVectorImpl<Instruction *> *Exts,
4734                             SmallVectorImpl<Instruction *> *Truncs,
4735                             const TargetLowering &TLI);
4736 
4737   /// Given a sign/zero extend instruction \p Ext, return the appropriate
4738   /// action to promote the operand of \p Ext instead of using Ext.
4739   /// \return NULL if no promotable action is possible with the current
4740   /// sign extension.
4741   /// \p InsertedInsts keeps track of all the instructions inserted by the
4742   /// other CodeGenPrepare optimizations. This information is important
4743   /// because we do not want to promote these instructions as CodeGenPrepare
4744   /// will reinsert them later. Thus creating an infinite loop: create/remove.
4745   /// \p PromotedInsts maps the instructions to their type before promotion.
4746   static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
4747                           const TargetLowering &TLI,
4748                           const InstrToOrigTy &PromotedInsts);
4749 };
4750 
4751 } // end anonymous namespace
4752 
4753 bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
4754                                         Type *ConsideredExtType,
4755                                         const InstrToOrigTy &PromotedInsts,
4756                                         bool IsSExt) {
4757   // The promotion helper does not know how to deal with vector types yet.
4758   // To be able to fix that, we would need to fix the places where we
4759   // statically extend, e.g., constants and such.
4760   if (Inst->getType()->isVectorTy())
4761     return false;
4762 
4763   // We can always get through zext.
4764   if (isa<ZExtInst>(Inst))
4765     return true;
4766 
4767   // sext(sext) is ok too.
4768   if (IsSExt && isa<SExtInst>(Inst))
4769     return true;
4770 
4771   // We can get through binary operator, if it is legal. In other words, the
4772   // binary operator must have a nuw or nsw flag.
4773   if (const auto *BinOp = dyn_cast<BinaryOperator>(Inst))
4774     if (isa<OverflowingBinaryOperator>(BinOp) &&
4775         ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
4776          (IsSExt && BinOp->hasNoSignedWrap())))
4777       return true;
4778 
4779   // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst))
4780   if ((Inst->getOpcode() == Instruction::And ||
4781        Inst->getOpcode() == Instruction::Or))
4782     return true;
4783 
4784   // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst))
4785   if (Inst->getOpcode() == Instruction::Xor) {
4786     // Make sure it is not a NOT.
4787     if (const auto *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)))
4788       if (!Cst->getValue().isAllOnes())
4789         return true;
4790   }
4791 
4792   // zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst))
4793   // It may change a poisoned value into a regular value, like
4794   //     zext i32 (shrl i8 %val, 12)  -->  shrl i32 (zext i8 %val), 12
4795   //          poisoned value                    regular value
4796   // It should be OK since undef covers valid value.
4797   if (Inst->getOpcode() == Instruction::LShr && !IsSExt)
4798     return true;
4799 
4800   // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst)
4801   // It may change a poisoned value into a regular value, like
4802   //     zext i32 (shl i8 %val, 12)  -->  shl i32 (zext i8 %val), 12
4803   //          poisoned value                    regular value
4804   // It should be OK since undef covers valid value.
4805   if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) {
4806     const auto *ExtInst = cast<const Instruction>(*Inst->user_begin());
4807     if (ExtInst->hasOneUse()) {
4808       const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin());
4809       if (AndInst && AndInst->getOpcode() == Instruction::And) {
4810         const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1));
4811         if (Cst &&
4812             Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth()))
4813           return true;
4814       }
4815     }
4816   }
4817 
4818   // Check if we can do the following simplification.
4819   // ext(trunc(opnd)) --> ext(opnd)
4820   if (!isa<TruncInst>(Inst))
4821     return false;
4822 
4823   Value *OpndVal = Inst->getOperand(0);
4824   // Check if we can use this operand in the extension.
4825   // If the type is larger than the result type of the extension, we cannot.
4826   if (!OpndVal->getType()->isIntegerTy() ||
4827       OpndVal->getType()->getIntegerBitWidth() >
4828           ConsideredExtType->getIntegerBitWidth())
4829     return false;
4830 
4831   // If the operand of the truncate is not an instruction, we will not have
4832   // any information on the dropped bits.
4833   // (Actually we could for constant but it is not worth the extra logic).
4834   Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
4835   if (!Opnd)
4836     return false;
4837 
4838   // Check if the original type of the truncate's source is narrow enough.
4839   // I.e., check that the trunc just drops extended bits of the same kind
4840   // as the extension.
4841   // #1 get the type of the operand and check the kind of the extended bits.
4842   const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt);
4843   if (OpndType)
4844     ;
4845   else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
4846     OpndType = Opnd->getOperand(0)->getType();
4847   else
4848     return false;
4849 
4850   // #2 check that the truncate just drops extended bits.
4851   return Inst->getType()->getIntegerBitWidth() >=
4852          OpndType->getIntegerBitWidth();
4853 }
4854 
4855 TypePromotionHelper::Action TypePromotionHelper::getAction(
4856     Instruction *Ext, const SetOfInstrs &InsertedInsts,
4857     const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
4858   assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
4859          "Unexpected instruction type");
4860   Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
4861   Type *ExtTy = Ext->getType();
4862   bool IsSExt = isa<SExtInst>(Ext);
4863   // If the operand of the extension is not an instruction, we cannot
4864   // get through.
4865   // If it is, check whether we can get through.
4866   if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
4867     return nullptr;
4868 
4869   // Do not promote if the operand has been added by codegenprepare.
4870   // Otherwise, it means we are undoing an optimization that is likely to be
4871   // redone, thus causing a potential infinite loop.
4872   if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
4873     return nullptr;
4874 
4875   // SExt or Trunc instructions.
4876   // Return the related handler.
4877   if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
4878       isa<ZExtInst>(ExtOpnd))
4879     return promoteOperandForTruncAndAnyExt;
4880 
4881   // Regular instruction.
4882   // Abort early if we will have to insert non-free instructions.
4883   if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
4884     return nullptr;
4885   return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
4886 }
4887 
4888 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
4889     Instruction *SExt, TypePromotionTransaction &TPT,
4890     InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4891     SmallVectorImpl<Instruction *> *Exts,
4892     SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4893   // By construction, the operand of SExt is an instruction. Otherwise we cannot
4894   // get through it and this method should not be called.
4895   Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
4896   Value *ExtVal = SExt;
4897   bool HasMergedNonFreeExt = false;
4898   if (isa<ZExtInst>(SExtOpnd)) {
4899     // Replace s|zext(zext(opnd))
4900     // => zext(opnd).
4901     HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
4902     Value *ZExt =
4903         TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
4904     TPT.replaceAllUsesWith(SExt, ZExt);
4905     TPT.eraseInstruction(SExt);
4906     ExtVal = ZExt;
4907   } else {
4908     // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
4909     // => z|sext(opnd).
4910     TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
4911   }
4912   CreatedInstsCost = 0;
4913 
4914   // Remove dead code.
4915   if (SExtOpnd->use_empty())
4916     TPT.eraseInstruction(SExtOpnd);
4917 
4918   // Check if the extension is still needed.
4919   Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
4920   if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
4921     if (ExtInst) {
4922       if (Exts)
4923         Exts->push_back(ExtInst);
4924       CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
4925     }
4926     return ExtVal;
4927   }
4928 
4929   // At this point we have: ext ty opnd to ty.
4930   // Reassign the uses of ExtInst to the opnd and remove ExtInst.
4931   Value *NextVal = ExtInst->getOperand(0);
4932   TPT.eraseInstruction(ExtInst, NextVal);
4933   return NextVal;
4934 }
4935 
4936 Value *TypePromotionHelper::promoteOperandForOther(
4937     Instruction *Ext, TypePromotionTransaction &TPT,
4938     InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4939     SmallVectorImpl<Instruction *> *Exts,
4940     SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
4941     bool IsSExt) {
4942   // By construction, the operand of Ext is an instruction. Otherwise we cannot
4943   // get through it and this method should not be called.
4944   Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
4945   CreatedInstsCost = 0;
4946   if (!ExtOpnd->hasOneUse()) {
4947     // ExtOpnd will be promoted.
4948     // All its uses, but Ext, will need to use a truncated value of the
4949     // promoted version.
4950     // Create the truncate now.
4951     Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
4952     if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
4953       // Insert it just after the definition.
4954       ITrunc->moveAfter(ExtOpnd);
4955       if (Truncs)
4956         Truncs->push_back(ITrunc);
4957     }
4958 
4959     TPT.replaceAllUsesWith(ExtOpnd, Trunc);
4960     // Restore the operand of Ext (which has been replaced by the previous call
4961     // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
4962     TPT.setOperand(Ext, 0, ExtOpnd);
4963   }
4964 
4965   // Get through the Instruction:
4966   // 1. Update its type.
4967   // 2. Replace the uses of Ext by Inst.
4968   // 3. Extend each operand that needs to be extended.
4969 
4970   // Remember the original type of the instruction before promotion.
4971   // This is useful to know that the high bits are sign extended bits.
4972   addPromotedInst(PromotedInsts, ExtOpnd, IsSExt);
4973   // Step #1.
4974   TPT.mutateType(ExtOpnd, Ext->getType());
4975   // Step #2.
4976   TPT.replaceAllUsesWith(Ext, ExtOpnd);
4977   // Step #3.
4978   LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n");
4979   for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
4980        ++OpIdx) {
4981     LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
4982     if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
4983         !shouldExtOperand(ExtOpnd, OpIdx)) {
4984       LLVM_DEBUG(dbgs() << "No need to propagate\n");
4985       continue;
4986     }
4987     // Check if we can statically extend the operand.
4988     Value *Opnd = ExtOpnd->getOperand(OpIdx);
4989     if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
4990       LLVM_DEBUG(dbgs() << "Statically extend\n");
4991       unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
4992       APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
4993                             : Cst->getValue().zext(BitWidth);
4994       TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
4995       continue;
4996     }
4997     // UndefValue are typed, so we have to statically sign extend them.
4998     // UndefValues are typed, so we have to statically extend them.
4999       LLVM_DEBUG(dbgs() << "Statically extend\n");
5000       TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
5001       continue;
5002     }
5003 
5004     // Otherwise we have to explicitly sign or zero extend the operand.
5005     Value *ValForExtOpnd = IsSExt
5006                                ? TPT.createSExt(ExtOpnd, Opnd, Ext->getType())
5007                                : TPT.createZExt(ExtOpnd, Opnd, Ext->getType());
5008     TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
5009     Instruction *InstForExtOpnd = dyn_cast<Instruction>(ValForExtOpnd);
5010     if (!InstForExtOpnd)
5011       continue;
5012 
5013     if (Exts)
5014       Exts->push_back(InstForExtOpnd);
5015 
5016     CreatedInstsCost += !TLI.isExtFree(InstForExtOpnd);
5017   }
5018   LLVM_DEBUG(dbgs() << "Extension is useless now\n");
5019   TPT.eraseInstruction(Ext);
5020   return ExtOpnd;
5021 }
5022 
5023 /// Check whether or not promoting an instruction to a wider type is profitable.
5024 /// \p NewCost gives the cost of extension instructions created by the
5025 /// promotion.
5026 /// \p OldCost gives the cost of extension instructions before the promotion
5027 /// plus the number of instructions that have been
5028 /// matched into the addressing mode thanks to the promotion.
5029 /// \p PromotedOperand is the value that has been promoted.
5030 /// \return True if the promotion is profitable, false otherwise.
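/// Illustrative example (hypothetical numbers): if the promotion created one
/// non-free extension (NewCost = 1) but the original extension was non-free
/// and one extra instruction was folded into the addressing mode
/// (OldCost = 1 + 1 = 2), the promotion is considered profitable.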
5031 bool AddressingModeMatcher::isPromotionProfitable(
5032     unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
5033   LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost
5034                     << '\n');
5035   // The cost of the new extensions is greater than the cost of the
5036   // old extension plus what we folded.
5037   // This is not profitable.
5038   if (NewCost > OldCost)
5039     return false;
5040   if (NewCost < OldCost)
5041     return true;
5042   // The promotion is neutral but it may help folding the sign extension in
5043   // loads for instance.
5044   // Check that we did not create an illegal instruction.
5045   return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
5046 }
5047 
5048 /// Given an instruction or constant expr, see if we can fold the operation
5049 /// into the addressing mode. If so, update the addressing mode and return
5050 /// true, otherwise return false without modifying AddrMode.
5051 /// If \p MovedAway is not NULL, it indicates whether or not AddrInst has to
5052 /// be folded into the addressing mode on success.
5053 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing
5054 /// mode because it has been moved away.
5055 /// Thus AddrInst must not be added to the matched instructions.
5056 /// This state can happen when AddrInst is a sext, since it may be moved away.
5057 /// Therefore, AddrInst may not be valid when MovedAway is true and it must
5058 /// not be referenced anymore.
5059 bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
5060                                                unsigned Depth,
5061                                                bool *MovedAway) {
5062   // Avoid exponential behavior on extremely deep expression trees.
5063   if (Depth >= 5)
5064     return false;
5065 
5066   // By default, all matched instructions stay in place.
5067   if (MovedAway)
5068     *MovedAway = false;
5069 
5070   switch (Opcode) {
5071   case Instruction::PtrToInt:
5072     // PtrToInt is always a noop, as we know that the int type is pointer sized.
5073     return matchAddr(AddrInst->getOperand(0), Depth);
5074   case Instruction::IntToPtr: {
5075     auto AS = AddrInst->getType()->getPointerAddressSpace();
5076     auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
5077     // This inttoptr is a no-op if the integer type is pointer sized.
5078     if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
5079       return matchAddr(AddrInst->getOperand(0), Depth);
5080     return false;
5081   }
5082   case Instruction::BitCast:
5083     // BitCast is always a noop, and we can handle it as long as it is
5084     // int->int or pointer->pointer (we don't want int<->fp or something).
5085     if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() &&
5086         // Don't touch identity bitcasts.  These were probably put here by LSR,
5087         // and we don't want to mess around with them.  Assume it knows what it
5088         // is doing.
5089         AddrInst->getOperand(0)->getType() != AddrInst->getType())
5090       return matchAddr(AddrInst->getOperand(0), Depth);
5091     return false;
5092   case Instruction::AddrSpaceCast: {
5093     unsigned SrcAS =
5094         AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
5095     unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
5096     if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS))
5097       return matchAddr(AddrInst->getOperand(0), Depth);
5098     return false;
5099   }
5100   case Instruction::Add: {
5101     // Check to see if we can merge in one operand, then the other.  If so, we
5102     // win.
5103     ExtAddrMode BackupAddrMode = AddrMode;
5104     unsigned OldSize = AddrModeInsts.size();
5105     // Start a transaction at this point.
5106     // The LHS may match but not the RHS.
5107     // Therefore, we need a higher level restoration point to undo partially
5108     // matched operation.
5109     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5110         TPT.getRestorationPoint();
5111 
5112     // Try to match an integer constant second to increase its chance of ending
5113     // up in `BaseOffs`, resp. decrease its chance of ending up in `BaseReg`.
5114     int First = 0, Second = 1;
5115     if (isa<ConstantInt>(AddrInst->getOperand(First))
5116       && !isa<ConstantInt>(AddrInst->getOperand(Second)))
5117         std::swap(First, Second);
5118     AddrMode.InBounds = false;
5119     if (matchAddr(AddrInst->getOperand(First), Depth + 1) &&
5120         matchAddr(AddrInst->getOperand(Second), Depth + 1))
5121       return true;
5122 
5123     // Restore the old addr mode info.
5124     AddrMode = BackupAddrMode;
5125     AddrModeInsts.resize(OldSize);
5126     TPT.rollback(LastKnownGood);
5127 
5128     // Otherwise this was over-aggressive.  Try merging operands in the opposite
5129     // order.
5130     if (matchAddr(AddrInst->getOperand(Second), Depth + 1) &&
5131         matchAddr(AddrInst->getOperand(First), Depth + 1))
5132       return true;
5133 
5134     // Otherwise we definitely can't merge the ADD in.
5135     AddrMode = BackupAddrMode;
5136     AddrModeInsts.resize(OldSize);
5137     TPT.rollback(LastKnownGood);
5138     break;
5139   }
5140   // case Instruction::Or:
5141   //  TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
5142   // break;
5143   case Instruction::Mul:
5144   case Instruction::Shl: {
5145     // Can only handle X*C and X << C.
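    // For example (illustrative): "shl i64 %x, 3" is handled as %x with
    // Scale 8, and "mul i64 %x, 12" as %x with Scale 12.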
5146     AddrMode.InBounds = false;
5147     ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
5148     if (!RHS || RHS->getBitWidth() > 64)
5149       return false;
5150     int64_t Scale = Opcode == Instruction::Shl
5151                         ? 1LL << RHS->getLimitedValue(RHS->getBitWidth() - 1)
5152                         : RHS->getSExtValue();
5153 
5154     return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
5155   }
5156   case Instruction::GetElementPtr: {
5157     // Scan the GEP. We check whether it contains constant offsets and at
5158     // most one variable offset.
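    // Illustrative example (hypothetical IR): for
    //   %p = getelementptr [16 x i32], ptr %base, i64 0, i64 %i
    // the constant indices contribute 0 to ConstantOffset and %i becomes the
    // single variable operand with scale 4, handed to matchScaledValue below.
    // A second variable index makes us give up.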
5159     int VariableOperand = -1;
5160     unsigned VariableScale = 0;
5161 
5162     int64_t ConstantOffset = 0;
5163     gep_type_iterator GTI = gep_type_begin(AddrInst);
5164     for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
5165       if (StructType *STy = GTI.getStructTypeOrNull()) {
5166         const StructLayout *SL = DL.getStructLayout(STy);
5167         unsigned Idx =
5168             cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
5169         ConstantOffset += SL->getElementOffset(Idx);
5170       } else {
5171         TypeSize TS = GTI.getSequentialElementStride(DL);
5172         if (TS.isNonZero()) {
5173           // The optimisations below currently only work for fixed offsets.
5174           if (TS.isScalable())
5175             return false;
5176           int64_t TypeSize = TS.getFixedValue();
5177           if (ConstantInt *CI =
5178                   dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
5179             const APInt &CVal = CI->getValue();
5180             if (CVal.getSignificantBits() <= 64) {
5181               ConstantOffset += CVal.getSExtValue() * TypeSize;
5182               continue;
5183             }
5184           }
5185           // We only allow one variable index at the moment.
5186           if (VariableOperand != -1)
5187             return false;
5188 
5189           // Remember the variable index.
5190           VariableOperand = i;
5191           VariableScale = TypeSize;
5192         }
5193       }
5194     }
5195 
5196     // A common case is for the GEP to only do a constant offset.  In this case,
5197     // just add it to the disp field and check validity.
5198     if (VariableOperand == -1) {
5199       AddrMode.BaseOffs += ConstantOffset;
5200       if (matchAddr(AddrInst->getOperand(0), Depth + 1)) {
5201           if (!cast<GEPOperator>(AddrInst)->isInBounds())
5202             AddrMode.InBounds = false;
5203           return true;
5204       }
5205       AddrMode.BaseOffs -= ConstantOffset;
5206 
5207       if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) &&
5208           TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 &&
5209           ConstantOffset > 0) {
5210           // Record GEPs with non-zero offsets as candidates for splitting in
5211           // the event that the offset cannot fit into the r+i addressing mode.
5212           // The simple and common case is that only one GEP is used to
5213           // calculate the address for the memory access.
5214           Value *Base = AddrInst->getOperand(0);
5215           auto *BaseI = dyn_cast<Instruction>(Base);
5216           auto *GEP = cast<GetElementPtrInst>(AddrInst);
5217           if (isa<Argument>(Base) || isa<GlobalValue>(Base) ||
5218               (BaseI && !isa<CastInst>(BaseI) &&
5219                !isa<GetElementPtrInst>(BaseI))) {
5220             // Make sure the parent block allows inserting non-PHI instructions
5221             // before the terminator.
5222             BasicBlock *Parent = BaseI ? BaseI->getParent()
5223                                        : &GEP->getFunction()->getEntryBlock();
5224             if (!Parent->getTerminator()->isEHPad())
5225             LargeOffsetGEP = std::make_pair(GEP, ConstantOffset);
5226           }
5227       }
5228 
5229       return false;
5230     }
5231 
5232     // Save the valid addressing mode in case we can't match.
5233     ExtAddrMode BackupAddrMode = AddrMode;
5234     unsigned OldSize = AddrModeInsts.size();
5235 
5236     // See if the scale and offset amount is valid for this target.
5237     AddrMode.BaseOffs += ConstantOffset;
5238     if (!cast<GEPOperator>(AddrInst)->isInBounds())
5239       AddrMode.InBounds = false;
5240 
5241     // Match the base operand of the GEP.
5242     if (!matchAddr(AddrInst->getOperand(0), Depth + 1)) {
5243       // If it couldn't be matched, just stuff the value in a register.
5244       if (AddrMode.HasBaseReg) {
5245         AddrMode = BackupAddrMode;
5246         AddrModeInsts.resize(OldSize);
5247         return false;
5248       }
5249       AddrMode.HasBaseReg = true;
5250       AddrMode.BaseReg = AddrInst->getOperand(0);
5251     }
5252 
5253     // Match the remaining variable portion of the GEP.
5254     if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
5255                           Depth)) {
5256       // If it couldn't be matched, try stuffing the base into a register
5257       // instead of matching it, and retrying the match of the scale.
5258       AddrMode = BackupAddrMode;
5259       AddrModeInsts.resize(OldSize);
5260       if (AddrMode.HasBaseReg)
5261         return false;
5262       AddrMode.HasBaseReg = true;
5263       AddrMode.BaseReg = AddrInst->getOperand(0);
5264       AddrMode.BaseOffs += ConstantOffset;
5265       if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
5266                             VariableScale, Depth)) {
5267         // If even that didn't work, bail.
5268         AddrMode = BackupAddrMode;
5269         AddrModeInsts.resize(OldSize);
5270         return false;
5271       }
5272     }
5273 
5274     return true;
5275   }
5276   case Instruction::SExt:
5277   case Instruction::ZExt: {
5278     Instruction *Ext = dyn_cast<Instruction>(AddrInst);
5279     if (!Ext)
5280       return false;
5281 
5282     // Try to move this ext out of the way of the addressing mode.
5283     // Ask for a method for doing so.
5284     TypePromotionHelper::Action TPH =
5285         TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
5286     if (!TPH)
5287       return false;
5288 
5289     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5290         TPT.getRestorationPoint();
5291     unsigned CreatedInstsCost = 0;
5292     unsigned ExtCost = !TLI.isExtFree(Ext);
5293     Value *PromotedOperand =
5294         TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
5295     // SExt has been moved away.
5296     // Thus either it will be rematched later in the recursive calls or it is
5297     // gone. Anyway, we must not fold it into the addressing mode at this point.
5298     // E.g.,
5299     // op = add opnd, 1
5300     // idx = ext op
5301     // addr = gep base, idx
5302     // is now:
5303     // promotedOpnd = ext opnd            <- no match here
5304     // op = promoted_add promotedOpnd, 1  <- match (later in recursive calls)
5305     // addr = gep base, op                <- match
5306     if (MovedAway)
5307       *MovedAway = true;
5308 
5309     assert(PromotedOperand &&
5310            "TypePromotionHelper should have filtered out those cases");
5311 
5312     ExtAddrMode BackupAddrMode = AddrMode;
5313     unsigned OldSize = AddrModeInsts.size();
5314 
5315     if (!matchAddr(PromotedOperand, Depth) ||
5316         // The total of the new cost is equal to the cost of the created
5317         // instructions.
5318         // The total of the old cost is equal to the cost of the extension plus
5319         // what we have saved in the addressing mode.
5320         !isPromotionProfitable(CreatedInstsCost,
5321                                ExtCost + (AddrModeInsts.size() - OldSize),
5322                                PromotedOperand)) {
5323       AddrMode = BackupAddrMode;
5324       AddrModeInsts.resize(OldSize);
5325       LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
5326       TPT.rollback(LastKnownGood);
5327       return false;
5328     }
5329     return true;
5330   }
5331   case Instruction::Call:
5332     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(AddrInst)) {
5333       if (II->getIntrinsicID() == Intrinsic::threadlocal_address) {
5334         GlobalValue &GV = cast<GlobalValue>(*II->getArgOperand(0));
5335         if (TLI.addressingModeSupportsTLS(GV))
5336           return matchAddr(AddrInst->getOperand(0), Depth);
5337       }
5338     }
5339     break;
5340   }
5341   return false;
5342 }
5343 
5344 /// If we can, try to add the value of 'Addr' into the current addressing mode.
5345 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode
5346 /// unmodified. This assumes that Addr is either a pointer type or intptr_t
5347 /// for the target.
5348 ///
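/// A minimal illustrative sketch (hypothetical IR, not from a particular
/// test): when matching the address of
///     %gep = getelementptr i8, ptr %base, i64 %idx
///     %val = load i32, ptr %gep
/// on a target that accepts [reg + reg] addressing, the matcher may end up
/// with AddrMode = { BaseReg = %base, ScaledReg = %idx, Scale = 1 }.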
5349 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
5350   // Start a transaction at this point that we will rollback if the matching
5351   // fails.
5352   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5353       TPT.getRestorationPoint();
5354   if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
5355     if (CI->getValue().isSignedIntN(64)) {
5356       // Fold in immediates if legal for the target.
5357       AddrMode.BaseOffs += CI->getSExtValue();
5358       if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5359         return true;
5360       AddrMode.BaseOffs -= CI->getSExtValue();
5361     }
5362   } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
5363     // If this is a global variable, try to fold it into the addressing mode.
5364     if (!AddrMode.BaseGV) {
5365       AddrMode.BaseGV = GV;
5366       if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5367         return true;
5368       AddrMode.BaseGV = nullptr;
5369     }
5370   } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
5371     ExtAddrMode BackupAddrMode = AddrMode;
5372     unsigned OldSize = AddrModeInsts.size();
5373 
5374     // Check to see if it is possible to fold this operation.
5375     bool MovedAway = false;
5376     if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
5377       // This instruction may have been moved away. If so, there is nothing
5378       // to check here.
5379       if (MovedAway)
5380         return true;
5381       // Okay, it's possible to fold this.  Check to see if it is actually
5382       // *profitable* to do so.  We use a simple cost model to avoid increasing
5383       // register pressure too much.
5384       if (I->hasOneUse() ||
5385           isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
5386         AddrModeInsts.push_back(I);
5387         return true;
5388       }
5389 
5390       // It isn't profitable to do this, roll back.
5391       AddrMode = BackupAddrMode;
5392       AddrModeInsts.resize(OldSize);
5393       TPT.rollback(LastKnownGood);
5394     }
5395   } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
5396     if (matchOperationAddr(CE, CE->getOpcode(), Depth))
5397       return true;
5398     TPT.rollback(LastKnownGood);
5399   } else if (isa<ConstantPointerNull>(Addr)) {
5400     // Null pointer gets folded without affecting the addressing mode.
5401     return true;
5402   }
5403 
5404   // Worst case, the target should support [reg] addressing modes. :)
5405   if (!AddrMode.HasBaseReg) {
5406     AddrMode.HasBaseReg = true;
5407     AddrMode.BaseReg = Addr;
5408     // Still check for legality in case the target supports [imm] but not [i+r].
5409     if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5410       return true;
5411     AddrMode.HasBaseReg = false;
5412     AddrMode.BaseReg = nullptr;
5413   }
5414 
5415   // If the base register is already taken, see if we can do [r+r].
5416   if (AddrMode.Scale == 0) {
5417     AddrMode.Scale = 1;
5418     AddrMode.ScaledReg = Addr;
5419     if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5420       return true;
5421     AddrMode.Scale = 0;
5422     AddrMode.ScaledReg = nullptr;
5423   }
5424   // Couldn't match.
5425   TPT.rollback(LastKnownGood);
5426   return false;
5427 }
5428 
5429 /// Check to see if all uses of OpVal by the specified inline asm call are due
5430 /// to memory operands. If so, return true, otherwise return false.
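/// Illustrative example (hypothetical, simplified IR): for an asm call such as
///     call void asm "prefetcht0 $0", "*m"(ptr elementtype(i8) %addr)
/// the "*m" constraint is an indirect memory operand, so a use of %addr by
/// this call would not block folding the computation of %addr into an
/// addressing mode.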
5431 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
5432                                     const TargetLowering &TLI,
5433                                     const TargetRegisterInfo &TRI) {
5434   const Function *F = CI->getFunction();
5435   TargetLowering::AsmOperandInfoVector TargetConstraints =
5436       TLI.ParseConstraints(F->getDataLayout(), &TRI, *CI);
5437 
5438   for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
5439     // Compute the constraint code and ConstraintType to use.
5440     TLI.ComputeConstraintToUse(OpInfo, SDValue());
5441 
5442     // If this asm operand is our Value*, and if it isn't an indirect memory
5443     // operand, we can't fold it!  TODO: Also handle C_Address?
5444     if (OpInfo.CallOperandVal == OpVal &&
5445         (OpInfo.ConstraintType != TargetLowering::C_Memory ||
5446          !OpInfo.isIndirect))
5447       return false;
5448   }
5449 
5450   return true;
5451 }
5452 
5453 /// Recursively walk all the uses of I until we find a memory use.
5454 /// If we find an obviously non-foldable instruction, return true.
5455 /// Add accessed addresses and types to MemoryUses.
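/// Illustrative sketch (hypothetical IR): if I is
///     %p = getelementptr i8, ptr %base, i64 %off
/// then a user "load i32, ptr %p" is recorded in MemoryUses, a user
/// "%q = getelementptr i8, ptr %p, i64 4" is walked recursively, and a
/// non-cold, non-inline-asm user "call void @f(ptr %p)" makes this return
/// true.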
5456 static bool FindAllMemoryUses(
5457     Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses,
5458     SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
5459     const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI,
5460     BlockFrequencyInfo *BFI, unsigned &SeenInsts) {
5461   // If we already considered this instruction, we're done.
5462   if (!ConsideredInsts.insert(I).second)
5463     return false;
5464 
5465   // If this is an obviously unfoldable instruction, bail out.
5466   if (!MightBeFoldableInst(I))
5467     return true;
5468 
5469   // Loop over all the uses, recursively processing them.
5470   for (Use &U : I->uses()) {
5471     // Conservatively return true if we're seeing a large number or a deep chain
5472     // of users. This avoids excessive compilation times in pathological cases.
5473     if (SeenInsts++ >= MaxAddressUsersToScan)
5474       return true;
5475 
5476     Instruction *UserI = cast<Instruction>(U.getUser());
5477     if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
5478       MemoryUses.push_back({&U, LI->getType()});
5479       continue;
5480     }
5481 
5482     if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
5483       if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
5484         return true; // Storing addr, not into addr.
5485       MemoryUses.push_back({&U, SI->getValueOperand()->getType()});
5486       continue;
5487     }
5488 
5489     if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
5490       if (U.getOperandNo() != AtomicRMWInst::getPointerOperandIndex())
5491         return true; // Storing addr, not into addr.
5492       MemoryUses.push_back({&U, RMW->getValOperand()->getType()});
5493       continue;
5494     }
5495 
5496     if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
5497       if (U.getOperandNo() != AtomicCmpXchgInst::getPointerOperandIndex())
5498         return true; // Storing addr, not into addr.
5499       MemoryUses.push_back({&U, CmpX->getCompareOperand()->getType()});
5500       continue;
5501     }
5502 
5503     if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
5504       if (CI->hasFnAttr(Attribute::Cold)) {
5505         // If this is a cold call, we can sink the addressing calculation into
5506         // the cold path.  See optimizeCallInst.
5507         if (!llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI))
5508           continue;
5509       }
5510 
5511       InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand());
5512       if (!IA)
5513         return true;
5514 
5515       // If this is a memory operand, we're cool, otherwise bail out.
5516       if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
5517         return true;
5518       continue;
5519     }
5520 
5521     if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
5522                           PSI, BFI, SeenInsts))
5523       return true;
5524   }
5525 
5526   return false;
5527 }
5528 
5529 static bool FindAllMemoryUses(
5530     Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses,
5531     const TargetLowering &TLI, const TargetRegisterInfo &TRI, bool OptSize,
5532     ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
5533   unsigned SeenInsts = 0;
5534   SmallPtrSet<Instruction *, 16> ConsideredInsts;
5535   return FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
5536                            PSI, BFI, SeenInsts);
5537 }
5538 
5540 /// Return true if Val is already known to be live at the use site that we're
5541 /// folding it into. If so, there is no cost to include it in the addressing
5542 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
5543 /// instruction already.
5544 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,
5545                                                    Value *KnownLive1,
5546                                                    Value *KnownLive2) {
5547   // If Val is either of the known-live values, we know it is live!
5548   if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
5549     return true;
5550 
5551   // All values other than instructions and arguments (e.g. constants) are live.
5552   if (!isa<Instruction>(Val) && !isa<Argument>(Val))
5553     return true;
5554 
5555   // If Val is a constant-sized alloca in the entry block, it is live; this is
5556   // true because it is just a reference to the stack/frame pointer, which is
5557   // live for the whole function.
5558   if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
5559     if (AI->isStaticAlloca())
5560       return true;
5561 
5562   // Check to see if this value is already used in the memory instruction's
5563   // block.  If so, it's already live into the block at the very least, so we
5564   // can reasonably fold it.
5565   return Val->isUsedInBasicBlock(MemoryInst->getParent());
5566 }
5567 
5568 /// It is possible for the addressing mode of the machine to fold the specified
5569 /// instruction into a load or store that ultimately uses it.
5570 /// However, the specified instruction has multiple uses.
5571 /// Given this, it may actually increase register pressure to fold it
5572 /// into the load. For example, consider this code:
5573 ///
5574 ///     X = ...
5575 ///     Y = X+1
5576 ///     use(Y)   -> nonload/store
5577 ///     Z = Y+1
5578 ///     load Z
5579 ///
5580 /// In this case, Y has multiple uses, and can be folded into the load of Z
5581 /// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1" to
5582 /// be live at the use(Y) line.  If we don't fold Y into load Z, we use one
5583 /// fewer register.  Since Y can't be folded into "use(Y)" we don't increase the
5584 /// number of computations either.
5585 ///
5586 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
5587 /// X was live across 'load Z' for other reasons, we actually *would* want to
5588 /// fold the addressing mode in the Z case.  This would make Y die earlier.
5589 bool AddressingModeMatcher::isProfitableToFoldIntoAddressingMode(
5590     Instruction *I, ExtAddrMode &AMBefore, ExtAddrMode &AMAfter) {
5591   if (IgnoreProfitability)
5592     return true;
5593 
5594   // AMBefore is the addressing mode before this instruction was folded into it,
5595   // and AMAfter is the addressing mode after the instruction was folded.  Get
5596   // the set of registers referenced by AMAfter and subtract out those
5597   // referenced by AMBefore: this is the set of values which folding in this
5598   // address extends the lifetime of.
5599   //
5600   // Note that there are only two potential values being referenced here,
5601   // BaseReg and ScaleReg (global addresses are always available, as are any
5602   // folded immediates).
5603   Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
5604 
5605   // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
5606   // lifetime wasn't extended by adding this instruction.
5607   if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
5608     BaseReg = nullptr;
5609   if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
5610     ScaledReg = nullptr;
5611 
5612   // If folding this instruction (and its subexprs) didn't extend any live
5613   // ranges, we're ok with it.
5614   if (!BaseReg && !ScaledReg)
5615     return true;
5616 
5617   // If all uses of this instruction can have the address mode sunk into them,
5618   // we can remove the addressing mode and effectively trade one live register
5619   // for another (at worst.)  In this context, folding an addressing mode into
5620   // the use is just a particularly nice way of sinking it.
5621   SmallVector<std::pair<Use *, Type *>, 16> MemoryUses;
5622   if (FindAllMemoryUses(I, MemoryUses, TLI, TRI, OptSize, PSI, BFI))
5623     return false; // Has a non-memory, non-foldable use!
5624 
5625   // Now that we know that all uses of this instruction are part of a chain of
5626   // computation involving only operations that could theoretically be folded
5627   // into a memory use, loop over each of these memory operation uses and see
5628   // if they could  *actually* fold the instruction.  The assumption is that
5629   // addressing modes are cheap and that duplicating the computation involved
5630   // many times is worthwhile, even on a fastpath. For sinking candidates
5631   // (i.e. cold call sites), this serves as a way to prevent excessive code
5632   // growth since most architectures have some reasonably small and fast way to
5633   // compute an effective address (e.g., LEA on x86).
5634   SmallVector<Instruction *, 32> MatchedAddrModeInsts;
5635   for (const std::pair<Use *, Type *> &Pair : MemoryUses) {
5636     Value *Address = Pair.first->get();
5637     Instruction *UserI = cast<Instruction>(Pair.first->getUser());
5638     Type *AddressAccessTy = Pair.second;
5639     unsigned AS = Address->getType()->getPointerAddressSpace();
5640 
5641     // Do a match against the root of this address, ignoring profitability. This
5642     // will tell us if the addressing mode for the memory operation will
5643     // *actually* cover the shared instruction.
5644     ExtAddrMode Result;
5645     std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5646                                                                       0);
5647     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5648         TPT.getRestorationPoint();
5649     AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, LI, getDTFn,
5650                                   AddressAccessTy, AS, UserI, Result,
5651                                   InsertedInsts, PromotedInsts, TPT,
5652                                   LargeOffsetGEP, OptSize, PSI, BFI);
5653     Matcher.IgnoreProfitability = true;
5654     bool Success = Matcher.matchAddr(Address, 0);
5655     (void)Success;
5656     assert(Success && "Couldn't select *anything*?");
5657 
5658     // The match was only to check profitability; the changes made are not
5659     // part of the original matcher. Therefore, they should be dropped,
5660     // otherwise the original matcher will not reflect the right state.
5661     TPT.rollback(LastKnownGood);
5662 
5663     // If the match didn't cover I, then it won't be shared by it.
5664     if (!is_contained(MatchedAddrModeInsts, I))
5665       return false;
5666 
5667     MatchedAddrModeInsts.clear();
5668   }
5669 
5670   return true;
5671 }
5672 
5673 /// Return true if the specified values are defined in a
5674 /// different basic block than BB.
5675 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
5676   if (Instruction *I = dyn_cast<Instruction>(V))
5677     return I->getParent() != BB;
5678   return false;
5679 }
5680 
5681 /// Sink addressing mode computation immediately before MemoryInst if doing so
5682 /// can be done without increasing register pressure.  The need for the
5683 /// register pressure constraint means this can end up being an all or nothing
5684 /// decision for all uses of the same addressing computation.
5685 ///
5686 /// Load and Store Instructions often have addressing modes that can do
5687 /// significant amounts of computation. As such, instruction selection will try
5688 /// to get the load or store to do as much computation as possible for the
5689 /// program. The problem is that isel can only see within a single block. As
5690 /// such, we sink as much legal addressing mode work into the block as possible.
5691 ///
5692 /// This method is used to optimize both load/store and inline asms with memory
5693 /// operands.  It's also used to sink addressing computations feeding into cold
5694 /// call sites into their (cold) basic block.
5695 ///
5696 /// The motivation for handling sinking into cold blocks is that doing so can
5697 /// both enable other address mode sinking (by satisfying the register pressure
5698 /// constraint above), and reduce register pressure globally (by removing the
5699 /// addressing mode computation from the fast path entirely).
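/// An illustrative sketch of the intent (hypothetical IR). Before:
///
///     entry:
///       %addr = getelementptr i8, ptr %base, i64 %off
///       br i1 %cond, label %use, label %exit
///     use:
///       %v = load i32, ptr %addr
///
/// After sinking, the addressing computation is rematerialized next to the
/// load so that instruction selection, which works one block at a time, can
/// fold it into the memory operation:
///
///     use:
///       %sunkaddr = getelementptr i8, ptr %base, i64 %off
///       %v = load i32, ptr %sunkaddr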
5700 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
5701                                         Type *AccessTy, unsigned AddrSpace) {
5702   Value *Repl = Addr;
5703 
5704   // Try to collapse single-value PHI nodes.  This is necessary to undo
5705   // unprofitable PRE transformations.
5706   SmallVector<Value *, 8> worklist;
5707   SmallPtrSet<Value *, 16> Visited;
5708   worklist.push_back(Addr);
5709 
5710   // Use a worklist to iteratively look through PHI and select nodes, and
5711   // ensure that the addressing mode obtained from the non-PHI/select roots of
5712   // the graph are compatible.
5713   bool PhiOrSelectSeen = false;
5714   SmallVector<Instruction *, 16> AddrModeInsts;
5715   const SimplifyQuery SQ(*DL, TLInfo);
5716   AddressingModeCombiner AddrModes(SQ, Addr);
5717   TypePromotionTransaction TPT(RemovedInsts);
5718   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5719       TPT.getRestorationPoint();
5720   while (!worklist.empty()) {
5721     Value *V = worklist.pop_back_val();
5722 
5723     // We allow traversing cyclic Phi nodes.
5724     // In case of success, after this loop we ensure that traversing through
5725     // Phi nodes ends up with all cases computing an address of the form
5726     //    BaseGV + Base + Scale * Index + Offset
5727     // where Scale and Offset are constants and BaseGV, Base and Index
5728     // are exactly the same Values in all cases.
5729     // This means that BaseGV, Scale and Offset dominate our memory instruction
5730     // and have the same values as in the address computation represented by
5731     // the Phi, so we can safely sink it down to the memory instruction.
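    // Illustrative sketch (hypothetical IR): for
    //   %a0 = getelementptr i8, ptr %base, i64 16   ; in %bb0
    //   %a1 = getelementptr i8, ptr %base, i64 16   ; in %bb1
    //   %p  = phi ptr [ %a0, %bb0 ], [ %a1, %bb1 ]
    // both roots yield the same { Base = %base, Offset = 16 } mode, so the
    // address computation can be sunk past the phi to the memory instruction.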
5732     if (!Visited.insert(V).second)
5733       continue;
5734 
5735     // For a PHI node, push all of its incoming values.
5736     if (PHINode *P = dyn_cast<PHINode>(V)) {
5737       append_range(worklist, P->incoming_values());
5738       PhiOrSelectSeen = true;
5739       continue;
5740     }
5741     // Similar for select.
5742     if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
5743       worklist.push_back(SI->getFalseValue());
5744       worklist.push_back(SI->getTrueValue());
5745       PhiOrSelectSeen = true;
5746       continue;
5747     }
5748 
5749     // For non-PHIs, determine the addressing mode being computed.  Note that
5750     // the result may differ depending on what other uses our candidate
5751     // addressing instructions might have.
5752     AddrModeInsts.clear();
5753     std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5754                                                                       0);
5755     // Defer the query for (and possible computation of) the dom tree to the
5756     // point of actual use.  It's expected that most address matches don't
5757     // actually need the domtree.
5758     auto getDTFn = [MemoryInst, this]() -> const DominatorTree & {
5759       Function *F = MemoryInst->getParent()->getParent();
5760       return this->getDT(*F);
5761     };
5762     ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
5763         V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI, getDTFn,
5764         *TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
5765         BFI.get());
5766 
5767     GetElementPtrInst *GEP = LargeOffsetGEP.first;
5768     if (GEP && !NewGEPBases.count(GEP)) {
5769       // If splitting the underlying data structure can reduce the offset of a
5770       // GEP, collect the GEP.  Skip the GEPs that are the new bases of
5771       // previously split data structures.
5772       LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
5773       LargeOffsetGEPID.insert(std::make_pair(GEP, LargeOffsetGEPID.size()));
5774     }
5775 
5776     NewAddrMode.OriginalValue = V;
5777     if (!AddrModes.addNewAddrMode(NewAddrMode))
5778       break;
5779   }
5780 
5781   // Try to combine the AddrModes we've collected. If we couldn't collect any,
5782   // or we have multiple but either couldn't combine them or combining them
5783   // wouldn't do anything useful, bail out now.
5784   if (!AddrModes.combineAddrModes()) {
5785     TPT.rollback(LastKnownGood);
5786     return false;
5787   }
5788   bool Modified = TPT.commit();
5789 
5790   // Get the combined AddrMode (or the only AddrMode, if we only had one).
5791   ExtAddrMode AddrMode = AddrModes.getAddrMode();
5792 
5793   // If all the instructions matched are already in this BB, don't do anything.
5794   // If we saw a Phi node then it is definitely not local, and if we saw a
5795   // select then we want to push the address calculation past it even if it's
5796   // already in this BB.
5797   if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
5798         return IsNonLocalValue(V, MemoryInst->getParent());
5799       })) {
5800     LLVM_DEBUG(dbgs() << "CGP: Found      local addrmode: " << AddrMode
5801                       << "\n");
5802     return Modified;
5803   }
5804 
5805   // Insert this computation right after this user.  Since our caller is
5806   // scanning from the top of the BB to the bottom, reuses of the expr are
5807   // guaranteed to happen later.
5808   IRBuilder<> Builder(MemoryInst);
5809 
5810   // Now that we've determined the addressing expression we want to use and
5811   // know that we have to sink it into this block, check to see if we have
5812   // already done this for some other load/store instr in this block.  If so,
5813   // reuse the computation.  Before attempting reuse, check if the address is
5814   // valid as it may have been erased.
5815 
5816   WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];
5817 
5818   Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
5819   Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
5820   if (SunkAddr) {
5821     LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
5822                       << " for " << *MemoryInst << "\n");
5823     if (SunkAddr->getType() != Addr->getType()) {
5824       if (SunkAddr->getType()->getPointerAddressSpace() !=
5825               Addr->getType()->getPointerAddressSpace() &&
5826           !DL->isNonIntegralPointerType(Addr->getType())) {
5827         // There are two reasons the address spaces might not match: a no-op
5828         // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
5829         // ptrtoint/inttoptr pair to ensure we match the original semantics.
5830         // TODO: allow bitcast between different address space pointers with the
5831         // same size.
5832         SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
5833         SunkAddr =
5834             Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
5835       } else
5836         SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5837     }
5838   } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
5839                                    SubtargetInfo->addrSinkUsingGEPs())) {
5840     // By default, we use the GEP-based method when AA is used later. This
5841     // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
5842     LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5843                       << " for " << *MemoryInst << "\n");
5844     Value *ResultPtr = nullptr, *ResultIndex = nullptr;
5845 
5846     // First, find the pointer.
5847     if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
5848       ResultPtr = AddrMode.BaseReg;
5849       AddrMode.BaseReg = nullptr;
5850     }
5851 
5852     if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
5853       // We can't add more than one pointer together, nor can we scale a
5854       // pointer (both of which seem meaningless).
5855       if (ResultPtr || AddrMode.Scale != 1)
5856         return Modified;
5857 
5858       ResultPtr = AddrMode.ScaledReg;
5859       AddrMode.Scale = 0;
5860     }
5861 
5862     // It is only safe to sign extend the BaseReg if we know that the math
5863     // required to create it did not overflow before we extend it. Since
5864     // the original IR value was tossed in favor of a constant back when
5865     // the AddrMode was created we need to bail out gracefully if widths
5866     // do not match instead of extending it.
5867     //
5868     // (See below for code to add the scale.)
5869     if (AddrMode.Scale) {
5870       Type *ScaledRegTy = AddrMode.ScaledReg->getType();
5871       if (cast<IntegerType>(IntPtrTy)->getBitWidth() >
5872           cast<IntegerType>(ScaledRegTy)->getBitWidth())
5873         return Modified;
5874     }
5875 
5876     GlobalValue *BaseGV = AddrMode.BaseGV;
5877     if (BaseGV != nullptr) {
5878       if (ResultPtr)
5879         return Modified;
5880 
5881       if (BaseGV->isThreadLocal()) {
5882         ResultPtr = Builder.CreateThreadLocalAddress(BaseGV);
5883       } else {
5884         ResultPtr = BaseGV;
5885       }
5886     }
5887 
5888     // If the real base value actually came from an inttoptr, then the matcher
5889     // will look through it and provide only the integer value. In that case,
5890     // use it here.
5891     if (!DL->isNonIntegralPointerType(Addr->getType())) {
5892       if (!ResultPtr && AddrMode.BaseReg) {
5893         ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(),
5894                                            "sunkaddr");
5895         AddrMode.BaseReg = nullptr;
5896       } else if (!ResultPtr && AddrMode.Scale == 1) {
5897         ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(),
5898                                            "sunkaddr");
5899         AddrMode.Scale = 0;
5900       }
5901     }
5902 
5903     if (!ResultPtr && !AddrMode.BaseReg && !AddrMode.Scale &&
5904         !AddrMode.BaseOffs) {
5905       SunkAddr = Constant::getNullValue(Addr->getType());
5906     } else if (!ResultPtr) {
5907       return Modified;
5908     } else {
5909       Type *I8PtrTy =
5910           Builder.getPtrTy(Addr->getType()->getPointerAddressSpace());
5911 
5912       // Start with the base register. Do this first so that subsequent address
5913       // matching finds it last, which will prevent it from trying to match it
5914       // as the scaled value in case it happens to be a mul. That would be
5915       // problematic if we've sunk a different mul for the scale, because then
5916       // we'd end up sinking both muls.
5917       if (AddrMode.BaseReg) {
5918         Value *V = AddrMode.BaseReg;
5919         if (V->getType() != IntPtrTy)
5920           V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
5921 
5922         ResultIndex = V;
5923       }
5924 
5925       // Add the scale value.
5926       if (AddrMode.Scale) {
5927         Value *V = AddrMode.ScaledReg;
5928         if (V->getType() == IntPtrTy) {
5929           // done.
5930         } else {
5931           assert(cast<IntegerType>(IntPtrTy)->getBitWidth() <
5932                      cast<IntegerType>(V->getType())->getBitWidth() &&
5933                  "We can't transform if ScaledReg is too narrow");
5934           V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
5935         }
5936 
5937         if (AddrMode.Scale != 1)
5938           V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
5939                                 "sunkaddr");
5940         if (ResultIndex)
5941           ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
5942         else
5943           ResultIndex = V;
5944       }
5945 
5946       // Add in the Base Offset if present.
5947       if (AddrMode.BaseOffs) {
5948         Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
5949         if (ResultIndex) {
5950           // We need to add this separately from the scale above to help with
5951           // SDAG consecutive load/store merging.
5952           if (ResultPtr->getType() != I8PtrTy)
5953             ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
5954           ResultPtr = Builder.CreatePtrAdd(ResultPtr, ResultIndex, "sunkaddr",
5955                                            AddrMode.InBounds);
5956         }
5957 
5958         ResultIndex = V;
5959       }
5960 
5961       if (!ResultIndex) {
5962         SunkAddr = ResultPtr;
5963       } else {
5964         if (ResultPtr->getType() != I8PtrTy)
5965           ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
5966         SunkAddr = Builder.CreatePtrAdd(ResultPtr, ResultIndex, "sunkaddr",
5967                                         AddrMode.InBounds);
5968       }
5969 
5970       if (SunkAddr->getType() != Addr->getType()) {
5971         if (SunkAddr->getType()->getPointerAddressSpace() !=
5972                 Addr->getType()->getPointerAddressSpace() &&
5973             !DL->isNonIntegralPointerType(Addr->getType())) {
5974           // There are two reasons the address spaces might not match: a no-op
5975           // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
5976           // ptrtoint/inttoptr pair to ensure we match the original semantics.
5977           // TODO: allow bitcast between different address space pointers with
5978           // the same size.
5979           SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
5980           SunkAddr =
5981               Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
5982         } else
5983           SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5984       }
5985     }
5986   } else {
5987     // We'd require a ptrtoint/inttoptr down the line, which we can't do for
5988     // non-integral pointers, so in that case bail out now.
5989     Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr;
5990     Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr;
5991     PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy);
5992     PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy);
5993     if (DL->isNonIntegralPointerType(Addr->getType()) ||
5994         (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) ||
5995         (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) ||
5996         (AddrMode.BaseGV &&
5997          DL->isNonIntegralPointerType(AddrMode.BaseGV->getType())))
5998       return Modified;
5999 
6000     LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
6001                       << " for " << *MemoryInst << "\n");
6002     Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
6003     Value *Result = nullptr;
6004 
6005     // Start with the base register. Do this first so that subsequent address
6006     // matching finds it last, which will prevent it from trying to match it
6007     // as the scaled value in case it happens to be a mul. That would be
6008     // problematic if we've sunk a different mul for the scale, because then
6009     // we'd end up sinking both muls.
6010     if (AddrMode.BaseReg) {
6011       Value *V = AddrMode.BaseReg;
6012       if (V->getType()->isPointerTy())
6013         V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
6014       if (V->getType() != IntPtrTy)
6015         V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
6016       Result = V;
6017     }
6018 
6019     // Add the scale value.
6020     if (AddrMode.Scale) {
6021       Value *V = AddrMode.ScaledReg;
6022       if (V->getType() == IntPtrTy) {
6023         // done.
6024       } else if (V->getType()->isPointerTy()) {
6025         V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
6026       } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
6027                  cast<IntegerType>(V->getType())->getBitWidth()) {
6028         V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
6029       } else {
6030         // It is only safe to sign extend the BaseReg if we know that the math
6031         // required to create it did not overflow before we extend it. Since
6032         // the original IR value was tossed in favor of a constant back when
6033         // the AddrMode was created we need to bail out gracefully if widths
6034         // do not match instead of extending it.
6035         Instruction *I = dyn_cast_or_null<Instruction>(Result);
6036         if (I && (Result != AddrMode.BaseReg))
6037           I->eraseFromParent();
6038         return Modified;
6039       }
6040       if (AddrMode.Scale != 1)
6041         V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
6042                               "sunkaddr");
6043       if (Result)
6044         Result = Builder.CreateAdd(Result, V, "sunkaddr");
6045       else
6046         Result = V;
6047     }
6048 
6049     // Add in the BaseGV if present.
6050     GlobalValue *BaseGV = AddrMode.BaseGV;
6051     if (BaseGV != nullptr) {
6052       Value *BaseGVPtr;
6053       if (BaseGV->isThreadLocal()) {
6054         BaseGVPtr = Builder.CreateThreadLocalAddress(BaseGV);
6055       } else {
6056         BaseGVPtr = BaseGV;
6057       }
6058       Value *V = Builder.CreatePtrToInt(BaseGVPtr, IntPtrTy, "sunkaddr");
6059       if (Result)
6060         Result = Builder.CreateAdd(Result, V, "sunkaddr");
6061       else
6062         Result = V;
6063     }
6064 
6065     // Add in the Base Offset if present.
6066     if (AddrMode.BaseOffs) {
6067       Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
6068       if (Result)
6069         Result = Builder.CreateAdd(Result, V, "sunkaddr");
6070       else
6071         Result = V;
6072     }
6073 
6074     if (!Result)
6075       SunkAddr = Constant::getNullValue(Addr->getType());
6076     else
6077       SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
6078   }
6079 
6080   MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
6081   // Store the newly computed address into the cache. In the case we reused a
6082   // value, this should be idempotent.
6083   SunkAddrs[Addr] = WeakTrackingVH(SunkAddr);
6084 
6085   // If we have no uses, recursively delete the value and all dead instructions
6086   // using it.
6087   if (Repl->use_empty()) {
6088     resetIteratorIfInvalidatedWhileCalling(CurInstIterator->getParent(), [&]() {
6089       RecursivelyDeleteTriviallyDeadInstructions(
6090           Repl, TLInfo, nullptr,
6091           [&](Value *V) { removeAllAssertingVHReferences(V); });
6092     });
6093   }
6094   ++NumMemoryInsts;
6095   return true;
6096 }
6097 
6098 /// Rewrite GEP input to gather/scatter to enable SelectionDAGBuilder to find
6099 /// a uniform base to use for ISD::MGATHER/MSCATTER. SelectionDAGBuilder can
6100 /// only handle a 2 operand GEP in the same basic block or a splat constant
6101 /// vector. The 2 operands to the GEP must have a scalar pointer and a vector
6102 /// index.
6103 ///
6104 /// If the existing GEP has a vector base pointer that is splat, we can look
6105 /// through the splat to find the scalar pointer. If we can't find a scalar
6106 /// pointer there's nothing we can do.
6107 ///
6108 /// If we have a GEP with more than 2 indices where the middle indices are all
6109 /// zeroes, we can replace it with 2 GEPs where the second has 2 operands.
6110 ///
6111 /// If the final index isn't a vector or is a splat, we can emit a scalar GEP
6112 /// followed by a GEP with an all zeroes vector index. This will enable
6113 /// SelectionDAGBuilder to use the scalar GEP as the uniform base and have a
6114 /// zero index.
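/// Illustrative sketch (hypothetical IR): a gather whose address is the
/// three-operand GEP
///     %gep = getelementptr [64 x i32], ptr %base, i64 0, <4 x i64> %idx
/// can be rewritten as a scalar GEP followed by a two-operand GEP with a
/// scalar pointer and a vector index,
///     %base2 = getelementptr [64 x i32], ptr %base, i64 0, i64 0
///     %gep2  = getelementptr i32, ptr %base2, <4 x i64> %idx
/// which SelectionDAGBuilder can consume as a uniform base plus vector index.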
6115 bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst,
6116                                                Value *Ptr) {
6117   Value *NewAddr;
6118 
6119   if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
6120     // Don't optimize GEPs that don't have indices.
6121     if (!GEP->hasIndices())
6122       return false;
6123 
6124     // If the GEP and the gather/scatter aren't in the same BB, don't optimize.
6125     // FIXME: We should support this by sinking the GEP.
6126     if (MemoryInst->getParent() != GEP->getParent())
6127       return false;
6128 
6129     SmallVector<Value *, 2> Ops(GEP->operands());
6130 
6131     bool RewriteGEP = false;
6132 
6133     if (Ops[0]->getType()->isVectorTy()) {
6134       Ops[0] = getSplatValue(Ops[0]);
6135       if (!Ops[0])
6136         return false;
6137       RewriteGEP = true;
6138     }
6139 
6140     unsigned FinalIndex = Ops.size() - 1;
6141 
6142     // Ensure all but the last index is 0.
6143     // FIXME: This isn't strictly required. All that's required is that they are
6144     // all scalars or splats.
6145     for (unsigned i = 1; i < FinalIndex; ++i) {
6146       auto *C = dyn_cast<Constant>(Ops[i]);
6147       if (!C)
6148         return false;
6149       if (isa<VectorType>(C->getType()))
6150         C = C->getSplatValue();
6151       auto *CI = dyn_cast_or_null<ConstantInt>(C);
6152       if (!CI || !CI->isZero())
6153         return false;
6154       // Scalarize the index if needed.
6155       Ops[i] = CI;
6156     }
6157 
6158     // Try to scalarize the final index.
6159     if (Ops[FinalIndex]->getType()->isVectorTy()) {
6160       if (Value *V = getSplatValue(Ops[FinalIndex])) {
6161         auto *C = dyn_cast<ConstantInt>(V);
6162         // Don't scalarize an all-zeros vector.
6163         if (!C || !C->isZero()) {
6164           Ops[FinalIndex] = V;
6165           RewriteGEP = true;
6166         }
6167       }
6168     }
6169 
6170     // If we made any changes or we have extra operands, we need to generate
6171     // new instructions.
6172     if (!RewriteGEP && Ops.size() == 2)
6173       return false;
6174 
6175     auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
6176 
6177     IRBuilder<> Builder(MemoryInst);
6178 
6179     Type *SourceTy = GEP->getSourceElementType();
6180     Type *ScalarIndexTy = DL->getIndexType(Ops[0]->getType()->getScalarType());
6181 
6182     // If the final index isn't a vector, emit a scalar GEP containing all ops
6183     // and a vector GEP with all zeroes final index.
6184     if (!Ops[FinalIndex]->getType()->isVectorTy()) {
6185       NewAddr = Builder.CreateGEP(SourceTy, Ops[0], ArrayRef(Ops).drop_front());
6186       auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
6187       auto *SecondTy = GetElementPtrInst::getIndexedType(
6188           SourceTy, ArrayRef(Ops).drop_front());
6189       NewAddr =
6190           Builder.CreateGEP(SecondTy, NewAddr, Constant::getNullValue(IndexTy));
6191     } else {
6192       Value *Base = Ops[0];
6193       Value *Index = Ops[FinalIndex];
6194 
6195       // Create a scalar GEP if there are more than 2 operands.
6196       if (Ops.size() != 2) {
6197         // Replace the last index with 0.
6198         Ops[FinalIndex] =
6199             Constant::getNullValue(Ops[FinalIndex]->getType()->getScalarType());
6200         Base = Builder.CreateGEP(SourceTy, Base, ArrayRef(Ops).drop_front());
6201         SourceTy = GetElementPtrInst::getIndexedType(
6202             SourceTy, ArrayRef(Ops).drop_front());
6203       }
6204 
6205       // Now create the GEP with scalar pointer and vector index.
6206       NewAddr = Builder.CreateGEP(SourceTy, Base, Index);
6207     }
6208   } else if (!isa<Constant>(Ptr)) {
6209     // Not a GEP, maybe it's a splat and we can create a GEP to enable
6210     // SelectionDAGBuilder to use it as a uniform base.
6211     Value *V = getSplatValue(Ptr);
6212     if (!V)
6213       return false;
6214 
6215     auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
6216 
6217     IRBuilder<> Builder(MemoryInst);
6218 
6219     // Emit a vector GEP with a scalar pointer and all 0s vector index.
6220     Type *ScalarIndexTy = DL->getIndexType(V->getType()->getScalarType());
6221     auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
6222     Type *ScalarTy;
6223     if (cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
6224         Intrinsic::masked_gather) {
6225       ScalarTy = MemoryInst->getType()->getScalarType();
6226     } else {
6227       assert(cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
6228              Intrinsic::masked_scatter);
6229       ScalarTy = MemoryInst->getOperand(0)->getType()->getScalarType();
6230     }
6231     NewAddr = Builder.CreateGEP(ScalarTy, V, Constant::getNullValue(IndexTy));
6232   } else {
6233     // Constant, SelectionDAGBuilder knows to check if it's a splat.
6234     return false;
6235   }
6236 
6237   MemoryInst->replaceUsesOfWith(Ptr, NewAddr);
6238 
6239   // If we have no uses, recursively delete the value and all dead instructions
6240   // using it.
6241   if (Ptr->use_empty())
6242     RecursivelyDeleteTriviallyDeadInstructions(
6243         Ptr, TLInfo, nullptr,
6244         [&](Value *V) { removeAllAssertingVHReferences(V); });
6245 
6246   return true;
6247 }
6248 
6249 /// If there are any memory operands, use optimizeMemoryInst to sink their
6250 /// address computation into the block when possible / profitable.
6251 bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
6252   bool MadeChange = false;
6253 
6254   const TargetRegisterInfo *TRI =
6255       TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo();
6256   TargetLowering::AsmOperandInfoVector TargetConstraints =
6257       TLI->ParseConstraints(*DL, TRI, *CS);
6258   unsigned ArgNo = 0;
6259   for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
6260     // Compute the constraint code and ConstraintType to use.
6261     TLI->ComputeConstraintToUse(OpInfo, SDValue());
6262 
6263     // TODO: Also handle C_Address?
6264     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
6265         OpInfo.isIndirect) {
6266       Value *OpVal = CS->getArgOperand(ArgNo++);
6267       MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
6268     } else if (OpInfo.Type == InlineAsm::isInput)
6269       ArgNo++;
6270   }
6271 
6272   return MadeChange;
6273 }
6274 
6275 /// Check if all the uses of \p Val are equivalent (or free) zero or
6276 /// sign extensions.
6277 static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) {
6278   assert(!Val->use_empty() && "Input must have at least one use");
6279   const Instruction *FirstUser = cast<Instruction>(*Val->user_begin());
6280   bool IsSExt = isa<SExtInst>(FirstUser);
6281   Type *ExtTy = FirstUser->getType();
6282   for (const User *U : Val->users()) {
6283     const Instruction *UI = cast<Instruction>(U);
6284     if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
6285       return false;
6286     Type *CurTy = UI->getType();
6287     // Same input and output types: Same instruction after CSE.
6288     if (CurTy == ExtTy)
6289       continue;
6290 
6291     // If IsSExt is true, we are in this situation:
6292     // a = Val
6293     // b = sext ty1 a to ty2
6294     // c = sext ty1 a to ty3
6295     // Assuming ty2 is shorter than ty3, this could be turned into:
6296     // a = Val
6297     // b = sext ty1 a to ty2
6298     // c = sext ty2 b to ty3
6299     // However, the last sext is not free.
6300     if (IsSExt)
6301       return false;
6302 
6303     // This is a ZExt, maybe this is free to extend from one type to another.
6304     // In that case, we would not account for a different use.
6305     Type *NarrowTy;
6306     Type *LargeTy;
6307     if (ExtTy->getScalarType()->getIntegerBitWidth() >
6308         CurTy->getScalarType()->getIntegerBitWidth()) {
6309       NarrowTy = CurTy;
6310       LargeTy = ExtTy;
6311     } else {
6312       NarrowTy = ExtTy;
6313       LargeTy = CurTy;
6314     }
6315 
6316     if (!TLI.isZExtFree(NarrowTy, LargeTy))
6317       return false;
6318   }
6319   // All uses are the same or can be derived from one another for free.
6320   return true;
6321 }
6322 
6323 /// Try to speculatively promote extensions in \p Exts and continue
6324 /// promoting through newly promoted operands recursively as far as doing so is
6325 /// profitable. Save the extensions profitably moved up in \p ProfitablyMovedExts.
6326 /// When some promotion happened, \p TPT contains the proper state to revert
6327 /// them.
6328 ///
6329 /// \return true if some promotion happened, false otherwise.
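/// Illustrative sketch (hypothetical IR): for
///     %ld  = load i32, ptr %p
///     %add = add nsw i32 %ld, 1
///     %ext = sext i32 %add to i64
/// the sext may be speculatively promoted through the add,
///     %ld     = load i32, ptr %p
///     %ld.ext = sext i32 %ld to i64
///     %add.p  = add nsw i64 %ld.ext, 1
/// exposing an ext(load) pair that may later be folded into an extending load.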
6330 bool CodeGenPrepare::tryToPromoteExts(
6331     TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts,
6332     SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
6333     unsigned CreatedInstsCost) {
6334   bool Promoted = false;
6335 
6336   // Iterate over all the extensions to try to promote them.
6337   for (auto *I : Exts) {
6338     // Early check if we directly have ext(load).
6339     if (isa<LoadInst>(I->getOperand(0))) {
6340       ProfitablyMovedExts.push_back(I);
6341       continue;
6342     }
6343 
6344     // Check whether or not we want to do any promotion.  The reason we have
6345     // this check inside the for loop is to catch the case where an extension
6346     // is directly fed by a load because in such case the extension can be moved
6347     // up without any promotion on its operands.
6348     if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion)
6349       return false;
6350 
6351     // Get the action to perform the promotion.
6352     TypePromotionHelper::Action TPH =
6353         TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts);
6354     // Check if we can promote.
6355     if (!TPH) {
6356       // Save the current extension as we cannot move up through its operand.
6357       ProfitablyMovedExts.push_back(I);
6358       continue;
6359     }
6360 
6361     // Save the current state.
6362     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
6363         TPT.getRestorationPoint();
6364     SmallVector<Instruction *, 4> NewExts;
6365     unsigned NewCreatedInstsCost = 0;
6366     unsigned ExtCost = !TLI->isExtFree(I);
6367     // Promote.
6368     Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
6369                              &NewExts, nullptr, *TLI);
6370     assert(PromotedVal &&
6371            "TypePromotionHelper should have filtered out those cases");
6372 
6373     // Only one extension can be merged into a load.
6374     // Therefore, if we have more than 1 new extension we heuristically
6375     // cut this search path, because it means we degrade the code quality.
6376     // With exactly 2, the transformation is neutral, because we will merge
6377     // one extension but leave one. However, we optimistically keep going,
6378     // because the new extension may be removed too. Also avoid replacing a
6379     // single free extension with multiple extensions, as this increases the
6380     // number of IR instructions while not providing any savings.
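    // For instance (illustrative): promoting a free extension (ExtCost == 0)
    // that yields two new extensions is rolled back below, while promoting a
    // non-free extension that creates one new instruction has a net cost of
    // zero and is kept, subject to the legality check below.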
6381     long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
6382     // FIXME: It would be possible to propagate a negative value instead of
6383     // conservatively ceiling it to 0.
6384     TotalCreatedInstsCost =
6385         std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
6386     if (!StressExtLdPromotion &&
6387         (TotalCreatedInstsCost > 1 ||
6388          !isPromotedInstructionLegal(*TLI, *DL, PromotedVal) ||
6389          (ExtCost == 0 && NewExts.size() > 1))) {
6390       // This promotion is not profitable, rollback to the previous state, and
6391       // save the current extension in ProfitablyMovedExts as the latest
6392       // speculative promotion turned out to be unprofitable.
6393       TPT.rollback(LastKnownGood);
6394       ProfitablyMovedExts.push_back(I);
6395       continue;
6396     }
6397     // Continue promoting NewExts as far as doing so is profitable.
6398     SmallVector<Instruction *, 2> NewlyMovedExts;
6399     (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
6400     bool NewPromoted = false;
6401     for (auto *ExtInst : NewlyMovedExts) {
6402       Instruction *MovedExt = cast<Instruction>(ExtInst);
6403       Value *ExtOperand = MovedExt->getOperand(0);
6404       // If we have reached a load, we need this extra profitability check
6405       // as it could potentially be merged into an ext(load).
6406       if (isa<LoadInst>(ExtOperand) &&
6407           !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
6408             (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
6409         continue;
6410 
6411       ProfitablyMovedExts.push_back(MovedExt);
6412       NewPromoted = true;
6413     }
6414 
6415     // If none of speculative promotions for NewExts is profitable, rollback
6416     // and save the current extension (I) as the last profitable extension.
6417     if (!NewPromoted) {
6418       TPT.rollback(LastKnownGood);
6419       ProfitablyMovedExts.push_back(I);
6420       continue;
6421     }
6422     // The promotion is profitable.
6423     Promoted = true;
6424   }
6425   return Promoted;
6426 }
6427 
6428 /// Merge redundant sexts when one dominates the other.
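/// Illustrative sketch (hypothetical IR): given
///     bb1:  %s1 = sext i32 %a to i64
///     bb2:  %s2 = sext i32 %a to i64    ; bb1 dominates bb2
/// the dominated %s2 is replaced by %s1 and removed.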
6429 bool CodeGenPrepare::mergeSExts(Function &F) {
6430   bool Changed = false;
6431   for (auto &Entry : ValToSExtendedUses) {
6432     SExts &Insts = Entry.second;
6433     SExts CurPts;
6434     for (Instruction *Inst : Insts) {
6435       if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
6436           Inst->getOperand(0) != Entry.first)
6437         continue;
6438       bool inserted = false;
6439       for (auto &Pt : CurPts) {
6440         if (getDT(F).dominates(Inst, Pt)) {
6441           replaceAllUsesWith(Pt, Inst, FreshBBs, IsHugeFunc);
6442           RemovedInsts.insert(Pt);
6443           Pt->removeFromParent();
6444           Pt = Inst;
6445           inserted = true;
6446           Changed = true;
6447           break;
6448         }
6449         if (!getDT(F).dominates(Pt, Inst))
6450           // Give up if we need to merge in a common dominator as the
6451           // experiments show it is not profitable.
6452           continue;
6453         replaceAllUsesWith(Inst, Pt, FreshBBs, IsHugeFunc);
6454         RemovedInsts.insert(Inst);
6455         Inst->removeFromParent();
6456         inserted = true;
6457         Changed = true;
6458         break;
6459       }
6460       if (!inserted)
6461         CurPts.push_back(Inst);
6462     }
6463   }
6464   return Changed;
6465 }
6466 
6467 // Splitting large data structures so that the GEPs accessing them can have
6468 // smaller offsets so that they can be sunk to the same blocks as their users.
6469 // For example, a large struct starting from %base is split into two parts
6470 // where the second part starts from %new_base.
6471 //
6472 // Before:
6473 // BB0:
6474 //   %base     =
6475 //
6476 // BB1:
6477 //   %gep0     = gep %base, off0
6478 //   %gep1     = gep %base, off1
6479 //   %gep2     = gep %base, off2
6480 //
6481 // BB2:
6482 //   %load1    = load %gep0
6483 //   %load2    = load %gep1
6484 //   %load3    = load %gep2
6485 //
6486 // After:
6487 // BB0:
6488 //   %base     =
6489 //   %new_base = gep %base, off0
6490 //
6491 // BB1:
6492 //   %new_gep0 = %new_base
6493 //   %new_gep1 = gep %new_base, off1 - off0
6494 //   %new_gep2 = gep %new_base, off2 - off0
6495 //
6496 // BB2:
6497 //   %load1    = load i32, i32* %new_gep0
6498 //   %load2    = load i32, i32* %new_gep1
6499 //   %load3    = load i32, i32* %new_gep2
6500 //
6501 // %new_gep1 and %new_gep2 can be sunk to BB2 now after the splitting because
6502 // their offsets are small enough to fit into the addressing mode.
6503 bool CodeGenPrepare::splitLargeGEPOffsets() {
6504   bool Changed = false;
6505   for (auto &Entry : LargeOffsetGEPMap) {
6506     Value *OldBase = Entry.first;
6507     SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>>
6508         &LargeOffsetGEPs = Entry.second;
6509     auto compareGEPOffset =
6510         [&](const std::pair<GetElementPtrInst *, int64_t> &LHS,
6511             const std::pair<GetElementPtrInst *, int64_t> &RHS) {
6512           if (LHS.first == RHS.first)
6513             return false;
6514           if (LHS.second != RHS.second)
6515             return LHS.second < RHS.second;
6516           return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first];
6517         };
6518     // Sort all the GEPs of the same data structure based on the offsets.
6519     llvm::sort(LargeOffsetGEPs, compareGEPOffset);
6520     LargeOffsetGEPs.erase(llvm::unique(LargeOffsetGEPs), LargeOffsetGEPs.end());
6521     // Skip if all the GEPs have the same offsets.
6522     if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second)
6523       continue;
6524     GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first;
6525     int64_t BaseOffset = LargeOffsetGEPs.begin()->second;
6526     Value *NewBaseGEP = nullptr;
6527 
6528     auto createNewBase = [&](int64_t BaseOffset, Value *OldBase,
6529                              GetElementPtrInst *GEP) {
6530       LLVMContext &Ctx = GEP->getContext();
6531       Type *PtrIdxTy = DL->getIndexType(GEP->getType());
6532       Type *I8PtrTy =
6533           PointerType::get(Ctx, GEP->getType()->getPointerAddressSpace());
6534 
6535       BasicBlock::iterator NewBaseInsertPt;
6536       BasicBlock *NewBaseInsertBB;
6537       if (auto *BaseI = dyn_cast<Instruction>(OldBase)) {
6538         // If the base of the struct is an instruction, the new base will be
6539         // inserted close to it.
6540         NewBaseInsertBB = BaseI->getParent();
6541         if (isa<PHINode>(BaseI))
6542           NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
6543         else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) {
6544           NewBaseInsertBB =
6545               SplitEdge(NewBaseInsertBB, Invoke->getNormalDest(), DT.get(), LI);
6546           NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
6547         } else
6548           NewBaseInsertPt = std::next(BaseI->getIterator());
6549       } else {
6550         // If the current base is an argument or global value, the new base
6551         // will be inserted to the entry block.
6552         NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock();
6553         NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
6554       }
6555       IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
6556       // Create a new base.
6557       Value *BaseIndex = ConstantInt::get(PtrIdxTy, BaseOffset);
6558       NewBaseGEP = OldBase;
6559       if (NewBaseGEP->getType() != I8PtrTy)
6560         NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy);
6561       NewBaseGEP =
6562           NewBaseBuilder.CreatePtrAdd(NewBaseGEP, BaseIndex, "splitgep");
6563       NewGEPBases.insert(NewBaseGEP);
6564       return;
6565     };
6566 
6567     // Check whether all offsets can be encoded with the preferred common base.
6568     if (int64_t PreferBase = TLI->getPreferredLargeGEPBaseOffset(
6569             LargeOffsetGEPs.front().second, LargeOffsetGEPs.back().second)) {
6570       BaseOffset = PreferBase;
6571       // Create a new base if the offset of the BaseGEP can be encoded with one
6572       // instruction.
6573       createNewBase(BaseOffset, OldBase, BaseGEP);
6574     }
6575 
6576     auto *LargeOffsetGEP = LargeOffsetGEPs.begin();
6577     while (LargeOffsetGEP != LargeOffsetGEPs.end()) {
6578       GetElementPtrInst *GEP = LargeOffsetGEP->first;
6579       int64_t Offset = LargeOffsetGEP->second;
6580       if (Offset != BaseOffset) {
6581         TargetLowering::AddrMode AddrMode;
6582         AddrMode.HasBaseReg = true;
6583         AddrMode.BaseOffs = Offset - BaseOffset;
6584         // The result type of the GEP might not be the type of the memory
6585         // access.
6586         if (!TLI->isLegalAddressingMode(*DL, AddrMode,
6587                                         GEP->getResultElementType(),
6588                                         GEP->getAddressSpace())) {
6589           // We need to create a new base if the offset to the current base is
6590           // too large to fit into the addressing mode. So, a very large struct
6591           // may be split into several parts.
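          // For illustration only (hypothetical offsets, not from this code):
          // if the reachable immediate range were [0, 4095], GEPs at offsets
          // 10000 and 10004 could share one new "splitgep" base at 10000,
          // while a GEP at offset 20000 would have to start a new base.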
6592           BaseGEP = GEP;
6593           BaseOffset = Offset;
6594           NewBaseGEP = nullptr;
6595         }
6596       }
6597 
6598       // Generate a new GEP to replace the current one.
6599       Type *PtrIdxTy = DL->getIndexType(GEP->getType());
6600 
6601       if (!NewBaseGEP) {
6602         // Create a new base if we don't have one yet.  Find the insertion
6603         // point for the new base first.
6604         createNewBase(BaseOffset, OldBase, GEP);
6605       }
6606 
6607       IRBuilder<> Builder(GEP);
6608       Value *NewGEP = NewBaseGEP;
6609       if (Offset != BaseOffset) {
6610         // Calculate the new offset for the new GEP.
6611         Value *Index = ConstantInt::get(PtrIdxTy, Offset - BaseOffset);
6612         NewGEP = Builder.CreatePtrAdd(NewBaseGEP, Index);
6613       }
6614       replaceAllUsesWith(GEP, NewGEP, FreshBBs, IsHugeFunc);
6615       LargeOffsetGEPID.erase(GEP);
6616       LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP);
6617       GEP->eraseFromParent();
6618       Changed = true;
6619     }
6620   }
6621   return Changed;
6622 }
6623 
6624 bool CodeGenPrepare::optimizePhiType(
6625     PHINode *I, SmallPtrSetImpl<PHINode *> &Visited,
6626     SmallPtrSetImpl<Instruction *> &DeletedInstrs) {
6627   // We are looking for a collection of interconnected phi nodes that together
6628   // only use loads/bitcasts and are used by stores/bitcasts, and the bitcasts
6629   // are of the same type. Convert the whole set of nodes to the type of the
6630   // bitcast.
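  // For illustration only (a sketch; the conversion is gated on the target
  // reporting i32 -> float as profitable via shouldConvertPhiType):
  //   %f = fadd float %a, %b
  //   %c = bitcast float %f to i32
  //   %phi = phi i32 [ %c, %bb0 ], [ %l, %bb1 ]   ; %l is a simple i32 load
  //   store i32 %phi, ptr %q
  // can become
  //   %l.bc = bitcast i32 %l to float
  //   %phi.tc = phi float [ %f, %bb0 ], [ %l.bc, %bb1 ]
  //   %bc = bitcast float %phi.tc to i32
  //   store i32 %bc, ptr %q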
6631   Type *PhiTy = I->getType();
6632   Type *ConvertTy = nullptr;
6633   if (Visited.count(I) ||
6634       (!I->getType()->isIntegerTy() && !I->getType()->isFloatingPointTy()))
6635     return false;
6636 
6637   SmallVector<Instruction *, 4> Worklist;
6638   Worklist.push_back(cast<Instruction>(I));
6639   SmallPtrSet<PHINode *, 4> PhiNodes;
6640   SmallPtrSet<ConstantData *, 4> Constants;
6641   PhiNodes.insert(I);
6642   Visited.insert(I);
6643   SmallPtrSet<Instruction *, 4> Defs;
6644   SmallPtrSet<Instruction *, 4> Uses;
6645   // This works by adding extra bitcasts between loads/stores and removing
6646   // existing bitcasts. If we have a phi(bitcast(load)) or a store(bitcast(phi))
6647   // we can get into a situation where we remove a bitcast in one iteration
6648   // just to add it again in the next. We need to ensure that at least one of
6649   // the bitcasts we remove is anchored to something that will not change back.
6650   bool AnyAnchored = false;
6651 
6652   while (!Worklist.empty()) {
6653     Instruction *II = Worklist.pop_back_val();
6654 
6655     if (auto *Phi = dyn_cast<PHINode>(II)) {
6656       // Handle defs, which might also be PHIs.
6657       for (Value *V : Phi->incoming_values()) {
6658         if (auto *OpPhi = dyn_cast<PHINode>(V)) {
6659           if (!PhiNodes.count(OpPhi)) {
6660             if (!Visited.insert(OpPhi).second)
6661               return false;
6662             PhiNodes.insert(OpPhi);
6663             Worklist.push_back(OpPhi);
6664           }
6665         } else if (auto *OpLoad = dyn_cast<LoadInst>(V)) {
6666           if (!OpLoad->isSimple())
6667             return false;
6668           if (Defs.insert(OpLoad).second)
6669             Worklist.push_back(OpLoad);
6670         } else if (auto *OpEx = dyn_cast<ExtractElementInst>(V)) {
6671           if (Defs.insert(OpEx).second)
6672             Worklist.push_back(OpEx);
6673         } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
6674           if (!ConvertTy)
6675             ConvertTy = OpBC->getOperand(0)->getType();
6676           if (OpBC->getOperand(0)->getType() != ConvertTy)
6677             return false;
6678           if (Defs.insert(OpBC).second) {
6679             Worklist.push_back(OpBC);
6680             AnyAnchored |= !isa<LoadInst>(OpBC->getOperand(0)) &&
6681                            !isa<ExtractElementInst>(OpBC->getOperand(0));
6682           }
6683         } else if (auto *OpC = dyn_cast<ConstantData>(V))
6684           Constants.insert(OpC);
6685         else
6686           return false;
6687       }
6688     }
6689 
6690     // Handle uses, which might also be phis.
6691     for (User *V : II->users()) {
6692       if (auto *OpPhi = dyn_cast<PHINode>(V)) {
6693         if (!PhiNodes.count(OpPhi)) {
6694           if (Visited.count(OpPhi))
6695             return false;
6696           PhiNodes.insert(OpPhi);
6697           Visited.insert(OpPhi);
6698           Worklist.push_back(OpPhi);
6699         }
6700       } else if (auto *OpStore = dyn_cast<StoreInst>(V)) {
6701         if (!OpStore->isSimple() || OpStore->getOperand(0) != II)
6702           return false;
6703         Uses.insert(OpStore);
6704       } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
6705         if (!ConvertTy)
6706           ConvertTy = OpBC->getType();
6707         if (OpBC->getType() != ConvertTy)
6708           return false;
6709         Uses.insert(OpBC);
6710         AnyAnchored |=
6711             any_of(OpBC->users(), [](User *U) { return !isa<StoreInst>(U); });
6712       } else {
6713         return false;
6714       }
6715     }
6716   }
6717 
6718   if (!ConvertTy || !AnyAnchored ||
6719       !TLI->shouldConvertPhiType(PhiTy, ConvertTy))
6720     return false;
6721 
6722   LLVM_DEBUG(dbgs() << "Converting " << *I << "\n  and connected nodes to "
6723                     << *ConvertTy << "\n");
6724 
6725   // Create all the new phi nodes of the new type, and bitcast any loads to the
6726   // correct type.
6727   ValueToValueMap ValMap;
6728   for (ConstantData *C : Constants)
6729     ValMap[C] = ConstantExpr::getBitCast(C, ConvertTy);
6730   for (Instruction *D : Defs) {
6731     if (isa<BitCastInst>(D)) {
6732       ValMap[D] = D->getOperand(0);
6733       DeletedInstrs.insert(D);
6734     } else {
6735       BasicBlock::iterator insertPt = std::next(D->getIterator());
6736       ValMap[D] = new BitCastInst(D, ConvertTy, D->getName() + ".bc", insertPt);
6737     }
6738   }
6739   for (PHINode *Phi : PhiNodes)
6740     ValMap[Phi] = PHINode::Create(ConvertTy, Phi->getNumIncomingValues(),
6741                                   Phi->getName() + ".tc", Phi->getIterator());
6742   // Pipe together all the PhiNodes.
6743   for (PHINode *Phi : PhiNodes) {
6744     PHINode *NewPhi = cast<PHINode>(ValMap[Phi]);
6745     for (int i = 0, e = Phi->getNumIncomingValues(); i < e; i++)
6746       NewPhi->addIncoming(ValMap[Phi->getIncomingValue(i)],
6747                           Phi->getIncomingBlock(i));
6748     Visited.insert(NewPhi);
6749   }
6750   // And finally pipe up the stores and bitcasts
6751   for (Instruction *U : Uses) {
6752     if (isa<BitCastInst>(U)) {
6753       DeletedInstrs.insert(U);
6754       replaceAllUsesWith(U, ValMap[U->getOperand(0)], FreshBBs, IsHugeFunc);
6755     } else {
6756       U->setOperand(0, new BitCastInst(ValMap[U->getOperand(0)], PhiTy, "bc",
6757                                        U->getIterator()));
6758     }
6759   }
6760 
6761   // Save the removed phis to be deleted later.
6762   for (PHINode *Phi : PhiNodes)
6763     DeletedInstrs.insert(Phi);
6764   return true;
6765 }
6766 
6767 bool CodeGenPrepare::optimizePhiTypes(Function &F) {
6768   if (!OptimizePhiTypes)
6769     return false;
6770 
6771   bool Changed = false;
6772   SmallPtrSet<PHINode *, 4> Visited;
6773   SmallPtrSet<Instruction *, 4> DeletedInstrs;
6774 
6775   // Attempt to optimize all the phis in the function to the correct type.
6776   for (auto &BB : F)
6777     for (auto &Phi : BB.phis())
6778       Changed |= optimizePhiType(&Phi, Visited, DeletedInstrs);
6779 
6780   // Remove any old phis that have been converted.
6781   for (auto *I : DeletedInstrs) {
6782     replaceAllUsesWith(I, PoisonValue::get(I->getType()), FreshBBs, IsHugeFunc);
6783     I->eraseFromParent();
6784   }
6785 
6786   return Changed;
6787 }
6788 
6789 /// Return true if an ext(load) can be formed from an extension in
6790 /// \p MovedExts.
6791 bool CodeGenPrepare::canFormExtLd(
6792     const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
6793     Instruction *&Inst, bool HasPromoted) {
6794   for (auto *MovedExtInst : MovedExts) {
6795     if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
6796       LI = cast<LoadInst>(MovedExtInst->getOperand(0));
6797       Inst = MovedExtInst;
6798       break;
6799     }
6800   }
6801   if (!LI)
6802     return false;
6803 
6804   // If they're already in the same block, there's nothing to do.
6805   // Make the cheap checks first if we did not promote.
6806   // If we promoted, we need to check if it is indeed profitable.
6807   if (!HasPromoted && LI->getParent() == Inst->getParent())
6808     return false;
6809 
6810   return TLI->isExtLoad(LI, Inst, *DL);
6811 }
6812 
6813 /// Move a zext or sext fed by a load into the same basic block as the load,
6814 /// unless conditions are unfavorable. This allows SelectionDAG to fold the
6815 /// extend into the load.
6816 ///
6817 /// E.g.,
6818 /// \code
6819 /// %ld = load i32* %addr
6820 /// %add = add nuw i32 %ld, 4
6821 /// %zext = zext i32 %add to i64
6822 /// \endcode
6823 /// =>
6824 /// \code
6825 /// %ld = load i32* %addr
6826 /// %zext = zext i32 %ld to i64
6827 /// %add = add nuw i64 %zext, 4
6828 /// \endcode
6829 /// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which
6830 /// allows us to match zext(load i32*) to i64.
6831 ///
6832 /// Also, try to promote the computations used to obtain a sign-extended
6833 /// value used in memory accesses.
6834 /// E.g.,
6835 /// \code
6836 /// a = add nsw i32 b, 3
6837 /// d = sext i32 a to i64
6838 /// e = getelementptr ..., i64 d
6839 /// \endcode
6840 /// =>
6841 /// \code
6842 /// f = sext i32 b to i64
6843 /// a = add nsw i64 f, 3
6844 /// e = getelementptr ..., i64 a
6845 /// \endcode
6846 ///
6847 /// \p Inst[in/out] the extension may be modified during the process if some
6848 /// promotions apply.
6849 bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
6850   bool AllowPromotionWithoutCommonHeader = false;
6851   /// See if it is an interesting sext operation for the address type
6852   /// promotion before trying to promote it, e.g., the ones with the right
6853   /// type and used in memory accesses.
6854   bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
6855       *Inst, AllowPromotionWithoutCommonHeader);
6856   TypePromotionTransaction TPT(RemovedInsts);
6857   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
6858       TPT.getRestorationPoint();
6859   SmallVector<Instruction *, 1> Exts;
6860   SmallVector<Instruction *, 2> SpeculativelyMovedExts;
6861   Exts.push_back(Inst);
6862 
6863   bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);
6864 
6865   // Look for a load being extended.
6866   LoadInst *LI = nullptr;
6867   Instruction *ExtFedByLoad;
6868 
6869   // Try to promote a chain of computation if it allows forming an extended
6870   // load.
6871   if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
6872     assert(LI && ExtFedByLoad && "Expect a valid load and extension");
6873     TPT.commit();
6874     // Move the extend into the same block as the load.
6875     ExtFedByLoad->moveAfter(LI);
6876     ++NumExtsMoved;
6877     Inst = ExtFedByLoad;
6878     return true;
6879   }
6880 
6881   // Continue promoting SExts if known as considerable depending on targets.
6882   if (ATPConsiderable &&
6883       performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
6884                                   HasPromoted, TPT, SpeculativelyMovedExts))
6885     return true;
6886 
6887   TPT.rollback(LastKnownGood);
6888   return false;
6889 }
6890 
6891 // Perform address type promotion if doing so is profitable.
6892 // If AllowPromotionWithoutCommonHeader == false, we should find other sext
6893 // instructions that sign extended the same initial value. However, if
6894 // AllowPromotionWithoutCommonHeader == true, we expect that promoting the
6895 // extension is profitable on its own.
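// For illustration (a sketch of the bookkeeping, not new behavior): the first
// sext chain rooted at some value %a is only recorded in SeenChainsForSExt;
// once a second chain rooted at the same %a shows up, both are promoted, and
// the previously deferred chain is revisited through UnhandledExts.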
6896 bool CodeGenPrepare::performAddressTypePromotion(
6897     Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
6898     bool HasPromoted, TypePromotionTransaction &TPT,
6899     SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) {
6900   bool Promoted = false;
6901   SmallPtrSet<Instruction *, 1> UnhandledExts;
6902   bool AllSeenFirst = true;
6903   for (auto *I : SpeculativelyMovedExts) {
6904     Value *HeadOfChain = I->getOperand(0);
6905     DenseMap<Value *, Instruction *>::iterator AlreadySeen =
6906         SeenChainsForSExt.find(HeadOfChain);
6907     // If there is an unhandled SExt which has the same header, try to promote
6908     // it as well.
6909     if (AlreadySeen != SeenChainsForSExt.end()) {
6910       if (AlreadySeen->second != nullptr)
6911         UnhandledExts.insert(AlreadySeen->second);
6912       AllSeenFirst = false;
6913     }
6914   }
6915 
6916   if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader &&
6917                         SpeculativelyMovedExts.size() == 1)) {
6918     TPT.commit();
6919     if (HasPromoted)
6920       Promoted = true;
6921     for (auto *I : SpeculativelyMovedExts) {
6922       Value *HeadOfChain = I->getOperand(0);
6923       SeenChainsForSExt[HeadOfChain] = nullptr;
6924       ValToSExtendedUses[HeadOfChain].push_back(I);
6925     }
6926     // Update Inst as the promotion happened.
6927     Inst = SpeculativelyMovedExts.pop_back_val();
6928   } else {
6929     // This is the first chain visited from this header; keep the current chain
6930     // as unhandled. Defer promoting it until we encounter another SExt
6931     // chain derived from the same header.
6932     for (auto *I : SpeculativelyMovedExts) {
6933       Value *HeadOfChain = I->getOperand(0);
6934       SeenChainsForSExt[HeadOfChain] = Inst;
6935     }
6936     return false;
6937   }
6938 
6939   if (!AllSeenFirst && !UnhandledExts.empty())
6940     for (auto *VisitedSExt : UnhandledExts) {
6941       if (RemovedInsts.count(VisitedSExt))
6942         continue;
6943       TypePromotionTransaction TPT(RemovedInsts);
6944       SmallVector<Instruction *, 1> Exts;
6945       SmallVector<Instruction *, 2> Chains;
6946       Exts.push_back(VisitedSExt);
6947       bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains);
6948       TPT.commit();
6949       if (HasPromoted)
6950         Promoted = true;
6951       for (auto *I : Chains) {
6952         Value *HeadOfChain = I->getOperand(0);
6953         // Mark this as handled.
6954         SeenChainsForSExt[HeadOfChain] = nullptr;
6955         ValToSExtendedUses[HeadOfChain].push_back(I);
6956       }
6957     }
6958   return Promoted;
6959 }
6960 
6961 bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
6962   BasicBlock *DefBB = I->getParent();
6963 
6964   // If the result of a {s|z}ext and its source are both live out, rewrite all
6965   // other uses of the source with the result of the extension.
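  // For illustration (a sketch, not from the original comments): if both %x
  // and %ext below are live out of this block,
  //   bb0:  %x = add i32 %a, %b
  //         %ext = zext i32 %x to i64
  //   bb1:  use(%ext) ... use(%x)
  // the use of %x in bb1 is rewritten to use "trunc i64 %ext to i32", so only
  // %ext has to stay live across the block boundary.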
6966   Value *Src = I->getOperand(0);
6967   if (Src->hasOneUse())
6968     return false;
6969 
6970   // Only do this xform if truncating is free.
6971   if (!TLI->isTruncateFree(I->getType(), Src->getType()))
6972     return false;
6973 
6974   // Only safe to perform the optimization if the source is also defined in
6975   // this block.
6976   if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
6977     return false;
6978 
6979   bool DefIsLiveOut = false;
6980   for (User *U : I->users()) {
6981     Instruction *UI = cast<Instruction>(U);
6982 
6983     // Figure out which BB this ext is used in.
6984     BasicBlock *UserBB = UI->getParent();
6985     if (UserBB == DefBB)
6986       continue;
6987     DefIsLiveOut = true;
6988     break;
6989   }
6990   if (!DefIsLiveOut)
6991     return false;
6992 
6993   // Make sure none of the uses are PHI nodes.
6994   for (User *U : Src->users()) {
6995     Instruction *UI = cast<Instruction>(U);
6996     BasicBlock *UserBB = UI->getParent();
6997     if (UserBB == DefBB)
6998       continue;
6999     // Be conservative. We don't want this xform to end up introducing
7000     // reloads just before load / store instructions.
7001     if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
7002       return false;
7003   }
7004 
7005   // InsertedTruncs - Insert at most one trunc in each block.
7006   DenseMap<BasicBlock *, Instruction *> InsertedTruncs;
7007 
7008   bool MadeChange = false;
7009   for (Use &U : Src->uses()) {
7010     Instruction *User = cast<Instruction>(U.getUser());
7011 
7012     // Figure out which BB this ext is used in.
7013     BasicBlock *UserBB = User->getParent();
7014     if (UserBB == DefBB)
7015       continue;
7016 
7017     // Both src and def are live in this block. Rewrite the use.
7018     Instruction *&InsertedTrunc = InsertedTruncs[UserBB];
7019 
7020     if (!InsertedTrunc) {
7021       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
7022       assert(InsertPt != UserBB->end());
7023       InsertedTrunc = new TruncInst(I, Src->getType(), "");
7024       InsertedTrunc->insertBefore(*UserBB, InsertPt);
7025       InsertedInsts.insert(InsertedTrunc);
7026     }
7027 
7028     // Replace a use of the {s|z}ext source with a use of the result.
7029     U = InsertedTrunc;
7030     ++NumExtUses;
7031     MadeChange = true;
7032   }
7033 
7034   return MadeChange;
7035 }
7036 
7037 // Find loads whose uses only use some of the loaded value's bits.  Add an "and"
7038 // just after the load if the target can fold this into one extload instruction,
7039 // with the hope of eliminating some of the other later "and" instructions using
7040 // the loaded value.  "and"s that are made trivially redundant by the insertion
7041 // of the new "and" are removed by this function, while others (e.g. those whose
7042 // path from the load goes through a phi) are left for isel to potentially
7043 // remove.
7044 //
7045 // For example:
7046 //
7047 // b0:
7048 //   x = load i32
7049 //   ...
7050 // b1:
7051 //   y = and x, 0xff
7052 //   z = use y
7053 //
7054 // becomes:
7055 //
7056 // b0:
7057 //   x = load i32
7058 //   x' = and x, 0xff
7059 //   ...
7060 // b1:
7061 //   z = use x'
7062 //
7063 // whereas:
7064 //
7065 // b0:
7066 //   x1 = load i32
7067 //   ...
7068 // b1:
7069 //   x2 = load i32
7070 //   ...
7071 // b2:
7072 //   x = phi x1, x2
7073 //   y = and x, 0xff
7074 //
7075 // becomes (after a call to optimizeLoadExt for each load):
7076 //
7077 // b0:
7078 //   x1 = load i32
7079 //   x1' = and x1, 0xff
7080 //   ...
7081 // b1:
7082 //   x2 = load i32
7083 //   x2' = and x2, 0xff
7084 //   ...
7085 // b2:
7086 //   x = phi x1', x2'
7087 //   y = and x, 0xff
7088 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
7089   if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy())
7090     return false;
7091 
7092   // Skip loads we've already transformed.
7093   if (Load->hasOneUse() &&
7094       InsertedInsts.count(cast<Instruction>(*Load->user_begin())))
7095     return false;
7096 
7097   // Look at all uses of Load, looking through phis, to determine how many bits
7098   // of the loaded value are needed.
7099   SmallVector<Instruction *, 8> WorkList;
7100   SmallPtrSet<Instruction *, 16> Visited;
7101   SmallVector<Instruction *, 8> AndsToMaybeRemove;
7102   for (auto *U : Load->users())
7103     WorkList.push_back(cast<Instruction>(U));
7104 
7105   EVT LoadResultVT = TLI->getValueType(*DL, Load->getType());
7106   unsigned BitWidth = LoadResultVT.getSizeInBits();
7107   // If the BitWidth is 0, do not try to optimize the type.
7108   if (BitWidth == 0)
7109     return false;
7110 
7111   APInt DemandBits(BitWidth, 0);
7112   APInt WidestAndBits(BitWidth, 0);
7113 
7114   while (!WorkList.empty()) {
7115     Instruction *I = WorkList.pop_back_val();
7116 
7117     // Break use-def graph loops.
7118     if (!Visited.insert(I).second)
7119       continue;
7120 
7121     // For a PHI node, push all of its users.
7122     if (auto *Phi = dyn_cast<PHINode>(I)) {
7123       for (auto *U : Phi->users())
7124         WorkList.push_back(cast<Instruction>(U));
7125       continue;
7126     }
7127 
7128     switch (I->getOpcode()) {
7129     case Instruction::And: {
7130       auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1));
7131       if (!AndC)
7132         return false;
7133       APInt AndBits = AndC->getValue();
7134       DemandBits |= AndBits;
7135       // Keep track of the widest and mask we see.
7136       if (AndBits.ugt(WidestAndBits))
7137         WidestAndBits = AndBits;
7138       if (AndBits == WidestAndBits && I->getOperand(0) == Load)
7139         AndsToMaybeRemove.push_back(I);
7140       break;
7141     }
7142 
7143     case Instruction::Shl: {
7144       auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1));
7145       if (!ShlC)
7146         return false;
7147       uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
7148       DemandBits.setLowBits(BitWidth - ShiftAmt);
7149       break;
7150     }
7151 
7152     case Instruction::Trunc: {
7153       EVT TruncVT = TLI->getValueType(*DL, I->getType());
7154       unsigned TruncBitWidth = TruncVT.getSizeInBits();
7155       DemandBits.setLowBits(TruncBitWidth);
7156       break;
7157     }
7158 
7159     default:
7160       return false;
7161     }
7162   }
7163 
7164   uint32_t ActiveBits = DemandBits.getActiveBits();
7165   // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the
7166   // target even if isLoadExtLegal says an i1 EXTLOAD is valid.  For example,
7167   // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but
7168   // (and (load x) 1) is not matched as a single instruction, rather as a LDR
7169   // followed by an AND.
7170   // TODO: Look into removing this restriction by fixing backends to either
7171   // return false for isLoadExtLegal for i1 or have them select this pattern to
7172   // a single instruction.
7173   //
7174   // Also avoid hoisting if we didn't see any ands with the exact DemandBits
7175   // mask, since these are the only ands that will be removed by isel.
7176   if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) ||
7177       WidestAndBits != DemandBits)
7178     return false;
7179 
7180   LLVMContext &Ctx = Load->getType()->getContext();
7181   Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits);
7182   EVT TruncVT = TLI->getValueType(*DL, TruncTy);
7183 
7184   // Reject cases that won't be matched as extloads.
7185   if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() ||
7186       !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT))
7187     return false;
7188 
7189   IRBuilder<> Builder(Load->getNextNonDebugInstruction());
7190   auto *NewAnd = cast<Instruction>(
7191       Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits)));
7192   // Mark this instruction as "inserted by CGP", so that other
7193   // optimizations don't touch it.
7194   InsertedInsts.insert(NewAnd);
7195 
7196   // Replace all uses of load with new and (except for the use of load in the
7197   // new and itself).
7198   replaceAllUsesWith(Load, NewAnd, FreshBBs, IsHugeFunc);
7199   NewAnd->setOperand(0, Load);
7200 
7201   // Remove any and instructions that are now redundant.
7202   for (auto *And : AndsToMaybeRemove)
7203     // Check that the and mask is the same as the one we decided to put on the
7204     // new and.
7205     if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) {
7206       replaceAllUsesWith(And, NewAnd, FreshBBs, IsHugeFunc);
7207       if (&*CurInstIterator == And)
7208         CurInstIterator = std::next(And->getIterator());
7209       And->eraseFromParent();
7210       ++NumAndUses;
7211     }
7212 
7213   ++NumAndsAdded;
7214   return true;
7215 }
7216 
7217 /// Check if V (an operand of a select instruction) is an expensive instruction
7218 /// that is only used once.
7219 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) {
7220   auto *I = dyn_cast<Instruction>(V);
7221   // If it's safe to speculatively execute, then it should not have side
7222   // effects; therefore, it's safe to sink and possibly *not* execute.
7223   return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) &&
7224          TTI->isExpensiveToSpeculativelyExecute(I);
7225 }
7226 
7227 /// Returns true if a SelectInst should be turned into an explicit branch.
7228 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI,
7229                                                 const TargetLowering *TLI,
7230                                                 SelectInst *SI) {
7231   // If even a predictable select is cheap, then a branch can't be cheaper.
7232   if (!TLI->isPredictableSelectExpensive())
7233     return false;
7234 
7235   // FIXME: This should use the same heuristics as IfConversion to determine
7236   // whether a select is better represented as a branch.
7237 
7238   // If metadata tells us that the select condition is obviously predictable,
7239   // then we want to replace the select with a branch.
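  // For illustration (a sketch; the threshold itself is target-dependent):
  // with !prof weights of 2000 and 1, Max/Sum is roughly 0.9995, which is
  // above a typical predictable-branch threshold, so this heuristic votes
  // for forming a branch.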
7240   uint64_t TrueWeight, FalseWeight;
7241   if (extractBranchWeights(*SI, TrueWeight, FalseWeight)) {
7242     uint64_t Max = std::max(TrueWeight, FalseWeight);
7243     uint64_t Sum = TrueWeight + FalseWeight;
7244     if (Sum != 0) {
7245       auto Probability = BranchProbability::getBranchProbability(Max, Sum);
7246       if (Probability > TTI->getPredictableBranchThreshold())
7247         return true;
7248     }
7249   }
7250 
7251   CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
7252 
7253   // If a branch is predictable, an out-of-order CPU can avoid blocking on its
7254   // comparison condition. If the compare has more than one use, there's
7255   // probably another cmov or setcc around, so it's not worth emitting a branch.
7256   if (!Cmp || !Cmp->hasOneUse())
7257     return false;
7258 
7259   // If either operand of the select is expensive and only needed on one side
7260   // of the select, we should form a branch.
7261   if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
7262       sinkSelectOperand(TTI, SI->getFalseValue()))
7263     return true;
7264 
7265   return false;
7266 }
7267 
7268 /// If \p isTrue is true, return the true value of \p SI, otherwise return
7269 /// the false value of \p SI. If the true/false value of \p SI is defined by any
7270 /// select instructions in \p Selects, look through the defining select
7271 /// instruction until the true/false value is not defined in \p Selects.
7272 static Value *
7273 getTrueOrFalseValue(SelectInst *SI, bool isTrue,
7274                     const SmallPtrSet<const Instruction *, 2> &Selects) {
7275   Value *V = nullptr;
7276 
7277   for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI);
7278        DefSI = dyn_cast<SelectInst>(V)) {
7279     assert(DefSI->getCondition() == SI->getCondition() &&
7280            "The condition of DefSI does not match with SI");
7281     V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
7282   }
7283 
7284   assert(V && "Failed to get select true/false value");
7285   return V;
7286 }
7287 
7288 bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) {
7289   assert(Shift->isShift() && "Expected a shift");
7290 
7291   // If this is (1) a vector shift, (2) shifts by scalars are cheaper than
7292   // general vector shifts, and (3) the shift amount is a select-of-splatted
7293   // values, hoist the shifts before the select:
7294   //   shift Op0, (select Cond, TVal, FVal) -->
7295   //   select Cond, (shift Op0, TVal), (shift Op0, FVal)
7296   //
7297   // This is inverting a generic IR transform when we know that the cost of a
7298   // general vector shift is more than the cost of 2 shift-by-scalars.
7299   // We can't do this effectively in SDAG because we may not be able to
7300   // determine if the select operands are splats from within a basic block.
7301   Type *Ty = Shift->getType();
7302   if (!Ty->isVectorTy() || !TTI->isVectorShiftByScalarCheap(Ty))
7303     return false;
7304   Value *Cond, *TVal, *FVal;
7305   if (!match(Shift->getOperand(1),
7306              m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
7307     return false;
7308   if (!isSplatValue(TVal) || !isSplatValue(FVal))
7309     return false;
7310 
7311   IRBuilder<> Builder(Shift);
7312   BinaryOperator::BinaryOps Opcode = Shift->getOpcode();
7313   Value *NewTVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), TVal);
7314   Value *NewFVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), FVal);
7315   Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
7316   replaceAllUsesWith(Shift, NewSel, FreshBBs, IsHugeFunc);
7317   Shift->eraseFromParent();
7318   return true;
7319 }
7320 
7321 bool CodeGenPrepare::optimizeFunnelShift(IntrinsicInst *Fsh) {
7322   Intrinsic::ID Opcode = Fsh->getIntrinsicID();
7323   assert((Opcode == Intrinsic::fshl || Opcode == Intrinsic::fshr) &&
7324          "Expected a funnel shift");
7325 
7326   // If this is (1) a vector funnel shift, (2) shifts by scalars are cheaper
7327   // than general vector shifts, and (3) the shift amount is select-of-splatted
7328   // values, hoist the funnel shifts before the select:
7329   //   fsh Op0, Op1, (select Cond, TVal, FVal) -->
7330   //   select Cond, (fsh Op0, Op1, TVal), (fsh Op0, Op1, FVal)
7331   //
7332   // This is inverting a generic IR transform when we know that the cost of a
7333   // general vector shift is more than the cost of 2 shift-by-scalars.
7334   // We can't do this effectively in SDAG because we may not be able to
7335   // determine if the select operands are splats from within a basic block.
7336   Type *Ty = Fsh->getType();
7337   if (!Ty->isVectorTy() || !TTI->isVectorShiftByScalarCheap(Ty))
7338     return false;
7339   Value *Cond, *TVal, *FVal;
7340   if (!match(Fsh->getOperand(2),
7341              m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
7342     return false;
7343   if (!isSplatValue(TVal) || !isSplatValue(FVal))
7344     return false;
7345 
7346   IRBuilder<> Builder(Fsh);
7347   Value *X = Fsh->getOperand(0), *Y = Fsh->getOperand(1);
7348   Value *NewTVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, TVal});
7349   Value *NewFVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, FVal});
7350   Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
7351   replaceAllUsesWith(Fsh, NewSel, FreshBBs, IsHugeFunc);
7352   Fsh->eraseFromParent();
7353   return true;
7354 }
7355 
7356 /// If we have a SelectInst that will likely profit from branch prediction,
7357 /// turn it into a branch.
7358 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
7359   if (DisableSelectToBranch)
7360     return false;
7361 
7362   // If the SelectOptimize pass is enabled, selects have already been optimized.
7363   if (!getCGPassBuilderOption().DisableSelectOptimize)
7364     return false;
7365 
7366   // Find all consecutive select instructions that share the same condition.
7367   SmallVector<SelectInst *, 2> ASI;
7368   ASI.push_back(SI);
7369   for (BasicBlock::iterator It = ++BasicBlock::iterator(SI);
7370        It != SI->getParent()->end(); ++It) {
7371     SelectInst *I = dyn_cast<SelectInst>(&*It);
7372     if (I && SI->getCondition() == I->getCondition()) {
7373       ASI.push_back(I);
7374     } else {
7375       break;
7376     }
7377   }
7378 
7379   SelectInst *LastSI = ASI.back();
7380   // Increment the current iterator to skip the rest of the select instructions,
7381   // because they will either all be lowered to branches or none of them will be.
7382   CurInstIterator = std::next(LastSI->getIterator());
7383   // Examine debug-info attached to the consecutive select instructions. They
7384   // won't be individually optimized by optimizeInst, so we need to perform
7385   // DbgVariableRecord maintenance here instead.
7386   for (SelectInst *SI : ArrayRef(ASI).drop_front())
7387     fixupDbgVariableRecordsOnInst(*SI);
7388 
7389   bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);
7390 
7391   // Can we convert the 'select' to CF?
7392   if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable))
7393     return false;
7394 
7395   TargetLowering::SelectSupportKind SelectKind;
7396   if (SI->getType()->isVectorTy())
7397     SelectKind = TargetLowering::ScalarCondVectorVal;
7398   else
7399     SelectKind = TargetLowering::ScalarValSelect;
7400 
7401   if (TLI->isSelectSupported(SelectKind) &&
7402       (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) ||
7403        llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get())))
7404     return false;
7405 
7406   // The DominatorTree needs to be rebuilt by any consumers after this
7407   // transformation. We simply reset here rather than setting the ModifiedDT
7408   // flag to avoid restarting the function walk in runOnFunction for each
7409   // select optimized.
7410   DT.reset();
7411 
7412   // Transform a sequence like this:
7413   //    start:
7414   //       %cmp = cmp uge i32 %a, %b
7415   //       %sel = select i1 %cmp, i32 %c, i32 %d
7416   //
7417   // Into:
7418   //    start:
7419   //       %cmp = cmp uge i32 %a, %b
7420   //       %cmp.frozen = freeze %cmp
7421   //       br i1 %cmp.frozen, label %select.true, label %select.false
7422   //    select.true:
7423   //       br label %select.end
7424   //    select.false:
7425   //       br label %select.end
7426   //    select.end:
7427   //       %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ]
7428   //
7429   // %cmp should be frozen, otherwise it may introduce undefined behavior.
7430   // In addition, we may sink instructions that produce %c or %d from
7431   // the entry block into the destination(s) of the new branch.
7432   // If the true or false blocks do not contain a sunken instruction, that
7433   // block and its branch may be optimized away. In that case, one side of the
7434   // first branch will point directly to select.end, and the corresponding PHI
7435   // predecessor block will be the start block.
7436 
7437   // Collect values that go on the true side and the values that go on the false
7438   // side.
7439   SmallVector<Instruction *> TrueInstrs, FalseInstrs;
7440   for (SelectInst *SI : ASI) {
7441     if (Value *V = SI->getTrueValue(); sinkSelectOperand(TTI, V))
7442       TrueInstrs.push_back(cast<Instruction>(V));
7443     if (Value *V = SI->getFalseValue(); sinkSelectOperand(TTI, V))
7444       FalseInstrs.push_back(cast<Instruction>(V));
7445   }
7446 
7447   // Split the select block, according to how many (if any) values go on each
7448   // side.
7449   BasicBlock *StartBlock = SI->getParent();
7450   BasicBlock::iterator SplitPt = std::next(BasicBlock::iterator(LastSI));
7451   // We should split before any debug-info.
7452   SplitPt.setHeadBit(true);
7453 
7454   IRBuilder<> IB(SI);
7455   auto *CondFr = IB.CreateFreeze(SI->getCondition(), SI->getName() + ".frozen");
7456 
7457   BasicBlock *TrueBlock = nullptr;
7458   BasicBlock *FalseBlock = nullptr;
7459   BasicBlock *EndBlock = nullptr;
7460   BranchInst *TrueBranch = nullptr;
7461   BranchInst *FalseBranch = nullptr;
7462   if (TrueInstrs.size() == 0) {
7463     FalseBranch = cast<BranchInst>(SplitBlockAndInsertIfElse(
7464         CondFr, SplitPt, false, nullptr, nullptr, LI));
7465     FalseBlock = FalseBranch->getParent();
7466     EndBlock = cast<BasicBlock>(FalseBranch->getOperand(0));
7467   } else if (FalseInstrs.size() == 0) {
7468     TrueBranch = cast<BranchInst>(SplitBlockAndInsertIfThen(
7469         CondFr, SplitPt, false, nullptr, nullptr, LI));
7470     TrueBlock = TrueBranch->getParent();
7471     EndBlock = cast<BasicBlock>(TrueBranch->getOperand(0));
7472   } else {
7473     Instruction *ThenTerm = nullptr;
7474     Instruction *ElseTerm = nullptr;
7475     SplitBlockAndInsertIfThenElse(CondFr, SplitPt, &ThenTerm, &ElseTerm,
7476                                   nullptr, nullptr, LI);
7477     TrueBranch = cast<BranchInst>(ThenTerm);
7478     FalseBranch = cast<BranchInst>(ElseTerm);
7479     TrueBlock = TrueBranch->getParent();
7480     FalseBlock = FalseBranch->getParent();
7481     EndBlock = cast<BasicBlock>(TrueBranch->getOperand(0));
7482   }
7483 
7484   EndBlock->setName("select.end");
7485   if (TrueBlock)
7486     TrueBlock->setName("select.true.sink");
7487   if (FalseBlock)
7488     FalseBlock->setName(FalseInstrs.size() == 0 ? "select.false"
7489                                                 : "select.false.sink");
7490 
7491   if (IsHugeFunc) {
7492     if (TrueBlock)
7493       FreshBBs.insert(TrueBlock);
7494     if (FalseBlock)
7495       FreshBBs.insert(FalseBlock);
7496     FreshBBs.insert(EndBlock);
7497   }
7498 
7499   BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock));
7500 
7501   static const unsigned MD[] = {
7502       LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
7503       LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
7504   StartBlock->getTerminator()->copyMetadata(*SI, MD);
7505 
7506   // Sink expensive instructions into the conditional blocks to avoid executing
7507   // them speculatively.
7508   for (Instruction *I : TrueInstrs)
7509     I->moveBefore(TrueBranch);
7510   for (Instruction *I : FalseInstrs)
7511     I->moveBefore(FalseBranch);
7512 
7513   // If we did not create a new block for one of the 'true' or 'false' paths
7514   // of the condition, it means that side of the branch goes to the end block
7515   // directly and the path originates from the start block from the point of
7516   // view of the new PHI.
7517   if (TrueBlock == nullptr)
7518     TrueBlock = StartBlock;
7519   else if (FalseBlock == nullptr)
7520     FalseBlock = StartBlock;
7521 
7522   SmallPtrSet<const Instruction *, 2> INS;
7523   INS.insert(ASI.begin(), ASI.end());
7524   // Use a reverse iterator because a later select may use the value of an
7525   // earlier select, and we need to propagate the value through the earlier
7526   // select to get the PHI operand.
7527   for (SelectInst *SI : llvm::reverse(ASI)) {
7528     // The select itself is replaced with a PHI Node.
7529     PHINode *PN = PHINode::Create(SI->getType(), 2, "");
7530     PN->insertBefore(EndBlock->begin());
7531     PN->takeName(SI);
7532     PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock);
7533     PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock);
7534     PN->setDebugLoc(SI->getDebugLoc());
7535 
7536     replaceAllUsesWith(SI, PN, FreshBBs, IsHugeFunc);
7537     SI->eraseFromParent();
7538     INS.erase(SI);
7539     ++NumSelectsExpanded;
7540   }
7541 
7542   // Instruct OptimizeBlock to skip to the next block.
7543   CurInstIterator = StartBlock->end();
7544   return true;
7545 }
7546 
7547 /// Some targets only accept certain types for splat inputs. For example a VDUP
7548 /// in MVE takes a GPR (integer) register, and the instructions that incorporate
7549 /// a VDUP (such as a VADD qd, qm, rm) also require a GPR register.
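/// For illustration (a sketch, assuming shouldConvertSplatType asks for i32):
///   %ins = insertelement <4 x float> poison, float %v, i64 0
///   %splat = shufflevector <4 x float> %ins, <4 x float> poison, zeroinitializer
/// is rewritten to bitcast %v to i32, splat that value as <4 x i32>, and
/// bitcast the splat back to <4 x float>.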
7550 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
7551   // Accept shuf(insertelem(undef/poison, val, 0), undef/poison, <0,0,..>) only
7552   if (!match(SVI, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
7553                             m_Undef(), m_ZeroMask())))
7554     return false;
7555   Type *NewType = TLI->shouldConvertSplatType(SVI);
7556   if (!NewType)
7557     return false;
7558 
7559   auto *SVIVecType = cast<FixedVectorType>(SVI->getType());
7560   assert(!NewType->isVectorTy() && "Expected a scalar type!");
7561   assert(NewType->getScalarSizeInBits() == SVIVecType->getScalarSizeInBits() &&
7562          "Expected a type of the same size!");
7563   auto *NewVecType =
7564       FixedVectorType::get(NewType, SVIVecType->getNumElements());
7565 
7566   // Create a bitcast (shuffle (insert (bitcast(..))))
7567   IRBuilder<> Builder(SVI->getContext());
7568   Builder.SetInsertPoint(SVI);
7569   Value *BC1 = Builder.CreateBitCast(
7570       cast<Instruction>(SVI->getOperand(0))->getOperand(1), NewType);
7571   Value *Shuffle = Builder.CreateVectorSplat(NewVecType->getNumElements(), BC1);
7572   Value *BC2 = Builder.CreateBitCast(Shuffle, SVIVecType);
7573 
7574   replaceAllUsesWith(SVI, BC2, FreshBBs, IsHugeFunc);
7575   RecursivelyDeleteTriviallyDeadInstructions(
7576       SVI, TLInfo, nullptr,
7577       [&](Value *V) { removeAllAssertingVHReferences(V); });
7578 
7579   // Also hoist the bitcast up to its operand if they are not in the same
7580   // block.
7581   if (auto *BCI = dyn_cast<Instruction>(BC1))
7582     if (auto *Op = dyn_cast<Instruction>(BCI->getOperand(0)))
7583       if (BCI->getParent() != Op->getParent() && !isa<PHINode>(Op) &&
7584           !Op->isTerminator() && !Op->isEHPad())
7585         BCI->moveAfter(Op);
7586 
7587   return true;
7588 }
7589 
7590 bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
7591   // If the operands of I can be folded into a target instruction together with
7592   // I, duplicate and sink them.
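  // For illustration (a sketch; profitability is decided by the target via
  // isProfitableToSinkOperands): if a splat defined in another block feeds I,
  //   bb0:  %s = shufflevector ... zeroinitializer
  //   bb1:  %r = mul <4 x i32> %x, %s
  // a clone of %s is inserted just before %r so the target can fold it, and
  // the original %s is erased if it is left with no uses.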
7593   SmallVector<Use *, 4> OpsToSink;
7594   if (!TTI->isProfitableToSinkOperands(I, OpsToSink))
7595     return false;
7596 
7597   // OpsToSink can contain multiple uses in a use chain (e.g.
7598   // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating
7599   // uses must come first, so we process the ops in reverse order so as to not
7600   // create invalid IR.
7601   BasicBlock *TargetBB = I->getParent();
7602   bool Changed = false;
7603   SmallVector<Use *, 4> ToReplace;
7604   Instruction *InsertPoint = I;
7605   DenseMap<const Instruction *, unsigned long> InstOrdering;
7606   unsigned long InstNumber = 0;
7607   for (const auto &I : *TargetBB)
7608     InstOrdering[&I] = InstNumber++;
7609 
7610   for (Use *U : reverse(OpsToSink)) {
7611     auto *UI = cast<Instruction>(U->get());
7612     if (isa<PHINode>(UI))
7613       continue;
7614     if (UI->getParent() == TargetBB) {
7615       if (InstOrdering[UI] < InstOrdering[InsertPoint])
7616         InsertPoint = UI;
7617       continue;
7618     }
7619     ToReplace.push_back(U);
7620   }
7621 
7622   SetVector<Instruction *> MaybeDead;
7623   DenseMap<Instruction *, Instruction *> NewInstructions;
7624   for (Use *U : ToReplace) {
7625     auto *UI = cast<Instruction>(U->get());
7626     Instruction *NI = UI->clone();
7627 
7628     if (IsHugeFunc) {
7629       // Now that we have cloned an instruction, its operands' defs may be sunk
7630       // into this BB, so we add the operands' defining BBs to FreshBBs for
7631       // further optimization.
7631       for (Value *Op : NI->operands())
7632         if (auto *OpDef = dyn_cast<Instruction>(Op))
7633           FreshBBs.insert(OpDef->getParent());
7634     }
7635 
7636     NewInstructions[UI] = NI;
7637     MaybeDead.insert(UI);
7638     LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n");
7639     NI->insertBefore(InsertPoint);
7640     InsertPoint = NI;
7641     InsertedInsts.insert(NI);
7642 
7643     // Update the use for the new instruction, making sure that we update the
7644     // sunk instruction uses, if it is part of a chain that has already been
7645     // sunk.
7646     Instruction *OldI = cast<Instruction>(U->getUser());
7647     if (NewInstructions.count(OldI))
7648       NewInstructions[OldI]->setOperand(U->getOperandNo(), NI);
7649     else
7650       U->set(NI);
7651     Changed = true;
7652   }
7653 
7654   // Remove instructions that are dead after sinking.
7655   for (auto *I : MaybeDead) {
7656     if (!I->hasNUsesOrMore(1)) {
7657       LLVM_DEBUG(dbgs() << "Removing dead instruction: " << *I << "\n");
7658       I->eraseFromParent();
7659     }
7660   }
7661 
7662   return Changed;
7663 }
7664 
7665 bool CodeGenPrepare::optimizeSwitchType(SwitchInst *SI) {
7666   Value *Cond = SI->getCondition();
7667   Type *OldType = Cond->getType();
7668   LLVMContext &Context = Cond->getContext();
7669   EVT OldVT = TLI->getValueType(*DL, OldType);
7670   MVT RegType = TLI->getPreferredSwitchConditionType(Context, OldVT);
7671   unsigned RegWidth = RegType.getSizeInBits();
7672 
7673   if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
7674     return false;
7675 
7676   // If the register width is greater than the type width, expand the condition
7677   // of the switch instruction and each case constant to the width of the
7678   // register. By widening the type of the switch condition, subsequent
7679   // comparisons (for case comparisons) will not need to be extended to the
7680   // preferred register width, so we will potentially eliminate N-1 extends,
7681   // where N is the number of cases in the switch.
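  // For illustration (a sketch assuming a 64-bit preferred register width):
  //   switch i32 %x, ... [ i32 1, ... ] [ i32 2, ... ]
  // becomes
  //   %ext = zext i32 %x to i64
  //   switch i64 %ext, ... [ i64 1, ... ] [ i64 2, ... ]
  // (using sext instead if the target or an argument attribute prefers it).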
7682   auto *NewType = Type::getIntNTy(Context, RegWidth);
7683 
7684   // Extend the switch condition and case constants using the target preferred
7685   // extend unless the switch condition is a function argument with an extend
7686   // attribute. In that case, we can avoid an unnecessary mask/extension by
7687   // matching the argument extension instead.
7688   Instruction::CastOps ExtType = Instruction::ZExt;
7689   // Some targets prefer SExt over ZExt.
7690   if (TLI->isSExtCheaperThanZExt(OldVT, RegType))
7691     ExtType = Instruction::SExt;
7692 
7693   if (auto *Arg = dyn_cast<Argument>(Cond)) {
7694     if (Arg->hasSExtAttr())
7695       ExtType = Instruction::SExt;
7696     if (Arg->hasZExtAttr())
7697       ExtType = Instruction::ZExt;
7698   }
7699 
7700   auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
7701   ExtInst->insertBefore(SI);
7702   ExtInst->setDebugLoc(SI->getDebugLoc());
7703   SI->setCondition(ExtInst);
7704   for (auto Case : SI->cases()) {
7705     const APInt &NarrowConst = Case.getCaseValue()->getValue();
7706     APInt WideConst = (ExtType == Instruction::ZExt)
7707                           ? NarrowConst.zext(RegWidth)
7708                           : NarrowConst.sext(RegWidth);
7709     Case.setValue(ConstantInt::get(Context, WideConst));
7710   }
7711 
7712   return true;
7713 }
7714 
7715 bool CodeGenPrepare::optimizeSwitchPhiConstants(SwitchInst *SI) {
7716   // The SCCP optimization tends to produce code like this:
7717   //   switch(x) { case 42: phi(42, ...) }
7718   // Materializing the constant for the phi-argument needs instructions, so we
7719   // change the code to:
7720   //   switch(x) { case 42: phi(x, ...) }
7721 
7722   Value *Condition = SI->getCondition();
7723   // Avoid endless loop in degenerate case.
7724   if (isa<ConstantInt>(*Condition))
7725     return false;
7726 
7727   bool Changed = false;
7728   BasicBlock *SwitchBB = SI->getParent();
7729   Type *ConditionType = Condition->getType();
7730 
7731   for (const SwitchInst::CaseHandle &Case : SI->cases()) {
7732     ConstantInt *CaseValue = Case.getCaseValue();
7733     BasicBlock *CaseBB = Case.getCaseSuccessor();
7734     // Set to true if we previously checked that `CaseBB` is only reached by
7735     // a single case from this switch.
7736     bool CheckedForSinglePred = false;
7737     for (PHINode &PHI : CaseBB->phis()) {
7738       Type *PHIType = PHI.getType();
7739       // If ZExt is free then we can also catch patterns like this:
7740       //   switch((i32)x) { case 42: phi((i64)42, ...); }
7741       // and replace `(i64)42` with `zext i32 %x to i64`.
7742       bool TryZExt =
7743           PHIType->isIntegerTy() &&
7744           PHIType->getIntegerBitWidth() > ConditionType->getIntegerBitWidth() &&
7745           TLI->isZExtFree(ConditionType, PHIType);
7746       if (PHIType == ConditionType || TryZExt) {
7747         // Set to true to skip this case because of multiple preds.
7748         bool SkipCase = false;
7749         Value *Replacement = nullptr;
7750         for (unsigned I = 0, E = PHI.getNumIncomingValues(); I != E; I++) {
7751           Value *PHIValue = PHI.getIncomingValue(I);
7752           if (PHIValue != CaseValue) {
7753             if (!TryZExt)
7754               continue;
7755             ConstantInt *PHIValueInt = dyn_cast<ConstantInt>(PHIValue);
7756             if (!PHIValueInt ||
7757                 PHIValueInt->getValue() !=
7758                     CaseValue->getValue().zext(PHIType->getIntegerBitWidth()))
7759               continue;
7760           }
7761           if (PHI.getIncomingBlock(I) != SwitchBB)
7762             continue;
7763           // We cannot optimize if there are multiple case labels jumping to
7764           // this block.  This check may get expensive when there are many
7765           // case labels, so we test for it last.
7766           if (!CheckedForSinglePred) {
7767             CheckedForSinglePred = true;
7768             if (SI->findCaseDest(CaseBB) == nullptr) {
7769               SkipCase = true;
7770               break;
7771             }
7772           }
7773 
7774           if (Replacement == nullptr) {
7775             if (PHIValue == CaseValue) {
7776               Replacement = Condition;
7777             } else {
7778               IRBuilder<> Builder(SI);
7779               Replacement = Builder.CreateZExt(Condition, PHIType);
7780             }
7781           }
7782           PHI.setIncomingValue(I, Replacement);
7783           Changed = true;
7784         }
7785         if (SkipCase)
7786           break;
7787       }
7788     }
7789   }
7790   return Changed;
7791 }
7792 
7793 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
7794   bool Changed = optimizeSwitchType(SI);
7795   Changed |= optimizeSwitchPhiConstants(SI);
7796   return Changed;
7797 }
7798 
7799 namespace {
7800 
7801 /// Helper class to promote a scalar operation to a vector one.
7802 /// This class is used to move an extractelement transition downward.
7803 /// E.g.,
7804 /// a = vector_op <2 x i32>
7805 /// b = extractelement <2 x i32> a, i32 0
7806 /// c = scalar_op b
7807 /// store c
7808 ///
7809 /// =>
7810 /// a = vector_op <2 x i32>
7811 /// c = vector_op a (equivalent to scalar_op on the related lane)
7812 /// * d = extractelement <2 x i32> c, i32 0
7813 /// * store d
7814 /// Assuming both extractelement and store can be combined, we get rid of the
7815 /// transition.
7816 class VectorPromoteHelper {
7817   /// DataLayout associated with the current module.
7818   const DataLayout &DL;
7819 
7820   /// Used to perform some checks on the legality of vector operations.
7821   const TargetLowering &TLI;
7822 
7823   /// Used to estimate the cost of the promoted chain.
7824   const TargetTransformInfo &TTI;
7825 
7826   /// The transition being moved downwards.
7827   Instruction *Transition;
7828 
7829   /// The sequence of instructions to be promoted.
7830   SmallVector<Instruction *, 4> InstsToBePromoted;
7831 
7832   /// Cost of combining a store and an extract.
7833   unsigned StoreExtractCombineCost;
7834 
7835   /// Instruction that will be combined with the transition.
7836   Instruction *CombineInst = nullptr;
7837 
7838   /// The instruction that represents the current end of the transition.
7839   /// Since we are faking the promotion until we reach the end of the chain
7840   /// of computation, we need a way to get the current end of the transition.
7841   Instruction *getEndOfTransition() const {
7842     if (InstsToBePromoted.empty())
7843       return Transition;
7844     return InstsToBePromoted.back();
7845   }
7846 
7847   /// Return the index of the original value in the transition.
7848   /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
7849   /// c, is at index 0.
7850   unsigned getTransitionOriginalValueIdx() const {
7851     assert(isa<ExtractElementInst>(Transition) &&
7852            "Other kind of transitions are not supported yet");
7853     return 0;
7854   }
7855 
7856   /// Return the index of the index in the transition.
7857   /// E.g., for "extractelement <2 x i32> c, i32 0" the index
7858   /// is at index 1.
7859   unsigned getTransitionIdx() const {
7860     assert(isa<ExtractElementInst>(Transition) &&
7861            "Other kind of transitions are not supported yet");
7862     return 1;
7863   }
7864 
7865   /// Get the type of the transition.
7866   /// This is the type of the original value.
7867   /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
7868   /// transition is <2 x i32>.
7869   Type *getTransitionType() const {
7870     return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
7871   }
7872 
7873   /// Promote \p ToBePromoted by moving \p Def downward past it.
7874   /// I.e., we have the following sequence:
7875   /// Def = Transition <ty1> a to <ty2>
7876   /// b = ToBePromoted <ty2> Def, ...
7877   /// =>
7878   /// b = ToBePromoted <ty1> a, ...
7879   /// Def = Transition <ty1> ToBePromoted to <ty2>
7880   void promoteImpl(Instruction *ToBePromoted);
7881 
7882   /// Check whether or not it is profitable to promote all the
7883   /// instructions enqueued to be promoted.
7884   bool isProfitableToPromote() {
7885     Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
7886     unsigned Index = isa<ConstantInt>(ValIdx)
7887                          ? cast<ConstantInt>(ValIdx)->getZExtValue()
7888                          : -1;
7889     Type *PromotedType = getTransitionType();
7890 
7891     StoreInst *ST = cast<StoreInst>(CombineInst);
7892     unsigned AS = ST->getPointerAddressSpace();
7893     // Check if this store is supported.
7894     if (!TLI.allowsMisalignedMemoryAccesses(
7895             TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
7896             ST->getAlign())) {
7897       // If this is not supported, there is no way we can combine
7898       // the extract with the store.
7899       return false;
7900     }
7901 
7902     // The scalar chain of computation has to pay for the transition from
7903     // scalar to vector.
7904     // The vector chain has to account for the combining cost.
7905     enum TargetTransformInfo::TargetCostKind CostKind =
7906         TargetTransformInfo::TCK_RecipThroughput;
7907     InstructionCost ScalarCost =
7908         TTI.getVectorInstrCost(*Transition, PromotedType, CostKind, Index);
7909     InstructionCost VectorCost = StoreExtractCombineCost;
7910     for (const auto &Inst : InstsToBePromoted) {
7911       // Compute the cost.
7912       // By construction, all instructions being promoted are arithmetic ones.
7913       // Moreover, one argument is a constant that can be viewed as a splat
7914       // constant.
7915       Value *Arg0 = Inst->getOperand(0);
7916       bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
7917                             isa<ConstantFP>(Arg0);
7918       TargetTransformInfo::OperandValueInfo Arg0Info, Arg1Info;
7919       if (IsArg0Constant)
7920         Arg0Info.Kind = TargetTransformInfo::OK_UniformConstantValue;
7921       else
7922         Arg1Info.Kind = TargetTransformInfo::OK_UniformConstantValue;
7923 
7924       ScalarCost += TTI.getArithmeticInstrCost(
7925           Inst->getOpcode(), Inst->getType(), CostKind, Arg0Info, Arg1Info);
7926       VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
7927                                                CostKind, Arg0Info, Arg1Info);
7928     }
7929     LLVM_DEBUG(
7930         dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
7931                << ScalarCost << "\nVector: " << VectorCost << '\n');
7932     return ScalarCost > VectorCost;
7933   }
7934 
7935   /// Generate a constant vector with \p Val with the same
7936   /// number of elements as the transition.
7937   /// \p UseSplat defines whether or not \p Val should be replicated
7938   /// across the whole vector.
7939   /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
7940   /// otherwise we generate a vector with as many undef as possible:
7941   /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
7942   /// used at the index of the extract.
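       /// For illustration only (hypothetical values): with a <4 x i32>
       /// transition, \p Val == 7 and an extract index of 1, this returns
       ///   <i32 7, i32 7, i32 7, i32 7>              if UseSplat is true, and
       ///   <i32 undef, i32 7, i32 undef, i32 undef>  otherwise.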
7943   Value *getConstantVector(Constant *Val, bool UseSplat) const {
7944     unsigned ExtractIdx = std::numeric_limits<unsigned>::max();
7945     if (!UseSplat) {
7946       // If we cannot determine where the constant must be, we have to
7947       // use a splat constant.
7948       Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
7949       if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
7950         ExtractIdx = CstVal->getSExtValue();
7951       else
7952         UseSplat = true;
7953     }
7954 
7955     ElementCount EC = cast<VectorType>(getTransitionType())->getElementCount();
7956     if (UseSplat)
7957       return ConstantVector::getSplat(EC, Val);
7958 
7959     if (!EC.isScalable()) {
7960       SmallVector<Constant *, 4> ConstVec;
7961       UndefValue *UndefVal = UndefValue::get(Val->getType());
7962       for (unsigned Idx = 0; Idx != EC.getKnownMinValue(); ++Idx) {
7963         if (Idx == ExtractIdx)
7964           ConstVec.push_back(Val);
7965         else
7966           ConstVec.push_back(UndefVal);
7967       }
7968       return ConstantVector::get(ConstVec);
7969     } else
7970       llvm_unreachable(
7971           "Generate scalable vector for non-splat is unimplemented");
7972   }
7973 
7974   /// Check if promoting an operand at \p OperandIdx in \p Use to a vector
7975   /// type can trigger undefined behavior.
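       /// E.g. (illustrative), for 'udiv %x, Def' where Def is the promoted
       /// transition, the non-extracted lanes of the divisor could become undef,
       /// potentially introducing a division by zero or undef.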
7976   static bool canCauseUndefinedBehavior(const Instruction *Use,
7977                                         unsigned OperandIdx) {
7978     // It is not safe to introduce undef when the operand is on
7979     // the right-hand side of a division-like instruction.
7980     if (OperandIdx != 1)
7981       return false;
7982     switch (Use->getOpcode()) {
7983     default:
7984       return false;
7985     case Instruction::SDiv:
7986     case Instruction::UDiv:
7987     case Instruction::SRem:
7988     case Instruction::URem:
7989       return true;
7990     case Instruction::FDiv:
7991     case Instruction::FRem:
7992       return !Use->hasNoNaNs();
7993     }
7994     llvm_unreachable(nullptr);
7995   }
7996 
7997 public:
7998   VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
7999                       const TargetTransformInfo &TTI, Instruction *Transition,
8000                       unsigned CombineCost)
8001       : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
8002         StoreExtractCombineCost(CombineCost) {
8003     assert(Transition && "Do not know how to promote null");
8004   }
8005 
8006   /// Check if we can promote \p ToBePromoted to \p Type.
8007   bool canPromote(const Instruction *ToBePromoted) const {
8008     // We could support CastInst too.
8009     return isa<BinaryOperator>(ToBePromoted);
8010   }
8011 
8012   /// Check if it is profitable to promote \p ToBePromoted
8013   /// by moving the transition downward through it.
8014   bool shouldPromote(const Instruction *ToBePromoted) const {
8015     // Promote only if all the operands can be statically expanded.
8016     // Indeed, we do not want to introduce any new kind of transitions.
8017     for (const Use &U : ToBePromoted->operands()) {
8018       const Value *Val = U.get();
8019       if (Val == getEndOfTransition()) {
8020         // If the use is a division and the transition is on the rhs,
8021         // we cannot promote the operation, otherwise we may create a
8022         // division by zero.
8023         if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
8024           return false;
8025         continue;
8026       }
8027       if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
8028           !isa<ConstantFP>(Val))
8029         return false;
8030     }
8031     // Check that the resulting operation is legal.
8032     int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
8033     if (!ISDOpcode)
8034       return false;
8035     return StressStoreExtract ||
8036            TLI.isOperationLegalOrCustom(
8037                ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
8038   }
8039 
8040   /// Check whether or not \p Use can be combined
8041   /// with the transition.
8042   /// I.e., is it possible to do Use(Transition) => AnotherUse?
8043   bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }
8044 
8045   /// Record \p ToBePromoted as part of the chain to be promoted.
8046   void enqueueForPromotion(Instruction *ToBePromoted) {
8047     InstsToBePromoted.push_back(ToBePromoted);
8048   }
8049 
8050   /// Set the instruction that will be combined with the transition.
8051   void recordCombineInstruction(Instruction *ToBeCombined) {
8052     assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
8053     CombineInst = ToBeCombined;
8054   }
8055 
8056   /// Promote all the instructions enqueued for promotion if it
8057   /// is profitable.
8058   /// \return True if the promotion happened, false otherwise.
8059   bool promote() {
8060     // Check if there is something to promote.
8061     // Right now, if we do not have anything to combine with,
8062     // we assume the promotion is not profitable.
8063     if (InstsToBePromoted.empty() || !CombineInst)
8064       return false;
8065 
8066     // Check cost.
8067     if (!StressStoreExtract && !isProfitableToPromote())
8068       return false;
8069 
8070     // Promote.
8071     for (auto &ToBePromoted : InstsToBePromoted)
8072       promoteImpl(ToBePromoted);
8073     InstsToBePromoted.clear();
8074     return true;
8075   }
8076 };
8077 
8078 } // end anonymous namespace
8079 
8080 void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
8081   // At this point, we know that all the operands of ToBePromoted but Def
8082   // can be statically promoted.
8083   // For Def, we need to use its parameter in ToBePromoted:
8084   // b = ToBePromoted ty1 a
8085   // Def = Transition ty1 b to ty2
8086   // Move the transition down.
8087   // 1. Replace all uses of the promoted operation by the transition.
8088   // = ... b => = ... Def.
8089   assert(ToBePromoted->getType() == Transition->getType() &&
8090          "The type of the result of the transition does not match "
8091          "the final type");
8092   ToBePromoted->replaceAllUsesWith(Transition);
8093   // 2. Update the type of the uses.
8094   // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
8095   Type *TransitionTy = getTransitionType();
8096   ToBePromoted->mutateType(TransitionTy);
8097   // 3. Update all the operands of the promoted operation with promoted
8098   // operands.
8099   // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
8100   for (Use &U : ToBePromoted->operands()) {
8101     Value *Val = U.get();
8102     Value *NewVal = nullptr;
8103     if (Val == Transition)
8104       NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
8105     else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
8106              isa<ConstantFP>(Val)) {
8107       // Use a splat constant if it is not safe to use undef.
8108       NewVal = getConstantVector(
8109           cast<Constant>(Val),
8110           isa<UndefValue>(Val) ||
8111               canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
8112     } else
8113       llvm_unreachable("Did you modified shouldPromote and forgot to update "
8114                        "this?");
8115     ToBePromoted->setOperand(U.getOperandNo(), NewVal);
8116   }
8117   Transition->moveAfter(ToBePromoted);
8118   Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
8119 }
8120 
8121 /// Some targets can do store(extractelement) with one instruction.
8122 /// Try to push the extractelement towards the stores when the target
8123 /// has this feature and this is profitable.
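     /// For illustration only (hypothetical IR):
     ///   %e = extractelement <2 x i32> %v, i32 1
     ///   %a = add i32 %e, 1
     ///   store i32 %a, ptr %p
     /// may be rewritten as
     ///   %a = add <2 x i32> %v, <i32 undef, i32 1>
     ///   %e = extractelement <2 x i32> %a, i32 1
     ///   store i32 %e, ptr %p
     /// so that the final extract can be folded into the store on targets that
     /// support a combined store(extractelement) operation.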
8124 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
8125   unsigned CombineCost = std::numeric_limits<unsigned>::max();
8126   if (DisableStoreExtract ||
8127       (!StressStoreExtract &&
8128        !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
8129                                        Inst->getOperand(1), CombineCost)))
8130     return false;
8131 
8132   // At this point we know that Inst is a vector to scalar transition.
8133   // Try to move it down the def-use chain, until:
8134   // - We can combine the transition with its single use
8135   //   => we got rid of the transition.
8136   // - We escape the current basic block
8137   //   => we would need to check that we are moving it at a cheaper place and
8138   //      we do not do that for now.
8139   BasicBlock *Parent = Inst->getParent();
8140   LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
8141   VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
8142   // If the transition has more than one use, assume this is not going to be
8143   // beneficial.
8144   while (Inst->hasOneUse()) {
8145     Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
8146     LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');
8147 
8148     if (ToBePromoted->getParent() != Parent) {
8149       LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block ("
8150                         << ToBePromoted->getParent()->getName()
8151                         << ") than the transition (" << Parent->getName()
8152                         << ").\n");
8153       return false;
8154     }
8155 
8156     if (VPH.canCombine(ToBePromoted)) {
8157       LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n'
8158                         << "will be combined with: " << *ToBePromoted << '\n');
8159       VPH.recordCombineInstruction(ToBePromoted);
8160       bool Changed = VPH.promote();
8161       NumStoreExtractExposed += Changed;
8162       return Changed;
8163     }
8164 
8165     LLVM_DEBUG(dbgs() << "Try promoting.\n");
8166     if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
8167       return false;
8168 
8169     LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");
8170 
8171     VPH.enqueueForPromotion(ToBePromoted);
8172     Inst = ToBePromoted;
8173   }
8174   return false;
8175 }
8176 
8177 /// For the instruction sequence of store below, F and I values
8178 /// are bundled together as an i64 value before being stored into memory.
8179 /// Sometimes it is more efficient to generate separate stores for F and I,
8180 /// which can remove the bitwise instructions or sink them to colder places.
8181 ///
8182 ///   (store (or (zext (bitcast F to i32) to i64),
8183 ///              (shl (zext I to i64), 32)), addr)  -->
8184 ///   (store F, addr) and (store I, addr+4)
8185 ///
8186 /// Similarly, splitting for other merged stores can also be beneficial, like:
8187 /// For pair of {i32, i32}, i64 store --> two i32 stores.
8188 /// For pair of {i32, i16}, i64 store --> two i32 stores.
8189 /// For pair of {i16, i16}, i32 store --> two i16 stores.
8190 /// For pair of {i16, i8},  i32 store --> two i16 stores.
8191 /// For pair of {i8, i8},   i16 store --> two i8 stores.
8192 ///
8193 /// We allow each target to determine specifically which kind of splitting is
8194 /// supported.
8195 ///
8196 /// The store patterns are commonly seen from the simple code snippet below
8197 /// if only std::make_pair(...) is SROA-transformed before being inlined into hoo.
8198 ///   void goo(const std::pair<int, float> &);
8199 ///   hoo() {
8200 ///     ...
8201 ///     goo(std::make_pair(tmp, ftmp));
8202 ///     ...
8203 ///   }
8204 ///
8205 /// Although we already have similar splitting in DAG Combine, we duplicate
8206 /// it in CodeGenPrepare to catch the case in which the pattern spans
8207 /// multiple BBs. The logic in DAG Combine is kept to catch cases generated
8208 /// during code expansion.
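     ///
     /// As a sketch (hypothetical IR for the {float, int} example above, on a
     /// little-endian target):
     ///   %f32 = bitcast float %F to i32
     ///   %lo  = zext i32 %f32 to i64
     ///   %hi  = zext i32 %I to i64
     ///   %shl = shl i64 %hi, 32
     ///   %or  = or i64 %lo, %shl
     ///   store i64 %or, ptr %addr
     /// becomes
     ///   store i32 %f32, ptr %addr
     ///   %addr.hi = getelementptr i32, ptr %addr, i32 1
     ///   store i32 %I, ptr %addr.hi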
8209 static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
8210                                 const TargetLowering &TLI) {
8211   // Handle simple but common cases only.
8212   Type *StoreType = SI.getValueOperand()->getType();
8213 
8214   // The code below assumes shifting a value by <number of bits>,
8215   // whereas scalable vectors would have to be shifted by
8216   // <log2(vscale) + number of bits> in order to store the
8217   // low/high parts. Bailing out for now.
8218   if (StoreType->isScalableTy())
8219     return false;
8220 
8221   if (!DL.typeSizeEqualsStoreSize(StoreType) ||
8222       DL.getTypeSizeInBits(StoreType) == 0)
8223     return false;
8224 
8225   unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
8226   Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
8227   if (!DL.typeSizeEqualsStoreSize(SplitStoreType))
8228     return false;
8229 
8230   // Don't split the store if it is volatile.
8231   if (SI.isVolatile())
8232     return false;
8233 
8234   // Match the following patterns:
8235   // (store (or (zext LValue to i64),
8236   //            (shl (zext HValue to i64), HalfValBitSize)), addr)
8237   //  or
8238   // (store (or (shl (zext HValue to i64), HalfValBitSize),
8239   //            (zext LValue to i64)), addr)
8240   // Expect both operands of the OR and the first operand of the SHL to
8241   // have only one use.
8242   Value *LValue, *HValue;
8243   if (!match(SI.getValueOperand(),
8244              m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
8245                     m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
8246                                    m_SpecificInt(HalfValBitSize))))))
8247     return false;
8248 
8249   // Check that LValue and HValue are integers no wider than HalfValBitSize.
8250   if (!LValue->getType()->isIntegerTy() ||
8251       DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
8252       !HValue->getType()->isIntegerTy() ||
8253       DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
8254     return false;
8255 
8256   // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
8257   // as the input of target query.
8258   auto *LBC = dyn_cast<BitCastInst>(LValue);
8259   auto *HBC = dyn_cast<BitCastInst>(HValue);
8260   EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
8261                   : EVT::getEVT(LValue->getType());
8262   EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
8263                    : EVT::getEVT(HValue->getType());
8264   if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
8265     return false;
8266 
8267   // Start to split store.
8268   IRBuilder<> Builder(SI.getContext());
8269   Builder.SetInsertPoint(&SI);
8270 
8271   // If LValue/HValue is a bitcast in another BB, create a new one in the
8272   // current BB so it may be merged with the split stores by the DAG combiner.
8273   if (LBC && LBC->getParent() != SI.getParent())
8274     LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
8275   if (HBC && HBC->getParent() != SI.getParent())
8276     HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());
8277 
8278   bool IsLE = SI.getDataLayout().isLittleEndian();
8279   auto CreateSplitStore = [&](Value *V, bool Upper) {
8280     V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
8281     Value *Addr = SI.getPointerOperand();
8282     Align Alignment = SI.getAlign();
8283     const bool IsOffsetStore = (IsLE && Upper) || (!IsLE && !Upper);
8284     if (IsOffsetStore) {
8285       Addr = Builder.CreateGEP(
8286           SplitStoreType, Addr,
8287           ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
8288 
8289       // When splitting the store in half, naturally one half will retain the
8290       // alignment of the original wider store, regardless of whether it was
8291       // over-aligned or not, while the other will require adjustment.
8292       Alignment = commonAlignment(Alignment, HalfValBitSize / 8);
8293     }
8294     Builder.CreateAlignedStore(V, Addr, Alignment);
8295   };
8296 
8297   CreateSplitStore(LValue, false);
8298   CreateSplitStore(HValue, true);
8299 
8300   // Delete the old store.
8301   SI.eraseFromParent();
8302   return true;
8303 }
8304 
8305 // Return true if the GEP has two operands, the first operand is of a sequential
8306 // type, and the second operand is a constant.
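     // E.g. (illustrative), "%g = getelementptr i32, ptr %p, i64 4" matches,
     // whereas a GEP with a variable index or with more than one index does not.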
8307 static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) {
8308   gep_type_iterator I = gep_type_begin(*GEP);
8309   return GEP->getNumOperands() == 2 && I.isSequential() &&
8310          isa<ConstantInt>(GEP->getOperand(1));
8311 }
8312 
8313 // Try unmerging GEPs to reduce liveness interference (register pressure) across
8314 // IndirectBr edges. Since IndirectBr edges tend to touch on many blocks,
8315 // reducing liveness interference across those edges benefits global register
8316 // allocation. Currently handles only certain cases.
8317 //
8318 // For example, unmerge %GEPI and %UGEPI as below.
8319 //
8320 // ---------- BEFORE ----------
8321 // SrcBlock:
8322 //   ...
8323 //   %GEPIOp = ...
8324 //   ...
8325 //   %GEPI = gep %GEPIOp, Idx
8326 //   ...
8327 //   indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ]
8328 //   (* %GEPI is alive on the indirectbr edges due to other uses ahead)
8329 //   (* %GEPIOp is alive on the indirectbr edges only because it's used by
8330 //   %UGEPI)
8331 //
8332 // DstB0: ... (there may be a gep similar to %UGEPI to be unmerged)
8333 // DstB1: ... (there may be a gep similar to %UGEPI to be unmerged)
8334 // ...
8335 //
8336 // DstBi:
8337 //   ...
8338 //   %UGEPI = gep %GEPIOp, UIdx
8339 // ...
8340 // ---------------------------
8341 //
8342 // ---------- AFTER ----------
8343 // SrcBlock:
8344 //   ... (same as above)
8345 //    (* %GEPI is still alive on the indirectbr edges)
8346 //    (* %GEPIOp is no longer alive on the indirectbr edges as a result of the
8347 //    unmerging)
8348 // ...
8349 //
8350 // DstBi:
8351 //   ...
8352 //   %UGEPI = gep %GEPI, (UIdx-Idx)
8353 //   ...
8354 // ---------------------------
8355 //
8356 // The register pressure on the IndirectBr edges is reduced because %GEPIOp is
8357 // no longer alive on them.
8358 //
8359 // We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting merging
8360 // of GEPs in the first place in InstCombiner::visitGetElementPtrInst() so as
8361 // not to disable further simplifications and optimizations as a result of GEP
8362 // merging.
8363 //
8364 // Note this unmerging may increase the length of the data flow critical path
8365 // (the path from %GEPIOp to %UGEPI would go through %GEPI), which is a tradeoff
8366 // between the register pressure and the length of data-flow critical
8367 // path. Restricting this to the uncommon IndirectBr case would minimize the
8368 // impact of potentially longer critical path, if any, and the impact on compile
8369 // time.
8370 static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
8371                                              const TargetTransformInfo *TTI) {
8372   BasicBlock *SrcBlock = GEPI->getParent();
8373   // Check that SrcBlock ends with an IndirectBr. If not, give up. The common
8374   // (non-IndirectBr) cases exit early here.
8375   if (!isa<IndirectBrInst>(SrcBlock->getTerminator()))
8376     return false;
8377   // Check that GEPI is a simple gep with a single constant index.
8378   if (!GEPSequentialConstIndexed(GEPI))
8379     return false;
8380   ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1));
8381   // Check that GEPI is a cheap one.
8382   if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType(),
8383                          TargetTransformInfo::TCK_SizeAndLatency) >
8384       TargetTransformInfo::TCC_Basic)
8385     return false;
8386   Value *GEPIOp = GEPI->getOperand(0);
8387   // Check that GEPIOp is an instruction that's also defined in SrcBlock.
8388   if (!isa<Instruction>(GEPIOp))
8389     return false;
8390   auto *GEPIOpI = cast<Instruction>(GEPIOp);
8391   if (GEPIOpI->getParent() != SrcBlock)
8392     return false;
8393   // Check that GEPI is used outside the block, meaning it's alive on the
8394   // IndirectBr edge(s).
8395   if (llvm::none_of(GEPI->users(), [&](User *Usr) {
8396         if (auto *I = dyn_cast<Instruction>(Usr)) {
8397           if (I->getParent() != SrcBlock) {
8398             return true;
8399           }
8400         }
8401         return false;
8402       }))
8403     return false;
8404   // The second elements of the GEP chains to be unmerged.
8405   std::vector<GetElementPtrInst *> UGEPIs;
8406   // Check each user of GEPIOp to see if unmerging would make GEPIOp not alive
8407   // on IndirectBr edges.
8408   for (User *Usr : GEPIOp->users()) {
8409     if (Usr == GEPI)
8410       continue;
8411     // Check if Usr is an Instruction. If not, give up.
8412     if (!isa<Instruction>(Usr))
8413       return false;
8414     auto *UI = cast<Instruction>(Usr);
8415     // If Usr is in the same block as GEPIOp, that is fine; skip it.
8416     if (UI->getParent() == SrcBlock)
8417       continue;
8418     // Check if Usr is a GEP. If not, give up.
8419     if (!isa<GetElementPtrInst>(Usr))
8420       return false;
8421     auto *UGEPI = cast<GetElementPtrInst>(Usr);
8422     // Check if UGEPI is a simple gep with a single constant index and GEPIOp is
8423     // the pointer operand to it. If so, record it in the vector. If not, give
8424     // up.
8425     if (!GEPSequentialConstIndexed(UGEPI))
8426       return false;
8427     if (UGEPI->getOperand(0) != GEPIOp)
8428       return false;
8429     if (UGEPI->getSourceElementType() != GEPI->getSourceElementType())
8430       return false;
8431     if (GEPIIdx->getType() !=
8432         cast<ConstantInt>(UGEPI->getOperand(1))->getType())
8433       return false;
8434     ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
8435     if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType(),
8436                            TargetTransformInfo::TCK_SizeAndLatency) >
8437         TargetTransformInfo::TCC_Basic)
8438       return false;
8439     UGEPIs.push_back(UGEPI);
8440   }
8441   if (UGEPIs.empty())
8442     return false;
8443   // Check the materializing cost of (Uidx-Idx).
8444   for (GetElementPtrInst *UGEPI : UGEPIs) {
8445     ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
8446     APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue();
8447     InstructionCost ImmCost = TTI->getIntImmCost(
8448         NewIdx, GEPIIdx->getType(), TargetTransformInfo::TCK_SizeAndLatency);
8449     if (ImmCost > TargetTransformInfo::TCC_Basic)
8450       return false;
8451   }
8452   // Now unmerge between GEPI and UGEPIs.
8453   for (GetElementPtrInst *UGEPI : UGEPIs) {
8454     UGEPI->setOperand(0, GEPI);
8455     ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
8456     Constant *NewUGEPIIdx = ConstantInt::get(
8457         GEPIIdx->getType(), UGEPIIdx->getValue() - GEPIIdx->getValue());
8458     UGEPI->setOperand(1, NewUGEPIIdx);
8459     // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not
8460     // inbounds to avoid UB.
8461     if (!GEPI->isInBounds()) {
8462       UGEPI->setIsInBounds(false);
8463     }
8464   }
8465   // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not
8466   // alive on IndirectBr edges).
8467   assert(llvm::none_of(GEPIOp->users(),
8468                        [&](User *Usr) {
8469                          return cast<Instruction>(Usr)->getParent() != SrcBlock;
8470                        }) &&
8471          "GEPIOp is used outside SrcBlock");
8472   return true;
8473 }
8474 
8475 static bool optimizeBranch(BranchInst *Branch, const TargetLowering &TLI,
8476                            SmallSet<BasicBlock *, 32> &FreshBBs,
8477                            bool IsHugeFunc) {
8478   // Try and convert
8479   //  %c = icmp ult %x, 8
8480   //  br %c, bla, blb
8481   //  %tc = lshr %x, 3
8482   // to
8483   //  %tc = lshr %x, 3
8484   //  %c = icmp eq %tc, 0
8485   //  br %c, bla, blb
8486   // Creating the cmp to zero can be better for the backend, especially if the
8487   // lshr produces flags that can be used automatically.
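       // Likewise (illustrative), when an equality compare shares its constant
       // with a nearby add/sub:
       //  %c = icmp eq %x, 42
       //  %s = sub %x, 42
       // to
       //  %s = sub %x, 42
       //  %c = icmp eq %s, 0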
8488   if (!TLI.preferZeroCompareBranch() || !Branch->isConditional())
8489     return false;
8490 
8491   ICmpInst *Cmp = dyn_cast<ICmpInst>(Branch->getCondition());
8492   if (!Cmp || !isa<ConstantInt>(Cmp->getOperand(1)) || !Cmp->hasOneUse())
8493     return false;
8494 
8495   Value *X = Cmp->getOperand(0);
8496   APInt CmpC = cast<ConstantInt>(Cmp->getOperand(1))->getValue();
8497 
8498   for (auto *U : X->users()) {
8499     Instruction *UI = dyn_cast<Instruction>(U);
8500     // A quick dominance check
8501     if (!UI ||
8502         (UI->getParent() != Branch->getParent() &&
8503          UI->getParent() != Branch->getSuccessor(0) &&
8504          UI->getParent() != Branch->getSuccessor(1)) ||
8505         (UI->getParent() != Branch->getParent() &&
8506          !UI->getParent()->getSinglePredecessor()))
8507       continue;
8508 
8509     if (CmpC.isPowerOf2() && Cmp->getPredicate() == ICmpInst::ICMP_ULT &&
8510         match(UI, m_Shr(m_Specific(X), m_SpecificInt(CmpC.logBase2())))) {
8511       IRBuilder<> Builder(Branch);
8512       if (UI->getParent() != Branch->getParent())
8513         UI->moveBefore(Branch);
8514       UI->dropPoisonGeneratingFlags();
8515       Value *NewCmp = Builder.CreateCmp(ICmpInst::ICMP_EQ, UI,
8516                                         ConstantInt::get(UI->getType(), 0));
8517       LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
8518       LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n");
8519       replaceAllUsesWith(Cmp, NewCmp, FreshBBs, IsHugeFunc);
8520       return true;
8521     }
8522     if (Cmp->isEquality() &&
8523         (match(UI, m_Add(m_Specific(X), m_SpecificInt(-CmpC))) ||
8524          match(UI, m_Sub(m_Specific(X), m_SpecificInt(CmpC))))) {
8525       IRBuilder<> Builder(Branch);
8526       if (UI->getParent() != Branch->getParent())
8527         UI->moveBefore(Branch);
8528       UI->dropPoisonGeneratingFlags();
8529       Value *NewCmp = Builder.CreateCmp(Cmp->getPredicate(), UI,
8530                                         ConstantInt::get(UI->getType(), 0));
8531       LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
8532       LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n");
8533       replaceAllUsesWith(Cmp, NewCmp, FreshBBs, IsHugeFunc);
8534       return true;
8535     }
8536   }
8537   return false;
8538 }
8539 
8540 bool CodeGenPrepare::optimizeInst(Instruction *I, ModifyDT &ModifiedDT) {
8541   bool AnyChange = false;
8542   AnyChange = fixupDbgVariableRecordsOnInst(*I);
8543 
8544   // Bail out if we inserted the instruction to prevent optimizations from
8545   // stepping on each other's toes.
8546   if (InsertedInsts.count(I))
8547     return AnyChange;
8548 
8549   // TODO: Move into the switch on opcode below here.
8550   if (PHINode *P = dyn_cast<PHINode>(I)) {
8551     // It is possible for very late stage optimizations (such as SimplifyCFG)
8552     // to introduce PHI nodes too late to be cleaned up.  If we detect such a
8553     // trivial PHI, go ahead and zap it here.
8554     if (Value *V = simplifyInstruction(P, {*DL, TLInfo})) {
8555       LargeOffsetGEPMap.erase(P);
8556       replaceAllUsesWith(P, V, FreshBBs, IsHugeFunc);
8557       P->eraseFromParent();
8558       ++NumPHIsElim;
8559       return true;
8560     }
8561     return AnyChange;
8562   }
8563 
8564   if (CastInst *CI = dyn_cast<CastInst>(I)) {
8565     // If the source of the cast is a constant, then this should have
8566     // already been constant folded.  The only reason NOT to constant fold
8567     // it is if something (e.g. LSR) was careful to place the constant
8568     // evaluation in a block other than the one that uses it (e.g. to hoist
8569     // the address of globals out of a loop).  If this is the case, we don't
8570     // want to forward-subst the cast.
8571     if (isa<Constant>(CI->getOperand(0)))
8572       return AnyChange;
8573 
8574     if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
8575       return true;
8576 
8577     if ((isa<UIToFPInst>(I) || isa<SIToFPInst>(I) || isa<FPToUIInst>(I) ||
8578          isa<TruncInst>(I)) &&
8579         TLI->optimizeExtendOrTruncateConversion(
8580             I, LI->getLoopFor(I->getParent()), *TTI))
8581       return true;
8582 
8583     if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
8584       /// Sink a zext or sext into its user blocks if the target type doesn't
8585       /// fit in one register
8586       if (TLI->getTypeAction(CI->getContext(),
8587                              TLI->getValueType(*DL, CI->getType())) ==
8588           TargetLowering::TypeExpandInteger) {
8589         return SinkCast(CI);
8590       } else {
8591         if (TLI->optimizeExtendOrTruncateConversion(
8592                 I, LI->getLoopFor(I->getParent()), *TTI))
8593           return true;
8594 
8595         bool MadeChange = optimizeExt(I);
8596         return MadeChange | optimizeExtUses(I);
8597       }
8598     }
8599     return AnyChange;
8600   }
8601 
8602   if (auto *Cmp = dyn_cast<CmpInst>(I))
8603     if (optimizeCmp(Cmp, ModifiedDT))
8604       return true;
8605 
8606   if (match(I, m_URem(m_Value(), m_Value())))
8607     if (optimizeURem(I))
8608       return true;
8609 
8610   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
8611     LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
8612     bool Modified = optimizeLoadExt(LI);
8613     unsigned AS = LI->getPointerAddressSpace();
8614     Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
8615     return Modified;
8616   }
8617 
8618   if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
8619     if (splitMergedValStore(*SI, *DL, *TLI))
8620       return true;
8621     SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
8622     unsigned AS = SI->getPointerAddressSpace();
8623     return optimizeMemoryInst(I, SI->getOperand(1),
8624                               SI->getOperand(0)->getType(), AS);
8625   }
8626 
8627   if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
8628     unsigned AS = RMW->getPointerAddressSpace();
8629     return optimizeMemoryInst(I, RMW->getPointerOperand(), RMW->getType(), AS);
8630   }
8631 
8632   if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
8633     unsigned AS = CmpX->getPointerAddressSpace();
8634     return optimizeMemoryInst(I, CmpX->getPointerOperand(),
8635                               CmpX->getCompareOperand()->getType(), AS);
8636   }
8637 
8638   BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);
8639 
8640   if (BinOp && BinOp->getOpcode() == Instruction::And && EnableAndCmpSinking &&
8641       sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts))
8642     return true;
8643 
8644   // TODO: Move this into the switch on opcode - it handles shifts already.
8645   if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
8646                 BinOp->getOpcode() == Instruction::LShr)) {
8647     ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
8648     if (CI && TLI->hasExtractBitsInsn())
8649       if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
8650         return true;
8651   }
8652 
8653   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
8654     if (GEPI->hasAllZeroIndices()) {
8655       /// The GEP operand must be a pointer, so must its result -> BitCast
8656       Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
8657                                         GEPI->getName(), GEPI->getIterator());
8658       NC->setDebugLoc(GEPI->getDebugLoc());
8659       replaceAllUsesWith(GEPI, NC, FreshBBs, IsHugeFunc);
8660       RecursivelyDeleteTriviallyDeadInstructions(
8661           GEPI, TLInfo, nullptr,
8662           [&](Value *V) { removeAllAssertingVHReferences(V); });
8663       ++NumGEPsElim;
8664       optimizeInst(NC, ModifiedDT);
8665       return true;
8666     }
8667     if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
8668       return true;
8669     }
8670   }
8671 
8672   if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) {
8673     // freeze(icmp a, const) -> icmp (freeze a), const
8674     // This helps generate efficient conditional jumps.
8675     Instruction *CmpI = nullptr;
8676     if (ICmpInst *II = dyn_cast<ICmpInst>(FI->getOperand(0)))
8677       CmpI = II;
8678     else if (FCmpInst *F = dyn_cast<FCmpInst>(FI->getOperand(0)))
8679       CmpI = F->getFastMathFlags().none() ? F : nullptr;
8680 
8681     if (CmpI && CmpI->hasOneUse()) {
8682       auto Op0 = CmpI->getOperand(0), Op1 = CmpI->getOperand(1);
8683       bool Const0 = isa<ConstantInt>(Op0) || isa<ConstantFP>(Op0) ||
8684                     isa<ConstantPointerNull>(Op0);
8685       bool Const1 = isa<ConstantInt>(Op1) || isa<ConstantFP>(Op1) ||
8686                     isa<ConstantPointerNull>(Op1);
8687       if (Const0 || Const1) {
8688         if (!Const0 || !Const1) {
8689           auto *F = new FreezeInst(Const0 ? Op1 : Op0, "", CmpI->getIterator());
8690           F->takeName(FI);
8691           CmpI->setOperand(Const0 ? 1 : 0, F);
8692         }
8693         replaceAllUsesWith(FI, CmpI, FreshBBs, IsHugeFunc);
8694         FI->eraseFromParent();
8695         return true;
8696       }
8697     }
8698     return AnyChange;
8699   }
8700 
8701   if (tryToSinkFreeOperands(I))
8702     return true;
8703 
8704   switch (I->getOpcode()) {
8705   case Instruction::Shl:
8706   case Instruction::LShr:
8707   case Instruction::AShr:
8708     return optimizeShiftInst(cast<BinaryOperator>(I));
8709   case Instruction::Call:
8710     return optimizeCallInst(cast<CallInst>(I), ModifiedDT);
8711   case Instruction::Select:
8712     return optimizeSelectInst(cast<SelectInst>(I));
8713   case Instruction::ShuffleVector:
8714     return optimizeShuffleVectorInst(cast<ShuffleVectorInst>(I));
8715   case Instruction::Switch:
8716     return optimizeSwitchInst(cast<SwitchInst>(I));
8717   case Instruction::ExtractElement:
8718     return optimizeExtractElementInst(cast<ExtractElementInst>(I));
8719   case Instruction::Br:
8720     return optimizeBranch(cast<BranchInst>(I), *TLI, FreshBBs, IsHugeFunc);
8721   }
8722 
8723   return AnyChange;
8724 }
8725 
8726 /// Given an OR instruction, check to see if this is a bitreverse
8727 /// idiom. If so, insert the new intrinsic and return true.
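     /// E.g. (illustrative), an OR-rooted chain of shifts and masks that
     /// reverses the bits of an i32 is replaced with a single call to
     /// llvm.bitreverse.i32 when the target supports ISD::BITREVERSE.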
8728 bool CodeGenPrepare::makeBitReverse(Instruction &I) {
8729   if (!I.getType()->isIntegerTy() ||
8730       !TLI->isOperationLegalOrCustom(ISD::BITREVERSE,
8731                                      TLI->getValueType(*DL, I.getType(), true)))
8732     return false;
8733 
8734   SmallVector<Instruction *, 4> Insts;
8735   if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
8736     return false;
8737   Instruction *LastInst = Insts.back();
8738   replaceAllUsesWith(&I, LastInst, FreshBBs, IsHugeFunc);
8739   RecursivelyDeleteTriviallyDeadInstructions(
8740       &I, TLInfo, nullptr,
8741       [&](Value *V) { removeAllAssertingVHReferences(V); });
8742   return true;
8743 }
8744 
8745 // In this pass we look for GEP and cast instructions that are used
8746 // across basic blocks and rewrite them to improve basic-block-at-a-time
8747 // selection.
8748 bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT) {
8749   SunkAddrs.clear();
8750   bool MadeChange = false;
8751 
8752   do {
8753     CurInstIterator = BB.begin();
8754     ModifiedDT = ModifyDT::NotModifyDT;
8755     while (CurInstIterator != BB.end()) {
8756       MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
8757       if (ModifiedDT != ModifyDT::NotModifyDT) {
8758         // For huge functions we tend to quickly go through the inner optimization
8759         // opportunities in the BB. So we go back to the BB head to re-optimize
8760         // each instruction instead of going back to the function head.
8761         if (IsHugeFunc) {
8762           DT.reset();
8763           getDT(*BB.getParent());
8764           break;
8765         } else {
8766           return true;
8767         }
8768       }
8769     }
8770   } while (ModifiedDT == ModifyDT::ModifyInstDT);
8771 
8772   bool MadeBitReverse = true;
8773   while (MadeBitReverse) {
8774     MadeBitReverse = false;
8775     for (auto &I : reverse(BB)) {
8776       if (makeBitReverse(I)) {
8777         MadeBitReverse = MadeChange = true;
8778         break;
8779       }
8780     }
8781   }
8782   MadeChange |= dupRetToEnableTailCallOpts(&BB, ModifiedDT);
8783 
8784   return MadeChange;
8785 }
8786 
8787 // Some CGP optimizations may move or alter what's computed in a block. Check
8788 // whether a dbg.value intrinsic could be pointed at a more appropriate operand.
8789 bool CodeGenPrepare::fixupDbgValue(Instruction *I) {
8790   assert(isa<DbgValueInst>(I));
8791   DbgValueInst &DVI = *cast<DbgValueInst>(I);
8792 
8793   // Does this dbg.value refer to a sunk address calculation?
8794   bool AnyChange = false;
8795   SmallDenseSet<Value *> LocationOps(DVI.location_ops().begin(),
8796                                      DVI.location_ops().end());
8797   for (Value *Location : LocationOps) {
8798     WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
8799     Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
8800     if (SunkAddr) {
8801       // Point dbg.value at locally computed address, which should give the best
8802       // opportunity to be accurately lowered. This update may change the type
8803       // of pointer being referred to; however this makes no difference to
8804       // debugging information, and we can't generate bitcasts that may affect
8805       // codegen.
8806       DVI.replaceVariableLocationOp(Location, SunkAddr);
8807       AnyChange = true;
8808     }
8809   }
8810   return AnyChange;
8811 }
8812 
8813 bool CodeGenPrepare::fixupDbgVariableRecordsOnInst(Instruction &I) {
8814   bool AnyChange = false;
8815   for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange()))
8816     AnyChange |= fixupDbgVariableRecord(DVR);
8817   return AnyChange;
8818 }
8819 
8820 // FIXME: should updating debug-info really cause the "changed" flag to fire,
8821 // which can cause a function to be reprocessed?
8822 bool CodeGenPrepare::fixupDbgVariableRecord(DbgVariableRecord &DVR) {
8823   if (DVR.Type != DbgVariableRecord::LocationType::Value &&
8824       DVR.Type != DbgVariableRecord::LocationType::Assign)
8825     return false;
8826 
8827   // Does this DbgVariableRecord refer to a sunk address calculation?
8828   bool AnyChange = false;
8829   SmallDenseSet<Value *> LocationOps(DVR.location_ops().begin(),
8830                                      DVR.location_ops().end());
8831   for (Value *Location : LocationOps) {
8832     WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
8833     Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
8834     if (SunkAddr) {
8835       // Point dbg.value at locally computed address, which should give the best
8836       // opportunity to be accurately lowered. This update may change the type
8837       // of pointer being referred to; however this makes no difference to
8838       // debugging information, and we can't generate bitcasts that may affect
8839       // codegen.
8840       DVR.replaceVariableLocationOp(Location, SunkAddr);
8841       AnyChange = true;
8842     }
8843   }
8844   return AnyChange;
8845 }
8846 
8847 static void DbgInserterHelper(DbgValueInst *DVI, Instruction *VI) {
8848   DVI->removeFromParent();
8849   if (isa<PHINode>(VI))
8850     DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
8851   else
8852     DVI->insertAfter(VI);
8853 }
8854 
8855 static void DbgInserterHelper(DbgVariableRecord *DVR, Instruction *VI) {
8856   DVR->removeFromParent();
8857   BasicBlock *VIBB = VI->getParent();
8858   if (isa<PHINode>(VI))
8859     VIBB->insertDbgRecordBefore(DVR, VIBB->getFirstInsertionPt());
8860   else
8861     VIBB->insertDbgRecordAfter(DVR, VI);
8862 }
8863 
8864 // A llvm.dbg.value may be using a value before its definition, due to
8865 // optimizations in this pass and others. Scan for such dbg.values, and rescue
8866 // them by moving the dbg.value to immediately after the value definition.
8867 // FIXME: Ideally this should never be necessary, and this has the potential
8868 // to re-order dbg.value intrinsics.
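     // E.g. (illustrative), a dbg.value describing %x that currently appears
     // before "%x = add i64 %a, %b" is moved to just after the add; if it
     // depends on several values that do not all dominate it, it is killed
     // (set to undef) instead.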
8869 bool CodeGenPrepare::placeDbgValues(Function &F) {
8870   bool MadeChange = false;
8871   DominatorTree DT(F);
8872 
8873   auto DbgProcessor = [&](auto *DbgItem, Instruction *Position) {
8874     SmallVector<Instruction *, 4> VIs;
8875     for (Value *V : DbgItem->location_ops())
8876       if (Instruction *VI = dyn_cast_or_null<Instruction>(V))
8877         VIs.push_back(VI);
8878 
8879     // This item may depend on multiple instructions, complicating any
8880     // potential sink. This block takes the defensive approach, opting to
8881     // "undef" the item if it has more than one instruction and any of them do
8882     // not dominate them.
8883     for (Instruction *VI : VIs) {
8884       if (VI->isTerminator())
8885         continue;
8886 
8887       // If VI is a phi in a block with an EHPad terminator, we can't insert
8888       // after it.
8889       if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
8890         continue;
8891 
8892       // If the defining instruction dominates the dbg.value, we do not need
8893       // to move the dbg.value.
8894       if (DT.dominates(VI, Position))
8895         continue;
8896 
8897       // If we depend on multiple instructions and any of them doesn't
8898       // dominate this DVI, we probably can't salvage it: moving it to
8899       // after any of the instructions could cause us to lose the others.
8900       if (VIs.size() > 1) {
8901         LLVM_DEBUG(
8902             dbgs()
8903             << "Unable to find valid location for Debug Value, undefing:\n"
8904             << *DbgItem);
8905         DbgItem->setKillLocation();
8906         break;
8907       }
8908 
8909       LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n"
8910                         << *DbgItem << ' ' << *VI);
8911       DbgInserterHelper(DbgItem, VI);
8912       MadeChange = true;
8913       ++NumDbgValueMoved;
8914     }
8915   };
8916 
8917   for (BasicBlock &BB : F) {
8918     for (Instruction &Insn : llvm::make_early_inc_range(BB)) {
8919       // Process dbg.value intrinsics.
8920       DbgValueInst *DVI = dyn_cast<DbgValueInst>(&Insn);
8921       if (DVI) {
8922         DbgProcessor(DVI, DVI);
8923         continue;
8924       }
8925 
8926       // If this isn't a dbg.value, process any attached DbgVariableRecord
8927       // records attached to this instruction.
8928       for (DbgVariableRecord &DVR : llvm::make_early_inc_range(
8929                filterDbgVars(Insn.getDbgRecordRange()))) {
8930         if (DVR.Type != DbgVariableRecord::LocationType::Value)
8931           continue;
8932         DbgProcessor(&DVR, &Insn);
8933       }
8934     }
8935   }
8936 
8937   return MadeChange;
8938 }
8939 
8940 // Group scattered pseudo probes in a block to favor SelectionDAG. Scattered
8941 // probes can be chained dependencies of other regular DAG nodes and block DAG
8942 // combine optimizations.
8943 bool CodeGenPrepare::placePseudoProbes(Function &F) {
8944   bool MadeChange = false;
8945   for (auto &Block : F) {
8946     // Move the remaining probes to the beginning of the block.
8947     auto FirstInst = Block.getFirstInsertionPt();
8948     while (FirstInst != Block.end() && FirstInst->isDebugOrPseudoInst())
8949       ++FirstInst;
8950     BasicBlock::iterator I(FirstInst);
8951     I++;
8952     while (I != Block.end()) {
8953       if (auto *II = dyn_cast<PseudoProbeInst>(I++)) {
8954         II->moveBefore(&*FirstInst);
8955         MadeChange = true;
8956       }
8957     }
8958   }
8959   return MadeChange;
8960 }
8961 
8962 /// Scale down both weights to fit into uint32_t.
8963 static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
8964   uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
8965   uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
8966   NewTrue = NewTrue / Scale;
8967   NewFalse = NewFalse / Scale;
8968 }
8969 
8970 /// Some targets prefer to split a conditional branch like:
8971 /// \code
8972 ///   %0 = icmp ne i32 %a, 0
8973 ///   %1 = icmp ne i32 %b, 0
8974 ///   %or.cond = or i1 %0, %1
8975 ///   br i1 %or.cond, label %TrueBB, label %FalseBB
8976 /// \endcode
8977 /// into multiple branch instructions like:
8978 /// \code
8979 ///   bb1:
8980 ///     %0 = icmp ne i32 %a, 0
8981 ///     br i1 %0, label %TrueBB, label %bb2
8982 ///   bb2:
8983 ///     %1 = icmp ne i32 %b, 0
8984 ///     br i1 %1, label %TrueBB, label %FalseBB
8985 /// \endcode
8986 /// This usually allows instruction selection to do even further optimizations
8987 /// and combine the compare with the branch instruction. Currently this is
8988 /// applied for targets which have "cheap" jump instructions.
8989 ///
8990 /// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
8991 ///
8992 bool CodeGenPrepare::splitBranchCondition(Function &F, ModifyDT &ModifiedDT) {
8993   if (!TM->Options.EnableFastISel || TLI->isJumpExpensive())
8994     return false;
8995 
8996   bool MadeChange = false;
8997   for (auto &BB : F) {
8998     // Does this BB end with the following?
8999     //   %cond1 = icmp|fcmp|binary instruction ...
9000     //   %cond2 = icmp|fcmp|binary instruction ...
9001     //   %cond.or = or|and i1 %cond1, %cond2
9002     //   br i1 %cond.or label %dest1, label %dest2"
9003     Instruction *LogicOp;
9004     BasicBlock *TBB, *FBB;
9005     if (!match(BB.getTerminator(),
9006                m_Br(m_OneUse(m_Instruction(LogicOp)), TBB, FBB)))
9007       continue;
9008 
9009     auto *Br1 = cast<BranchInst>(BB.getTerminator());
9010     if (Br1->getMetadata(LLVMContext::MD_unpredictable))
9011       continue;
9012 
9013     // The merging of mostly empty BB can cause a degenerate branch.
9014     if (TBB == FBB)
9015       continue;
9016 
9017     unsigned Opc;
9018     Value *Cond1, *Cond2;
9019     if (match(LogicOp,
9020               m_LogicalAnd(m_OneUse(m_Value(Cond1)), m_OneUse(m_Value(Cond2)))))
9021       Opc = Instruction::And;
9022     else if (match(LogicOp, m_LogicalOr(m_OneUse(m_Value(Cond1)),
9023                                         m_OneUse(m_Value(Cond2)))))
9024       Opc = Instruction::Or;
9025     else
9026       continue;
9027 
9028     auto IsGoodCond = [](Value *Cond) {
9029       return match(
9030           Cond,
9031           m_CombineOr(m_Cmp(), m_CombineOr(m_LogicalAnd(m_Value(), m_Value()),
9032                                            m_LogicalOr(m_Value(), m_Value()))));
9033     };
9034     if (!IsGoodCond(Cond1) || !IsGoodCond(Cond2))
9035       continue;
9036 
9037     LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());
9038 
9039     // Create a new BB.
9040     auto *TmpBB =
9041         BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
9042                            BB.getParent(), BB.getNextNode());
9043     if (IsHugeFunc)
9044       FreshBBs.insert(TmpBB);
9045 
9046     // Update original basic block by using the first condition directly by the
9047     // branch instruction and removing the no longer needed and/or instruction.
9048     Br1->setCondition(Cond1);
9049     LogicOp->eraseFromParent();
9050 
9051     // Depending on the condition we have to either replace the true or the
9052     // false successor of the original branch instruction.
9053     if (Opc == Instruction::And)
9054       Br1->setSuccessor(0, TmpBB);
9055     else
9056       Br1->setSuccessor(1, TmpBB);
9057 
9058     // Fill in the new basic block.
9059     auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
9060     if (auto *I = dyn_cast<Instruction>(Cond2)) {
9061       I->removeFromParent();
9062       I->insertBefore(Br2);
9063     }
9064 
9065     // Update PHI nodes in both successors. The original BB needs to be
9066     // replaced in one successor's PHI nodes, because the branch comes now from
9067     // the newly generated BB (TmpBB). In the other successor we need to add one
9068     // incoming edge to the PHI nodes, because both branch instructions target
9069     // now the same successor. Depending on the original branch condition
9070     // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
9071     // we perform the correct update for the PHI nodes.
9072     // This doesn't change the successor order of the just created branch
9073     // instruction (or any other instruction).
9074     if (Opc == Instruction::Or)
9075       std::swap(TBB, FBB);
9076 
9077     // Replace the old BB with the new BB.
9078     TBB->replacePhiUsesWith(&BB, TmpBB);
9079 
9080     // Add another incoming edge from the new BB.
9081     for (PHINode &PN : FBB->phis()) {
9082       auto *Val = PN.getIncomingValueForBlock(&BB);
9083       PN.addIncoming(Val, TmpBB);
9084     }
9085 
9086     // Update the branch weights (from SelectionDAGBuilder::
9087     // FindMergedConditions).
9088     if (Opc == Instruction::Or) {
9089       // Codegen X | Y as:
9090       // BB1:
9091       //   jmp_if_X TBB
9092       //   jmp TmpBB
9093       // TmpBB:
9094       //   jmp_if_Y TBB
9095       //   jmp FBB
9096       //
9097 
9098       // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
9099       // The requirement is that
9100       //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
9101       //     = TrueProb for original BB.
9102       // Assuming the original weights are A and B, one choice is to set BB1's
9103       // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
9104       // assumes that
9105       //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
9106       // Another choice is to assume TrueProb for BB1 equals TrueProb for
9107       // TmpBB, but the math is more complicated.
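           // For example (illustrative), with original weights A = 3 and B = 1,
           // the intended (pre-scaling) weights are {3, 5} for Br1 and {3, 2}
           // for Br2.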
9108       uint64_t TrueWeight, FalseWeight;
9109       if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) {
9110         uint64_t NewTrueWeight = TrueWeight;
9111         uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
9112         scaleWeights(NewTrueWeight, NewFalseWeight);
9113         Br1->setMetadata(LLVMContext::MD_prof,
9114                          MDBuilder(Br1->getContext())
9115                              .createBranchWeights(NewTrueWeight, NewFalseWeight,
9116                                                   hasBranchWeightOrigin(*Br1)));
9117 
9118         NewTrueWeight = TrueWeight;
9119         NewFalseWeight = 2 * FalseWeight;
9120         scaleWeights(NewTrueWeight, NewFalseWeight);
9121         Br2->setMetadata(LLVMContext::MD_prof,
9122                          MDBuilder(Br2->getContext())
9123                              .createBranchWeights(NewTrueWeight, NewFalseWeight));
9124       }
9125     } else {
9126       // Codegen X & Y as:
9127       // BB1:
9128       //   jmp_if_X TmpBB
9129       //   jmp FBB
9130       // TmpBB:
9131       //   jmp_if_Y TBB
9132       //   jmp FBB
9133       //
9134       //  This requires creation of TmpBB after CurBB.
9135 
9136       // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
9137       // The requirement is that
9138       //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
9139       //     = FalseProb for original BB.
9140       // Assuming the original weights are A and B, one choice is to set BB1's
9141       // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
9142       // assumes that
9143       //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
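           // For example (illustrative), with original weights A = 3 and B = 1,
           // the intended (pre-scaling) weights are {7, 1} for Br1 and {6, 1}
           // for Br2.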
9144       uint64_t TrueWeight, FalseWeight;
9145       if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) {
9146         uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
9147         uint64_t NewFalseWeight = FalseWeight;
9148         scaleWeights(NewTrueWeight, NewFalseWeight);
9149         Br1->setMetadata(LLVMContext::MD_prof,
9150                          MDBuilder(Br1->getContext())
9151                              .createBranchWeights(NewTrueWeight, NewFalseWeight));
9152 
9153         NewTrueWeight = 2 * TrueWeight;
9154         NewFalseWeight = FalseWeight;
9155         scaleWeights(NewTrueWeight, NewFalseWeight);
9156         Br2->setMetadata(LLVMContext::MD_prof,
9157                          MDBuilder(Br2->getContext())
9158                              .createBranchWeights(NewTrueWeight, NewFalseWeight));
9159       }
9160     }
9161 
9162     ModifiedDT = ModifyDT::ModifyBBDT;
9163     MadeChange = true;
9164 
9165     LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
9166                TmpBB->dump());
9167   }
9168   return MadeChange;
9169 }
9170