xref: /llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp (revision 509af087cccca3bf8a90bc9871335224226dc6fe)
//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CodeGenPrepare.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/BasicBlockSectionsProfileReader.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumMemoryInstsPhiCreated,
          "Number of phis created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumMemoryInstsSelectCreated,
          "Number of selects created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumAndsAdded,
          "Number of and mask instructions added to form ext loads");
STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");

static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
                  cl::desc("Disable GC optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableSelectToBranch("disable-cgp-select2branch", cl::Hidden,
                          cl::init(false),
                          cl::desc("Disable select to branch conversion."));

static cl::opt<bool>
    AddrSinkUsingGEPs("addr-sink-using-gep", cl::Hidden, cl::init(true),
                      cl::desc("Address sinking in CGP using GEPs."));

static cl::opt<bool>
    EnableAndCmpSinking("enable-andcmp-sinking", cl::Hidden, cl::init(true),
                        cl::desc("Enable sinking and/cmp into branches."));

static cl::opt<bool> DisableStoreExtract(
    "disable-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> StressStoreExtract(
    "stress-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableExtLdPromotion(
    "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
             "CodeGenPrepare"));

static cl::opt<bool> StressExtLdPromotion(
    "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
             "optimization in CodeGenPrepare"));

static cl::opt<bool> DisablePreheaderProtect(
    "disable-preheader-prot", cl::Hidden, cl::init(false),
    cl::desc("Disable protection against removing loop preheaders"));

static cl::opt<bool> ProfileGuidedSectionPrefix(
    "profile-guided-section-prefix", cl::Hidden, cl::init(true),
    cl::desc("Use profile info to add section prefix for hot/cold functions"));

static cl::opt<bool> ProfileUnknownInSpecialSection(
    "profile-unknown-in-special-section", cl::Hidden,
    cl::desc("In a profiling mode like sampleFDO, if a function doesn't have "
             "a profile, we cannot tell for sure that the function is cold, "
             "because it may be a function newly added without ever being "
             "sampled. With this flag enabled, the compiler can put such "
             "profile-unknown functions into a special section, so the "
             "runtime system can choose to handle them differently than the "
             ".text section, to save RAM for example."));

static cl::opt<bool> BBSectionsGuidedSectionPrefix(
    "bbsections-guided-section-prefix", cl::Hidden, cl::init(true),
    cl::desc("Use the basic-block-sections profile to determine the text "
             "section prefix for hot functions. Functions with "
             "basic-block-sections profile will be placed in `.text.hot` "
             "regardless of their FDO profile info. Other functions won't be "
             "impacted, i.e., their prefixes will be decided by FDO/sampleFDO "
             "profiles."));

static cl::opt<uint64_t> FreqRatioToSkipMerge(
    "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
    cl::desc("Skip merging empty blocks if (frequency of empty block) / "
             "(frequency of destination block) is greater than this ratio"));

static cl::opt<bool> ForceSplitStore(
    "force-split-store", cl::Hidden, cl::init(false),
    cl::desc("Force store splitting no matter what the target query says."));

static cl::opt<bool> EnableTypePromotionMerge(
    "cgp-type-promotion-merge", cl::Hidden,
    cl::desc("Enable merging of redundant sexts when one dominates "
             "the other."),
    cl::init(true));

static cl::opt<bool> DisableComplexAddrModes(
    "disable-complex-addr-modes", cl::Hidden, cl::init(false),
    cl::desc("Disables combining addressing modes with different parts "
             "in optimizeMemoryInst."));

static cl::opt<bool>
    AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
                    cl::desc("Allow creation of Phis in Address sinking."));

static cl::opt<bool> AddrSinkNewSelects(
    "addr-sink-new-select", cl::Hidden, cl::init(true),
    cl::desc("Allow creation of selects in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseReg(
    "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseReg field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseGV(
    "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseGV field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseOffs(
    "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseOffs field in Address sinking."));

static cl::opt<bool> AddrSinkCombineScaledReg(
    "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of ScaledReg field in Address sinking."));

static cl::opt<bool>
    EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
                         cl::init(true),
                         cl::desc("Enable splitting large offset of GEP."));

static cl::opt<bool> EnableICMP_EQToICMP_ST(
    "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
    cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));

static cl::opt<bool>
    VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
                     cl::desc("Enable BFI update verification for "
                              "CodeGenPrepare."));

static cl::opt<bool>
    OptimizePhiTypes("cgp-optimize-phi-types", cl::Hidden, cl::init(true),
                     cl::desc("Enable converting phi types in CodeGenPrepare"));

static cl::opt<unsigned>
    HugeFuncThresholdInCGPP("cgpp-huge-func", cl::init(10000), cl::Hidden,
                            cl::desc("Basic-block count threshold above which "
                                     "a function is considered huge."));

static cl::opt<unsigned>
    MaxAddressUsersToScan("cgp-max-address-users-to-scan", cl::init(100),
                          cl::Hidden,
                          cl::desc("Max number of address users to look at"));

static cl::opt<bool>
    DisableDeletePHIs("disable-cgp-delete-phis", cl::Hidden, cl::init(false),
                      cl::desc("Disable elimination of dead PHI nodes."));

namespace {

enum ExtType {
  ZeroExtension, // Zero extension has been seen.
  SignExtension, // Sign extension has been seen.
  BothExtension  // This extension type is used if we saw sext after
                 // ZeroExtension had been set, or if we saw zext after
                 // SignExtension had been set. It makes the type
                 // information of a promoted instruction invalid.
};

enum ModifyDT {
  NotModifyDT, // Do not modify any dominator tree.
  ModifyBBDT,  // Modify the basic block dominator tree.
  ModifyInstDT // Modify instruction dominance within a basic block.
               // This usually means we moved/deleted/inserted an instruction
               // in a basic block, so we should re-iterate the instructions
               // in such a basic block.
};

using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
using SExts = SmallVector<Instruction *, 16>;
using ValueToSExts = MapVector<Value *, SExts>;

class TypePromotionTransaction;

class CodeGenPrepare {
  friend class CodeGenPrepareLegacyPass;
  const TargetMachine *TM = nullptr;
  const TargetSubtargetInfo *SubtargetInfo = nullptr;
  const TargetLowering *TLI = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const TargetTransformInfo *TTI = nullptr;
  const BasicBlockSectionsProfileReader *BBSectionsProfileReader = nullptr;
  const TargetLibraryInfo *TLInfo = nullptr;
  LoopInfo *LI = nullptr;
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;
  ProfileSummaryInfo *PSI = nullptr;

  /// As we scan instructions optimizing them, this is the next instruction
  /// to optimize. Transforms that can invalidate this should update it.
  BasicBlock::iterator CurInstIterator;

  /// Keeps track of non-local addresses that have been sunk into a block.
  /// This allows us to avoid inserting duplicate code for blocks with
  /// multiple load/stores of the same address. The usage of WeakTrackingVH
  /// enables SunkAddrs to be treated as a cache whose entries can be
  /// invalidated if a sunken address computation has been erased.
  ValueMap<Value *, WeakTrackingVH> SunkAddrs;

  /// Keeps track of all instructions inserted for the current function.
  SetOfInstrs InsertedInsts;

  /// Keeps track of the types of the related instructions before their
  /// promotion for the current function.
  InstrToOrigTy PromotedInsts;

  /// Keep track of instructions removed during promotion.
  SetOfInstrs RemovedInsts;

  /// Keep track of sext chains based on their initial value.
  DenseMap<Value *, Instruction *> SeenChainsForSExt;

  /// Keep track of GEPs accessing the same data structures such as structs or
  /// arrays that are candidates to be split later because of their large
  /// size.
  MapVector<AssertingVH<Value>,
            SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
      LargeOffsetGEPMap;

  /// Keep track of new GEP base after splitting the GEPs having large offset.
  SmallSet<AssertingVH<Value>, 2> NewGEPBases;

  /// Map serial numbers to Large offset GEPs.
  DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;

  /// Keep track of promoted SExts.
  ValueToSExts ValToSExtendedUses;

  /// True if the function has the OptSize attribute.
  bool OptSize;

  /// DataLayout for the Function being processed.
  const DataLayout *DL = nullptr;

  /// Building the dominator tree can be expensive, so we only build it
  /// lazily and update it when required.
  std::unique_ptr<DominatorTree> DT;

public:
  CodeGenPrepare() {}
  CodeGenPrepare(const TargetMachine *TM) : TM(TM) {}
  /// If we encounter a huge function, we need to limit the compile time.
  bool IsHugeFunc = false;

  /// FreshBBs is like a worklist; it collects the updated BBs which need
  /// to be optimized again.
  /// Note: To limit compile time in this pass, when a BB is updated we need
  /// to insert it into FreshBBs for a huge function.
  SmallSet<BasicBlock *, 32> FreshBBs;

  void releaseMemory() {
    // Clear per function information.
    InsertedInsts.clear();
    PromotedInsts.clear();
    FreshBBs.clear();
    BPI.reset();
    BFI.reset();
  }

  bool run(Function &F, FunctionAnalysisManager &AM);

private:
  template <typename F>
  void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
    // Substituting can cause recursive simplifications, which can invalidate
    // our iterator.  Use a WeakTrackingVH to hold onto it in case this
    // happens.
    Value *CurValue = &*CurInstIterator;
    WeakTrackingVH IterHandle(CurValue);

    f();

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurValue) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
  }

  // Get the DominatorTree, building if necessary.
  DominatorTree &getDT(Function &F) {
    if (!DT)
      DT = std::make_unique<DominatorTree>(F);
    return *DT;
  }

  void removeAllAssertingVHReferences(Value *V);
  bool eliminateAssumptions(Function &F);
  bool eliminateFallThrough(Function &F, DominatorTree *DT = nullptr);
  bool eliminateMostlyEmptyBlocks(Function &F);
  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void eliminateMostlyEmptyBlock(BasicBlock *BB);
  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
                                     bool isPreheader);
  bool makeBitReverse(Instruction &I);
  bool optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT);
  bool optimizeInst(Instruction *I, ModifyDT &ModifiedDT);
  bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, Type *AccessTy,
                          unsigned AddrSpace);
  bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
  bool optimizeInlineAsmInst(CallInst *CS);
  bool optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT);
  bool optimizeExt(Instruction *&I);
  bool optimizeExtUses(Instruction *I);
  bool optimizeLoadExt(LoadInst *Load);
  bool optimizeShiftInst(BinaryOperator *BO);
  bool optimizeFunnelShift(IntrinsicInst *Fsh);
  bool optimizeSelectInst(SelectInst *SI);
  bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
  bool optimizeSwitchType(SwitchInst *SI);
  bool optimizeSwitchPhiConstants(SwitchInst *SI);
  bool optimizeSwitchInst(SwitchInst *SI);
  bool optimizeExtractElementInst(Instruction *Inst);
  bool dupRetToEnableTailCallOpts(BasicBlock *BB, ModifyDT &ModifiedDT);
  bool fixupDbgValue(Instruction *I);
  bool fixupDbgVariableRecord(DbgVariableRecord &I);
  bool fixupDbgVariableRecordsOnInst(Instruction &I);
  bool placeDbgValues(Function &F);
  bool placePseudoProbes(Function &F);
  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
                    LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
  bool tryToPromoteExts(TypePromotionTransaction &TPT,
                        const SmallVectorImpl<Instruction *> &Exts,
                        SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
                        unsigned CreatedInstsCost = 0);
  bool mergeSExts(Function &F);
  bool splitLargeGEPOffsets();
  bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited,
                       SmallPtrSetImpl<Instruction *> &DeletedInstrs);
  bool optimizePhiTypes(Function &F);
  bool performAddressTypePromotion(
      Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
      bool HasPromoted, TypePromotionTransaction &TPT,
      SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
  bool splitBranchCondition(Function &F, ModifyDT &ModifiedDT);
  bool simplifyOffsetableRelocate(GCStatepointInst &I);

  bool tryToSinkFreeOperands(Instruction *I);
  bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0, Value *Arg1,
                                   CmpInst *Cmp, Intrinsic::ID IID);
  bool optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT);
  bool optimizeURem(Instruction *Rem);
  bool combineToUSubWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
  bool combineToUAddWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
  void verifyBFIUpdates(Function &F);
  bool _run(Function &F);
};

class CodeGenPrepareLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  CodeGenPrepareLegacyPass() : FunctionPass(ID) {
    initializeCodeGenPrepareLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: When we can selectively preserve passes, preserve the domtree.
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addUsedIfAvailable<BasicBlockSectionsProfileReaderWrapperPass>();
  }
};

} // end anonymous namespace

char CodeGenPrepareLegacyPass::ID = 0;

bool CodeGenPrepareLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  auto TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
  CodeGenPrepare CGP(TM);
  CGP.DL = &F.getDataLayout();
  CGP.SubtargetInfo = TM->getSubtargetImpl(F);
  CGP.TLI = CGP.SubtargetInfo->getTargetLowering();
  CGP.TRI = CGP.SubtargetInfo->getRegisterInfo();
  CGP.TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  CGP.TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  CGP.LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  CGP.BPI.reset(new BranchProbabilityInfo(F, *CGP.LI));
  CGP.BFI.reset(new BlockFrequencyInfo(F, *CGP.BPI, *CGP.LI));
  CGP.PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  auto BBSPRWP =
      getAnalysisIfAvailable<BasicBlockSectionsProfileReaderWrapperPass>();
  CGP.BBSectionsProfileReader = BBSPRWP ? &BBSPRWP->getBBSPR() : nullptr;

  return CGP._run(F);
}

INITIALIZE_PASS_BEGIN(CodeGenPrepareLegacyPass, DEBUG_TYPE,
                      "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(BasicBlockSectionsProfileReaderWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(CodeGenPrepareLegacyPass, DEBUG_TYPE,
                    "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPrepareLegacyPass() {
  return new CodeGenPrepareLegacyPass();
}

PreservedAnalyses CodeGenPreparePass::run(Function &F,
                                          FunctionAnalysisManager &AM) {
  CodeGenPrepare CGP(TM);

  bool Changed = CGP.run(F, AM);
  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserve<TargetLibraryAnalysis>();
  PA.preserve<TargetIRAnalysis>();
  PA.preserve<LoopAnalysis>();
  return PA;
}

bool CodeGenPrepare::run(Function &F, FunctionAnalysisManager &AM) {
  DL = &F.getDataLayout();
  SubtargetInfo = TM->getSubtargetImpl(F);
  TLI = SubtargetInfo->getTargetLowering();
  TRI = SubtargetInfo->getRegisterInfo();
  TLInfo = &AM.getResult<TargetLibraryAnalysis>(F);
  TTI = &AM.getResult<TargetIRAnalysis>(F);
  LI = &AM.getResult<LoopAnalysis>(F);
  BPI.reset(new BranchProbabilityInfo(F, *LI));
  BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  BBSectionsProfileReader =
      AM.getCachedResult<BasicBlockSectionsProfileReaderAnalysis>(F);
  return _run(F);
}

bool CodeGenPrepare::_run(Function &F) {
  bool EverMadeChange = false;

  OptSize = F.hasOptSize();
  // Use the basic-block-sections profile to promote hot functions to .text.hot
  // if requested.
  if (BBSectionsGuidedSectionPrefix && BBSectionsProfileReader &&
      BBSectionsProfileReader->isFunctionHot(F.getName())) {
    F.setSectionPrefix("hot");
  } else if (ProfileGuidedSectionPrefix) {
    // The hot attribute overrides profile-count-based hotness, while
    // profile-count-based hotness overrides the cold attribute.
    // This is conservative behavior.
    if (F.hasFnAttribute(Attribute::Hot) ||
        PSI->isFunctionHotInCallGraph(&F, *BFI))
      F.setSectionPrefix("hot");
    // If PSI shows this function is not hot, we place the function into the
    // unlikely section if (1) PSI shows this is a cold function, or (2) the
    // function has the cold attribute.
    else if (PSI->isFunctionColdInCallGraph(&F, *BFI) ||
             F.hasFnAttribute(Attribute::Cold))
      F.setSectionPrefix("unlikely");
    else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() &&
             PSI->isFunctionHotnessUnknown(F))
      F.setSectionPrefix("unknown");
  }

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
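  // For illustration only (a sketch; the profitable widths come from the
  // target via getBypassSlowDivWidths): on a target where 64-bit division is
  // slow and 32-bit division is fast, bypassSlowDivision may rewrite
  //   %res = udiv i64 %a, %b
  // into a runtime check on the high bits of %a and %b that selects between
  // the original 64-bit divide and a cheaper 32-bit divide of the truncated
  // operands.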
  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    BasicBlock *BB = &*F.begin();
    while (BB != nullptr) {
      // bypassSlowDivision may create new BBs, but we don't want to reapply the
      // optimization to those blocks.
      BasicBlock *Next = BB->getNextNode();
      // F.hasOptSize is already checked in the outer if statement.
      if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
        EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
      BB = Next;
    }
  }

  // Get rid of @llvm.assume builtins before attempting to eliminate empty
  // blocks, since there might be blocks that only contain @llvm.assume calls
  // (plus arguments that we can get rid of).
  EverMadeChange |= eliminateAssumptions(F);

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  ModifyDT ModifiedDT = ModifyDT::NotModifyDT;
  if (!DisableBranchOpts)
    EverMadeChange |= splitBranchCondition(F, ModifiedDT);

  // Split some critical edges where one of the sources is an indirect branch,
  // to help generate sane code for PHIs involving such edges.
  EverMadeChange |=
      SplitIndirectBrCriticalEdges(F, /*IgnoreBlocksWithoutPHI=*/true);

  // If we are optimizing a huge function, we need to consider compile time,
  // because the basic algorithm's complexity is nearly O(N!).
  IsHugeFunc = F.size() > HugeFuncThresholdInCGPP;

  // Transformations above may invalidate dominator tree and/or loop info.
  DT.reset();
  LI->releaseMemory();
  LI->analyze(getDT(F));

  bool MadeChange = true;
  bool FuncIterated = false;
  while (MadeChange) {
    MadeChange = false;

    for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
      if (FuncIterated && !FreshBBs.contains(&BB))
        continue;

      ModifyDT ModifiedDTOnIteration = ModifyDT::NotModifyDT;
      bool Changed = optimizeBlock(BB, ModifiedDTOnIteration);

      if (ModifiedDTOnIteration == ModifyDT::ModifyBBDT)
        DT.reset();

      MadeChange |= Changed;
      if (IsHugeFunc) {
        // If the BB was updated, it may still have a chance to be optimized.
        // This usually happens during sink optimization.
        // For example:
        //
        // bb0:
        // %and = and i32 %a, 4
        // %cmp = icmp eq i32 %and, 0
        //
        // If %cmp is sunk to another BB, %and will have a chance to sink too.
        if (Changed)
          FreshBBs.insert(&BB);
        else if (FuncIterated)
          FreshBBs.erase(&BB);
      } else {
        // For small/normal functions, we restart BB iteration if the dominator
        // tree of the Function was changed.
        if (ModifiedDTOnIteration != ModifyDT::NotModifyDT)
          break;
      }
    }
    // We have iterated over all the BBs in the function (this only matters
    // for huge functions).
    FuncIterated = IsHugeFunc;

    if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
      MadeChange |= mergeSExts(F);
    if (!LargeOffsetGEPMap.empty())
      MadeChange |= splitLargeGEPOffsets();
    MadeChange |= optimizePhiTypes(F);

    if (MadeChange)
      eliminateFallThrough(F, DT.get());

#ifndef NDEBUG
    if (MadeChange && VerifyLoopInfo)
      LI->verify(getDT(F));
#endif

    // Really free removed instructions during promotion.
    for (Instruction *I : RemovedInsts)
      I->deleteValue();

    EverMadeChange |= MadeChange;
    SeenChainsForSExt.clear();
    ValToSExtendedUses.clear();
    RemovedInsts.clear();
    LargeOffsetGEPMap.clear();
    LargeOffsetGEPID.clear();
  }

  NewGEPBases.clear();
  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    // Use a set vector to get deterministic iteration order. The order the
    // blocks are removed may affect whether or not PHI nodes in successors
    // are removed.
    SmallSetVector<BasicBlock *, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(successors(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange)
        continue;

      for (BasicBlock *Succ : Successors)
        if (pred_empty(Succ))
          WorkList.insert(Succ);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = WorkList.pop_back_val();
      SmallVector<BasicBlock *, 2> Successors(successors(BB));

      DeleteDeadBlock(BB);

      for (BasicBlock *Succ : Successors)
        if (pred_empty(Succ))
          WorkList.insert(Succ);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= eliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }

  if (!DisableGCOpts) {
    SmallVector<GCStatepointInst *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (auto *SP = dyn_cast<GCStatepointInst>(&I))
          Statepoints.push_back(SP);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  // Do this last to clean up use-before-def scenarios introduced by other
  // preparatory transforms.
  EverMadeChange |= placeDbgValues(F);
  EverMadeChange |= placePseudoProbes(F);

#ifndef NDEBUG
  if (VerifyBFIUpdates)
    verifyBFIUpdates(F);
#endif

  return EverMadeChange;
}

bool CodeGenPrepare::eliminateAssumptions(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    CurInstIterator = BB.begin();
    while (CurInstIterator != BB.end()) {
      Instruction *I = &*(CurInstIterator++);
      if (auto *Assume = dyn_cast<AssumeInst>(I)) {
        MadeChange = true;
        Value *Operand = Assume->getOperand(0);
        Assume->eraseFromParent();

        resetIteratorIfInvalidatedWhileCalling(&BB, [&]() {
          RecursivelyDeleteTriviallyDeadInstructions(Operand, TLInfo, nullptr);
        });
      }
    }
  }
  return MadeChange;
}

/// An instruction is about to be deleted, so remove all references to it in
/// our GEP-tracking data structures.
void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
  LargeOffsetGEPMap.erase(V);
  NewGEPBases.erase(V);

  auto GEP = dyn_cast<GetElementPtrInst>(V);
  if (!GEP)
    return;

  LargeOffsetGEPID.erase(GEP);

  auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
  if (VecI == LargeOffsetGEPMap.end())
    return;

  auto &GEPVector = VecI->second;
  llvm::erase_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });

  if (GEPVector.empty())
    LargeOffsetGEPMap.erase(VecI);
}

// Verify BFI has been updated correctly by recomputing BFI and comparing them.
void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
  DominatorTree NewDT(F);
  LoopInfo NewLI(NewDT);
  BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
  BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
  NewBFI.verifyMatch(*BFI);
}

/// Merge basic blocks which are connected by a single edge, where one of the
/// basic blocks has a single successor pointing to the other basic block,
/// which has a single predecessor.
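///
/// For example (an illustrative IR sketch):
///   entry:
///     br label %next
///   next:                                    ; preds = %entry
///     ret void
/// is merged into a single block that simply contains "ret void".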
bool CodeGenPrepare::eliminateFallThrough(Function &F, DominatorTree *DT) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  // Use a temporary array to avoid iterator being invalidated when
  // deleting blocks.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::drop_begin(F))
    Blocks.push_back(&Block);

  SmallSet<WeakTrackingVH, 16> Preds;
  for (auto &Block : Blocks) {
    auto *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken())
      continue;

    // Make an effort to skip unreachable blocks.
    if (DT && !DT->isReachableFromEntry(BB))
      continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");

      // Merge BB into SinglePred and delete it.
      MergeBlockIntoPredecessor(BB, /* DTU */ nullptr, LI, /* MSSAU */ nullptr,
                                /* MemDep */ nullptr,
                                /* PredecessorWithTwoSuccessors */ false, DT);
      Preds.insert(SinglePred);

      if (IsHugeFunc) {
        // Update FreshBBs to optimize the merged BB.
        FreshBBs.insert(SinglePred);
        FreshBBs.erase(BB);
      }
    }
  }

  // (Repeatedly) merging blocks into their predecessors can create redundant
  // debug intrinsics.
  for (const auto &Pred : Preds)
    if (auto *BB = cast_or_null<BasicBlock>(Pred))
      RemoveRedundantDbgInstrs(BB);

  return Changed;
}

/// Find a destination block from BB if BB is a mergeable empty block.
BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
  // If this block doesn't end with an uncond branch, ignore it.
  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isUnconditional())
    return nullptr;

  // If the instruction before the branch (skipping debug info) isn't a phi
  // node, then other stuff is happening here.
  BasicBlock::iterator BBI = BI->getIterator();
  if (BBI != BB->begin()) {
    --BBI;
    while (isa<DbgInfoIntrinsic>(BBI)) {
      if (BBI == BB->begin())
        break;
      --BBI;
    }
    if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
      return nullptr;
  }

  // Do not break infinite loops.
  BasicBlock *DestBB = BI->getSuccessor(0);
  if (DestBB == BB)
    return nullptr;

  if (!canMergeBlocks(BB, DestBB))
    DestBB = nullptr;

  return DestBB;
}

/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
/// edges in ways that are non-optimal for isel. Start by eliminating these
/// blocks so we can split them the way we want them.
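///
/// For example (an illustrative IR sketch), a "mostly empty" block looks like:
///   bb:                                      ; preds = %a, %b
///     %p = phi i32 [ 0, %a ], [ 1, %b ]
///     br label %dest
/// and can often be removed by rewriting the PHIs in %dest.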
bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
  SmallPtrSet<BasicBlock *, 16> Preheaders;
  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
  while (!LoopList.empty()) {
    Loop *L = LoopList.pop_back_val();
    llvm::append_range(LoopList, *L);
    if (BasicBlock *Preheader = L->getLoopPreheader())
      Preheaders.insert(Preheader);
  }

  bool MadeChange = false;
  // Copy blocks into a temporary array to avoid iterator invalidation issues
  // as we remove them.
  // Note that this intentionally skips the entry block.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::drop_begin(F)) {
    // Delete phi nodes that could block deleting other empty blocks.
    if (!DisableDeletePHIs)
      MadeChange |= DeleteDeadPHIs(&Block, TLInfo);
    Blocks.push_back(&Block);
  }

  for (auto &Block : Blocks) {
    BasicBlock *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
    if (!DestBB ||
        !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
                                                   BasicBlock *DestBB,
                                                   bool isPreheader) {
  // Do not delete loop preheaders if doing so would create a critical edge.
  // Loop preheaders can be good locations to spill registers. If the
  // preheader is deleted and we create a critical edge, registers may be
  // spilled in the loop body instead.
  if (!DisablePreheaderProtect && isPreheader &&
      !(BB->getSinglePredecessor() &&
        BB->getSinglePredecessor()->getSingleSuccessor()))
    return false;

  // Skip merging if the block's successor is also a successor to any callbr
  // that leads to this block.
  // FIXME: Is this really needed? Is this a correctness issue?
  for (BasicBlock *Pred : predecessors(BB)) {
    if (isa<CallBrInst>(Pred->getTerminator()) &&
        llvm::is_contained(successors(Pred), DestBB))
      return false;
  }

  // Try to skip merging if the unique predecessor of BB is terminated by a
  // switch or indirect branch instruction, and BB is used as an incoming block
  // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel
  // to add COPY instructions in the predecessor of BB instead of BB (if it is
  // not merged). Note that the critical edge created by merging such blocks
  // won't be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
  BasicBlock *Pred = BB->getUniquePredecessor();
  if (!Pred || !(isa<SwitchInst>(Pred->getTerminator()) ||
                 isa<IndirectBrInst>(Pred->getTerminator())))
    return true;

  if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
    return true;

  // We use a simple cost heuristic: skipping the merge is profitable if the
  // cost of skipping is less than the cost of merging, i.e.
  //   Cost(skipping merging) < Cost(merging BB),
  // where Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch))
  // and Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), this simplifies to:
  //   Freq(Pred) / Freq(BB) > 2.
  // Note that if there are multiple empty blocks sharing the same incoming
  // value for the PHIs in the DestBB, we consider them together. In such a
  // case, Cost(merging BB) is the sum of their frequencies.
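  // A worked example with the default FreqRatioToSkipMerge of 2: if
  // Freq(Pred) = 8 and Freq(BB) = 3, then 8 > 2 * 3 and we skip the merge,
  // whereas with Freq(Pred) = 5 we would merge.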

  if (!isa<PHINode>(DestBB->begin()))
    return true;

  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;

  // Find all other incoming blocks from which incoming values of all PHIs in
  // DestBB are the same as the ones from BB.
  for (BasicBlock *DestBBPred : predecessors(DestBB)) {
    if (DestBBPred == BB)
      continue;

    if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
          return DestPN.getIncomingValueForBlock(BB) ==
                 DestPN.getIncomingValueForBlock(DestBBPred);
        }))
      SameIncomingValueBBs.insert(DestBBPred);
  }

  // See if all of BB's incoming values are the same as the value from Pred. In
  // that case, there is no reason to skip merging, because COPYs are expected
  // to be placed in Pred already.
  if (SameIncomingValueBBs.count(Pred))
    return true;

  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
  BlockFrequency BBFreq = BFI->getBlockFreq(BB);

  for (auto *SameValueBB : SameIncomingValueBBs)
    if (SameValueBB->getUniquePredecessor() == Pred &&
        DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
      BBFreq += BFI->getBlockFreq(SameValueBB);

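  // BlockFrequency::mul returns std::nullopt on overflow; in that case we
  // conservatively treat the merge as profitable.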
  std::optional<BlockFrequency> Limit = BBFreq.mul(FreqRatioToSkipMerge);
  return !Limit || PredFreq <= *Limit;
}

/// Return true if we can merge BB into DestBB if there is a single
/// unconditional branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor.  If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  for (const PHINode &PN : BB->phis()) {
    for (const User *U : PN.users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block.  If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN)
    return true; // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock *, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) { // Common predecessor?
      for (const PHINode &PN : DestBB->phis()) {
        const Value *V1 = PN.getIncomingValueForBlock(Pred);
        const Value *V2 = PN.getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2)
          return false;
      }
    }
  }

  return true;
}

/// Replace all old uses with new ones, and push the updated BBs into FreshBBs.
static void replaceAllUsesWith(Value *Old, Value *New,
                               SmallSet<BasicBlock *, 32> &FreshBBs,
                               bool IsHuge) {
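  // Record the parent blocks of all of Old's users first; after the RAUW
  // below these are exactly the blocks whose instructions changed, and for
  // huge functions they need to be revisited via FreshBBs.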
  auto *OldI = dyn_cast<Instruction>(Old);
  if (OldI) {
    for (Value::user_iterator UI = OldI->user_begin(), E = OldI->user_end();
         UI != E; ++UI) {
      Instruction *User = cast<Instruction>(*UI);
      if (IsHuge)
        FreshBBs.insert(User->getParent());
    }
  }
  Old->replaceAllUsesWith(New);
}

/// Eliminate a basic block that has only phi's and an unconditional branch in
/// it.
void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
                    << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      assert(SinglePred == BB &&
             "Single predecessor not the same as predecessor");
      // Merge DestBB into SinglePred/BB and delete it.
      MergeBlockIntoPredecessor(DestBB);
      // Note: BB(=SinglePred) will not be deleted on this path.
      // DestBB(=its single successor) is the one that was deleted.
      LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");

      if (IsHugeFunc) {
        // Update FreshBBs to optimize the merged BB.
        FreshBBs.insert(SinglePred);
        FreshBBs.erase(DestBB);
      }
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  for (PHINode &PN : DestBB->phis()) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN.removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN.addIncoming(InValPhi->getIncomingValue(i),
                       InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (BasicBlock *Pred : predecessors(BB))
          PN.addIncoming(InVal, Pred);
      }
    }
  }

  // Preserve loop Metadata.
  if (BI->hasMetadata(LLVMContext::MD_loop)) {
    for (auto *Pred : predecessors(BB))
      Pred->getTerminator()->copyMetadata(*BI, LLVMContext::MD_loop);
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();
  ++NumBlocksElim;

  LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

// Computes a map of base pointer relocation instructions to corresponding
// derived pointer relocation instructions given a vector of all relocate calls
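// For example (using the statepoint notation from simplifyOffsetableRelocate
// below), relocate(%tok, i32 4, i32 4) has equal base and derived indices and
// is a base relocation, while relocate(%tok, i32 4, i32 5) is a derived
// relocation that gets mapped to the base relocation with index 4.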
static void computeBaseDerivedRelocateMap(
    const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
    MapVector<GCRelocateInst *, SmallVector<GCRelocateInst *, 0>>
        &RelocateInstMap) {
  // Collect information in two maps: one primarily for locating the base object
  // while filling the second map; the second map is the final structure holding
  // a mapping between Base and corresponding Derived relocate calls
  MapVector<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
  for (auto *ThisRelocate : AllRelocateCalls) {
    auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
                            ThisRelocate->getDerivedPtrIndex());
    RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
  }
  for (auto &Item : RelocateIdxMap) {
    std::pair<unsigned, unsigned> Key = Item.first;
    if (Key.first == Key.second)
      // Base relocation: nothing to insert
      continue;

    GCRelocateInst *I = Item.second;
    auto BaseKey = std::make_pair(Key.first, Key.first);

    // We're iterating over RelocateIdxMap so we cannot modify it.
    auto MaybeBase = RelocateIdxMap.find(BaseKey);
    if (MaybeBase == RelocateIdxMap.end())
      // TODO: We might want to insert a new base object relocate and gep off
      // that, if there are enough derived object relocates.
      continue;

    RelocateInstMap[MaybeBase->second].push_back(I);
  }
}

// Accepts a GEP and extracts the operands into a vector provided they're all
// small integer constants
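// For example, the operands of "gep %base, i32 0, i32 15" qualify (every
// index is a constant <= 20), while a non-constant index or a constant
// larger than 20 does not.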
static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
                                          SmallVectorImpl<Value *> &OffsetV) {
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    // Only accept small constant integer operands
    auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Op || Op->getZExtValue() > 20)
      return false;
  }

  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
    OffsetV.push_back(GEP->getOperand(i));
  return true;
}

// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and applies it.
static bool
simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
                          const SmallVectorImpl<GCRelocateInst *> &Targets) {
  bool MadeChange = false;
  // We must ensure that the relocation of a derived pointer is defined after
  // the relocation of its base pointer. If we find a relocation corresponding
  // to our base that is defined earlier than the relocation of the base, we
  // move the relocation of the base right before the found relocation. We
  // consider only relocations in the same basic block as the relocation of the
  // base. Relocations from other basic blocks are skipped by this optimization
  // and we do not care about them.
  for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
       &*R != RelocatedBase; ++R)
    if (auto *RI = dyn_cast<GCRelocateInst>(R))
      if (RI->getStatepoint() == RelocatedBase->getStatepoint())
        if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
          RelocatedBase->moveBefore(RI);
          MadeChange = true;
          break;
        }

  for (GCRelocateInst *ToReplace : Targets) {
    assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
           "Not relocating a derived object of the original base object");
    if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
      // A duplicate relocate call. TODO: coalesce duplicates.
      continue;
    }

    if (RelocatedBase->getParent() != ToReplace->getParent()) {
      // Base and derived relocates are in different basic blocks.
      // In this case the transform is only valid when the base dominates the
      // derived relocate. However, it would be too expensive to check
      // dominance for each such relocate, so we skip the whole transformation.
      continue;
    }

    Value *Base = ToReplace->getBasePtr();
    auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
    if (!Derived || Derived->getPointerOperand() != Base)
      continue;

    SmallVector<Value *, 2> OffsetV;
    if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
      continue;

    // Create a Builder and replace the target callsite with a gep
    assert(RelocatedBase->getNextNode() &&
           "Should always have one since it's not a terminator");

    // Insert after RelocatedBase
    IRBuilder<> Builder(RelocatedBase->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());

1307     // If gc_relocate does not match the actual type, cast it to the right type.
1308     // In theory, there must be a bitcast after gc_relocate if the type does not
1309     // match, and we should reuse it to get the derived pointer. But there could
1310     // be cases like this:
1311     // bb1:
1312     //  ...
1313     //  %g1 = call coldcc i8 addrspace(1)*
1314     //  @llvm.experimental.gc.relocate.p1i8(...) br label %merge
1315     //
1316     // bb2:
1317     //  ...
1318     //  %g2 = call coldcc i8 addrspace(1)*
1319     //  @llvm.experimental.gc.relocate.p1i8(...) br label %merge
1320     //
1321     // merge:
1322     //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
1323     //  %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
1324     //
1325     // In this case, we can no longer find the bitcast. So we insert a new
1326     // bitcast whether or not one already exists. This way we handle all
1327     // cases, and the extra bitcast should be optimized away by later
1328     // passes.
1329     Value *ActualRelocatedBase = RelocatedBase;
1330     if (RelocatedBase->getType() != Base->getType()) {
1331       ActualRelocatedBase =
1332           Builder.CreateBitCast(RelocatedBase, Base->getType());
1333     }
1334     Value *Replacement =
1335         Builder.CreateGEP(Derived->getSourceElementType(), ActualRelocatedBase,
1336                           ArrayRef(OffsetV));
1337     Replacement->takeName(ToReplace);
1338     // If the newly generated derived pointer's type does not match the original
1339     // derived pointer's type, cast the new derived pointer to match it. Same
1340     // reasoning as above.
1341     Value *ActualReplacement = Replacement;
1342     if (Replacement->getType() != ToReplace->getType()) {
1343       ActualReplacement =
1344           Builder.CreateBitCast(Replacement, ToReplace->getType());
1345     }
1346     ToReplace->replaceAllUsesWith(ActualReplacement);
1347     ToReplace->eraseFromParent();
1348 
1349     MadeChange = true;
1350   }
1351   return MadeChange;
1352 }
1353 
1354 // Turns this:
1355 //
1356 // %base = ...
1357 // %ptr = gep %base + 15
1358 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1359 // %base' = relocate(%tok, i32 4, i32 4)
1360 // %ptr' = relocate(%tok, i32 4, i32 5)
1361 // %val = load %ptr'
1362 //
1363 // into this:
1364 //
1365 // %base = ...
1366 // %ptr = gep %base + 15
1367 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1368 // %base' = gc.relocate(%tok, i32 4, i32 4)
1369 // %ptr' = gep %base' + 15
1370 // %val = load %ptr'
1371 bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) {
1372   bool MadeChange = false;
1373   SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
1374   for (auto *U : I.users())
1375     if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
1376       // Collect all the relocate calls associated with a statepoint
1377       AllRelocateCalls.push_back(Relocate);
1378 
1379   // We need at least one base pointer relocation + one derived pointer
1380   // relocation to mangle
1381   if (AllRelocateCalls.size() < 2)
1382     return false;
1383 
1384   // RelocateInstMap is a mapping from the base relocate instruction to the
1385   // corresponding derived relocate instructions
1386   MapVector<GCRelocateInst *, SmallVector<GCRelocateInst *, 0>> RelocateInstMap;
1387   computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
1388   if (RelocateInstMap.empty())
1389     return false;
1390 
1391   for (auto &Item : RelocateInstMap)
1392     // Item.first is the RelocatedBase to offset against
1393     // Item.second is the vector of Targets to replace
1394     MadeChange |= simplifyRelocatesOffABase(Item.first, Item.second);
1395   return MadeChange;
1396 }
1397 
1398 /// Sink the specified cast instruction into its user blocks.
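/// For illustration, a hypothetical sketch (names invented): a cast defined in
/// one block but used in others, e.g.
///   entry:
///     %z = zext i32 %x to i64
///     br i1 %c, label %bb1, label %bb2
///   bb1:
///     %u = add i64 %z, 1
/// is cloned into %bb1 (and every other user block), so the block-at-a-time
/// instruction selector can fold the cast into its users instead of keeping a
/// value live across blocks.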
1399 static bool SinkCast(CastInst *CI) {
1400   BasicBlock *DefBB = CI->getParent();
1401 
1402   /// InsertedCasts - Only insert a cast in each block once.
1403   DenseMap<BasicBlock *, CastInst *> InsertedCasts;
1404 
1405   bool MadeChange = false;
1406   for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
1407        UI != E;) {
1408     Use &TheUse = UI.getUse();
1409     Instruction *User = cast<Instruction>(*UI);
1410 
1411     // Figure out which BB this cast is used in.  For PHIs this is the
1412     // appropriate predecessor block.
1413     BasicBlock *UserBB = User->getParent();
1414     if (PHINode *PN = dyn_cast<PHINode>(User)) {
1415       UserBB = PN->getIncomingBlock(TheUse);
1416     }
1417 
1418     // Preincrement use iterator so we don't invalidate it.
1419     ++UI;
1420 
1421     // The first insertion point of a block containing an EH pad is after the
1422     // pad.  If the pad is the user, we cannot sink the cast past the pad.
1423     if (User->isEHPad())
1424       continue;
1425 
1426     // If the block selected to receive the cast is an EH pad that does not
1427     // allow non-PHI instructions before the terminator, we can't sink the
1428     // cast.
1429     if (UserBB->getTerminator()->isEHPad())
1430       continue;
1431 
1432     // If this user is in the same block as the cast, don't change the cast.
1433     if (UserBB == DefBB)
1434       continue;
1435 
1436     // If we have already inserted a cast into this block, use it.
1437     CastInst *&InsertedCast = InsertedCasts[UserBB];
1438 
1439     if (!InsertedCast) {
1440       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1441       assert(InsertPt != UserBB->end());
1442       InsertedCast = cast<CastInst>(CI->clone());
1443       InsertedCast->insertBefore(*UserBB, InsertPt);
1444     }
1445 
1446     // Replace a use of the cast with a use of the new cast.
1447     TheUse = InsertedCast;
1448     MadeChange = true;
1449     ++NumCastUses;
1450   }
1451 
1452   // If we removed all uses, nuke the cast.
1453   if (CI->use_empty()) {
1454     salvageDebugInfo(*CI);
1455     CI->eraseFromParent();
1456     MadeChange = true;
1457   }
1458 
1459   return MadeChange;
1460 }
1461 
1462 /// If the specified cast instruction is a noop copy (e.g. it's casting from
1463 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
1464 /// reduce the number of virtual registers that must be created and coalesced.
1465 ///
1466 /// Return true if any changes are made.
1467 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
1468                                        const DataLayout &DL) {
1469   // Sink only "cheap" (or nop) address-space casts.  This is a weaker condition
1470   // than sinking only nop casts, but is helpful on some platforms.
1471   if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
1472     if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
1473                                  ASC->getDestAddressSpace()))
1474       return false;
1475   }
1476 
1477   // Determine whether this is a no-op copy by examining the value types.
1478   EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
1479   EVT DstVT = TLI.getValueType(DL, CI->getType());
1480 
1481   // If this is an fp<->int conversion, it is not a no-op copy.
1482   if (SrcVT.isInteger() != DstVT.isInteger())
1483     return false;
1484 
1485   // If this is an extension, it will be a zero or sign extension, which
1486   // isn't a noop.
1487   if (SrcVT.bitsLT(DstVT))
1488     return false;
1489 
1490   // If these values will be promoted, find out what they will be promoted
1491   // to.  This helps us consider truncates on PPC as noop copies when they
1492   // are.
1493   if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
1494       TargetLowering::TypePromoteInteger)
1495     SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
1496   if (TLI.getTypeAction(CI->getContext(), DstVT) ==
1497       TargetLowering::TypePromoteInteger)
1498     DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
1499 
1500   // If, after promotion, these are the same types, this is a noop copy.
1501   if (SrcVT != DstVT)
1502     return false;
1503 
1504   return SinkCast(CI);
1505 }
1506 
1507 // Match a simple increment by constant operation.  Note that if a sub is
1508 // matched, the step is negated (as if the step had been canonicalized to
1509 // an add, even though we leave the instruction alone.)
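// For example, a sketch: matching '%iv.next = sub i32 %iv, 4' sets LHS to %iv
// and Step to 4, then negates Step to -4, exactly as if the increment had been
// written '%iv.next = add i32 %iv, -4'; the instruction itself is left alone.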
1510 static bool matchIncrement(const Instruction *IVInc, Instruction *&LHS,
1511                            Constant *&Step) {
1512   if (match(IVInc, m_Add(m_Instruction(LHS), m_Constant(Step))) ||
1513       match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::uadd_with_overflow>(
1514                        m_Instruction(LHS), m_Constant(Step)))))
1515     return true;
1516   if (match(IVInc, m_Sub(m_Instruction(LHS), m_Constant(Step))) ||
1517       match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
1518                        m_Instruction(LHS), m_Constant(Step))))) {
1519     Step = ConstantExpr::getNeg(Step);
1520     return true;
1521   }
1522   return false;
1523 }
1524 
1525 /// If the given \p PN is an induction variable whose value IVInc comes from
1526 /// the backedge and is increased by Step on each iteration, return the pair
1527 /// <IVInc, Step>. Otherwise, return std::nullopt.
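/// For a canonical loop, a hypothetical sketch:
///   loop:
///     %iv = phi i32 [ 0, %preheader ], [ %iv.next, %loop ]
///     ...
///     %iv.next = add i32 %iv, 1
///     br i1 %cond, label %loop, label %exit
/// getIVIncrement(%iv, LI) would return the pair <%iv.next, 1>.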
1528 static std::optional<std::pair<Instruction *, Constant *>>
1529 getIVIncrement(const PHINode *PN, const LoopInfo *LI) {
1530   const Loop *L = LI->getLoopFor(PN->getParent());
1531   if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch())
1532     return std::nullopt;
1533   auto *IVInc =
1534       dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch()));
1535   if (!IVInc || LI->getLoopFor(IVInc->getParent()) != L)
1536     return std::nullopt;
1537   Instruction *LHS = nullptr;
1538   Constant *Step = nullptr;
1539   if (matchIncrement(IVInc, LHS, Step) && LHS == PN)
1540     return std::make_pair(IVInc, Step);
1541   return std::nullopt;
1542 }
1543 
1544 static bool isIVIncrement(const Value *V, const LoopInfo *LI) {
1545   auto *I = dyn_cast<Instruction>(V);
1546   if (!I)
1547     return false;
1548   Instruction *LHS = nullptr;
1549   Constant *Step = nullptr;
1550   if (!matchIncrement(I, LHS, Step))
1551     return false;
1552   if (auto *PN = dyn_cast<PHINode>(LHS))
1553     if (auto IVInc = getIVIncrement(PN, LI))
1554       return IVInc->first == I;
1555   return false;
1556 }
1557 
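// A sketch of the rewrite this performs, in hypothetical IR (shown for the
// uadd case; usub and the xor-based patterns are analogous):
//   %math = add i32 %a, %b
//   %ov   = icmp ult i32 %math, %a        ; unsigned overflow check
// -->
//   %m    = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
//   %math = extractvalue { i32, i1 } %m, 0
//   %ov   = extractvalue { i32, i1 } %m, 1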
1558 bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO,
1559                                                  Value *Arg0, Value *Arg1,
1560                                                  CmpInst *Cmp,
1561                                                  Intrinsic::ID IID) {
1562   auto IsReplaceableIVIncrement = [this, &Cmp](BinaryOperator *BO) {
1563     if (!isIVIncrement(BO, LI))
1564       return false;
1565     const Loop *L = LI->getLoopFor(BO->getParent());
1566     assert(L && "L should not be null after isIVIncrement()");
1567     // Do not risk moving the increment into a child loop.
1568     if (LI->getLoopFor(Cmp->getParent()) != L)
1569       return false;
1570 
1571     // Finally, we need to ensure that the insert point will dominate all
1572     // existing uses of the increment.
1573 
1574     auto &DT = getDT(*BO->getParent()->getParent());
1575     if (DT.dominates(Cmp->getParent(), BO->getParent()))
1576       // If we're moving up the dom tree, all uses are trivially dominated.
1577       // (This is the common case for code produced by LSR.)
1578       return true;
1579 
1580     // Otherwise, special case the single use in the phi recurrence.
1581     return BO->hasOneUse() && DT.dominates(Cmp->getParent(), L->getLoopLatch());
1582   };
1583   if (BO->getParent() != Cmp->getParent() && !IsReplaceableIVIncrement(BO)) {
1584     // We used to use a dominator tree here to allow multi-block optimization.
1585     // But that was problematic because:
1586     // 1. It could cause a perf regression by hoisting the math op into the
1587     //    critical path.
1588     // 2. It could cause a perf regression by creating a value that was live
1589     //    across multiple blocks and increasing register pressure.
1590     // 3. Use of a dominator tree could cause large compile-time regression.
1591     //    This is because we recompute the DT on every change in the main CGP
1592     //    run-loop. The recomputing is probably unnecessary in many cases, so if
1593     //    that was fixed, using a DT here would be ok.
1594     //
1595     // There is one important particular case we still want to handle: if BO is
1596     // the IV increment. Important properties that make it profitable:
1597     // - We can speculate IV increment anywhere in the loop (as long as the
1598     //   indvar Phi is its only user);
1599     // - Upon computing Cmp, we effectively compute something equivalent to the
1600     //   IV increment (even though it looks different in the IR). So moving it up
1601     //   to the cmp point does not really increase register pressure.
1602     return false;
1603   }
1604 
1605   // We allow matching the canonical IR (add X, C) back to (usubo X, -C).
1606   if (BO->getOpcode() == Instruction::Add &&
1607       IID == Intrinsic::usub_with_overflow) {
1608     assert(isa<Constant>(Arg1) && "Unexpected input for usubo");
1609     Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1));
1610   }
1611 
1612   // Insert at the first instruction of the pair.
1613   Instruction *InsertPt = nullptr;
1614   for (Instruction &Iter : *Cmp->getParent()) {
1615     // If BO is an XOR, it is not guaranteed that it comes after both inputs to
1616     // the overflow intrinsic are defined.
1617     if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) {
1618       InsertPt = &Iter;
1619       break;
1620     }
1621   }
1622   assert(InsertPt != nullptr && "Parent block did not contain cmp or binop");
1623 
1624   IRBuilder<> Builder(InsertPt);
1625   Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1);
1626   if (BO->getOpcode() != Instruction::Xor) {
1627     Value *Math = Builder.CreateExtractValue(MathOV, 0, "math");
1628     replaceAllUsesWith(BO, Math, FreshBBs, IsHugeFunc);
1629   } else
1630     assert(BO->hasOneUse() &&
1631            "Patterns with XOr should use the BO only in the compare");
1632   Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov");
1633   replaceAllUsesWith(Cmp, OV, FreshBBs, IsHugeFunc);
1634   Cmp->eraseFromParent();
1635   BO->eraseFromParent();
1636   return true;
1637 }
1638 
1639 /// Match special-case patterns that check for unsigned add overflow.
1640 static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp,
1641                                                    BinaryOperator *&Add) {
1642   // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val)
1643   // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero)
1644   Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1645 
1646   // We are not expecting non-canonical/degenerate code. Just bail out.
1647   if (isa<Constant>(A))
1648     return false;
1649 
1650   ICmpInst::Predicate Pred = Cmp->getPredicate();
1651   if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes()))
1652     B = ConstantInt::get(B->getType(), 1);
1653   else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt()))
1654     B = Constant::getAllOnesValue(B->getType());
1655   else
1656     return false;
1657 
1658   // Check the users of the variable operand of the compare looking for an add
1659   // with the adjusted constant.
1660   for (User *U : A->users()) {
1661     if (match(U, m_Add(m_Specific(A), m_Specific(B)))) {
1662       Add = cast<BinaryOperator>(U);
1663       return true;
1664     }
1665   }
1666   return false;
1667 }
1668 
1669 /// Try to combine the compare into a call to the llvm.uadd.with.overflow
1670 /// intrinsic. Return true if any changes were made.
1671 bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp,
1672                                                ModifyDT &ModifiedDT) {
1673   bool EdgeCase = false;
1674   Value *A, *B;
1675   BinaryOperator *Add;
1676   if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) {
1677     if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add))
1678       return false;
1679     // Set A and B if matchUAddWithOverflowConstantEdgeCases matched.
1680     A = Add->getOperand(0);
1681     B = Add->getOperand(1);
1682     EdgeCase = true;
1683   }
1684 
1685   if (!TLI->shouldFormOverflowOp(ISD::UADDO,
1686                                  TLI->getValueType(*DL, Add->getType()),
1687                                  Add->hasNUsesOrMore(EdgeCase ? 1 : 2)))
1688     return false;
1689 
1690   // We don't want to move around uses of condition values this late, so we
1691   // check if it is legal to create the call to the intrinsic in the basic
1692   // block containing the icmp.
1693   if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse())
1694     return false;
1695 
1696   if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp,
1697                                    Intrinsic::uadd_with_overflow))
1698     return false;
1699 
1700   // Reset callers - do not crash by iterating over a dead instruction.
1701   ModifiedDT = ModifyDT::ModifyInstDT;
1702   return true;
1703 }
1704 
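// Try to combine a subtract and compare over the same operands into a call to
// the llvm.usub.with.overflow intrinsic. A sketch in hypothetical IR:
//   %sub = sub i32 %a, %b
//   %cmp = icmp ult i32 %a, %b            ; borrow check for %a - %b
// -->
//   %u   = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
//   %sub = extractvalue { i32, i1 } %u, 0
//   %cmp = extractvalue { i32, i1 } %u, 1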
1705 bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp,
1706                                                ModifyDT &ModifiedDT) {
1707   // We are not expecting non-canonical/degenerate code. Just bail out.
1708   Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1709   if (isa<Constant>(A) && isa<Constant>(B))
1710     return false;
1711 
1712   // Convert (A u> B) to (A u< B) to simplify pattern matching.
1713   ICmpInst::Predicate Pred = Cmp->getPredicate();
1714   if (Pred == ICmpInst::ICMP_UGT) {
1715     std::swap(A, B);
1716     Pred = ICmpInst::ICMP_ULT;
1717   }
1718   // Convert special-case: (A == 0) is the same as (A u< 1).
1719   if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) {
1720     B = ConstantInt::get(B->getType(), 1);
1721     Pred = ICmpInst::ICMP_ULT;
1722   }
1723   // Convert special-case: (A != 0) is the same as (0 u< A).
1724   if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) {
1725     std::swap(A, B);
1726     Pred = ICmpInst::ICMP_ULT;
1727   }
1728   if (Pred != ICmpInst::ICMP_ULT)
1729     return false;
1730 
1731   // Walk the users of a variable operand of a compare looking for a subtract or
1732   // add with that same operand. Also match the 2nd operand of the compare to
1733   // the add/sub, but that may be a negated constant operand of an add.
1734   Value *CmpVariableOperand = isa<Constant>(A) ? B : A;
1735   BinaryOperator *Sub = nullptr;
1736   for (User *U : CmpVariableOperand->users()) {
1737     // A - B, A u< B --> usubo(A, B)
1738     if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) {
1739       Sub = cast<BinaryOperator>(U);
1740       break;
1741     }
1742 
1743     // A + (-C), A u< C (canonicalized form of (sub A, C))
1744     const APInt *CmpC, *AddC;
1745     if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) &&
1746         match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) {
1747       Sub = cast<BinaryOperator>(U);
1748       break;
1749     }
1750   }
1751   if (!Sub)
1752     return false;
1753 
1754   if (!TLI->shouldFormOverflowOp(ISD::USUBO,
1755                                  TLI->getValueType(*DL, Sub->getType()),
1756                                  Sub->hasNUsesOrMore(1)))
1757     return false;
1758 
1759   if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1),
1760                                    Cmp, Intrinsic::usub_with_overflow))
1761     return false;
1762 
1763   // Reset callers - do not crash by iterating over a dead instruction.
1764   ModifiedDT = ModifyDT::ModifyInstDT;
1765   return true;
1766 }
1767 
1768 /// Sink the given CmpInst into user blocks to reduce the number of virtual
1769 /// registers that must be created and coalesced. This is a clear win except on
1770 /// targets with multiple condition code registers (PowerPC), where it might
1771 /// lose; some adjustment may be wanted there.
1772 ///
1773 /// Return true if any changes are made.
1774 static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
1775   if (TLI.hasMultipleConditionRegisters())
1776     return false;
1777 
1778   // Avoid sinking soft-FP comparisons, since this can move them into a loop.
1779   if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
1780     return false;
1781 
1782   // Only insert a cmp in each block once.
1783   DenseMap<BasicBlock *, CmpInst *> InsertedCmps;
1784 
1785   bool MadeChange = false;
1786   for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
1787        UI != E;) {
1788     Use &TheUse = UI.getUse();
1789     Instruction *User = cast<Instruction>(*UI);
1790 
1791     // Preincrement use iterator so we don't invalidate it.
1792     ++UI;
1793 
1794     // Don't bother for PHI nodes.
1795     if (isa<PHINode>(User))
1796       continue;
1797 
1798     // Figure out which BB this cmp is used in.
1799     BasicBlock *UserBB = User->getParent();
1800     BasicBlock *DefBB = Cmp->getParent();
1801 
1802     // If this user is in the same block as the cmp, don't change the cmp.
1803     if (UserBB == DefBB)
1804       continue;
1805 
1806     // If we have already inserted a cmp into this block, use it.
1807     CmpInst *&InsertedCmp = InsertedCmps[UserBB];
1808 
1809     if (!InsertedCmp) {
1810       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1811       assert(InsertPt != UserBB->end());
1812       InsertedCmp = CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(),
1813                                     Cmp->getOperand(0), Cmp->getOperand(1), "");
1814       InsertedCmp->insertBefore(*UserBB, InsertPt);
1815       // Propagate the debug info.
1816       InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
1817     }
1818 
1819     // Replace a use of the cmp with a use of the new cmp.
1820     TheUse = InsertedCmp;
1821     MadeChange = true;
1822     ++NumCmpUses;
1823   }
1824 
1825   // If we removed all uses, nuke the cmp.
1826   if (Cmp->use_empty()) {
1827     Cmp->eraseFromParent();
1828     MadeChange = true;
1829   }
1830 
1831   return MadeChange;
1832 }
1833 
1834 /// For pattern like:
1835 ///
1836 ///   DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
1837 ///   ...
1838 /// DomBB:
1839 ///   ...
1840 ///   br DomCond, TrueBB, CmpBB
1841 /// CmpBB: (with DomBB being the single predecessor)
1842 ///   ...
1843 ///   Cmp = icmp eq CmpOp0, CmpOp1
1844 ///   ...
1845 ///
1846 /// This pattern would use two comparisons on targets where the lowering of
1847 /// icmp sgt/slt differs from that of icmp eq (PowerPC). This function tries to
1848 /// convert 'Cmp = icmp eq CmpOp0, CmpOp1' to 'Cmp = icmp slt/sgt CmpOp0,
1849 /// CmpOp1'. After that, DomCond and Cmp can share the same comparison, saving
1850 /// one comparison.
1851 ///
1852 /// Return true if any changes are made.
1853 static bool foldICmpWithDominatingICmp(CmpInst *Cmp,
1854                                        const TargetLowering &TLI) {
1855   if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp())
1856     return false;
1857 
1858   ICmpInst::Predicate Pred = Cmp->getPredicate();
1859   if (Pred != ICmpInst::ICMP_EQ)
1860     return false;
1861 
1862   // If icmp eq has users other than BranchInst and SelectInst, converting it to
1863   // icmp slt/sgt would introduce more redundant LLVM IR.
1864   for (User *U : Cmp->users()) {
1865     if (isa<BranchInst>(U))
1866       continue;
1867     if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp)
1868       continue;
1869     return false;
1870   }
1871 
1872   // This is a cheap/incomplete check for dominance - just match a single
1873   // predecessor with a conditional branch.
1874   BasicBlock *CmpBB = Cmp->getParent();
1875   BasicBlock *DomBB = CmpBB->getSinglePredecessor();
1876   if (!DomBB)
1877     return false;
1878 
1879   // We want to ensure that the only way control gets to the comparison of
1880   // interest is that a less/greater than comparison on the same operands is
1881   // false.
1882   Value *DomCond;
1883   BasicBlock *TrueBB, *FalseBB;
1884   if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
1885     return false;
1886   if (CmpBB != FalseBB)
1887     return false;
1888 
1889   Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1);
1890   ICmpInst::Predicate DomPred;
1891   if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1))))
1892     return false;
1893   if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT)
1894     return false;
1895 
1896   // Convert the equality comparison to the opposite of the dominating
1897   // comparison and swap the direction for all branch/select users.
1898   // We have conceptually converted:
1899   // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>;
1900   // to
1901   // Res = (a < b) ? <LT_RES> : (a > b)  ? <GT_RES> : <EQ_RES>;
1902   // And similarly for branches.
1903   for (User *U : Cmp->users()) {
1904     if (auto *BI = dyn_cast<BranchInst>(U)) {
1905       assert(BI->isConditional() && "Must be conditional");
1906       BI->swapSuccessors();
1907       continue;
1908     }
1909     if (auto *SI = dyn_cast<SelectInst>(U)) {
1910       // Swap operands
1911       SI->swapValues();
1912       SI->swapProfMetadata();
1913       continue;
1914     }
1915     llvm_unreachable("Must be a branch or a select");
1916   }
1917   Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred));
1918   return true;
1919 }
1920 
1921 /// Many architectures use the same instruction for both subtract and cmp. Try
1922 /// to swap cmp operands to match subtract operations to allow for CSE.
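/// For example, a sketch: if the function also contains '%d = sub i32 %b, %a',
/// then rewriting '%c = icmp eq i32 %a, %b' as '%c = icmp eq i32 %b, %a' lets
/// a target whose subtract also sets flags cover both with one instruction.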
1923 static bool swapICmpOperandsToExposeCSEOpportunities(CmpInst *Cmp) {
1924   Value *Op0 = Cmp->getOperand(0);
1925   Value *Op1 = Cmp->getOperand(1);
1926   if (!Op0->getType()->isIntegerTy() || isa<Constant>(Op0) ||
1927       isa<Constant>(Op1) || Op0 == Op1)
1928     return false;
1929 
1930   // If a subtract already has the same operands as a compare, swapping would be
1931   // bad. If a subtract has the same operands as a compare but in reverse order,
1932   // then swapping is good.
1933   int GoodToSwap = 0;
1934   unsigned NumInspected = 0;
1935   for (const User *U : Op0->users()) {
1936     // Avoid walking many users.
1937     if (++NumInspected > 128)
1938       return false;
1939     if (match(U, m_Sub(m_Specific(Op1), m_Specific(Op0))))
1940       GoodToSwap++;
1941     else if (match(U, m_Sub(m_Specific(Op0), m_Specific(Op1))))
1942       GoodToSwap--;
1943   }
1944 
1945   if (GoodToSwap > 0) {
1946     Cmp->swapOperands();
1947     return true;
1948   }
1949   return false;
1950 }
1951 
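// Fold an fcmp that is really a floating-point class test into a call to the
// llvm.is.fpclass intrinsic. A sketch in hypothetical IR (516 is assumed to be
// fcInf, i.e. fcNegInf|fcPosInf, in the current FPClassTest encoding):
//   %fabs = call double @llvm.fabs.f64(double %x)
//   %cmp  = fcmp oeq double %fabs, 0x7FF0000000000000   ; |x| == +inf
// -->
//   %cmp  = call i1 @llvm.is.fpclass.f64(double %x, i32 516)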
1952 static bool foldFCmpToFPClassTest(CmpInst *Cmp, const TargetLowering &TLI,
1953                                   const DataLayout &DL) {
1954   FCmpInst *FCmp = dyn_cast<FCmpInst>(Cmp);
1955   if (!FCmp)
1956     return false;
1957 
1958   // Don't fold if the target offers free fabs and the predicate is legal.
1959   EVT VT = TLI.getValueType(DL, Cmp->getOperand(0)->getType());
1960   if (TLI.isFAbsFree(VT) &&
1961       TLI.isCondCodeLegal(getFCmpCondCode(FCmp->getPredicate()),
1962                           VT.getSimpleVT()))
1963     return false;
1964 
1965   // Reverse the canonicalization if it is an FP class test.
1966   auto ShouldReverseTransform = [](FPClassTest ClassTest) {
1967     return ClassTest == fcInf || ClassTest == (fcInf | fcNan);
1968   };
1969   auto [ClassVal, ClassTest] =
1970       fcmpToClassTest(FCmp->getPredicate(), *FCmp->getParent()->getParent(),
1971                       FCmp->getOperand(0), FCmp->getOperand(1));
1972   if (!ClassVal)
1973     return false;
1974 
1975   if (!ShouldReverseTransform(ClassTest) && !ShouldReverseTransform(~ClassTest))
1976     return false;
1977 
1978   IRBuilder<> Builder(Cmp);
1979   Value *IsFPClass = Builder.createIsFPClass(ClassVal, ClassTest);
1980   Cmp->replaceAllUsesWith(IsFPClass);
1981   RecursivelyDeleteTriviallyDeadInstructions(Cmp);
1982   return true;
1983 }
1984 
1985 static bool isRemOfLoopIncrementWithLoopInvariant(Instruction *Rem,
1986                                                   const LoopInfo *LI,
1987                                                   Value *&RemAmtOut,
1988                                                   PHINode *&LoopIncrPNOut) {
1989   Value *Incr, *RemAmt;
1990   // NB: If RemAmt is a power of 2 it *should* have been transformed by now.
1991   if (!match(Rem, m_URem(m_Value(Incr), m_Value(RemAmt))))
1992     return false;
1993 
1994   // Find out loop increment PHI.
1995   auto *PN = dyn_cast<PHINode>(Incr);
1996   if (!PN)
1997     return false;
1998 
1999   // This isn't strictly necessary; what we really need is one increment and
2000   // any number of initial values, all of them the same.
2001   if (PN->getNumIncomingValues() != 2)
2002     return false;
2003 
2004   // Only trivially analyzable loops.
2005   Loop *L = LI->getLoopFor(PN->getParent());
2006   if (!L || !L->getLoopPreheader() || !L->getLoopLatch())
2007     return false;
2008 
2009   // Require that the remainder is inside the loop.
2010   if (!L->contains(Rem))
2011     return false;
2012 
2013   // Only works if the remainder amount is loop invariant.
2014   if (!L->isLoopInvariant(RemAmt))
2015     return false;
2016 
2017   // Is the PHI a loop increment?
2018   auto LoopIncrInfo = getIVIncrement(PN, LI);
2019   if (!LoopIncrInfo)
2020     return false;
2021 
2022   // We need remainder_amount % increment_amount to be zero. Increment of one
2023   // satisfies that without any special logic and is overwhelmingly the common
2024   // case.
2025   if (!match(LoopIncrInfo->second, m_One()))
2026     return false;
2027 
2028   // Need the increment to not overflow.
2029   if (!match(LoopIncrInfo->first, m_c_NUWAdd(m_Specific(PN), m_Value())))
2030     return false;
2031 
2032   // Set output variables.
2033   RemAmtOut = RemAmt;
2034   LoopIncrPNOut = PN;
2035 
2036   return true;
2037 }
2038 
2039 // Try to transform:
2040 //
2041 // for(i = Start; i < End; ++i)
2042 //    Rem = (i nuw+ IncrLoopInvariant) u% RemAmtLoopInvariant;
2043 //
2044 // ->
2045 //
2046 // Rem = (Start nuw+ IncrLoopInvariant) u% RemAmtLoopInvariant;
2047 // for(i = Start; i < End; ++i)
2048 //    Rem = (Rem nuw+ 1 == RemAmtLoopInvariant) ? 0 : (Rem nuw+ 1);
2049 //
2050 // Currently only implemented for `IncrLoopInvariant` being zero.
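// A sketch of the IR this builds, with hypothetical names (%start.rem is the
// start value's remainder, folded away in the preheader):
//   header:
//     %rem = phi i32 [ %start.rem, %preheader ], [ %rem.sel, %latch ]
//     ...
//   latch:
//     %rem.add = add nuw i32 %rem, 1
//     %rem.cmp = icmp eq i32 %rem.add, %remamt
//     %rem.sel = select i1 %rem.cmp, i32 0, i32 %rem.add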
2051 static bool foldURemOfLoopIncrement(Instruction *Rem, const DataLayout *DL,
2052                                     const LoopInfo *LI,
2053                                     SmallSet<BasicBlock *, 32> &FreshBBs,
2054                                     bool IsHuge) {
2055   Value *RemAmt;
2056   PHINode *LoopIncrPN;
2057   if (!isRemOfLoopIncrementWithLoopInvariant(Rem, LI, RemAmt, LoopIncrPN))
2058     return false;
2059 
2060   // Only handle a non-constant remainder amount, as the extra IV is probably
2061   // not profitable in the constant case.
2062   //
2063   // Potential TODO(1): `urem` of a const ends up as `mul` + `shift` + `add`. If
2064   // we can rule out register pressure and ensure this `urem` is executed each
2065   // iteration, it's probably profitable to handle the const case as well.
2066   //
2067   // Potential TODO(2): Should we have a check for how "nested" this remainder
2068   // operation is? The new code runs every iteration so if the remainder is
2069   // guarded behind unlikely conditions this might not be worth it.
2070   if (match(RemAmt, m_ImmConstant()))
2071     return false;
2072 
2073   Loop *L = LI->getLoopFor(LoopIncrPN->getParent());
2074   Value *Start = LoopIncrPN->getIncomingValueForBlock(L->getLoopPreheader());
2075   // If we can't fully optimize out the `rem`, skip this transform.
2076   Start = simplifyURemInst(Start, RemAmt, *DL);
2077   if (!Start)
2078     return false;
2079 
2080   // Create new remainder with induction variable.
2081   Type *Ty = Rem->getType();
2082   IRBuilder<> Builder(Rem->getContext());
2083 
2084   Builder.SetInsertPoint(LoopIncrPN);
2085   PHINode *NewRem = Builder.CreatePHI(Ty, 2);
2086 
2087   Builder.SetInsertPoint(cast<Instruction>(
2088       LoopIncrPN->getIncomingValueForBlock(L->getLoopLatch())));
2089   // `(add (urem x, y), 1)` is always nuw.
2090   Value *RemAdd = Builder.CreateNUWAdd(NewRem, ConstantInt::get(Ty, 1));
2091   Value *RemCmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, RemAdd, RemAmt);
2092   Value *RemSel =
2093       Builder.CreateSelect(RemCmp, Constant::getNullValue(Ty), RemAdd);
2094 
2095   NewRem->addIncoming(Start, L->getLoopPreheader());
2096   NewRem->addIncoming(RemSel, L->getLoopLatch());
2097 
2098   // Insert all touched BBs.
2099   FreshBBs.insert(LoopIncrPN->getParent());
2100   FreshBBs.insert(L->getLoopLatch());
2101   FreshBBs.insert(Rem->getParent());
2102 
2103   replaceAllUsesWith(Rem, NewRem, FreshBBs, IsHuge);
2104   Rem->eraseFromParent();
2105   return true;
2106 }
2107 
2108 bool CodeGenPrepare::optimizeURem(Instruction *Rem) {
2109   if (foldURemOfLoopIncrement(Rem, DL, LI, FreshBBs, IsHugeFunc))
2110     return true;
2111   return false;
2112 }
2113 
2114 /// Some targets have better codegen for `ctpop(X) u< 2` than `ctpop(X) == 1`.
2115 /// This function converts `ctpop(X) ==/!= 1` into `ctpop(X) u</u> 2/1` if the
2116 /// result cannot be zero.
2117 static bool adjustIsPower2Test(CmpInst *Cmp, const TargetLowering &TLI,
2118                                const TargetTransformInfo &TTI,
2119                                const DataLayout &DL) {
2120   ICmpInst::Predicate Pred;
2121   if (!match(Cmp, m_ICmp(Pred, m_Intrinsic<Intrinsic::ctpop>(), m_One())))
2122     return false;
2123   if (!ICmpInst::isEquality(Pred))
2124     return false;
2125   auto *II = cast<IntrinsicInst>(Cmp->getOperand(0));
2126 
2127   if (isKnownNonZero(II, DL)) {
2128     if (Pred == ICmpInst::ICMP_EQ) {
2129       Cmp->setOperand(1, ConstantInt::get(II->getType(), 2));
2130       Cmp->setPredicate(ICmpInst::ICMP_ULT);
2131     } else {
2132       Cmp->setPredicate(ICmpInst::ICMP_UGT);
2133     }
2134     return true;
2135   }
2136   return false;
2137 }
2138 
2139 bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT) {
2140   if (sinkCmpExpression(Cmp, *TLI))
2141     return true;
2142 
2143   if (combineToUAddWithOverflow(Cmp, ModifiedDT))
2144     return true;
2145 
2146   if (combineToUSubWithOverflow(Cmp, ModifiedDT))
2147     return true;
2148 
2149   if (foldICmpWithDominatingICmp(Cmp, *TLI))
2150     return true;
2151 
2152   if (swapICmpOperandsToExposeCSEOpportunities(Cmp))
2153     return true;
2154 
2155   if (foldFCmpToFPClassTest(Cmp, *TLI, *DL))
2156     return true;
2157 
2158   if (adjustIsPower2Test(Cmp, *TLI, *TTI, *DL))
2159     return true;
2160 
2161   return false;
2162 }
2163 
2164 /// Duplicate and sink the given 'and' instruction into user blocks where it is
2165 /// used in a compare to allow isel to generate better code for targets where
2166 /// this operation can be combined.
2167 ///
2168 /// Return true if any changes are made.
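/// For illustration, a sketch: an 'and' such as '%m = and i64 %x, 255' whose
/// only users are 'icmp eq i64 %m, 0' instructions in other blocks is
/// re-created next to each icmp, so isel can combine the mask and compare into
/// a single test-under-mask style instruction where the target has one.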
2169 static bool sinkAndCmp0Expression(Instruction *AndI, const TargetLowering &TLI,
2170                                   SetOfInstrs &InsertedInsts) {
2171   // Double-check that we're not trying to optimize an instruction that was
2172   // already optimized by some other part of this pass.
2173   assert(!InsertedInsts.count(AndI) &&
2174          "Attempting to optimize already optimized and instruction");
2175   (void)InsertedInsts;
2176 
2177   // Nothing to do for single use in same basic block.
2178   if (AndI->hasOneUse() &&
2179       AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
2180     return false;
2181 
2182   // Try to avoid cases where sinking/duplicating is likely to increase register
2183   // pressure.
2184   if (!isa<ConstantInt>(AndI->getOperand(0)) &&
2185       !isa<ConstantInt>(AndI->getOperand(1)) &&
2186       AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
2187     return false;
2188 
2189   for (auto *U : AndI->users()) {
2190     Instruction *User = cast<Instruction>(U);
2191 
2192     // Only sink 'and' feeding icmp with 0.
2193     if (!isa<ICmpInst>(User))
2194       return false;
2195 
2196     auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
2197     if (!CmpC || !CmpC->isZero())
2198       return false;
2199   }
2200 
2201   if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
2202     return false;
2203 
2204   LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
2205   LLVM_DEBUG(AndI->getParent()->dump());
2206 
2207   // Push the 'and' into the same block as the icmp 0.  There should only be
2208   // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
2209   // others, so we don't need to keep track of which BBs we insert into.
2210   for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
2211        UI != E;) {
2212     Use &TheUse = UI.getUse();
2213     Instruction *User = cast<Instruction>(*UI);
2214 
2215     // Preincrement use iterator so we don't invalidate it.
2216     ++UI;
2217 
2218     LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");
2219 
2220     // Keep the 'and' in the same place if the use is already in the same block.
2221     Instruction *InsertPt =
2222         User->getParent() == AndI->getParent() ? AndI : User;
2223     Instruction *InsertedAnd = BinaryOperator::Create(
2224         Instruction::And, AndI->getOperand(0), AndI->getOperand(1), "",
2225         InsertPt->getIterator());
2226     // Propagate the debug info.
2227     InsertedAnd->setDebugLoc(AndI->getDebugLoc());
2228 
2229     // Replace a use of the 'and' with a use of the new 'and'.
2230     TheUse = InsertedAnd;
2231     ++NumAndUses;
2232     LLVM_DEBUG(User->getParent()->dump());
2233   }
2234 
2235   // We removed all uses, nuke the and.
2236   AndI->eraseFromParent();
2237   return true;
2238 }
2239 
2240 /// Check if the candidates could be combined with a shift instruction, which
2241 /// includes:
2242 /// 1. Truncate instruction
2243 /// 2. An 'and' instruction where the immediate is a mask of the low bits:
2244 ///    imm & (imm+1) == 0
2245 static bool isExtractBitsCandidateUse(Instruction *User) {
2246   if (!isa<TruncInst>(User)) {
2247     if (User->getOpcode() != Instruction::And ||
2248         !isa<ConstantInt>(User->getOperand(1)))
2249       return false;
2250 
2251     const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();
2252 
2253     if ((Cimm & (Cimm + 1)).getBoolValue())
2254       return false;
2255   }
2256   return true;
2257 }
2258 
2259 /// Sink both the shift and the truncate into the BBs that use the truncate.
2260 static bool
2261 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
2262                      DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
2263                      const TargetLowering &TLI, const DataLayout &DL) {
2264   BasicBlock *UserBB = User->getParent();
2265   DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
2266   auto *TruncI = cast<TruncInst>(User);
2267   bool MadeChange = false;
2268 
2269   for (Value::user_iterator TruncUI = TruncI->user_begin(),
2270                             TruncE = TruncI->user_end();
2271        TruncUI != TruncE;) {
2272 
2273     Use &TruncTheUse = TruncUI.getUse();
2274     Instruction *TruncUser = cast<Instruction>(*TruncUI);
2275     // Preincrement use iterator so we don't invalidate it.
2276 
2277     ++TruncUI;
2278 
2279     int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
2280     if (!ISDOpcode)
2281       continue;
2282 
2283     // If the use is actually a legal node, there will not be an
2284     // implicit truncate.
2285     // FIXME: always querying the result type is just an
2286     // approximation; some nodes' legality is determined by the
2287     // operand or other means. There's no good way to find out though.
2288     if (TLI.isOperationLegalOrCustom(
2289             ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
2290       continue;
2291 
2292     // Don't bother for PHI nodes.
2293     if (isa<PHINode>(TruncUser))
2294       continue;
2295 
2296     BasicBlock *TruncUserBB = TruncUser->getParent();
2297 
2298     if (UserBB == TruncUserBB)
2299       continue;
2300 
2301     BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
2302     CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
2303 
2304     if (!InsertedShift && !InsertedTrunc) {
2305       BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
2306       assert(InsertPt != TruncUserBB->end());
2307       // Sink the shift
2308       if (ShiftI->getOpcode() == Instruction::AShr)
2309         InsertedShift =
2310             BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "");
2311       else
2312         InsertedShift =
2313             BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "");
2314       InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
2315       InsertedShift->insertBefore(*TruncUserBB, InsertPt);
2316 
2317       // Sink the trunc
2318       BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
2319       TruncInsertPt++;
2320       // It will go ahead of any debug-info.
2321       TruncInsertPt.setHeadBit(true);
2322       assert(TruncInsertPt != TruncUserBB->end());
2323 
2324       InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
2325                                        TruncI->getType(), "");
2326       InsertedTrunc->insertBefore(*TruncUserBB, TruncInsertPt);
2327       InsertedTrunc->setDebugLoc(TruncI->getDebugLoc());
2328 
2329       MadeChange = true;
2330 
2331       TruncTheUse = InsertedTrunc;
2332     }
2333   }
2334   return MadeChange;
2335 }
2336 
2337 /// Sink the shift *right* instruction into user blocks if the uses could
2338 /// potentially be combined with this shift instruction to generate a
2339 /// BitExtract instruction. It is only applied if the architecture supports
2340 /// BitExtract instructions. Here is an example:
2341 /// BB1:
2342 ///   %x.extract.shift = lshr i64 %arg1, 32
2343 /// BB2:
2344 ///   %x.extract.trunc = trunc i64 %x.extract.shift to i16
2345 /// ==>
2346 ///
2347 /// BB2:
2348 ///   %x.extract.shift.1 = lshr i64 %arg1, 32
2349 ///   %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
2350 ///
2351 /// CodeGen will recognize the pattern in BB2 and generate BitExtract
2352 /// instruction.
2353 /// Return true if any changes are made.
2354 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
2355                                 const TargetLowering &TLI,
2356                                 const DataLayout &DL) {
2357   BasicBlock *DefBB = ShiftI->getParent();
2358 
2359   /// Only insert instructions in each block once.
2360   DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
2361 
2362   bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));
2363 
2364   bool MadeChange = false;
2365   for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
2366        UI != E;) {
2367     Use &TheUse = UI.getUse();
2368     Instruction *User = cast<Instruction>(*UI);
2369     // Preincrement use iterator so we don't invalidate it.
2370     ++UI;
2371 
2372     // Don't bother for PHI nodes.
2373     if (isa<PHINode>(User))
2374       continue;
2375 
2376     if (!isExtractBitsCandidateUse(User))
2377       continue;
2378 
2379     BasicBlock *UserBB = User->getParent();
2380 
2381     if (UserBB == DefBB) {
2382       // The shift and truncate instructions are in the same BB, but the use of
2383       // the truncate (TruncUse) may still introduce another implicit truncate
2384       // if its type is not legal. In this case, we would like to sink both the
2385       // shift and the truncate into the BB of TruncUse.
2386       // for example:
2387       // BB1:
2388       // i64 shift.result = lshr i64 opnd, imm
2389       // trunc.result = trunc shift.result to i16
2390       //
2391       // BB2:
2392       //   ----> We will have an implicit truncate here if the architecture does
2393       //   not have i16 compare.
2394       // cmp i16 trunc.result, opnd2
2395       //
2396       if (isa<TruncInst>(User) &&
2397           shiftIsLegal
2398           // If the type of the truncate is legal, no truncate will be
2399           // introduced in other basic blocks.
2400           && (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
2401         MadeChange =
2402             SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);
2403 
2404       continue;
2405     }
2406     // If we have already inserted a shift into this block, use it.
2407     BinaryOperator *&InsertedShift = InsertedShifts[UserBB];
2408 
2409     if (!InsertedShift) {
2410       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
2411       assert(InsertPt != UserBB->end());
2412 
2413       if (ShiftI->getOpcode() == Instruction::AShr)
2414         InsertedShift =
2415             BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "");
2416       else
2417         InsertedShift =
2418             BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "");
2419       InsertedShift->insertBefore(*UserBB, InsertPt);
2420       InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
2421 
2422       MadeChange = true;
2423     }
2424 
2425     // Replace a use of the shift with a use of the new shift.
2426     TheUse = InsertedShift;
2427   }
2428 
2429   // If we removed all uses, or there are none, nuke the shift.
2430   if (ShiftI->use_empty()) {
2431     salvageDebugInfo(*ShiftI);
2432     ShiftI->eraseFromParent();
2433     MadeChange = true;
2434   }
2435 
2436   return MadeChange;
2437 }
2438 
2439 /// If counting leading or trailing zeros is an expensive operation and a zero
2440 /// input is defined, add a check for zero to avoid calling the intrinsic.
2441 ///
2442 /// We want to transform:
2443 ///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
2444 ///
2445 /// into:
2446 ///   entry:
2447 ///     %cmpz = icmp eq i64 %A, 0
2448 ///     br i1 %cmpz, label %cond.end, label %cond.false
2449 ///   cond.false:
2450 ///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
2451 ///     br label %cond.end
2452 ///   cond.end:
2453 ///     %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
2454 ///
2455 /// If the transform is performed, return true and set ModifiedDT to true.
2456 static bool despeculateCountZeros(IntrinsicInst *CountZeros,
2457                                   LoopInfo &LI,
2458                                   const TargetLowering *TLI,
2459                                   const DataLayout *DL, ModifyDT &ModifiedDT,
2460                                   SmallSet<BasicBlock *, 32> &FreshBBs,
2461                                   bool IsHugeFunc) {
2462   // If a zero input is undefined, it doesn't make sense to despeculate that.
2463   if (match(CountZeros->getOperand(1), m_One()))
2464     return false;
2465 
2466   // If it's cheap to speculate, there's nothing to do.
2467   Type *Ty = CountZeros->getType();
2468   auto IntrinsicID = CountZeros->getIntrinsicID();
2469   if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz(Ty)) ||
2470       (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz(Ty)))
2471     return false;
2472 
2473   // Only handle legal scalar cases. Anything else requires too much work.
2474   unsigned SizeInBits = Ty->getScalarSizeInBits();
2475   if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
2476     return false;
2477 
2478   // Bail if the value is never zero.
2479   Use &Op = CountZeros->getOperandUse(0);
2480   if (isKnownNonZero(Op, *DL))
2481     return false;
2482 
2483   // The intrinsic will be sunk behind a compare against zero and branch.
2484   BasicBlock *StartBlock = CountZeros->getParent();
2485   BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
2486   if (IsHugeFunc)
2487     FreshBBs.insert(CallBlock);
2488 
2489   // Create another block after the count zero intrinsic. A PHI will be added
2490   // in this block to select the result of the intrinsic or the bit-width
2491   // constant if the input to the intrinsic is zero.
2492   BasicBlock::iterator SplitPt = std::next(BasicBlock::iterator(CountZeros));
2493   // Any debug-info after CountZeros should not be included.
2494   SplitPt.setHeadBit(true);
2495   BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
2496   if (IsHugeFunc)
2497     FreshBBs.insert(EndBlock);
2498 
2499   // Update the LoopInfo. The new blocks are in the same loop as the start
2500   // block.
2501   if (Loop *L = LI.getLoopFor(StartBlock)) {
2502     L->addBasicBlockToLoop(CallBlock, LI);
2503     L->addBasicBlockToLoop(EndBlock, LI);
2504   }
2505 
2506   // Set up a builder to create a compare, conditional branch, and PHI.
2507   IRBuilder<> Builder(CountZeros->getContext());
2508   Builder.SetInsertPoint(StartBlock->getTerminator());
2509   Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());
2510 
2511   // Replace the unconditional branch that was created by the first split with
2512   // a compare against zero and a conditional branch.
2513   Value *Zero = Constant::getNullValue(Ty);
2514   // Avoid introducing branch on poison. This also replaces the ctz operand.
2515   if (!isGuaranteedNotToBeUndefOrPoison(Op))
2516     Op = Builder.CreateFreeze(Op, Op->getName() + ".fr");
2517   Value *Cmp = Builder.CreateICmpEQ(Op, Zero, "cmpz");
2518   Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
2519   StartBlock->getTerminator()->eraseFromParent();
2520 
2521   // Create a PHI in the end block to select either the output of the intrinsic
2522   // or the bit width of the operand.
2523   Builder.SetInsertPoint(EndBlock, EndBlock->begin());
2524   PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
2525   replaceAllUsesWith(CountZeros, PN, FreshBBs, IsHugeFunc);
2526   Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
2527   PN->addIncoming(BitWidth, StartBlock);
2528   PN->addIncoming(CountZeros, CallBlock);
2529 
2530   // We are explicitly handling the zero case, so we can set the intrinsic's
2531   // undefined zero argument to 'true'. This will also prevent reprocessing the
2532   // intrinsic; we only despeculate when a zero input is defined.
2533   CountZeros->setArgOperand(1, Builder.getTrue());
2534   ModifiedDT = ModifyDT::ModifyBBDT;
2535   return true;
2536 }
2537 
2538 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) {
2539   BasicBlock *BB = CI->getParent();
2540 
2541   // Lower inline assembly if we can.
2542   // If we found an inline asm expression, and if the target knows how to
2543   // lower it to normal LLVM code, do so now.
2544   if (CI->isInlineAsm()) {
2545     if (TLI->ExpandInlineAsm(CI)) {
2546       // Avoid invalidating the iterator.
2547       CurInstIterator = BB->begin();
2548       // Avoid processing instructions out of order, which could cause
2549       // reuse before a value is defined.
2550       SunkAddrs.clear();
2551       return true;
2552     }
2553     // Sink address computing for memory operands into the block.
2554     if (optimizeInlineAsmInst(CI))
2555       return true;
2556   }
2557 
2558   // Align the pointer arguments to this call if the target thinks it's a good
2559   // idea
2560   unsigned MinSize;
2561   Align PrefAlign;
2562   if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
2563     for (auto &Arg : CI->args()) {
2564       // We want to align both objects whose address is used directly and
2565       // objects whose address is used in casts and GEPs, though it only makes
2566       // sense for GEPs if the offset is a multiple of the desired alignment and
2567       // if size - offset meets the size threshold.
2568       if (!Arg->getType()->isPointerTy())
2569         continue;
2570       APInt Offset(DL->getIndexSizeInBits(
2571                        cast<PointerType>(Arg->getType())->getAddressSpace()),
2572                    0);
2573       Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
2574       uint64_t Offset2 = Offset.getLimitedValue();
2575       if (!isAligned(PrefAlign, Offset2))
2576         continue;
2577       AllocaInst *AI;
2578       if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlign() < PrefAlign &&
2579           DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
2580         AI->setAlignment(PrefAlign);
2581       // Global variables can only be aligned if they are defined in this
2582       // object (i.e. they are uniquely initialized in this object), and
2583       // over-aligning global variables that have an explicit section is
2584       // forbidden.
2585       GlobalVariable *GV;
2586       if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
2587           GV->getPointerAlignment(*DL) < PrefAlign &&
2588           DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2)
2589         GV->setAlignment(PrefAlign);
2590     }
2591   }
2592   // If this is a memcpy (or similar) then we may be able to improve the
2593   // alignment.
2594   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
2595     Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
2596     MaybeAlign MIDestAlign = MI->getDestAlign();
2597     if (!MIDestAlign || DestAlign > *MIDestAlign)
2598       MI->setDestAlignment(DestAlign);
2599     if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
2600       MaybeAlign MTISrcAlign = MTI->getSourceAlign();
2601       Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
2602       if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
2603         MTI->setSourceAlignment(SrcAlign);
2604     }
2605   }
2606 
2607   // If we have a cold call site, try to sink addressing computation into the
2608   // cold block.  This interacts with our handling for loads and stores to
2609   // ensure that we can fold all uses of a potential addressing computation
2610   // into their uses.  TODO: generalize this to work over profiling data
2611   if (CI->hasFnAttr(Attribute::Cold) && !OptSize &&
2612       !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
2613     for (auto &Arg : CI->args()) {
2614       if (!Arg->getType()->isPointerTy())
2615         continue;
2616       unsigned AS = Arg->getType()->getPointerAddressSpace();
2617       if (optimizeMemoryInst(CI, Arg, Arg->getType(), AS))
2618         return true;
2619     }
2620 
2621   IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
2622   if (II) {
2623     switch (II->getIntrinsicID()) {
2624     default:
2625       break;
2626     case Intrinsic::assume:
2627       llvm_unreachable("llvm.assume should have been removed already");
2628     case Intrinsic::allow_runtime_check:
2629     case Intrinsic::allow_ubsan_check:
2630     case Intrinsic::experimental_widenable_condition: {
2631       // Give up on future widening opportunities so that we can fold away dead
2632       // paths and merge blocks before going into block-local instruction
2633       // selection.
2634       if (II->use_empty()) {
2635         II->eraseFromParent();
2636         return true;
2637       }
2638       Constant *RetVal = ConstantInt::getTrue(II->getContext());
2639       resetIteratorIfInvalidatedWhileCalling(BB, [&]() {
2640         replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
2641       });
2642       return true;
2643     }
2644     case Intrinsic::objectsize:
2645       llvm_unreachable("llvm.objectsize.* should have been lowered already");
2646     case Intrinsic::is_constant:
2647       llvm_unreachable("llvm.is.constant.* should have been lowered already");
2648     case Intrinsic::aarch64_stlxr:
2649     case Intrinsic::aarch64_stxr: {
2650       ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
2651       if (!ExtVal || !ExtVal->hasOneUse() ||
2652           ExtVal->getParent() == CI->getParent())
2653         return false;
2654       // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
2655       ExtVal->moveBefore(CI);
2656       // Mark this instruction as "inserted by CGP", so that other
2657       // optimizations don't touch it.
2658       InsertedInsts.insert(ExtVal);
2659       return true;
2660     }
2661 
2662     case Intrinsic::launder_invariant_group:
2663     case Intrinsic::strip_invariant_group: {
2664       Value *ArgVal = II->getArgOperand(0);
2665       auto it = LargeOffsetGEPMap.find(II);
2666       if (it != LargeOffsetGEPMap.end()) {
2667         // Merge entries in LargeOffsetGEPMap to reflect the RAUW.
2668         // Take the GEP list out first so we do not have to deal with iterator
2669         // invalidation after possibly adding ArgVal to LargeOffsetGEPMap.
2670         auto GEPs = std::move(it->second);
2671         LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end());
2672         LargeOffsetGEPMap.erase(II);
2673       }
2674 
2675       replaceAllUsesWith(II, ArgVal, FreshBBs, IsHugeFunc);
2676       II->eraseFromParent();
2677       return true;
2678     }
2679     case Intrinsic::cttz:
2680     case Intrinsic::ctlz:
2681       // If counting zeros is expensive, try to avoid it.
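      // (despeculateCountZeros may, for instance, guard the count with an
      // explicit zero check so that only non-zero inputs reach an instruction
      // whose result is undefined for a zero input.)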
2682       return despeculateCountZeros(II, *LI, TLI, DL, ModifiedDT, FreshBBs,
2683                                    IsHugeFunc);
2684     case Intrinsic::fshl:
2685     case Intrinsic::fshr:
2686       return optimizeFunnelShift(II);
2687     case Intrinsic::dbg_assign:
2688     case Intrinsic::dbg_value:
2689       return fixupDbgValue(II);
2690     case Intrinsic::masked_gather:
2691       return optimizeGatherScatterInst(II, II->getArgOperand(0));
2692     case Intrinsic::masked_scatter:
2693       return optimizeGatherScatterInst(II, II->getArgOperand(1));
2694     }
2695 
2696     SmallVector<Value *, 2> PtrOps;
2697     Type *AccessTy;
2698     if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
2699       while (!PtrOps.empty()) {
2700         Value *PtrVal = PtrOps.pop_back_val();
2701         unsigned AS = PtrVal->getType()->getPointerAddressSpace();
2702         if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
2703           return true;
2704       }
2705   }
2706 
2707   // From here on out we're working with named functions.
2708   auto *Callee = CI->getCalledFunction();
2709   if (!Callee)
2710     return false;
2711 
2712   // Lower all default uses of _chk calls.  This is very similar
2713   // to what InstCombineCalls does, but here we are only lowering calls
2714   // to fortified library functions (e.g. __memcpy_chk) that have the default
2715   // "don't know" as the objectsize.  Anything else should be left alone.
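  // For example (illustrative IR), a call such as
  //   %r = call ptr @__memcpy_chk(ptr %dst, ptr %src, i64 %n, i64 -1)
  // where -1 is the "unknown object size" sentinel, is rewritten to a plain
  //   call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %n, i1 false)
  // and uses of %r are replaced with %dst.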
2716   FortifiedLibCallSimplifier Simplifier(TLInfo, true);
2717   IRBuilder<> Builder(CI);
2718   if (Value *V = Simplifier.optimizeCall(CI, Builder)) {
2719     replaceAllUsesWith(CI, V, FreshBBs, IsHugeFunc);
2720     CI->eraseFromParent();
2721     return true;
2722   }
2723 
2724   // SCCP may have propagated, among other things, C++ static variables across
2725   // calls. If this happens to be the case, we may want to undo it in order to
2726   // avoid redundant pointer computation of the constant, as the function
2727   // returning the constant needs to be executed anyway.
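  // For example (with illustrative names), if @get_instance() provably returns
  // @instance and SCCP has rewritten
  //   %p = call ptr @get_instance()
  //   store i32 0, ptr %p
  // into a store directly to @instance, we prefer to make the store use %p
  // again: %p is available anyway once the call has executed, so there is no
  // need to materialize @instance's address a second time.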
2728   auto GetUniformReturnValue = [](const Function *F) -> GlobalVariable * {
2729     if (!F->getReturnType()->isPointerTy())
2730       return nullptr;
2731 
2732     GlobalVariable *UniformValue = nullptr;
2733     for (auto &BB : *F) {
2734       if (auto *RI = dyn_cast<ReturnInst>(BB.getTerminator())) {
2735         if (auto *V = dyn_cast<GlobalVariable>(RI->getReturnValue())) {
2736           if (!UniformValue)
2737             UniformValue = V;
2738           else if (V != UniformValue)
2739             return nullptr;
2740         } else {
2741           return nullptr;
2742         }
2743       }
2744     }
2745 
2746     return UniformValue;
2747   };
2748 
2749   if (Callee->hasExactDefinition()) {
2750     if (GlobalVariable *RV = GetUniformReturnValue(Callee)) {
2751       bool MadeChange = false;
2752       for (Use &U : make_early_inc_range(RV->uses())) {
2753         auto *I = dyn_cast<Instruction>(U.getUser());
2754         if (!I || I->getParent() != CI->getParent()) {
2755           // Limit to the same basic block to avoid extending the call-site live
2756           // range, which otherwise could increase register pressure.
2757           continue;
2758         }
2759         if (CI->comesBefore(I)) {
2760           U.set(CI);
2761           MadeChange = true;
2762         }
2763       }
2764 
2765       return MadeChange;
2766     }
2767   }
2768 
2769   return false;
2770 }
2771 
2772 static bool isIntrinsicOrLFToBeTailCalled(const TargetLibraryInfo *TLInfo,
2773                                           const CallInst *CI) {
2774   assert(CI && CI->use_empty());
2775 
2776   if (const auto *II = dyn_cast<IntrinsicInst>(CI))
2777     switch (II->getIntrinsicID()) {
2778     case Intrinsic::memset:
2779     case Intrinsic::memcpy:
2780     case Intrinsic::memmove:
2781       return true;
2782     default:
2783       return false;
2784     }
2785 
2786   LibFunc LF;
2787   Function *Callee = CI->getCalledFunction();
2788   if (Callee && TLInfo && TLInfo->getLibFunc(*Callee, LF))
2789     switch (LF) {
2790     case LibFunc_strcpy:
2791     case LibFunc_strncpy:
2792     case LibFunc_strcat:
2793     case LibFunc_strncat:
2794       return true;
2795     default:
2796       return false;
2797     }
2798 
2799   return false;
2800 }
2801 
2802 /// Look for opportunities to duplicate return instructions to their
2803 /// predecessors to enable tail call optimizations. The case it is currently
2804 /// looking for is the following one. Known intrinsics or library functions
2805 /// that may be tail called are taken into account as well.
2806 /// @code
2807 /// bb0:
2808 ///   %tmp0 = tail call i32 @f0()
2809 ///   br label %return
2810 /// bb1:
2811 ///   %tmp1 = tail call i32 @f1()
2812 ///   br label %return
2813 /// bb2:
2814 ///   %tmp2 = tail call i32 @f2()
2815 ///   br label %return
2816 /// return:
2817 ///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
2818 ///   ret i32 %retval
2819 /// @endcode
2820 ///
2821 /// =>
2822 ///
2823 /// @code
2824 /// bb0:
2825 ///   %tmp0 = tail call i32 @f0()
2826 ///   ret i32 %tmp0
2827 /// bb1:
2828 ///   %tmp1 = tail call i32 @f1()
2829 ///   ret i32 %tmp1
2830 /// bb2:
2831 ///   %tmp2 = tail call i32 @f2()
2832 ///   ret i32 %tmp2
2833 /// @endcode
2834 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB,
2835                                                 ModifyDT &ModifiedDT) {
2836   if (!BB->getTerminator())
2837     return false;
2838 
2839   ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
2840   if (!RetI)
2841     return false;
2842 
2843   assert(LI->getLoopFor(BB) == nullptr && "A return block cannot be in a loop");
2844 
2845   PHINode *PN = nullptr;
2846   ExtractValueInst *EVI = nullptr;
2847   BitCastInst *BCI = nullptr;
2848   Value *V = RetI->getReturnValue();
2849   if (V) {
2850     BCI = dyn_cast<BitCastInst>(V);
2851     if (BCI)
2852       V = BCI->getOperand(0);
2853 
2854     EVI = dyn_cast<ExtractValueInst>(V);
2855     if (EVI) {
2856       V = EVI->getOperand(0);
2857       if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; }))
2858         return false;
2859     }
2860 
2861     PN = dyn_cast<PHINode>(V);
2862   }
2863 
2864   if (PN && PN->getParent() != BB)
2865     return false;
2866 
2867   auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) {
2868     const BitCastInst *BC = dyn_cast<BitCastInst>(Inst);
2869     if (BC && BC->hasOneUse())
2870       Inst = BC->user_back();
2871 
2872     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
2873       return II->getIntrinsicID() == Intrinsic::lifetime_end;
2874     return false;
2875   };
2876 
2877   SmallVector<const IntrinsicInst *, 4> FakeUses;
2878 
2879   auto isFakeUse = [&FakeUses](const Instruction *Inst) {
2880     if (auto *II = dyn_cast<IntrinsicInst>(Inst);
2881         II && II->getIntrinsicID() == Intrinsic::fake_use) {
2882       // Record the instruction so it can be preserved when the exit block is
2883       // removed.
2884       // Do not record fake uses that use the result of a PHI node: those
2885       // cannot simply be copied into the duplicated return blocks.
2886       // FIXME: If we do want to copy such a fake use into the return blocks,
2887       // we have to figure out which of the PHI node operands to use for each
2888       // copy.
2889       if (!isa<PHINode>(II->getOperand(0))) {
2890         FakeUses.push_back(II);
2891       }
2892       return true;
2893     }
2894 
2895     return false;
2896   };
2897 
2898   // Make sure there are no instructions between the first instruction
2899   // and return.
2900   const Instruction *BI = BB->getFirstNonPHI();
2901   // Skip over debug info, pseudo probes, lifetime ends, fake uses, etc.
2902   while (isa<DbgInfoIntrinsic>(BI) || BI == BCI || BI == EVI ||
2903          isa<PseudoProbeInst>(BI) || isLifetimeEndOrBitCastFor(BI) ||
2904          isFakeUse(BI))
2905     BI = BI->getNextNode();
2906   if (BI != RetI)
2907     return false;
2908 
2909   /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
2910   /// call.
2911   const Function *F = BB->getParent();
2912   SmallVector<BasicBlock *, 4> TailCallBBs;
2913   // Record the call instructions so we can insert any fake uses
2914   // that need to be preserved before them.
2915   SmallVector<CallInst *, 4> CallInsts;
2916   if (PN) {
2917     for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
2918       // Look through bitcasts.
2919       Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts();
2920       CallInst *CI = dyn_cast<CallInst>(IncomingVal);
2921       BasicBlock *PredBB = PN->getIncomingBlock(I);
2922       // Make sure the phi value is indeed produced by the tail call.
2923       if (CI && CI->hasOneUse() && CI->getParent() == PredBB &&
2924           TLI->mayBeEmittedAsTailCall(CI) &&
2925           attributesPermitTailCall(F, CI, RetI, *TLI)) {
2926         TailCallBBs.push_back(PredBB);
2927         CallInsts.push_back(CI);
2928       } else {
2929         // Consider the cases in which the phi value is indirectly produced by
2930         // the tail call, for example when encountering memset(), memmove(),
2931         // strcpy(), whose return value may have been optimized out. In such
2932         // cases, the value needs to be the first function argument.
2933         //
2934         // bb0:
2935         //   tail call void @llvm.memset.p0.i64(ptr %0, i8 0, i64 %1)
2936         //   br label %return
2937         // return:
2938         //   %phi = phi ptr [ %0, %bb0 ], [ %2, %entry ]
2939         if (PredBB && PredBB->getSingleSuccessor() == BB)
2940           CI = dyn_cast_or_null<CallInst>(
2941               PredBB->getTerminator()->getPrevNonDebugInstruction(true));
2942 
2943         if (CI && CI->use_empty() &&
2944             isIntrinsicOrLFToBeTailCalled(TLInfo, CI) &&
2945             IncomingVal == CI->getArgOperand(0) &&
2946             TLI->mayBeEmittedAsTailCall(CI) &&
2947             attributesPermitTailCall(F, CI, RetI, *TLI)) {
2948           TailCallBBs.push_back(PredBB);
2949           CallInsts.push_back(CI);
2950         }
2951       }
2952     }
2953   } else {
2954     SmallPtrSet<BasicBlock *, 4> VisitedBBs;
2955     for (BasicBlock *Pred : predecessors(BB)) {
2956       if (!VisitedBBs.insert(Pred).second)
2957         continue;
2958       if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) {
2959         CallInst *CI = dyn_cast<CallInst>(I);
2960         if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
2961             attributesPermitTailCall(F, CI, RetI, *TLI)) {
2962           // Either we return void or the return value must be the first
2963           // argument of a known intrinsic or library function.
2964           if (!V || isa<UndefValue>(V) ||
2965               (isIntrinsicOrLFToBeTailCalled(TLInfo, CI) &&
2966                V == CI->getArgOperand(0))) {
2967             TailCallBBs.push_back(Pred);
2968             CallInsts.push_back(CI);
2969           }
2970         }
2971       }
2972     }
2973   }
2974 
2975   bool Changed = false;
2976   for (auto const &TailCallBB : TailCallBBs) {
2977     // Make sure the call instruction is followed by an unconditional branch to
2978     // the return block.
2979     BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator());
2980     if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
2981       continue;
2982 
2983     // Duplicate the return into TailCallBB.
2984     (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB);
2985     assert(!VerifyBFIUpdates ||
2986            BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB));
2987     BFI->setBlockFreq(BB,
2988                       (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)));
2989     ModifiedDT = ModifyDT::ModifyBBDT;
2990     Changed = true;
2991     ++NumRetsDup;
2992   }
2993 
2994   // If we eliminated all predecessors of the block, delete the block now.
2995   if (Changed && !BB->hasAddressTaken() && pred_empty(BB)) {
2996     // Copy the fake uses found in the original return block to all blocks
2997     // that contain tail calls.
2998     for (auto *CI : CallInsts) {
2999       for (auto const *FakeUse : FakeUses) {
3000         auto *ClonedInst = FakeUse->clone();
3001         ClonedInst->insertBefore(CI);
3002       }
3003     }
3004     BB->eraseFromParent();
3005   }
3006 
3007   return Changed;
3008 }
3009 
3010 //===----------------------------------------------------------------------===//
3011 // Memory Optimization
3012 //===----------------------------------------------------------------------===//
3013 
3014 namespace {
3015 
3016 /// This is an extended version of TargetLowering::AddrMode
3017 /// which holds actual Value*'s for register values.
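/// For example, an address computed as (%base + 4 * %idx + 16) is held with
/// BaseReg = %base, ScaledReg = %idx, Scale = 4 and BaseOffs = 16.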
3018 struct ExtAddrMode : public TargetLowering::AddrMode {
3019   Value *BaseReg = nullptr;
3020   Value *ScaledReg = nullptr;
3021   Value *OriginalValue = nullptr;
3022   bool InBounds = true;
3023 
3024   enum FieldName {
3025     NoField = 0x00,
3026     BaseRegField = 0x01,
3027     BaseGVField = 0x02,
3028     BaseOffsField = 0x04,
3029     ScaledRegField = 0x08,
3030     ScaleField = 0x10,
3031     MultipleFields = 0xff
3032   };
3033 
3034   ExtAddrMode() = default;
3035 
3036   void print(raw_ostream &OS) const;
3037   void dump() const;
3038 
3039   FieldName compare(const ExtAddrMode &other) {
3040     // First check that the types are the same on each field, as differing
3041     // types are something we can't cope with later on.
3042     if (BaseReg && other.BaseReg &&
3043         BaseReg->getType() != other.BaseReg->getType())
3044       return MultipleFields;
3045     if (BaseGV && other.BaseGV && BaseGV->getType() != other.BaseGV->getType())
3046       return MultipleFields;
3047     if (ScaledReg && other.ScaledReg &&
3048         ScaledReg->getType() != other.ScaledReg->getType())
3049       return MultipleFields;
3050 
3051     // Conservatively reject 'inbounds' mismatches.
3052     if (InBounds != other.InBounds)
3053       return MultipleFields;
3054 
3055     // Check each field to see if it differs.
3056     unsigned Result = NoField;
3057     if (BaseReg != other.BaseReg)
3058       Result |= BaseRegField;
3059     if (BaseGV != other.BaseGV)
3060       Result |= BaseGVField;
3061     if (BaseOffs != other.BaseOffs)
3062       Result |= BaseOffsField;
3063     if (ScaledReg != other.ScaledReg)
3064       Result |= ScaledRegField;
3065     // Don't count 0 as being a different scale, because that actually means
3066     // unscaled (which will already be counted by having no ScaledReg).
3067     if (Scale && other.Scale && Scale != other.Scale)
3068       Result |= ScaleField;
3069 
3070     if (llvm::popcount(Result) > 1)
3071       return MultipleFields;
3072     else
3073       return static_cast<FieldName>(Result);
3074   }
3075 
3076   // An AddrMode is trivial if it involves no calculation, i.e. it is just a
3077   // base with no offset.
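  // For example, a bare pointer %p (just BaseReg = %p) is trivial, whereas
  // (%p + 8) or (%p + 4 * %i) is not.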
3078   bool isTrivial() {
3079     // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is
3080     // trivial if at most one of these terms is nonzero, except that BaseGV and
3081     // BaseReg both being zero actually means a null pointer value, which we
3082     // consider to be 'non-zero' here.
3083     return !BaseOffs && !Scale && !(BaseGV && BaseReg);
3084   }
3085 
3086   Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) {
3087     switch (Field) {
3088     default:
3089       return nullptr;
3090     case BaseRegField:
3091       return BaseReg;
3092     case BaseGVField:
3093       return BaseGV;
3094     case ScaledRegField:
3095       return ScaledReg;
3096     case BaseOffsField:
3097       return ConstantInt::get(IntPtrTy, BaseOffs);
3098     }
3099   }
3100 
3101   void SetCombinedField(FieldName Field, Value *V,
3102                         const SmallVectorImpl<ExtAddrMode> &AddrModes) {
3103     switch (Field) {
3104     default:
3105       llvm_unreachable("Unhandled fields are expected to be rejected earlier");
3106       break;
3107     case ExtAddrMode::BaseRegField:
3108       BaseReg = V;
3109       break;
3110     case ExtAddrMode::BaseGVField:
3111       // A combined BaseGV is an Instruction, not a GlobalValue, so it goes
3112       // in the BaseReg field.
3113       assert(BaseReg == nullptr);
3114       BaseReg = V;
3115       BaseGV = nullptr;
3116       break;
3117     case ExtAddrMode::ScaledRegField:
3118       ScaledReg = V;
3119       // If we have a mix of scaled and unscaled addrmodes then we want the
3120       // combined scale to be the non-zero one rather than zero.
3121       if (!Scale)
3122         for (const ExtAddrMode &AM : AddrModes)
3123           if (AM.Scale) {
3124             Scale = AM.Scale;
3125             break;
3126           }
3127       break;
3128     case ExtAddrMode::BaseOffsField:
3129       // The offset is no longer a constant, so it goes in ScaledReg with a
3130       // scale of 1.
3131       assert(ScaledReg == nullptr);
3132       ScaledReg = V;
3133       Scale = 1;
3134       BaseOffs = 0;
3135       break;
3136     }
3137   }
3138 };
3139 
3140 #ifndef NDEBUG
3141 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
3142   AM.print(OS);
3143   return OS;
3144 }
3145 #endif
3146 
3147 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3148 void ExtAddrMode::print(raw_ostream &OS) const {
3149   bool NeedPlus = false;
3150   OS << "[";
3151   if (InBounds)
3152     OS << "inbounds ";
3153   if (BaseGV) {
3154     OS << "GV:";
3155     BaseGV->printAsOperand(OS, /*PrintType=*/false);
3156     NeedPlus = true;
3157   }
3158 
3159   if (BaseOffs) {
3160     OS << (NeedPlus ? " + " : "") << BaseOffs;
3161     NeedPlus = true;
3162   }
3163 
3164   if (BaseReg) {
3165     OS << (NeedPlus ? " + " : "") << "Base:";
3166     BaseReg->printAsOperand(OS, /*PrintType=*/false);
3167     NeedPlus = true;
3168   }
3169   if (Scale) {
3170     OS << (NeedPlus ? " + " : "") << Scale << "*";
3171     ScaledReg->printAsOperand(OS, /*PrintType=*/false);
3172   }
3173 
3174   OS << ']';
3175 }
3176 
3177 LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
3178   print(dbgs());
3179   dbgs() << '\n';
3180 }
3181 #endif
3182 
3183 } // end anonymous namespace
3184 
3185 namespace {
3186 
3187 /// This class provides transaction based operation on the IR.
3188 /// Every change made through this class is recorded in the internal state and
3189 /// can be undone (rollback) until commit is called.
3190 /// CGP does not check if instructions could be speculatively executed when
3191 /// moved. Preserving the original location would pessimize the debugging
3192 /// experience, as well as negatively impact the quality of sample PGO.
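///
/// A typical (purely illustrative) use of the transaction looks like:
/// @code
///   TypePromotionTransaction TPT(RemovedInsts);
///   auto RestorePt = TPT.getRestorationPoint();
///   TPT.mutateType(Inst, PromotedTy);   // speculative change
///   if (NotProfitable)
///     TPT.rollback(RestorePt);          // undo everything since RestorePt
///   else
///     TPT.commit();                     // keep the changes
/// @endcode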
3193 class TypePromotionTransaction {
3194   /// This represents the common interface of the individual transaction.
3195   /// Each class implements the logic for doing one specific modification on
3196   /// the IR via the TypePromotionTransaction.
3197   class TypePromotionAction {
3198   protected:
3199     /// The Instruction modified.
3200     Instruction *Inst;
3201 
3202   public:
3203     /// Constructor of the action.
3204     /// The constructor performs the related action on the IR.
3205     TypePromotionAction(Instruction *Inst) : Inst(Inst) {}
3206 
3207     virtual ~TypePromotionAction() = default;
3208 
3209     /// Undo the modification done by this action.
3210     /// When this method is called, the IR must be in the same state as it was
3211     /// before this action was applied.
3212     /// \pre Undoing the action works if and only if the IR is in the exact same
3213     /// state as it was directly after this action was applied.
3214     virtual void undo() = 0;
3215 
3216     /// Commit every change made by this action.
3217     /// When the action's effects on the IR are to be kept, it is important to
3218     /// call this function; otherwise hidden bookkeeping may be kept forever.
3219     virtual void commit() {
3220       // Nothing to be done, this action is not doing anything.
3221     }
3222   };
3223 
3224   /// Utility to remember the position of an instruction.
3225   class InsertionHandler {
3226     /// Position of an instruction.
3227     /// Either the instruction:
3228     /// - is the first in its basic block, in which case BB is used, or
3229     /// - has a previous instruction, in which case PrevInst is used.
3230     union {
3231       Instruction *PrevInst;
3232       BasicBlock *BB;
3233     } Point;
3234     std::optional<DbgRecord::self_iterator> BeforeDbgRecord = std::nullopt;
3235 
3236     /// Remember whether or not the instruction had a previous instruction.
3237     bool HasPrevInstruction;
3238 
3239   public:
3240     /// Record the position of \p Inst.
3241     InsertionHandler(Instruction *Inst) {
3242       HasPrevInstruction = (Inst != &*(Inst->getParent()->begin()));
3243       BasicBlock *BB = Inst->getParent();
3244 
3245       // Record where we would have to re-insert the instruction in the sequence
3246       // of DbgRecords, if we ended up reinserting.
3247       if (BB->IsNewDbgInfoFormat)
3248         BeforeDbgRecord = Inst->getDbgReinsertionPosition();
3249 
3250       if (HasPrevInstruction) {
3251         Point.PrevInst = &*std::prev(Inst->getIterator());
3252       } else {
3253         Point.BB = BB;
3254       }
3255     }
3256 
3257     /// Insert \p Inst at the recorded position.
3258     void insert(Instruction *Inst) {
3259       if (HasPrevInstruction) {
3260         if (Inst->getParent())
3261           Inst->removeFromParent();
3262         Inst->insertAfter(&*Point.PrevInst);
3263       } else {
3264         BasicBlock::iterator Position = Point.BB->getFirstInsertionPt();
3265         if (Inst->getParent())
3266           Inst->moveBefore(*Point.BB, Position);
3267         else
3268           Inst->insertBefore(*Point.BB, Position);
3269       }
3270 
3271       Inst->getParent()->reinsertInstInDbgRecords(Inst, BeforeDbgRecord);
3272     }
3273   };
3274 
3275   /// Move an instruction before another.
3276   class InstructionMoveBefore : public TypePromotionAction {
3277     /// Original position of the instruction.
3278     InsertionHandler Position;
3279 
3280   public:
3281     /// Move \p Inst before \p Before.
3282     InstructionMoveBefore(Instruction *Inst, Instruction *Before)
3283         : TypePromotionAction(Inst), Position(Inst) {
3284       LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before
3285                         << "\n");
3286       Inst->moveBefore(Before);
3287     }
3288 
3289     /// Move the instruction back to its original position.
3290     void undo() override {
3291       LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
3292       Position.insert(Inst);
3293     }
3294   };
3295 
3296   /// Set the operand of an instruction with a new value.
3297   class OperandSetter : public TypePromotionAction {
3298     /// Original operand of the instruction.
3299     Value *Origin;
3300 
3301     /// Index of the modified instruction.
3302     unsigned Idx;
3303 
3304   public:
3305     /// Set \p Idx operand of \p Inst with \p NewVal.
3306     OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
3307         : TypePromotionAction(Inst), Idx(Idx) {
3308       LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
3309                         << "for:" << *Inst << "\n"
3310                         << "with:" << *NewVal << "\n");
3311       Origin = Inst->getOperand(Idx);
3312       Inst->setOperand(Idx, NewVal);
3313     }
3314 
3315     /// Restore the original value of the instruction.
3316     void undo() override {
3317       LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
3318                         << "for: " << *Inst << "\n"
3319                         << "with: " << *Origin << "\n");
3320       Inst->setOperand(Idx, Origin);
3321     }
3322   };
3323 
3324   /// Hide the operands of an instruction.
3325   /// Act as if this instruction were not using any of its operands.
3326   class OperandsHider : public TypePromotionAction {
3327     /// The list of original operands.
3328     SmallVector<Value *, 4> OriginalValues;
3329 
3330   public:
3331     /// Remove \p Inst from the uses of the operands of \p Inst.
3332     OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
3333       LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
3334       unsigned NumOpnds = Inst->getNumOperands();
3335       OriginalValues.reserve(NumOpnds);
3336       for (unsigned It = 0; It < NumOpnds; ++It) {
3337         // Save the current operand.
3338         Value *Val = Inst->getOperand(It);
3339         OriginalValues.push_back(Val);
3340         // Set a dummy one.
3341         // We could use OperandSetter here, but that would imply an overhead
3342         // that we are not willing to pay.
3343         Inst->setOperand(It, PoisonValue::get(Val->getType()));
3344       }
3345     }
3346 
3347     /// Restore the original list of uses.
3348     void undo() override {
3349       LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
3350       for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
3351         Inst->setOperand(It, OriginalValues[It]);
3352     }
3353   };
3354 
3355   /// Build a truncate instruction.
3356   class TruncBuilder : public TypePromotionAction {
3357     Value *Val;
3358 
3359   public:
3360     /// Build a truncate instruction of \p Opnd producing a \p Ty
3361     /// result.
3362     /// trunc Opnd to Ty.
3363     TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
3364       IRBuilder<> Builder(Opnd);
3365       Builder.SetCurrentDebugLocation(DebugLoc());
3366       Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
3367       LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
3368     }
3369 
3370     /// Get the built value.
3371     Value *getBuiltValue() { return Val; }
3372 
3373     /// Remove the built instruction.
3374     void undo() override {
3375       LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
3376       if (Instruction *IVal = dyn_cast<Instruction>(Val))
3377         IVal->eraseFromParent();
3378     }
3379   };
3380 
3381   /// Build a sign extension instruction.
3382   class SExtBuilder : public TypePromotionAction {
3383     Value *Val;
3384 
3385   public:
3386     /// Build a sign extension instruction of \p Opnd producing a \p Ty
3387     /// result.
3388     /// sext Opnd to Ty.
3389     SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
3390         : TypePromotionAction(InsertPt) {
3391       IRBuilder<> Builder(InsertPt);
3392       Val = Builder.CreateSExt(Opnd, Ty, "promoted");
3393       LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
3394     }
3395 
3396     /// Get the built value.
3397     Value *getBuiltValue() { return Val; }
3398 
3399     /// Remove the built instruction.
3400     void undo() override {
3401       LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
3402       if (Instruction *IVal = dyn_cast<Instruction>(Val))
3403         IVal->eraseFromParent();
3404     }
3405   };
3406 
3407   /// Build a zero extension instruction.
3408   class ZExtBuilder : public TypePromotionAction {
3409     Value *Val;
3410 
3411   public:
3412     /// Build a zero extension instruction of \p Opnd producing a \p Ty
3413     /// result.
3414     /// zext Opnd to Ty.
3415     ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
3416         : TypePromotionAction(InsertPt) {
3417       IRBuilder<> Builder(InsertPt);
3418       Builder.SetCurrentDebugLocation(DebugLoc());
3419       Val = Builder.CreateZExt(Opnd, Ty, "promoted");
3420       LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
3421     }
3422 
3423     /// Get the built value.
3424     Value *getBuiltValue() { return Val; }
3425 
3426     /// Remove the built instruction.
3427     void undo() override {
3428       LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
3429       if (Instruction *IVal = dyn_cast<Instruction>(Val))
3430         IVal->eraseFromParent();
3431     }
3432   };
3433 
3434   /// Mutate an instruction to another type.
3435   class TypeMutator : public TypePromotionAction {
3436     /// Record the original type.
3437     Type *OrigTy;
3438 
3439   public:
3440     /// Mutate the type of \p Inst into \p NewTy.
3441     TypeMutator(Instruction *Inst, Type *NewTy)
3442         : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
3443       LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
3444                         << "\n");
3445       Inst->mutateType(NewTy);
3446     }
3447 
3448     /// Mutate the instruction back to its original type.
3449     void undo() override {
3450       LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
3451                         << "\n");
3452       Inst->mutateType(OrigTy);
3453     }
3454   };
3455 
3456   /// Replace the uses of an instruction by another instruction.
3457   class UsesReplacer : public TypePromotionAction {
3458     /// Helper structure to keep track of the replaced uses.
3459     struct InstructionAndIdx {
3460       /// The user instruction that referenced the replaced value.
3461       Instruction *Inst;
3462 
3463       /// The operand index at which the replaced value is used by Inst.
3464       unsigned Idx;
3465 
3466       InstructionAndIdx(Instruction *Inst, unsigned Idx)
3467           : Inst(Inst), Idx(Idx) {}
3468     };
3469 
3470     /// Keep track of the original uses (pair Instruction, Index).
3471     SmallVector<InstructionAndIdx, 4> OriginalUses;
3472     /// Keep track of the debug users.
3473     SmallVector<DbgValueInst *, 1> DbgValues;
3474     /// And non-instruction debug-users too.
3475     SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;
3476 
3477     /// Keep track of the new value so that we can undo it by replacing
3478     /// instances of the new value with the original value.
3479     Value *New;
3480 
3481     using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator;
3482 
3483   public:
3484     /// Replace all the uses of \p Inst with \p New.
3485     UsesReplacer(Instruction *Inst, Value *New)
3486         : TypePromotionAction(Inst), New(New) {
3487       LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
3488                         << "\n");
3489       // Record the original uses.
3490       for (Use &U : Inst->uses()) {
3491         Instruction *UserI = cast<Instruction>(U.getUser());
3492         OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
3493       }
3494       // Record the debug uses separately. They are not in the instruction's
3495       // use list, but they are replaced by RAUW.
3496       findDbgValues(DbgValues, Inst, &DbgVariableRecords);
3497 
3498       // Now, we can replace the uses.
3499       Inst->replaceAllUsesWith(New);
3500     }
3501 
3502     /// Reassign the original uses of Inst to Inst.
3503     void undo() override {
3504       LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
3505       for (InstructionAndIdx &Use : OriginalUses)
3506         Use.Inst->setOperand(Use.Idx, Inst);
3507       // RAUW has replaced all original uses with references to the new value,
3508       // including the debug uses. Since we are undoing the replacements,
3509       // the original debug uses must also be reinstated to maintain the
3510       // correctness and utility of debug value instructions.
3511       for (auto *DVI : DbgValues)
3512         DVI->replaceVariableLocationOp(New, Inst);
3513       // Similar story with DbgVariableRecords, the non-instruction
3514       // representation of dbg.values.
3515       for (DbgVariableRecord *DVR : DbgVariableRecords)
3516         DVR->replaceVariableLocationOp(New, Inst);
3517     }
3518   };
3519 
3520   /// Remove an instruction from the IR.
3521   class InstructionRemover : public TypePromotionAction {
3522     /// Original position of the instruction.
3523     InsertionHandler Inserter;
3524 
3525     /// Helper structure to hide all the links to the instruction. In other
3526     /// words, this helps to act as if the instruction were removed.
3527     OperandsHider Hider;
3528 
3529     /// Keep track of the uses replaced, if any.
3530     UsesReplacer *Replacer = nullptr;
3531 
3532     /// Keep track of instructions removed.
3533     SetOfInstrs &RemovedInsts;
3534 
3535   public:
3536     /// Remove all references to \p Inst and optionally replace all its
3537     /// uses with New.
3538     /// \p RemovedInsts Keep track of the instructions removed by this Action.
3539     /// \pre If !Inst->use_empty(), then New != nullptr
3540     InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
3541                        Value *New = nullptr)
3542         : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
3543           RemovedInsts(RemovedInsts) {
3544       if (New)
3545         Replacer = new UsesReplacer(Inst, New);
3546       LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
3547       RemovedInsts.insert(Inst);
3548       /// The instructions removed here will be freed after completing
3549       /// optimizeBlock() for all blocks as we need to keep track of the
3550       /// removed instructions during promotion.
3551       Inst->removeFromParent();
3552     }
3553 
3554     ~InstructionRemover() override { delete Replacer; }
3555 
3556     InstructionRemover &operator=(const InstructionRemover &other) = delete;
3557     InstructionRemover(const InstructionRemover &other) = delete;
3558 
3559     /// Resurrect the instruction and reassign it to the proper uses if a new
3560     /// value was provided when this action was built.
3561     void undo() override {
3562       LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
3563       Inserter.insert(Inst);
3564       if (Replacer)
3565         Replacer->undo();
3566       Hider.undo();
3567       RemovedInsts.erase(Inst);
3568     }
3569   };
3570 
3571 public:
3572   /// Restoration point.
3573   /// The restoration point is a pointer to an action instead of an iterator
3574   /// because the iterator may be invalidated but not the pointer.
3575   using ConstRestorationPt = const TypePromotionAction *;
3576 
3577   TypePromotionTransaction(SetOfInstrs &RemovedInsts)
3578       : RemovedInsts(RemovedInsts) {}
3579 
3580   /// Commit every change made in this transaction. Return true if any change
3581   /// happened.
3582   bool commit();
3583 
3584   /// Undo all the changes made after the given point.
3585   void rollback(ConstRestorationPt Point);
3586 
3587   /// Get the current restoration point.
3588   ConstRestorationPt getRestorationPoint() const;
3589 
3590   /// \name API for IR modification with state keeping to support rollback.
3591   /// @{
3592   /// Same as Instruction::setOperand.
3593   void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
3594 
3595   /// Same as Instruction::eraseFromParent.
3596   void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
3597 
3598   /// Same as Value::replaceAllUsesWith.
3599   void replaceAllUsesWith(Instruction *Inst, Value *New);
3600 
3601   /// Same as Value::mutateType.
3602   void mutateType(Instruction *Inst, Type *NewTy);
3603 
3604   /// Same as IRBuilder::CreateTrunc.
3605   Value *createTrunc(Instruction *Opnd, Type *Ty);
3606 
3607   /// Same as IRBuilder::CreateSExt.
3608   Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
3609 
3610   /// Same as IRBuilder::CreateZExt.
3611   Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
3612 
3613 private:
3614   /// The ordered list of actions made so far.
3615   SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
3616 
3617   using CommitPt =
3618       SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator;
3619 
3620   SetOfInstrs &RemovedInsts;
3621 };
3622 
3623 } // end anonymous namespace
3624 
3625 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
3626                                           Value *NewVal) {
3627   Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>(
3628       Inst, Idx, NewVal));
3629 }
3630 
3631 void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
3632                                                 Value *NewVal) {
3633   Actions.push_back(
3634       std::make_unique<TypePromotionTransaction::InstructionRemover>(
3635           Inst, RemovedInsts, NewVal));
3636 }
3637 
3638 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
3639                                                   Value *New) {
3640   Actions.push_back(
3641       std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
3642 }
3643 
3644 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
3645   Actions.push_back(
3646       std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
3647 }
3648 
3649 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, Type *Ty) {
3650   std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
3651   Value *Val = Ptr->getBuiltValue();
3652   Actions.push_back(std::move(Ptr));
3653   return Val;
3654 }
3655 
3656 Value *TypePromotionTransaction::createSExt(Instruction *Inst, Value *Opnd,
3657                                             Type *Ty) {
3658   std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
3659   Value *Val = Ptr->getBuiltValue();
3660   Actions.push_back(std::move(Ptr));
3661   return Val;
3662 }
3663 
3664 Value *TypePromotionTransaction::createZExt(Instruction *Inst, Value *Opnd,
3665                                             Type *Ty) {
3666   std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
3667   Value *Val = Ptr->getBuiltValue();
3668   Actions.push_back(std::move(Ptr));
3669   return Val;
3670 }
3671 
3672 TypePromotionTransaction::ConstRestorationPt
3673 TypePromotionTransaction::getRestorationPoint() const {
3674   return !Actions.empty() ? Actions.back().get() : nullptr;
3675 }
3676 
3677 bool TypePromotionTransaction::commit() {
3678   for (std::unique_ptr<TypePromotionAction> &Action : Actions)
3679     Action->commit();
3680   bool Modified = !Actions.empty();
3681   Actions.clear();
3682   return Modified;
3683 }
3684 
3685 void TypePromotionTransaction::rollback(
3686     TypePromotionTransaction::ConstRestorationPt Point) {
3687   while (!Actions.empty() && Point != Actions.back().get()) {
3688     std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
3689     Curr->undo();
3690   }
3691 }
3692 
3693 namespace {
3694 
3695 /// A helper class for matching addressing modes.
3696 ///
3697 /// This encapsulates the logic for matching the target-legal addressing modes.
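/// For example, on a target where [%base + 4 * %idx + 16] is a legal memory
/// operand, the matcher can fold the add, shift and GEP instructions that
/// compute such an address into a single ExtAddrMode.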
3698 class AddressingModeMatcher {
3699   SmallVectorImpl<Instruction *> &AddrModeInsts;
3700   const TargetLowering &TLI;
3701   const TargetRegisterInfo &TRI;
3702   const DataLayout &DL;
3703   const LoopInfo &LI;
3704   const std::function<const DominatorTree &()> getDTFn;
3705 
3706   /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
3707   /// the memory instruction that we're computing this address for.
3708   Type *AccessTy;
3709   unsigned AddrSpace;
3710   Instruction *MemoryInst;
3711 
3712   /// This is the addressing mode that we're building up. This is
3713   /// part of the return value of this addressing mode matching stuff.
3714   ExtAddrMode &AddrMode;
3715 
3716   /// The instructions inserted by other CodeGenPrepare optimizations.
3717   const SetOfInstrs &InsertedInsts;
3718 
3719   /// A map from the instructions to their type before promotion.
3720   InstrToOrigTy &PromotedInsts;
3721 
3722   /// The ongoing transaction where every action should be registered.
3723   TypePromotionTransaction &TPT;
3724 
3725   // A GEP which has too large offset to be folded into the addressing mode.
3726   std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP;
3727 
3728   /// This is set to true when we should not do profitability checks.
3729   /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
3730   bool IgnoreProfitability;
3731 
3732   /// True if we are optimizing for size.
3733   bool OptSize = false;
3734 
3735   ProfileSummaryInfo *PSI;
3736   BlockFrequencyInfo *BFI;
3737 
3738   AddressingModeMatcher(
3739       SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI,
3740       const TargetRegisterInfo &TRI, const LoopInfo &LI,
3741       const std::function<const DominatorTree &()> getDTFn, Type *AT,
3742       unsigned AS, Instruction *MI, ExtAddrMode &AM,
3743       const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts,
3744       TypePromotionTransaction &TPT,
3745       std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3746       bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
3747       : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
3748         DL(MI->getDataLayout()), LI(LI), getDTFn(getDTFn),
3749         AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM),
3750         InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT),
3751         LargeOffsetGEP(LargeOffsetGEP), OptSize(OptSize), PSI(PSI), BFI(BFI) {
3752     IgnoreProfitability = false;
3753   }
3754 
3755 public:
3756   /// Find the maximal addressing mode that a load/store of V can fold,
3757   /// given an access type of AccessTy.  This returns a list of involved
3758   /// instructions in AddrModeInsts.
3759   /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
3760   /// optimizations.
3761   /// \p PromotedInsts maps the instructions to their type before promotion.
3762   /// \p TPT The ongoing transaction where every action should be registered.
3763   static ExtAddrMode
3764   Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst,
3765         SmallVectorImpl<Instruction *> &AddrModeInsts,
3766         const TargetLowering &TLI, const LoopInfo &LI,
3767         const std::function<const DominatorTree &()> getDTFn,
3768         const TargetRegisterInfo &TRI, const SetOfInstrs &InsertedInsts,
3769         InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT,
3770         std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3771         bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
3772     ExtAddrMode Result;
3773 
3774     bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, LI, getDTFn,
3775                                          AccessTy, AS, MemoryInst, Result,
3776                                          InsertedInsts, PromotedInsts, TPT,
3777                                          LargeOffsetGEP, OptSize, PSI, BFI)
3778                        .matchAddr(V, 0);
3779     (void)Success;
3780     assert(Success && "Couldn't select *anything*?");
3781     return Result;
3782   }
3783 
3784 private:
3785   bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
3786   bool matchAddr(Value *Addr, unsigned Depth);
3787   bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth,
3788                           bool *MovedAway = nullptr);
3789   bool isProfitableToFoldIntoAddressingMode(Instruction *I,
3790                                             ExtAddrMode &AMBefore,
3791                                             ExtAddrMode &AMAfter);
3792   bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
3793   bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
3794                              Value *PromotedOperand) const;
3795 };
3796 
3797 class PhiNodeSet;
3798 
3799 /// An iterator for PhiNodeSet.
3800 class PhiNodeSetIterator {
3801   PhiNodeSet *const Set;
3802   size_t CurrentIndex = 0;
3803 
3804 public:
3805   /// The constructor. Start should point to either a valid element, or be equal
3806   /// to the size of the underlying SmallVector of the PhiNodeSet.
3807   PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start);
3808   PHINode *operator*() const;
3809   PhiNodeSetIterator &operator++();
3810   bool operator==(const PhiNodeSetIterator &RHS) const;
3811   bool operator!=(const PhiNodeSetIterator &RHS) const;
3812 };
3813 
3814 /// Keeps a set of PHINodes.
3815 ///
3816 /// This is a minimal set implementation for a specific use case:
3817 /// It is very fast when there are very few elements, but also provides good
3818 /// performance when there are many. It is similar to SmallPtrSet, but also
3819 /// provides iteration by insertion order, which is deterministic and stable
3820 /// across runs. It is also similar to SmallSetVector, but supports removing
3821 /// elements in O(1) time. This is achieved by not actually removing elements
3822 /// from the underlying vector, which comes at the cost of using more memory,
3823 /// but that is fine, since PhiNodeSets are used as short-lived objects.
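/// For example, erase() only removes the element from NodeMap; NodeList keeps
/// the stale pointer, and iteration skips it because only entries whose NodeMap
/// index still matches their NodeList position are considered valid.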
3824 class PhiNodeSet {
3825   friend class PhiNodeSetIterator;
3826 
3827   using MapType = SmallDenseMap<PHINode *, size_t, 32>;
3828   using iterator = PhiNodeSetIterator;
3829 
3830   /// Keeps the elements in the order of their insertion in the underlying
3831   /// vector. To achieve constant time removal, it never deletes any element.
3832   SmallVector<PHINode *, 32> NodeList;
3833 
3834   /// Keeps the elements in the underlying set implementation. This (and not the
3835   /// NodeList defined above) is the source of truth on whether an element
3836   /// is actually in the collection.
3837   MapType NodeMap;
3838 
3839   /// Points to the first valid (not deleted) element when the set is not empty
3840   /// and the value is not zero. Equals the size of the underlying vector
3841   /// when the set is empty. When the value is 0, as in the beginning, the
3842   /// first element may or may not be valid.
3843   size_t FirstValidElement = 0;
3844 
3845 public:
3846   /// Inserts a new element to the collection.
3847   /// \returns true if the element is actually added, i.e. was not in the
3848   /// collection before the operation.
3849   bool insert(PHINode *Ptr) {
3850     if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) {
3851       NodeList.push_back(Ptr);
3852       return true;
3853     }
3854     return false;
3855   }
3856 
3857   /// Removes the element from the collection.
3858   /// \returns whether the element is actually removed, i.e. was in the
3859   /// collection before the operation.
3860   bool erase(PHINode *Ptr) {
3861     if (NodeMap.erase(Ptr)) {
3862       SkipRemovedElements(FirstValidElement);
3863       return true;
3864     }
3865     return false;
3866   }
3867 
3868   /// Removes all elements and clears the collection.
3869   void clear() {
3870     NodeMap.clear();
3871     NodeList.clear();
3872     FirstValidElement = 0;
3873   }
3874 
3875   /// \returns an iterator that will iterate the elements in the order of
3876   /// insertion.
3877   iterator begin() {
3878     if (FirstValidElement == 0)
3879       SkipRemovedElements(FirstValidElement);
3880     return PhiNodeSetIterator(this, FirstValidElement);
3881   }
3882 
3883   /// \returns an iterator that points to the end of the collection.
3884   iterator end() { return PhiNodeSetIterator(this, NodeList.size()); }
3885 
3886   /// Returns the number of elements in the collection.
3887   size_t size() const { return NodeMap.size(); }
3888 
3889   /// \returns 1 if the given element is in the collection, and 0 otherwise.
3890   size_t count(PHINode *Ptr) const { return NodeMap.count(Ptr); }
3891 
3892 private:
3893   /// Updates the CurrentIndex so that it will point to a valid element.
3894   ///
3895   /// If the element of NodeList at CurrentIndex is valid, it does not
3896   /// change it. If there are no more valid elements, it updates CurrentIndex
3897   /// to point to the end of the NodeList.
3898   void SkipRemovedElements(size_t &CurrentIndex) {
3899     while (CurrentIndex < NodeList.size()) {
3900       auto it = NodeMap.find(NodeList[CurrentIndex]);
3901       // If the element has been deleted and added again later, NodeMap will
3902       // point to a different index, so CurrentIndex will still be invalid.
3903       if (it != NodeMap.end() && it->second == CurrentIndex)
3904         break;
3905       ++CurrentIndex;
3906     }
3907   }
3908 };
3909 
3910 PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start)
3911     : Set(Set), CurrentIndex(Start) {}
3912 
3913 PHINode *PhiNodeSetIterator::operator*() const {
3914   assert(CurrentIndex < Set->NodeList.size() &&
3915          "PhiNodeSet access out of range");
3916   return Set->NodeList[CurrentIndex];
3917 }
3918 
3919 PhiNodeSetIterator &PhiNodeSetIterator::operator++() {
3920   assert(CurrentIndex < Set->NodeList.size() &&
3921          "PhiNodeSet access out of range");
3922   ++CurrentIndex;
3923   Set->SkipRemovedElements(CurrentIndex);
3924   return *this;
3925 }
3926 
3927 bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const {
3928   return CurrentIndex == RHS.CurrentIndex;
3929 }
3930 
3931 bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const {
3932   return !((*this) == RHS);
3933 }
3934 
3935 /// Keep track of simplification of Phi nodes.
3936 /// Accepts the set of all phi nodes and erases a phi node from the set
3937 /// if it is simplified.
3938 class SimplificationTracker {
3939   DenseMap<Value *, Value *> Storage;
3940   const SimplifyQuery &SQ;
3941   // Tracks newly created Phi nodes. The elements are iterated by insertion
3942   // order.
3943   PhiNodeSet AllPhiNodes;
3944   // Tracks newly created Select nodes.
3945   SmallPtrSet<SelectInst *, 32> AllSelectNodes;
3946 
3947 public:
3948   SimplificationTracker(const SimplifyQuery &sq) : SQ(sq) {}
3949 
3950   Value *Get(Value *V) {
3951     do {
3952       auto SV = Storage.find(V);
3953       if (SV == Storage.end())
3954         return V;
3955       V = SV->second;
3956     } while (true);
3957   }
3958 
3959   Value *Simplify(Value *Val) {
3960     SmallVector<Value *, 32> WorkList;
3961     SmallPtrSet<Value *, 32> Visited;
3962     WorkList.push_back(Val);
3963     while (!WorkList.empty()) {
3964       auto *P = WorkList.pop_back_val();
3965       if (!Visited.insert(P).second)
3966         continue;
3967       if (auto *PI = dyn_cast<Instruction>(P))
3968         if (Value *V = simplifyInstruction(cast<Instruction>(PI), SQ)) {
3969           for (auto *U : PI->users())
3970             WorkList.push_back(cast<Value>(U));
3971           Put(PI, V);
3972           PI->replaceAllUsesWith(V);
3973           if (auto *PHI = dyn_cast<PHINode>(PI))
3974             AllPhiNodes.erase(PHI);
3975           if (auto *Select = dyn_cast<SelectInst>(PI))
3976             AllSelectNodes.erase(Select);
3977           PI->eraseFromParent();
3978         }
3979     }
3980     return Get(Val);
3981   }
3982 
3983   void Put(Value *From, Value *To) { Storage.insert({From, To}); }
3984 
3985   void ReplacePhi(PHINode *From, PHINode *To) {
3986     Value *OldReplacement = Get(From);
3987     while (OldReplacement != From) {
3988       From = To;
3989       To = dyn_cast<PHINode>(OldReplacement);
3990       OldReplacement = Get(From);
3991     }
3992     assert(To && Get(To) == To && "Replacement PHI node is already replaced.");
3993     Put(From, To);
3994     From->replaceAllUsesWith(To);
3995     AllPhiNodes.erase(From);
3996     From->eraseFromParent();
3997   }
3998 
3999   PhiNodeSet &newPhiNodes() { return AllPhiNodes; }
4000 
4001   void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); }
4002 
4003   void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); }
4004 
4005   unsigned countNewPhiNodes() const { return AllPhiNodes.size(); }
4006 
4007   unsigned countNewSelectNodes() const { return AllSelectNodes.size(); }
4008 
4009   void destroyNewNodes(Type *CommonType) {
4010     // For safe erasing, replace the uses with dummy value first.
4011     auto *Dummy = PoisonValue::get(CommonType);
4012     for (auto *I : AllPhiNodes) {
4013       I->replaceAllUsesWith(Dummy);
4014       I->eraseFromParent();
4015     }
4016     AllPhiNodes.clear();
4017     for (auto *I : AllSelectNodes) {
4018       I->replaceAllUsesWith(Dummy);
4019       I->eraseFromParent();
4020     }
4021     AllSelectNodes.clear();
4022   }
4023 };
4024 
4025 /// A helper class for combining addressing modes.
4026 class AddressingModeCombiner {
4027   typedef DenseMap<Value *, Value *> FoldAddrToValueMapping;
4028   typedef std::pair<PHINode *, PHINode *> PHIPair;
4029 
4030 private:
4031   /// The addressing modes we've collected.
4032   SmallVector<ExtAddrMode, 16> AddrModes;
4033 
4034   /// The field in which the AddrModes differ, when we have more than one.
4035   ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField;
4036 
4037   /// Are the AddrModes that we have all just equal to their original values?
4038   bool AllAddrModesTrivial = true;
4039 
4040   /// Common Type for all different fields in addressing modes.
4041   Type *CommonType = nullptr;
4042 
4043   /// SimplifyQuery for simplifyInstruction utility.
4044   const SimplifyQuery &SQ;
4045 
4046   /// Original Address.
4047   Value *Original;
4048 
4049   /// Common value among addresses
4050   Value *CommonValue = nullptr;
4051 
4052 public:
4053   AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue)
4054       : SQ(_SQ), Original(OriginalValue) {}
4055 
4056   ~AddressingModeCombiner() { eraseCommonValueIfDead(); }
4057 
4058   /// Get the combined AddrMode
4059   const ExtAddrMode &getAddrMode() const { return AddrModes[0]; }
4060 
4061   /// Add a new AddrMode if it's compatible with the AddrModes we already
4062   /// have.
4063   /// \return True iff we succeeded in doing so.
4064   bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
4065     // Take note of whether we have any non-trivial AddrModes, as we need to
4066     // detect when all AddrModes are trivial: then we would introduce a phi or
4067     // select which just duplicates what's already there.
4068     AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial();
4069 
4070     // If this is the first addrmode then everything is fine.
4071     if (AddrModes.empty()) {
4072       AddrModes.emplace_back(NewAddrMode);
4073       return true;
4074     }
4075 
4076     // Figure out how different this is from the other address modes, which we
4077     // can do just by comparing against the first one given that we only care
4078     // about the cumulative difference.
4079     ExtAddrMode::FieldName ThisDifferentField =
4080         AddrModes[0].compare(NewAddrMode);
4081     if (DifferentField == ExtAddrMode::NoField)
4082       DifferentField = ThisDifferentField;
4083     else if (DifferentField != ThisDifferentField)
4084       DifferentField = ExtAddrMode::MultipleFields;
4085 
4086     // If NewAddrMode differs in more than one dimension we cannot handle it.
4087     bool CanHandle = DifferentField != ExtAddrMode::MultipleFields;
4088 
4089     // If Scale Field is different then we reject.
4090     CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField;
4091 
4092     // We also must reject the case when the base offset differs and the
4093     // scaled register is not null: we cannot handle it, because the merge of
4094     // the different offsets would have to be used as the ScaledReg.
4095     CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField ||
4096                               !NewAddrMode.ScaledReg);
4097 
4098     // We also must reject the case when the GV differs and a BaseReg is
4099     // already installed: we want to use the base reg to merge the GV values.
4100     CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField ||
4101                               !NewAddrMode.HasBaseReg);
4102 
4103     // Even if NewAddrMode is the same we still need to collect it because
4104     // the original value is different; later we will need all the original
4105     // values as anchors when finding the common Phi node.
4106     if (CanHandle)
4107       AddrModes.emplace_back(NewAddrMode);
4108     else
4109       AddrModes.clear();
4110 
4111     return CanHandle;
4112   }
4113 
4114   /// Combine the addressing modes we've collected into a single
4115   /// addressing mode.
4116   /// \return True iff we successfully combined them or we only had one so
4117   /// didn't need to combine them anyway.
4118   bool combineAddrModes() {
4119     // If we have no AddrModes then they can't be combined.
4120     if (AddrModes.size() == 0)
4121       return false;
4122 
4123     // A single AddrMode can trivially be combined.
4124     if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField)
4125       return true;
4126 
4127     // If the AddrModes we collected are all just equal to the value they are
4128     // derived from then combining them wouldn't do anything useful.
4129     if (AllAddrModesTrivial)
4130       return false;
4131 
4132     if (!addrModeCombiningAllowed())
4133       return false;
4134 
4135     // Build a map from <original value, basic block where we saw it> to the
4136     // value of the base register.
4137     // Bail out if there is no common type.
4138     FoldAddrToValueMapping Map;
4139     if (!initializeMap(Map))
4140       return false;
4141 
4142     CommonValue = findCommon(Map);
4143     if (CommonValue)
4144       AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes);
4145     return CommonValue != nullptr;
4146   }
4147 
4148 private:
4149   /// `CommonValue` may be a placeholder inserted by us.
4150   /// If the placeholder is not used, we should remove this dead instruction.
4151   void eraseCommonValueIfDead() {
4152     if (CommonValue && CommonValue->getNumUses() == 0)
4153       if (Instruction *CommonInst = dyn_cast<Instruction>(CommonValue))
4154         CommonInst->eraseFromParent();
4155   }
4156 
4157   /// Initialize Map with anchor values. For each address seen, we record the
4158   /// value of the differing field found in that address.
4159   /// At the same time we find a common type for the differing field, which we
4160   /// will use to create new Phi/Select nodes; it is kept in the CommonType
4161   /// field. Return false if no common type is found.
4162   bool initializeMap(FoldAddrToValueMapping &Map) {
4163     // Keep track of keys where the value is null. We will need to replace it
4164     // with constant null when we know the common type.
4165     SmallVector<Value *, 2> NullValue;
4166     Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType());
4167     for (auto &AM : AddrModes) {
4168       Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy);
4169       if (DV) {
4170         auto *Type = DV->getType();
4171         if (CommonType && CommonType != Type)
4172           return false;
4173         CommonType = Type;
4174         Map[AM.OriginalValue] = DV;
4175       } else {
4176         NullValue.push_back(AM.OriginalValue);
4177       }
4178     }
4179     assert(CommonType && "At least one non-null value must be!");
4180     for (auto *V : NullValue)
4181       Map[V] = Constant::getNullValue(CommonType);
4182     return true;
4183   }
4184 
4185   /// We have a mapping from a value A to another value B, where B was a field
4186   /// in the addressing mode represented by A. We also have an original value C
4187   /// representing the address we start with. Traversing from C through phis and
4188   /// selects, we ended up with the A's in the map. This utility function tries
4189   /// to find a value V which is a field in the addressing mode of C, such that
4190   /// traversing from V through phi nodes and selects ends up in the
4191   /// corresponding B values of the map, creating new Phis/Selects if needed.
4192   // The simple example looks as follows:
4193   // BB1:
4194   //   p1 = b1 + 40
4195   //   br cond BB2, BB3
4196   // BB2:
4197   //   p2 = b2 + 40
4198   //   br BB3
4199   // BB3:
4200   //   p = phi [p1, BB1], [p2, BB2]
4201   //   v = load p
4202   // Map is
4203   //   p1 -> b1
4204   //   p2 -> b2
4205   // Request is
4206   //   p -> ?
4207   // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3.
4208   Value *findCommon(FoldAddrToValueMapping &Map) {
4209     // Tracks the simplification of newly created phi nodes. We use this
4210     // mapping because we will add the newly created Phi nodes to AddrToBase.
4211     // Simplification of Phi nodes is recursive, so a Phi node may be
4212     // simplified after we have added it to AddrToBase. In practice this
4213     // simplification is only possible if the original phis/selects have not
4214     // been simplified yet.
4215     // Using this mapping we can find the current value in AddrToBase.
4216     SimplificationTracker ST(SQ);
4217 
4218     // First step, DFS to create PHI nodes for all intermediate blocks.
4219     // Also fill traverse order for the second step.
4220     SmallVector<Value *, 32> TraverseOrder;
4221     InsertPlaceholders(Map, TraverseOrder, ST);
4222 
4223     // Second step, fill the new nodes with merged values and simplify if possible.
4224     FillPlaceholders(Map, TraverseOrder, ST);
4225 
4226     if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) {
4227       ST.destroyNewNodes(CommonType);
4228       return nullptr;
4229     }
4230 
4231     // Now we'd like to match the new Phi nodes to existing ones.
4232     unsigned PhiNotMatchedCount = 0;
4233     if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
4234       ST.destroyNewNodes(CommonType);
4235       return nullptr;
4236     }
4237 
4238     auto *Result = ST.Get(Map.find(Original)->second);
4239     if (Result) {
4240       NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount;
4241       NumMemoryInstsSelectCreated += ST.countNewSelectNodes();
4242     }
4243     return Result;
4244   }
4245 
4246   /// Try to match PHI node to Candidate.
4247   /// Matcher tracks the matched Phi nodes.
4248   bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
4249                     SmallSetVector<PHIPair, 8> &Matcher,
4250                     PhiNodeSet &PhiNodesToMatch) {
4251     SmallVector<PHIPair, 8> WorkList;
4252     Matcher.insert({PHI, Candidate});
4253     SmallSet<PHINode *, 8> MatchedPHIs;
4254     MatchedPHIs.insert(PHI);
4255     WorkList.push_back({PHI, Candidate});
4256     SmallSet<PHIPair, 8> Visited;
4257     while (!WorkList.empty()) {
4258       auto Item = WorkList.pop_back_val();
4259       if (!Visited.insert(Item).second)
4260         continue;
4261       // We iterate over all incoming values of the Phi to compare them.
4262       // If the values are different, both of them are Phis, the first one is a
4263       // Phi we added (subject to match) and both live in the same basic block,
4264       // then we can match our pair if their values match. So we record that
4265       // these values match and add the pair to the work list to verify that.
4266       for (auto *B : Item.first->blocks()) {
4267         Value *FirstValue = Item.first->getIncomingValueForBlock(B);
4268         Value *SecondValue = Item.second->getIncomingValueForBlock(B);
4269         if (FirstValue == SecondValue)
4270           continue;
4271 
4272         PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue);
4273         PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue);
4274 
4275         // If one of them is not a Phi, or
4276         // the first one is not a Phi node from the set we'd like to match, or
4277         // the Phi nodes are from different basic blocks, then
4278         // we will not be able to match.
4279         if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) ||
4280             FirstPhi->getParent() != SecondPhi->getParent())
4281           return false;
4282 
4283         // If we already matched them then continue.
4284         if (Matcher.count({FirstPhi, SecondPhi}))
4285           continue;
4286         // So the values are different and not yet matched. We need them to
4287         // match. (But we register no more than one match per PHI node, so that
4288         // we won't later try to replace them twice.)
4289         if (MatchedPHIs.insert(FirstPhi).second)
4290           Matcher.insert({FirstPhi, SecondPhi});
4291         // But we must check it.
4292         WorkList.push_back({FirstPhi, SecondPhi});
4293       }
4294     }
4295     return true;
4296   }
4297 
4298   /// For the given set of PHI nodes (in the SimplificationTracker) try
4299   /// to find their equivalents.
4300   /// Returns false if this matching fails and creation of new Phi is disabled.
4301   bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes,
4302                    unsigned &PhiNotMatchedCount) {
4303     // Matched and PhiNodesToMatch iterate their elements in a deterministic
4304     // order, so the replacements (ReplacePhi) are also done in a deterministic
4305     // order.
4306     SmallSetVector<PHIPair, 8> Matched;
4307     SmallPtrSet<PHINode *, 8> WillNotMatch;
4308     PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes();
4309     while (PhiNodesToMatch.size()) {
4310       PHINode *PHI = *PhiNodesToMatch.begin();
4311 
4312       // Add ourselves: if no Phi node in the basic block matches, we do not match.
4313       WillNotMatch.clear();
4314       WillNotMatch.insert(PHI);
4315 
4316       // Traverse all Phis until we find an equivalent one or fail to do so.
4317       bool IsMatched = false;
4318       for (auto &P : PHI->getParent()->phis()) {
4319         // Skip new Phi nodes.
4320         if (PhiNodesToMatch.count(&P))
4321           continue;
4322         if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch)))
4323           break;
4324         // If it does not match, collect all Phi nodes from the matcher.
4325         // If we end up with no match, then all of these Phi nodes will not
4326         // match later.
4327         for (auto M : Matched)
4328           WillNotMatch.insert(M.first);
4329         Matched.clear();
4330       }
4331       if (IsMatched) {
4332         // Replace all matched values and erase them.
4333         for (auto MV : Matched)
4334           ST.ReplacePhi(MV.first, MV.second);
4335         Matched.clear();
4336         continue;
4337       }
4338       // If we are not allowed to create new nodes then bail out.
4339       if (!AllowNewPhiNodes)
4340         return false;
4341       // Just remove all seen values in matcher. They will not match anything.
4342       PhiNotMatchedCount += WillNotMatch.size();
4343       for (auto *P : WillNotMatch)
4344         PhiNodesToMatch.erase(P);
4345     }
4346     return true;
4347   }
4348   /// Fill the placeholders with values from predecessors and simplify them.
4349   void FillPlaceholders(FoldAddrToValueMapping &Map,
4350                         SmallVectorImpl<Value *> &TraverseOrder,
4351                         SimplificationTracker &ST) {
4352     while (!TraverseOrder.empty()) {
4353       Value *Current = TraverseOrder.pop_back_val();
4354       assert(Map.contains(Current) && "No node to fill!!!");
4355       Value *V = Map[Current];
4356 
4357       if (SelectInst *Select = dyn_cast<SelectInst>(V)) {
4358         // CurrentValue must also be a Select.
4359         auto *CurrentSelect = cast<SelectInst>(Current);
4360         auto *TrueValue = CurrentSelect->getTrueValue();
4361         assert(Map.contains(TrueValue) && "No True Value!");
4362         Select->setTrueValue(ST.Get(Map[TrueValue]));
4363         auto *FalseValue = CurrentSelect->getFalseValue();
4364         assert(Map.contains(FalseValue) && "No False Value!");
4365         Select->setFalseValue(ST.Get(Map[FalseValue]));
4366       } else {
4367         // Must be a Phi node then.
4368         auto *PHI = cast<PHINode>(V);
4369         // Fill the Phi node with values from predecessors.
4370         for (auto *B : predecessors(PHI->getParent())) {
4371           Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B);
4372           assert(Map.contains(PV) && "No predecessor Value!");
4373           PHI->addIncoming(ST.Get(Map[PV]), B);
4374         }
4375       }
4376       Map[Current] = ST.Simplify(V);
4377     }
4378   }
4379 
4380   /// Starting from the original value, recursively iterates over the def-use
4381   /// chain up to the known ending values represented in the map. For each
4382   /// traversed phi/select it inserts a placeholder Phi or Select.
4383   /// Reports all newly created Phi/Select nodes by adding them to the set.
4384   /// Also reports the order in which the values have been traversed.
4385   void InsertPlaceholders(FoldAddrToValueMapping &Map,
4386                           SmallVectorImpl<Value *> &TraverseOrder,
4387                           SimplificationTracker &ST) {
4388     SmallVector<Value *, 32> Worklist;
4389     assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) &&
4390            "Address must be a Phi or Select node");
4391     auto *Dummy = PoisonValue::get(CommonType);
4392     Worklist.push_back(Original);
4393     while (!Worklist.empty()) {
4394       Value *Current = Worklist.pop_back_val();
4395       // If it has already been visited or is an ending value, skip it.
4396       if (Map.contains(Current))
4397         continue;
4398       TraverseOrder.push_back(Current);
4399 
4400       // CurrentValue must be a Phi node or select. All others must be covered
4401       // by anchors.
4402       if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) {
4403         // Is it OK to get metadata from OrigSelect?!
4404         // Create a Select placeholder with dummy value.
4405         SelectInst *Select =
4406             SelectInst::Create(CurrentSelect->getCondition(), Dummy, Dummy,
4407                                CurrentSelect->getName(),
4408                                CurrentSelect->getIterator(), CurrentSelect);
4409         Map[Current] = Select;
4410         ST.insertNewSelect(Select);
4411         // We are interested in True and False values.
4412         Worklist.push_back(CurrentSelect->getTrueValue());
4413         Worklist.push_back(CurrentSelect->getFalseValue());
4414       } else {
4415         // It must be a Phi node then.
4416         PHINode *CurrentPhi = cast<PHINode>(Current);
4417         unsigned PredCount = CurrentPhi->getNumIncomingValues();
4418         PHINode *PHI =
4419             PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi->getIterator());
4420         Map[Current] = PHI;
4421         ST.insertNewPhi(PHI);
4422         append_range(Worklist, CurrentPhi->incoming_values());
4423       }
4424     }
4425   }
4426 
4427   bool addrModeCombiningAllowed() {
4428     if (DisableComplexAddrModes)
4429       return false;
4430     switch (DifferentField) {
4431     default:
4432       return false;
4433     case ExtAddrMode::BaseRegField:
4434       return AddrSinkCombineBaseReg;
4435     case ExtAddrMode::BaseGVField:
4436       return AddrSinkCombineBaseGV;
4437     case ExtAddrMode::BaseOffsField:
4438       return AddrSinkCombineBaseOffs;
4439     case ExtAddrMode::ScaledRegField:
4440       return AddrSinkCombineScaledReg;
4441     }
4442   }
4443 };
4444 } // end anonymous namespace
4445 
4446 /// Try adding ScaleReg*Scale to the current addressing mode.
4447 /// Return true and update AddrMode if this addr mode is legal for the target,
4448 /// false if not.
4449 bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
4450                                              unsigned Depth) {
4451   // If Scale is 1, then this is the same as adding ScaleReg to the addressing
4452   // mode.  Just process that directly.
4453   if (Scale == 1)
4454     return matchAddr(ScaleReg, Depth);
4455 
4456   // If the scale is 0, it takes nothing to add this.
4457   if (Scale == 0)
4458     return true;
4459 
4460   // If we already have a scale of this value, we can add to it, otherwise, we
4461   // need an available scale field.
4462   if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
4463     return false;
4464 
4465   ExtAddrMode TestAddrMode = AddrMode;
4466 
4467   // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
4468   // [A+B + A*7] -> [B+A*8].
4469   TestAddrMode.Scale += Scale;
4470   TestAddrMode.ScaledReg = ScaleReg;
4471 
4472   // If the new address isn't legal, bail out.
4473   if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
4474     return false;
4475 
4476   // It was legal, so commit it.
4477   AddrMode = TestAddrMode;
4478 
4479   // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
4480   // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
4481   // X*Scale + C*Scale to the addr mode. If we find an available IV increment,
4482   // do not go any further: we can reuse it and cannot eliminate it.
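  // Illustrative sketch (values hypothetical): with Scale == 4 and
  //   %scalereg = add i64 %x, 3
  // the mode [Base + %scalereg*4] can instead become [Base + 12 + %x*4],
  // folding the constant into BaseOffs, provided the target still accepts
  // the resulting mode.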
4483   ConstantInt *CI = nullptr;
4484   Value *AddLHS = nullptr;
4485   if (isa<Instruction>(ScaleReg) && // not a constant expr.
4486       match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) &&
4487       !isIVIncrement(ScaleReg, &LI) && CI->getValue().isSignedIntN(64)) {
4488     TestAddrMode.InBounds = false;
4489     TestAddrMode.ScaledReg = AddLHS;
4490     TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale;
4491 
4492     // If this addressing mode is legal, commit it and remember that we folded
4493     // this instruction.
4494     if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
4495       AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
4496       AddrMode = TestAddrMode;
4497       return true;
4498     }
4499     // Restore status quo.
4500     TestAddrMode = AddrMode;
4501   }
4502 
4503   // If this is an add recurrence with a constant step, return the increment
4504   // instruction and the canonicalized step.
4505   auto GetConstantStep =
4506       [this](const Value *V) -> std::optional<std::pair<Instruction *, APInt>> {
4507     auto *PN = dyn_cast<PHINode>(V);
4508     if (!PN)
4509       return std::nullopt;
4510     auto IVInc = getIVIncrement(PN, &LI);
4511     if (!IVInc)
4512       return std::nullopt;
4513     // TODO: The result of the intrinsics above is two's complement. However, when
4514     // the IV increment is expressed as an add or sub, iv.next is potentially a
4515     // poison value. If it has nuw or nsw flags, we need to make sure that these
4516     // flags are inferrable at the point of the memory instruction; otherwise we
4517     // would replace a well-defined two's complement computation with poison. To
4518     // avoid the potentially complex analysis needed to prove this, we reject such cases.
4519     if (auto *OIVInc = dyn_cast<OverflowingBinaryOperator>(IVInc->first))
4520       if (OIVInc->hasNoSignedWrap() || OIVInc->hasNoUnsignedWrap())
4521         return std::nullopt;
4522     if (auto *ConstantStep = dyn_cast<ConstantInt>(IVInc->second))
4523       return std::make_pair(IVInc->first, ConstantStep->getValue());
4524     return std::nullopt;
4525   };
4526 
4527   // Try to account for the following special case:
4528   // 1. ScaleReg is an inductive variable;
4529   // 2. We use it with non-zero offset;
4530   // 3. IV's increment is available at the point of memory instruction.
4531   //
4532   // In this case, we may reuse the IV increment instead of the IV Phi to
4533   // achieve the following advantages:
4534   // 1. If IV step matches the offset, we will have no need in the offset;
4535   // 2. Even if they don't match, we will reduce the overlap of living IV
4536   //    and IV increment, that will potentially lead to better register
4537   //    assignment.
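  // Illustrative sketch (names hypothetical): given
  //   %iv.next = add i64 %iv, 1
  // and an access at [Base + 4 + %iv*4], we can use [Base + %iv.next*4]
  // instead, since %iv.next*4 == %iv*4 + 4; when the step and offset agree
  // like this, the constant offset disappears entirely.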
4538   if (AddrMode.BaseOffs) {
4539     if (auto IVStep = GetConstantStep(ScaleReg)) {
4540       Instruction *IVInc = IVStep->first;
4541       // The following assert is important to ensure a lack of infinite loops.
4542       // This transform is (intentionally) the inverse of the one just above.
4543       // If they don't agree on the definition of an increment, we'd alternate
4544       // back and forth indefinitely.
4545       assert(isIVIncrement(IVInc, &LI) && "implied by GetConstantStep");
4546       APInt Step = IVStep->second;
4547       APInt Offset = Step * AddrMode.Scale;
4548       if (Offset.isSignedIntN(64)) {
4549         TestAddrMode.InBounds = false;
4550         TestAddrMode.ScaledReg = IVInc;
4551         TestAddrMode.BaseOffs -= Offset.getLimitedValue();
4552         // If this addressing mode is legal, commit it.
4553         // (Note that we defer the (expensive) domtree base legality check
4554         // to the very last possible point.)
4555         if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace) &&
4556             getDTFn().dominates(IVInc, MemoryInst)) {
4557           AddrModeInsts.push_back(cast<Instruction>(IVInc));
4558           AddrMode = TestAddrMode;
4559           return true;
4560         }
4561         // Restore status quo.
4562         TestAddrMode = AddrMode;
4563       }
4564     }
4565   }
4566 
4567   // Otherwise, just return what we have.
4568   return true;
4569 }
4570 
4571 /// This is a little filter, which returns true if an addressing computation
4572 /// involving I might be folded into a load/store accessing it.
4573 /// This doesn't need to be perfect, but needs to accept at least
4574 /// the set of instructions that MatchOperationAddr can.
4575 static bool MightBeFoldableInst(Instruction *I) {
4576   switch (I->getOpcode()) {
4577   case Instruction::BitCast:
4578   case Instruction::AddrSpaceCast:
4579     // Don't touch identity bitcasts.
4580     if (I->getType() == I->getOperand(0)->getType())
4581       return false;
4582     return I->getType()->isIntOrPtrTy();
4583   case Instruction::PtrToInt:
4584     // PtrToInt is always a noop, as we know that the int type is pointer sized.
4585     return true;
4586   case Instruction::IntToPtr:
4587     // We know the input is intptr_t, so this is foldable.
4588     return true;
4589   case Instruction::Add:
4590     return true;
4591   case Instruction::Mul:
4592   case Instruction::Shl:
4593     // Can only handle X*C and X << C.
4594     return isa<ConstantInt>(I->getOperand(1));
4595   case Instruction::GetElementPtr:
4596     return true;
4597   default:
4598     return false;
4599   }
4600 }
4601 
4602 /// Check whether or not \p Val is a legal instruction for \p TLI.
4603 /// \note \p Val is assumed to be the product of some type promotion.
4604 /// Therefore if \p Val has an undefined state in \p TLI, this is assumed
4605 /// to be legal, as the non-promoted value would have had the same state.
4606 static bool isPromotedInstructionLegal(const TargetLowering &TLI,
4607                                        const DataLayout &DL, Value *Val) {
4608   Instruction *PromotedInst = dyn_cast<Instruction>(Val);
4609   if (!PromotedInst)
4610     return false;
4611   int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
4612   // If the ISDOpcode is undefined, it was undefined before the promotion.
4613   if (!ISDOpcode)
4614     return true;
4615   // Otherwise, check if the promoted instruction is legal or not.
4616   return TLI.isOperationLegalOrCustom(
4617       ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
4618 }
4619 
4620 namespace {
4621 
4622 /// Helper class to perform type promotion.
4623 class TypePromotionHelper {
4624   /// Utility function to add a promoted instruction \p ExtOpnd to
4625   /// \p PromotedInsts and record the type of extension we have seen.
4626   static void addPromotedInst(InstrToOrigTy &PromotedInsts,
4627                               Instruction *ExtOpnd, bool IsSExt) {
4628     ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
4629     InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd);
4630     if (It != PromotedInsts.end()) {
4631       // If the new extension is the same as the original, the information in
4632       // PromotedInsts[ExtOpnd] is still correct.
4633       if (It->second.getInt() == ExtTy)
4634         return;
4635 
4636       // Now that the new extension is different from the old extension, we
4637       // invalidate the type information by setting the extension type to
4638       // BothExtension.
4639       ExtTy = BothExtension;
4640     }
4641     PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy);
4642   }
4643 
4644   /// Utility function to query the original type of instruction \p Opnd
4645   /// with a matched extension type. If the extension doesn't match, we
4646   /// cannot use the information we had on the original type.
4647   /// BothExtension doesn't match any extension type.
4648   static const Type *getOrigType(const InstrToOrigTy &PromotedInsts,
4649                                  Instruction *Opnd, bool IsSExt) {
4650     ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
4651     InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
4652     if (It != PromotedInsts.end() && It->second.getInt() == ExtTy)
4653       return It->second.getPointer();
4654     return nullptr;
4655   }
4656 
4657   /// Utility function to check whether or not a sign or zero extension
4658   /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
4659   /// either using the operands of \p Inst or promoting \p Inst.
4660   /// The type of the extension is defined by \p IsSExt.
4661   /// In other words, check if:
4662   /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
4663   /// #1 Promotion applies:
4664   /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
4665   /// #2 Operand reuses:
4666   /// ext opnd1 to ConsideredExtType.
4667   /// \p PromotedInsts maps the instructions to their type before promotion.
4668   static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
4669                             const InstrToOrigTy &PromotedInsts, bool IsSExt);
4670 
4671   /// Utility function to determine if \p OpIdx should be promoted when
4672   /// promoting \p Inst.
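  /// For example, the i1 condition operand of a select is never widened;
  /// only its true and false operands are considered for promotion.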
4673   static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
4674     return !(isa<SelectInst>(Inst) && OpIdx == 0);
4675   }
4676 
4677   /// Utility function to promote the operand of \p Ext when this
4678   /// operand is a promotable trunc or sext or zext.
4679   /// \p PromotedInsts maps the instructions to their type before promotion.
4680   /// \p CreatedInstsCost[out] contains the cost of all instructions
4681   /// created to promote the operand of Ext.
4682   /// Newly added extensions are inserted in \p Exts.
4683   /// Newly added truncates are inserted in \p Truncs.
4684   /// Should never be called directly.
4685   /// \return The promoted value which is used instead of Ext.
4686   static Value *promoteOperandForTruncAndAnyExt(
4687       Instruction *Ext, TypePromotionTransaction &TPT,
4688       InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4689       SmallVectorImpl<Instruction *> *Exts,
4690       SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);
4691 
4692   /// Utility function to promote the operand of \p Ext when this
4693   /// operand is promotable and is not a supported trunc or sext.
4694   /// \p PromotedInsts maps the instructions to their type before promotion.
4695   /// \p CreatedInstsCost[out] contains the cost of all the instructions
4696   /// created to promote the operand of Ext.
4697   /// Newly added extensions are inserted in \p Exts.
4698   /// Newly added truncates are inserted in \p Truncs.
4699   /// Should never be called directly.
4700   /// \return The promoted value which is used instead of Ext.
4701   static Value *promoteOperandForOther(Instruction *Ext,
4702                                        TypePromotionTransaction &TPT,
4703                                        InstrToOrigTy &PromotedInsts,
4704                                        unsigned &CreatedInstsCost,
4705                                        SmallVectorImpl<Instruction *> *Exts,
4706                                        SmallVectorImpl<Instruction *> *Truncs,
4707                                        const TargetLowering &TLI, bool IsSExt);
4708 
4709   /// \see promoteOperandForOther.
4710   static Value *signExtendOperandForOther(
4711       Instruction *Ext, TypePromotionTransaction &TPT,
4712       InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4713       SmallVectorImpl<Instruction *> *Exts,
4714       SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4715     return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
4716                                   Exts, Truncs, TLI, true);
4717   }
4718 
4719   /// \see promoteOperandForOther.
4720   static Value *zeroExtendOperandForOther(
4721       Instruction *Ext, TypePromotionTransaction &TPT,
4722       InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4723       SmallVectorImpl<Instruction *> *Exts,
4724       SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4725     return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
4726                                   Exts, Truncs, TLI, false);
4727   }
4728 
4729 public:
4730   /// Type for the utility function that promotes the operand of Ext.
4731   using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT,
4732                             InstrToOrigTy &PromotedInsts,
4733                             unsigned &CreatedInstsCost,
4734                             SmallVectorImpl<Instruction *> *Exts,
4735                             SmallVectorImpl<Instruction *> *Truncs,
4736                             const TargetLowering &TLI);
4737 
4738   /// Given a sign/zero extend instruction \p Ext, return the appropriate
4739   /// action to promote the operand of \p Ext instead of using Ext.
4740   /// \return NULL if no promotable action is possible with the current
4741   /// sign extension.
4742   /// \p InsertedInsts keeps track of all the instructions inserted by the
4743   /// other CodeGenPrepare optimizations. This information is important
4744   /// because we do not want to promote these instructions as CodeGenPrepare
4745   /// will reinsert them later. Thus creating an infinite loop: create/remove.
4746   /// \p PromotedInsts maps the instructions to their type before promotion.
4747   static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
4748                           const TargetLowering &TLI,
4749                           const InstrToOrigTy &PromotedInsts);
4750 };
4751 
4752 } // end anonymous namespace
4753 
4754 bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
4755                                         Type *ConsideredExtType,
4756                                         const InstrToOrigTy &PromotedInsts,
4757                                         bool IsSExt) {
4758   // The promotion helper does not know how to deal with vector types yet.
4759   // To be able to fix that, we would need to fix the places where we
4760   // statically extend, e.g., constants and such.
4761   if (Inst->getType()->isVectorTy())
4762     return false;
4763 
4764   // We can always get through zext.
4765   if (isa<ZExtInst>(Inst))
4766     return true;
4767 
4768   // sext(sext) is ok too.
4769   if (IsSExt && isa<SExtInst>(Inst))
4770     return true;
4771 
4772   // We can get through a binary operator if it is legal. In other words, the
4773   // binary operator must have a nuw or nsw flag.
4774   if (const auto *BinOp = dyn_cast<BinaryOperator>(Inst))
4775     if (isa<OverflowingBinaryOperator>(BinOp) &&
4776         ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
4777          (IsSExt && BinOp->hasNoSignedWrap())))
4778       return true;
4779 
4780   // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst))
4781   if ((Inst->getOpcode() == Instruction::And ||
4782        Inst->getOpcode() == Instruction::Or))
4783     return true;
4784 
4785   // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst))
4786   if (Inst->getOpcode() == Instruction::Xor) {
4787     // Make sure it is not a NOT.
4788     if (const auto *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)))
4789       if (!Cst->getValue().isAllOnes())
4790         return true;
4791   }
4792 
4793   // zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst))
4794   // It may change a poisoned value into a regular value, like
4795   //     zext i32 (shrl i8 %val, 12)  -->  shrl i32 (zext i8 %val), 12
4796   //          poisoned value                    regular value
4797   // It should be OK since undef covers valid value.
4798   if (Inst->getOpcode() == Instruction::LShr && !IsSExt)
4799     return true;
4800 
4801   // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst)
4802   // It may change a poisoned value into a regular value, like
4803   //     zext i32 (shl i8 %val, 12)  -->  shl i32 (zext i8 %val), 12
4804   //          poisoned value                    regular value
4805   // It should be OK since undef covers valid value.
4806   if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) {
4807     const auto *ExtInst = cast<const Instruction>(*Inst->user_begin());
4808     if (ExtInst->hasOneUse()) {
4809       const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin());
4810       if (AndInst && AndInst->getOpcode() == Instruction::And) {
4811         const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1));
4812         if (Cst &&
4813             Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth()))
4814           return true;
4815       }
4816     }
4817   }
4818 
4819   // Check if we can do the following simplification.
4820   // ext(trunc(opnd)) --> ext(opnd)
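  // Illustrative sketch (names hypothetical):
  //   %w = sext i16 %x to i64
  //   %t = trunc i64 %w to i32
  //   %e = sext i32 %t to i64
  // Here the trunc only drops bits that were sign-extension bits, so %e can
  // be rebuilt from the wider value and the trunc can be bypassed.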
4821   if (!isa<TruncInst>(Inst))
4822     return false;
4823 
4824   Value *OpndVal = Inst->getOperand(0);
4825   // Check if we can use this operand in the extension.
4826   // If the type is larger than the result type of the extension, we cannot.
4827   if (!OpndVal->getType()->isIntegerTy() ||
4828       OpndVal->getType()->getIntegerBitWidth() >
4829           ConsideredExtType->getIntegerBitWidth())
4830     return false;
4831 
4832   // If the operand of the truncate is not an instruction, we will not have
4833   // any information on the dropped bits.
4834   // (Actually we could for constant but it is not worth the extra logic).
4835   Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
4836   if (!Opnd)
4837     return false;
4838 
4839   // Check if the source of the type is narrow enough.
4840   // I.e., check that the trunc just drops extended bits of the same kind as
4841   // the extension.
4842   // #1 get the type of the operand and check the kind of the extended bits.
4843   const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt);
4844   if (OpndType)
4845     ;
4846   else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
4847     OpndType = Opnd->getOperand(0)->getType();
4848   else
4849     return false;
4850 
4851   // #2 check that the truncate just drops extended bits.
4852   return Inst->getType()->getIntegerBitWidth() >=
4853          OpndType->getIntegerBitWidth();
4854 }
4855 
4856 TypePromotionHelper::Action TypePromotionHelper::getAction(
4857     Instruction *Ext, const SetOfInstrs &InsertedInsts,
4858     const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
4859   assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
4860          "Unexpected instruction type");
4861   Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
4862   Type *ExtTy = Ext->getType();
4863   bool IsSExt = isa<SExtInst>(Ext);
4864   // If the operand of the extension is not an instruction, we cannot
4865   // get through.
4866   // If it is, check whether we can get through it.
4867   if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
4868     return nullptr;
4869 
4870   // Do not promote if the operand has been added by codegenprepare.
4871   // Otherwise, it means we are undoing an optimization that is likely to be
4872   // redone, thus causing a potential infinite loop.
4873   if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
4874     return nullptr;
4875 
4876   // SExt or Trunc instructions.
4877   // Return the related handler.
4878   if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
4879       isa<ZExtInst>(ExtOpnd))
4880     return promoteOperandForTruncAndAnyExt;
4881 
4882   // Regular instruction.
4883   // Abort early if we will have to insert non-free instructions.
4884   if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
4885     return nullptr;
4886   return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
4887 }
4888 
4889 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
4890     Instruction *SExt, TypePromotionTransaction &TPT,
4891     InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4892     SmallVectorImpl<Instruction *> *Exts,
4893     SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4894   // By construction, the operand of SExt is an instruction. Otherwise we cannot
4895   // get through it and this method should not be called.
4896   Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
4897   Value *ExtVal = SExt;
4898   bool HasMergedNonFreeExt = false;
4899   if (isa<ZExtInst>(SExtOpnd)) {
4900     // Replace s|zext(zext(opnd))
4901     // => zext(opnd).
4902     HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
4903     Value *ZExt =
4904         TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
4905     TPT.replaceAllUsesWith(SExt, ZExt);
4906     TPT.eraseInstruction(SExt);
4907     ExtVal = ZExt;
4908   } else {
4909     // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
4910     // => z|sext(opnd).
4911     TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
4912   }
4913   CreatedInstsCost = 0;
4914 
4915   // Remove dead code.
4916   if (SExtOpnd->use_empty())
4917     TPT.eraseInstruction(SExtOpnd);
4918 
4919   // Check if the extension is still needed.
4920   Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
4921   if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
4922     if (ExtInst) {
4923       if (Exts)
4924         Exts->push_back(ExtInst);
4925       CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
4926     }
4927     return ExtVal;
4928   }
4929 
4930   // At this point we have: ext ty opnd to ty.
4931   // Reassign the uses of ExtInst to the opnd and remove ExtInst.
4932   Value *NextVal = ExtInst->getOperand(0);
4933   TPT.eraseInstruction(ExtInst, NextVal);
4934   return NextVal;
4935 }
4936 
4937 Value *TypePromotionHelper::promoteOperandForOther(
4938     Instruction *Ext, TypePromotionTransaction &TPT,
4939     InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4940     SmallVectorImpl<Instruction *> *Exts,
4941     SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
4942     bool IsSExt) {
4943   // By construction, the operand of Ext is an instruction. Otherwise we cannot
4944   // get through it and this method should not be called.
4945   Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
4946   CreatedInstsCost = 0;
4947   if (!ExtOpnd->hasOneUse()) {
4948     // ExtOpnd will be promoted.
4949     // All its uses, but Ext, will need to use a truncated value of the
4950     // promoted version.
4951     // Create the truncate now.
4952     Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
4953     if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
4954       // Insert it just after the definition.
4955       ITrunc->moveAfter(ExtOpnd);
4956       if (Truncs)
4957         Truncs->push_back(ITrunc);
4958     }
4959 
4960     TPT.replaceAllUsesWith(ExtOpnd, Trunc);
4961     // Restore the operand of Ext (which has been replaced by the previous call
4962     // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
4963     TPT.setOperand(Ext, 0, ExtOpnd);
4964   }
4965 
4966   // Get through the Instruction:
4967   // 1. Update its type.
4968   // 2. Replace the uses of Ext by Inst.
4969   // 3. Extend each operand that needs to be extended.
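  // Illustrative sketch (names hypothetical): promoting through
  //   %a = add nuw i32 %x, %y
  //   %e = zext i32 %a to i64
  // mutates %a to operate on i64, replaces the uses of %e with %a, and
  // zero-extends %x and %y (or extends them statically if they are
  // constants).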
4970 
4971   // Remember the original type of the instruction before promotion.
4972   // This is useful to know that the high bits are sign extended bits.
4973   addPromotedInst(PromotedInsts, ExtOpnd, IsSExt);
4974   // Step #1.
4975   TPT.mutateType(ExtOpnd, Ext->getType());
4976   // Step #2.
4977   TPT.replaceAllUsesWith(Ext, ExtOpnd);
4978   // Step #3.
4979   LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n");
4980   for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
4981        ++OpIdx) {
4982     LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
4983     if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
4984         !shouldExtOperand(ExtOpnd, OpIdx)) {
4985       LLVM_DEBUG(dbgs() << "No need to propagate\n");
4986       continue;
4987     }
4988     // Check if we can statically extend the operand.
4989     Value *Opnd = ExtOpnd->getOperand(OpIdx);
4990     if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
4991       LLVM_DEBUG(dbgs() << "Statically extend\n");
4992       unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
4993       APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
4994                             : Cst->getValue().zext(BitWidth);
4995       TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
4996       continue;
4997     }
4998     // UndefValues are typed, so we have to statically sign extend them.
4999     if (isa<UndefValue>(Opnd)) {
5000       LLVM_DEBUG(dbgs() << "Statically extend\n");
5001       TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
5002       continue;
5003     }
5004 
5005     // Otherwise we have to explicitly extend the operand.
5006     Value *ValForExtOpnd = IsSExt
5007                                ? TPT.createSExt(ExtOpnd, Opnd, Ext->getType())
5008                                : TPT.createZExt(ExtOpnd, Opnd, Ext->getType());
5009     TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
5010     Instruction *InstForExtOpnd = dyn_cast<Instruction>(ValForExtOpnd);
5011     if (!InstForExtOpnd)
5012       continue;
5013 
5014     if (Exts)
5015       Exts->push_back(InstForExtOpnd);
5016 
5017     CreatedInstsCost += !TLI.isExtFree(InstForExtOpnd);
5018   }
5019   LLVM_DEBUG(dbgs() << "Extension is useless now\n");
5020   TPT.eraseInstruction(Ext);
5021   return ExtOpnd;
5022 }
5023 
5024 /// Check whether or not promoting an instruction to a wider type is profitable.
5025 /// \p NewCost gives the cost of extension instructions created by the
5026 /// promotion.
5027 /// \p OldCost gives the cost of extension instructions before the promotion
5028 /// plus the number of instructions that have been
5029 /// matched in the addressing mode the promotion.
5030 /// matched in the addressing mode thanks to the promotion.
5031 /// \return True if the promotion is profitable, false otherwise.
5032 bool AddressingModeMatcher::isPromotionProfitable(
5033     unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
5034   LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost
5035                     << '\n');
5036   // The cost of the new extensions is greater than the cost of the
5037   // old extension plus what we folded.
5038   // This is not profitable.
5039   if (NewCost > OldCost)
5040     return false;
5041   if (NewCost < OldCost)
5042     return true;
5043   // The promotion is neutral but it may help folding the sign extension in
5044   // loads for instance.
5045   // Check that we did not create an illegal instruction.
5046   return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
5047 }
5048 
5049 /// Given an instruction or constant expr, see if we can fold the operation
5050 /// into the addressing mode. If so, update the addressing mode and return
5051 /// true, otherwise return false without modifying AddrMode.
5052 /// If \p MovedAway is not NULL, it contains the information of whether or
5053 /// not AddrInst has to be folded into the addressing mode on success.
5054 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing
5055 /// mode, because it has been moved away.
5056 /// Thus AddrInst must not be added to the matched instructions.
5057 /// This state can happen when AddrInst is a sext, since it may be moved away.
5058 /// Therefore, AddrInst may not be valid when MovedAway is true and it must
5059 /// not be referenced anymore.
5060 bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
5061                                                unsigned Depth,
5062                                                bool *MovedAway) {
5063   // Avoid exponential behavior on extremely deep expression trees.
5064   if (Depth >= 5)
5065     return false;
5066 
5067   // By default, all matched instructions stay in place.
5068   if (MovedAway)
5069     *MovedAway = false;
5070 
5071   switch (Opcode) {
5072   case Instruction::PtrToInt:
5073     // PtrToInt is always a noop, as we know that the int type is pointer sized.
5074     return matchAddr(AddrInst->getOperand(0), Depth);
5075   case Instruction::IntToPtr: {
5076     auto AS = AddrInst->getType()->getPointerAddressSpace();
5077     auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
5078     // This inttoptr is a no-op if the integer type is pointer sized.
5079     if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
5080       return matchAddr(AddrInst->getOperand(0), Depth);
5081     return false;
5082   }
5083   case Instruction::BitCast:
5084     // BitCast is always a noop, and we can handle it as long as it is
5085     // int->int or pointer->pointer (we don't want int<->fp or something).
5086     if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() &&
5087         // Don't touch identity bitcasts.  These were probably put here by LSR,
5088         // and we don't want to mess around with them.  Assume it knows what it
5089         // is doing.
5090         AddrInst->getOperand(0)->getType() != AddrInst->getType())
5091       return matchAddr(AddrInst->getOperand(0), Depth);
5092     return false;
5093   case Instruction::AddrSpaceCast: {
5094     unsigned SrcAS =
5095         AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
5096     unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
5097     if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS))
5098       return matchAddr(AddrInst->getOperand(0), Depth);
5099     return false;
5100   }
5101   case Instruction::Add: {
5102     // Check to see if we can merge in one operand, then the other.  If so, we
5103     // win.
5104     ExtAddrMode BackupAddrMode = AddrMode;
5105     unsigned OldSize = AddrModeInsts.size();
5106     // Start a transaction at this point.
5107     // The LHS may match but not the RHS.
5108     // Therefore, we need a higher level restoration point to undo a partially
5109     // matched operation.
5110     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5111         TPT.getRestorationPoint();
5112 
5113     // Try to match an integer constant second to increase its chance of ending
5114     // up in `BaseOffs`, resp. decrease its chance of ending up in `BaseReg`.
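    // For example (illustrative): for "add %p, 16", matching %p first and the
    // constant second lets 16 land in BaseOffs instead of occupying BaseReg.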
5115     int First = 0, Second = 1;
5116     if (isa<ConstantInt>(AddrInst->getOperand(First))
5117       && !isa<ConstantInt>(AddrInst->getOperand(Second)))
5118         std::swap(First, Second);
5119     AddrMode.InBounds = false;
5120     if (matchAddr(AddrInst->getOperand(First), Depth + 1) &&
5121         matchAddr(AddrInst->getOperand(Second), Depth + 1))
5122       return true;
5123 
5124     // Restore the old addr mode info.
5125     AddrMode = BackupAddrMode;
5126     AddrModeInsts.resize(OldSize);
5127     TPT.rollback(LastKnownGood);
5128 
5129     // Otherwise this was over-aggressive.  Try merging operands in the opposite
5130     // order.
5131     if (matchAddr(AddrInst->getOperand(Second), Depth + 1) &&
5132         matchAddr(AddrInst->getOperand(First), Depth + 1))
5133       return true;
5134 
5135     // Otherwise we definitely can't merge the ADD in.
5136     AddrMode = BackupAddrMode;
5137     AddrModeInsts.resize(OldSize);
5138     TPT.rollback(LastKnownGood);
5139     break;
5140   }
5141   // case Instruction::Or:
5142   //  TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
5143   // break;
5144   case Instruction::Mul:
5145   case Instruction::Shl: {
5146     // Can only handle X*C and X << C.
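    // For example (illustrative), "shl i64 %x, 3" is treated as %x * 8 and
    // "mul i64 %x, 12" as %x * 12.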
5147     AddrMode.InBounds = false;
5148     ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
5149     if (!RHS || RHS->getBitWidth() > 64)
5150       return false;
5151     int64_t Scale = Opcode == Instruction::Shl
5152                         ? 1LL << RHS->getLimitedValue(RHS->getBitWidth() - 1)
5153                         : RHS->getSExtValue();
5154 
5155     return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
5156   }
5157   case Instruction::GetElementPtr: {
5158     // Scan the GEP.  We check whether it contains constant offsets and at most
5159     // one variable offset.
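    // Illustrative sketch (types hypothetical): for
    //   %p = getelementptr i32, ptr %base, i64 5
    // the loop below yields ConstantOffset = 20, while
    //   %q = getelementptr i32, ptr %base, i64 %i
    // yields VariableOperand = 1 and VariableScale = 4.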
5160     int VariableOperand = -1;
5161     unsigned VariableScale = 0;
5162 
5163     int64_t ConstantOffset = 0;
5164     gep_type_iterator GTI = gep_type_begin(AddrInst);
5165     for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
5166       if (StructType *STy = GTI.getStructTypeOrNull()) {
5167         const StructLayout *SL = DL.getStructLayout(STy);
5168         unsigned Idx =
5169             cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
5170         ConstantOffset += SL->getElementOffset(Idx);
5171       } else {
5172         TypeSize TS = GTI.getSequentialElementStride(DL);
5173         if (TS.isNonZero()) {
5174           // The optimisations below currently only work for fixed offsets.
5175           if (TS.isScalable())
5176             return false;
5177           int64_t TypeSize = TS.getFixedValue();
5178           if (ConstantInt *CI =
5179                   dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
5180             const APInt &CVal = CI->getValue();
5181             if (CVal.getSignificantBits() <= 64) {
5182               ConstantOffset += CVal.getSExtValue() * TypeSize;
5183               continue;
5184             }
5185           }
5186           // We only allow one variable index at the moment.
5187           if (VariableOperand != -1)
5188             return false;
5189 
5190           // Remember the variable index.
5191           VariableOperand = i;
5192           VariableScale = TypeSize;
5193         }
5194       }
5195     }
5196 
5197     // A common case is for the GEP to only do a constant offset.  In this case,
5198     // just add it to the disp field and check validity.
5199     if (VariableOperand == -1) {
5200       AddrMode.BaseOffs += ConstantOffset;
5201       if (matchAddr(AddrInst->getOperand(0), Depth + 1)) {
5202           if (!cast<GEPOperator>(AddrInst)->isInBounds())
5203             AddrMode.InBounds = false;
5204           return true;
5205       }
5206       AddrMode.BaseOffs -= ConstantOffset;
5207 
5208       if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) &&
5209           TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 &&
5210           ConstantOffset > 0) {
5211           // Record GEPs with non-zero offsets as candidates for splitting in
5212           // the event that the offset cannot fit into the r+i addressing mode.
5213           // Simple and common case that only one GEP is used in calculating the
5214           // address for the memory access.
5215           Value *Base = AddrInst->getOperand(0);
5216           auto *BaseI = dyn_cast<Instruction>(Base);
5217           auto *GEP = cast<GetElementPtrInst>(AddrInst);
5218           if (isa<Argument>(Base) || isa<GlobalValue>(Base) ||
5219               (BaseI && !isa<CastInst>(BaseI) &&
5220                !isa<GetElementPtrInst>(BaseI))) {
5221             // Make sure the parent block allows inserting non-PHI instructions
5222             // before the terminator.
5223             BasicBlock *Parent = BaseI ? BaseI->getParent()
5224                                        : &GEP->getFunction()->getEntryBlock();
5225             if (!Parent->getTerminator()->isEHPad())
5226               LargeOffsetGEP = std::make_pair(GEP, ConstantOffset);
5227           }
5228       }
5229 
5230       return false;
5231     }
5232 
5233     // Save the valid addressing mode in case we can't match.
5234     ExtAddrMode BackupAddrMode = AddrMode;
5235     unsigned OldSize = AddrModeInsts.size();
5236 
5237     // See if the scale and offset amount is valid for this target.
5238     AddrMode.BaseOffs += ConstantOffset;
5239     if (!cast<GEPOperator>(AddrInst)->isInBounds())
5240       AddrMode.InBounds = false;
5241 
5242     // Match the base operand of the GEP.
5243     if (!matchAddr(AddrInst->getOperand(0), Depth + 1)) {
5244       // If it couldn't be matched, just stuff the value in a register.
5245       if (AddrMode.HasBaseReg) {
5246         AddrMode = BackupAddrMode;
5247         AddrModeInsts.resize(OldSize);
5248         return false;
5249       }
5250       AddrMode.HasBaseReg = true;
5251       AddrMode.BaseReg = AddrInst->getOperand(0);
5252     }
5253 
5254     // Match the remaining variable portion of the GEP.
5255     if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
5256                           Depth)) {
5257       // If it couldn't be matched, try stuffing the base into a register
5258       // instead of matching it, and retrying the match of the scale.
5259       AddrMode = BackupAddrMode;
5260       AddrModeInsts.resize(OldSize);
5261       if (AddrMode.HasBaseReg)
5262         return false;
5263       AddrMode.HasBaseReg = true;
5264       AddrMode.BaseReg = AddrInst->getOperand(0);
5265       AddrMode.BaseOffs += ConstantOffset;
5266       if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
5267                             VariableScale, Depth)) {
5268         // If even that didn't work, bail.
5269         AddrMode = BackupAddrMode;
5270         AddrModeInsts.resize(OldSize);
5271         return false;
5272       }
5273     }
5274 
5275     return true;
5276   }
5277   case Instruction::SExt:
5278   case Instruction::ZExt: {
5279     Instruction *Ext = dyn_cast<Instruction>(AddrInst);
5280     if (!Ext)
5281       return false;
5282 
5283     // Try to move this ext out of the way of the addressing mode.
5284     // Ask for a method for doing so.
5285     TypePromotionHelper::Action TPH =
5286         TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
5287     if (!TPH)
5288       return false;
5289 
5290     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5291         TPT.getRestorationPoint();
5292     unsigned CreatedInstsCost = 0;
5293     unsigned ExtCost = !TLI.isExtFree(Ext);
5294     Value *PromotedOperand =
5295         TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
5296     // SExt has been moved away.
5297     // Thus either it will be rematched later in the recursive calls or it is
5298     // gone. Anyway, we must not fold it into the addressing mode at this point.
5299     // E.g.,
5300     // op = add opnd, 1
5301     // idx = ext op
5302     // addr = gep base, idx
5303     // is now:
5304     // promotedOpnd = ext opnd            <- no match here
5305     // op = promoted_add promotedOpnd, 1  <- match (later in recursive calls)
5306     // addr = gep base, op                <- match
5307     if (MovedAway)
5308       *MovedAway = true;
5309 
5310     assert(PromotedOperand &&
5311            "TypePromotionHelper should have filtered out those cases");
5312 
5313     ExtAddrMode BackupAddrMode = AddrMode;
5314     unsigned OldSize = AddrModeInsts.size();
5315 
5316     if (!matchAddr(PromotedOperand, Depth) ||
5317         // The total of the new cost is equal to the cost of the created
5318         // instructions.
5319         // The total of the old cost is equal to the cost of the extension plus
5320         // what we have saved in the addressing mode.
5321         !isPromotionProfitable(CreatedInstsCost,
5322                                ExtCost + (AddrModeInsts.size() - OldSize),
5323                                PromotedOperand)) {
5324       AddrMode = BackupAddrMode;
5325       AddrModeInsts.resize(OldSize);
5326       LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
5327       TPT.rollback(LastKnownGood);
5328       return false;
5329     }
5330     return true;
5331   }
5332   case Instruction::Call:
5333     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(AddrInst)) {
5334       if (II->getIntrinsicID() == Intrinsic::threadlocal_address) {
5335         GlobalValue &GV = cast<GlobalValue>(*II->getArgOperand(0));
5336         if (TLI.addressingModeSupportsTLS(GV))
5337           return matchAddr(AddrInst->getOperand(0), Depth);
5338       }
5339     }
5340     break;
5341   }
5342   return false;
5343 }
5344 
5345 /// If we can, try to add the value of 'Addr' into the current addressing mode.
5346 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode
5347 /// unmodified. This assumes that Addr is either a pointer type or intptr_t
5348 /// for the target.
5349 ///
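/// For example (a sketch, not the only possibility): given an address
/// computed as `%a = add i64 %idx, 16`, matchAddr may fold it as
/// { BaseReg = %idx, BaseOffs = 16 } provided the target reports that
/// [reg + imm] addressing is legal for the access type.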
5350 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
5351   // Start a transaction at this point that we will rollback if the matching
5352   // fails.
5353   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5354       TPT.getRestorationPoint();
5355   if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
5356     if (CI->getValue().isSignedIntN(64)) {
5357       // Fold in immediates if legal for the target.
5358       AddrMode.BaseOffs += CI->getSExtValue();
5359       if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5360         return true;
5361       AddrMode.BaseOffs -= CI->getSExtValue();
5362     }
5363   } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
5364     // If this is a global variable, try to fold it into the addressing mode.
5365     if (!AddrMode.BaseGV) {
5366       AddrMode.BaseGV = GV;
5367       if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5368         return true;
5369       AddrMode.BaseGV = nullptr;
5370     }
5371   } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
5372     ExtAddrMode BackupAddrMode = AddrMode;
5373     unsigned OldSize = AddrModeInsts.size();
5374 
5375     // Check to see if it is possible to fold this operation.
5376     bool MovedAway = false;
5377     if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
5378       // This instruction may have been moved away. If so, there is nothing
5379       // to check here.
5380       if (MovedAway)
5381         return true;
5382       // Okay, it's possible to fold this.  Check to see if it is actually
5383       // *profitable* to do so.  We use a simple cost model to avoid increasing
5384       // register pressure too much.
5385       if (I->hasOneUse() ||
5386           isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
5387         AddrModeInsts.push_back(I);
5388         return true;
5389       }
5390 
5391       // It isn't profitable to do this, roll back.
5392       AddrMode = BackupAddrMode;
5393       AddrModeInsts.resize(OldSize);
5394       TPT.rollback(LastKnownGood);
5395     }
5396   } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
5397     if (matchOperationAddr(CE, CE->getOpcode(), Depth))
5398       return true;
5399     TPT.rollback(LastKnownGood);
5400   } else if (isa<ConstantPointerNull>(Addr)) {
5401     // Null pointer gets folded without affecting the addressing mode.
5402     return true;
5403   }
5404 
5405   // Worst case, the target should support [reg] addressing modes. :)
5406   if (!AddrMode.HasBaseReg) {
5407     AddrMode.HasBaseReg = true;
5408     AddrMode.BaseReg = Addr;
5409     // Still check for legality in case the target supports [imm] but not [i+r].
5410     if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5411       return true;
5412     AddrMode.HasBaseReg = false;
5413     AddrMode.BaseReg = nullptr;
5414   }
5415 
5416   // If the base register is already taken, see if we can do [r+r].
5417   if (AddrMode.Scale == 0) {
5418     AddrMode.Scale = 1;
5419     AddrMode.ScaledReg = Addr;
5420     if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5421       return true;
5422     AddrMode.Scale = 0;
5423     AddrMode.ScaledReg = nullptr;
5424   }
5425   // Couldn't match.
5426   TPT.rollback(LastKnownGood);
5427   return false;
5428 }
5429 
5430 /// Check to see if all uses of OpVal by the specified inline asm call are due
5431 /// to memory operands. If so, return true, otherwise return false.
5432 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
5433                                     const TargetLowering &TLI,
5434                                     const TargetRegisterInfo &TRI) {
5435   const Function *F = CI->getFunction();
5436   TargetLowering::AsmOperandInfoVector TargetConstraints =
5437       TLI.ParseConstraints(F->getDataLayout(), &TRI, *CI);
5438 
5439   for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
5440     // Compute the constraint code and ConstraintType to use.
5441     TLI.ComputeConstraintToUse(OpInfo, SDValue());
5442 
5443     // If this asm operand is our Value*, and if it isn't an indirect memory
5444     // operand, we can't fold it!  TODO: Also handle C_Address?
5445     if (OpInfo.CallOperandVal == OpVal &&
5446         (OpInfo.ConstraintType != TargetLowering::C_Memory ||
5447          !OpInfo.isIndirect))
5448       return false;
5449   }
5450 
5451   return true;
5452 }
5453 
5454 /// Recursively walk all the uses of I until we find a memory use.
5455 /// If we find an obviously non-foldable instruction, return true.
5456 /// Add accessed addresses and types to MemoryUses.
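///
/// For example (sketch): starting from an address computation %a, users such
/// as `load i32, ptr %a` or `store i32 %v, ptr %a` are recorded as memory
/// uses, while a user like `store ptr %a, ptr %q` (which stores the address
/// itself) makes the walk give up and return true.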
5457 static bool FindAllMemoryUses(
5458     Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses,
5459     SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
5460     const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI,
5461     BlockFrequencyInfo *BFI, unsigned &SeenInsts) {
5462   // If we already considered this instruction, we're done.
5463   if (!ConsideredInsts.insert(I).second)
5464     return false;
5465 
5466   // If this is an obviously unfoldable instruction, bail out.
5467   if (!MightBeFoldableInst(I))
5468     return true;
5469 
5470   // Loop over all the uses, recursively processing them.
5471   for (Use &U : I->uses()) {
5472     // Conservatively return true if we're seeing a large number or a deep chain
5473     // of users. This avoids excessive compilation times in pathological cases.
5474     if (SeenInsts++ >= MaxAddressUsersToScan)
5475       return true;
5476 
5477     Instruction *UserI = cast<Instruction>(U.getUser());
5478     if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
5479       MemoryUses.push_back({&U, LI->getType()});
5480       continue;
5481     }
5482 
5483     if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
5484       if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
5485         return true; // Storing addr, not into addr.
5486       MemoryUses.push_back({&U, SI->getValueOperand()->getType()});
5487       continue;
5488     }
5489 
5490     if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
5491       if (U.getOperandNo() != AtomicRMWInst::getPointerOperandIndex())
5492         return true; // Storing addr, not into addr.
5493       MemoryUses.push_back({&U, RMW->getValOperand()->getType()});
5494       continue;
5495     }
5496 
5497     if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
5498       if (U.getOperandNo() != AtomicCmpXchgInst::getPointerOperandIndex())
5499         return true; // Storing addr, not into addr.
5500       MemoryUses.push_back({&U, CmpX->getCompareOperand()->getType()});
5501       continue;
5502     }
5503 
5504     if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
5505       if (CI->hasFnAttr(Attribute::Cold)) {
5506         // If this is a cold call, we can sink the addressing calculation into
5507         // the cold path.  See optimizeCallInst().
5508         bool OptForSize =
5509             OptSize || llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
5510         if (!OptForSize)
5511           continue;
5512       }
5513 
5514       InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand());
5515       if (!IA)
5516         return true;
5517 
5518       // If this is a memory operand, we're cool, otherwise bail out.
5519       if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
5520         return true;
5521       continue;
5522     }
5523 
5524     if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
5525                           PSI, BFI, SeenInsts))
5526       return true;
5527   }
5528 
5529   return false;
5530 }
5531 
5532 static bool FindAllMemoryUses(
5533     Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses,
5534     const TargetLowering &TLI, const TargetRegisterInfo &TRI, bool OptSize,
5535     ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
5536   unsigned SeenInsts = 0;
5537   SmallPtrSet<Instruction *, 16> ConsideredInsts;
5538   return FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
5539                            PSI, BFI, SeenInsts);
5540 }
5541 
5542 
5543 /// Return true if Val is already known to be live at the use site that we're
5544 /// folding it into. If so, there is no cost to include it in the addressing
5545 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
5546 /// instruction already.
5547 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,
5548                                                    Value *KnownLive1,
5549                                                    Value *KnownLive2) {
5550   // If Val is either of the known-live values, we know it is live!
5551   if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
5552     return true;
5553 
5554   // All values other than instructions and arguments (e.g. constants) are live.
5555   if (!isa<Instruction>(Val) && !isa<Argument>(Val))
5556     return true;
5557 
5558   // If Val is a constant-sized alloca in the entry block, it is live; this is
5559   // because it is just a reference to the stack/frame pointer, which is live
5560   // for the whole function.
5561   if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
5562     if (AI->isStaticAlloca())
5563       return true;
5564 
5565   // Check to see if this value is already used in the memory instruction's
5566   // block.  If so, it's already live into the block at the very least, so we
5567   // can reasonably fold it.
5568   return Val->isUsedInBasicBlock(MemoryInst->getParent());
5569 }
5570 
5571 /// It is possible for the addressing mode of the machine to fold the specified
5572 /// instruction into a load or store that ultimately uses it.
5573 /// However, the specified instruction has multiple uses.
5574 /// Given this, it may actually increase register pressure to fold it
5575 /// into the load. For example, consider this code:
5576 ///
5577 ///     X = ...
5578 ///     Y = X+1
5579 ///     use(Y)   -> nonload/store
5580 ///     Z = Y+1
5581 ///     load Z
5582 ///
5583 /// In this case, Y has multiple uses, and can be folded into the load of Z
5584 /// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1" to
5585 /// be live at the use(Y) line.  If we don't fold Y into load Z, we use one
5586 /// fewer register.  Since Y can't be folded into "use(Y)" we don't increase the
5587 /// number of computations either.
5588 ///
5589 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
5590 /// X was live across 'load Z' for other reasons, we actually *would* want to
5591 /// fold the addressing mode in the Z case.  This would make Y die earlier.
5592 bool AddressingModeMatcher::isProfitableToFoldIntoAddressingMode(
5593     Instruction *I, ExtAddrMode &AMBefore, ExtAddrMode &AMAfter) {
5594   if (IgnoreProfitability)
5595     return true;
5596 
5597   // AMBefore is the addressing mode before this instruction was folded in,
5598   // and AMAfter is the addressing mode after the instruction was folded.  Get
5599   // the set of registers referenced by AMAfter and subtract out those
5600   // referenced by AMBefore: this is the set of values which folding in this
5601   // address extends the lifetime of.
5602   //
5603   // Note that there are only two potential values being referenced here,
5604   // BaseReg and ScaleReg (global addresses are always available, as are any
5605   // folded immediates).
5606   Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
5607 
5608   // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
5609   // lifetime wasn't extended by adding this instruction.
5610   if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
5611     BaseReg = nullptr;
5612   if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
5613     ScaledReg = nullptr;
5614 
5615   // If folding this instruction (and its subexprs) didn't extend any live
5616   // ranges, we're ok with it.
5617   if (!BaseReg && !ScaledReg)
5618     return true;
5619 
5620   // If all uses of this instruction can have the address mode sunk into them,
5621   // we can remove the addressing mode and effectively trade one live register
5622   // for another (at worst.)  In this context, folding an addressing mode into
5623   // the use is just a particularly nice way of sinking it.
5624   SmallVector<std::pair<Use *, Type *>, 16> MemoryUses;
5625   if (FindAllMemoryUses(I, MemoryUses, TLI, TRI, OptSize, PSI, BFI))
5626     return false; // Has a non-memory, non-foldable use!
5627 
5628   // Now that we know that all uses of this instruction are part of a chain of
5629   // computation involving only operations that could theoretically be folded
5630   // into a memory use, loop over each of these memory operation uses and see
5631   // if they could *actually* fold the instruction.  The assumption is that
5632   // addressing modes are cheap and that duplicating the computation involved
5633   // many times is worthwhile, even on a fast path. For sinking candidates
5634   // (i.e. cold call sites), this serves as a way to prevent excessive code
5635   // growth since most architectures have some reasonably small and fast way to
5636   // compute an effective address (e.g., LEA on x86).
5637   SmallVector<Instruction *, 32> MatchedAddrModeInsts;
5638   for (const std::pair<Use *, Type *> &Pair : MemoryUses) {
5639     Value *Address = Pair.first->get();
5640     Instruction *UserI = cast<Instruction>(Pair.first->getUser());
5641     Type *AddressAccessTy = Pair.second;
5642     unsigned AS = Address->getType()->getPointerAddressSpace();
5643 
5644     // Do a match against the root of this address, ignoring profitability. This
5645     // will tell us if the addressing mode for the memory operation will
5646     // *actually* cover the shared instruction.
5647     ExtAddrMode Result;
5648     std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5649                                                                       0);
5650     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5651         TPT.getRestorationPoint();
5652     AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, LI, getDTFn,
5653                                   AddressAccessTy, AS, UserI, Result,
5654                                   InsertedInsts, PromotedInsts, TPT,
5655                                   LargeOffsetGEP, OptSize, PSI, BFI);
5656     Matcher.IgnoreProfitability = true;
5657     bool Success = Matcher.matchAddr(Address, 0);
5658     (void)Success;
5659     assert(Success && "Couldn't select *anything*?");
5660 
5661     // The match was only done to check profitability; the changes made are
5662     // not part of the original matcher. Therefore, they should be dropped,
5663     // otherwise the original matcher will not reflect the right state.
5664     TPT.rollback(LastKnownGood);
5665 
5666     // If the match didn't cover I, then it won't be shared by it.
5667     if (!is_contained(MatchedAddrModeInsts, I))
5668       return false;
5669 
5670     MatchedAddrModeInsts.clear();
5671   }
5672 
5673   return true;
5674 }
5675 
5676 /// Return true if the specified values are defined in a
5677 /// different basic block than BB.
5678 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
5679   if (Instruction *I = dyn_cast<Instruction>(V))
5680     return I->getParent() != BB;
5681   return false;
5682 }
5683 
5684 /// Sink addressing mode computation immediately before MemoryInst if doing so
5685 /// can be done without increasing register pressure.  The need for the
5686 /// register pressure constraint means this can end up being an all or nothing
5687 /// decision for all uses of the same addressing computation.
5688 ///
5689 /// Load and Store Instructions often have addressing modes that can do
5690 /// significant amounts of computation. As such, instruction selection will try
5691 /// to get the load or store to do as much computation as possible for the
5692 /// program. The problem is that isel can only see within a single block. As
5693 /// such, we sink as much legal addressing mode work into the block as possible.
5694 ///
5695 /// This method is used to optimize both load/store and inline asms with memory
5696 /// operands.  It's also used to sink addressing computations feeding into cold
5697 /// call sites into their (cold) basic block.
5698 ///
5699 /// The motivation for handling sinking into cold blocks is that doing so can
5700 /// both enable other address mode sinking (by satisfying the register pressure
5701 /// constraint above), and reduce register pressure globally (by removing the
5702 /// addressing mode computation from the fast path entirely).
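///
/// For example (a sketch of the effect): if `%sum = add i64 %base, %off` is
/// defined in a predecessor block and feeds only the address of a load in
/// this block, an equivalent "sunkaddr" computation is materialized right
/// before the load so that instruction selection can fold it into the memory
/// operand.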
5703 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
5704                                         Type *AccessTy, unsigned AddrSpace) {
5705   Value *Repl = Addr;
5706 
5707   // Try to collapse single-value PHI nodes.  This is necessary to undo
5708   // unprofitable PRE transformations.
5709   SmallVector<Value *, 8> worklist;
5710   SmallPtrSet<Value *, 16> Visited;
5711   worklist.push_back(Addr);
5712 
5713   // Use a worklist to iteratively look through PHI and select nodes, and
5714   // ensure that the addressing mode obtained from the non-PHI/select roots of
5715   // the graph are compatible.
5716   bool PhiOrSelectSeen = false;
5717   SmallVector<Instruction *, 16> AddrModeInsts;
5718   const SimplifyQuery SQ(*DL, TLInfo);
5719   AddressingModeCombiner AddrModes(SQ, Addr);
5720   TypePromotionTransaction TPT(RemovedInsts);
5721   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5722       TPT.getRestorationPoint();
5723   while (!worklist.empty()) {
5724     Value *V = worklist.pop_back_val();
5725 
5726     // We allow traversing cyclic Phi nodes.
5727     // In case of success after this loop, we ensure that traversing through
5728     // the Phi nodes always ends up computing an address of the form
5729     //    BaseGV + Base + Scale * Index + Offset
5730     // where Scale and Offset are constants and BaseGV, Base and Index
5731     // are exactly the same Values in all cases.
5732     // It means that these values dominate our memory instruction and have the
5733     // same value as they had in the address computation represented as the
5734     // Phi, so we can safely sink the address computation to the memory instruction.
5735     if (!Visited.insert(V).second)
5736       continue;
5737 
5738     // For a PHI node, push all of its incoming values.
5739     if (PHINode *P = dyn_cast<PHINode>(V)) {
5740       append_range(worklist, P->incoming_values());
5741       PhiOrSelectSeen = true;
5742       continue;
5743     }
5744     // Similar for select.
5745     if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
5746       worklist.push_back(SI->getFalseValue());
5747       worklist.push_back(SI->getTrueValue());
5748       PhiOrSelectSeen = true;
5749       continue;
5750     }
5751 
5752     // For non-PHIs, determine the addressing mode being computed.  Note that
5753     // the result may differ depending on what other uses our candidate
5754     // addressing instructions might have.
5755     AddrModeInsts.clear();
5756     std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5757                                                                       0);
5758     // Defer the query (and possible computation) of the dom tree to the point
5759     // of actual use.  It's expected that most address matches don't actually
5760     // need the domtree.
5761     auto getDTFn = [MemoryInst, this]() -> const DominatorTree & {
5762       Function *F = MemoryInst->getParent()->getParent();
5763       return this->getDT(*F);
5764     };
5765     ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
5766         V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI, getDTFn,
5767         *TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
5768         BFI.get());
5769 
5770     GetElementPtrInst *GEP = LargeOffsetGEP.first;
5771     if (GEP && !NewGEPBases.count(GEP)) {
5772       // If splitting the underlying data structure can reduce the offset of a
5773       // GEP, collect the GEP.  Skip the GEPs that are the new bases of
5774       // previously split data structures.
5775       LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
5776       LargeOffsetGEPID.insert(std::make_pair(GEP, LargeOffsetGEPID.size()));
5777     }
5778 
5779     NewAddrMode.OriginalValue = V;
5780     if (!AddrModes.addNewAddrMode(NewAddrMode))
5781       break;
5782   }
5783 
5784   // Try to combine the AddrModes we've collected. If we couldn't collect any,
5785   // or we have multiple but either couldn't combine them or combining them
5786   // wouldn't do anything useful, bail out now.
5787   if (!AddrModes.combineAddrModes()) {
5788     TPT.rollback(LastKnownGood);
5789     return false;
5790   }
5791   bool Modified = TPT.commit();
5792 
5793   // Get the combined AddrMode (or the only AddrMode, if we only had one).
5794   ExtAddrMode AddrMode = AddrModes.getAddrMode();
5795 
5796   // If all the instructions matched are already in this BB, don't do anything.
5797   // If we saw a Phi node then it is definitely not local, and if we saw a
5798   // select then we want to push the address calculation past it even if it's
5799   // already in this BB.
5800   if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
5801         return IsNonLocalValue(V, MemoryInst->getParent());
5802       })) {
5803     LLVM_DEBUG(dbgs() << "CGP: Found      local addrmode: " << AddrMode
5804                       << "\n");
5805     return Modified;
5806   }
5807 
5808   // Insert this computation right after this user.  Since our caller is
5809   // scanning from the top of the BB to the bottom, reuses of the expr are
5810   // guaranteed to happen later.
5811   IRBuilder<> Builder(MemoryInst);
5812 
5813   // Now that we have determined the addressing expression we want to use and
5814   // know that we have to sink it into this block, check to see if we have
5815   // already done this for some other load/store instr in this block.  If so,
5816   // reuse the computation.  Before attempting reuse, check if the address is
5817   // still valid as it may have been erased.
5818 
5819   WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];
5820 
5821   Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
5822   Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
5823   if (SunkAddr) {
5824     LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
5825                       << " for " << *MemoryInst << "\n");
5826     if (SunkAddr->getType() != Addr->getType()) {
5827       if (SunkAddr->getType()->getPointerAddressSpace() !=
5828               Addr->getType()->getPointerAddressSpace() &&
5829           !DL->isNonIntegralPointerType(Addr->getType())) {
5830         // There are two reasons the address spaces might not match: a no-op
5831         // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
5832         // ptrtoint/inttoptr pair to ensure we match the original semantics.
5833         // TODO: allow bitcast between different address space pointers with the
5834         // same size.
5835         SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
5836         SunkAddr =
5837             Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
5838       } else
5839         SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5840     }
5841   } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
5842                                    SubtargetInfo->addrSinkUsingGEPs())) {
5843     // By default, we use the GEP-based method when AA is used later. This
5844     // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
5845     LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5846                       << " for " << *MemoryInst << "\n");
5847     Value *ResultPtr = nullptr, *ResultIndex = nullptr;
5848 
5849     // First, find the pointer.
5850     if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
5851       ResultPtr = AddrMode.BaseReg;
5852       AddrMode.BaseReg = nullptr;
5853     }
5854 
5855     if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
5856       // We can't add more than one pointer together, nor can we scale a
5857       // pointer (both of which seem meaningless).
5858       if (ResultPtr || AddrMode.Scale != 1)
5859         return Modified;
5860 
5861       ResultPtr = AddrMode.ScaledReg;
5862       AddrMode.Scale = 0;
5863     }
5864 
5865     // It is only safe to sign extend the BaseReg if we know that the math
5866     // required to create it did not overflow before we extend it. Since
5867     // the original IR value was tossed in favor of a constant back when
5868     // the AddrMode was created we need to bail out gracefully if widths
5869     // do not match instead of extending it.
5870     //
5871     // (See below for code to add the scale.)
5872     if (AddrMode.Scale) {
5873       Type *ScaledRegTy = AddrMode.ScaledReg->getType();
5874       if (cast<IntegerType>(IntPtrTy)->getBitWidth() >
5875           cast<IntegerType>(ScaledRegTy)->getBitWidth())
5876         return Modified;
5877     }
5878 
5879     GlobalValue *BaseGV = AddrMode.BaseGV;
5880     if (BaseGV != nullptr) {
5881       if (ResultPtr)
5882         return Modified;
5883 
5884       if (BaseGV->isThreadLocal()) {
5885         ResultPtr = Builder.CreateThreadLocalAddress(BaseGV);
5886       } else {
5887         ResultPtr = BaseGV;
5888       }
5889     }
5890 
5891     // If the real base value actually came from an inttoptr, then the matcher
5892     // will look through it and provide only the integer value. In that case,
5893     // use it here.
5894     if (!DL->isNonIntegralPointerType(Addr->getType())) {
5895       if (!ResultPtr && AddrMode.BaseReg) {
5896         ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(),
5897                                            "sunkaddr");
5898         AddrMode.BaseReg = nullptr;
5899       } else if (!ResultPtr && AddrMode.Scale == 1) {
5900         ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(),
5901                                            "sunkaddr");
5902         AddrMode.Scale = 0;
5903       }
5904     }
5905 
5906     if (!ResultPtr && !AddrMode.BaseReg && !AddrMode.Scale &&
5907         !AddrMode.BaseOffs) {
5908       SunkAddr = Constant::getNullValue(Addr->getType());
5909     } else if (!ResultPtr) {
5910       return Modified;
5911     } else {
5912       Type *I8PtrTy =
5913           Builder.getPtrTy(Addr->getType()->getPointerAddressSpace());
5914 
5915       // Start with the base register. Do this first so that subsequent address
5916       // matching finds it last, which will prevent it from trying to match it
5917       // as the scaled value in case it happens to be a mul. That would be
5918       // problematic if we've sunk a different mul for the scale, because then
5919       // we'd end up sinking both muls.
5920       if (AddrMode.BaseReg) {
5921         Value *V = AddrMode.BaseReg;
5922         if (V->getType() != IntPtrTy)
5923           V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
5924 
5925         ResultIndex = V;
5926       }
5927 
5928       // Add the scale value.
5929       if (AddrMode.Scale) {
5930         Value *V = AddrMode.ScaledReg;
5931         if (V->getType() == IntPtrTy) {
5932           // done.
5933         } else {
5934           assert(cast<IntegerType>(IntPtrTy)->getBitWidth() <
5935                      cast<IntegerType>(V->getType())->getBitWidth() &&
5936                  "We can't transform if ScaledReg is too narrow");
5937           V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
5938         }
5939 
5940         if (AddrMode.Scale != 1)
5941           V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
5942                                 "sunkaddr");
5943         if (ResultIndex)
5944           ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
5945         else
5946           ResultIndex = V;
5947       }
5948 
5949       // Add in the Base Offset if present.
5950       if (AddrMode.BaseOffs) {
5951         Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
5952         if (ResultIndex) {
5953           // We need to add this separately from the scale above to help with
5954           // SDAG consecutive load/store merging.
5955           if (ResultPtr->getType() != I8PtrTy)
5956             ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
5957           ResultPtr = Builder.CreatePtrAdd(ResultPtr, ResultIndex, "sunkaddr",
5958                                            AddrMode.InBounds);
5959         }
5960 
5961         ResultIndex = V;
5962       }
5963 
5964       if (!ResultIndex) {
5965         SunkAddr = ResultPtr;
5966       } else {
5967         if (ResultPtr->getType() != I8PtrTy)
5968           ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
5969         SunkAddr = Builder.CreatePtrAdd(ResultPtr, ResultIndex, "sunkaddr",
5970                                         AddrMode.InBounds);
5971       }
5972 
5973       if (SunkAddr->getType() != Addr->getType()) {
5974         if (SunkAddr->getType()->getPointerAddressSpace() !=
5975                 Addr->getType()->getPointerAddressSpace() &&
5976             !DL->isNonIntegralPointerType(Addr->getType())) {
5977           // There are two reasons the address spaces might not match: a no-op
5978           // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
5979           // ptrtoint/inttoptr pair to ensure we match the original semantics.
5980           // TODO: allow bitcast between different address space pointers with
5981           // the same size.
5982           SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
5983           SunkAddr =
5984               Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
5985         } else
5986           SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5987       }
5988     }
5989   } else {
5990     // We'd require a ptrtoint/inttoptr down the line, which we can't do for
5991     // non-integral pointers, so in that case bail out now.
5992     Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr;
5993     Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr;
5994     PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy);
5995     PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy);
5996     if (DL->isNonIntegralPointerType(Addr->getType()) ||
5997         (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) ||
5998         (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) ||
5999         (AddrMode.BaseGV &&
6000          DL->isNonIntegralPointerType(AddrMode.BaseGV->getType())))
6001       return Modified;
6002 
6003     LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
6004                       << " for " << *MemoryInst << "\n");
6005     Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
6006     Value *Result = nullptr;
6007 
6008     // Start with the base register. Do this first so that subsequent address
6009     // matching finds it last, which will prevent it from trying to match it
6010     // as the scaled value in case it happens to be a mul. That would be
6011     // problematic if we've sunk a different mul for the scale, because then
6012     // we'd end up sinking both muls.
6013     if (AddrMode.BaseReg) {
6014       Value *V = AddrMode.BaseReg;
6015       if (V->getType()->isPointerTy())
6016         V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
6017       if (V->getType() != IntPtrTy)
6018         V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
6019       Result = V;
6020     }
6021 
6022     // Add the scale value.
6023     if (AddrMode.Scale) {
6024       Value *V = AddrMode.ScaledReg;
6025       if (V->getType() == IntPtrTy) {
6026         // done.
6027       } else if (V->getType()->isPointerTy()) {
6028         V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
6029       } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
6030                  cast<IntegerType>(V->getType())->getBitWidth()) {
6031         V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
6032       } else {
6033         // It is only safe to sign extend the BaseReg if we know that the math
6034         // required to create it did not overflow before we extend it. Since
6035         // the original IR value was tossed in favor of a constant back when
6036         // the AddrMode was created we need to bail out gracefully if widths
6037         // do not match instead of extending it.
6038         Instruction *I = dyn_cast_or_null<Instruction>(Result);
6039         if (I && (Result != AddrMode.BaseReg))
6040           I->eraseFromParent();
6041         return Modified;
6042       }
6043       if (AddrMode.Scale != 1)
6044         V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
6045                               "sunkaddr");
6046       if (Result)
6047         Result = Builder.CreateAdd(Result, V, "sunkaddr");
6048       else
6049         Result = V;
6050     }
6051 
6052     // Add in the BaseGV if present.
6053     GlobalValue *BaseGV = AddrMode.BaseGV;
6054     if (BaseGV != nullptr) {
6055       Value *BaseGVPtr;
6056       if (BaseGV->isThreadLocal()) {
6057         BaseGVPtr = Builder.CreateThreadLocalAddress(BaseGV);
6058       } else {
6059         BaseGVPtr = BaseGV;
6060       }
6061       Value *V = Builder.CreatePtrToInt(BaseGVPtr, IntPtrTy, "sunkaddr");
6062       if (Result)
6063         Result = Builder.CreateAdd(Result, V, "sunkaddr");
6064       else
6065         Result = V;
6066     }
6067 
6068     // Add in the Base Offset if present.
6069     if (AddrMode.BaseOffs) {
6070       Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
6071       if (Result)
6072         Result = Builder.CreateAdd(Result, V, "sunkaddr");
6073       else
6074         Result = V;
6075     }
6076 
6077     if (!Result)
6078       SunkAddr = Constant::getNullValue(Addr->getType());
6079     else
6080       SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
6081   }
6082 
6083   MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
6084   // Store the newly computed address into the cache. In the case we reused a
6085   // value, this should be idempotent.
6086   SunkAddrs[Addr] = WeakTrackingVH(SunkAddr);
6087 
6088   // If we have no uses, recursively delete the value and all dead instructions
6089   // using it.
6090   if (Repl->use_empty()) {
6091     resetIteratorIfInvalidatedWhileCalling(CurInstIterator->getParent(), [&]() {
6092       RecursivelyDeleteTriviallyDeadInstructions(
6093           Repl, TLInfo, nullptr,
6094           [&](Value *V) { removeAllAssertingVHReferences(V); });
6095     });
6096   }
6097   ++NumMemoryInsts;
6098   return true;
6099 }
6100 
6101 /// Rewrite GEP input to gather/scatter to enable SelectionDAGBuilder to find
6102 /// a uniform base to use for ISD::MGATHER/MSCATTER. SelectionDAGBuilder can
6103 /// only handle a 2-operand GEP in the same basic block or a splat constant
6104 /// vector. The 2 operands to the GEP must be a scalar pointer and a vector
6105 /// index.
6106 ///
6107 /// If the existing GEP has a vector base pointer that is splat, we can look
6108 /// through the splat to find the scalar pointer. If we can't find a scalar
6109 /// pointer there's nothing we can do.
6110 ///
6111 /// If we have a GEP with more than 2 indices where the middle indices are all
6112 /// zeroes, we can replace it with 2 GEPs where the second has 2 operands.
6113 ///
6114 /// If the final index isn't a vector or is a splat, we can emit a scalar GEP
6115 /// followed by a GEP with an all zeroes vector index. This will enable
6116 /// SelectionDAGBuilder to use the scalar GEP as the uniform base and have a
6117 /// zero index.
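///
/// For example (sketch): a gather address such as
///   %p = getelementptr [16 x i32], ptr %base, i64 0, <4 x i64> %idx
/// can be rewritten into a scalar GEP followed by a 2-operand vector GEP:
///   %b = getelementptr [16 x i32], ptr %base, i64 0, i64 0
///   %p = getelementptr i32, ptr %b, <4 x i64> %idx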
6118 bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst,
6119                                                Value *Ptr) {
6120   Value *NewAddr;
6121 
6122   if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
6123     // Don't optimize GEPs that don't have indices.
6124     if (!GEP->hasIndices())
6125       return false;
6126 
6127     // If the GEP and the gather/scatter aren't in the same BB, don't optimize.
6128     // FIXME: We should support this by sinking the GEP.
6129     if (MemoryInst->getParent() != GEP->getParent())
6130       return false;
6131 
6132     SmallVector<Value *, 2> Ops(GEP->operands());
6133 
6134     bool RewriteGEP = false;
6135 
6136     if (Ops[0]->getType()->isVectorTy()) {
6137       Ops[0] = getSplatValue(Ops[0]);
6138       if (!Ops[0])
6139         return false;
6140       RewriteGEP = true;
6141     }
6142 
6143     unsigned FinalIndex = Ops.size() - 1;
6144 
6145     // Ensure all indices except the last one are 0.
6146     // FIXME: This isn't strictly required. All that's required is that they are
6147     // all scalars or splats.
6148     for (unsigned i = 1; i < FinalIndex; ++i) {
6149       auto *C = dyn_cast<Constant>(Ops[i]);
6150       if (!C)
6151         return false;
6152       if (isa<VectorType>(C->getType()))
6153         C = C->getSplatValue();
6154       auto *CI = dyn_cast_or_null<ConstantInt>(C);
6155       if (!CI || !CI->isZero())
6156         return false;
6157       // Scalarize the index if needed.
6158       Ops[i] = CI;
6159     }
6160 
6161     // Try to scalarize the final index.
6162     if (Ops[FinalIndex]->getType()->isVectorTy()) {
6163       if (Value *V = getSplatValue(Ops[FinalIndex])) {
6164         auto *C = dyn_cast<ConstantInt>(V);
6165         // Don't scalarize an all-zeros vector.
6166         if (!C || !C->isZero()) {
6167           Ops[FinalIndex] = V;
6168           RewriteGEP = true;
6169         }
6170       }
6171     }
6172 
6173     // If we made any changes or we have extra operands, we need to generate
6174     // new instructions.
6175     if (!RewriteGEP && Ops.size() == 2)
6176       return false;
6177 
6178     auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
6179 
6180     IRBuilder<> Builder(MemoryInst);
6181 
6182     Type *SourceTy = GEP->getSourceElementType();
6183     Type *ScalarIndexTy = DL->getIndexType(Ops[0]->getType()->getScalarType());
6184 
6185     // If the final index isn't a vector, emit a scalar GEP containing all ops
6186     // and a vector GEP with all zeroes final index.
6187     if (!Ops[FinalIndex]->getType()->isVectorTy()) {
6188       NewAddr = Builder.CreateGEP(SourceTy, Ops[0], ArrayRef(Ops).drop_front());
6189       auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
6190       auto *SecondTy = GetElementPtrInst::getIndexedType(
6191           SourceTy, ArrayRef(Ops).drop_front());
6192       NewAddr =
6193           Builder.CreateGEP(SecondTy, NewAddr, Constant::getNullValue(IndexTy));
6194     } else {
6195       Value *Base = Ops[0];
6196       Value *Index = Ops[FinalIndex];
6197 
6198       // Create a scalar GEP if there are more than 2 operands.
6199       if (Ops.size() != 2) {
6200         // Replace the last index with 0.
6201         Ops[FinalIndex] =
6202             Constant::getNullValue(Ops[FinalIndex]->getType()->getScalarType());
6203         Base = Builder.CreateGEP(SourceTy, Base, ArrayRef(Ops).drop_front());
6204         SourceTy = GetElementPtrInst::getIndexedType(
6205             SourceTy, ArrayRef(Ops).drop_front());
6206       }
6207 
6208       // Now create the GEP with scalar pointer and vector index.
6209       NewAddr = Builder.CreateGEP(SourceTy, Base, Index);
6210     }
6211   } else if (!isa<Constant>(Ptr)) {
6212     // Not a GEP; maybe it's a splat and we can create a GEP to enable
6213     // SelectionDAGBuilder to use it as a uniform base.
6214     Value *V = getSplatValue(Ptr);
6215     if (!V)
6216       return false;
6217 
6218     auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
6219 
6220     IRBuilder<> Builder(MemoryInst);
6221 
6222     // Emit a vector GEP with a scalar pointer and all 0s vector index.
6223     Type *ScalarIndexTy = DL->getIndexType(V->getType()->getScalarType());
6224     auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
6225     Type *ScalarTy;
6226     if (cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
6227         Intrinsic::masked_gather) {
6228       ScalarTy = MemoryInst->getType()->getScalarType();
6229     } else {
6230       assert(cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
6231              Intrinsic::masked_scatter);
6232       ScalarTy = MemoryInst->getOperand(0)->getType()->getScalarType();
6233     }
6234     NewAddr = Builder.CreateGEP(ScalarTy, V, Constant::getNullValue(IndexTy));
6235   } else {
6236     // Constant; SelectionDAGBuilder knows to check if it's a splat.
6237     return false;
6238   }
6239 
6240   MemoryInst->replaceUsesOfWith(Ptr, NewAddr);
6241 
6242   // If we have no uses, recursively delete the value and all dead instructions
6243   // using it.
6244   if (Ptr->use_empty())
6245     RecursivelyDeleteTriviallyDeadInstructions(
6246         Ptr, TLInfo, nullptr,
6247         [&](Value *V) { removeAllAssertingVHReferences(V); });
6248 
6249   return true;
6250 }
6251 
6252 /// If there are any memory operands, use optimizeMemoryInst to sink their
6253 /// address computation into the block when possible / profitable.
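///
/// For example (sketch): for an asm operand with an indirect memory
/// constraint, e.g.
///   call void asm "...", "=*m"(ptr elementtype(i32) %addr)
/// the computation feeding %addr is a candidate for sinking next to the call.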
6254 bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
6255   bool MadeChange = false;
6256 
6257   const TargetRegisterInfo *TRI =
6258       TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo();
6259   TargetLowering::AsmOperandInfoVector TargetConstraints =
6260       TLI->ParseConstraints(*DL, TRI, *CS);
6261   unsigned ArgNo = 0;
6262   for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
6263     // Compute the constraint code and ConstraintType to use.
6264     TLI->ComputeConstraintToUse(OpInfo, SDValue());
6265 
6266     // TODO: Also handle C_Address?
6267     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
6268         OpInfo.isIndirect) {
6269       Value *OpVal = CS->getArgOperand(ArgNo++);
6270       MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
6271     } else if (OpInfo.Type == InlineAsm::isInput)
6272       ArgNo++;
6273   }
6274 
6275   return MadeChange;
6276 }
6277 
6278 /// Check if all the uses of \p Val are equivalent (or free) zero or
6279 /// sign extensions.
6280 static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) {
6281   assert(!Val->use_empty() && "Input must have at least one use");
6282   const Instruction *FirstUser = cast<Instruction>(*Val->user_begin());
6283   bool IsSExt = isa<SExtInst>(FirstUser);
6284   Type *ExtTy = FirstUser->getType();
6285   for (const User *U : Val->users()) {
6286     const Instruction *UI = cast<Instruction>(U);
6287     if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
6288       return false;
6289     Type *CurTy = UI->getType();
6290     // Same input and output types: Same instruction after CSE.
6291     if (CurTy == ExtTy)
6292       continue;
6293 
6294     // If IsSExt is true, we are in this situation:
6295     // a = Val
6296     // b = sext ty1 a to ty2
6297     // c = sext ty1 a to ty3
6298     // Assuming ty2 is shorter than ty3, this could be turned into:
6299     // a = Val
6300     // b = sext ty1 a to ty2
6301     // c = sext ty2 b to ty3
6302     // However, the last sext is not free.
6303     if (IsSExt)
6304       return false;
6305 
6306     // This is a ZExt; maybe extending from one type to the other is free.
6307     // In that case, we would not account for a different use.
6308     Type *NarrowTy;
6309     Type *LargeTy;
6310     if (ExtTy->getScalarType()->getIntegerBitWidth() >
6311         CurTy->getScalarType()->getIntegerBitWidth()) {
6312       NarrowTy = CurTy;
6313       LargeTy = ExtTy;
6314     } else {
6315       NarrowTy = ExtTy;
6316       LargeTy = CurTy;
6317     }
6318 
6319     if (!TLI.isZExtFree(NarrowTy, LargeTy))
6320       return false;
6321   }
6322   // All uses are the same or can be derived from one another for free.
6323   return true;
6324 }
6325 
6326 /// Try to speculatively promote extensions in \p Exts and continue
6327 /// promoting through newly promoted operands recursively as far as doing so is
6328 /// profitable. Save the extensions profitably moved up in \p ProfitablyMovedExts.
6329 /// When some promotion happened, \p TPT contains the proper state to revert
6330 /// them.
6331 ///
6332 /// \return true if some promotion happened, false otherwise.
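///
/// For example (a sketch; the promotion helper only does this when it can
/// prove it is safe and profitable): promoting the sext through the add lets
/// it reach the load, where it may later be merged into an extending load:
///   %l = load i16, ptr %p
///   %a = add nsw i16 %l, 1
///   %e = sext i16 %a to i32
/// becomes
///   %l  = load i16, ptr %p
///   %pl = sext i16 %l to i32
///   %e  = add nsw i32 %pl, 1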
6333 bool CodeGenPrepare::tryToPromoteExts(
6334     TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts,
6335     SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
6336     unsigned CreatedInstsCost) {
6337   bool Promoted = false;
6338 
6339   // Iterate over all the extensions to try to promote them.
6340   for (auto *I : Exts) {
6341     // Early check if we directly have ext(load).
6342     if (isa<LoadInst>(I->getOperand(0))) {
6343       ProfitablyMovedExts.push_back(I);
6344       continue;
6345     }
6346 
6347     // Check whether or not we want to do any promotion.  The reason we have
6348     // this check inside the for loop is to catch the case where an extension
6349     // is directly fed by a load, because in that case the extension can be moved
6350     // up without any promotion on its operands.
6351     if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion)
6352       return false;
6353 
6354     // Get the action to perform the promotion.
6355     TypePromotionHelper::Action TPH =
6356         TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts);
6357     // Check if we can promote.
6358     if (!TPH) {
6359       // Save the current extension as we cannot move up through its operand.
6360       ProfitablyMovedExts.push_back(I);
6361       continue;
6362     }
6363 
6364     // Save the current state.
6365     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
6366         TPT.getRestorationPoint();
6367     SmallVector<Instruction *, 4> NewExts;
6368     unsigned NewCreatedInstsCost = 0;
6369     unsigned ExtCost = !TLI->isExtFree(I);
6370     // Promote.
6371     Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
6372                              &NewExts, nullptr, *TLI);
6373     assert(PromotedVal &&
6374            "TypePromotionHelper should have filtered out those cases");
6375 
6376     // We would be able to merge only one extension into a load.
6377     // Therefore, if we have more than 1 new extension we heuristically
6378     // cut this search path, because it means we degrade the code quality.
6379     // With exactly 2, the transformation is neutral, because we will merge
6380     // one extension but leave one. However, we optimistically keep going,
6381     // because the new extension may be removed too. Also avoid replacing a
6382     // single free extension with multiple extensions, as this increases the
6383     // number of IR instructions while not providing any savings.
6384     long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
6385     // FIXME: It would be possible to propagate a negative value instead of
6386     // conservatively ceiling it to 0.
6387     TotalCreatedInstsCost =
6388         std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
6389     if (!StressExtLdPromotion &&
6390         (TotalCreatedInstsCost > 1 ||
6391          !isPromotedInstructionLegal(*TLI, *DL, PromotedVal) ||
6392          (ExtCost == 0 && NewExts.size() > 1))) {
6393       // This promotion is not profitable, rollback to the previous state, and
6394       // save the current extension in ProfitablyMovedExts as the latest
6395       // speculative promotion turned out to be unprofitable.
6396       TPT.rollback(LastKnownGood);
6397       ProfitablyMovedExts.push_back(I);
6398       continue;
6399     }
6400     // Continue promoting NewExts as far as doing so is profitable.
6401     SmallVector<Instruction *, 2> NewlyMovedExts;
6402     (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
6403     bool NewPromoted = false;
6404     for (auto *ExtInst : NewlyMovedExts) {
6405       Instruction *MovedExt = cast<Instruction>(ExtInst);
6406       Value *ExtOperand = MovedExt->getOperand(0);
6407       // If we have reached a load, we need this extra profitability check
6408       // as it could potentially be merged into an ext(load).
6409       if (isa<LoadInst>(ExtOperand) &&
6410           !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
6411             (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
6412         continue;
6413 
6414       ProfitablyMovedExts.push_back(MovedExt);
6415       NewPromoted = true;
6416     }
6417 
6418     // If none of the speculative promotions for NewExts is profitable, roll back
6419     // and save the current extension (I) as the last profitable extension.
6420     if (!NewPromoted) {
6421       TPT.rollback(LastKnownGood);
6422       ProfitablyMovedExts.push_back(I);
6423       continue;
6424     }
6425     // The promotion is profitable.
6426     Promoted = true;
6427   }
6428   return Promoted;
6429 }
6430 
6431 /// Merge redundant sexts when one dominates the other.
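///
/// For example (sketch): given two identical extensions of the same value,
///   %s1 = sext i32 %x to i64   ; dominates %s2
///   ...
///   %s2 = sext i32 %x to i64
/// all uses of %s2 are redirected to %s1 and %s2 is removed.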
6432 bool CodeGenPrepare::mergeSExts(Function &F) {
6433   bool Changed = false;
6434   for (auto &Entry : ValToSExtendedUses) {
6435     SExts &Insts = Entry.second;
6436     SExts CurPts;
6437     for (Instruction *Inst : Insts) {
6438       if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
6439           Inst->getOperand(0) != Entry.first)
6440         continue;
6441       bool inserted = false;
6442       for (auto &Pt : CurPts) {
6443         if (getDT(F).dominates(Inst, Pt)) {
6444           replaceAllUsesWith(Pt, Inst, FreshBBs, IsHugeFunc);
6445           RemovedInsts.insert(Pt);
6446           Pt->removeFromParent();
6447           Pt = Inst;
6448           inserted = true;
6449           Changed = true;
6450           break;
6451         }
6452         if (!getDT(F).dominates(Pt, Inst))
6453           // Give up if we would need to merge in a common dominator, as
6454           // experiments show it is not profitable.
6455           continue;
6456         replaceAllUsesWith(Inst, Pt, FreshBBs, IsHugeFunc);
6457         RemovedInsts.insert(Inst);
6458         Inst->removeFromParent();
6459         inserted = true;
6460         Changed = true;
6461         break;
6462       }
6463       if (!inserted)
6464         CurPts.push_back(Inst);
6465     }
6466   }
6467   return Changed;
6468 }
6469 
6470 // Split large data structures so that the GEPs accessing them can have
6471 // smaller offsets, allowing them to be sunk to the same blocks as their users.
6472 // For example, a large struct starting from %base is split into two parts
6473 // where the second part starts from %new_base.
6474 //
6475 // Before:
6476 // BB0:
6477 //   %base     =
6478 //
6479 // BB1:
6480 //   %gep0     = gep %base, off0
6481 //   %gep1     = gep %base, off1
6482 //   %gep2     = gep %base, off2
6483 //
6484 // BB2:
6485 //   %load1    = load %gep0
6486 //   %load2    = load %gep1
6487 //   %load3    = load %gep2
6488 //
6489 // After:
6490 // BB0:
6491 //   %base     =
6492 //   %new_base = gep %base, off0
6493 //
6494 // BB1:
6495 //   %new_gep0 = %new_base
6496 //   %new_gep1 = gep %new_base, off1 - off0
6497 //   %new_gep2 = gep %new_base, off2 - off0
6498 //
6499 // BB2:
6500 //   %load1    = load i32, i32* %new_gep0
6501 //   %load2    = load i32, i32* %new_gep1
6502 //   %load3    = load i32, i32* %new_gep2
6503 //
6504 // %new_gep1 and %new_gep2 can now be sunk to BB2 after the splitting because
6505 // their offsets are small enough to fit into the addressing mode.
6506 bool CodeGenPrepare::splitLargeGEPOffsets() {
6507   bool Changed = false;
6508   for (auto &Entry : LargeOffsetGEPMap) {
6509     Value *OldBase = Entry.first;
6510     SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>>
6511         &LargeOffsetGEPs = Entry.second;
6512     auto compareGEPOffset =
6513         [&](const std::pair<GetElementPtrInst *, int64_t> &LHS,
6514             const std::pair<GetElementPtrInst *, int64_t> &RHS) {
6515           if (LHS.first == RHS.first)
6516             return false;
6517           if (LHS.second != RHS.second)
6518             return LHS.second < RHS.second;
6519           return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first];
6520         };
6521     // Sorting all the GEPs of the same data structures based on the offsets.
6522     llvm::sort(LargeOffsetGEPs, compareGEPOffset);
6523     LargeOffsetGEPs.erase(llvm::unique(LargeOffsetGEPs), LargeOffsetGEPs.end());
6524     // Skip if all the GEPs have the same offsets.
6525     if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second)
6526       continue;
6527     GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first;
6528     int64_t BaseOffset = LargeOffsetGEPs.begin()->second;
6529     Value *NewBaseGEP = nullptr;
6530 
6531     auto createNewBase = [&](int64_t BaseOffset, Value *OldBase,
6532                              GetElementPtrInst *GEP) {
6533       LLVMContext &Ctx = GEP->getContext();
6534       Type *PtrIdxTy = DL->getIndexType(GEP->getType());
6535       Type *I8PtrTy =
6536           PointerType::get(Ctx, GEP->getType()->getPointerAddressSpace());
6537 
6538       BasicBlock::iterator NewBaseInsertPt;
6539       BasicBlock *NewBaseInsertBB;
6540       if (auto *BaseI = dyn_cast<Instruction>(OldBase)) {
6541         // If the base of the struct is an instruction, the new base will be
6542         // inserted close to it.
6543         NewBaseInsertBB = BaseI->getParent();
6544         if (isa<PHINode>(BaseI))
6545           NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
6546         else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) {
6547           NewBaseInsertBB =
6548               SplitEdge(NewBaseInsertBB, Invoke->getNormalDest(), DT.get(), LI);
6549           NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
6550         } else
6551           NewBaseInsertPt = std::next(BaseI->getIterator());
6552       } else {
6553         // If the current base is an argument or global value, the new base
6554         // will be inserted into the entry block.
6555         NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock();
6556         NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
6557       }
6558       IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
6559       // Create a new base.
6560       Value *BaseIndex = ConstantInt::get(PtrIdxTy, BaseOffset);
6561       NewBaseGEP = OldBase;
6562       if (NewBaseGEP->getType() != I8PtrTy)
6563         NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy);
6564       NewBaseGEP =
6565           NewBaseBuilder.CreatePtrAdd(NewBaseGEP, BaseIndex, "splitgep");
6566       NewGEPBases.insert(NewBaseGEP);
6567       return;
6568     };
6569 
6570     // Check whether all the offsets can be encoded with the preferred common base.
6571     if (int64_t PreferBase = TLI->getPreferredLargeGEPBaseOffset(
6572             LargeOffsetGEPs.front().second, LargeOffsetGEPs.back().second)) {
6573       BaseOffset = PreferBase;
6574       // Create a new base now if the offset of the BaseGEP can be encoded in a
6575       // single instruction.
6576       createNewBase(BaseOffset, OldBase, BaseGEP);
6577     }
6578 
6579     auto *LargeOffsetGEP = LargeOffsetGEPs.begin();
6580     while (LargeOffsetGEP != LargeOffsetGEPs.end()) {
6581       GetElementPtrInst *GEP = LargeOffsetGEP->first;
6582       int64_t Offset = LargeOffsetGEP->second;
6583       if (Offset != BaseOffset) {
6584         TargetLowering::AddrMode AddrMode;
6585         AddrMode.HasBaseReg = true;
6586         AddrMode.BaseOffs = Offset - BaseOffset;
6587         // The result type of the GEP might not be the type of the memory
6588         // access.
6589         if (!TLI->isLegalAddressingMode(*DL, AddrMode,
6590                                         GEP->getResultElementType(),
6591                                         GEP->getAddressSpace())) {
6592           // We need to create a new base if the offset to the current base is
6593           // too large to fit into the addressing mode. So, a very large struct
6594           // may be split into several parts.
6595           BaseGEP = GEP;
6596           BaseOffset = Offset;
6597           NewBaseGEP = nullptr;
6598         }
6599       }
6600 
6601       // Generate a new GEP to replace the current one.
6602       Type *PtrIdxTy = DL->getIndexType(GEP->getType());
6603 
6604       if (!NewBaseGEP) {
6605         // Create a new base if we don't have one yet.  Find the insertion
6606         // point for the new base first.
6607         createNewBase(BaseOffset, OldBase, GEP);
6608       }
6609 
6610       IRBuilder<> Builder(GEP);
6611       Value *NewGEP = NewBaseGEP;
6612       if (Offset != BaseOffset) {
6613         // Calculate the new offset for the new GEP.
6614         Value *Index = ConstantInt::get(PtrIdxTy, Offset - BaseOffset);
6615         NewGEP = Builder.CreatePtrAdd(NewBaseGEP, Index);
6616       }
6617       replaceAllUsesWith(GEP, NewGEP, FreshBBs, IsHugeFunc);
6618       LargeOffsetGEPID.erase(GEP);
6619       LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP);
6620       GEP->eraseFromParent();
6621       Changed = true;
6622     }
6623   }
6624   return Changed;
6625 }
6626 
6627 bool CodeGenPrepare::optimizePhiType(
6628     PHINode *I, SmallPtrSetImpl<PHINode *> &Visited,
6629     SmallPtrSetImpl<Instruction *> &DeletedInstrs) {
6630   // We are looking for a collection of interconnected phi nodes that together
6631   // only use loads/bitcasts and are used by stores/bitcasts, and the bitcasts
6632   // are of the same type. Convert the whole set of nodes to the type of the
6633   // bitcast.
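  // Illustrative sketch (hypothetical IR, assuming the target's
  // shouldConvertPhiType(i32, float) hook returns true):
  //   %bc0 = bitcast float %fa to i32
  //   %bc1 = bitcast float %fb to i32
  //   %phi = phi i32 [ %bc0, %bb0 ], [ %bc1, %bb1 ]
  //   %bc2 = bitcast i32 %phi to float
  //   store float %bc2, ptr %q
  // may be rewritten so the phi carries a float directly, deleting all three
  // bitcasts and avoiding cross-register-class copies.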
6634   Type *PhiTy = I->getType();
6635   Type *ConvertTy = nullptr;
6636   if (Visited.count(I) ||
6637       (!I->getType()->isIntegerTy() && !I->getType()->isFloatingPointTy()))
6638     return false;
6639 
6640   SmallVector<Instruction *, 4> Worklist;
6641   Worklist.push_back(cast<Instruction>(I));
6642   SmallPtrSet<PHINode *, 4> PhiNodes;
6643   SmallPtrSet<ConstantData *, 4> Constants;
6644   PhiNodes.insert(I);
6645   Visited.insert(I);
6646   SmallPtrSet<Instruction *, 4> Defs;
6647   SmallPtrSet<Instruction *, 4> Uses;
6648   // This works by adding extra bitcasts between loads/stores and removing
6649   // existing bitcasts. If we have a phi(bitcast(load)) or a store(bitcast(phi)),
6650   // we can get into a situation where we remove a bitcast in one iteration
6651   // just to add it again in the next. We need to ensure that at least one
6652   // bitcast we remove is anchored to something that will not change back.
6653   bool AnyAnchored = false;
6654 
6655   while (!Worklist.empty()) {
6656     Instruction *II = Worklist.pop_back_val();
6657 
6658     if (auto *Phi = dyn_cast<PHINode>(II)) {
6659       // Handle Defs, which might also be PHI's
6660       for (Value *V : Phi->incoming_values()) {
6661         if (auto *OpPhi = dyn_cast<PHINode>(V)) {
6662           if (!PhiNodes.count(OpPhi)) {
6663             if (!Visited.insert(OpPhi).second)
6664               return false;
6665             PhiNodes.insert(OpPhi);
6666             Worklist.push_back(OpPhi);
6667           }
6668         } else if (auto *OpLoad = dyn_cast<LoadInst>(V)) {
6669           if (!OpLoad->isSimple())
6670             return false;
6671           if (Defs.insert(OpLoad).second)
6672             Worklist.push_back(OpLoad);
6673         } else if (auto *OpEx = dyn_cast<ExtractElementInst>(V)) {
6674           if (Defs.insert(OpEx).second)
6675             Worklist.push_back(OpEx);
6676         } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
6677           if (!ConvertTy)
6678             ConvertTy = OpBC->getOperand(0)->getType();
6679           if (OpBC->getOperand(0)->getType() != ConvertTy)
6680             return false;
6681           if (Defs.insert(OpBC).second) {
6682             Worklist.push_back(OpBC);
6683             AnyAnchored |= !isa<LoadInst>(OpBC->getOperand(0)) &&
6684                            !isa<ExtractElementInst>(OpBC->getOperand(0));
6685           }
6686         } else if (auto *OpC = dyn_cast<ConstantData>(V))
6687           Constants.insert(OpC);
6688         else
6689           return false;
6690       }
6691     }
6692 
6693     // Handle uses which might also be phi's
6694     for (User *V : II->users()) {
6695       if (auto *OpPhi = dyn_cast<PHINode>(V)) {
6696         if (!PhiNodes.count(OpPhi)) {
6697           if (Visited.count(OpPhi))
6698             return false;
6699           PhiNodes.insert(OpPhi);
6700           Visited.insert(OpPhi);
6701           Worklist.push_back(OpPhi);
6702         }
6703       } else if (auto *OpStore = dyn_cast<StoreInst>(V)) {
6704         if (!OpStore->isSimple() || OpStore->getOperand(0) != II)
6705           return false;
6706         Uses.insert(OpStore);
6707       } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
6708         if (!ConvertTy)
6709           ConvertTy = OpBC->getType();
6710         if (OpBC->getType() != ConvertTy)
6711           return false;
6712         Uses.insert(OpBC);
6713         AnyAnchored |=
6714             any_of(OpBC->users(), [](User *U) { return !isa<StoreInst>(U); });
6715       } else {
6716         return false;
6717       }
6718     }
6719   }
6720 
6721   if (!ConvertTy || !AnyAnchored ||
6722       !TLI->shouldConvertPhiType(PhiTy, ConvertTy))
6723     return false;
6724 
6725   LLVM_DEBUG(dbgs() << "Converting " << *I << "\n  and connected nodes to "
6726                     << *ConvertTy << "\n");
6727 
6728   // Create all the new phi nodes of the new type, and bitcast any loads to the
6729   // correct type.
6730   ValueToValueMap ValMap;
6731   for (ConstantData *C : Constants)
6732     ValMap[C] = ConstantExpr::getBitCast(C, ConvertTy);
6733   for (Instruction *D : Defs) {
6734     if (isa<BitCastInst>(D)) {
6735       ValMap[D] = D->getOperand(0);
6736       DeletedInstrs.insert(D);
6737     } else {
6738       BasicBlock::iterator insertPt = std::next(D->getIterator());
6739       ValMap[D] = new BitCastInst(D, ConvertTy, D->getName() + ".bc", insertPt);
6740     }
6741   }
6742   for (PHINode *Phi : PhiNodes)
6743     ValMap[Phi] = PHINode::Create(ConvertTy, Phi->getNumIncomingValues(),
6744                                   Phi->getName() + ".tc", Phi->getIterator());
6745   // Pipe together all the PhiNodes.
6746   for (PHINode *Phi : PhiNodes) {
6747     PHINode *NewPhi = cast<PHINode>(ValMap[Phi]);
6748     for (int i = 0, e = Phi->getNumIncomingValues(); i < e; i++)
6749       NewPhi->addIncoming(ValMap[Phi->getIncomingValue(i)],
6750                           Phi->getIncomingBlock(i));
6751     Visited.insert(NewPhi);
6752   }
6753   // And finally pipe up the stores and bitcasts
6754   for (Instruction *U : Uses) {
6755     if (isa<BitCastInst>(U)) {
6756       DeletedInstrs.insert(U);
6757       replaceAllUsesWith(U, ValMap[U->getOperand(0)], FreshBBs, IsHugeFunc);
6758     } else {
6759       U->setOperand(0, new BitCastInst(ValMap[U->getOperand(0)], PhiTy, "bc",
6760                                        U->getIterator()));
6761     }
6762   }
6763 
6764   // Save the removed phis to be deleted later.
6765   for (PHINode *Phi : PhiNodes)
6766     DeletedInstrs.insert(Phi);
6767   return true;
6768 }
6769 
6770 bool CodeGenPrepare::optimizePhiTypes(Function &F) {
6771   if (!OptimizePhiTypes)
6772     return false;
6773 
6774   bool Changed = false;
6775   SmallPtrSet<PHINode *, 4> Visited;
6776   SmallPtrSet<Instruction *, 4> DeletedInstrs;
6777 
6778   // Attempt to optimize all the phis in the functions to the correct type.
6779   for (auto &BB : F)
6780     for (auto &Phi : BB.phis())
6781       Changed |= optimizePhiType(&Phi, Visited, DeletedInstrs);
6782 
6783   // Remove any old phi's that have been converted.
6784   for (auto *I : DeletedInstrs) {
6785     replaceAllUsesWith(I, PoisonValue::get(I->getType()), FreshBBs, IsHugeFunc);
6786     I->eraseFromParent();
6787   }
6788 
6789   return Changed;
6790 }
6791 
6792 /// Return true if an ext(load) can be formed from an extension in
6793 /// \p MovedExts.
6794 bool CodeGenPrepare::canFormExtLd(
6795     const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
6796     Instruction *&Inst, bool HasPromoted) {
6797   for (auto *MovedExtInst : MovedExts) {
6798     if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
6799       LI = cast<LoadInst>(MovedExtInst->getOperand(0));
6800       Inst = MovedExtInst;
6801       break;
6802     }
6803   }
6804   if (!LI)
6805     return false;
6806 
6807   // If they're already in the same block, there's nothing to do.
6808   // Make the cheap checks first if we did not promote.
6809   // If we promoted, we need to check if it is indeed profitable.
6810   if (!HasPromoted && LI->getParent() == Inst->getParent())
6811     return false;
6812 
6813   return TLI->isExtLoad(LI, Inst, *DL);
6814 }
6815 
6816 /// Move a zext or sext fed by a load into the same basic block as the load,
6817 /// unless conditions are unfavorable. This allows SelectionDAG to fold the
6818 /// extend into the load.
6819 ///
6820 /// E.g.,
6821 /// \code
6822 /// %ld = load i32* %addr
6823 /// %add = add nuw i32 %ld, 4
6824 /// %zext = zext i32 %add to i64
6825 /// \endcode
6826 /// =>
6827 /// \code
6828 /// %ld = load i32* %addr
6829 /// %zext = zext i32 %ld to i64
6830 /// %add = add nuw i64 %zext, 4
6831 /// \endcode
6832 /// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which
6833 /// allows us to match zext(load i32*) to i64.
6834 ///
6835 /// Also, try to promote the computations used to obtain a sign extended
6836 /// value used in memory accesses.
6837 /// E.g.,
6838 /// \code
6839 /// a = add nsw i32 b, 3
6840 /// d = sext i32 a to i64
6841 /// e = getelementptr ..., i64 d
6842 /// \endcode
6843 /// =>
6844 /// \code
6845 /// f = sext i32 b to i64
6846 /// a = add nsw i64 f, 3
6847 /// e = getelementptr ..., i64 a
6848 /// \endcode
6849 ///
6850 /// \p Inst[in/out] the extension may be modified during the process if some
6851 /// promotions apply.
6852 bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
6853   bool AllowPromotionWithoutCommonHeader = false;
6854   /// See if it is an interesting sext operation for the address type
6855   /// promotion before trying to promote it, e.g., the ones with the right
6856   /// type and used in memory accesses.
6857   bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
6858       *Inst, AllowPromotionWithoutCommonHeader);
6859   TypePromotionTransaction TPT(RemovedInsts);
6860   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
6861       TPT.getRestorationPoint();
6862   SmallVector<Instruction *, 1> Exts;
6863   SmallVector<Instruction *, 2> SpeculativelyMovedExts;
6864   Exts.push_back(Inst);
6865 
6866   bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);
6867 
6868   // Look for a load being extended.
6869   LoadInst *LI = nullptr;
6870   Instruction *ExtFedByLoad;
6871 
6872   // Try to promote a chain of computation if doing so allows us to form an
6873   // extended load.
6874   if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
6875     assert(LI && ExtFedByLoad && "Expect a valid load and extension");
6876     TPT.commit();
6877     // Move the extend into the same block as the load.
6878     ExtFedByLoad->moveAfter(LI);
6879     ++NumExtsMoved;
6880     Inst = ExtFedByLoad;
6881     return true;
6882   }
6883 
6884   // Continue promoting SExts if known as considerable depending on targets.
6885   if (ATPConsiderable &&
6886       performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
6887                                   HasPromoted, TPT, SpeculativelyMovedExts))
6888     return true;
6889 
6890   TPT.rollback(LastKnownGood);
6891   return false;
6892 }
6893 
6894 // Perform address type promotion if doing so is profitable.
6895 // If AllowPromotionWithoutCommonHeader == false, we should find other sext
6896 // instructions that sign extended the same initial value. However, if
6897 // AllowPromotionWithoutCommonHeader == true, we expect that promoting the
6898 // extension by itself is already profitable.
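// For example (illustrative), if two chains %d1 = sext i32 %a to i64 and
// %d2 = sext i32 %a to i64 feed different GEPs, both chains share the header
// %a, so deferring the first and promoting both once the second is seen is
// expected to pay off.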
6899 bool CodeGenPrepare::performAddressTypePromotion(
6900     Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
6901     bool HasPromoted, TypePromotionTransaction &TPT,
6902     SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) {
6903   bool Promoted = false;
6904   SmallPtrSet<Instruction *, 1> UnhandledExts;
6905   bool AllSeenFirst = true;
6906   for (auto *I : SpeculativelyMovedExts) {
6907     Value *HeadOfChain = I->getOperand(0);
6908     DenseMap<Value *, Instruction *>::iterator AlreadySeen =
6909         SeenChainsForSExt.find(HeadOfChain);
6910     // If there is an unhandled SExt which has the same header, try to promote
6911     // it as well.
6912     if (AlreadySeen != SeenChainsForSExt.end()) {
6913       if (AlreadySeen->second != nullptr)
6914         UnhandledExts.insert(AlreadySeen->second);
6915       AllSeenFirst = false;
6916     }
6917   }
6918 
6919   if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader &&
6920                         SpeculativelyMovedExts.size() == 1)) {
6921     TPT.commit();
6922     if (HasPromoted)
6923       Promoted = true;
6924     for (auto *I : SpeculativelyMovedExts) {
6925       Value *HeadOfChain = I->getOperand(0);
6926       SeenChainsForSExt[HeadOfChain] = nullptr;
6927       ValToSExtendedUses[HeadOfChain].push_back(I);
6928     }
6929     // Update Inst as the promotion happened.
6930     Inst = SpeculativelyMovedExts.pop_back_val();
6931   } else {
6932     // This is the first chain visited from this header; keep the current chain
6933     // as unhandled. Defer promoting it until we encounter another SExt
6934     // chain derived from the same header.
6935     for (auto *I : SpeculativelyMovedExts) {
6936       Value *HeadOfChain = I->getOperand(0);
6937       SeenChainsForSExt[HeadOfChain] = Inst;
6938     }
6939     return false;
6940   }
6941 
6942   if (!AllSeenFirst && !UnhandledExts.empty())
6943     for (auto *VisitedSExt : UnhandledExts) {
6944       if (RemovedInsts.count(VisitedSExt))
6945         continue;
6946       TypePromotionTransaction TPT(RemovedInsts);
6947       SmallVector<Instruction *, 1> Exts;
6948       SmallVector<Instruction *, 2> Chains;
6949       Exts.push_back(VisitedSExt);
6950       bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains);
6951       TPT.commit();
6952       if (HasPromoted)
6953         Promoted = true;
6954       for (auto *I : Chains) {
6955         Value *HeadOfChain = I->getOperand(0);
6956         // Mark this as handled.
6957         SeenChainsForSExt[HeadOfChain] = nullptr;
6958         ValToSExtendedUses[HeadOfChain].push_back(I);
6959       }
6960     }
6961   return Promoted;
6962 }
6963 
6964 bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
6965   BasicBlock *DefBB = I->getParent();
6966 
6967   // If the result of a {s|z}ext and its source are both live out, rewrite all
6968   // other uses of the source with the result of the extension.
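  // Illustrative sketch (hypothetical IR, assuming the target reports the
  // truncation as free): if %src = add i32 ... and %ext = zext i32 %src to i64
  // are both used in another block, a use of %src there can be rewritten as
  //   %t = trunc i64 %ext to i32
  // so only %ext has to stay live across the block boundary.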
6969   Value *Src = I->getOperand(0);
6970   if (Src->hasOneUse())
6971     return false;
6972 
6973   // Only do this xform if truncating is free.
6974   if (!TLI->isTruncateFree(I->getType(), Src->getType()))
6975     return false;
6976 
6977   // Only safe to perform the optimization if the source is also defined in
6978   // this block.
6979   if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
6980     return false;
6981 
6982   bool DefIsLiveOut = false;
6983   for (User *U : I->users()) {
6984     Instruction *UI = cast<Instruction>(U);
6985 
6986     // Figure out which BB this ext is used in.
6987     BasicBlock *UserBB = UI->getParent();
6988     if (UserBB == DefBB)
6989       continue;
6990     DefIsLiveOut = true;
6991     break;
6992   }
6993   if (!DefIsLiveOut)
6994     return false;
6995 
6996   // Make sure none of the uses are PHI nodes.
6997   for (User *U : Src->users()) {
6998     Instruction *UI = cast<Instruction>(U);
6999     BasicBlock *UserBB = UI->getParent();
7000     if (UserBB == DefBB)
7001       continue;
7002     // Be conservative. We don't want this xform to end up introducing
7003     // reloads just before load / store instructions.
7004     if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
7005       return false;
7006   }
7007 
7008   // InsertedTruncs - Insert at most one trunc in each block.
7009   DenseMap<BasicBlock *, Instruction *> InsertedTruncs;
7010 
7011   bool MadeChange = false;
7012   for (Use &U : Src->uses()) {
7013     Instruction *User = cast<Instruction>(U.getUser());
7014 
7015     // Figure out which BB this ext is used in.
7016     BasicBlock *UserBB = User->getParent();
7017     if (UserBB == DefBB)
7018       continue;
7019 
7020     // Both src and def are live in this block. Rewrite the use.
7021     Instruction *&InsertedTrunc = InsertedTruncs[UserBB];
7022 
7023     if (!InsertedTrunc) {
7024       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
7025       assert(InsertPt != UserBB->end());
7026       InsertedTrunc = new TruncInst(I, Src->getType(), "");
7027       InsertedTrunc->insertBefore(*UserBB, InsertPt);
7028       InsertedInsts.insert(InsertedTrunc);
7029     }
7030 
7031     // Replace a use of the {s|z}ext source with a use of the result.
7032     U = InsertedTrunc;
7033     ++NumExtUses;
7034     MadeChange = true;
7035   }
7036 
7037   return MadeChange;
7038 }
7039 
7040 // Find loads whose uses only use some of the loaded value's bits.  Add an "and"
7041 // just after the load if the target can fold this into one extload instruction,
7042 // with the hope of eliminating some of the other later "and" instructions using
7043 // the loaded value.  "and"s that are made trivially redundant by the insertion
7044 // of the new "and" are removed by this function, while others (e.g. those whose
7045 // path from the load goes through a phi) are left for isel to potentially
7046 // remove.
7047 //
7048 // For example:
7049 //
7050 // b0:
7051 //   x = load i32
7052 //   ...
7053 // b1:
7054 //   y = and x, 0xff
7055 //   z = use y
7056 //
7057 // becomes:
7058 //
7059 // b0:
7060 //   x = load i32
7061 //   x' = and x, 0xff
7062 //   ...
7063 // b1:
7064 //   z = use x'
7065 //
7066 // whereas:
7067 //
7068 // b0:
7069 //   x1 = load i32
7070 //   ...
7071 // b1:
7072 //   x2 = load i32
7073 //   ...
7074 // b2:
7075 //   x = phi x1, x2
7076 //   y = and x, 0xff
7077 //
7078 // becomes (after a call to optimizeLoadExt for each load):
7079 //
7080 // b0:
7081 //   x1 = load i32
7082 //   x1' = and x1, 0xff
7083 //   ...
7084 // b1:
7085 //   x2 = load i32
7086 //   x2' = and x2, 0xff
7087 //   ...
7088 // b2:
7089 //   x = phi x1', x2'
7090 //   y = and x, 0xff
7091 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
7092   if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy())
7093     return false;
7094 
7095   // Skip loads we've already transformed.
7096   if (Load->hasOneUse() &&
7097       InsertedInsts.count(cast<Instruction>(*Load->user_begin())))
7098     return false;
7099 
7100   // Look at all uses of Load, looking through phis, to determine how many bits
7101   // of the loaded value are needed.
7102   SmallVector<Instruction *, 8> WorkList;
7103   SmallPtrSet<Instruction *, 16> Visited;
7104   SmallVector<Instruction *, 8> AndsToMaybeRemove;
7105   for (auto *U : Load->users())
7106     WorkList.push_back(cast<Instruction>(U));
7107 
7108   EVT LoadResultVT = TLI->getValueType(*DL, Load->getType());
7109   unsigned BitWidth = LoadResultVT.getSizeInBits();
7110   // If the BitWidth is 0, do not try to optimize the type
7111   if (BitWidth == 0)
7112     return false;
7113 
7114   APInt DemandBits(BitWidth, 0);
7115   APInt WidestAndBits(BitWidth, 0);
7116 
7117   while (!WorkList.empty()) {
7118     Instruction *I = WorkList.pop_back_val();
7119 
7120     // Break use-def graph loops.
7121     if (!Visited.insert(I).second)
7122       continue;
7123 
7124     // For a PHI node, push all of its users.
7125     if (auto *Phi = dyn_cast<PHINode>(I)) {
7126       for (auto *U : Phi->users())
7127         WorkList.push_back(cast<Instruction>(U));
7128       continue;
7129     }
7130 
7131     switch (I->getOpcode()) {
7132     case Instruction::And: {
7133       auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1));
7134       if (!AndC)
7135         return false;
7136       APInt AndBits = AndC->getValue();
7137       DemandBits |= AndBits;
7138       // Keep track of the widest 'and' mask we see.
7139       if (AndBits.ugt(WidestAndBits))
7140         WidestAndBits = AndBits;
7141       if (AndBits == WidestAndBits && I->getOperand(0) == Load)
7142         AndsToMaybeRemove.push_back(I);
7143       break;
7144     }
7145 
7146     case Instruction::Shl: {
7147       auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1));
7148       if (!ShlC)
7149         return false;
7150       uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
7151       DemandBits.setLowBits(BitWidth - ShiftAmt);
7152       break;
7153     }
7154 
7155     case Instruction::Trunc: {
7156       EVT TruncVT = TLI->getValueType(*DL, I->getType());
7157       unsigned TruncBitWidth = TruncVT.getSizeInBits();
7158       DemandBits.setLowBits(TruncBitWidth);
7159       break;
7160     }
7161 
7162     default:
7163       return false;
7164     }
7165   }
7166 
7167   uint32_t ActiveBits = DemandBits.getActiveBits();
7168   // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the
7169   // target even if isLoadExtLegal says an i1 EXTLOAD is valid.  For example,
7170   // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but
7171   // (and (load x) 1) is not matched as a single instruction, rather as a LDR
7172   // followed by an AND.
7173   // TODO: Look into removing this restriction by fixing backends to either
7174   // return false for isLoadExtLegal for i1 or have them select this pattern to
7175   // a single instruction.
7176   //
7177   // Also avoid hoisting if we didn't see any ands with the exact DemandBits
7178   // mask, since these are the only ands that will be removed by isel.
7179   if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) ||
7180       WidestAndBits != DemandBits)
7181     return false;
7182 
7183   LLVMContext &Ctx = Load->getType()->getContext();
7184   Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits);
7185   EVT TruncVT = TLI->getValueType(*DL, TruncTy);
7186 
7187   // Reject cases that won't be matched as extloads.
7188   if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() ||
7189       !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT))
7190     return false;
7191 
7192   IRBuilder<> Builder(Load->getNextNonDebugInstruction());
7193   auto *NewAnd = cast<Instruction>(
7194       Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits)));
7195   // Mark this instruction as "inserted by CGP", so that other
7196   // optimizations don't touch it.
7197   InsertedInsts.insert(NewAnd);
7198 
7199   // Replace all uses of load with new and (except for the use of load in the
7200   // new and itself).
7201   replaceAllUsesWith(Load, NewAnd, FreshBBs, IsHugeFunc);
7202   NewAnd->setOperand(0, Load);
7203 
7204   // Remove any and instructions that are now redundant.
7205   for (auto *And : AndsToMaybeRemove)
7206     // Check that the and mask is the same as the one we decided to put on the
7207     // new and.
7208     if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) {
7209       replaceAllUsesWith(And, NewAnd, FreshBBs, IsHugeFunc);
7210       if (&*CurInstIterator == And)
7211         CurInstIterator = std::next(And->getIterator());
7212       And->eraseFromParent();
7213       ++NumAndUses;
7214     }
7215 
7216   ++NumAndsAdded;
7217   return true;
7218 }
7219 
7220 /// Check if V (an operand of a select instruction) is an expensive instruction
7221 /// that is only used once.
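/// For example (illustrative), on targets that report fdiv as expensive to
/// speculate, a single-use fdiv feeding only one arm of a select is the kind
/// of operand this helper flags, since it is worth sinking behind a branch so
/// it does not execute when the other arm is chosen.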
7222 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) {
7223   auto *I = dyn_cast<Instruction>(V);
7224   // If it's safe to speculatively execute, then it should not have side
7225   // effects; therefore, it's safe to sink and possibly *not* execute.
7226   return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) &&
7227          TTI->isExpensiveToSpeculativelyExecute(I);
7228 }
7229 
7230 /// Returns true if a SelectInst should be turned into an explicit branch.
7231 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI,
7232                                                 const TargetLowering *TLI,
7233                                                 SelectInst *SI) {
7234   // If even a predictable select is cheap, then a branch can't be cheaper.
7235   if (!TLI->isPredictableSelectExpensive())
7236     return false;
7237 
7238   // FIXME: This should use the same heuristics as IfConversion to determine
7239   // whether a select is better represented as a branch.
7240 
7241   // If metadata tells us that the select condition is obviously predictable,
7242   // then we want to replace the select with a branch.
7243   uint64_t TrueWeight, FalseWeight;
7244   if (extractBranchWeights(*SI, TrueWeight, FalseWeight)) {
7245     uint64_t Max = std::max(TrueWeight, FalseWeight);
7246     uint64_t Sum = TrueWeight + FalseWeight;
7247     if (Sum != 0) {
7248       auto Probability = BranchProbability::getBranchProbability(Max, Sum);
7249       if (Probability > TTI->getPredictableBranchThreshold())
7250         return true;
7251     }
7252   }
7253 
7254   CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
7255 
7256   // If a branch is predictable, an out-of-order CPU can avoid blocking on its
7257   // comparison condition. If the compare has more than one use, there's
7258   // probably another cmov or setcc around, so it's not worth emitting a branch.
7259   if (!Cmp || !Cmp->hasOneUse())
7260     return false;
7261 
7262   // If either operand of the select is expensive and only needed on one side
7263   // of the select, we should form a branch.
7264   if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
7265       sinkSelectOperand(TTI, SI->getFalseValue()))
7266     return true;
7267 
7268   return false;
7269 }
7270 
7271 /// If \p isTrue is true, return the true value of \p SI, otherwise return
7272 /// false value of \p SI. If the true/false value of \p SI is defined by any
7273 /// select instructions in \p Selects, look through the defining select
7274 /// instruction until the true/false value is not defined in \p Selects.
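/// For example (illustrative): with %s1 = select i1 %c, i32 %a, i32 %b and
/// %s2 = select i1 %c, i32 %s1, i32 %d, and both selects in \p Selects,
/// requesting the true value of %s2 looks through %s1 and returns %a.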
7275 static Value *
7276 getTrueOrFalseValue(SelectInst *SI, bool isTrue,
7277                     const SmallPtrSet<const Instruction *, 2> &Selects) {
7278   Value *V = nullptr;
7279 
7280   for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI);
7281        DefSI = dyn_cast<SelectInst>(V)) {
7282     assert(DefSI->getCondition() == SI->getCondition() &&
7283            "The condition of DefSI does not match with SI");
7284     V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
7285   }
7286 
7287   assert(V && "Failed to get select true/false value");
7288   return V;
7289 }
7290 
7291 bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) {
7292   assert(Shift->isShift() && "Expected a shift");
7293 
7294   // If this is (1) a vector shift, (2) shifts by scalars are cheaper than
7295   // general vector shifts, and (3) the shift amount is a select-of-splatted
7296   // values, hoist the shifts before the select:
7297   //   shift Op0, (select Cond, TVal, FVal) -->
7298   //   select Cond, (shift Op0, TVal), (shift Op0, FVal)
7299   //
7300   // This is inverting a generic IR transform when we know that the cost of a
7301   // general vector shift is more than the cost of 2 shift-by-scalars.
7302   // We can't do this effectively in SDAG because we may not be able to
7303   // determine if the select operands are splats from within a basic block.
7304   Type *Ty = Shift->getType();
7305   if (!Ty->isVectorTy() || !TTI->isVectorShiftByScalarCheap(Ty))
7306     return false;
7307   Value *Cond, *TVal, *FVal;
7308   if (!match(Shift->getOperand(1),
7309              m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
7310     return false;
7311   if (!isSplatValue(TVal) || !isSplatValue(FVal))
7312     return false;
7313 
7314   IRBuilder<> Builder(Shift);
7315   BinaryOperator::BinaryOps Opcode = Shift->getOpcode();
7316   Value *NewTVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), TVal);
7317   Value *NewFVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), FVal);
7318   Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
7319   replaceAllUsesWith(Shift, NewSel, FreshBBs, IsHugeFunc);
7320   Shift->eraseFromParent();
7321   return true;
7322 }
7323 
7324 bool CodeGenPrepare::optimizeFunnelShift(IntrinsicInst *Fsh) {
7325   Intrinsic::ID Opcode = Fsh->getIntrinsicID();
7326   assert((Opcode == Intrinsic::fshl || Opcode == Intrinsic::fshr) &&
7327          "Expected a funnel shift");
7328 
7329   // If this is (1) a vector funnel shift, (2) shifts by scalars are cheaper
7330   // than general vector shifts, and (3) the shift amount is select-of-splatted
7331   // values, hoist the funnel shifts before the select:
7332   //   fsh Op0, Op1, (select Cond, TVal, FVal) -->
7333   //   select Cond, (fsh Op0, Op1, TVal), (fsh Op0, Op1, FVal)
7334   //
7335   // This is inverting a generic IR transform when we know that the cost of a
7336   // general vector shift is more than the cost of 2 shift-by-scalars.
7337   // We can't do this effectively in SDAG because we may not be able to
7338   // determine if the select operands are splats from within a basic block.
7339   Type *Ty = Fsh->getType();
7340   if (!Ty->isVectorTy() || !TTI->isVectorShiftByScalarCheap(Ty))
7341     return false;
7342   Value *Cond, *TVal, *FVal;
7343   if (!match(Fsh->getOperand(2),
7344              m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
7345     return false;
7346   if (!isSplatValue(TVal) || !isSplatValue(FVal))
7347     return false;
7348 
7349   IRBuilder<> Builder(Fsh);
7350   Value *X = Fsh->getOperand(0), *Y = Fsh->getOperand(1);
7351   Value *NewTVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, TVal});
7352   Value *NewFVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, FVal});
7353   Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
7354   replaceAllUsesWith(Fsh, NewSel, FreshBBs, IsHugeFunc);
7355   Fsh->eraseFromParent();
7356   return true;
7357 }
7358 
7359 /// If we have a SelectInst that will likely profit from branch prediction,
7360 /// turn it into a branch.
7361 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
7362   if (DisableSelectToBranch)
7363     return false;
7364 
7365   // If the SelectOptimize pass is enabled, selects have already been optimized.
7366   if (!getCGPassBuilderOption().DisableSelectOptimize)
7367     return false;
7368 
7369   // Find all consecutive select instructions that share the same condition.
7370   SmallVector<SelectInst *, 2> ASI;
7371   ASI.push_back(SI);
7372   for (BasicBlock::iterator It = ++BasicBlock::iterator(SI);
7373        It != SI->getParent()->end(); ++It) {
7374     SelectInst *I = dyn_cast<SelectInst>(&*It);
7375     if (I && SI->getCondition() == I->getCondition()) {
7376       ASI.push_back(I);
7377     } else {
7378       break;
7379     }
7380   }
7381 
7382   SelectInst *LastSI = ASI.back();
7383   // Advance the current iterator to skip the rest of the select instructions,
7384   // because they will either all be lowered to branches or none will.
7385   CurInstIterator = std::next(LastSI->getIterator());
7386   // Examine debug-info attached to the consecutive select instructions. They
7387   // won't be individually optimized by optimizeInst, so we need to perform
7388   // DbgVariableRecord maintenance here instead.
7389   for (SelectInst *SI : ArrayRef(ASI).drop_front())
7390     fixupDbgVariableRecordsOnInst(*SI);
7391 
7392   bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);
7393 
7394   // Can we convert the 'select' to control flow?
7395   if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable))
7396     return false;
7397 
7398   TargetLowering::SelectSupportKind SelectKind;
7399   if (SI->getType()->isVectorTy())
7400     SelectKind = TargetLowering::ScalarCondVectorVal;
7401   else
7402     SelectKind = TargetLowering::ScalarValSelect;
7403 
7404   if (TLI->isSelectSupported(SelectKind) &&
7405       (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) || OptSize ||
7406        llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get())))
7407     return false;
7408 
7409   // The DominatorTree needs to be rebuilt by any consumers after this
7410   // transformation. We simply reset here rather than setting the ModifiedDT
7411   // flag to avoid restarting the function walk in runOnFunction for each
7412   // select optimized.
7413   DT.reset();
7414 
7415   // Transform a sequence like this:
7416   //    start:
7417   //       %cmp = cmp uge i32 %a, %b
7418   //       %sel = select i1 %cmp, i32 %c, i32 %d
7419   //
7420   // Into:
7421   //    start:
7422   //       %cmp = cmp uge i32 %a, %b
7423   //       %cmp.frozen = freeze %cmp
7424   //       br i1 %cmp.frozen, label %select.true, label %select.false
7425   //    select.true:
7426   //       br label %select.end
7427   //    select.false:
7428   //       br label %select.end
7429   //    select.end:
7430   //       %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ]
7431   //
7432   // %cmp should be frozen, otherwise it may introduce undefined behavior.
7433   // In addition, we may sink instructions that produce %c or %d from
7434   // the entry block into the destination(s) of the new branch.
7435   // If the true or false blocks do not contain a sunken instruction, that
7436   // block and its branch may be optimized away. In that case, one side of the
7437   // first branch will point directly to select.end, and the corresponding PHI
7438   // predecessor block will be the start block.
7439 
7440   // Collect values that go on the true side and the values that go on the false
7441   // side.
7442   SmallVector<Instruction *> TrueInstrs, FalseInstrs;
7443   for (SelectInst *SI : ASI) {
7444     if (Value *V = SI->getTrueValue(); sinkSelectOperand(TTI, V))
7445       TrueInstrs.push_back(cast<Instruction>(V));
7446     if (Value *V = SI->getFalseValue(); sinkSelectOperand(TTI, V))
7447       FalseInstrs.push_back(cast<Instruction>(V));
7448   }
7449 
7450   // Split the select block, according to how many (if any) values go on each
7451   // side.
7452   BasicBlock *StartBlock = SI->getParent();
7453   BasicBlock::iterator SplitPt = std::next(BasicBlock::iterator(LastSI));
7454   // We should split before any debug-info.
7455   SplitPt.setHeadBit(true);
7456 
7457   IRBuilder<> IB(SI);
7458   auto *CondFr = IB.CreateFreeze(SI->getCondition(), SI->getName() + ".frozen");
7459 
7460   BasicBlock *TrueBlock = nullptr;
7461   BasicBlock *FalseBlock = nullptr;
7462   BasicBlock *EndBlock = nullptr;
7463   BranchInst *TrueBranch = nullptr;
7464   BranchInst *FalseBranch = nullptr;
7465   if (TrueInstrs.size() == 0) {
7466     FalseBranch = cast<BranchInst>(SplitBlockAndInsertIfElse(
7467         CondFr, SplitPt, false, nullptr, nullptr, LI));
7468     FalseBlock = FalseBranch->getParent();
7469     EndBlock = cast<BasicBlock>(FalseBranch->getOperand(0));
7470   } else if (FalseInstrs.size() == 0) {
7471     TrueBranch = cast<BranchInst>(SplitBlockAndInsertIfThen(
7472         CondFr, SplitPt, false, nullptr, nullptr, LI));
7473     TrueBlock = TrueBranch->getParent();
7474     EndBlock = cast<BasicBlock>(TrueBranch->getOperand(0));
7475   } else {
7476     Instruction *ThenTerm = nullptr;
7477     Instruction *ElseTerm = nullptr;
7478     SplitBlockAndInsertIfThenElse(CondFr, SplitPt, &ThenTerm, &ElseTerm,
7479                                   nullptr, nullptr, LI);
7480     TrueBranch = cast<BranchInst>(ThenTerm);
7481     FalseBranch = cast<BranchInst>(ElseTerm);
7482     TrueBlock = TrueBranch->getParent();
7483     FalseBlock = FalseBranch->getParent();
7484     EndBlock = cast<BasicBlock>(TrueBranch->getOperand(0));
7485   }
7486 
7487   EndBlock->setName("select.end");
7488   if (TrueBlock)
7489     TrueBlock->setName("select.true.sink");
7490   if (FalseBlock)
7491     FalseBlock->setName(FalseInstrs.size() == 0 ? "select.false"
7492                                                 : "select.false.sink");
7493 
7494   if (IsHugeFunc) {
7495     if (TrueBlock)
7496       FreshBBs.insert(TrueBlock);
7497     if (FalseBlock)
7498       FreshBBs.insert(FalseBlock);
7499     FreshBBs.insert(EndBlock);
7500   }
7501 
7502   BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock));
7503 
7504   static const unsigned MD[] = {
7505       LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
7506       LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
7507   StartBlock->getTerminator()->copyMetadata(*SI, MD);
7508 
7509   // Sink expensive instructions into the conditional blocks to avoid executing
7510   // them speculatively.
7511   for (Instruction *I : TrueInstrs)
7512     I->moveBefore(TrueBranch);
7513   for (Instruction *I : FalseInstrs)
7514     I->moveBefore(FalseBranch);
7515 
7516   // If we did not create a new block for one of the 'true' or 'false' paths
7517   // of the condition, it means that side of the branch goes to the end block
7518   // directly and the path originates from the start block from the point of
7519   // view of the new PHI.
7520   if (TrueBlock == nullptr)
7521     TrueBlock = StartBlock;
7522   else if (FalseBlock == nullptr)
7523     FalseBlock = StartBlock;
7524 
7525   SmallPtrSet<const Instruction *, 2> INS;
7526   INS.insert(ASI.begin(), ASI.end());
7527   // Use a reverse iterator because a later select may use the value of an
7528   // earlier select, and we need to propagate the value through the earlier
7529   // select to get the PHI operand.
7530   for (SelectInst *SI : llvm::reverse(ASI)) {
7531     // The select itself is replaced with a PHI Node.
7532     PHINode *PN = PHINode::Create(SI->getType(), 2, "");
7533     PN->insertBefore(EndBlock->begin());
7534     PN->takeName(SI);
7535     PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock);
7536     PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock);
7537     PN->setDebugLoc(SI->getDebugLoc());
7538 
7539     replaceAllUsesWith(SI, PN, FreshBBs, IsHugeFunc);
7540     SI->eraseFromParent();
7541     INS.erase(SI);
7542     ++NumSelectsExpanded;
7543   }
7544 
7545   // Instruct OptimizeBlock to skip to the next block.
7546   CurInstIterator = StartBlock->end();
7547   return true;
7548 }
7549 
7550 /// Some targets only accept certain types for splat inputs. For example a VDUP
7551 /// in MVE takes a GPR (integer) register, and instructions that incorporate
7552 /// a VDUP (such as a VADD qd, qm, rm) also require a GPR register.
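/// Illustrative sketch (hypothetical, assuming shouldConvertSplatType returns
/// i32 for a <4 x float> splat):
///   %ins = insertelement <4 x float> poison, float %v, i64 0
///   %spl = shufflevector <4 x float> %ins, <4 x float> poison,
///                        <4 x i32> zeroinitializer
/// becomes a bitcast of %v to i32, an i32 vector splat, and a bitcast back to
/// <4 x float>, so the splatted value can be kept in a GPR as the target
/// prefers.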
7553 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
7554   // Accept shuf(insertelem(undef/poison, val, 0), undef/poison, <0,0,..>) only
7555   if (!match(SVI, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
7556                             m_Undef(), m_ZeroMask())))
7557     return false;
7558   Type *NewType = TLI->shouldConvertSplatType(SVI);
7559   if (!NewType)
7560     return false;
7561 
7562   auto *SVIVecType = cast<FixedVectorType>(SVI->getType());
7563   assert(!NewType->isVectorTy() && "Expected a scalar type!");
7564   assert(NewType->getScalarSizeInBits() == SVIVecType->getScalarSizeInBits() &&
7565          "Expected a type of the same size!");
7566   auto *NewVecType =
7567       FixedVectorType::get(NewType, SVIVecType->getNumElements());
7568 
7569   // Create a bitcast (shuffle (insert (bitcast(..))))
7570   IRBuilder<> Builder(SVI->getContext());
7571   Builder.SetInsertPoint(SVI);
7572   Value *BC1 = Builder.CreateBitCast(
7573       cast<Instruction>(SVI->getOperand(0))->getOperand(1), NewType);
7574   Value *Shuffle = Builder.CreateVectorSplat(NewVecType->getNumElements(), BC1);
7575   Value *BC2 = Builder.CreateBitCast(Shuffle, SVIVecType);
7576 
7577   replaceAllUsesWith(SVI, BC2, FreshBBs, IsHugeFunc);
7578   RecursivelyDeleteTriviallyDeadInstructions(
7579       SVI, TLInfo, nullptr,
7580       [&](Value *V) { removeAllAssertingVHReferences(V); });
7581 
7582   // Also hoist the bitcast up to its operand if they are not in the same
7583   // block.
7584   if (auto *BCI = dyn_cast<Instruction>(BC1))
7585     if (auto *Op = dyn_cast<Instruction>(BCI->getOperand(0)))
7586       if (BCI->getParent() != Op->getParent() && !isa<PHINode>(Op) &&
7587           !Op->isTerminator() && !Op->isEHPad())
7588         BCI->moveAfter(Op);
7589 
7590   return true;
7591 }
7592 
7593 bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
7594   // If the operands of I can be folded into a target instruction together with
7595   // I, duplicate and sink them.
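  // Illustrative sketch (hypothetical IR, assuming the target's
  // isProfitableToSinkOperands hook reports the zext): with
  //   bb0:  %z = zext <8 x i8> %a to <8 x i16>
  //   bb1:  %m = mul <8 x i16> %z, %b
  // a clone of %z is inserted just before %m so instruction selection can form
  // a widening multiply; the original %z is erased later if it becomes dead.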
7596   SmallVector<Use *, 4> OpsToSink;
7597   if (!TTI->isProfitableToSinkOperands(I, OpsToSink))
7598     return false;
7599 
7600   // OpsToSink can contain multiple uses in a use chain (e.g.
7601   // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating
7602   // uses must come first, so we process the ops in reverse order so as to not
7603   // create invalid IR.
7604   BasicBlock *TargetBB = I->getParent();
7605   bool Changed = false;
7606   SmallVector<Use *, 4> ToReplace;
7607   Instruction *InsertPoint = I;
7608   DenseMap<const Instruction *, unsigned long> InstOrdering;
7609   unsigned long InstNumber = 0;
7610   for (const auto &I : *TargetBB)
7611     InstOrdering[&I] = InstNumber++;
7612 
7613   for (Use *U : reverse(OpsToSink)) {
7614     auto *UI = cast<Instruction>(U->get());
7615     if (isa<PHINode>(UI))
7616       continue;
7617     if (UI->getParent() == TargetBB) {
7618       if (InstOrdering[UI] < InstOrdering[InsertPoint])
7619         InsertPoint = UI;
7620       continue;
7621     }
7622     ToReplace.push_back(U);
7623   }
7624 
7625   SetVector<Instruction *> MaybeDead;
7626   DenseMap<Instruction *, Instruction *> NewInstructions;
7627   for (Use *U : ToReplace) {
7628     auto *UI = cast<Instruction>(U->get());
7629     Instruction *NI = UI->clone();
7630 
7631     if (IsHugeFunc) {
7632       // Since we clone an instruction, its operands' defs may now sink to this
7633       // BB. Put those defs' BBs into FreshBBs so they get optimized as well.
7634       for (Value *Op : NI->operands())
7635         if (auto *OpDef = dyn_cast<Instruction>(Op))
7636           FreshBBs.insert(OpDef->getParent());
7637     }
7638 
7639     NewInstructions[UI] = NI;
7640     MaybeDead.insert(UI);
7641     LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n");
7642     NI->insertBefore(InsertPoint);
7643     InsertPoint = NI;
7644     InsertedInsts.insert(NI);
7645 
7646     // Update the use for the new instruction, making sure that we update the
7647     // sunk instruction uses, if it is part of a chain that has already been
7648     // sunk.
7649     Instruction *OldI = cast<Instruction>(U->getUser());
7650     if (NewInstructions.count(OldI))
7651       NewInstructions[OldI]->setOperand(U->getOperandNo(), NI);
7652     else
7653       U->set(NI);
7654     Changed = true;
7655   }
7656 
7657   // Remove instructions that are dead after sinking.
7658   for (auto *I : MaybeDead) {
7659     if (!I->hasNUsesOrMore(1)) {
7660       LLVM_DEBUG(dbgs() << "Removing dead instruction: " << *I << "\n");
7661       I->eraseFromParent();
7662     }
7663   }
7664 
7665   return Changed;
7666 }
7667 
7668 bool CodeGenPrepare::optimizeSwitchType(SwitchInst *SI) {
7669   Value *Cond = SI->getCondition();
7670   Type *OldType = Cond->getType();
7671   LLVMContext &Context = Cond->getContext();
7672   EVT OldVT = TLI->getValueType(*DL, OldType);
7673   MVT RegType = TLI->getPreferredSwitchConditionType(Context, OldVT);
7674   unsigned RegWidth = RegType.getSizeInBits();
7675 
7676   if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
7677     return false;
7678 
7679   // If the register width is greater than the type width, expand the condition
7680   // of the switch instruction and each case constant to the width of the
7681   // register. By widening the type of the switch condition, subsequent
7682   // comparisons (for case comparisons) will not need to be extended to the
7683   // preferred register width, so we will potentially eliminate N-1 extends,
7684   // where N is the number of cases in the switch.
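  // For instance (illustrative only), on a target whose preferred switch
  // condition type is i64, `switch i32 %x` becomes
  //   %x.ext = zext i32 %x to i64
  //   switch i64 %x.ext, ...
  // with every case constant zero-extended to i64 as well.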
7685   auto *NewType = Type::getIntNTy(Context, RegWidth);
7686 
7687   // Extend the switch condition and case constants using the target preferred
7688   // extend unless the switch condition is a function argument with an extend
7689   // attribute. In that case, we can avoid an unnecessary mask/extension by
7690   // matching the argument extension instead.
7691   Instruction::CastOps ExtType = Instruction::ZExt;
7692   // Some targets prefer SExt over ZExt.
7693   if (TLI->isSExtCheaperThanZExt(OldVT, RegType))
7694     ExtType = Instruction::SExt;
7695 
7696   if (auto *Arg = dyn_cast<Argument>(Cond)) {
7697     if (Arg->hasSExtAttr())
7698       ExtType = Instruction::SExt;
7699     if (Arg->hasZExtAttr())
7700       ExtType = Instruction::ZExt;
7701   }
7702 
7703   auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
7704   ExtInst->insertBefore(SI);
7705   ExtInst->setDebugLoc(SI->getDebugLoc());
7706   SI->setCondition(ExtInst);
7707   for (auto Case : SI->cases()) {
7708     const APInt &NarrowConst = Case.getCaseValue()->getValue();
7709     APInt WideConst = (ExtType == Instruction::ZExt)
7710                           ? NarrowConst.zext(RegWidth)
7711                           : NarrowConst.sext(RegWidth);
7712     Case.setValue(ConstantInt::get(Context, WideConst));
7713   }
7714 
7715   return true;
7716 }
7717 
7718 bool CodeGenPrepare::optimizeSwitchPhiConstants(SwitchInst *SI) {
7719   // The SCCP optimization tends to produce code like this:
7720   //   switch(x) { case 42: phi(42, ...) }
7721   // Materializing the constant for the phi-argument needs instructions; So we
7722   // Materializing the constant for the phi-argument needs instructions; so we
7723   //   switch(x) { case 42: phi(x, ...) }
7724 
7725   Value *Condition = SI->getCondition();
7726   // Avoid endless loop in degenerate case.
7727   if (isa<ConstantInt>(*Condition))
7728     return false;
7729 
7730   bool Changed = false;
7731   BasicBlock *SwitchBB = SI->getParent();
7732   Type *ConditionType = Condition->getType();
7733 
7734   for (const SwitchInst::CaseHandle &Case : SI->cases()) {
7735     ConstantInt *CaseValue = Case.getCaseValue();
7736     BasicBlock *CaseBB = Case.getCaseSuccessor();
7737     // Set to true if we previously checked that `CaseBB` is only reached by
7738     // a single case from this switch.
7739     bool CheckedForSinglePred = false;
7740     for (PHINode &PHI : CaseBB->phis()) {
7741       Type *PHIType = PHI.getType();
7742       // If ZExt is free then we can also catch patterns like this:
7743       //   switch((i32)x) { case 42: phi((i64)42, ...); }
7744       // and replace `(i64)42` with `zext i32 %x to i64`.
7745       bool TryZExt =
7746           PHIType->isIntegerTy() &&
7747           PHIType->getIntegerBitWidth() > ConditionType->getIntegerBitWidth() &&
7748           TLI->isZExtFree(ConditionType, PHIType);
7749       if (PHIType == ConditionType || TryZExt) {
7750         // Set to true to skip this case because of multiple preds.
7751         bool SkipCase = false;
7752         Value *Replacement = nullptr;
7753         for (unsigned I = 0, E = PHI.getNumIncomingValues(); I != E; I++) {
7754           Value *PHIValue = PHI.getIncomingValue(I);
7755           if (PHIValue != CaseValue) {
7756             if (!TryZExt)
7757               continue;
7758             ConstantInt *PHIValueInt = dyn_cast<ConstantInt>(PHIValue);
7759             if (!PHIValueInt ||
7760                 PHIValueInt->getValue() !=
7761                     CaseValue->getValue().zext(PHIType->getIntegerBitWidth()))
7762               continue;
7763           }
7764           if (PHI.getIncomingBlock(I) != SwitchBB)
7765             continue;
7766           // We cannot optimize if there are multiple case labels jumping to
7767           // this block.  This check may get expensive when there are many
7768           // case labels so we test for it last.
7769           if (!CheckedForSinglePred) {
7770             CheckedForSinglePred = true;
7771             if (SI->findCaseDest(CaseBB) == nullptr) {
7772               SkipCase = true;
7773               break;
7774             }
7775           }
7776 
7777           if (Replacement == nullptr) {
7778             if (PHIValue == CaseValue) {
7779               Replacement = Condition;
7780             } else {
7781               IRBuilder<> Builder(SI);
7782               Replacement = Builder.CreateZExt(Condition, PHIType);
7783             }
7784           }
7785           PHI.setIncomingValue(I, Replacement);
7786           Changed = true;
7787         }
7788         if (SkipCase)
7789           break;
7790       }
7791     }
7792   }
7793   return Changed;
7794 }
7795 
7796 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
7797   bool Changed = optimizeSwitchType(SI);
7798   Changed |= optimizeSwitchPhiConstants(SI);
7799   return Changed;
7800 }
7801 
7802 namespace {
7803 
7804 /// Helper class to promote a scalar operation to a vector one.
7805 /// This class is used to move an extractelement transition downward.
7806 /// E.g.,
7807 /// a = vector_op <2 x i32>
7808 /// b = extractelement <2 x i32> a, i32 0
7809 /// c = scalar_op b
7810 /// store c
7811 ///
7812 /// =>
7813 /// a = vector_op <2 x i32>
7814 /// c = vector_op a (equivalent to scalar_op on the related lane)
7815 /// * d = extractelement <2 x i32> c, i32 0
7816 /// * store d
7817 /// Assuming both extractelement and store can be combined, we get rid of the
7818 /// transition.
7819 class VectorPromoteHelper {
7820   /// DataLayout associated with the current module.
7821   const DataLayout &DL;
7822 
7823   /// Used to perform some checks on the legality of vector operations.
7824   const TargetLowering &TLI;
7825 
7826   /// Used to estimate the cost of the promoted chain.
7827   const TargetTransformInfo &TTI;
7828 
7829   /// The transition being moved downwards.
7830   Instruction *Transition;
7831 
7832   /// The sequence of instructions to be promoted.
7833   SmallVector<Instruction *, 4> InstsToBePromoted;
7834 
7835   /// Cost of combining a store and an extract.
7836   unsigned StoreExtractCombineCost;
7837 
7838   /// Instruction that will be combined with the transition.
7839   Instruction *CombineInst = nullptr;
7840 
7841   /// The instruction that represents the current end of the transition.
7842   /// Since we are faking the promotion until we reach the end of the chain
7843   /// of computation, we need a way to get the current end of the transition.
7844   Instruction *getEndOfTransition() const {
7845     if (InstsToBePromoted.empty())
7846       return Transition;
7847     return InstsToBePromoted.back();
7848   }
7849 
7850   /// Return the index of the original value in the transition.
7851   /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
7852   /// c, is at index 0.
7853   unsigned getTransitionOriginalValueIdx() const {
7854     assert(isa<ExtractElementInst>(Transition) &&
7855            "Other kind of transitions are not supported yet");
7856     return 0;
7857   }
7858 
7859   /// Return the index of the index in the transition.
7860   /// E.g., for "extractelement <2 x i32> c, i32 0" the index
7861   /// is at index 1.
7862   unsigned getTransitionIdx() const {
7863     assert(isa<ExtractElementInst>(Transition) &&
7864            "Other kind of transitions are not supported yet");
7865     return 1;
7866   }
7867 
7868   /// Get the type of the transition.
7869   /// This is the type of the original value.
7870   /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
7871   /// transition is <2 x i32>.
7872   Type *getTransitionType() const {
7873     return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
7874   }
7875 
7876   /// Promote \p ToBePromoted by moving \p Def downward through it.
7877   /// I.e., we have the following sequence:
7878   /// Def = Transition <ty1> a to <ty2>
7879   /// b = ToBePromoted <ty2> Def, ...
7880   /// =>
7881   /// b = ToBePromoted <ty1> a, ...
7882   /// Def = Transition <ty1> ToBePromoted to <ty2>
7883   void promoteImpl(Instruction *ToBePromoted);
7884 
7885   /// Check whether or not it is profitable to promote all the
7886   /// instructions enqueued to be promoted.
7887   bool isProfitableToPromote() {
7888     Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
7889     unsigned Index = isa<ConstantInt>(ValIdx)
7890                          ? cast<ConstantInt>(ValIdx)->getZExtValue()
7891                          : -1;
7892     Type *PromotedType = getTransitionType();
7893 
7894     StoreInst *ST = cast<StoreInst>(CombineInst);
7895     unsigned AS = ST->getPointerAddressSpace();
7896     // Check if this store is supported.
7897     if (!TLI.allowsMisalignedMemoryAccesses(
7898             TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
7899             ST->getAlign())) {
7900       // If this is not supported, there is no way we can combine
7901       // the extract with the store.
7902       return false;
7903     }
7904 
7905     // The scalar chain of computation has to pay for the transition
7906     // scalar to vector.
7907     // The vector chain has to account for the combining cost.
7908     enum TargetTransformInfo::TargetCostKind CostKind =
7909         TargetTransformInfo::TCK_RecipThroughput;
7910     InstructionCost ScalarCost =
7911         TTI.getVectorInstrCost(*Transition, PromotedType, CostKind, Index);
7912     InstructionCost VectorCost = StoreExtractCombineCost;
7913     for (const auto &Inst : InstsToBePromoted) {
7914       // Compute the cost.
7915       // By construction, all instructions being promoted are arithmetic ones.
7916       // Moreover, one argument is a constant that can be viewed as a splat
7917       // constant.
7918       Value *Arg0 = Inst->getOperand(0);
7919       bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
7920                             isa<ConstantFP>(Arg0);
7921       TargetTransformInfo::OperandValueInfo Arg0Info, Arg1Info;
7922       if (IsArg0Constant)
7923         Arg0Info.Kind = TargetTransformInfo::OK_UniformConstantValue;
7924       else
7925         Arg1Info.Kind = TargetTransformInfo::OK_UniformConstantValue;
7926 
7927       ScalarCost += TTI.getArithmeticInstrCost(
7928           Inst->getOpcode(), Inst->getType(), CostKind, Arg0Info, Arg1Info);
7929       VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
7930                                                CostKind, Arg0Info, Arg1Info);
7931     }
7932     LLVM_DEBUG(
7933         dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
7934                << ScalarCost << "\nVector: " << VectorCost << '\n');
7935     return ScalarCost > VectorCost;
7936   }
7937 
7938   /// Generate a constant vector with \p Val with the same
7939   /// number of elements as the transition.
7940   /// \p UseSplat defines whether or not \p Val should be replicated
7941   /// across the whole vector.
7942   /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
7943   /// otherwise we generate a vector with as many undefs as possible:
7944   /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
7945   /// used at the index of the extract.
7946   Value *getConstantVector(Constant *Val, bool UseSplat) const {
7947     unsigned ExtractIdx = std::numeric_limits<unsigned>::max();
7948     if (!UseSplat) {
7949       // If we cannot determine where the constant must be, we have to
7950       // use a splat constant.
7951       Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
7952       if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
7953         ExtractIdx = CstVal->getSExtValue();
7954       else
7955         UseSplat = true;
7956     }
7957 
7958     ElementCount EC = cast<VectorType>(getTransitionType())->getElementCount();
7959     if (UseSplat)
7960       return ConstantVector::getSplat(EC, Val);
7961 
7962     if (!EC.isScalable()) {
7963       SmallVector<Constant *, 4> ConstVec;
7964       UndefValue *UndefVal = UndefValue::get(Val->getType());
7965       for (unsigned Idx = 0; Idx != EC.getKnownMinValue(); ++Idx) {
7966         if (Idx == ExtractIdx)
7967           ConstVec.push_back(Val);
7968         else
7969           ConstVec.push_back(UndefVal);
7970       }
7971       return ConstantVector::get(ConstVec);
7972     } else
7973       llvm_unreachable(
7974           "Generate scalable vector for non-splat is unimplemented");
7975   }
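
  // For example (hypothetical values), with Val == i32 7 on a <4 x i32>
  // transition whose extract index is 2, UseSplat == true produces
  // <i32 7, i32 7, i32 7, i32 7>, whereas UseSplat == false produces
  // <i32 undef, i32 undef, i32 7, i32 undef>.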
7976 
7977   /// Check if promoting the operand at \p OperandIdx in \p Use to a
7978   /// vector type can trigger undefined behavior.
7979   static bool canCauseUndefinedBehavior(const Instruction *Use,
7980                                         unsigned OperandIdx) {
7981     // It is not safe to introduce undef when the operand is on
7982     // the right-hand side of a division-like instruction.
7983     if (OperandIdx != 1)
7984       return false;
7985     switch (Use->getOpcode()) {
7986     default:
7987       return false;
7988     case Instruction::SDiv:
7989     case Instruction::UDiv:
7990     case Instruction::SRem:
7991     case Instruction::URem:
7992       return true;
7993     case Instruction::FDiv:
7994     case Instruction::FRem:
7995       return !Use->hasNoNaNs();
7996     }
7997     llvm_unreachable(nullptr);
7998   }
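
  // E.g., for a hypothetical "%r = udiv i32 %x, %e" where %e is the extracted
  // value, promotion would have to fill the unused divisor lanes, and padding
  // them with undef could introduce a division by undef/zero; shouldPromote
  // rejects such chains, and getConstantVector splats constants instead.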
7999 
8000 public:
8001   VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
8002                       const TargetTransformInfo &TTI, Instruction *Transition,
8003                       unsigned CombineCost)
8004       : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
8005         StoreExtractCombineCost(CombineCost) {
8006     assert(Transition && "Do not know how to promote null");
8007   }
8008 
8009   /// Check if we can promote \p ToBePromoted to the transition type.
8010   bool canPromote(const Instruction *ToBePromoted) const {
8011     // We could support CastInst too.
8012     return isa<BinaryOperator>(ToBePromoted);
8013   }
8014 
8015   /// Check if it is profitable to promote \p ToBePromoted
8016   /// by moving the transition down through it.
8017   bool shouldPromote(const Instruction *ToBePromoted) const {
8018     // Promote only if all the operands can be statically expanded.
8019     // Indeed, we do not want to introduce any new kind of transitions.
8020     for (const Use &U : ToBePromoted->operands()) {
8021       const Value *Val = U.get();
8022       if (Val == getEndOfTransition()) {
8023         // If the use is a division and the transition is on the rhs,
8024         // we cannot promote the operation, otherwise we may create a
8025         // division by zero.
8026         if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
8027           return false;
8028         continue;
8029       }
8030       if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
8031           !isa<ConstantFP>(Val))
8032         return false;
8033     }
8034     // Check that the resulting operation is legal.
8035     int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
8036     if (!ISDOpcode)
8037       return false;
8038     return StressStoreExtract ||
8039            TLI.isOperationLegalOrCustom(
8040                ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
8041   }
8042 
8043   /// Check whether or not \p Use can be combined
8044   /// with the transition.
8045   /// I.e., is it possible to do Use(Transition) => AnotherUse?
8046   bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }
8047 
8048   /// Record \p ToBePromoted as part of the chain to be promoted.
8049   void enqueueForPromotion(Instruction *ToBePromoted) {
8050     InstsToBePromoted.push_back(ToBePromoted);
8051   }
8052 
8053   /// Set the instruction that will be combined with the transition.
8054   void recordCombineInstruction(Instruction *ToBeCombined) {
8055     assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
8056     CombineInst = ToBeCombined;
8057   }
8058 
8059   /// Promote all the instructions enqueued for promotion if it
8060   /// is profitable.
8061   /// \return True if the promotion happened, false otherwise.
8062   bool promote() {
8063     // Check if there is something to promote.
8064     // Right now, if we do not have anything to combine with,
8065     // we assume the promotion is not profitable.
8066     if (InstsToBePromoted.empty() || !CombineInst)
8067       return false;
8068 
8069     // Check cost.
8070     if (!StressStoreExtract && !isProfitableToPromote())
8071       return false;
8072 
8073     // Promote.
8074     for (auto &ToBePromoted : InstsToBePromoted)
8075       promoteImpl(ToBePromoted);
8076     InstsToBePromoted.clear();
8077     return true;
8078   }
8079 };
8080 
8081 } // end anonymous namespace
8082 
8083 void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
8084   // At this point, we know that all the operands of ToBePromoted but Def
8085   // can be statically promoted.
8086   // For Def, we need to use its parameter in ToBePromoted:
8087   // b = ToBePromoted ty1 a
8088   // Def = Transition ty1 b to ty2
8089   // Move the transition down.
8090   // 1. Replace all uses of the promoted operation by the transition.
8091   // = ... b => = ... Def.
8092   assert(ToBePromoted->getType() == Transition->getType() &&
8093          "The type of the result of the transition does not match "
8094          "the final type");
8095   ToBePromoted->replaceAllUsesWith(Transition);
8096   // 2. Update the type of the uses.
8097   // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
8098   Type *TransitionTy = getTransitionType();
8099   ToBePromoted->mutateType(TransitionTy);
8100   // 3. Update all the operands of the promoted operation with promoted
8101   // operands.
8102   // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
8103   for (Use &U : ToBePromoted->operands()) {
8104     Value *Val = U.get();
8105     Value *NewVal = nullptr;
8106     if (Val == Transition)
8107       NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
8108     else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
8109              isa<ConstantFP>(Val)) {
8110       // Use a splat constant if it is not safe to use undef.
8111       NewVal = getConstantVector(
8112           cast<Constant>(Val),
8113           isa<UndefValue>(Val) ||
8114               canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
8115     } else
8116       llvm_unreachable("Did you modify shouldPromote and forget to update "
8117                        "this?");
8118     ToBePromoted->setOperand(U.getOperandNo(), NewVal);
8119   }
8120   Transition->moveAfter(ToBePromoted);
8121   Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
8122 }
8123 
8124 /// Some targets can do store(extractelement) with one instruction.
8125 /// Try to push the extractelement towards the stores when the target
8126 /// has this feature and this is profitable.
8127 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
8128   unsigned CombineCost = std::numeric_limits<unsigned>::max();
8129   if (DisableStoreExtract ||
8130       (!StressStoreExtract &&
8131        !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
8132                                        Inst->getOperand(1), CombineCost)))
8133     return false;
8134 
8135   // At this point we know that Inst is a vector to scalar transition.
8136   // Try to move it down the def-use chain, until:
8137   // - We can combine the transition with its single use
8138   //   => we got rid of the transition.
8139   // - We escape the current basic block
8140   //   => we would need to check that we are moving it to a cheaper place and
8141   //      we do not do that for now.
8142   BasicBlock *Parent = Inst->getParent();
8143   LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
8144   VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
8145   // If the transition has more than one use, assume this is not going to be
8146   // beneficial.
8147   while (Inst->hasOneUse()) {
8148     Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
8149     LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');
8150 
8151     if (ToBePromoted->getParent() != Parent) {
8152       LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block ("
8153                         << ToBePromoted->getParent()->getName()
8154                         << ") than the transition (" << Parent->getName()
8155                         << ").\n");
8156       return false;
8157     }
8158 
8159     if (VPH.canCombine(ToBePromoted)) {
8160       LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n'
8161                         << "will be combined with: " << *ToBePromoted << '\n');
8162       VPH.recordCombineInstruction(ToBePromoted);
8163       bool Changed = VPH.promote();
8164       NumStoreExtractExposed += Changed;
8165       return Changed;
8166     }
8167 
8168     LLVM_DEBUG(dbgs() << "Try promoting.\n");
8169     if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
8170       return false;
8171 
8172     LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");
8173 
8174     VPH.enqueueForPromotion(ToBePromoted);
8175     Inst = ToBePromoted;
8176   }
8177   return false;
8178 }
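
// End to end, on a target where store(extractelement) is one instruction, a
// chain like the following (hypothetical IR):
//   %e = extractelement <2 x i32> %v, i32 1
//   %m = mul i32 %e, 3
//   store i32 %m, ptr %p
// is rewritten (sketch) into:
//   %m.v = mul <2 x i32> %v, <i32 undef, i32 3>
//   %e.v = extractelement <2 x i32> %m.v, i32 1
//   store i32 %e.v, ptr %p
// so the trailing extract+store pair can be selected as a single instruction.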
8179 
8180 /// For the store instruction sequence below, the F and I values
8181 /// are bundled together as an i64 value before being stored into memory.
8182 /// Sometimes it is more efficient to generate separate stores for F and I,
8183 /// which can remove the bitwise instructions or sink them to colder places.
8184 ///
8185 ///   (store (or (zext (bitcast F to i32) to i64),
8186 ///              (shl (zext I to i64), 32)), addr)  -->
8187 ///   (store F, addr) and (store I, addr+4)
8188 ///
8189 /// Similarly, splitting other merged stores can also be beneficial, like:
8190 /// For pair of {i32, i32}, i64 store --> two i32 stores.
8191 /// For pair of {i32, i16}, i64 store --> two i32 stores.
8192 /// For pair of {i16, i16}, i32 store --> two i16 stores.
8193 /// For pair of {i16, i8},  i32 store --> two i16 stores.
8194 /// For pair of {i8, i8},   i16 store --> two i8 stores.
8195 ///
8196 /// We allow each target to determine specifically which kind of splitting is
8197 /// supported.
8198 ///
8199 /// The store patterns commonly arise from the simple code snippet below
8200 /// if only std::make_pair(...) is SROA-transformed before being inlined into hoo.
8201 ///   void goo(const std::pair<int, float> &);
8202 ///   hoo() {
8203 ///     ...
8204 ///     goo(std::make_pair(tmp, ftmp));
8205 ///     ...
8206 ///   }
8207 ///
8208 /// Although we already have similar splitting in DAG Combine, we duplicate
8209 /// it in CodeGenPrepare to catch cases in which the pattern spans
8210 /// multiple BBs. The logic in DAG Combine is kept to catch cases generated
8211 /// during code expansion.
8212 static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
8213                                 const TargetLowering &TLI) {
8214   // Handle simple but common cases only.
8215   Type *StoreType = SI.getValueOperand()->getType();
8216 
8217   // The code below assumes shifting a value by <number of bits>,
8218   // whereas scalable vectors would have to be shifted by
8219   // <log2(vscale) + number of bits> in order to store the
8220   // low/high parts. Bailing out for now.
8221   if (StoreType->isScalableTy())
8222     return false;
8223 
8224   if (!DL.typeSizeEqualsStoreSize(StoreType) ||
8225       DL.getTypeSizeInBits(StoreType) == 0)
8226     return false;
8227 
8228   unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
8229   Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
8230   if (!DL.typeSizeEqualsStoreSize(SplitStoreType))
8231     return false;
8232 
8233   // Don't split the store if it is volatile.
8234   if (SI.isVolatile())
8235     return false;
8236 
8237   // Match the following patterns:
8238   // (store (or (zext LValue to i64),
8239   //            (shl (zext HValue to i64), HalfValBitSize)), addr)
8240   //  or
8241   // (store (or (shl (zext HValue to i64), HalfValBitSize),
8242   //            (zext LValue to i64)), addr)
8243   // Expect both operands of the OR and the first operand of the SHL to
8244   // have only one use.
8245   Value *LValue, *HValue;
8246   if (!match(SI.getValueOperand(),
8247              m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
8248                     m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
8249                                    m_SpecificInt(HalfValBitSize))))))
8250     return false;
8251 
8252   // Check that LValue and HValue are integers with size at most HalfValBitSize.
8253   if (!LValue->getType()->isIntegerTy() ||
8254       DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
8255       !HValue->getType()->isIntegerTy() ||
8256       DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
8257     return false;
8258 
8259   // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
8260   // as the input of target query.
8261   auto *LBC = dyn_cast<BitCastInst>(LValue);
8262   auto *HBC = dyn_cast<BitCastInst>(HValue);
8263   EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
8264                   : EVT::getEVT(LValue->getType());
8265   EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
8266                    : EVT::getEVT(HValue->getType());
8267   if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
8268     return false;
8269 
8270   // Start to split store.
8271   IRBuilder<> Builder(SI.getContext());
8272   Builder.SetInsertPoint(&SI);
8273 
8274   // If LValue/HValue is a bitcast in another BB, create a new one in the
8275   // current BB so it may be merged with the split stores by the DAG combiner.
8276   if (LBC && LBC->getParent() != SI.getParent())
8277     LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
8278   if (HBC && HBC->getParent() != SI.getParent())
8279     HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());
8280 
8281   bool IsLE = SI.getDataLayout().isLittleEndian();
8282   auto CreateSplitStore = [&](Value *V, bool Upper) {
8283     V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
8284     Value *Addr = SI.getPointerOperand();
8285     Align Alignment = SI.getAlign();
8286     const bool IsOffsetStore = (IsLE && Upper) || (!IsLE && !Upper);
8287     if (IsOffsetStore) {
8288       Addr = Builder.CreateGEP(
8289           SplitStoreType, Addr,
8290           ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
8291 
8292       // When splitting the store in half, naturally one half will retain the
8293       // alignment of the original wider store, regardless of whether it was
8294       // over-aligned or not, while the other will require adjustment.
8295       Alignment = commonAlignment(Alignment, HalfValBitSize / 8);
8296     }
8297     Builder.CreateAlignedStore(V, Addr, Alignment);
8298   };
8299 
8300   CreateSplitStore(LValue, false);
8301   CreateSplitStore(HValue, true);
8302 
8303   // Delete the old store.
8304   SI.eraseFromParent();
8305   return true;
8306 }
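
// Sketch of the rewrite on a little-endian target with HalfValBitSize == 32
// (hypothetical IR):
//   %z0 = zext i32 %lo to i64
//   %z1 = zext i32 %hi to i64
//   %sh = shl i64 %z1, 32
//   %or = or i64 %z0, %sh
//   store i64 %or, ptr %p
// becomes:
//   store i32 %lo, ptr %p
//   %p1 = getelementptr i32, ptr %p, i32 1
//   store i32 %hi, ptr %p1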
8307 
8308 // Return true if the GEP has two operands, the first operand is of a sequential
8309 // type, and the second operand is a constant.
8310 static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) {
8311   gep_type_iterator I = gep_type_begin(*GEP);
8312   return GEP->getNumOperands() == 2 && I.isSequential() &&
8313          isa<ConstantInt>(GEP->getOperand(1));
8314 }
8315 
8316 // Try unmerging GEPs to reduce liveness interference (register pressure) across
8317 // IndirectBr edges. Since IndirectBr edges tend to touch on many blocks,
8318 // reducing liveness interference across those edges benefits global register
8319 // allocation. Currently handles only certain cases.
8320 //
8321 // For example, unmerge %GEPI and %UGEPI as below.
8322 //
8323 // ---------- BEFORE ----------
8324 // SrcBlock:
8325 //   ...
8326 //   %GEPIOp = ...
8327 //   ...
8328 //   %GEPI = gep %GEPIOp, Idx
8329 //   ...
8330 //   indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ]
8331 //   (* %GEPI is alive on the indirectbr edges due to other uses ahead)
8332 //   (* %GEPIOp is alive on the indirectbr edges only because it's used by
8333 //   %UGEPI)
8334 //
8335 // DstB0: ... (there may be a gep similar to %UGEPI to be unmerged)
8336 // DstB1: ... (there may be a gep similar to %UGEPI to be unmerged)
8337 // ...
8338 //
8339 // DstBi:
8340 //   ...
8341 //   %UGEPI = gep %GEPIOp, UIdx
8342 // ...
8343 // ---------------------------
8344 //
8345 // ---------- AFTER ----------
8346 // SrcBlock:
8347 //   ... (same as above)
8348 //    (* %GEPI is still alive on the indirectbr edges)
8349 //    (* %GEPIOp is no longer alive on the indirectbr edges as a result of the
8350 //    unmerging)
8351 // ...
8352 //
8353 // DstBi:
8354 //   ...
8355 //   %UGEPI = gep %GEPI, (UIdx-Idx)
8356 //   ...
8357 // ---------------------------
8358 //
8359 // The register pressure on the IndirectBr edges is reduced because %GEPIOp is
8360 // no longer alive on them.
8361 //
8362 // We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting merging
8363 // of GEPs in the first place in InstCombiner::visitGetElementPtrInst() so as
8364 // not to disable further simplifications and optimizations as a result of GEP
8365 // merging.
8366 //
8367 // Note this unmerging may increase the length of the data flow critical path
8368 // (the path from %GEPIOp to %UGEPI would go through %GEPI), which is a tradeoff
8369 // between the register pressure and the length of data-flow critical
8370 // path. Restricting this to the uncommon IndirectBr case would minimize the
8371 // impact of potentially longer critical path, if any, and the impact on compile
8372 // time.
8373 static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
8374                                              const TargetTransformInfo *TTI) {
8375   BasicBlock *SrcBlock = GEPI->getParent();
8376   // Check that SrcBlock ends with an IndirectBr. If not, give up. The common
8377   // (non-IndirectBr) cases exit early here.
8378   if (!isa<IndirectBrInst>(SrcBlock->getTerminator()))
8379     return false;
8380   // Check that GEPI is a simple gep with a single constant index.
8381   if (!GEPSequentialConstIndexed(GEPI))
8382     return false;
8383   ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1));
8384   // Check that GEPI is a cheap one.
8385   if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType(),
8386                          TargetTransformInfo::TCK_SizeAndLatency) >
8387       TargetTransformInfo::TCC_Basic)
8388     return false;
8389   Value *GEPIOp = GEPI->getOperand(0);
8390   // Check that GEPIOp is an instruction that's also defined in SrcBlock.
8391   if (!isa<Instruction>(GEPIOp))
8392     return false;
8393   auto *GEPIOpI = cast<Instruction>(GEPIOp);
8394   if (GEPIOpI->getParent() != SrcBlock)
8395     return false;
8396   // Check that GEPI is used outside the block, meaning it's alive on the
8397   // IndirectBr edge(s).
8398   if (llvm::none_of(GEPI->users(), [&](User *Usr) {
8399         if (auto *I = dyn_cast<Instruction>(Usr)) {
8400           if (I->getParent() != SrcBlock) {
8401             return true;
8402           }
8403         }
8404         return false;
8405       }))
8406     return false;
8407   // The second elements of the GEP chains to be unmerged.
8408   std::vector<GetElementPtrInst *> UGEPIs;
8409   // Check each user of GEPIOp to check if unmerging would make GEPIOp not alive
8410   // on IndirectBr edges.
8411   for (User *Usr : GEPIOp->users()) {
8412     if (Usr == GEPI)
8413       continue;
8414     // Check if Usr is an Instruction. If not, give up.
8415     if (!isa<Instruction>(Usr))
8416       return false;
8417     auto *UI = cast<Instruction>(Usr);
8418     // If Usr is in the same block as GEPIOp, that's fine; skip it.
8419     if (UI->getParent() == SrcBlock)
8420       continue;
8421     // Check if Usr is a GEP. If not, give up.
8422     if (!isa<GetElementPtrInst>(Usr))
8423       return false;
8424     auto *UGEPI = cast<GetElementPtrInst>(Usr);
8425     // Check if UGEPI is a simple gep with a single constant index and GEPIOp is
8426     // the pointer operand to it. If so, record it in the vector. If not, give
8427     // up.
8428     if (!GEPSequentialConstIndexed(UGEPI))
8429       return false;
8430     if (UGEPI->getOperand(0) != GEPIOp)
8431       return false;
8432     if (UGEPI->getSourceElementType() != GEPI->getSourceElementType())
8433       return false;
8434     if (GEPIIdx->getType() !=
8435         cast<ConstantInt>(UGEPI->getOperand(1))->getType())
8436       return false;
8437     ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
8438     if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType(),
8439                            TargetTransformInfo::TCK_SizeAndLatency) >
8440         TargetTransformInfo::TCC_Basic)
8441       return false;
8442     UGEPIs.push_back(UGEPI);
8443   }
8444   if (UGEPIs.empty())
8445     return false;
8446   // Check the materialization cost of (UIdx-Idx).
8447   for (GetElementPtrInst *UGEPI : UGEPIs) {
8448     ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
8449     APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue();
8450     InstructionCost ImmCost = TTI->getIntImmCost(
8451         NewIdx, GEPIIdx->getType(), TargetTransformInfo::TCK_SizeAndLatency);
8452     if (ImmCost > TargetTransformInfo::TCC_Basic)
8453       return false;
8454   }
8455   // Now unmerge between GEPI and UGEPIs.
8456   for (GetElementPtrInst *UGEPI : UGEPIs) {
8457     UGEPI->setOperand(0, GEPI);
8458     ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
8459     Constant *NewUGEPIIdx = ConstantInt::get(
8460         GEPIIdx->getType(), UGEPIIdx->getValue() - GEPIIdx->getValue());
8461     UGEPI->setOperand(1, NewUGEPIIdx);
8462     // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not
8463     // inbounds to avoid UB.
8464     if (!GEPI->isInBounds()) {
8465       UGEPI->setIsInBounds(false);
8466     }
8467   }
8468   // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not
8469   // alive on IndirectBr edges).
8470   assert(llvm::none_of(GEPIOp->users(),
8471                        [&](User *Usr) {
8472                          return cast<Instruction>(Usr)->getParent() != SrcBlock;
8473                        }) &&
8474          "GEPIOp is used outside SrcBlock");
8475   return true;
8476 }
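
// Concretely (hypothetical IR), with "%GEPI = getelementptr i8, ptr %base,
// i64 16" in SrcBlock and "%UGEPI = getelementptr i8, ptr %base, i64 20" in a
// destination block, the rewrite yields "%UGEPI = getelementptr i8, ptr %GEPI,
// i64 4", so only %GEPI (and no longer %base) stays live across the
// indirectbr edges.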
8477 
8478 static bool optimizeBranch(BranchInst *Branch, const TargetLowering &TLI,
8479                            SmallSet<BasicBlock *, 32> &FreshBBs,
8480                            bool IsHugeFunc) {
8481   // Try and convert
8482   //  %c = icmp ult %x, 8
8483   //  br %c, bla, blb
8484   //  %tc = lshr %x, 3
8485   // to
8486   //  %tc = lshr %x, 3
8487   //  %c = icmp eq %tc, 0
8488   //  br %c, bla, blb
8489   // Creating the cmp to zero can be better for the backend, especially if the
8490   // lshr produces flags that can be used automatically.
8491   if (!TLI.preferZeroCompareBranch() || !Branch->isConditional())
8492     return false;
8493 
8494   ICmpInst *Cmp = dyn_cast<ICmpInst>(Branch->getCondition());
8495   if (!Cmp || !isa<ConstantInt>(Cmp->getOperand(1)) || !Cmp->hasOneUse())
8496     return false;
8497 
8498   Value *X = Cmp->getOperand(0);
8499   APInt CmpC = cast<ConstantInt>(Cmp->getOperand(1))->getValue();
8500 
8501   for (auto *U : X->users()) {
8502     Instruction *UI = dyn_cast<Instruction>(U);
8503     // A quick dominance check
8504     if (!UI ||
8505         (UI->getParent() != Branch->getParent() &&
8506          UI->getParent() != Branch->getSuccessor(0) &&
8507          UI->getParent() != Branch->getSuccessor(1)) ||
8508         (UI->getParent() != Branch->getParent() &&
8509          !UI->getParent()->getSinglePredecessor()))
8510       continue;
8511 
8512     if (CmpC.isPowerOf2() && Cmp->getPredicate() == ICmpInst::ICMP_ULT &&
8513         match(UI, m_Shr(m_Specific(X), m_SpecificInt(CmpC.logBase2())))) {
8514       IRBuilder<> Builder(Branch);
8515       if (UI->getParent() != Branch->getParent())
8516         UI->moveBefore(Branch);
8517       UI->dropPoisonGeneratingFlags();
8518       Value *NewCmp = Builder.CreateCmp(ICmpInst::ICMP_EQ, UI,
8519                                         ConstantInt::get(UI->getType(), 0));
8520       LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
8521       LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n");
8522       replaceAllUsesWith(Cmp, NewCmp, FreshBBs, IsHugeFunc);
8523       return true;
8524     }
8525     if (Cmp->isEquality() &&
8526         (match(UI, m_Add(m_Specific(X), m_SpecificInt(-CmpC))) ||
8527          match(UI, m_Sub(m_Specific(X), m_SpecificInt(CmpC))))) {
8528       IRBuilder<> Builder(Branch);
8529       if (UI->getParent() != Branch->getParent())
8530         UI->moveBefore(Branch);
8531       UI->dropPoisonGeneratingFlags();
8532       Value *NewCmp = Builder.CreateCmp(Cmp->getPredicate(), UI,
8533                                         ConstantInt::get(UI->getType(), 0));
8534       LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
8535       LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n");
8536       replaceAllUsesWith(Cmp, NewCmp, FreshBBs, IsHugeFunc);
8537       return true;
8538     }
8539   }
8540   return false;
8541 }
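
// The equality case above covers patterns such as (hypothetical IR):
//   %c = icmp eq i32 %x, 5          %s = sub i32 %x, 5
//   br i1 %c, ...             =>    %c = icmp eq i32 %s, 0
//   %s = sub i32 %x, 5              br i1 %c, ...
// letting a flag-setting subtraction feed the branch directly.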
8542 
8543 bool CodeGenPrepare::optimizeInst(Instruction *I, ModifyDT &ModifiedDT) {
8544   bool AnyChange = fixupDbgVariableRecordsOnInst(*I);
8546 
8547   // Bail out if we inserted the instruction to prevent optimizations from
8548   // stepping on each other's toes.
8549   if (InsertedInsts.count(I))
8550     return AnyChange;
8551 
8552   // TODO: Move this into the switch on opcode below.
8553   if (PHINode *P = dyn_cast<PHINode>(I)) {
8554     // It is possible for very late stage optimizations (such as SimplifyCFG)
8555     // to introduce PHI nodes too late to be cleaned up.  If we detect such a
8556     // trivial PHI, go ahead and zap it here.
8557     if (Value *V = simplifyInstruction(P, {*DL, TLInfo})) {
8558       LargeOffsetGEPMap.erase(P);
8559       replaceAllUsesWith(P, V, FreshBBs, IsHugeFunc);
8560       P->eraseFromParent();
8561       ++NumPHIsElim;
8562       return true;
8563     }
8564     return AnyChange;
8565   }
8566 
8567   if (CastInst *CI = dyn_cast<CastInst>(I)) {
8568     // If the source of the cast is a constant, then this should have
8569     // already been constant folded.  The only reason NOT to constant fold
8570     // it is if something (e.g. LSR) was careful to place the constant
8571     // evaluation in a block other than the one that uses it (e.g. to hoist
8572     // the address of globals out of a loop).  If this is the case, we don't
8573     // want to forward-subst the cast.
8574     if (isa<Constant>(CI->getOperand(0)))
8575       return AnyChange;
8576 
8577     if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
8578       return true;
8579 
8580     if ((isa<UIToFPInst>(I) || isa<SIToFPInst>(I) || isa<FPToUIInst>(I) ||
8581          isa<TruncInst>(I)) &&
8582         TLI->optimizeExtendOrTruncateConversion(
8583             I, LI->getLoopFor(I->getParent()), *TTI))
8584       return true;
8585 
8586     if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
8587       /// Sink a zext or sext into its user blocks if the target type doesn't
8588       /// fit in one register
8589       if (TLI->getTypeAction(CI->getContext(),
8590                              TLI->getValueType(*DL, CI->getType())) ==
8591           TargetLowering::TypeExpandInteger) {
8592         return SinkCast(CI);
8593       } else {
8594         if (TLI->optimizeExtendOrTruncateConversion(
8595                 I, LI->getLoopFor(I->getParent()), *TTI))
8596           return true;
8597 
8598         bool MadeChange = optimizeExt(I);
8599         return MadeChange | optimizeExtUses(I);
8600       }
8601     }
8602     return AnyChange;
8603   }
8604 
8605   if (auto *Cmp = dyn_cast<CmpInst>(I))
8606     if (optimizeCmp(Cmp, ModifiedDT))
8607       return true;
8608 
8609   if (match(I, m_URem(m_Value(), m_Value())))
8610     if (optimizeURem(I))
8611       return true;
8612 
8613   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
8614     LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
8615     bool Modified = optimizeLoadExt(LI);
8616     unsigned AS = LI->getPointerAddressSpace();
8617     Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
8618     return Modified;
8619   }
8620 
8621   if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
8622     if (splitMergedValStore(*SI, *DL, *TLI))
8623       return true;
8624     SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
8625     unsigned AS = SI->getPointerAddressSpace();
8626     return optimizeMemoryInst(I, SI->getOperand(1),
8627                               SI->getOperand(0)->getType(), AS);
8628   }
8629 
8630   if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
8631     unsigned AS = RMW->getPointerAddressSpace();
8632     return optimizeMemoryInst(I, RMW->getPointerOperand(), RMW->getType(), AS);
8633   }
8634 
8635   if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
8636     unsigned AS = CmpX->getPointerAddressSpace();
8637     return optimizeMemoryInst(I, CmpX->getPointerOperand(),
8638                               CmpX->getCompareOperand()->getType(), AS);
8639   }
8640 
8641   BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);
8642 
8643   if (BinOp && BinOp->getOpcode() == Instruction::And && EnableAndCmpSinking &&
8644       sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts))
8645     return true;
8646 
8647   // TODO: Move this into the switch on opcode - it handles shifts already.
8648   if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
8649                 BinOp->getOpcode() == Instruction::LShr)) {
8650     ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
8651     if (CI && TLI->hasExtractBitsInsn())
8652       if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
8653         return true;
8654   }
8655 
8656   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
8657     if (GEPI->hasAllZeroIndices()) {
8658       /// The GEP operand must be a pointer, so must its result -> BitCast
8659       Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
8660                                         GEPI->getName(), GEPI->getIterator());
8661       NC->setDebugLoc(GEPI->getDebugLoc());
8662       replaceAllUsesWith(GEPI, NC, FreshBBs, IsHugeFunc);
8663       RecursivelyDeleteTriviallyDeadInstructions(
8664           GEPI, TLInfo, nullptr,
8665           [&](Value *V) { removeAllAssertingVHReferences(V); });
8666       ++NumGEPsElim;
8667       optimizeInst(NC, ModifiedDT);
8668       return true;
8669     }
8670     if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
8671       return true;
8672     }
8673   }
8674 
8675   if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) {
8676     // freeze(icmp a, const)) -> icmp (freeze a), const
8677     // This helps generate efficient conditional jumps.
8678     Instruction *CmpI = nullptr;
8679     if (ICmpInst *II = dyn_cast<ICmpInst>(FI->getOperand(0)))
8680       CmpI = II;
8681     else if (FCmpInst *F = dyn_cast<FCmpInst>(FI->getOperand(0)))
8682       CmpI = F->getFastMathFlags().none() ? F : nullptr;
8683 
8684     if (CmpI && CmpI->hasOneUse()) {
8685       auto Op0 = CmpI->getOperand(0), Op1 = CmpI->getOperand(1);
8686       bool Const0 = isa<ConstantInt>(Op0) || isa<ConstantFP>(Op0) ||
8687                     isa<ConstantPointerNull>(Op0);
8688       bool Const1 = isa<ConstantInt>(Op1) || isa<ConstantFP>(Op1) ||
8689                     isa<ConstantPointerNull>(Op1);
8690       if (Const0 || Const1) {
8691         if (!Const0 || !Const1) {
8692           auto *F = new FreezeInst(Const0 ? Op1 : Op0, "", CmpI->getIterator());
8693           F->takeName(FI);
8694           CmpI->setOperand(Const0 ? 1 : 0, F);
8695         }
8696         replaceAllUsesWith(FI, CmpI, FreshBBs, IsHugeFunc);
8697         FI->eraseFromParent();
8698         return true;
8699       }
8700     }
8701     return AnyChange;
8702   }
8703 
8704   if (tryToSinkFreeOperands(I))
8705     return true;
8706 
8707   switch (I->getOpcode()) {
8708   case Instruction::Shl:
8709   case Instruction::LShr:
8710   case Instruction::AShr:
8711     return optimizeShiftInst(cast<BinaryOperator>(I));
8712   case Instruction::Call:
8713     return optimizeCallInst(cast<CallInst>(I), ModifiedDT);
8714   case Instruction::Select:
8715     return optimizeSelectInst(cast<SelectInst>(I));
8716   case Instruction::ShuffleVector:
8717     return optimizeShuffleVectorInst(cast<ShuffleVectorInst>(I));
8718   case Instruction::Switch:
8719     return optimizeSwitchInst(cast<SwitchInst>(I));
8720   case Instruction::ExtractElement:
8721     return optimizeExtractElementInst(cast<ExtractElementInst>(I));
8722   case Instruction::Br:
8723     return optimizeBranch(cast<BranchInst>(I), *TLI, FreshBBs, IsHugeFunc);
8724   }
8725 
8726   return AnyChange;
8727 }
8728 
8729 /// Given an OR instruction, check to see if this is a bitreverse
8730 /// idiom. If so, insert the new intrinsic and return true.
8731 bool CodeGenPrepare::makeBitReverse(Instruction &I) {
8732   if (!I.getType()->isIntegerTy() ||
8733       !TLI->isOperationLegalOrCustom(ISD::BITREVERSE,
8734                                      TLI->getValueType(*DL, I.getType(), true)))
8735     return false;
8736 
8737   SmallVector<Instruction *, 4> Insts;
8738   if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
8739     return false;
8740   Instruction *LastInst = Insts.back();
8741   replaceAllUsesWith(&I, LastInst, FreshBBs, IsHugeFunc);
8742   RecursivelyDeleteTriviallyDeadInstructions(
8743       &I, TLInfo, nullptr,
8744       [&](Value *V) { removeAllAssertingVHReferences(V); });
8745   return true;
8746 }
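
// For example (a sketch), an i32 bit-reversal written out manually as a tree
// of shifts, ands and ors is recognized by recognizeBSwapOrBitReverseIdiom
// and collapsed into a single "%r = call i32 @llvm.bitreverse.i32(i32 %x)",
// after which the now-dead shift/mask instructions are removed.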
8747 
8748 // In this pass we look for GEP and cast instructions that are used
8749 // across basic blocks and rewrite them to improve basic-block-at-a-time
8750 // selection.
8751 bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT) {
8752   SunkAddrs.clear();
8753   bool MadeChange = false;
8754 
8755   do {
8756     CurInstIterator = BB.begin();
8757     ModifiedDT = ModifyDT::NotModifyDT;
8758     while (CurInstIterator != BB.end()) {
8759       MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
8760       if (ModifiedDT != ModifyDT::NotModifyDT) {
8761         // For huge functions we tend to quickly go through the inner optimization
8762         // opportunities in the BB. So we go back to the BB head to re-optimize
8763         // each instruction instead of going back to the function head.
8764         if (IsHugeFunc) {
8765           DT.reset();
8766           getDT(*BB.getParent());
8767           break;
8768         } else {
8769           return true;
8770         }
8771       }
8772     }
8773   } while (ModifiedDT == ModifyDT::ModifyInstDT);
8774 
8775   bool MadeBitReverse = true;
8776   while (MadeBitReverse) {
8777     MadeBitReverse = false;
8778     for (auto &I : reverse(BB)) {
8779       if (makeBitReverse(I)) {
8780         MadeBitReverse = MadeChange = true;
8781         break;
8782       }
8783     }
8784   }
8785   MadeChange |= dupRetToEnableTailCallOpts(&BB, ModifiedDT);
8786 
8787   return MadeChange;
8788 }
8789 
8790 // Some CGP optimizations may move or alter what's computed in a block. Check
8791 // whether a dbg.value intrinsic could be pointed at a more appropriate operand.
8792 bool CodeGenPrepare::fixupDbgValue(Instruction *I) {
8793   assert(isa<DbgValueInst>(I));
8794   DbgValueInst &DVI = *cast<DbgValueInst>(I);
8795 
8796   // Does this dbg.value refer to a sunk address calculation?
8797   bool AnyChange = false;
8798   SmallDenseSet<Value *> LocationOps(DVI.location_ops().begin(),
8799                                      DVI.location_ops().end());
8800   for (Value *Location : LocationOps) {
8801     WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
8802     Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
8803     if (SunkAddr) {
8804       // Point dbg.value at locally computed address, which should give the best
8805       // opportunity to be accurately lowered. This update may change the type
8806       // of pointer being referred to; however this makes no difference to
8807       // debugging information, and we can't generate bitcasts that may affect
8808       // codegen.
8809       DVI.replaceVariableLocationOp(Location, SunkAddr);
8810       AnyChange = true;
8811     }
8812   }
8813   return AnyChange;
8814 }
8815 
8816 bool CodeGenPrepare::fixupDbgVariableRecordsOnInst(Instruction &I) {
8817   bool AnyChange = false;
8818   for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange()))
8819     AnyChange |= fixupDbgVariableRecord(DVR);
8820   return AnyChange;
8821 }
8822 
8823 // FIXME: should updating debug-info really cause the "changed" flag to fire,
8824 // which can cause a function to be reprocessed?
8825 bool CodeGenPrepare::fixupDbgVariableRecord(DbgVariableRecord &DVR) {
8826   if (DVR.Type != DbgVariableRecord::LocationType::Value &&
8827       DVR.Type != DbgVariableRecord::LocationType::Assign)
8828     return false;
8829 
8830   // Does this DbgVariableRecord refer to a sunk address calculation?
8831   bool AnyChange = false;
8832   SmallDenseSet<Value *> LocationOps(DVR.location_ops().begin(),
8833                                      DVR.location_ops().end());
8834   for (Value *Location : LocationOps) {
8835     WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
8836     Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
8837     if (SunkAddr) {
8838       // Point dbg.value at locally computed address, which should give the best
8839       // opportunity to be accurately lowered. This update may change the type
8840       // of pointer being referred to; however this makes no difference to
8841       // debugging information, and we can't generate bitcasts that may affect
8842       // codegen.
8843       DVR.replaceVariableLocationOp(Location, SunkAddr);
8844       AnyChange = true;
8845     }
8846   }
8847   return AnyChange;
8848 }
8849 
8850 static void DbgInserterHelper(DbgValueInst *DVI, Instruction *VI) {
8851   DVI->removeFromParent();
8852   if (isa<PHINode>(VI))
8853     DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
8854   else
8855     DVI->insertAfter(VI);
8856 }
8857 
8858 static void DbgInserterHelper(DbgVariableRecord *DVR, Instruction *VI) {
8859   DVR->removeFromParent();
8860   BasicBlock *VIBB = VI->getParent();
8861   if (isa<PHINode>(VI))
8862     VIBB->insertDbgRecordBefore(DVR, VIBB->getFirstInsertionPt());
8863   else
8864     VIBB->insertDbgRecordAfter(DVR, VI);
8865 }
8866 
8867 // An llvm.dbg.value may be using a value before its definition, due to
8868 // optimizations in this pass and others. Scan for such dbg.values, and rescue
8869 // them by moving the dbg.value to immediately after the value definition.
8870 // FIXME: Ideally this should never be necessary, and this has the potential
8871 // to re-order dbg.value intrinsics.
8872 bool CodeGenPrepare::placeDbgValues(Function &F) {
8873   bool MadeChange = false;
8874   DominatorTree DT(F);
8875 
8876   auto DbgProcessor = [&](auto *DbgItem, Instruction *Position) {
8877     SmallVector<Instruction *, 4> VIs;
8878     for (Value *V : DbgItem->location_ops())
8879       if (Instruction *VI = dyn_cast_or_null<Instruction>(V))
8880         VIs.push_back(VI);
8881 
8882     // This item may depend on multiple instructions, complicating any
8883     // potential sink. This block takes the defensive approach, opting to
8884     // "undef" the item if it has more than one instruction and any of them do
8885     // not dominate it.
8886     for (Instruction *VI : VIs) {
8887       if (VI->isTerminator())
8888         continue;
8889 
8890       // If VI is a phi in a block with an EHPad terminator, we can't insert
8891       // after it.
8892       if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
8893         continue;
8894 
8895       // If the defining instruction dominates the dbg.value, we do not need
8896       // to move the dbg.value.
8897       if (DT.dominates(VI, Position))
8898         continue;
8899 
8900       // If we depend on multiple instructions and any of them doesn't
8901       // dominate this DVI, we probably can't salvage it: moving it to
8902       // after any of the instructions could cause us to lose the others.
8903       if (VIs.size() > 1) {
8904         LLVM_DEBUG(
8905             dbgs()
8906             << "Unable to find valid location for Debug Value, undefing:\n"
8907             << *DbgItem);
8908         DbgItem->setKillLocation();
8909         break;
8910       }
8911 
8912       LLVM_DEBUG(dbgs() << "Moving Debug Value before:\n"
8913                         << *DbgItem << ' ' << *VI);
8914       DbgInserterHelper(DbgItem, VI);
8915       MadeChange = true;
8916       ++NumDbgValueMoved;
8917     }
8918   };
8919 
8920   for (BasicBlock &BB : F) {
8921     for (Instruction &Insn : llvm::make_early_inc_range(BB)) {
8922       // Process dbg.value intrinsics.
8923       DbgValueInst *DVI = dyn_cast<DbgValueInst>(&Insn);
8924       if (DVI) {
8925         DbgProcessor(DVI, DVI);
8926         continue;
8927       }
8928 
8929       // If this isn't a dbg.value, process any DbgVariableRecords attached
8930       // to this instruction.
8931       for (DbgVariableRecord &DVR : llvm::make_early_inc_range(
8932                filterDbgVars(Insn.getDbgRecordRange()))) {
8933         if (DVR.Type != DbgVariableRecord::LocationType::Value)
8934           continue;
8935         DbgProcessor(&DVR, &Insn);
8936       }
8937     }
8938   }
8939 
8940   return MadeChange;
8941 }
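
// As an illustration (hypothetical IR), if earlier transforms leave
//   call void @llvm.dbg.value(metadata i32 %x, ...)
//   %x = add i32 %a, %b
// the dbg.value uses %x before its definition; placeDbgValues moves it to
// just after the add, or kills its location if it depends on several
// instructions that do not all dominate it.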
8942 
8943 // Group scattered pseudo probes in a block to favor SelectionDAG. Scattered
8944 // probes can be chained dependencies of other regular DAG nodes and block DAG
8945 // combine optimizations.
8946 bool CodeGenPrepare::placePseudoProbes(Function &F) {
8947   bool MadeChange = false;
8948   for (auto &Block : F) {
8949     // Move the remaining probes to the beginning of the block.
8950     auto FirstInst = Block.getFirstInsertionPt();
8951     while (FirstInst != Block.end() && FirstInst->isDebugOrPseudoInst())
8952       ++FirstInst;
8953     BasicBlock::iterator I(FirstInst);
8954     I++;
8955     while (I != Block.end()) {
8956       if (auto *II = dyn_cast<PseudoProbeInst>(I++)) {
8957         II->moveBefore(&*FirstInst);
8958         MadeChange = true;
8959       }
8960     }
8961   }
8962   return MadeChange;
8963 }
8964 
8965 /// Scale down both weights to fit into uint32_t.
8966 static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
8967   uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
8968   uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
8969   NewTrue = NewTrue / Scale;
8970   NewFalse = NewFalse / Scale;
8971 }
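
// Worked example: NewTrue = 2^33, NewFalse = 2^31. NewMax = 2^33, so
// Scale = 2^33 / (2^32 - 1) + 1 = 3 (integer division), and the weights
// become 2863311530 and 715827882, fitting uint32_t with the ratio kept.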
8972 
8973 /// Some targets prefer to split a conditional branch like:
8974 /// \code
8975 ///   %0 = icmp ne i32 %a, 0
8976 ///   %1 = icmp ne i32 %b, 0
8977 ///   %or.cond = or i1 %0, %1
8978 ///   br i1 %or.cond, label %TrueBB, label %FalseBB
8979 /// \endcode
8980 /// into multiple branch instructions like:
8981 /// \code
8982 ///   bb1:
8983 ///     %0 = icmp ne i32 %a, 0
8984 ///     br i1 %0, label %TrueBB, label %bb2
8985 ///   bb2:
8986 ///     %1 = icmp ne i32 %b, 0
8987 ///     br i1 %1, label %TrueBB, label %FalseBB
8988 /// \endcode
8989 /// This usually allows instruction selection to do even further optimizations
8990 /// and combine the compare with the branch instruction. Currently this is
8991 /// applied for targets which have "cheap" jump instructions.
8992 ///
8993 /// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
8994 ///
8995 bool CodeGenPrepare::splitBranchCondition(Function &F, ModifyDT &ModifiedDT) {
8996   if (!TM->Options.EnableFastISel || TLI->isJumpExpensive())
8997     return false;
8998 
8999   bool MadeChange = false;
9000   for (auto &BB : F) {
9001     // Does this BB end with the following?
9002     //   %cond1 = icmp|fcmp|binary instruction ...
9003     //   %cond2 = icmp|fcmp|binary instruction ...
9004     //   %cond.or = or|and i1 %cond1, %cond2
9005     //   br i1 %cond.or, label %dest1, label %dest2
9006     Instruction *LogicOp;
9007     BasicBlock *TBB, *FBB;
9008     if (!match(BB.getTerminator(),
9009                m_Br(m_OneUse(m_Instruction(LogicOp)), TBB, FBB)))
9010       continue;
9011 
9012     auto *Br1 = cast<BranchInst>(BB.getTerminator());
9013     if (Br1->getMetadata(LLVMContext::MD_unpredictable))
9014       continue;
9015 
9016     // The merging of mostly-empty BBs can cause a degenerate branch.
9017     if (TBB == FBB)
9018       continue;
9019 
9020     unsigned Opc;
9021     Value *Cond1, *Cond2;
9022     if (match(LogicOp,
9023               m_LogicalAnd(m_OneUse(m_Value(Cond1)), m_OneUse(m_Value(Cond2)))))
9024       Opc = Instruction::And;
9025     else if (match(LogicOp, m_LogicalOr(m_OneUse(m_Value(Cond1)),
9026                                         m_OneUse(m_Value(Cond2)))))
9027       Opc = Instruction::Or;
9028     else
9029       continue;
9030 
9031     auto IsGoodCond = [](Value *Cond) {
9032       return match(
9033           Cond,
9034           m_CombineOr(m_Cmp(), m_CombineOr(m_LogicalAnd(m_Value(), m_Value()),
9035                                            m_LogicalOr(m_Value(), m_Value()))));
9036     };
9037     if (!IsGoodCond(Cond1) || !IsGoodCond(Cond2))
9038       continue;
9039 
9040     LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());
9041 
9042     // Create a new BB.
9043     auto *TmpBB =
9044         BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
9045                            BB.getParent(), BB.getNextNode());
9046     if (IsHugeFunc)
9047       FreshBBs.insert(TmpBB);
9048 
9049     // Update the original basic block: make the branch instruction use the
9050     // first condition directly and remove the no-longer-needed and/or instruction.
9051     Br1->setCondition(Cond1);
9052     LogicOp->eraseFromParent();
9053 
9054     // Depending on the condition we have to either replace the true or the
9055     // false successor of the original branch instruction.
9056     if (Opc == Instruction::And)
9057       Br1->setSuccessor(0, TmpBB);
9058     else
9059       Br1->setSuccessor(1, TmpBB);
9060 
9061     // Fill in the new basic block.
9062     auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
9063     if (auto *I = dyn_cast<Instruction>(Cond2)) {
9064       I->removeFromParent();
9065       I->insertBefore(Br2);
9066     }
9067 
9068     // Update PHI nodes in both successors. The original BB needs to be
9069     // replaced in one successor's PHI nodes, because the branch now comes from
9070     // the newly generated BB (TmpBB). In the other successor we need to add one
9071     // incoming edge to the PHI nodes, because both branch instructions target
9072     // now the same successor. Depending on the original branch condition
9073     // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
9074     // we perform the correct update for the PHI nodes.
9075     // This doesn't change the successor order of the just created branch
9076     // instruction (or any other instruction).
9077     if (Opc == Instruction::Or)
9078       std::swap(TBB, FBB);
9079 
9080     // Replace the old BB with the new BB.
9081     TBB->replacePhiUsesWith(&BB, TmpBB);
9082 
9083     // Add another incoming edge from the new BB.
9084     for (PHINode &PN : FBB->phis()) {
9085       auto *Val = PN.getIncomingValueForBlock(&BB);
9086       PN.addIncoming(Val, TmpBB);
9087     }
9088 
9089     // Update the branch weights (from SelectionDAGBuilder::
9090     // FindMergedConditions).
9091     if (Opc == Instruction::Or) {
9092       // Codegen X | Y as:
9093       // BB1:
9094       //   jmp_if_X TBB
9095       //   jmp TmpBB
9096       // TmpBB:
9097       //   jmp_if_Y TBB
9098       //   jmp FBB
9099       //
9100 
9101       // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
9102       // The requirement is that
9103       //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
9104       //     = TrueProb for original BB.
9105       // Assuming the original weights are A and B, one choice is to set BB1's
9106       // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
9107       // assumes that
9108       //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
9109       // Another choice is to assume TrueProb for BB1 equals to TrueProb for
9110       // TmpBB, but the math is more complicated.
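      // For instance (illustrative numbers), with original weights A=3, B=5:
      // Br1 gets 3:13 and Br2 gets 3:10, and indeed
      //   3/16 + (13/16)*(3/13) = 6/16 = 3/8,
      // matching the original true probability A/(A+B) = 3/8.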
9111       uint64_t TrueWeight, FalseWeight;
9112       if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) {
9113         uint64_t NewTrueWeight = TrueWeight;
9114         uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
9115         scaleWeights(NewTrueWeight, NewFalseWeight);
9116         Br1->setMetadata(LLVMContext::MD_prof,
9117                          MDBuilder(Br1->getContext())
9118                              .createBranchWeights(NewTrueWeight, NewFalseWeight,
9119                                                   hasBranchWeightOrigin(*Br1)));
9120 
9121         NewTrueWeight = TrueWeight;
9122         NewFalseWeight = 2 * FalseWeight;
9123         scaleWeights(NewTrueWeight, NewFalseWeight);
9124         Br2->setMetadata(LLVMContext::MD_prof,
9125                          MDBuilder(Br2->getContext())
9126                              .createBranchWeights(NewTrueWeight, NewFalseWeight));
9127       }
9128     } else {
9129       // Codegen X & Y as:
9130       // BB1:
9131       //   jmp_if_X TmpBB
9132       //   jmp FBB
9133       // TmpBB:
9134       //   jmp_if_Y TBB
9135       //   jmp FBB
9136       //
9137       //  This requires creation of TmpBB after CurBB.
9138 
9139       // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
9140       // The requirement is that
9141       //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
9142       //     = FalseProb for original BB.
9143       // Assuming the original weights are A and B, one choice is to set BB1's
9144       // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
9145       // assumes that
9146       //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
9147       uint64_t TrueWeight, FalseWeight;
9148       if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) {
9149         uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
9150         uint64_t NewFalseWeight = FalseWeight;
9151         scaleWeights(NewTrueWeight, NewFalseWeight);
9152         Br1->setMetadata(LLVMContext::MD_prof,
9153                          MDBuilder(Br1->getContext())
9154                              .createBranchWeights(NewTrueWeight, NewFalseWeight));
9155 
9156         NewTrueWeight = 2 * TrueWeight;
9157         NewFalseWeight = FalseWeight;
9158         scaleWeights(NewTrueWeight, NewFalseWeight);
9159         Br2->setMetadata(LLVMContext::MD_prof,
9160                          MDBuilder(Br2->getContext())
9161                              .createBranchWeights(NewTrueWeight, NewFalseWeight));
9162       }
9163     }
9164 
9165     ModifiedDT = ModifyDT::ModifyBBDT;
9166     MadeChange = true;
9167 
9168     LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
9169                TmpBB->dump());
9170   }
9171   return MadeChange;
9172 }
9173