xref: /llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp (revision 731ae694a3d8f4d39e855c9a82c97d4f170fd48a)
1 //===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass munges the code in the input function to better prepare it for
10 // SelectionDAG-based code generation. This works around limitations in it's
11 // basic-block-at-a-time approach. It should eventually be removed.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/CodeGen/CodeGenPrepare.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/MapVector.h"
20 #include "llvm/ADT/PointerIntPair.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/Analysis/BlockFrequencyInfo.h"
26 #include "llvm/Analysis/BranchProbabilityInfo.h"
27 #include "llvm/Analysis/InstructionSimplify.h"
28 #include "llvm/Analysis/LoopInfo.h"
29 #include "llvm/Analysis/ProfileSummaryInfo.h"
30 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
31 #include "llvm/Analysis/TargetLibraryInfo.h"
32 #include "llvm/Analysis/TargetTransformInfo.h"
33 #include "llvm/Analysis/ValueTracking.h"
34 #include "llvm/Analysis/VectorUtils.h"
35 #include "llvm/CodeGen/Analysis.h"
36 #include "llvm/CodeGen/BasicBlockSectionsProfileReader.h"
37 #include "llvm/CodeGen/ISDOpcodes.h"
38 #include "llvm/CodeGen/SelectionDAGNodes.h"
39 #include "llvm/CodeGen/TargetLowering.h"
40 #include "llvm/CodeGen/TargetPassConfig.h"
41 #include "llvm/CodeGen/TargetSubtargetInfo.h"
42 #include "llvm/CodeGen/ValueTypes.h"
43 #include "llvm/CodeGenTypes/MachineValueType.h"
44 #include "llvm/Config/llvm-config.h"
45 #include "llvm/IR/Argument.h"
46 #include "llvm/IR/Attributes.h"
47 #include "llvm/IR/BasicBlock.h"
48 #include "llvm/IR/Constant.h"
49 #include "llvm/IR/Constants.h"
50 #include "llvm/IR/DataLayout.h"
51 #include "llvm/IR/DebugInfo.h"
52 #include "llvm/IR/DerivedTypes.h"
53 #include "llvm/IR/Dominators.h"
54 #include "llvm/IR/Function.h"
55 #include "llvm/IR/GetElementPtrTypeIterator.h"
56 #include "llvm/IR/GlobalValue.h"
57 #include "llvm/IR/GlobalVariable.h"
58 #include "llvm/IR/IRBuilder.h"
59 #include "llvm/IR/InlineAsm.h"
60 #include "llvm/IR/InstrTypes.h"
61 #include "llvm/IR/Instruction.h"
62 #include "llvm/IR/Instructions.h"
63 #include "llvm/IR/IntrinsicInst.h"
64 #include "llvm/IR/Intrinsics.h"
65 #include "llvm/IR/IntrinsicsAArch64.h"
66 #include "llvm/IR/LLVMContext.h"
67 #include "llvm/IR/MDBuilder.h"
68 #include "llvm/IR/Module.h"
69 #include "llvm/IR/Operator.h"
70 #include "llvm/IR/PatternMatch.h"
71 #include "llvm/IR/ProfDataUtils.h"
72 #include "llvm/IR/Statepoint.h"
73 #include "llvm/IR/Type.h"
74 #include "llvm/IR/Use.h"
75 #include "llvm/IR/User.h"
76 #include "llvm/IR/Value.h"
77 #include "llvm/IR/ValueHandle.h"
78 #include "llvm/IR/ValueMap.h"
79 #include "llvm/InitializePasses.h"
80 #include "llvm/Pass.h"
81 #include "llvm/Support/BlockFrequency.h"
82 #include "llvm/Support/BranchProbability.h"
83 #include "llvm/Support/Casting.h"
84 #include "llvm/Support/CommandLine.h"
85 #include "llvm/Support/Compiler.h"
86 #include "llvm/Support/Debug.h"
87 #include "llvm/Support/ErrorHandling.h"
88 #include "llvm/Support/MathExtras.h"
89 #include "llvm/Support/raw_ostream.h"
90 #include "llvm/Target/TargetMachine.h"
91 #include "llvm/Target/TargetOptions.h"
92 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
93 #include "llvm/Transforms/Utils/BypassSlowDivision.h"
94 #include "llvm/Transforms/Utils/Local.h"
95 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
96 #include "llvm/Transforms/Utils/SizeOpts.h"
97 #include <algorithm>
98 #include <cassert>
99 #include <cstdint>
100 #include <iterator>
101 #include <limits>
102 #include <memory>
103 #include <optional>
104 #include <utility>
105 #include <vector>
106 
107 using namespace llvm;
108 using namespace llvm::PatternMatch;
109 
110 #define DEBUG_TYPE "codegenprepare"
111 
112 STATISTIC(NumBlocksElim, "Number of blocks eliminated");
113 STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
114 STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
115 STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
116                       "sunken Cmps");
117 STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
118                        "of sunken Casts");
119 STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
120                           "computations were sunk");
121 STATISTIC(NumMemoryInstsPhiCreated,
122           "Number of phis created when address "
123           "computations were sunk to memory instructions");
124 STATISTIC(NumMemoryInstsSelectCreated,
125           "Number of select created when address "
126           "computations were sunk to memory instructions");
127 STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
128 STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
129 STATISTIC(NumAndsAdded,
130           "Number of and mask instructions added to form ext loads");
131 STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
132 STATISTIC(NumRetsDup, "Number of return instructions duplicated");
133 STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
134 STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
135 STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
136 
137 static cl::opt<bool> DisableBranchOpts(
138     "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
139     cl::desc("Disable branch optimizations in CodeGenPrepare"));
140 
141 static cl::opt<bool>
142     DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
143                   cl::desc("Disable GC optimizations in CodeGenPrepare"));
144 
145 static cl::opt<bool>
146     DisableSelectToBranch("disable-cgp-select2branch", cl::Hidden,
147                           cl::init(false),
148                           cl::desc("Disable select to branch conversion."));
149 
150 static cl::opt<bool>
151     AddrSinkUsingGEPs("addr-sink-using-gep", cl::Hidden, cl::init(true),
152                       cl::desc("Address sinking in CGP using GEPs."));
153 
154 static cl::opt<bool>
155     EnableAndCmpSinking("enable-andcmp-sinking", cl::Hidden, cl::init(true),
156                         cl::desc("Enable sinkinig and/cmp into branches."));
157 
158 static cl::opt<bool> DisableStoreExtract(
159     "disable-cgp-store-extract", cl::Hidden, cl::init(false),
160     cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));
161 
162 static cl::opt<bool> StressStoreExtract(
163     "stress-cgp-store-extract", cl::Hidden, cl::init(false),
164     cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));
165 
166 static cl::opt<bool> DisableExtLdPromotion(
167     "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
168     cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
169              "CodeGenPrepare"));
170 
171 static cl::opt<bool> StressExtLdPromotion(
172     "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
173     cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
174              "optimization in CodeGenPrepare"));
175 
176 static cl::opt<bool> DisablePreheaderProtect(
177     "disable-preheader-prot", cl::Hidden, cl::init(false),
178     cl::desc("Disable protection against removing loop preheaders"));
179 
180 static cl::opt<bool> ProfileGuidedSectionPrefix(
181     "profile-guided-section-prefix", cl::Hidden, cl::init(true),
182     cl::desc("Use profile info to add section prefix for hot/cold functions"));
183 
184 static cl::opt<bool> ProfileUnknownInSpecialSection(
185     "profile-unknown-in-special-section", cl::Hidden,
186     cl::desc("In profiling mode like sampleFDO, if a function doesn't have "
187              "profile, we cannot tell the function is cold for sure because "
188              "it may be a function newly added without ever being sampled. "
189              "With the flag enabled, compiler can put such profile unknown "
190              "functions into a special section, so runtime system can choose "
191              "to handle it in a different way than .text section, to save "
192              "RAM for example. "));
193 
194 static cl::opt<bool> BBSectionsGuidedSectionPrefix(
195     "bbsections-guided-section-prefix", cl::Hidden, cl::init(true),
196     cl::desc("Use the basic-block-sections profile to determine the text "
197              "section prefix for hot functions. Functions with "
198              "basic-block-sections profile will be placed in `.text.hot` "
199              "regardless of their FDO profile info. Other functions won't be "
200              "impacted, i.e., their prefixes will be decided by FDO/sampleFDO "
201              "profiles."));
202 
203 static cl::opt<uint64_t> FreqRatioToSkipMerge(
204     "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
205     cl::desc("Skip merging empty blocks if (frequency of empty block) / "
206              "(frequency of destination block) is greater than this ratio"));
207 
208 static cl::opt<bool> ForceSplitStore(
209     "force-split-store", cl::Hidden, cl::init(false),
210     cl::desc("Force store splitting no matter what the target query says."));
211 
212 static cl::opt<bool> EnableTypePromotionMerge(
213     "cgp-type-promotion-merge", cl::Hidden,
214     cl::desc("Enable merging of redundant sexts when one is dominating"
215              " the other."),
216     cl::init(true));
217 
218 static cl::opt<bool> DisableComplexAddrModes(
219     "disable-complex-addr-modes", cl::Hidden, cl::init(false),
220     cl::desc("Disables combining addressing modes with different parts "
221              "in optimizeMemoryInst."));
222 
223 static cl::opt<bool>
224     AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
225                     cl::desc("Allow creation of Phis in Address sinking."));
226 
227 static cl::opt<bool> AddrSinkNewSelects(
228     "addr-sink-new-select", cl::Hidden, cl::init(true),
229     cl::desc("Allow creation of selects in Address sinking."));
230 
231 static cl::opt<bool> AddrSinkCombineBaseReg(
232     "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
233     cl::desc("Allow combining of BaseReg field in Address sinking."));
234 
235 static cl::opt<bool> AddrSinkCombineBaseGV(
236     "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
237     cl::desc("Allow combining of BaseGV field in Address sinking."));
238 
239 static cl::opt<bool> AddrSinkCombineBaseOffs(
240     "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
241     cl::desc("Allow combining of BaseOffs field in Address sinking."));
242 
243 static cl::opt<bool> AddrSinkCombineScaledReg(
244     "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
245     cl::desc("Allow combining of ScaledReg field in Address sinking."));
246 
247 static cl::opt<bool>
248     EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
249                          cl::init(true),
250                          cl::desc("Enable splitting large offset of GEP."));
251 
252 static cl::opt<bool> EnableICMP_EQToICMP_ST(
253     "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
254     cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));
255 
256 static cl::opt<bool>
257     VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
258                      cl::desc("Enable BFI update verification for "
259                               "CodeGenPrepare."));
260 
261 static cl::opt<bool>
262     OptimizePhiTypes("cgp-optimize-phi-types", cl::Hidden, cl::init(true),
263                      cl::desc("Enable converting phi types in CodeGenPrepare"));
264 
265 static cl::opt<unsigned>
266     HugeFuncThresholdInCGPP("cgpp-huge-func", cl::init(10000), cl::Hidden,
267                             cl::desc("Least BB number of huge function."));
268 
269 static cl::opt<unsigned>
270     MaxAddressUsersToScan("cgp-max-address-users-to-scan", cl::init(100),
271                           cl::Hidden,
272                           cl::desc("Max number of address users to look at"));
273 
274 static cl::opt<bool>
275     DisableDeletePHIs("disable-cgp-delete-phis", cl::Hidden, cl::init(false),
276                       cl::desc("Disable elimination of dead PHI nodes."));
277 
278 namespace {
279 
280 enum ExtType {
281   ZeroExtension, // Zero extension has been seen.
282   SignExtension, // Sign extension has been seen.
283   BothExtension  // This extension type is used if we saw sext after
284                  // ZeroExtension had been set, or if we saw zext after
285                  // SignExtension had been set. It makes the type
286                  // information of a promoted instruction invalid.
287 };
288 
289 enum ModifyDT {
290   NotModifyDT, // Not Modify any DT.
291   ModifyBBDT,  // Modify the Basic Block Dominator Tree.
292   ModifyInstDT // Modify the Instruction Dominator in a Basic Block,
293                // This usually means we move/delete/insert instruction
294                // in a Basic Block. So we should re-iterate instructions
295                // in such Basic Block.
296 };
297 
298 using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
299 using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
300 using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
301 using SExts = SmallVector<Instruction *, 16>;
302 using ValueToSExts = MapVector<Value *, SExts>;
303 
304 class TypePromotionTransaction;
305 
306 class CodeGenPrepare {
307   friend class CodeGenPrepareLegacyPass;
308   const TargetMachine *TM = nullptr;
309   const TargetSubtargetInfo *SubtargetInfo = nullptr;
310   const TargetLowering *TLI = nullptr;
311   const TargetRegisterInfo *TRI = nullptr;
312   const TargetTransformInfo *TTI = nullptr;
313   const BasicBlockSectionsProfileReader *BBSectionsProfileReader = nullptr;
314   const TargetLibraryInfo *TLInfo = nullptr;
315   LoopInfo *LI = nullptr;
316   std::unique_ptr<BlockFrequencyInfo> BFI;
317   std::unique_ptr<BranchProbabilityInfo> BPI;
318   ProfileSummaryInfo *PSI = nullptr;
319 
320   /// As we scan instructions optimizing them, this is the next instruction
321   /// to optimize. Transforms that can invalidate this should update it.
322   BasicBlock::iterator CurInstIterator;
323 
324   /// Keeps track of non-local addresses that have been sunk into a block.
325   /// This allows us to avoid inserting duplicate code for blocks with
326   /// multiple load/stores of the same address. The usage of WeakTrackingVH
327   /// enables SunkAddrs to be treated as a cache whose entries can be
328   /// invalidated if a sunken address computation has been erased.
329   ValueMap<Value *, WeakTrackingVH> SunkAddrs;
330 
331   /// Keeps track of all instructions inserted for the current function.
332   SetOfInstrs InsertedInsts;
333 
334   /// Keeps track of the type of the related instruction before their
335   /// promotion for the current function.
336   InstrToOrigTy PromotedInsts;
337 
338   /// Keep track of instructions removed during promotion.
339   SetOfInstrs RemovedInsts;
340 
341   /// Keep track of sext chains based on their initial value.
342   DenseMap<Value *, Instruction *> SeenChainsForSExt;
343 
344   /// Keep track of GEPs accessing the same data structures such as structs or
345   /// arrays that are candidates to be split later because of their large
346   /// size.
347   MapVector<AssertingVH<Value>,
348             SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
349       LargeOffsetGEPMap;
350 
351   /// Keep track of new GEP base after splitting the GEPs having large offset.
352   SmallSet<AssertingVH<Value>, 2> NewGEPBases;
353 
354   /// Map serial numbers to Large offset GEPs.
355   DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;
356 
357   /// Keep track of SExt promoted.
358   ValueToSExts ValToSExtendedUses;
359 
360   /// True if the function has the OptSize attribute.
361   bool OptSize;
362 
363   /// DataLayout for the Function being processed.
364   const DataLayout *DL = nullptr;
365 
366   /// Building the dominator tree can be expensive, so we only build it
367   /// lazily and update it when required.
368   std::unique_ptr<DominatorTree> DT;
369 
370 public:
371   CodeGenPrepare(){};
372   CodeGenPrepare(const TargetMachine *TM) : TM(TM){};
373   /// If encounter huge function, we need to limit the build time.
374   bool IsHugeFunc = false;
375 
376   /// FreshBBs is like worklist, it collected the updated BBs which need
377   /// to be optimized again.
378   /// Note: Consider building time in this pass, when a BB updated, we need
379   /// to insert such BB into FreshBBs for huge function.
380   SmallSet<BasicBlock *, 32> FreshBBs;
381 
382   void releaseMemory() {
383     // Clear per function information.
384     InsertedInsts.clear();
385     PromotedInsts.clear();
386     FreshBBs.clear();
387     BPI.reset();
388     BFI.reset();
389   }
390 
391   bool run(Function &F, FunctionAnalysisManager &AM);
392 
393 private:
394   template <typename F>
395   void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
396     // Substituting can cause recursive simplifications, which can invalidate
397     // our iterator.  Use a WeakTrackingVH to hold onto it in case this
398     // happens.
399     Value *CurValue = &*CurInstIterator;
400     WeakTrackingVH IterHandle(CurValue);
401 
402     f();
403 
404     // If the iterator instruction was recursively deleted, start over at the
405     // start of the block.
406     if (IterHandle != CurValue) {
407       CurInstIterator = BB->begin();
408       SunkAddrs.clear();
409     }
410   }
411 
412   // Get the DominatorTree, building if necessary.
413   DominatorTree &getDT(Function &F) {
414     if (!DT)
415       DT = std::make_unique<DominatorTree>(F);
416     return *DT;
417   }
418 
419   void removeAllAssertingVHReferences(Value *V);
420   bool eliminateAssumptions(Function &F);
421   bool eliminateFallThrough(Function &F, DominatorTree *DT = nullptr);
422   bool eliminateMostlyEmptyBlocks(Function &F);
423   BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
424   bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
425   void eliminateMostlyEmptyBlock(BasicBlock *BB);
426   bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
427                                      bool isPreheader);
428   bool makeBitReverse(Instruction &I);
429   bool optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT);
430   bool optimizeInst(Instruction *I, ModifyDT &ModifiedDT);
431   bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, Type *AccessTy,
432                           unsigned AddrSpace);
433   bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
434   bool optimizeInlineAsmInst(CallInst *CS);
435   bool optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT);
436   bool optimizeExt(Instruction *&I);
437   bool optimizeExtUses(Instruction *I);
438   bool optimizeLoadExt(LoadInst *Load);
439   bool optimizeShiftInst(BinaryOperator *BO);
440   bool optimizeFunnelShift(IntrinsicInst *Fsh);
441   bool optimizeSelectInst(SelectInst *SI);
442   bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
443   bool optimizeSwitchType(SwitchInst *SI);
444   bool optimizeSwitchPhiConstants(SwitchInst *SI);
445   bool optimizeSwitchInst(SwitchInst *SI);
446   bool optimizeExtractElementInst(Instruction *Inst);
447   bool dupRetToEnableTailCallOpts(BasicBlock *BB, ModifyDT &ModifiedDT);
448   bool fixupDbgValue(Instruction *I);
449   bool fixupDbgVariableRecord(DbgVariableRecord &I);
450   bool fixupDbgVariableRecordsOnInst(Instruction &I);
451   bool placeDbgValues(Function &F);
452   bool placePseudoProbes(Function &F);
453   bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
454                     LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
455   bool tryToPromoteExts(TypePromotionTransaction &TPT,
456                         const SmallVectorImpl<Instruction *> &Exts,
457                         SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
458                         unsigned CreatedInstsCost = 0);
459   bool mergeSExts(Function &F);
460   bool splitLargeGEPOffsets();
461   bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited,
462                        SmallPtrSetImpl<Instruction *> &DeletedInstrs);
463   bool optimizePhiTypes(Function &F);
464   bool performAddressTypePromotion(
465       Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
466       bool HasPromoted, TypePromotionTransaction &TPT,
467       SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
468   bool splitBranchCondition(Function &F, ModifyDT &ModifiedDT);
469   bool simplifyOffsetableRelocate(GCStatepointInst &I);
470 
471   bool tryToSinkFreeOperands(Instruction *I);
472   bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0, Value *Arg1,
473                                    CmpInst *Cmp, Intrinsic::ID IID);
474   bool optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT);
475   bool combineToUSubWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
476   bool combineToUAddWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
477   void verifyBFIUpdates(Function &F);
478   bool _run(Function &F);
479 };
480 
481 class CodeGenPrepareLegacyPass : public FunctionPass {
482 public:
483   static char ID; // Pass identification, replacement for typeid
484 
485   CodeGenPrepareLegacyPass() : FunctionPass(ID) {
486     initializeCodeGenPrepareLegacyPassPass(*PassRegistry::getPassRegistry());
487   }
488 
489   bool runOnFunction(Function &F) override;
490 
491   StringRef getPassName() const override { return "CodeGen Prepare"; }
492 
493   void getAnalysisUsage(AnalysisUsage &AU) const override {
494     // FIXME: When we can selectively preserve passes, preserve the domtree.
495     AU.addRequired<ProfileSummaryInfoWrapperPass>();
496     AU.addRequired<TargetLibraryInfoWrapperPass>();
497     AU.addRequired<TargetPassConfig>();
498     AU.addRequired<TargetTransformInfoWrapperPass>();
499     AU.addRequired<LoopInfoWrapperPass>();
500     AU.addUsedIfAvailable<BasicBlockSectionsProfileReaderWrapperPass>();
501   }
502 };
503 
504 } // end anonymous namespace
505 
506 char CodeGenPrepareLegacyPass::ID = 0;
507 
508 bool CodeGenPrepareLegacyPass::runOnFunction(Function &F) {
509   if (skipFunction(F))
510     return false;
511   auto TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
512   CodeGenPrepare CGP(TM);
513   CGP.DL = &F.getDataLayout();
514   CGP.SubtargetInfo = TM->getSubtargetImpl(F);
515   CGP.TLI = CGP.SubtargetInfo->getTargetLowering();
516   CGP.TRI = CGP.SubtargetInfo->getRegisterInfo();
517   CGP.TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
518   CGP.TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
519   CGP.LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
520   CGP.BPI.reset(new BranchProbabilityInfo(F, *CGP.LI));
521   CGP.BFI.reset(new BlockFrequencyInfo(F, *CGP.BPI, *CGP.LI));
522   CGP.PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
523   auto BBSPRWP =
524       getAnalysisIfAvailable<BasicBlockSectionsProfileReaderWrapperPass>();
525   CGP.BBSectionsProfileReader = BBSPRWP ? &BBSPRWP->getBBSPR() : nullptr;
526 
527   return CGP._run(F);
528 }
529 
530 INITIALIZE_PASS_BEGIN(CodeGenPrepareLegacyPass, DEBUG_TYPE,
531                       "Optimize for code generation", false, false)
532 INITIALIZE_PASS_DEPENDENCY(BasicBlockSectionsProfileReaderWrapperPass)
533 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
534 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
535 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
536 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
537 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
538 INITIALIZE_PASS_END(CodeGenPrepareLegacyPass, DEBUG_TYPE,
539                     "Optimize for code generation", false, false)
540 
541 FunctionPass *llvm::createCodeGenPrepareLegacyPass() {
542   return new CodeGenPrepareLegacyPass();
543 }
544 
545 PreservedAnalyses CodeGenPreparePass::run(Function &F,
546                                           FunctionAnalysisManager &AM) {
547   CodeGenPrepare CGP(TM);
548 
549   bool Changed = CGP.run(F, AM);
550   if (!Changed)
551     return PreservedAnalyses::all();
552 
553   PreservedAnalyses PA;
554   PA.preserve<TargetLibraryAnalysis>();
555   PA.preserve<TargetIRAnalysis>();
556   PA.preserve<LoopAnalysis>();
557   return PA;
558 }
559 
560 bool CodeGenPrepare::run(Function &F, FunctionAnalysisManager &AM) {
561   DL = &F.getDataLayout();
562   SubtargetInfo = TM->getSubtargetImpl(F);
563   TLI = SubtargetInfo->getTargetLowering();
564   TRI = SubtargetInfo->getRegisterInfo();
565   TLInfo = &AM.getResult<TargetLibraryAnalysis>(F);
566   TTI = &AM.getResult<TargetIRAnalysis>(F);
567   LI = &AM.getResult<LoopAnalysis>(F);
568   BPI.reset(new BranchProbabilityInfo(F, *LI));
569   BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
570   auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
571   PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
572   BBSectionsProfileReader =
573       AM.getCachedResult<BasicBlockSectionsProfileReaderAnalysis>(F);
574   return _run(F);
575 }
576 
577 bool CodeGenPrepare::_run(Function &F) {
578   bool EverMadeChange = false;
579 
580   OptSize = F.hasOptSize();
581   // Use the basic-block-sections profile to promote hot functions to .text.hot
582   // if requested.
583   if (BBSectionsGuidedSectionPrefix && BBSectionsProfileReader &&
584       BBSectionsProfileReader->isFunctionHot(F.getName())) {
585     F.setSectionPrefix("hot");
586   } else if (ProfileGuidedSectionPrefix) {
587     // The hot attribute overwrites profile count based hotness while profile
588     // counts based hotness overwrite the cold attribute.
589     // This is a conservative behabvior.
590     if (F.hasFnAttribute(Attribute::Hot) ||
591         PSI->isFunctionHotInCallGraph(&F, *BFI))
592       F.setSectionPrefix("hot");
593     // If PSI shows this function is not hot, we will placed the function
594     // into unlikely section if (1) PSI shows this is a cold function, or
595     // (2) the function has a attribute of cold.
596     else if (PSI->isFunctionColdInCallGraph(&F, *BFI) ||
597              F.hasFnAttribute(Attribute::Cold))
598       F.setSectionPrefix("unlikely");
599     else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() &&
600              PSI->isFunctionHotnessUnknown(F))
601       F.setSectionPrefix("unknown");
602   }
603 
604   /// This optimization identifies DIV instructions that can be
605   /// profitably bypassed and carried out with a shorter, faster divide.
606   if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
607     const DenseMap<unsigned int, unsigned int> &BypassWidths =
608         TLI->getBypassSlowDivWidths();
609     BasicBlock *BB = &*F.begin();
610     while (BB != nullptr) {
611       // bypassSlowDivision may create new BBs, but we don't want to reapply the
612       // optimization to those blocks.
613       BasicBlock *Next = BB->getNextNode();
614       // F.hasOptSize is already checked in the outer if statement.
615       if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
616         EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
617       BB = Next;
618     }
619   }
620 
621   // Get rid of @llvm.assume builtins before attempting to eliminate empty
622   // blocks, since there might be blocks that only contain @llvm.assume calls
623   // (plus arguments that we can get rid of).
624   EverMadeChange |= eliminateAssumptions(F);
625 
626   // Eliminate blocks that contain only PHI nodes and an
627   // unconditional branch.
628   EverMadeChange |= eliminateMostlyEmptyBlocks(F);
629 
630   ModifyDT ModifiedDT = ModifyDT::NotModifyDT;
631   if (!DisableBranchOpts)
632     EverMadeChange |= splitBranchCondition(F, ModifiedDT);
633 
634   // Split some critical edges where one of the sources is an indirect branch,
635   // to help generate sane code for PHIs involving such edges.
636   EverMadeChange |=
637       SplitIndirectBrCriticalEdges(F, /*IgnoreBlocksWithoutPHI=*/true);
638 
639   // If we are optimzing huge function, we need to consider the build time.
640   // Because the basic algorithm's complex is near O(N!).
641   IsHugeFunc = F.size() > HugeFuncThresholdInCGPP;
642 
643   // Transformations above may invalidate dominator tree and/or loop info.
644   DT.reset();
645   LI->releaseMemory();
646   LI->analyze(getDT(F));
647 
648   bool MadeChange = true;
649   bool FuncIterated = false;
650   while (MadeChange) {
651     MadeChange = false;
652 
653     for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
654       if (FuncIterated && !FreshBBs.contains(&BB))
655         continue;
656 
657       ModifyDT ModifiedDTOnIteration = ModifyDT::NotModifyDT;
658       bool Changed = optimizeBlock(BB, ModifiedDTOnIteration);
659 
660       if (ModifiedDTOnIteration == ModifyDT::ModifyBBDT)
661         DT.reset();
662 
663       MadeChange |= Changed;
664       if (IsHugeFunc) {
665         // If the BB is updated, it may still has chance to be optimized.
666         // This usually happen at sink optimization.
667         // For example:
668         //
669         // bb0:
670         // %and = and i32 %a, 4
671         // %cmp = icmp eq i32 %and, 0
672         //
673         // If the %cmp sink to other BB, the %and will has chance to sink.
674         if (Changed)
675           FreshBBs.insert(&BB);
676         else if (FuncIterated)
677           FreshBBs.erase(&BB);
678       } else {
679         // For small/normal functions, we restart BB iteration if the dominator
680         // tree of the Function was changed.
681         if (ModifiedDTOnIteration != ModifyDT::NotModifyDT)
682           break;
683       }
684     }
685     // We have iterated all the BB in the (only work for huge) function.
686     FuncIterated = IsHugeFunc;
687 
688     if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
689       MadeChange |= mergeSExts(F);
690     if (!LargeOffsetGEPMap.empty())
691       MadeChange |= splitLargeGEPOffsets();
692     MadeChange |= optimizePhiTypes(F);
693 
694     if (MadeChange)
695       eliminateFallThrough(F, DT.get());
696 
697 #ifndef NDEBUG
698     if (MadeChange && VerifyLoopInfo)
699       LI->verify(getDT(F));
700 #endif
701 
702     // Really free removed instructions during promotion.
703     for (Instruction *I : RemovedInsts)
704       I->deleteValue();
705 
706     EverMadeChange |= MadeChange;
707     SeenChainsForSExt.clear();
708     ValToSExtendedUses.clear();
709     RemovedInsts.clear();
710     LargeOffsetGEPMap.clear();
711     LargeOffsetGEPID.clear();
712   }
713 
714   NewGEPBases.clear();
715   SunkAddrs.clear();
716 
717   if (!DisableBranchOpts) {
718     MadeChange = false;
719     // Use a set vector to get deterministic iteration order. The order the
720     // blocks are removed may affect whether or not PHI nodes in successors
721     // are removed.
722     SmallSetVector<BasicBlock *, 8> WorkList;
723     for (BasicBlock &BB : F) {
724       SmallVector<BasicBlock *, 2> Successors(successors(&BB));
725       MadeChange |= ConstantFoldTerminator(&BB, true);
726       if (!MadeChange)
727         continue;
728 
729       for (BasicBlock *Succ : Successors)
730         if (pred_empty(Succ))
731           WorkList.insert(Succ);
732     }
733 
734     // Delete the dead blocks and any of their dead successors.
735     MadeChange |= !WorkList.empty();
736     while (!WorkList.empty()) {
737       BasicBlock *BB = WorkList.pop_back_val();
738       SmallVector<BasicBlock *, 2> Successors(successors(BB));
739 
740       DeleteDeadBlock(BB);
741 
742       for (BasicBlock *Succ : Successors)
743         if (pred_empty(Succ))
744           WorkList.insert(Succ);
745     }
746 
747     // Merge pairs of basic blocks with unconditional branches, connected by
748     // a single edge.
749     if (EverMadeChange || MadeChange)
750       MadeChange |= eliminateFallThrough(F);
751 
752     EverMadeChange |= MadeChange;
753   }
754 
755   if (!DisableGCOpts) {
756     SmallVector<GCStatepointInst *, 2> Statepoints;
757     for (BasicBlock &BB : F)
758       for (Instruction &I : BB)
759         if (auto *SP = dyn_cast<GCStatepointInst>(&I))
760           Statepoints.push_back(SP);
761     for (auto &I : Statepoints)
762       EverMadeChange |= simplifyOffsetableRelocate(*I);
763   }
764 
765   // Do this last to clean up use-before-def scenarios introduced by other
766   // preparatory transforms.
767   EverMadeChange |= placeDbgValues(F);
768   EverMadeChange |= placePseudoProbes(F);
769 
770 #ifndef NDEBUG
771   if (VerifyBFIUpdates)
772     verifyBFIUpdates(F);
773 #endif
774 
775   return EverMadeChange;
776 }
777 
778 bool CodeGenPrepare::eliminateAssumptions(Function &F) {
779   bool MadeChange = false;
780   for (BasicBlock &BB : F) {
781     CurInstIterator = BB.begin();
782     while (CurInstIterator != BB.end()) {
783       Instruction *I = &*(CurInstIterator++);
784       if (auto *Assume = dyn_cast<AssumeInst>(I)) {
785         MadeChange = true;
786         Value *Operand = Assume->getOperand(0);
787         Assume->eraseFromParent();
788 
789         resetIteratorIfInvalidatedWhileCalling(&BB, [&]() {
790           RecursivelyDeleteTriviallyDeadInstructions(Operand, TLInfo, nullptr);
791         });
792       }
793     }
794   }
795   return MadeChange;
796 }
797 
798 /// An instruction is about to be deleted, so remove all references to it in our
799 /// GEP-tracking data strcutures.
800 void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
801   LargeOffsetGEPMap.erase(V);
802   NewGEPBases.erase(V);
803 
804   auto GEP = dyn_cast<GetElementPtrInst>(V);
805   if (!GEP)
806     return;
807 
808   LargeOffsetGEPID.erase(GEP);
809 
810   auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
811   if (VecI == LargeOffsetGEPMap.end())
812     return;
813 
814   auto &GEPVector = VecI->second;
815   llvm::erase_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });
816 
817   if (GEPVector.empty())
818     LargeOffsetGEPMap.erase(VecI);
819 }
820 
821 // Verify BFI has been updated correctly by recomputing BFI and comparing them.
822 void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
823   DominatorTree NewDT(F);
824   LoopInfo NewLI(NewDT);
825   BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
826   BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
827   NewBFI.verifyMatch(*BFI);
828 }
829 
830 /// Merge basic blocks which are connected by a single edge, where one of the
831 /// basic blocks has a single successor pointing to the other basic block,
832 /// which has a single predecessor.
833 bool CodeGenPrepare::eliminateFallThrough(Function &F, DominatorTree *DT) {
834   bool Changed = false;
835   // Scan all of the blocks in the function, except for the entry block.
836   // Use a temporary array to avoid iterator being invalidated when
837   // deleting blocks.
838   SmallVector<WeakTrackingVH, 16> Blocks;
839   for (auto &Block : llvm::drop_begin(F))
840     Blocks.push_back(&Block);
841 
842   SmallSet<WeakTrackingVH, 16> Preds;
843   for (auto &Block : Blocks) {
844     auto *BB = cast_or_null<BasicBlock>(Block);
845     if (!BB)
846       continue;
847     // If the destination block has a single pred, then this is a trivial
848     // edge, just collapse it.
849     BasicBlock *SinglePred = BB->getSinglePredecessor();
850 
851     // Don't merge if BB's address is taken.
852     if (!SinglePred || SinglePred == BB || BB->hasAddressTaken())
853       continue;
854 
855     // Make an effort to skip unreachable blocks.
856     if (DT && !DT->isReachableFromEntry(BB))
857       continue;
858 
859     BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
860     if (Term && !Term->isConditional()) {
861       Changed = true;
862       LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");
863 
864       // Merge BB into SinglePred and delete it.
865       MergeBlockIntoPredecessor(BB, /* DTU */ nullptr, LI, /* MSSAU */ nullptr,
866                                 /* MemDep */ nullptr,
867                                 /* PredecessorWithTwoSuccessors */ false, DT);
868       Preds.insert(SinglePred);
869 
870       if (IsHugeFunc) {
871         // Update FreshBBs to optimize the merged BB.
872         FreshBBs.insert(SinglePred);
873         FreshBBs.erase(BB);
874       }
875     }
876   }
877 
878   // (Repeatedly) merging blocks into their predecessors can create redundant
879   // debug intrinsics.
880   for (const auto &Pred : Preds)
881     if (auto *BB = cast_or_null<BasicBlock>(Pred))
882       RemoveRedundantDbgInstrs(BB);
883 
884   return Changed;
885 }
886 
887 /// Find a destination block from BB if BB is mergeable empty block.
888 BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
889   // If this block doesn't end with an uncond branch, ignore it.
890   BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
891   if (!BI || !BI->isUnconditional())
892     return nullptr;
893 
894   // If the instruction before the branch (skipping debug info) isn't a phi
895   // node, then other stuff is happening here.
896   BasicBlock::iterator BBI = BI->getIterator();
897   if (BBI != BB->begin()) {
898     --BBI;
899     while (isa<DbgInfoIntrinsic>(BBI)) {
900       if (BBI == BB->begin())
901         break;
902       --BBI;
903     }
904     if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
905       return nullptr;
906   }
907 
908   // Do not break infinite loops.
909   BasicBlock *DestBB = BI->getSuccessor(0);
910   if (DestBB == BB)
911     return nullptr;
912 
913   if (!canMergeBlocks(BB, DestBB))
914     DestBB = nullptr;
915 
916   return DestBB;
917 }
918 
919 /// Eliminate blocks that contain only PHI nodes, debug info directives, and an
920 /// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
921 /// edges in ways that are non-optimal for isel. Start by eliminating these
922 /// blocks so we can split them the way we want them.
923 bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
924   SmallPtrSet<BasicBlock *, 16> Preheaders;
925   SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
926   while (!LoopList.empty()) {
927     Loop *L = LoopList.pop_back_val();
928     llvm::append_range(LoopList, *L);
929     if (BasicBlock *Preheader = L->getLoopPreheader())
930       Preheaders.insert(Preheader);
931   }
932 
933   bool MadeChange = false;
934   // Copy blocks into a temporary array to avoid iterator invalidation issues
935   // as we remove them.
936   // Note that this intentionally skips the entry block.
937   SmallVector<WeakTrackingVH, 16> Blocks;
938   for (auto &Block : llvm::drop_begin(F)) {
939     // Delete phi nodes that could block deleting other empty blocks.
940     if (!DisableDeletePHIs)
941       MadeChange |= DeleteDeadPHIs(&Block, TLInfo);
942     Blocks.push_back(&Block);
943   }
944 
945   for (auto &Block : Blocks) {
946     BasicBlock *BB = cast_or_null<BasicBlock>(Block);
947     if (!BB)
948       continue;
949     BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
950     if (!DestBB ||
951         !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
952       continue;
953 
954     eliminateMostlyEmptyBlock(BB);
955     MadeChange = true;
956   }
957   return MadeChange;
958 }
959 
960 bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
961                                                    BasicBlock *DestBB,
962                                                    bool isPreheader) {
963   // Do not delete loop preheaders if doing so would create a critical edge.
964   // Loop preheaders can be good locations to spill registers. If the
965   // preheader is deleted and we create a critical edge, registers may be
966   // spilled in the loop body instead.
967   if (!DisablePreheaderProtect && isPreheader &&
968       !(BB->getSinglePredecessor() &&
969         BB->getSinglePredecessor()->getSingleSuccessor()))
970     return false;
971 
972   // Skip merging if the block's successor is also a successor to any callbr
973   // that leads to this block.
974   // FIXME: Is this really needed? Is this a correctness issue?
975   for (BasicBlock *Pred : predecessors(BB)) {
976     if (isa<CallBrInst>(Pred->getTerminator()) &&
977         llvm::is_contained(successors(Pred), DestBB))
978       return false;
979   }
980 
981   // Try to skip merging if the unique predecessor of BB is terminated by a
982   // switch or indirect branch instruction, and BB is used as an incoming block
983   // of PHIs in DestBB. In such case, merging BB and DestBB would cause ISel to
984   // add COPY instructions in the predecessor of BB instead of BB (if it is not
985   // merged). Note that the critical edge created by merging such blocks wont be
986   // split in MachineSink because the jump table is not analyzable. By keeping
987   // such empty block (BB), ISel will place COPY instructions in BB, not in the
988   // predecessor of BB.
989   BasicBlock *Pred = BB->getUniquePredecessor();
990   if (!Pred || !(isa<SwitchInst>(Pred->getTerminator()) ||
991                  isa<IndirectBrInst>(Pred->getTerminator())))
992     return true;
993 
994   if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
995     return true;
996 
997   // We use a simple cost heuristic which determine skipping merging is
998   // profitable if the cost of skipping merging is less than the cost of
999   // merging : Cost(skipping merging) < Cost(merging BB), where the
1000   // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
1001   // the Cost(merging BB) is Freq(Pred) * Cost(Copy).
1002   // Assuming Cost(Copy) == Cost(Branch), we could simplify it to :
1003   //   Freq(Pred) / Freq(BB) > 2.
1004   // Note that if there are multiple empty blocks sharing the same incoming
1005   // value for the PHIs in the DestBB, we consider them together. In such
1006   // case, Cost(merging BB) will be the sum of their frequencies.
1007 
1008   if (!isa<PHINode>(DestBB->begin()))
1009     return true;
1010 
1011   SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;
1012 
1013   // Find all other incoming blocks from which incoming values of all PHIs in
1014   // DestBB are the same as the ones from BB.
1015   for (BasicBlock *DestBBPred : predecessors(DestBB)) {
1016     if (DestBBPred == BB)
1017       continue;
1018 
1019     if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
1020           return DestPN.getIncomingValueForBlock(BB) ==
1021                  DestPN.getIncomingValueForBlock(DestBBPred);
1022         }))
1023       SameIncomingValueBBs.insert(DestBBPred);
1024   }
1025 
1026   // See if all BB's incoming values are same as the value from Pred. In this
1027   // case, no reason to skip merging because COPYs are expected to be place in
1028   // Pred already.
1029   if (SameIncomingValueBBs.count(Pred))
1030     return true;
1031 
1032   BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
1033   BlockFrequency BBFreq = BFI->getBlockFreq(BB);
1034 
1035   for (auto *SameValueBB : SameIncomingValueBBs)
1036     if (SameValueBB->getUniquePredecessor() == Pred &&
1037         DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
1038       BBFreq += BFI->getBlockFreq(SameValueBB);
1039 
1040   std::optional<BlockFrequency> Limit = BBFreq.mul(FreqRatioToSkipMerge);
1041   return !Limit || PredFreq <= *Limit;
1042 }
1043 
1044 /// Return true if we can merge BB into DestBB if there is a single
1045 /// unconditional branch between them, and BB contains no other non-phi
1046 /// instructions.
1047 bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
1048                                     const BasicBlock *DestBB) const {
1049   // We only want to eliminate blocks whose phi nodes are used by phi nodes in
1050   // the successor.  If there are more complex condition (e.g. preheaders),
1051   // don't mess around with them.
1052   for (const PHINode &PN : BB->phis()) {
1053     for (const User *U : PN.users()) {
1054       const Instruction *UI = cast<Instruction>(U);
1055       if (UI->getParent() != DestBB || !isa<PHINode>(UI))
1056         return false;
1057       // If User is inside DestBB block and it is a PHINode then check
1058       // incoming value. If incoming value is not from BB then this is
1059       // a complex condition (e.g. preheaders) we want to avoid here.
1060       if (UI->getParent() == DestBB) {
1061         if (const PHINode *UPN = dyn_cast<PHINode>(UI))
1062           for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
1063             Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
1064             if (Insn && Insn->getParent() == BB &&
1065                 Insn->getParent() != UPN->getIncomingBlock(I))
1066               return false;
1067           }
1068       }
1069     }
1070   }
1071 
1072   // If BB and DestBB contain any common predecessors, then the phi nodes in BB
1073   // and DestBB may have conflicting incoming values for the block.  If so, we
1074   // can't merge the block.
1075   const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
1076   if (!DestBBPN)
1077     return true; // no conflict.
1078 
1079   // Collect the preds of BB.
1080   SmallPtrSet<const BasicBlock *, 16> BBPreds;
1081   if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
1082     // It is faster to get preds from a PHI than with pred_iterator.
1083     for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
1084       BBPreds.insert(BBPN->getIncomingBlock(i));
1085   } else {
1086     BBPreds.insert(pred_begin(BB), pred_end(BB));
1087   }
1088 
1089   // Walk the preds of DestBB.
1090   for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
1091     BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
1092     if (BBPreds.count(Pred)) { // Common predecessor?
1093       for (const PHINode &PN : DestBB->phis()) {
1094         const Value *V1 = PN.getIncomingValueForBlock(Pred);
1095         const Value *V2 = PN.getIncomingValueForBlock(BB);
1096 
1097         // If V2 is a phi node in BB, look up what the mapped value will be.
1098         if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
1099           if (V2PN->getParent() == BB)
1100             V2 = V2PN->getIncomingValueForBlock(Pred);
1101 
1102         // If there is a conflict, bail out.
1103         if (V1 != V2)
1104           return false;
1105       }
1106     }
1107   }
1108 
1109   return true;
1110 }
1111 
1112 /// Replace all old uses with new ones, and push the updated BBs into FreshBBs.
1113 static void replaceAllUsesWith(Value *Old, Value *New,
1114                                SmallSet<BasicBlock *, 32> &FreshBBs,
1115                                bool IsHuge) {
1116   auto *OldI = dyn_cast<Instruction>(Old);
1117   if (OldI) {
1118     for (Value::user_iterator UI = OldI->user_begin(), E = OldI->user_end();
1119          UI != E; ++UI) {
1120       Instruction *User = cast<Instruction>(*UI);
1121       if (IsHuge)
1122         FreshBBs.insert(User->getParent());
1123     }
1124   }
1125   Old->replaceAllUsesWith(New);
1126 }
1127 
1128 /// Eliminate a basic block that has only phi's and an unconditional branch in
1129 /// it.
1130 void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
1131   BranchInst *BI = cast<BranchInst>(BB->getTerminator());
1132   BasicBlock *DestBB = BI->getSuccessor(0);
1133 
1134   LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
1135                     << *BB << *DestBB);
1136 
1137   // If the destination block has a single pred, then this is a trivial edge,
1138   // just collapse it.
1139   if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
1140     if (SinglePred != DestBB) {
1141       assert(SinglePred == BB &&
1142              "Single predecessor not the same as predecessor");
1143       // Merge DestBB into SinglePred/BB and delete it.
1144       MergeBlockIntoPredecessor(DestBB);
1145       // Note: BB(=SinglePred) will not be deleted on this path.
1146       // DestBB(=its single successor) is the one that was deleted.
1147       LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");
1148 
1149       if (IsHugeFunc) {
1150         // Update FreshBBs to optimize the merged BB.
1151         FreshBBs.insert(SinglePred);
1152         FreshBBs.erase(DestBB);
1153       }
1154       return;
1155     }
1156   }
1157 
1158   // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
1159   // to handle the new incoming edges it is about to have.
1160   for (PHINode &PN : DestBB->phis()) {
1161     // Remove the incoming value for BB, and remember it.
1162     Value *InVal = PN.removeIncomingValue(BB, false);
1163 
1164     // Two options: either the InVal is a phi node defined in BB or it is some
1165     // value that dominates BB.
1166     PHINode *InValPhi = dyn_cast<PHINode>(InVal);
1167     if (InValPhi && InValPhi->getParent() == BB) {
1168       // Add all of the input values of the input PHI as inputs of this phi.
1169       for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
1170         PN.addIncoming(InValPhi->getIncomingValue(i),
1171                        InValPhi->getIncomingBlock(i));
1172     } else {
1173       // Otherwise, add one instance of the dominating value for each edge that
1174       // we will be adding.
1175       if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
1176         for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
1177           PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
1178       } else {
1179         for (BasicBlock *Pred : predecessors(BB))
1180           PN.addIncoming(InVal, Pred);
1181       }
1182     }
1183   }
1184 
1185   // The PHIs are now updated, change everything that refers to BB to use
1186   // DestBB and remove BB.
1187   BB->replaceAllUsesWith(DestBB);
1188   BB->eraseFromParent();
1189   ++NumBlocksElim;
1190 
1191   LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
1192 }
1193 
1194 // Computes a map of base pointer relocation instructions to corresponding
1195 // derived pointer relocation instructions given a vector of all relocate calls
1196 static void computeBaseDerivedRelocateMap(
1197     const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
1198     MapVector<GCRelocateInst *, SmallVector<GCRelocateInst *, 0>>
1199         &RelocateInstMap) {
1200   // Collect information in two maps: one primarily for locating the base object
1201   // while filling the second map; the second map is the final structure holding
1202   // a mapping between Base and corresponding Derived relocate calls
1203   MapVector<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
1204   for (auto *ThisRelocate : AllRelocateCalls) {
1205     auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
1206                             ThisRelocate->getDerivedPtrIndex());
1207     RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
1208   }
1209   for (auto &Item : RelocateIdxMap) {
1210     std::pair<unsigned, unsigned> Key = Item.first;
1211     if (Key.first == Key.second)
1212       // Base relocation: nothing to insert
1213       continue;
1214 
1215     GCRelocateInst *I = Item.second;
1216     auto BaseKey = std::make_pair(Key.first, Key.first);
1217 
1218     // We're iterating over RelocateIdxMap so we cannot modify it.
1219     auto MaybeBase = RelocateIdxMap.find(BaseKey);
1220     if (MaybeBase == RelocateIdxMap.end())
1221       // TODO: We might want to insert a new base object relocate and gep off
1222       // that, if there are enough derived object relocates.
1223       continue;
1224 
1225     RelocateInstMap[MaybeBase->second].push_back(I);
1226   }
1227 }
1228 
1229 // Accepts a GEP and extracts the operands into a vector provided they're all
1230 // small integer constants
1231 static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
1232                                           SmallVectorImpl<Value *> &OffsetV) {
1233   for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
1234     // Only accept small constant integer operands
1235     auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
1236     if (!Op || Op->getZExtValue() > 20)
1237       return false;
1238   }
1239 
1240   for (unsigned i = 1; i < GEP->getNumOperands(); i++)
1241     OffsetV.push_back(GEP->getOperand(i));
1242   return true;
1243 }
1244 
1245 // Takes a RelocatedBase (base pointer relocation instruction) and Targets to
1246 // replace, computes a replacement, and affects it.
1247 static bool
1248 simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
1249                           const SmallVectorImpl<GCRelocateInst *> &Targets) {
1250   bool MadeChange = false;
1251   // We must ensure the relocation of derived pointer is defined after
1252   // relocation of base pointer. If we find a relocation corresponding to base
1253   // defined earlier than relocation of base then we move relocation of base
1254   // right before found relocation. We consider only relocation in the same
1255   // basic block as relocation of base. Relocations from other basic block will
1256   // be skipped by optimization and we do not care about them.
1257   for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
1258        &*R != RelocatedBase; ++R)
1259     if (auto *RI = dyn_cast<GCRelocateInst>(R))
1260       if (RI->getStatepoint() == RelocatedBase->getStatepoint())
1261         if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
1262           RelocatedBase->moveBefore(RI);
1263           MadeChange = true;
1264           break;
1265         }
1266 
1267   for (GCRelocateInst *ToReplace : Targets) {
1268     assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
1269            "Not relocating a derived object of the original base object");
1270     if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
1271       // A duplicate relocate call. TODO: coalesce duplicates.
1272       continue;
1273     }
1274 
1275     if (RelocatedBase->getParent() != ToReplace->getParent()) {
1276       // Base and derived relocates are in different basic blocks.
1277       // In this case transform is only valid when base dominates derived
1278       // relocate. However it would be too expensive to check dominance
1279       // for each such relocate, so we skip the whole transformation.
1280       continue;
1281     }
1282 
1283     Value *Base = ToReplace->getBasePtr();
1284     auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
1285     if (!Derived || Derived->getPointerOperand() != Base)
1286       continue;
1287 
1288     SmallVector<Value *, 2> OffsetV;
1289     if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
1290       continue;
1291 
1292     // Create a Builder and replace the target callsite with a gep
1293     assert(RelocatedBase->getNextNode() &&
1294            "Should always have one since it's not a terminator");
1295 
1296     // Insert after RelocatedBase
1297     IRBuilder<> Builder(RelocatedBase->getNextNode());
1298     Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());
1299 
1300     // If gc_relocate does not match the actual type, cast it to the right type.
1301     // In theory, there must be a bitcast after gc_relocate if the type does not
1302     // match, and we should reuse it to get the derived pointer. But it could be
1303     // cases like this:
1304     // bb1:
1305     //  ...
1306     //  %g1 = call coldcc i8 addrspace(1)*
1307     //  @llvm.experimental.gc.relocate.p1i8(...) br label %merge
1308     //
1309     // bb2:
1310     //  ...
1311     //  %g2 = call coldcc i8 addrspace(1)*
1312     //  @llvm.experimental.gc.relocate.p1i8(...) br label %merge
1313     //
1314     // merge:
1315     //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
1316     //  %cast = bitcast i8 addrspace(1)* %p1 in to i32 addrspace(1)*
1317     //
1318     // In this case, we can not find the bitcast any more. So we insert a new
1319     // bitcast no matter there is already one or not. In this way, we can handle
1320     // all cases, and the extra bitcast should be optimized away in later
1321     // passes.
1322     Value *ActualRelocatedBase = RelocatedBase;
1323     if (RelocatedBase->getType() != Base->getType()) {
1324       ActualRelocatedBase =
1325           Builder.CreateBitCast(RelocatedBase, Base->getType());
1326     }
1327     Value *Replacement =
1328         Builder.CreateGEP(Derived->getSourceElementType(), ActualRelocatedBase,
1329                           ArrayRef(OffsetV));
1330     Replacement->takeName(ToReplace);
1331     // If the newly generated derived pointer's type does not match the original
1332     // derived pointer's type, cast the new derived pointer to match it. Same
1333     // reasoning as above.
1334     Value *ActualReplacement = Replacement;
1335     if (Replacement->getType() != ToReplace->getType()) {
1336       ActualReplacement =
1337           Builder.CreateBitCast(Replacement, ToReplace->getType());
1338     }
1339     ToReplace->replaceAllUsesWith(ActualReplacement);
1340     ToReplace->eraseFromParent();
1341 
1342     MadeChange = true;
1343   }
1344   return MadeChange;
1345 }
1346 
1347 // Turns this:
1348 //
1349 // %base = ...
1350 // %ptr = gep %base + 15
1351 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1352 // %base' = relocate(%tok, i32 4, i32 4)
1353 // %ptr' = relocate(%tok, i32 4, i32 5)
1354 // %val = load %ptr'
1355 //
1356 // into this:
1357 //
1358 // %base = ...
1359 // %ptr = gep %base + 15
1360 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1361 // %base' = gc.relocate(%tok, i32 4, i32 4)
1362 // %ptr' = gep %base' + 15
1363 // %val = load %ptr'
1364 bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) {
1365   bool MadeChange = false;
1366   SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
1367   for (auto *U : I.users())
1368     if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
1369       // Collect all the relocate calls associated with a statepoint
1370       AllRelocateCalls.push_back(Relocate);
1371 
1372   // We need at least one base pointer relocation + one derived pointer
1373   // relocation to mangle
1374   if (AllRelocateCalls.size() < 2)
1375     return false;
1376 
1377   // RelocateInstMap is a mapping from the base relocate instruction to the
1378   // corresponding derived relocate instructions
1379   MapVector<GCRelocateInst *, SmallVector<GCRelocateInst *, 0>> RelocateInstMap;
1380   computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
1381   if (RelocateInstMap.empty())
1382     return false;
1383 
1384   for (auto &Item : RelocateInstMap)
1385     // Item.first is the RelocatedBase to offset against
1386     // Item.second is the vector of Targets to replace
1387     MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
1388   return MadeChange;
1389 }
1390 
1391 /// Sink the specified cast instruction into its user blocks.
1392 static bool SinkCast(CastInst *CI) {
1393   BasicBlock *DefBB = CI->getParent();
1394 
1395   /// InsertedCasts - Only insert a cast in each block once.
1396   DenseMap<BasicBlock *, CastInst *> InsertedCasts;
1397 
1398   bool MadeChange = false;
1399   for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
1400        UI != E;) {
1401     Use &TheUse = UI.getUse();
1402     Instruction *User = cast<Instruction>(*UI);
1403 
1404     // Figure out which BB this cast is used in.  For PHI's this is the
1405     // appropriate predecessor block.
1406     BasicBlock *UserBB = User->getParent();
1407     if (PHINode *PN = dyn_cast<PHINode>(User)) {
1408       UserBB = PN->getIncomingBlock(TheUse);
1409     }
1410 
1411     // Preincrement use iterator so we don't invalidate it.
1412     ++UI;
1413 
1414     // The first insertion point of a block containing an EH pad is after the
1415     // pad.  If the pad is the user, we cannot sink the cast past the pad.
1416     if (User->isEHPad())
1417       continue;
1418 
1419     // If the block selected to receive the cast is an EH pad that does not
1420     // allow non-PHI instructions before the terminator, we can't sink the
1421     // cast.
1422     if (UserBB->getTerminator()->isEHPad())
1423       continue;
1424 
1425     // If this user is in the same block as the cast, don't change the cast.
1426     if (UserBB == DefBB)
1427       continue;
1428 
1429     // If we have already inserted a cast into this block, use it.
1430     CastInst *&InsertedCast = InsertedCasts[UserBB];
1431 
1432     if (!InsertedCast) {
1433       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1434       assert(InsertPt != UserBB->end());
1435       InsertedCast = cast<CastInst>(CI->clone());
1436       InsertedCast->insertBefore(*UserBB, InsertPt);
1437     }
1438 
1439     // Replace a use of the cast with a use of the new cast.
1440     TheUse = InsertedCast;
1441     MadeChange = true;
1442     ++NumCastUses;
1443   }
1444 
1445   // If we removed all uses, nuke the cast.
1446   if (CI->use_empty()) {
1447     salvageDebugInfo(*CI);
1448     CI->eraseFromParent();
1449     MadeChange = true;
1450   }
1451 
1452   return MadeChange;
1453 }
1454 
1455 /// If the specified cast instruction is a noop copy (e.g. it's casting from
1456 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
1457 /// reduce the number of virtual registers that must be created and coalesced.
1458 ///
1459 /// Return true if any changes are made.
1460 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
1461                                        const DataLayout &DL) {
1462   // Sink only "cheap" (or nop) address-space casts.  This is a weaker condition
1463   // than sinking only nop casts, but is helpful on some platforms.
1464   if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
1465     if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
1466                                  ASC->getDestAddressSpace()))
1467       return false;
1468   }
1469 
1470   // If this is a noop copy,
1471   EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
1472   EVT DstVT = TLI.getValueType(DL, CI->getType());
1473 
1474   // This is an fp<->int conversion?
1475   if (SrcVT.isInteger() != DstVT.isInteger())
1476     return false;
1477 
1478   // If this is an extension, it will be a zero or sign extension, which
1479   // isn't a noop.
1480   if (SrcVT.bitsLT(DstVT))
1481     return false;
1482 
1483   // If these values will be promoted, find out what they will be promoted
1484   // to.  This helps us consider truncates on PPC as noop copies when they
1485   // are.
1486   if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
1487       TargetLowering::TypePromoteInteger)
1488     SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
1489   if (TLI.getTypeAction(CI->getContext(), DstVT) ==
1490       TargetLowering::TypePromoteInteger)
1491     DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
1492 
1493   // If, after promotion, these are the same types, this is a noop copy.
1494   if (SrcVT != DstVT)
1495     return false;
1496 
1497   return SinkCast(CI);
1498 }
1499 
1500 // Match a simple increment by constant operation.  Note that if a sub is
1501 // matched, the step is negated (as if the step had been canonicalized to
1502 // an add, even though we leave the instruction alone.)
1503 static bool matchIncrement(const Instruction *IVInc, Instruction *&LHS,
1504                            Constant *&Step) {
1505   if (match(IVInc, m_Add(m_Instruction(LHS), m_Constant(Step))) ||
1506       match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::uadd_with_overflow>(
1507                        m_Instruction(LHS), m_Constant(Step)))))
1508     return true;
1509   if (match(IVInc, m_Sub(m_Instruction(LHS), m_Constant(Step))) ||
1510       match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
1511                        m_Instruction(LHS), m_Constant(Step))))) {
1512     Step = ConstantExpr::getNeg(Step);
1513     return true;
1514   }
1515   return false;
1516 }
1517 
1518 /// If given \p PN is an inductive variable with value IVInc coming from the
1519 /// backedge, and on each iteration it gets increased by Step, return pair
1520 /// <IVInc, Step>. Otherwise, return std::nullopt.
1521 static std::optional<std::pair<Instruction *, Constant *>>
1522 getIVIncrement(const PHINode *PN, const LoopInfo *LI) {
1523   const Loop *L = LI->getLoopFor(PN->getParent());
1524   if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch())
1525     return std::nullopt;
1526   auto *IVInc =
1527       dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch()));
1528   if (!IVInc || LI->getLoopFor(IVInc->getParent()) != L)
1529     return std::nullopt;
1530   Instruction *LHS = nullptr;
1531   Constant *Step = nullptr;
1532   if (matchIncrement(IVInc, LHS, Step) && LHS == PN)
1533     return std::make_pair(IVInc, Step);
1534   return std::nullopt;
1535 }
1536 
1537 static bool isIVIncrement(const Value *V, const LoopInfo *LI) {
1538   auto *I = dyn_cast<Instruction>(V);
1539   if (!I)
1540     return false;
1541   Instruction *LHS = nullptr;
1542   Constant *Step = nullptr;
1543   if (!matchIncrement(I, LHS, Step))
1544     return false;
1545   if (auto *PN = dyn_cast<PHINode>(LHS))
1546     if (auto IVInc = getIVIncrement(PN, LI))
1547       return IVInc->first == I;
1548   return false;
1549 }
1550 
1551 bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO,
1552                                                  Value *Arg0, Value *Arg1,
1553                                                  CmpInst *Cmp,
1554                                                  Intrinsic::ID IID) {
1555   auto IsReplacableIVIncrement = [this, &Cmp](BinaryOperator *BO) {
1556     if (!isIVIncrement(BO, LI))
1557       return false;
1558     const Loop *L = LI->getLoopFor(BO->getParent());
1559     assert(L && "L should not be null after isIVIncrement()");
1560     // Do not risk on moving increment into a child loop.
1561     if (LI->getLoopFor(Cmp->getParent()) != L)
1562       return false;
1563 
1564     // Finally, we need to ensure that the insert point will dominate all
1565     // existing uses of the increment.
1566 
1567     auto &DT = getDT(*BO->getParent()->getParent());
1568     if (DT.dominates(Cmp->getParent(), BO->getParent()))
1569       // If we're moving up the dom tree, all uses are trivially dominated.
1570       // (This is the common case for code produced by LSR.)
1571       return true;
1572 
1573     // Otherwise, special case the single use in the phi recurrence.
1574     return BO->hasOneUse() && DT.dominates(Cmp->getParent(), L->getLoopLatch());
1575   };
1576   if (BO->getParent() != Cmp->getParent() && !IsReplacableIVIncrement(BO)) {
1577     // We used to use a dominator tree here to allow multi-block optimization.
1578     // But that was problematic because:
1579     // 1. It could cause a perf regression by hoisting the math op into the
1580     //    critical path.
1581     // 2. It could cause a perf regression by creating a value that was live
1582     //    across multiple blocks and increasing register pressure.
1583     // 3. Use of a dominator tree could cause large compile-time regression.
1584     //    This is because we recompute the DT on every change in the main CGP
1585     //    run-loop. The recomputing is probably unnecessary in many cases, so if
1586     //    that was fixed, using a DT here would be ok.
1587     //
1588     // There is one important particular case we still want to handle: if BO is
1589     // the IV increment. Important properties that make it profitable:
1590     // - We can speculate IV increment anywhere in the loop (as long as the
1591     //   indvar Phi is its only user);
1592     // - Upon computing Cmp, we effectively compute something equivalent to the
1593     //   IV increment (despite it loops differently in the IR). So moving it up
1594     //   to the cmp point does not really increase register pressure.
1595     return false;
1596   }
1597 
1598   // We allow matching the canonical IR (add X, C) back to (usubo X, -C).
1599   if (BO->getOpcode() == Instruction::Add &&
1600       IID == Intrinsic::usub_with_overflow) {
1601     assert(isa<Constant>(Arg1) && "Unexpected input for usubo");
1602     Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1));
1603   }
1604 
1605   // Insert at the first instruction of the pair.
1606   Instruction *InsertPt = nullptr;
1607   for (Instruction &Iter : *Cmp->getParent()) {
1608     // If BO is an XOR, it is not guaranteed that it comes after both inputs to
1609     // the overflow intrinsic are defined.
1610     if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) {
1611       InsertPt = &Iter;
1612       break;
1613     }
1614   }
1615   assert(InsertPt != nullptr && "Parent block did not contain cmp or binop");
1616 
1617   IRBuilder<> Builder(InsertPt);
1618   Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1);
1619   if (BO->getOpcode() != Instruction::Xor) {
1620     Value *Math = Builder.CreateExtractValue(MathOV, 0, "math");
1621     replaceAllUsesWith(BO, Math, FreshBBs, IsHugeFunc);
1622   } else
1623     assert(BO->hasOneUse() &&
1624            "Patterns with XOr should use the BO only in the compare");
1625   Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov");
1626   replaceAllUsesWith(Cmp, OV, FreshBBs, IsHugeFunc);
1627   Cmp->eraseFromParent();
1628   BO->eraseFromParent();
1629   return true;
1630 }
1631 
1632 /// Match special-case patterns that check for unsigned add overflow.
1633 static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp,
1634                                                    BinaryOperator *&Add) {
1635   // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val)
1636   // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero)
1637   Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1638 
1639   // We are not expecting non-canonical/degenerate code. Just bail out.
1640   if (isa<Constant>(A))
1641     return false;
1642 
1643   ICmpInst::Predicate Pred = Cmp->getPredicate();
1644   if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes()))
1645     B = ConstantInt::get(B->getType(), 1);
1646   else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt()))
1647     B = Constant::getAllOnesValue(B->getType());
1648   else
1649     return false;
1650 
1651   // Check the users of the variable operand of the compare looking for an add
1652   // with the adjusted constant.
1653   for (User *U : A->users()) {
1654     if (match(U, m_Add(m_Specific(A), m_Specific(B)))) {
1655       Add = cast<BinaryOperator>(U);
1656       return true;
1657     }
1658   }
1659   return false;
1660 }
1661 
1662 /// Try to combine the compare into a call to the llvm.uadd.with.overflow
1663 /// intrinsic. Return true if any changes were made.
1664 bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp,
1665                                                ModifyDT &ModifiedDT) {
1666   bool EdgeCase = false;
1667   Value *A, *B;
1668   BinaryOperator *Add;
1669   if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) {
1670     if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add))
1671       return false;
1672     // Set A and B in case we match matchUAddWithOverflowConstantEdgeCases.
1673     A = Add->getOperand(0);
1674     B = Add->getOperand(1);
1675     EdgeCase = true;
1676   }
1677 
1678   if (!TLI->shouldFormOverflowOp(ISD::UADDO,
1679                                  TLI->getValueType(*DL, Add->getType()),
1680                                  Add->hasNUsesOrMore(EdgeCase ? 1 : 2)))
1681     return false;
1682 
1683   // We don't want to move around uses of condition values this late, so we
1684   // check if it is legal to create the call to the intrinsic in the basic
1685   // block containing the icmp.
1686   if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse())
1687     return false;
1688 
1689   if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp,
1690                                    Intrinsic::uadd_with_overflow))
1691     return false;
1692 
1693   // Reset callers - do not crash by iterating over a dead instruction.
1694   ModifiedDT = ModifyDT::ModifyInstDT;
1695   return true;
1696 }
1697 
1698 bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp,
1699                                                ModifyDT &ModifiedDT) {
1700   // We are not expecting non-canonical/degenerate code. Just bail out.
1701   Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1702   if (isa<Constant>(A) && isa<Constant>(B))
1703     return false;
1704 
1705   // Convert (A u> B) to (A u< B) to simplify pattern matching.
1706   ICmpInst::Predicate Pred = Cmp->getPredicate();
1707   if (Pred == ICmpInst::ICMP_UGT) {
1708     std::swap(A, B);
1709     Pred = ICmpInst::ICMP_ULT;
1710   }
1711   // Convert special-case: (A == 0) is the same as (A u< 1).
1712   if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) {
1713     B = ConstantInt::get(B->getType(), 1);
1714     Pred = ICmpInst::ICMP_ULT;
1715   }
1716   // Convert special-case: (A != 0) is the same as (0 u< A).
1717   if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) {
1718     std::swap(A, B);
1719     Pred = ICmpInst::ICMP_ULT;
1720   }
1721   if (Pred != ICmpInst::ICMP_ULT)
1722     return false;
1723 
1724   // Walk the users of a variable operand of a compare looking for a subtract or
1725   // add with that same operand. Also match the 2nd operand of the compare to
1726   // the add/sub, but that may be a negated constant operand of an add.
1727   Value *CmpVariableOperand = isa<Constant>(A) ? B : A;
1728   BinaryOperator *Sub = nullptr;
1729   for (User *U : CmpVariableOperand->users()) {
1730     // A - B, A u< B --> usubo(A, B)
1731     if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) {
1732       Sub = cast<BinaryOperator>(U);
1733       break;
1734     }
1735 
1736     // A + (-C), A u< C (canonicalized form of (sub A, C))
1737     const APInt *CmpC, *AddC;
1738     if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) &&
1739         match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) {
1740       Sub = cast<BinaryOperator>(U);
1741       break;
1742     }
1743   }
1744   if (!Sub)
1745     return false;
1746 
1747   if (!TLI->shouldFormOverflowOp(ISD::USUBO,
1748                                  TLI->getValueType(*DL, Sub->getType()),
1749                                  Sub->hasNUsesOrMore(1)))
1750     return false;
1751 
1752   if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1),
1753                                    Cmp, Intrinsic::usub_with_overflow))
1754     return false;
1755 
1756   // Reset callers - do not crash by iterating over a dead instruction.
1757   ModifiedDT = ModifyDT::ModifyInstDT;
1758   return true;
1759 }
1760 
1761 /// Sink the given CmpInst into user blocks to reduce the number of virtual
1762 /// registers that must be created and coalesced. This is a clear win except on
1763 /// targets with multiple condition code registers (PowerPC), where it might
1764 /// lose; some adjustment may be wanted there.
1765 ///
1766 /// Return true if any changes are made.
1767 static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
1768   if (TLI.hasMultipleConditionRegisters())
1769     return false;
1770 
1771   // Avoid sinking soft-FP comparisons, since this can move them into a loop.
1772   if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
1773     return false;
1774 
1775   // Only insert a cmp in each block once.
1776   DenseMap<BasicBlock *, CmpInst *> InsertedCmps;
1777 
1778   bool MadeChange = false;
1779   for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
1780        UI != E;) {
1781     Use &TheUse = UI.getUse();
1782     Instruction *User = cast<Instruction>(*UI);
1783 
1784     // Preincrement use iterator so we don't invalidate it.
1785     ++UI;
1786 
1787     // Don't bother for PHI nodes.
1788     if (isa<PHINode>(User))
1789       continue;
1790 
1791     // Figure out which BB this cmp is used in.
1792     BasicBlock *UserBB = User->getParent();
1793     BasicBlock *DefBB = Cmp->getParent();
1794 
1795     // If this user is in the same block as the cmp, don't change the cmp.
1796     if (UserBB == DefBB)
1797       continue;
1798 
1799     // If we have already inserted a cmp into this block, use it.
1800     CmpInst *&InsertedCmp = InsertedCmps[UserBB];
1801 
1802     if (!InsertedCmp) {
1803       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1804       assert(InsertPt != UserBB->end());
1805       InsertedCmp = CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(),
1806                                     Cmp->getOperand(0), Cmp->getOperand(1), "");
1807       InsertedCmp->insertBefore(*UserBB, InsertPt);
1808       // Propagate the debug info.
1809       InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
1810     }
1811 
1812     // Replace a use of the cmp with a use of the new cmp.
1813     TheUse = InsertedCmp;
1814     MadeChange = true;
1815     ++NumCmpUses;
1816   }
1817 
1818   // If we removed all uses, nuke the cmp.
1819   if (Cmp->use_empty()) {
1820     Cmp->eraseFromParent();
1821     MadeChange = true;
1822   }
1823 
1824   return MadeChange;
1825 }
1826 
1827 /// For pattern like:
1828 ///
1829 ///   DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
1830 ///   ...
1831 /// DomBB:
1832 ///   ...
1833 ///   br DomCond, TrueBB, CmpBB
1834 /// CmpBB: (with DomBB being the single predecessor)
1835 ///   ...
1836 ///   Cmp = icmp eq CmpOp0, CmpOp1
1837 ///   ...
1838 ///
1839 /// It would use two comparison on targets that lowering of icmp sgt/slt is
1840 /// different from lowering of icmp eq (PowerPC). This function try to convert
1841 /// 'Cmp = icmp eq CmpOp0, CmpOp1' to ' Cmp = icmp slt/sgt CmpOp0, CmpOp1'.
1842 /// After that, DomCond and Cmp can use the same comparison so reduce one
1843 /// comparison.
1844 ///
1845 /// Return true if any changes are made.
1846 static bool foldICmpWithDominatingICmp(CmpInst *Cmp,
1847                                        const TargetLowering &TLI) {
1848   if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp())
1849     return false;
1850 
1851   ICmpInst::Predicate Pred = Cmp->getPredicate();
1852   if (Pred != ICmpInst::ICMP_EQ)
1853     return false;
1854 
1855   // If icmp eq has users other than BranchInst and SelectInst, converting it to
1856   // icmp slt/sgt would introduce more redundant LLVM IR.
1857   for (User *U : Cmp->users()) {
1858     if (isa<BranchInst>(U))
1859       continue;
1860     if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp)
1861       continue;
1862     return false;
1863   }
1864 
1865   // This is a cheap/incomplete check for dominance - just match a single
1866   // predecessor with a conditional branch.
1867   BasicBlock *CmpBB = Cmp->getParent();
1868   BasicBlock *DomBB = CmpBB->getSinglePredecessor();
1869   if (!DomBB)
1870     return false;
1871 
1872   // We want to ensure that the only way control gets to the comparison of
1873   // interest is that a less/greater than comparison on the same operands is
1874   // false.
1875   Value *DomCond;
1876   BasicBlock *TrueBB, *FalseBB;
1877   if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
1878     return false;
1879   if (CmpBB != FalseBB)
1880     return false;
1881 
1882   Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1);
1883   ICmpInst::Predicate DomPred;
1884   if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1))))
1885     return false;
1886   if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT)
1887     return false;
1888 
1889   // Convert the equality comparison to the opposite of the dominating
1890   // comparison and swap the direction for all branch/select users.
1891   // We have conceptually converted:
1892   // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>;
1893   // to
1894   // Res = (a < b) ? <LT_RES> : (a > b)  ? <GT_RES> : <EQ_RES>;
1895   // And similarly for branches.
1896   for (User *U : Cmp->users()) {
1897     if (auto *BI = dyn_cast<BranchInst>(U)) {
1898       assert(BI->isConditional() && "Must be conditional");
1899       BI->swapSuccessors();
1900       continue;
1901     }
1902     if (auto *SI = dyn_cast<SelectInst>(U)) {
1903       // Swap operands
1904       SI->swapValues();
1905       SI->swapProfMetadata();
1906       continue;
1907     }
1908     llvm_unreachable("Must be a branch or a select");
1909   }
1910   Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred));
1911   return true;
1912 }
1913 
1914 /// Many architectures use the same instruction for both subtract and cmp. Try
1915 /// to swap cmp operands to match subtract operations to allow for CSE.
1916 static bool swapICmpOperandsToExposeCSEOpportunities(CmpInst *Cmp) {
1917   Value *Op0 = Cmp->getOperand(0);
1918   Value *Op1 = Cmp->getOperand(1);
1919   if (!Op0->getType()->isIntegerTy() || isa<Constant>(Op0) ||
1920       isa<Constant>(Op1) || Op0 == Op1)
1921     return false;
1922 
1923   // If a subtract already has the same operands as a compare, swapping would be
1924   // bad. If a subtract has the same operands as a compare but in reverse order,
1925   // then swapping is good.
1926   int GoodToSwap = 0;
1927   unsigned NumInspected = 0;
1928   for (const User *U : Op0->users()) {
1929     // Avoid walking many users.
1930     if (++NumInspected > 128)
1931       return false;
1932     if (match(U, m_Sub(m_Specific(Op1), m_Specific(Op0))))
1933       GoodToSwap++;
1934     else if (match(U, m_Sub(m_Specific(Op0), m_Specific(Op1))))
1935       GoodToSwap--;
1936   }
1937 
1938   if (GoodToSwap > 0) {
1939     Cmp->swapOperands();
1940     return true;
1941   }
1942   return false;
1943 }
1944 
1945 static bool foldFCmpToFPClassTest(CmpInst *Cmp, const TargetLowering &TLI,
1946                                   const DataLayout &DL) {
1947   FCmpInst *FCmp = dyn_cast<FCmpInst>(Cmp);
1948   if (!FCmp)
1949     return false;
1950 
1951   // Don't fold if the target offers free fabs and the predicate is legal.
1952   EVT VT = TLI.getValueType(DL, Cmp->getOperand(0)->getType());
1953   if (TLI.isFAbsFree(VT) &&
1954       TLI.isCondCodeLegal(getFCmpCondCode(FCmp->getPredicate()),
1955                           VT.getSimpleVT()))
1956     return false;
1957 
1958   // Reverse the canonicalization if it is a FP class test
1959   auto ShouldReverseTransform = [](FPClassTest ClassTest) {
1960     return ClassTest == fcInf || ClassTest == (fcInf | fcNan);
1961   };
1962   auto [ClassVal, ClassTest] =
1963       fcmpToClassTest(FCmp->getPredicate(), *FCmp->getParent()->getParent(),
1964                       FCmp->getOperand(0), FCmp->getOperand(1));
1965   if (!ClassVal)
1966     return false;
1967 
1968   if (!ShouldReverseTransform(ClassTest) && !ShouldReverseTransform(~ClassTest))
1969     return false;
1970 
1971   IRBuilder<> Builder(Cmp);
1972   Value *IsFPClass = Builder.createIsFPClass(ClassVal, ClassTest);
1973   Cmp->replaceAllUsesWith(IsFPClass);
1974   RecursivelyDeleteTriviallyDeadInstructions(Cmp);
1975   return true;
1976 }
1977 
1978 bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT) {
1979   if (sinkCmpExpression(Cmp, *TLI))
1980     return true;
1981 
1982   if (combineToUAddWithOverflow(Cmp, ModifiedDT))
1983     return true;
1984 
1985   if (combineToUSubWithOverflow(Cmp, ModifiedDT))
1986     return true;
1987 
1988   if (foldICmpWithDominatingICmp(Cmp, *TLI))
1989     return true;
1990 
1991   if (swapICmpOperandsToExposeCSEOpportunities(Cmp))
1992     return true;
1993 
1994   if (foldFCmpToFPClassTest(Cmp, *TLI, *DL))
1995     return true;
1996 
1997   return false;
1998 }
1999 
2000 /// Duplicate and sink the given 'and' instruction into user blocks where it is
2001 /// used in a compare to allow isel to generate better code for targets where
2002 /// this operation can be combined.
2003 ///
2004 /// Return true if any changes are made.
2005 static bool sinkAndCmp0Expression(Instruction *AndI, const TargetLowering &TLI,
2006                                   SetOfInstrs &InsertedInsts) {
2007   // Double-check that we're not trying to optimize an instruction that was
2008   // already optimized by some other part of this pass.
2009   assert(!InsertedInsts.count(AndI) &&
2010          "Attempting to optimize already optimized and instruction");
2011   (void)InsertedInsts;
2012 
2013   // Nothing to do for single use in same basic block.
2014   if (AndI->hasOneUse() &&
2015       AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
2016     return false;
2017 
2018   // Try to avoid cases where sinking/duplicating is likely to increase register
2019   // pressure.
2020   if (!isa<ConstantInt>(AndI->getOperand(0)) &&
2021       !isa<ConstantInt>(AndI->getOperand(1)) &&
2022       AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
2023     return false;
2024 
2025   for (auto *U : AndI->users()) {
2026     Instruction *User = cast<Instruction>(U);
2027 
2028     // Only sink 'and' feeding icmp with 0.
2029     if (!isa<ICmpInst>(User))
2030       return false;
2031 
2032     auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
2033     if (!CmpC || !CmpC->isZero())
2034       return false;
2035   }
2036 
2037   if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
2038     return false;
2039 
2040   LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
2041   LLVM_DEBUG(AndI->getParent()->dump());
2042 
2043   // Push the 'and' into the same block as the icmp 0.  There should only be
2044   // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
2045   // others, so we don't need to keep track of which BBs we insert into.
2046   for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
2047        UI != E;) {
2048     Use &TheUse = UI.getUse();
2049     Instruction *User = cast<Instruction>(*UI);
2050 
2051     // Preincrement use iterator so we don't invalidate it.
2052     ++UI;
2053 
2054     LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");
2055 
2056     // Keep the 'and' in the same place if the use is already in the same block.
2057     Instruction *InsertPt =
2058         User->getParent() == AndI->getParent() ? AndI : User;
2059     Instruction *InsertedAnd = BinaryOperator::Create(
2060         Instruction::And, AndI->getOperand(0), AndI->getOperand(1), "",
2061         InsertPt->getIterator());
2062     // Propagate the debug info.
2063     InsertedAnd->setDebugLoc(AndI->getDebugLoc());
2064 
2065     // Replace a use of the 'and' with a use of the new 'and'.
2066     TheUse = InsertedAnd;
2067     ++NumAndUses;
2068     LLVM_DEBUG(User->getParent()->dump());
2069   }
2070 
2071   // We removed all uses, nuke the and.
2072   AndI->eraseFromParent();
2073   return true;
2074 }
2075 
2076 /// Check if the candidates could be combined with a shift instruction, which
2077 /// includes:
2078 /// 1. Truncate instruction
2079 /// 2. And instruction and the imm is a mask of the low bits:
2080 /// imm & (imm+1) == 0
2081 static bool isExtractBitsCandidateUse(Instruction *User) {
2082   if (!isa<TruncInst>(User)) {
2083     if (User->getOpcode() != Instruction::And ||
2084         !isa<ConstantInt>(User->getOperand(1)))
2085       return false;
2086 
2087     const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();
2088 
2089     if ((Cimm & (Cimm + 1)).getBoolValue())
2090       return false;
2091   }
2092   return true;
2093 }
2094 
2095 /// Sink both shift and truncate instruction to the use of truncate's BB.
2096 static bool
2097 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
2098                      DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
2099                      const TargetLowering &TLI, const DataLayout &DL) {
2100   BasicBlock *UserBB = User->getParent();
2101   DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
2102   auto *TruncI = cast<TruncInst>(User);
2103   bool MadeChange = false;
2104 
2105   for (Value::user_iterator TruncUI = TruncI->user_begin(),
2106                             TruncE = TruncI->user_end();
2107        TruncUI != TruncE;) {
2108 
2109     Use &TruncTheUse = TruncUI.getUse();
2110     Instruction *TruncUser = cast<Instruction>(*TruncUI);
2111     // Preincrement use iterator so we don't invalidate it.
2112 
2113     ++TruncUI;
2114 
2115     int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
2116     if (!ISDOpcode)
2117       continue;
2118 
2119     // If the use is actually a legal node, there will not be an
2120     // implicit truncate.
2121     // FIXME: always querying the result type is just an
2122     // approximation; some nodes' legality is determined by the
2123     // operand or other means. There's no good way to find out though.
2124     if (TLI.isOperationLegalOrCustom(
2125             ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
2126       continue;
2127 
2128     // Don't bother for PHI nodes.
2129     if (isa<PHINode>(TruncUser))
2130       continue;
2131 
2132     BasicBlock *TruncUserBB = TruncUser->getParent();
2133 
2134     if (UserBB == TruncUserBB)
2135       continue;
2136 
2137     BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
2138     CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
2139 
2140     if (!InsertedShift && !InsertedTrunc) {
2141       BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
2142       assert(InsertPt != TruncUserBB->end());
2143       // Sink the shift
2144       if (ShiftI->getOpcode() == Instruction::AShr)
2145         InsertedShift =
2146             BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "");
2147       else
2148         InsertedShift =
2149             BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "");
2150       InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
2151       InsertedShift->insertBefore(*TruncUserBB, InsertPt);
2152 
2153       // Sink the trunc
2154       BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
2155       TruncInsertPt++;
2156       // It will go ahead of any debug-info.
2157       TruncInsertPt.setHeadBit(true);
2158       assert(TruncInsertPt != TruncUserBB->end());
2159 
2160       InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
2161                                        TruncI->getType(), "");
2162       InsertedTrunc->insertBefore(*TruncUserBB, TruncInsertPt);
2163       InsertedTrunc->setDebugLoc(TruncI->getDebugLoc());
2164 
2165       MadeChange = true;
2166 
2167       TruncTheUse = InsertedTrunc;
2168     }
2169   }
2170   return MadeChange;
2171 }
2172 
2173 /// Sink the shift *right* instruction into user blocks if the uses could
2174 /// potentially be combined with this shift instruction and generate BitExtract
2175 /// instruction. It will only be applied if the architecture supports BitExtract
2176 /// instruction. Here is an example:
2177 /// BB1:
2178 ///   %x.extract.shift = lshr i64 %arg1, 32
2179 /// BB2:
2180 ///   %x.extract.trunc = trunc i64 %x.extract.shift to i16
2181 /// ==>
2182 ///
2183 /// BB2:
2184 ///   %x.extract.shift.1 = lshr i64 %arg1, 32
2185 ///   %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
2186 ///
2187 /// CodeGen will recognize the pattern in BB2 and generate BitExtract
2188 /// instruction.
2189 /// Return true if any changes are made.
2190 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
2191                                 const TargetLowering &TLI,
2192                                 const DataLayout &DL) {
2193   BasicBlock *DefBB = ShiftI->getParent();
2194 
2195   /// Only insert instructions in each block once.
2196   DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
2197 
2198   bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));
2199 
2200   bool MadeChange = false;
2201   for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
2202        UI != E;) {
2203     Use &TheUse = UI.getUse();
2204     Instruction *User = cast<Instruction>(*UI);
2205     // Preincrement use iterator so we don't invalidate it.
2206     ++UI;
2207 
2208     // Don't bother for PHI nodes.
2209     if (isa<PHINode>(User))
2210       continue;
2211 
2212     if (!isExtractBitsCandidateUse(User))
2213       continue;
2214 
2215     BasicBlock *UserBB = User->getParent();
2216 
2217     if (UserBB == DefBB) {
2218       // If the shift and truncate instruction are in the same BB. The use of
2219       // the truncate(TruncUse) may still introduce another truncate if not
2220       // legal. In this case, we would like to sink both shift and truncate
2221       // instruction to the BB of TruncUse.
2222       // for example:
2223       // BB1:
2224       // i64 shift.result = lshr i64 opnd, imm
2225       // trunc.result = trunc shift.result to i16
2226       //
2227       // BB2:
2228       //   ----> We will have an implicit truncate here if the architecture does
2229       //   not have i16 compare.
2230       // cmp i16 trunc.result, opnd2
2231       //
2232       if (isa<TruncInst>(User) &&
2233           shiftIsLegal
2234           // If the type of the truncate is legal, no truncate will be
2235           // introduced in other basic blocks.
2236           && (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
2237         MadeChange =
2238             SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);
2239 
2240       continue;
2241     }
2242     // If we have already inserted a shift into this block, use it.
2243     BinaryOperator *&InsertedShift = InsertedShifts[UserBB];
2244 
2245     if (!InsertedShift) {
2246       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
2247       assert(InsertPt != UserBB->end());
2248 
2249       if (ShiftI->getOpcode() == Instruction::AShr)
2250         InsertedShift =
2251             BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "");
2252       else
2253         InsertedShift =
2254             BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "");
2255       InsertedShift->insertBefore(*UserBB, InsertPt);
2256       InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
2257 
2258       MadeChange = true;
2259     }
2260 
2261     // Replace a use of the shift with a use of the new shift.
2262     TheUse = InsertedShift;
2263   }
2264 
2265   // If we removed all uses, or there are none, nuke the shift.
2266   if (ShiftI->use_empty()) {
2267     salvageDebugInfo(*ShiftI);
2268     ShiftI->eraseFromParent();
2269     MadeChange = true;
2270   }
2271 
2272   return MadeChange;
2273 }
2274 
2275 /// If counting leading or trailing zeros is an expensive operation and a zero
2276 /// input is defined, add a check for zero to avoid calling the intrinsic.
2277 ///
2278 /// We want to transform:
2279 ///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
2280 ///
2281 /// into:
2282 ///   entry:
2283 ///     %cmpz = icmp eq i64 %A, 0
2284 ///     br i1 %cmpz, label %cond.end, label %cond.false
2285 ///   cond.false:
2286 ///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
2287 ///     br label %cond.end
2288 ///   cond.end:
2289 ///     %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
2290 ///
2291 /// If the transform is performed, return true and set ModifiedDT to true.
2292 static bool despeculateCountZeros(IntrinsicInst *CountZeros,
2293                                   LoopInfo &LI,
2294                                   const TargetLowering *TLI,
2295                                   const DataLayout *DL, ModifyDT &ModifiedDT,
2296                                   SmallSet<BasicBlock *, 32> &FreshBBs,
2297                                   bool IsHugeFunc) {
2298   // If a zero input is undefined, it doesn't make sense to despeculate that.
2299   if (match(CountZeros->getOperand(1), m_One()))
2300     return false;
2301 
2302   // If it's cheap to speculate, there's nothing to do.
2303   Type *Ty = CountZeros->getType();
2304   auto IntrinsicID = CountZeros->getIntrinsicID();
2305   if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz(Ty)) ||
2306       (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz(Ty)))
2307     return false;
2308 
2309   // Only handle legal scalar cases. Anything else requires too much work.
2310   unsigned SizeInBits = Ty->getScalarSizeInBits();
2311   if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
2312     return false;
2313 
2314   // Bail if the value is never zero.
2315   Use &Op = CountZeros->getOperandUse(0);
2316   if (isKnownNonZero(Op, *DL))
2317     return false;
2318 
2319   // The intrinsic will be sunk behind a compare against zero and branch.
2320   BasicBlock *StartBlock = CountZeros->getParent();
2321   BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
2322   if (IsHugeFunc)
2323     FreshBBs.insert(CallBlock);
2324 
2325   // Create another block after the count zero intrinsic. A PHI will be added
2326   // in this block to select the result of the intrinsic or the bit-width
2327   // constant if the input to the intrinsic is zero.
2328   BasicBlock::iterator SplitPt = std::next(BasicBlock::iterator(CountZeros));
2329   // Any debug-info after CountZeros should not be included.
2330   SplitPt.setHeadBit(true);
2331   BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
2332   if (IsHugeFunc)
2333     FreshBBs.insert(EndBlock);
2334 
2335   // Update the LoopInfo. The new blocks are in the same loop as the start
2336   // block.
2337   if (Loop *L = LI.getLoopFor(StartBlock)) {
2338     L->addBasicBlockToLoop(CallBlock, LI);
2339     L->addBasicBlockToLoop(EndBlock, LI);
2340   }
2341 
2342   // Set up a builder to create a compare, conditional branch, and PHI.
2343   IRBuilder<> Builder(CountZeros->getContext());
2344   Builder.SetInsertPoint(StartBlock->getTerminator());
2345   Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());
2346 
2347   // Replace the unconditional branch that was created by the first split with
2348   // a compare against zero and a conditional branch.
2349   Value *Zero = Constant::getNullValue(Ty);
2350   // Avoid introducing branch on poison. This also replaces the ctz operand.
2351   if (!isGuaranteedNotToBeUndefOrPoison(Op))
2352     Op = Builder.CreateFreeze(Op, Op->getName() + ".fr");
2353   Value *Cmp = Builder.CreateICmpEQ(Op, Zero, "cmpz");
2354   Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
2355   StartBlock->getTerminator()->eraseFromParent();
2356 
2357   // Create a PHI in the end block to select either the output of the intrinsic
2358   // or the bit width of the operand.
2359   Builder.SetInsertPoint(EndBlock, EndBlock->begin());
2360   PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
2361   replaceAllUsesWith(CountZeros, PN, FreshBBs, IsHugeFunc);
2362   Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
2363   PN->addIncoming(BitWidth, StartBlock);
2364   PN->addIncoming(CountZeros, CallBlock);
2365 
2366   // We are explicitly handling the zero case, so we can set the intrinsic's
2367   // undefined zero argument to 'true'. This will also prevent reprocessing the
2368   // intrinsic; we only despeculate when a zero input is defined.
2369   CountZeros->setArgOperand(1, Builder.getTrue());
2370   ModifiedDT = ModifyDT::ModifyBBDT;
2371   return true;
2372 }
2373 
2374 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) {
2375   BasicBlock *BB = CI->getParent();
2376 
2377   // Lower inline assembly if we can.
2378   // If we found an inline asm expession, and if the target knows how to
2379   // lower it to normal LLVM code, do so now.
2380   if (CI->isInlineAsm()) {
2381     if (TLI->ExpandInlineAsm(CI)) {
2382       // Avoid invalidating the iterator.
2383       CurInstIterator = BB->begin();
2384       // Avoid processing instructions out of order, which could cause
2385       // reuse before a value is defined.
2386       SunkAddrs.clear();
2387       return true;
2388     }
2389     // Sink address computing for memory operands into the block.
2390     if (optimizeInlineAsmInst(CI))
2391       return true;
2392   }
2393 
2394   // Align the pointer arguments to this call if the target thinks it's a good
2395   // idea
2396   unsigned MinSize;
2397   Align PrefAlign;
2398   if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
2399     for (auto &Arg : CI->args()) {
2400       // We want to align both objects whose address is used directly and
2401       // objects whose address is used in casts and GEPs, though it only makes
2402       // sense for GEPs if the offset is a multiple of the desired alignment and
2403       // if size - offset meets the size threshold.
2404       if (!Arg->getType()->isPointerTy())
2405         continue;
2406       APInt Offset(DL->getIndexSizeInBits(
2407                        cast<PointerType>(Arg->getType())->getAddressSpace()),
2408                    0);
2409       Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
2410       uint64_t Offset2 = Offset.getLimitedValue();
2411       if (!isAligned(PrefAlign, Offset2))
2412         continue;
2413       AllocaInst *AI;
2414       if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlign() < PrefAlign &&
2415           DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
2416         AI->setAlignment(PrefAlign);
2417       // Global variables can only be aligned if they are defined in this
2418       // object (i.e. they are uniquely initialized in this object), and
2419       // over-aligning global variables that have an explicit section is
2420       // forbidden.
2421       GlobalVariable *GV;
2422       if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
2423           GV->getPointerAlignment(*DL) < PrefAlign &&
2424           DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2)
2425         GV->setAlignment(PrefAlign);
2426     }
2427   }
2428   // If this is a memcpy (or similar) then we may be able to improve the
2429   // alignment.
2430   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
2431     Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
2432     MaybeAlign MIDestAlign = MI->getDestAlign();
2433     if (!MIDestAlign || DestAlign > *MIDestAlign)
2434       MI->setDestAlignment(DestAlign);
2435     if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
2436       MaybeAlign MTISrcAlign = MTI->getSourceAlign();
2437       Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
2438       if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
2439         MTI->setSourceAlignment(SrcAlign);
2440     }
2441   }
2442 
2443   // If we have a cold call site, try to sink addressing computation into the
2444   // cold block.  This interacts with our handling for loads and stores to
2445   // ensure that we can fold all uses of a potential addressing computation
2446   // into their uses.  TODO: generalize this to work over profiling data
2447   if (CI->hasFnAttr(Attribute::Cold) && !OptSize &&
2448       !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
2449     for (auto &Arg : CI->args()) {
2450       if (!Arg->getType()->isPointerTy())
2451         continue;
2452       unsigned AS = Arg->getType()->getPointerAddressSpace();
2453       if (optimizeMemoryInst(CI, Arg, Arg->getType(), AS))
2454         return true;
2455     }
2456 
2457   IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
2458   if (II) {
2459     switch (II->getIntrinsicID()) {
2460     default:
2461       break;
2462     case Intrinsic::assume:
2463       llvm_unreachable("llvm.assume should have been removed already");
2464     case Intrinsic::allow_runtime_check:
2465     case Intrinsic::allow_ubsan_check:
2466     case Intrinsic::experimental_widenable_condition: {
2467       // Give up on future widening opportunities so that we can fold away dead
2468       // paths and merge blocks before going into block-local instruction
2469       // selection.
2470       if (II->use_empty()) {
2471         II->eraseFromParent();
2472         return true;
2473       }
2474       Constant *RetVal = ConstantInt::getTrue(II->getContext());
2475       resetIteratorIfInvalidatedWhileCalling(BB, [&]() {
2476         replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
2477       });
2478       return true;
2479     }
2480     case Intrinsic::objectsize:
2481       llvm_unreachable("llvm.objectsize.* should have been lowered already");
2482     case Intrinsic::is_constant:
2483       llvm_unreachable("llvm.is.constant.* should have been lowered already");
2484     case Intrinsic::aarch64_stlxr:
2485     case Intrinsic::aarch64_stxr: {
2486       ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
2487       if (!ExtVal || !ExtVal->hasOneUse() ||
2488           ExtVal->getParent() == CI->getParent())
2489         return false;
2490       // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
2491       ExtVal->moveBefore(CI);
2492       // Mark this instruction as "inserted by CGP", so that other
2493       // optimizations don't touch it.
2494       InsertedInsts.insert(ExtVal);
2495       return true;
2496     }
2497 
2498     case Intrinsic::launder_invariant_group:
2499     case Intrinsic::strip_invariant_group: {
2500       Value *ArgVal = II->getArgOperand(0);
2501       auto it = LargeOffsetGEPMap.find(II);
2502       if (it != LargeOffsetGEPMap.end()) {
2503         // Merge entries in LargeOffsetGEPMap to reflect the RAUW.
2504         // Make sure not to have to deal with iterator invalidation
2505         // after possibly adding ArgVal to LargeOffsetGEPMap.
2506         auto GEPs = std::move(it->second);
2507         LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end());
2508         LargeOffsetGEPMap.erase(II);
2509       }
2510 
2511       replaceAllUsesWith(II, ArgVal, FreshBBs, IsHugeFunc);
2512       II->eraseFromParent();
2513       return true;
2514     }
2515     case Intrinsic::cttz:
2516     case Intrinsic::ctlz:
2517       // If counting zeros is expensive, try to avoid it.
2518       return despeculateCountZeros(II, *LI, TLI, DL, ModifiedDT, FreshBBs,
2519                                    IsHugeFunc);
2520     case Intrinsic::fshl:
2521     case Intrinsic::fshr:
2522       return optimizeFunnelShift(II);
2523     case Intrinsic::dbg_assign:
2524     case Intrinsic::dbg_value:
2525       return fixupDbgValue(II);
2526     case Intrinsic::masked_gather:
2527       return optimizeGatherScatterInst(II, II->getArgOperand(0));
2528     case Intrinsic::masked_scatter:
2529       return optimizeGatherScatterInst(II, II->getArgOperand(1));
2530     }
2531 
2532     SmallVector<Value *, 2> PtrOps;
2533     Type *AccessTy;
2534     if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
2535       while (!PtrOps.empty()) {
2536         Value *PtrVal = PtrOps.pop_back_val();
2537         unsigned AS = PtrVal->getType()->getPointerAddressSpace();
2538         if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
2539           return true;
2540       }
2541   }
2542 
2543   // From here on out we're working with named functions.
2544   if (!CI->getCalledFunction())
2545     return false;
2546 
2547   // Lower all default uses of _chk calls.  This is very similar
2548   // to what InstCombineCalls does, but here we are only lowering calls
2549   // to fortified library functions (e.g. __memcpy_chk) that have the default
2550   // "don't know" as the objectsize.  Anything else should be left alone.
2551   FortifiedLibCallSimplifier Simplifier(TLInfo, true);
2552   IRBuilder<> Builder(CI);
2553   if (Value *V = Simplifier.optimizeCall(CI, Builder)) {
2554     replaceAllUsesWith(CI, V, FreshBBs, IsHugeFunc);
2555     CI->eraseFromParent();
2556     return true;
2557   }
2558 
2559   return false;
2560 }
2561 
2562 static bool isIntrinsicOrLFToBeTailCalled(const TargetLibraryInfo *TLInfo,
2563                                           const CallInst *CI) {
2564   assert(CI && CI->use_empty());
2565 
2566   if (const auto *II = dyn_cast<IntrinsicInst>(CI))
2567     switch (II->getIntrinsicID()) {
2568     case Intrinsic::memset:
2569     case Intrinsic::memcpy:
2570     case Intrinsic::memmove:
2571       return true;
2572     default:
2573       return false;
2574     }
2575 
2576   LibFunc LF;
2577   Function *Callee = CI->getCalledFunction();
2578   if (Callee && TLInfo && TLInfo->getLibFunc(*Callee, LF))
2579     switch (LF) {
2580     case LibFunc_strcpy:
2581     case LibFunc_strncpy:
2582     case LibFunc_strcat:
2583     case LibFunc_strncat:
2584       return true;
2585     default:
2586       return false;
2587     }
2588 
2589   return false;
2590 }
2591 
2592 /// Look for opportunities to duplicate return instructions to the predecessor
2593 /// to enable tail call optimizations. The case it is currently looking for is
2594 /// the following one. Known intrinsics or library function that may be tail
2595 /// called are taken into account as well.
2596 /// @code
2597 /// bb0:
2598 ///   %tmp0 = tail call i32 @f0()
2599 ///   br label %return
2600 /// bb1:
2601 ///   %tmp1 = tail call i32 @f1()
2602 ///   br label %return
2603 /// bb2:
2604 ///   %tmp2 = tail call i32 @f2()
2605 ///   br label %return
2606 /// return:
2607 ///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
2608 ///   ret i32 %retval
2609 /// @endcode
2610 ///
2611 /// =>
2612 ///
2613 /// @code
2614 /// bb0:
2615 ///   %tmp0 = tail call i32 @f0()
2616 ///   ret i32 %tmp0
2617 /// bb1:
2618 ///   %tmp1 = tail call i32 @f1()
2619 ///   ret i32 %tmp1
2620 /// bb2:
2621 ///   %tmp2 = tail call i32 @f2()
2622 ///   ret i32 %tmp2
2623 /// @endcode
2624 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB,
2625                                                 ModifyDT &ModifiedDT) {
2626   if (!BB->getTerminator())
2627     return false;
2628 
2629   ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
2630   if (!RetI)
2631     return false;
2632 
2633   assert(LI->getLoopFor(BB) == nullptr && "A return block cannot be in a loop");
2634 
2635   PHINode *PN = nullptr;
2636   ExtractValueInst *EVI = nullptr;
2637   BitCastInst *BCI = nullptr;
2638   Value *V = RetI->getReturnValue();
2639   if (V) {
2640     BCI = dyn_cast<BitCastInst>(V);
2641     if (BCI)
2642       V = BCI->getOperand(0);
2643 
2644     EVI = dyn_cast<ExtractValueInst>(V);
2645     if (EVI) {
2646       V = EVI->getOperand(0);
2647       if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; }))
2648         return false;
2649     }
2650 
2651     PN = dyn_cast<PHINode>(V);
2652   }
2653 
2654   if (PN && PN->getParent() != BB)
2655     return false;
2656 
2657   auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) {
2658     const BitCastInst *BC = dyn_cast<BitCastInst>(Inst);
2659     if (BC && BC->hasOneUse())
2660       Inst = BC->user_back();
2661 
2662     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
2663       return II->getIntrinsicID() == Intrinsic::lifetime_end;
2664     return false;
2665   };
2666 
2667   // Make sure there are no instructions between the first instruction
2668   // and return.
2669   const Instruction *BI = BB->getFirstNonPHI();
2670   // Skip over debug and the bitcast.
2671   while (isa<DbgInfoIntrinsic>(BI) || BI == BCI || BI == EVI ||
2672          isa<PseudoProbeInst>(BI) || isLifetimeEndOrBitCastFor(BI))
2673     BI = BI->getNextNode();
2674   if (BI != RetI)
2675     return false;
2676 
2677   /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
2678   /// call.
2679   const Function *F = BB->getParent();
2680   SmallVector<BasicBlock *, 4> TailCallBBs;
2681   if (PN) {
2682     for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
2683       // Look through bitcasts.
2684       Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts();
2685       CallInst *CI = dyn_cast<CallInst>(IncomingVal);
2686       BasicBlock *PredBB = PN->getIncomingBlock(I);
2687       // Make sure the phi value is indeed produced by the tail call.
2688       if (CI && CI->hasOneUse() && CI->getParent() == PredBB &&
2689           TLI->mayBeEmittedAsTailCall(CI) &&
2690           attributesPermitTailCall(F, CI, RetI, *TLI)) {
2691         TailCallBBs.push_back(PredBB);
2692       } else {
2693         // Consider the cases in which the phi value is indirectly produced by
2694         // the tail call, for example when encountering memset(), memmove(),
2695         // strcpy(), whose return value may have been optimized out. In such
2696         // cases, the value needs to be the first function argument.
2697         //
2698         // bb0:
2699         //   tail call void @llvm.memset.p0.i64(ptr %0, i8 0, i64 %1)
2700         //   br label %return
2701         // return:
2702         //   %phi = phi ptr [ %0, %bb0 ], [ %2, %entry ]
2703         if (PredBB && PredBB->getSingleSuccessor() == BB)
2704           CI = dyn_cast_or_null<CallInst>(
2705               PredBB->getTerminator()->getPrevNonDebugInstruction(true));
2706 
2707         if (CI && CI->use_empty() &&
2708             isIntrinsicOrLFToBeTailCalled(TLInfo, CI) &&
2709             IncomingVal == CI->getArgOperand(0) &&
2710             TLI->mayBeEmittedAsTailCall(CI) &&
2711             attributesPermitTailCall(F, CI, RetI, *TLI))
2712           TailCallBBs.push_back(PredBB);
2713       }
2714     }
2715   } else {
2716     SmallPtrSet<BasicBlock *, 4> VisitedBBs;
2717     for (BasicBlock *Pred : predecessors(BB)) {
2718       if (!VisitedBBs.insert(Pred).second)
2719         continue;
2720       if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) {
2721         CallInst *CI = dyn_cast<CallInst>(I);
2722         if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
2723             attributesPermitTailCall(F, CI, RetI, *TLI)) {
2724           // Either we return void or the return value must be the first
2725           // argument of a known intrinsic or library function.
2726           if (!V || isa<UndefValue>(V) ||
2727               (isIntrinsicOrLFToBeTailCalled(TLInfo, CI) &&
2728                V == CI->getArgOperand(0))) {
2729             TailCallBBs.push_back(Pred);
2730           }
2731         }
2732       }
2733     }
2734   }
2735 
2736   bool Changed = false;
2737   for (auto const &TailCallBB : TailCallBBs) {
2738     // Make sure the call instruction is followed by an unconditional branch to
2739     // the return block.
2740     BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator());
2741     if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
2742       continue;
2743 
2744     // Duplicate the return into TailCallBB.
2745     (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB);
2746     assert(!VerifyBFIUpdates ||
2747            BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB));
2748     BFI->setBlockFreq(BB,
2749                       (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)));
2750     ModifiedDT = ModifyDT::ModifyBBDT;
2751     Changed = true;
2752     ++NumRetsDup;
2753   }
2754 
2755   // If we eliminated all predecessors of the block, delete the block now.
2756   if (Changed && !BB->hasAddressTaken() && pred_empty(BB))
2757     BB->eraseFromParent();
2758 
2759   return Changed;
2760 }
2761 
2762 //===----------------------------------------------------------------------===//
2763 // Memory Optimization
2764 //===----------------------------------------------------------------------===//
2765 
2766 namespace {
2767 
2768 /// This is an extended version of TargetLowering::AddrMode
2769 /// which holds actual Value*'s for register values.
2770 struct ExtAddrMode : public TargetLowering::AddrMode {
2771   Value *BaseReg = nullptr;
2772   Value *ScaledReg = nullptr;
2773   Value *OriginalValue = nullptr;
2774   bool InBounds = true;
2775 
2776   enum FieldName {
2777     NoField = 0x00,
2778     BaseRegField = 0x01,
2779     BaseGVField = 0x02,
2780     BaseOffsField = 0x04,
2781     ScaledRegField = 0x08,
2782     ScaleField = 0x10,
2783     MultipleFields = 0xff
2784   };
2785 
2786   ExtAddrMode() = default;
2787 
2788   void print(raw_ostream &OS) const;
2789   void dump() const;
2790 
2791   FieldName compare(const ExtAddrMode &other) {
2792     // First check that the types are the same on each field, as differing types
2793     // is something we can't cope with later on.
2794     if (BaseReg && other.BaseReg &&
2795         BaseReg->getType() != other.BaseReg->getType())
2796       return MultipleFields;
2797     if (BaseGV && other.BaseGV && BaseGV->getType() != other.BaseGV->getType())
2798       return MultipleFields;
2799     if (ScaledReg && other.ScaledReg &&
2800         ScaledReg->getType() != other.ScaledReg->getType())
2801       return MultipleFields;
2802 
2803     // Conservatively reject 'inbounds' mismatches.
2804     if (InBounds != other.InBounds)
2805       return MultipleFields;
2806 
2807     // Check each field to see if it differs.
2808     unsigned Result = NoField;
2809     if (BaseReg != other.BaseReg)
2810       Result |= BaseRegField;
2811     if (BaseGV != other.BaseGV)
2812       Result |= BaseGVField;
2813     if (BaseOffs != other.BaseOffs)
2814       Result |= BaseOffsField;
2815     if (ScaledReg != other.ScaledReg)
2816       Result |= ScaledRegField;
2817     // Don't count 0 as being a different scale, because that actually means
2818     // unscaled (which will already be counted by having no ScaledReg).
2819     if (Scale && other.Scale && Scale != other.Scale)
2820       Result |= ScaleField;
2821 
2822     if (llvm::popcount(Result) > 1)
2823       return MultipleFields;
2824     else
2825       return static_cast<FieldName>(Result);
2826   }
2827 
2828   // An AddrMode is trivial if it involves no calculation i.e. it is just a base
2829   // with no offset.
2830   bool isTrivial() {
2831     // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is
2832     // trivial if at most one of these terms is nonzero, except that BaseGV and
2833     // BaseReg both being zero actually means a null pointer value, which we
2834     // consider to be 'non-zero' here.
2835     return !BaseOffs && !Scale && !(BaseGV && BaseReg);
2836   }
2837 
2838   Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) {
2839     switch (Field) {
2840     default:
2841       return nullptr;
2842     case BaseRegField:
2843       return BaseReg;
2844     case BaseGVField:
2845       return BaseGV;
2846     case ScaledRegField:
2847       return ScaledReg;
2848     case BaseOffsField:
2849       return ConstantInt::get(IntPtrTy, BaseOffs);
2850     }
2851   }
2852 
2853   void SetCombinedField(FieldName Field, Value *V,
2854                         const SmallVectorImpl<ExtAddrMode> &AddrModes) {
2855     switch (Field) {
2856     default:
2857       llvm_unreachable("Unhandled fields are expected to be rejected earlier");
2858       break;
2859     case ExtAddrMode::BaseRegField:
2860       BaseReg = V;
2861       break;
2862     case ExtAddrMode::BaseGVField:
2863       // A combined BaseGV is an Instruction, not a GlobalValue, so it goes
2864       // in the BaseReg field.
2865       assert(BaseReg == nullptr);
2866       BaseReg = V;
2867       BaseGV = nullptr;
2868       break;
2869     case ExtAddrMode::ScaledRegField:
2870       ScaledReg = V;
2871       // If we have a mix of scaled and unscaled addrmodes then we want scale
2872       // to be the scale and not zero.
2873       if (!Scale)
2874         for (const ExtAddrMode &AM : AddrModes)
2875           if (AM.Scale) {
2876             Scale = AM.Scale;
2877             break;
2878           }
2879       break;
2880     case ExtAddrMode::BaseOffsField:
2881       // The offset is no longer a constant, so it goes in ScaledReg with a
2882       // scale of 1.
2883       assert(ScaledReg == nullptr);
2884       ScaledReg = V;
2885       Scale = 1;
2886       BaseOffs = 0;
2887       break;
2888     }
2889   }
2890 };
2891 
2892 #ifndef NDEBUG
2893 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
2894   AM.print(OS);
2895   return OS;
2896 }
2897 #endif
2898 
2899 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2900 void ExtAddrMode::print(raw_ostream &OS) const {
2901   bool NeedPlus = false;
2902   OS << "[";
2903   if (InBounds)
2904     OS << "inbounds ";
2905   if (BaseGV) {
2906     OS << "GV:";
2907     BaseGV->printAsOperand(OS, /*PrintType=*/false);
2908     NeedPlus = true;
2909   }
2910 
2911   if (BaseOffs) {
2912     OS << (NeedPlus ? " + " : "") << BaseOffs;
2913     NeedPlus = true;
2914   }
2915 
2916   if (BaseReg) {
2917     OS << (NeedPlus ? " + " : "") << "Base:";
2918     BaseReg->printAsOperand(OS, /*PrintType=*/false);
2919     NeedPlus = true;
2920   }
2921   if (Scale) {
2922     OS << (NeedPlus ? " + " : "") << Scale << "*";
2923     ScaledReg->printAsOperand(OS, /*PrintType=*/false);
2924   }
2925 
2926   OS << ']';
2927 }
2928 
2929 LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
2930   print(dbgs());
2931   dbgs() << '\n';
2932 }
2933 #endif
2934 
2935 } // end anonymous namespace
2936 
2937 namespace {
2938 
2939 /// This class provides transaction based operation on the IR.
2940 /// Every change made through this class is recorded in the internal state and
2941 /// can be undone (rollback) until commit is called.
2942 /// CGP does not check if instructions could be speculatively executed when
2943 /// moved. Preserving the original location would pessimize the debugging
2944 /// experience, as well as negatively impact the quality of sample PGO.
2945 class TypePromotionTransaction {
2946   /// This represents the common interface of the individual transaction.
2947   /// Each class implements the logic for doing one specific modification on
2948   /// the IR via the TypePromotionTransaction.
2949   class TypePromotionAction {
2950   protected:
2951     /// The Instruction modified.
2952     Instruction *Inst;
2953 
2954   public:
2955     /// Constructor of the action.
2956     /// The constructor performs the related action on the IR.
2957     TypePromotionAction(Instruction *Inst) : Inst(Inst) {}
2958 
2959     virtual ~TypePromotionAction() = default;
2960 
2961     /// Undo the modification done by this action.
2962     /// When this method is called, the IR must be in the same state as it was
2963     /// before this action was applied.
2964     /// \pre Undoing the action works if and only if the IR is in the exact same
2965     /// state as it was directly after this action was applied.
2966     virtual void undo() = 0;
2967 
2968     /// Advocate every change made by this action.
2969     /// When the results on the IR of the action are to be kept, it is important
2970     /// to call this function, otherwise hidden information may be kept forever.
2971     virtual void commit() {
2972       // Nothing to be done, this action is not doing anything.
2973     }
2974   };
2975 
2976   /// Utility to remember the position of an instruction.
2977   class InsertionHandler {
2978     /// Position of an instruction.
2979     /// Either an instruction:
2980     /// - Is the first in a basic block: BB is used.
2981     /// - Has a previous instruction: PrevInst is used.
2982     union {
2983       Instruction *PrevInst;
2984       BasicBlock *BB;
2985     } Point;
2986     std::optional<DbgRecord::self_iterator> BeforeDbgRecord = std::nullopt;
2987 
2988     /// Remember whether or not the instruction had a previous instruction.
2989     bool HasPrevInstruction;
2990 
2991   public:
2992     /// Record the position of \p Inst.
2993     InsertionHandler(Instruction *Inst) {
2994       HasPrevInstruction = (Inst != &*(Inst->getParent()->begin()));
2995       BasicBlock *BB = Inst->getParent();
2996 
2997       // Record where we would have to re-insert the instruction in the sequence
2998       // of DbgRecords, if we ended up reinserting.
2999       if (BB->IsNewDbgInfoFormat)
3000         BeforeDbgRecord = Inst->getDbgReinsertionPosition();
3001 
3002       if (HasPrevInstruction) {
3003         Point.PrevInst = &*std::prev(Inst->getIterator());
3004       } else {
3005         Point.BB = BB;
3006       }
3007     }
3008 
3009     /// Insert \p Inst at the recorded position.
3010     void insert(Instruction *Inst) {
3011       if (HasPrevInstruction) {
3012         if (Inst->getParent())
3013           Inst->removeFromParent();
3014         Inst->insertAfter(&*Point.PrevInst);
3015       } else {
3016         BasicBlock::iterator Position = Point.BB->getFirstInsertionPt();
3017         if (Inst->getParent())
3018           Inst->moveBefore(*Point.BB, Position);
3019         else
3020           Inst->insertBefore(*Point.BB, Position);
3021       }
3022 
3023       Inst->getParent()->reinsertInstInDbgRecords(Inst, BeforeDbgRecord);
3024     }
3025   };
3026 
3027   /// Move an instruction before another.
3028   class InstructionMoveBefore : public TypePromotionAction {
3029     /// Original position of the instruction.
3030     InsertionHandler Position;
3031 
3032   public:
3033     /// Move \p Inst before \p Before.
3034     InstructionMoveBefore(Instruction *Inst, Instruction *Before)
3035         : TypePromotionAction(Inst), Position(Inst) {
3036       LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before
3037                         << "\n");
3038       Inst->moveBefore(Before);
3039     }
3040 
3041     /// Move the instruction back to its original position.
3042     void undo() override {
3043       LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
3044       Position.insert(Inst);
3045     }
3046   };
3047 
3048   /// Set the operand of an instruction with a new value.
3049   class OperandSetter : public TypePromotionAction {
3050     /// Original operand of the instruction.
3051     Value *Origin;
3052 
3053     /// Index of the modified instruction.
3054     unsigned Idx;
3055 
3056   public:
3057     /// Set \p Idx operand of \p Inst with \p NewVal.
3058     OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
3059         : TypePromotionAction(Inst), Idx(Idx) {
3060       LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
3061                         << "for:" << *Inst << "\n"
3062                         << "with:" << *NewVal << "\n");
3063       Origin = Inst->getOperand(Idx);
3064       Inst->setOperand(Idx, NewVal);
3065     }
3066 
3067     /// Restore the original value of the instruction.
3068     void undo() override {
3069       LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
3070                         << "for: " << *Inst << "\n"
3071                         << "with: " << *Origin << "\n");
3072       Inst->setOperand(Idx, Origin);
3073     }
3074   };
3075 
3076   /// Hide the operands of an instruction.
3077   /// Do as if this instruction was not using any of its operands.
3078   class OperandsHider : public TypePromotionAction {
3079     /// The list of original operands.
3080     SmallVector<Value *, 4> OriginalValues;
3081 
3082   public:
3083     /// Remove \p Inst from the uses of the operands of \p Inst.
3084     OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
3085       LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
3086       unsigned NumOpnds = Inst->getNumOperands();
3087       OriginalValues.reserve(NumOpnds);
3088       for (unsigned It = 0; It < NumOpnds; ++It) {
3089         // Save the current operand.
3090         Value *Val = Inst->getOperand(It);
3091         OriginalValues.push_back(Val);
3092         // Set a dummy one.
3093         // We could use OperandSetter here, but that would imply an overhead
3094         // that we are not willing to pay.
3095         Inst->setOperand(It, UndefValue::get(Val->getType()));
3096       }
3097     }
3098 
3099     /// Restore the original list of uses.
3100     void undo() override {
3101       LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
3102       for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
3103         Inst->setOperand(It, OriginalValues[It]);
3104     }
3105   };
3106 
3107   /// Build a truncate instruction.
3108   class TruncBuilder : public TypePromotionAction {
3109     Value *Val;
3110 
3111   public:
3112     /// Build a truncate instruction of \p Opnd producing a \p Ty
3113     /// result.
3114     /// trunc Opnd to Ty.
3115     TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
3116       IRBuilder<> Builder(Opnd);
3117       Builder.SetCurrentDebugLocation(DebugLoc());
3118       Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
3119       LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
3120     }
3121 
3122     /// Get the built value.
3123     Value *getBuiltValue() { return Val; }
3124 
3125     /// Remove the built instruction.
3126     void undo() override {
3127       LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
3128       if (Instruction *IVal = dyn_cast<Instruction>(Val))
3129         IVal->eraseFromParent();
3130     }
3131   };
3132 
3133   /// Build a sign extension instruction.
3134   class SExtBuilder : public TypePromotionAction {
3135     Value *Val;
3136 
3137   public:
3138     /// Build a sign extension instruction of \p Opnd producing a \p Ty
3139     /// result.
3140     /// sext Opnd to Ty.
3141     SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
3142         : TypePromotionAction(InsertPt) {
3143       IRBuilder<> Builder(InsertPt);
3144       Val = Builder.CreateSExt(Opnd, Ty, "promoted");
3145       LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
3146     }
3147 
3148     /// Get the built value.
3149     Value *getBuiltValue() { return Val; }
3150 
3151     /// Remove the built instruction.
3152     void undo() override {
3153       LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
3154       if (Instruction *IVal = dyn_cast<Instruction>(Val))
3155         IVal->eraseFromParent();
3156     }
3157   };
3158 
3159   /// Build a zero extension instruction.
3160   class ZExtBuilder : public TypePromotionAction {
3161     Value *Val;
3162 
3163   public:
3164     /// Build a zero extension instruction of \p Opnd producing a \p Ty
3165     /// result.
3166     /// zext Opnd to Ty.
3167     ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
3168         : TypePromotionAction(InsertPt) {
3169       IRBuilder<> Builder(InsertPt);
3170       Builder.SetCurrentDebugLocation(DebugLoc());
3171       Val = Builder.CreateZExt(Opnd, Ty, "promoted");
3172       LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
3173     }
3174 
3175     /// Get the built value.
3176     Value *getBuiltValue() { return Val; }
3177 
3178     /// Remove the built instruction.
3179     void undo() override {
3180       LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
3181       if (Instruction *IVal = dyn_cast<Instruction>(Val))
3182         IVal->eraseFromParent();
3183     }
3184   };
3185 
3186   /// Mutate an instruction to another type.
3187   class TypeMutator : public TypePromotionAction {
3188     /// Record the original type.
3189     Type *OrigTy;
3190 
3191   public:
3192     /// Mutate the type of \p Inst into \p NewTy.
3193     TypeMutator(Instruction *Inst, Type *NewTy)
3194         : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
3195       LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
3196                         << "\n");
3197       Inst->mutateType(NewTy);
3198     }
3199 
3200     /// Mutate the instruction back to its original type.
3201     void undo() override {
3202       LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
3203                         << "\n");
3204       Inst->mutateType(OrigTy);
3205     }
3206   };
3207 
3208   /// Replace the uses of an instruction by another instruction.
3209   class UsesReplacer : public TypePromotionAction {
3210     /// Helper structure to keep track of the replaced uses.
3211     struct InstructionAndIdx {
3212       /// The instruction using the instruction.
3213       Instruction *Inst;
3214 
3215       /// The index where this instruction is used for Inst.
3216       unsigned Idx;
3217 
3218       InstructionAndIdx(Instruction *Inst, unsigned Idx)
3219           : Inst(Inst), Idx(Idx) {}
3220     };
3221 
3222     /// Keep track of the original uses (pair Instruction, Index).
3223     SmallVector<InstructionAndIdx, 4> OriginalUses;
3224     /// Keep track of the debug users.
3225     SmallVector<DbgValueInst *, 1> DbgValues;
3226     /// And non-instruction debug-users too.
3227     SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;
3228 
3229     /// Keep track of the new value so that we can undo it by replacing
3230     /// instances of the new value with the original value.
3231     Value *New;
3232 
3233     using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator;
3234 
3235   public:
3236     /// Replace all the use of \p Inst by \p New.
3237     UsesReplacer(Instruction *Inst, Value *New)
3238         : TypePromotionAction(Inst), New(New) {
3239       LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
3240                         << "\n");
3241       // Record the original uses.
3242       for (Use &U : Inst->uses()) {
3243         Instruction *UserI = cast<Instruction>(U.getUser());
3244         OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
3245       }
3246       // Record the debug uses separately. They are not in the instruction's
3247       // use list, but they are replaced by RAUW.
3248       findDbgValues(DbgValues, Inst, &DbgVariableRecords);
3249 
3250       // Now, we can replace the uses.
3251       Inst->replaceAllUsesWith(New);
3252     }
3253 
3254     /// Reassign the original uses of Inst to Inst.
3255     void undo() override {
3256       LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
3257       for (InstructionAndIdx &Use : OriginalUses)
3258         Use.Inst->setOperand(Use.Idx, Inst);
3259       // RAUW has replaced all original uses with references to the new value,
3260       // including the debug uses. Since we are undoing the replacements,
3261       // the original debug uses must also be reinstated to maintain the
3262       // correctness and utility of debug value instructions.
3263       for (auto *DVI : DbgValues)
3264         DVI->replaceVariableLocationOp(New, Inst);
3265       // Similar story with DbgVariableRecords, the non-instruction
3266       // representation of dbg.values.
3267       for (DbgVariableRecord *DVR : DbgVariableRecords)
3268         DVR->replaceVariableLocationOp(New, Inst);
3269     }
3270   };
3271 
3272   /// Remove an instruction from the IR.
3273   class InstructionRemover : public TypePromotionAction {
3274     /// Original position of the instruction.
3275     InsertionHandler Inserter;
3276 
3277     /// Helper structure to hide all the link to the instruction. In other
3278     /// words, this helps to do as if the instruction was removed.
3279     OperandsHider Hider;
3280 
3281     /// Keep track of the uses replaced, if any.
3282     UsesReplacer *Replacer = nullptr;
3283 
3284     /// Keep track of instructions removed.
3285     SetOfInstrs &RemovedInsts;
3286 
3287   public:
3288     /// Remove all reference of \p Inst and optionally replace all its
3289     /// uses with New.
3290     /// \p RemovedInsts Keep track of the instructions removed by this Action.
3291     /// \pre If !Inst->use_empty(), then New != nullptr
3292     InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
3293                        Value *New = nullptr)
3294         : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
3295           RemovedInsts(RemovedInsts) {
3296       if (New)
3297         Replacer = new UsesReplacer(Inst, New);
3298       LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
3299       RemovedInsts.insert(Inst);
3300       /// The instructions removed here will be freed after completing
3301       /// optimizeBlock() for all blocks as we need to keep track of the
3302       /// removed instructions during promotion.
3303       Inst->removeFromParent();
3304     }
3305 
3306     ~InstructionRemover() override { delete Replacer; }
3307 
3308     InstructionRemover &operator=(const InstructionRemover &other) = delete;
3309     InstructionRemover(const InstructionRemover &other) = delete;
3310 
3311     /// Resurrect the instruction and reassign it to the proper uses if
3312     /// new value was provided when build this action.
3313     void undo() override {
3314       LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
3315       Inserter.insert(Inst);
3316       if (Replacer)
3317         Replacer->undo();
3318       Hider.undo();
3319       RemovedInsts.erase(Inst);
3320     }
3321   };
3322 
3323 public:
3324   /// Restoration point.
3325   /// The restoration point is a pointer to an action instead of an iterator
3326   /// because the iterator may be invalidated but not the pointer.
3327   using ConstRestorationPt = const TypePromotionAction *;
3328 
3329   TypePromotionTransaction(SetOfInstrs &RemovedInsts)
3330       : RemovedInsts(RemovedInsts) {}
3331 
3332   /// Advocate every changes made in that transaction. Return true if any change
3333   /// happen.
3334   bool commit();
3335 
3336   /// Undo all the changes made after the given point.
3337   void rollback(ConstRestorationPt Point);
3338 
3339   /// Get the current restoration point.
3340   ConstRestorationPt getRestorationPoint() const;
3341 
3342   /// \name API for IR modification with state keeping to support rollback.
3343   /// @{
3344   /// Same as Instruction::setOperand.
3345   void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
3346 
3347   /// Same as Instruction::eraseFromParent.
3348   void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
3349 
3350   /// Same as Value::replaceAllUsesWith.
3351   void replaceAllUsesWith(Instruction *Inst, Value *New);
3352 
3353   /// Same as Value::mutateType.
3354   void mutateType(Instruction *Inst, Type *NewTy);
3355 
3356   /// Same as IRBuilder::createTrunc.
3357   Value *createTrunc(Instruction *Opnd, Type *Ty);
3358 
3359   /// Same as IRBuilder::createSExt.
3360   Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
3361 
3362   /// Same as IRBuilder::createZExt.
3363   Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
3364 
3365 private:
3366   /// The ordered list of actions made so far.
3367   SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
3368 
3369   using CommitPt =
3370       SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator;
3371 
3372   SetOfInstrs &RemovedInsts;
3373 };
3374 
3375 } // end anonymous namespace
3376 
3377 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
3378                                           Value *NewVal) {
3379   Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>(
3380       Inst, Idx, NewVal));
3381 }
3382 
3383 void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
3384                                                 Value *NewVal) {
3385   Actions.push_back(
3386       std::make_unique<TypePromotionTransaction::InstructionRemover>(
3387           Inst, RemovedInsts, NewVal));
3388 }
3389 
3390 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
3391                                                   Value *New) {
3392   Actions.push_back(
3393       std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
3394 }
3395 
3396 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
3397   Actions.push_back(
3398       std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
3399 }
3400 
3401 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, Type *Ty) {
3402   std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
3403   Value *Val = Ptr->getBuiltValue();
3404   Actions.push_back(std::move(Ptr));
3405   return Val;
3406 }
3407 
3408 Value *TypePromotionTransaction::createSExt(Instruction *Inst, Value *Opnd,
3409                                             Type *Ty) {
3410   std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
3411   Value *Val = Ptr->getBuiltValue();
3412   Actions.push_back(std::move(Ptr));
3413   return Val;
3414 }
3415 
3416 Value *TypePromotionTransaction::createZExt(Instruction *Inst, Value *Opnd,
3417                                             Type *Ty) {
3418   std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
3419   Value *Val = Ptr->getBuiltValue();
3420   Actions.push_back(std::move(Ptr));
3421   return Val;
3422 }
3423 
3424 TypePromotionTransaction::ConstRestorationPt
3425 TypePromotionTransaction::getRestorationPoint() const {
3426   return !Actions.empty() ? Actions.back().get() : nullptr;
3427 }
3428 
3429 bool TypePromotionTransaction::commit() {
3430   for (std::unique_ptr<TypePromotionAction> &Action : Actions)
3431     Action->commit();
3432   bool Modified = !Actions.empty();
3433   Actions.clear();
3434   return Modified;
3435 }
3436 
3437 void TypePromotionTransaction::rollback(
3438     TypePromotionTransaction::ConstRestorationPt Point) {
3439   while (!Actions.empty() && Point != Actions.back().get()) {
3440     std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
3441     Curr->undo();
3442   }
3443 }
3444 
3445 namespace {
3446 
3447 /// A helper class for matching addressing modes.
3448 ///
3449 /// This encapsulates the logic for matching the target-legal addressing modes.
3450 class AddressingModeMatcher {
3451   SmallVectorImpl<Instruction *> &AddrModeInsts;
3452   const TargetLowering &TLI;
3453   const TargetRegisterInfo &TRI;
3454   const DataLayout &DL;
3455   const LoopInfo &LI;
3456   const std::function<const DominatorTree &()> getDTFn;
3457 
3458   /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
3459   /// the memory instruction that we're computing this address for.
3460   Type *AccessTy;
3461   unsigned AddrSpace;
3462   Instruction *MemoryInst;
3463 
3464   /// This is the addressing mode that we're building up. This is
3465   /// part of the return value of this addressing mode matching stuff.
3466   ExtAddrMode &AddrMode;
3467 
3468   /// The instructions inserted by other CodeGenPrepare optimizations.
3469   const SetOfInstrs &InsertedInsts;
3470 
3471   /// A map from the instructions to their type before promotion.
3472   InstrToOrigTy &PromotedInsts;
3473 
3474   /// The ongoing transaction where every action should be registered.
3475   TypePromotionTransaction &TPT;
3476 
3477   // A GEP which has too large offset to be folded into the addressing mode.
3478   std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP;
3479 
3480   /// This is set to true when we should not do profitability checks.
3481   /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
3482   bool IgnoreProfitability;
3483 
3484   /// True if we are optimizing for size.
3485   bool OptSize = false;
3486 
3487   ProfileSummaryInfo *PSI;
3488   BlockFrequencyInfo *BFI;
3489 
3490   AddressingModeMatcher(
3491       SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI,
3492       const TargetRegisterInfo &TRI, const LoopInfo &LI,
3493       const std::function<const DominatorTree &()> getDTFn, Type *AT,
3494       unsigned AS, Instruction *MI, ExtAddrMode &AM,
3495       const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts,
3496       TypePromotionTransaction &TPT,
3497       std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3498       bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
3499       : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
3500         DL(MI->getDataLayout()), LI(LI), getDTFn(getDTFn),
3501         AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM),
3502         InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT),
3503         LargeOffsetGEP(LargeOffsetGEP), OptSize(OptSize), PSI(PSI), BFI(BFI) {
3504     IgnoreProfitability = false;
3505   }
3506 
3507 public:
3508   /// Find the maximal addressing mode that a load/store of V can fold,
3509   /// give an access type of AccessTy.  This returns a list of involved
3510   /// instructions in AddrModeInsts.
3511   /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
3512   /// optimizations.
3513   /// \p PromotedInsts maps the instructions to their type before promotion.
3514   /// \p The ongoing transaction where every action should be registered.
3515   static ExtAddrMode
3516   Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst,
3517         SmallVectorImpl<Instruction *> &AddrModeInsts,
3518         const TargetLowering &TLI, const LoopInfo &LI,
3519         const std::function<const DominatorTree &()> getDTFn,
3520         const TargetRegisterInfo &TRI, const SetOfInstrs &InsertedInsts,
3521         InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT,
3522         std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3523         bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
3524     ExtAddrMode Result;
3525 
3526     bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, LI, getDTFn,
3527                                          AccessTy, AS, MemoryInst, Result,
3528                                          InsertedInsts, PromotedInsts, TPT,
3529                                          LargeOffsetGEP, OptSize, PSI, BFI)
3530                        .matchAddr(V, 0);
3531     (void)Success;
3532     assert(Success && "Couldn't select *anything*?");
3533     return Result;
3534   }
3535 
3536 private:
3537   bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
3538   bool matchAddr(Value *Addr, unsigned Depth);
3539   bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth,
3540                           bool *MovedAway = nullptr);
3541   bool isProfitableToFoldIntoAddressingMode(Instruction *I,
3542                                             ExtAddrMode &AMBefore,
3543                                             ExtAddrMode &AMAfter);
3544   bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
3545   bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
3546                              Value *PromotedOperand) const;
3547 };
3548 
3549 class PhiNodeSet;
3550 
3551 /// An iterator for PhiNodeSet.
3552 class PhiNodeSetIterator {
3553   PhiNodeSet *const Set;
3554   size_t CurrentIndex = 0;
3555 
3556 public:
3557   /// The constructor. Start should point to either a valid element, or be equal
3558   /// to the size of the underlying SmallVector of the PhiNodeSet.
3559   PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start);
3560   PHINode *operator*() const;
3561   PhiNodeSetIterator &operator++();
3562   bool operator==(const PhiNodeSetIterator &RHS) const;
3563   bool operator!=(const PhiNodeSetIterator &RHS) const;
3564 };
3565 
3566 /// Keeps a set of PHINodes.
3567 ///
3568 /// This is a minimal set implementation for a specific use case:
3569 /// It is very fast when there are very few elements, but also provides good
3570 /// performance when there are many. It is similar to SmallPtrSet, but also
3571 /// provides iteration by insertion order, which is deterministic and stable
3572 /// across runs. It is also similar to SmallSetVector, but provides removing
3573 /// elements in O(1) time. This is achieved by not actually removing the element
3574 /// from the underlying vector, so comes at the cost of using more memory, but
3575 /// that is fine, since PhiNodeSets are used as short lived objects.
3576 class PhiNodeSet {
3577   friend class PhiNodeSetIterator;
3578 
3579   using MapType = SmallDenseMap<PHINode *, size_t, 32>;
3580   using iterator = PhiNodeSetIterator;
3581 
3582   /// Keeps the elements in the order of their insertion in the underlying
3583   /// vector. To achieve constant time removal, it never deletes any element.
3584   SmallVector<PHINode *, 32> NodeList;
3585 
3586   /// Keeps the elements in the underlying set implementation. This (and not the
3587   /// NodeList defined above) is the source of truth on whether an element
3588   /// is actually in the collection.
3589   MapType NodeMap;
3590 
3591   /// Points to the first valid (not deleted) element when the set is not empty
3592   /// and the value is not zero. Equals to the size of the underlying vector
3593   /// when the set is empty. When the value is 0, as in the beginning, the
3594   /// first element may or may not be valid.
3595   size_t FirstValidElement = 0;
3596 
3597 public:
3598   /// Inserts a new element to the collection.
3599   /// \returns true if the element is actually added, i.e. was not in the
3600   /// collection before the operation.
3601   bool insert(PHINode *Ptr) {
3602     if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) {
3603       NodeList.push_back(Ptr);
3604       return true;
3605     }
3606     return false;
3607   }
3608 
3609   /// Removes the element from the collection.
3610   /// \returns whether the element is actually removed, i.e. was in the
3611   /// collection before the operation.
3612   bool erase(PHINode *Ptr) {
3613     if (NodeMap.erase(Ptr)) {
3614       SkipRemovedElements(FirstValidElement);
3615       return true;
3616     }
3617     return false;
3618   }
3619 
3620   /// Removes all elements and clears the collection.
3621   void clear() {
3622     NodeMap.clear();
3623     NodeList.clear();
3624     FirstValidElement = 0;
3625   }
3626 
3627   /// \returns an iterator that will iterate the elements in the order of
3628   /// insertion.
3629   iterator begin() {
3630     if (FirstValidElement == 0)
3631       SkipRemovedElements(FirstValidElement);
3632     return PhiNodeSetIterator(this, FirstValidElement);
3633   }
3634 
3635   /// \returns an iterator that points to the end of the collection.
3636   iterator end() { return PhiNodeSetIterator(this, NodeList.size()); }
3637 
3638   /// Returns the number of elements in the collection.
3639   size_t size() const { return NodeMap.size(); }
3640 
3641   /// \returns 1 if the given element is in the collection, and 0 if otherwise.
3642   size_t count(PHINode *Ptr) const { return NodeMap.count(Ptr); }
3643 
3644 private:
3645   /// Updates the CurrentIndex so that it will point to a valid element.
3646   ///
3647   /// If the element of NodeList at CurrentIndex is valid, it does not
3648   /// change it. If there are no more valid elements, it updates CurrentIndex
3649   /// to point to the end of the NodeList.
3650   void SkipRemovedElements(size_t &CurrentIndex) {
3651     while (CurrentIndex < NodeList.size()) {
3652       auto it = NodeMap.find(NodeList[CurrentIndex]);
3653       // If the element has been deleted and added again later, NodeMap will
3654       // point to a different index, so CurrentIndex will still be invalid.
3655       if (it != NodeMap.end() && it->second == CurrentIndex)
3656         break;
3657       ++CurrentIndex;
3658     }
3659   }
3660 };
3661 
3662 PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start)
3663     : Set(Set), CurrentIndex(Start) {}
3664 
3665 PHINode *PhiNodeSetIterator::operator*() const {
3666   assert(CurrentIndex < Set->NodeList.size() &&
3667          "PhiNodeSet access out of range");
3668   return Set->NodeList[CurrentIndex];
3669 }
3670 
3671 PhiNodeSetIterator &PhiNodeSetIterator::operator++() {
3672   assert(CurrentIndex < Set->NodeList.size() &&
3673          "PhiNodeSet access out of range");
3674   ++CurrentIndex;
3675   Set->SkipRemovedElements(CurrentIndex);
3676   return *this;
3677 }
3678 
3679 bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const {
3680   return CurrentIndex == RHS.CurrentIndex;
3681 }
3682 
3683 bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const {
3684   return !((*this) == RHS);
3685 }
3686 
3687 /// Keep track of simplification of Phi nodes.
3688 /// Accept the set of all phi nodes and erase phi node from this set
3689 /// if it is simplified.
3690 class SimplificationTracker {
3691   DenseMap<Value *, Value *> Storage;
3692   const SimplifyQuery &SQ;
3693   // Tracks newly created Phi nodes. The elements are iterated by insertion
3694   // order.
3695   PhiNodeSet AllPhiNodes;
3696   // Tracks newly created Select nodes.
3697   SmallPtrSet<SelectInst *, 32> AllSelectNodes;
3698 
3699 public:
3700   SimplificationTracker(const SimplifyQuery &sq) : SQ(sq) {}
3701 
3702   Value *Get(Value *V) {
3703     do {
3704       auto SV = Storage.find(V);
3705       if (SV == Storage.end())
3706         return V;
3707       V = SV->second;
3708     } while (true);
3709   }
3710 
3711   Value *Simplify(Value *Val) {
3712     SmallVector<Value *, 32> WorkList;
3713     SmallPtrSet<Value *, 32> Visited;
3714     WorkList.push_back(Val);
3715     while (!WorkList.empty()) {
3716       auto *P = WorkList.pop_back_val();
3717       if (!Visited.insert(P).second)
3718         continue;
3719       if (auto *PI = dyn_cast<Instruction>(P))
3720         if (Value *V = simplifyInstruction(cast<Instruction>(PI), SQ)) {
3721           for (auto *U : PI->users())
3722             WorkList.push_back(cast<Value>(U));
3723           Put(PI, V);
3724           PI->replaceAllUsesWith(V);
3725           if (auto *PHI = dyn_cast<PHINode>(PI))
3726             AllPhiNodes.erase(PHI);
3727           if (auto *Select = dyn_cast<SelectInst>(PI))
3728             AllSelectNodes.erase(Select);
3729           PI->eraseFromParent();
3730         }
3731     }
3732     return Get(Val);
3733   }
3734 
3735   void Put(Value *From, Value *To) { Storage.insert({From, To}); }
3736 
3737   void ReplacePhi(PHINode *From, PHINode *To) {
3738     Value *OldReplacement = Get(From);
3739     while (OldReplacement != From) {
3740       From = To;
3741       To = dyn_cast<PHINode>(OldReplacement);
3742       OldReplacement = Get(From);
3743     }
3744     assert(To && Get(To) == To && "Replacement PHI node is already replaced.");
3745     Put(From, To);
3746     From->replaceAllUsesWith(To);
3747     AllPhiNodes.erase(From);
3748     From->eraseFromParent();
3749   }
3750 
3751   PhiNodeSet &newPhiNodes() { return AllPhiNodes; }
3752 
3753   void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); }
3754 
3755   void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); }
3756 
3757   unsigned countNewPhiNodes() const { return AllPhiNodes.size(); }
3758 
3759   unsigned countNewSelectNodes() const { return AllSelectNodes.size(); }
3760 
3761   void destroyNewNodes(Type *CommonType) {
3762     // For safe erasing, replace the uses with dummy value first.
3763     auto *Dummy = PoisonValue::get(CommonType);
3764     for (auto *I : AllPhiNodes) {
3765       I->replaceAllUsesWith(Dummy);
3766       I->eraseFromParent();
3767     }
3768     AllPhiNodes.clear();
3769     for (auto *I : AllSelectNodes) {
3770       I->replaceAllUsesWith(Dummy);
3771       I->eraseFromParent();
3772     }
3773     AllSelectNodes.clear();
3774   }
3775 };
3776 
3777 /// A helper class for combining addressing modes.
3778 class AddressingModeCombiner {
3779   typedef DenseMap<Value *, Value *> FoldAddrToValueMapping;
3780   typedef std::pair<PHINode *, PHINode *> PHIPair;
3781 
3782 private:
3783   /// The addressing modes we've collected.
3784   SmallVector<ExtAddrMode, 16> AddrModes;
3785 
3786   /// The field in which the AddrModes differ, when we have more than one.
3787   ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField;
3788 
3789   /// Are the AddrModes that we have all just equal to their original values?
3790   bool AllAddrModesTrivial = true;
3791 
3792   /// Common Type for all different fields in addressing modes.
3793   Type *CommonType = nullptr;
3794 
3795   /// SimplifyQuery for simplifyInstruction utility.
3796   const SimplifyQuery &SQ;
3797 
3798   /// Original Address.
3799   Value *Original;
3800 
3801   /// Common value among addresses
3802   Value *CommonValue = nullptr;
3803 
3804 public:
3805   AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue)
3806       : SQ(_SQ), Original(OriginalValue) {}
3807 
3808   ~AddressingModeCombiner() { eraseCommonValueIfDead(); }
3809 
3810   /// Get the combined AddrMode
3811   const ExtAddrMode &getAddrMode() const { return AddrModes[0]; }
3812 
3813   /// Add a new AddrMode if it's compatible with the AddrModes we already
3814   /// have.
3815   /// \return True iff we succeeded in doing so.
3816   bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
3817     // Take note of if we have any non-trivial AddrModes, as we need to detect
3818     // when all AddrModes are trivial as then we would introduce a phi or select
3819     // which just duplicates what's already there.
3820     AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial();
3821 
3822     // If this is the first addrmode then everything is fine.
3823     if (AddrModes.empty()) {
3824       AddrModes.emplace_back(NewAddrMode);
3825       return true;
3826     }
3827 
3828     // Figure out how different this is from the other address modes, which we
3829     // can do just by comparing against the first one given that we only care
3830     // about the cumulative difference.
3831     ExtAddrMode::FieldName ThisDifferentField =
3832         AddrModes[0].compare(NewAddrMode);
3833     if (DifferentField == ExtAddrMode::NoField)
3834       DifferentField = ThisDifferentField;
3835     else if (DifferentField != ThisDifferentField)
3836       DifferentField = ExtAddrMode::MultipleFields;
3837 
3838     // If NewAddrMode differs in more than one dimension we cannot handle it.
3839     bool CanHandle = DifferentField != ExtAddrMode::MultipleFields;
3840 
3841     // If Scale Field is different then we reject.
3842     CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField;
3843 
3844     // We also must reject the case when base offset is different and
3845     // scale reg is not null, we cannot handle this case due to merge of
3846     // different offsets will be used as ScaleReg.
3847     CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField ||
3848                               !NewAddrMode.ScaledReg);
3849 
3850     // We also must reject the case when GV is different and BaseReg installed
3851     // due to we want to use base reg as a merge of GV values.
3852     CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField ||
3853                               !NewAddrMode.HasBaseReg);
3854 
3855     // Even if NewAddMode is the same we still need to collect it due to
3856     // original value is different. And later we will need all original values
3857     // as anchors during finding the common Phi node.
3858     if (CanHandle)
3859       AddrModes.emplace_back(NewAddrMode);
3860     else
3861       AddrModes.clear();
3862 
3863     return CanHandle;
3864   }
3865 
3866   /// Combine the addressing modes we've collected into a single
3867   /// addressing mode.
3868   /// \return True iff we successfully combined them or we only had one so
3869   /// didn't need to combine them anyway.
3870   bool combineAddrModes() {
3871     // If we have no AddrModes then they can't be combined.
3872     if (AddrModes.size() == 0)
3873       return false;
3874 
3875     // A single AddrMode can trivially be combined.
3876     if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField)
3877       return true;
3878 
3879     // If the AddrModes we collected are all just equal to the value they are
3880     // derived from then combining them wouldn't do anything useful.
3881     if (AllAddrModesTrivial)
3882       return false;
3883 
3884     if (!addrModeCombiningAllowed())
3885       return false;
3886 
3887     // Build a map between <original value, basic block where we saw it> to
3888     // value of base register.
3889     // Bail out if there is no common type.
3890     FoldAddrToValueMapping Map;
3891     if (!initializeMap(Map))
3892       return false;
3893 
3894     CommonValue = findCommon(Map);
3895     if (CommonValue)
3896       AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes);
3897     return CommonValue != nullptr;
3898   }
3899 
3900 private:
3901   /// `CommonValue` may be a placeholder inserted by us.
3902   /// If the placeholder is not used, we should remove this dead instruction.
3903   void eraseCommonValueIfDead() {
3904     if (CommonValue && CommonValue->getNumUses() == 0)
3905       if (Instruction *CommonInst = dyn_cast<Instruction>(CommonValue))
3906         CommonInst->eraseFromParent();
3907   }
3908 
3909   /// Initialize Map with anchor values. For address seen
3910   /// we set the value of different field saw in this address.
3911   /// At the same time we find a common type for different field we will
3912   /// use to create new Phi/Select nodes. Keep it in CommonType field.
3913   /// Return false if there is no common type found.
3914   bool initializeMap(FoldAddrToValueMapping &Map) {
3915     // Keep track of keys where the value is null. We will need to replace it
3916     // with constant null when we know the common type.
3917     SmallVector<Value *, 2> NullValue;
3918     Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType());
3919     for (auto &AM : AddrModes) {
3920       Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy);
3921       if (DV) {
3922         auto *Type = DV->getType();
3923         if (CommonType && CommonType != Type)
3924           return false;
3925         CommonType = Type;
3926         Map[AM.OriginalValue] = DV;
3927       } else {
3928         NullValue.push_back(AM.OriginalValue);
3929       }
3930     }
3931     assert(CommonType && "At least one non-null value must be!");
3932     for (auto *V : NullValue)
3933       Map[V] = Constant::getNullValue(CommonType);
3934     return true;
3935   }
3936 
3937   /// We have mapping between value A and other value B where B was a field in
3938   /// addressing mode represented by A. Also we have an original value C
3939   /// representing an address we start with. Traversing from C through phi and
3940   /// selects we ended up with A's in a map. This utility function tries to find
3941   /// a value V which is a field in addressing mode C and traversing through phi
3942   /// nodes and selects we will end up in corresponded values B in a map.
3943   /// The utility will create a new Phi/Selects if needed.
3944   // The simple example looks as follows:
3945   // BB1:
3946   //   p1 = b1 + 40
3947   //   br cond BB2, BB3
3948   // BB2:
3949   //   p2 = b2 + 40
3950   //   br BB3
3951   // BB3:
3952   //   p = phi [p1, BB1], [p2, BB2]
3953   //   v = load p
3954   // Map is
3955   //   p1 -> b1
3956   //   p2 -> b2
3957   // Request is
3958   //   p -> ?
3959   // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3.
3960   Value *findCommon(FoldAddrToValueMapping &Map) {
3961     // Tracks the simplification of newly created phi nodes. The reason we use
3962     // this mapping is because we will add new created Phi nodes in AddrToBase.
3963     // Simplification of Phi nodes is recursive, so some Phi node may
3964     // be simplified after we added it to AddrToBase. In reality this
3965     // simplification is possible only if original phi/selects were not
3966     // simplified yet.
3967     // Using this mapping we can find the current value in AddrToBase.
3968     SimplificationTracker ST(SQ);
3969 
3970     // First step, DFS to create PHI nodes for all intermediate blocks.
3971     // Also fill traverse order for the second step.
3972     SmallVector<Value *, 32> TraverseOrder;
3973     InsertPlaceholders(Map, TraverseOrder, ST);
3974 
3975     // Second Step, fill new nodes by merged values and simplify if possible.
3976     FillPlaceholders(Map, TraverseOrder, ST);
3977 
3978     if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) {
3979       ST.destroyNewNodes(CommonType);
3980       return nullptr;
3981     }
3982 
3983     // Now we'd like to match New Phi nodes to existed ones.
3984     unsigned PhiNotMatchedCount = 0;
3985     if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
3986       ST.destroyNewNodes(CommonType);
3987       return nullptr;
3988     }
3989 
3990     auto *Result = ST.Get(Map.find(Original)->second);
3991     if (Result) {
3992       NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount;
3993       NumMemoryInstsSelectCreated += ST.countNewSelectNodes();
3994     }
3995     return Result;
3996   }
3997 
3998   /// Try to match PHI node to Candidate.
3999   /// Matcher tracks the matched Phi nodes.
4000   bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
4001                     SmallSetVector<PHIPair, 8> &Matcher,
4002                     PhiNodeSet &PhiNodesToMatch) {
4003     SmallVector<PHIPair, 8> WorkList;
4004     Matcher.insert({PHI, Candidate});
4005     SmallSet<PHINode *, 8> MatchedPHIs;
4006     MatchedPHIs.insert(PHI);
4007     WorkList.push_back({PHI, Candidate});
4008     SmallSet<PHIPair, 8> Visited;
4009     while (!WorkList.empty()) {
4010       auto Item = WorkList.pop_back_val();
4011       if (!Visited.insert(Item).second)
4012         continue;
4013       // We iterate over all incoming values to Phi to compare them.
4014       // If values are different and both of them Phi and the first one is a
4015       // Phi we added (subject to match) and both of them is in the same basic
4016       // block then we can match our pair if values match. So we state that
4017       // these values match and add it to work list to verify that.
4018       for (auto *B : Item.first->blocks()) {
4019         Value *FirstValue = Item.first->getIncomingValueForBlock(B);
4020         Value *SecondValue = Item.second->getIncomingValueForBlock(B);
4021         if (FirstValue == SecondValue)
4022           continue;
4023 
4024         PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue);
4025         PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue);
4026 
4027         // One of them is not Phi or
4028         // The first one is not Phi node from the set we'd like to match or
4029         // Phi nodes from different basic blocks then
4030         // we will not be able to match.
4031         if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) ||
4032             FirstPhi->getParent() != SecondPhi->getParent())
4033           return false;
4034 
4035         // If we already matched them then continue.
4036         if (Matcher.count({FirstPhi, SecondPhi}))
4037           continue;
4038         // So the values are different and does not match. So we need them to
4039         // match. (But we register no more than one match per PHI node, so that
4040         // we won't later try to replace them twice.)
4041         if (MatchedPHIs.insert(FirstPhi).second)
4042           Matcher.insert({FirstPhi, SecondPhi});
4043         // But me must check it.
4044         WorkList.push_back({FirstPhi, SecondPhi});
4045       }
4046     }
4047     return true;
4048   }
4049 
4050   /// For the given set of PHI nodes (in the SimplificationTracker) try
4051   /// to find their equivalents.
4052   /// Returns false if this matching fails and creation of new Phi is disabled.
4053   bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes,
4054                    unsigned &PhiNotMatchedCount) {
4055     // Matched and PhiNodesToMatch iterate their elements in a deterministic
4056     // order, so the replacements (ReplacePhi) are also done in a deterministic
4057     // order.
4058     SmallSetVector<PHIPair, 8> Matched;
4059     SmallPtrSet<PHINode *, 8> WillNotMatch;
4060     PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes();
4061     while (PhiNodesToMatch.size()) {
4062       PHINode *PHI = *PhiNodesToMatch.begin();
4063 
4064       // Add us, if no Phi nodes in the basic block we do not match.
4065       WillNotMatch.clear();
4066       WillNotMatch.insert(PHI);
4067 
4068       // Traverse all Phis until we found equivalent or fail to do that.
4069       bool IsMatched = false;
4070       for (auto &P : PHI->getParent()->phis()) {
4071         // Skip new Phi nodes.
4072         if (PhiNodesToMatch.count(&P))
4073           continue;
4074         if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch)))
4075           break;
4076         // If it does not match, collect all Phi nodes from matcher.
4077         // if we end up with no match, them all these Phi nodes will not match
4078         // later.
4079         for (auto M : Matched)
4080           WillNotMatch.insert(M.first);
4081         Matched.clear();
4082       }
4083       if (IsMatched) {
4084         // Replace all matched values and erase them.
4085         for (auto MV : Matched)
4086           ST.ReplacePhi(MV.first, MV.second);
4087         Matched.clear();
4088         continue;
4089       }
4090       // If we are not allowed to create new nodes then bail out.
4091       if (!AllowNewPhiNodes)
4092         return false;
4093       // Just remove all seen values in matcher. They will not match anything.
4094       PhiNotMatchedCount += WillNotMatch.size();
4095       for (auto *P : WillNotMatch)
4096         PhiNodesToMatch.erase(P);
4097     }
4098     return true;
4099   }
4100   /// Fill the placeholders with values from predecessors and simplify them.
4101   void FillPlaceholders(FoldAddrToValueMapping &Map,
4102                         SmallVectorImpl<Value *> &TraverseOrder,
4103                         SimplificationTracker &ST) {
4104     while (!TraverseOrder.empty()) {
4105       Value *Current = TraverseOrder.pop_back_val();
4106       assert(Map.contains(Current) && "No node to fill!!!");
4107       Value *V = Map[Current];
4108 
4109       if (SelectInst *Select = dyn_cast<SelectInst>(V)) {
4110         // CurrentValue also must be Select.
4111         auto *CurrentSelect = cast<SelectInst>(Current);
4112         auto *TrueValue = CurrentSelect->getTrueValue();
4113         assert(Map.contains(TrueValue) && "No True Value!");
4114         Select->setTrueValue(ST.Get(Map[TrueValue]));
4115         auto *FalseValue = CurrentSelect->getFalseValue();
4116         assert(Map.contains(FalseValue) && "No False Value!");
4117         Select->setFalseValue(ST.Get(Map[FalseValue]));
4118       } else {
4119         // Must be a Phi node then.
4120         auto *PHI = cast<PHINode>(V);
4121         // Fill the Phi node with values from predecessors.
4122         for (auto *B : predecessors(PHI->getParent())) {
4123           Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B);
4124           assert(Map.contains(PV) && "No predecessor Value!");
4125           PHI->addIncoming(ST.Get(Map[PV]), B);
4126         }
4127       }
4128       Map[Current] = ST.Simplify(V);
4129     }
4130   }
4131 
4132   /// Starting from original value recursively iterates over def-use chain up to
4133   /// known ending values represented in a map. For each traversed phi/select
4134   /// inserts a placeholder Phi or Select.
4135   /// Reports all new created Phi/Select nodes by adding them to set.
4136   /// Also reports and order in what values have been traversed.
4137   void InsertPlaceholders(FoldAddrToValueMapping &Map,
4138                           SmallVectorImpl<Value *> &TraverseOrder,
4139                           SimplificationTracker &ST) {
4140     SmallVector<Value *, 32> Worklist;
4141     assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) &&
4142            "Address must be a Phi or Select node");
4143     auto *Dummy = PoisonValue::get(CommonType);
4144     Worklist.push_back(Original);
4145     while (!Worklist.empty()) {
4146       Value *Current = Worklist.pop_back_val();
4147       // if it is already visited or it is an ending value then skip it.
4148       if (Map.contains(Current))
4149         continue;
4150       TraverseOrder.push_back(Current);
4151 
4152       // CurrentValue must be a Phi node or select. All others must be covered
4153       // by anchors.
4154       if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) {
4155         // Is it OK to get metadata from OrigSelect?!
4156         // Create a Select placeholder with dummy value.
4157         SelectInst *Select =
4158             SelectInst::Create(CurrentSelect->getCondition(), Dummy, Dummy,
4159                                CurrentSelect->getName(),
4160                                CurrentSelect->getIterator(), CurrentSelect);
4161         Map[Current] = Select;
4162         ST.insertNewSelect(Select);
4163         // We are interested in True and False values.
4164         Worklist.push_back(CurrentSelect->getTrueValue());
4165         Worklist.push_back(CurrentSelect->getFalseValue());
4166       } else {
4167         // It must be a Phi node then.
4168         PHINode *CurrentPhi = cast<PHINode>(Current);
4169         unsigned PredCount = CurrentPhi->getNumIncomingValues();
4170         PHINode *PHI =
4171             PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi->getIterator());
4172         Map[Current] = PHI;
4173         ST.insertNewPhi(PHI);
4174         append_range(Worklist, CurrentPhi->incoming_values());
4175       }
4176     }
4177   }
4178 
4179   bool addrModeCombiningAllowed() {
4180     if (DisableComplexAddrModes)
4181       return false;
4182     switch (DifferentField) {
4183     default:
4184       return false;
4185     case ExtAddrMode::BaseRegField:
4186       return AddrSinkCombineBaseReg;
4187     case ExtAddrMode::BaseGVField:
4188       return AddrSinkCombineBaseGV;
4189     case ExtAddrMode::BaseOffsField:
4190       return AddrSinkCombineBaseOffs;
4191     case ExtAddrMode::ScaledRegField:
4192       return AddrSinkCombineScaledReg;
4193     }
4194   }
4195 };
4196 } // end anonymous namespace
4197 
4198 /// Try adding ScaleReg*Scale to the current addressing mode.
4199 /// Return true and update AddrMode if this addr mode is legal for the target,
4200 /// false if not.
4201 bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
4202                                              unsigned Depth) {
4203   // If Scale is 1, then this is the same as adding ScaleReg to the addressing
4204   // mode.  Just process that directly.
4205   if (Scale == 1)
4206     return matchAddr(ScaleReg, Depth);
4207 
4208   // If the scale is 0, it takes nothing to add this.
4209   if (Scale == 0)
4210     return true;
4211 
4212   // If we already have a scale of this value, we can add to it, otherwise, we
4213   // need an available scale field.
4214   if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
4215     return false;
4216 
4217   ExtAddrMode TestAddrMode = AddrMode;
4218 
4219   // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
4220   // [A+B + A*7] -> [B+A*8].
4221   TestAddrMode.Scale += Scale;
4222   TestAddrMode.ScaledReg = ScaleReg;
4223 
4224   // If the new address isn't legal, bail out.
4225   if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
4226     return false;
4227 
4228   // It was legal, so commit it.
4229   AddrMode = TestAddrMode;
4230 
4231   // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
4232   // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
4233   // X*Scale + C*Scale to addr mode. If we found available IV increment, do not
4234   // go any further: we can reuse it and cannot eliminate it.
4235   ConstantInt *CI = nullptr;
4236   Value *AddLHS = nullptr;
4237   if (isa<Instruction>(ScaleReg) && // not a constant expr.
4238       match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) &&
4239       !isIVIncrement(ScaleReg, &LI) && CI->getValue().isSignedIntN(64)) {
4240     TestAddrMode.InBounds = false;
4241     TestAddrMode.ScaledReg = AddLHS;
4242     TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale;
4243 
4244     // If this addressing mode is legal, commit it and remember that we folded
4245     // this instruction.
4246     if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
4247       AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
4248       AddrMode = TestAddrMode;
4249       return true;
4250     }
4251     // Restore status quo.
4252     TestAddrMode = AddrMode;
4253   }
4254 
4255   // If this is an add recurrence with a constant step, return the increment
4256   // instruction and the canonicalized step.
4257   auto GetConstantStep =
4258       [this](const Value *V) -> std::optional<std::pair<Instruction *, APInt>> {
4259     auto *PN = dyn_cast<PHINode>(V);
4260     if (!PN)
4261       return std::nullopt;
4262     auto IVInc = getIVIncrement(PN, &LI);
4263     if (!IVInc)
4264       return std::nullopt;
4265     // TODO: The result of the intrinsics above is two-complement. However when
4266     // IV inc is expressed as add or sub, iv.next is potentially a poison value.
4267     // If it has nuw or nsw flags, we need to make sure that these flags are
4268     // inferrable at the point of memory instruction. Otherwise we are replacing
4269     // well-defined two-complement computation with poison. Currently, to avoid
4270     // potentially complex analysis needed to prove this, we reject such cases.
4271     if (auto *OIVInc = dyn_cast<OverflowingBinaryOperator>(IVInc->first))
4272       if (OIVInc->hasNoSignedWrap() || OIVInc->hasNoUnsignedWrap())
4273         return std::nullopt;
4274     if (auto *ConstantStep = dyn_cast<ConstantInt>(IVInc->second))
4275       return std::make_pair(IVInc->first, ConstantStep->getValue());
4276     return std::nullopt;
4277   };
4278 
4279   // Try to account for the following special case:
4280   // 1. ScaleReg is an inductive variable;
4281   // 2. We use it with non-zero offset;
4282   // 3. IV's increment is available at the point of memory instruction.
4283   //
4284   // In this case, we may reuse the IV increment instead of the IV Phi to
4285   // achieve the following advantages:
4286   // 1. If IV step matches the offset, we will have no need in the offset;
4287   // 2. Even if they don't match, we will reduce the overlap of living IV
4288   //    and IV increment, that will potentially lead to better register
4289   //    assignment.
4290   if (AddrMode.BaseOffs) {
4291     if (auto IVStep = GetConstantStep(ScaleReg)) {
4292       Instruction *IVInc = IVStep->first;
4293       // The following assert is important to ensure a lack of infinite loops.
4294       // This transforms is (intentionally) the inverse of the one just above.
4295       // If they don't agree on the definition of an increment, we'd alternate
4296       // back and forth indefinitely.
4297       assert(isIVIncrement(IVInc, &LI) && "implied by GetConstantStep");
4298       APInt Step = IVStep->second;
4299       APInt Offset = Step * AddrMode.Scale;
4300       if (Offset.isSignedIntN(64)) {
4301         TestAddrMode.InBounds = false;
4302         TestAddrMode.ScaledReg = IVInc;
4303         TestAddrMode.BaseOffs -= Offset.getLimitedValue();
4304         // If this addressing mode is legal, commit it..
4305         // (Note that we defer the (expensive) domtree base legality check
4306         // to the very last possible point.)
4307         if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace) &&
4308             getDTFn().dominates(IVInc, MemoryInst)) {
4309           AddrModeInsts.push_back(cast<Instruction>(IVInc));
4310           AddrMode = TestAddrMode;
4311           return true;
4312         }
4313         // Restore status quo.
4314         TestAddrMode = AddrMode;
4315       }
4316     }
4317   }
4318 
4319   // Otherwise, just return what we have.
4320   return true;
4321 }
4322 
4323 /// This is a little filter, which returns true if an addressing computation
4324 /// involving I might be folded into a load/store accessing it.
4325 /// This doesn't need to be perfect, but needs to accept at least
4326 /// the set of instructions that MatchOperationAddr can.
4327 static bool MightBeFoldableInst(Instruction *I) {
4328   switch (I->getOpcode()) {
4329   case Instruction::BitCast:
4330   case Instruction::AddrSpaceCast:
4331     // Don't touch identity bitcasts.
4332     if (I->getType() == I->getOperand(0)->getType())
4333       return false;
4334     return I->getType()->isIntOrPtrTy();
4335   case Instruction::PtrToInt:
4336     // PtrToInt is always a noop, as we know that the int type is pointer sized.
4337     return true;
4338   case Instruction::IntToPtr:
4339     // We know the input is intptr_t, so this is foldable.
4340     return true;
4341   case Instruction::Add:
4342     return true;
4343   case Instruction::Mul:
4344   case Instruction::Shl:
4345     // Can only handle X*C and X << C.
4346     return isa<ConstantInt>(I->getOperand(1));
4347   case Instruction::GetElementPtr:
4348     return true;
4349   default:
4350     return false;
4351   }
4352 }
4353 
4354 /// Check whether or not \p Val is a legal instruction for \p TLI.
4355 /// \note \p Val is assumed to be the product of some type promotion.
4356 /// Therefore if \p Val has an undefined state in \p TLI, this is assumed
4357 /// to be legal, as the non-promoted value would have had the same state.
4358 static bool isPromotedInstructionLegal(const TargetLowering &TLI,
4359                                        const DataLayout &DL, Value *Val) {
4360   Instruction *PromotedInst = dyn_cast<Instruction>(Val);
4361   if (!PromotedInst)
4362     return false;
4363   int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
4364   // If the ISDOpcode is undefined, it was undefined before the promotion.
4365   if (!ISDOpcode)
4366     return true;
4367   // Otherwise, check if the promoted instruction is legal or not.
4368   return TLI.isOperationLegalOrCustom(
4369       ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
4370 }
4371 
4372 namespace {
4373 
4374 /// Hepler class to perform type promotion.
4375 class TypePromotionHelper {
4376   /// Utility function to add a promoted instruction \p ExtOpnd to
4377   /// \p PromotedInsts and record the type of extension we have seen.
4378   static void addPromotedInst(InstrToOrigTy &PromotedInsts,
4379                               Instruction *ExtOpnd, bool IsSExt) {
4380     ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
4381     InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd);
4382     if (It != PromotedInsts.end()) {
4383       // If the new extension is same as original, the information in
4384       // PromotedInsts[ExtOpnd] is still correct.
4385       if (It->second.getInt() == ExtTy)
4386         return;
4387 
4388       // Now the new extension is different from old extension, we make
4389       // the type information invalid by setting extension type to
4390       // BothExtension.
4391       ExtTy = BothExtension;
4392     }
4393     PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy);
4394   }
4395 
4396   /// Utility function to query the original type of instruction \p Opnd
4397   /// with a matched extension type. If the extension doesn't match, we
4398   /// cannot use the information we had on the original type.
4399   /// BothExtension doesn't match any extension type.
4400   static const Type *getOrigType(const InstrToOrigTy &PromotedInsts,
4401                                  Instruction *Opnd, bool IsSExt) {
4402     ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
4403     InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
4404     if (It != PromotedInsts.end() && It->second.getInt() == ExtTy)
4405       return It->second.getPointer();
4406     return nullptr;
4407   }
4408 
4409   /// Utility function to check whether or not a sign or zero extension
4410   /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
4411   /// either using the operands of \p Inst or promoting \p Inst.
4412   /// The type of the extension is defined by \p IsSExt.
4413   /// In other words, check if:
4414   /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
4415   /// #1 Promotion applies:
4416   /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
4417   /// #2 Operand reuses:
4418   /// ext opnd1 to ConsideredExtType.
4419   /// \p PromotedInsts maps the instructions to their type before promotion.
4420   static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
4421                             const InstrToOrigTy &PromotedInsts, bool IsSExt);
4422 
4423   /// Utility function to determine if \p OpIdx should be promoted when
4424   /// promoting \p Inst.
4425   static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
4426     return !(isa<SelectInst>(Inst) && OpIdx == 0);
4427   }
4428 
4429   /// Utility function to promote the operand of \p Ext when this
4430   /// operand is a promotable trunc or sext or zext.
4431   /// \p PromotedInsts maps the instructions to their type before promotion.
4432   /// \p CreatedInstsCost[out] contains the cost of all instructions
4433   /// created to promote the operand of Ext.
4434   /// Newly added extensions are inserted in \p Exts.
4435   /// Newly added truncates are inserted in \p Truncs.
4436   /// Should never be called directly.
4437   /// \return The promoted value which is used instead of Ext.
4438   static Value *promoteOperandForTruncAndAnyExt(
4439       Instruction *Ext, TypePromotionTransaction &TPT,
4440       InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4441       SmallVectorImpl<Instruction *> *Exts,
4442       SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);
4443 
4444   /// Utility function to promote the operand of \p Ext when this
4445   /// operand is promotable and is not a supported trunc or sext.
4446   /// \p PromotedInsts maps the instructions to their type before promotion.
4447   /// \p CreatedInstsCost[out] contains the cost of all the instructions
4448   /// created to promote the operand of Ext.
4449   /// Newly added extensions are inserted in \p Exts.
4450   /// Newly added truncates are inserted in \p Truncs.
4451   /// Should never be called directly.
4452   /// \return The promoted value which is used instead of Ext.
4453   static Value *promoteOperandForOther(Instruction *Ext,
4454                                        TypePromotionTransaction &TPT,
4455                                        InstrToOrigTy &PromotedInsts,
4456                                        unsigned &CreatedInstsCost,
4457                                        SmallVectorImpl<Instruction *> *Exts,
4458                                        SmallVectorImpl<Instruction *> *Truncs,
4459                                        const TargetLowering &TLI, bool IsSExt);
4460 
4461   /// \see promoteOperandForOther.
4462   static Value *signExtendOperandForOther(
4463       Instruction *Ext, TypePromotionTransaction &TPT,
4464       InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4465       SmallVectorImpl<Instruction *> *Exts,
4466       SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4467     return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
4468                                   Exts, Truncs, TLI, true);
4469   }
4470 
4471   /// \see promoteOperandForOther.
4472   static Value *zeroExtendOperandForOther(
4473       Instruction *Ext, TypePromotionTransaction &TPT,
4474       InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4475       SmallVectorImpl<Instruction *> *Exts,
4476       SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4477     return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
4478                                   Exts, Truncs, TLI, false);
4479   }
4480 
4481 public:
4482   /// Type for the utility function that promotes the operand of Ext.
4483   using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT,
4484                             InstrToOrigTy &PromotedInsts,
4485                             unsigned &CreatedInstsCost,
4486                             SmallVectorImpl<Instruction *> *Exts,
4487                             SmallVectorImpl<Instruction *> *Truncs,
4488                             const TargetLowering &TLI);
4489 
4490   /// Given a sign/zero extend instruction \p Ext, return the appropriate
4491   /// action to promote the operand of \p Ext instead of using Ext.
4492   /// \return NULL if no promotable action is possible with the current
4493   /// sign extension.
4494   /// \p InsertedInsts keeps track of all the instructions inserted by the
4495   /// other CodeGenPrepare optimizations. This information is important
4496   /// because we do not want to promote these instructions as CodeGenPrepare
4497   /// will reinsert them later. Thus creating an infinite loop: create/remove.
4498   /// \p PromotedInsts maps the instructions to their type before promotion.
4499   static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
4500                           const TargetLowering &TLI,
4501                           const InstrToOrigTy &PromotedInsts);
4502 };
4503 
4504 } // end anonymous namespace
4505 
4506 bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
4507                                         Type *ConsideredExtType,
4508                                         const InstrToOrigTy &PromotedInsts,
4509                                         bool IsSExt) {
4510   // The promotion helper does not know how to deal with vector types yet.
4511   // To be able to fix that, we would need to fix the places where we
4512   // statically extend, e.g., constants and such.
4513   if (Inst->getType()->isVectorTy())
4514     return false;
4515 
4516   // We can always get through zext.
4517   if (isa<ZExtInst>(Inst))
4518     return true;
4519 
4520   // sext(sext) is ok too.
4521   if (IsSExt && isa<SExtInst>(Inst))
4522     return true;
4523 
4524   // We can get through binary operator, if it is legal. In other words, the
4525   // binary operator must have a nuw or nsw flag.
4526   if (const auto *BinOp = dyn_cast<BinaryOperator>(Inst))
4527     if (isa<OverflowingBinaryOperator>(BinOp) &&
4528         ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
4529          (IsSExt && BinOp->hasNoSignedWrap())))
4530       return true;
4531 
4532   // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst))
4533   if ((Inst->getOpcode() == Instruction::And ||
4534        Inst->getOpcode() == Instruction::Or))
4535     return true;
4536 
4537   // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst))
4538   if (Inst->getOpcode() == Instruction::Xor) {
4539     // Make sure it is not a NOT.
4540     if (const auto *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)))
4541       if (!Cst->getValue().isAllOnes())
4542         return true;
4543   }
4544 
4545   // zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst))
4546   // It may change a poisoned value into a regular value, like
4547   //     zext i32 (shrl i8 %val, 12)  -->  shrl i32 (zext i8 %val), 12
4548   //          poisoned value                    regular value
4549   // It should be OK since undef covers valid value.
4550   if (Inst->getOpcode() == Instruction::LShr && !IsSExt)
4551     return true;
4552 
4553   // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst)
4554   // It may change a poisoned value into a regular value, like
4555   //     zext i32 (shl i8 %val, 12)  -->  shl i32 (zext i8 %val), 12
4556   //          poisoned value                    regular value
4557   // It should be OK since undef covers valid value.
4558   if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) {
4559     const auto *ExtInst = cast<const Instruction>(*Inst->user_begin());
4560     if (ExtInst->hasOneUse()) {
4561       const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin());
4562       if (AndInst && AndInst->getOpcode() == Instruction::And) {
4563         const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1));
4564         if (Cst &&
4565             Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth()))
4566           return true;
4567       }
4568     }
4569   }
4570 
4571   // Check if we can do the following simplification.
4572   // ext(trunc(opnd)) --> ext(opnd)
4573   if (!isa<TruncInst>(Inst))
4574     return false;
4575 
4576   Value *OpndVal = Inst->getOperand(0);
4577   // Check if we can use this operand in the extension.
4578   // If the type is larger than the result type of the extension, we cannot.
4579   if (!OpndVal->getType()->isIntegerTy() ||
4580       OpndVal->getType()->getIntegerBitWidth() >
4581           ConsideredExtType->getIntegerBitWidth())
4582     return false;
4583 
4584   // If the operand of the truncate is not an instruction, we will not have
4585   // any information on the dropped bits.
4586   // (Actually we could for constant but it is not worth the extra logic).
4587   Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
4588   if (!Opnd)
4589     return false;
4590 
4591   // Check if the source of the type is narrow enough.
4592   // I.e., check that trunc just drops extended bits of the same kind of
4593   // the extension.
4594   // #1 get the type of the operand and check the kind of the extended bits.
4595   const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt);
4596   if (OpndType)
4597     ;
4598   else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
4599     OpndType = Opnd->getOperand(0)->getType();
4600   else
4601     return false;
4602 
4603   // #2 check that the truncate just drops extended bits.
4604   return Inst->getType()->getIntegerBitWidth() >=
4605          OpndType->getIntegerBitWidth();
4606 }
4607 
4608 TypePromotionHelper::Action TypePromotionHelper::getAction(
4609     Instruction *Ext, const SetOfInstrs &InsertedInsts,
4610     const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
4611   assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
4612          "Unexpected instruction type");
4613   Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
4614   Type *ExtTy = Ext->getType();
4615   bool IsSExt = isa<SExtInst>(Ext);
4616   // If the operand of the extension is not an instruction, we cannot
4617   // get through.
4618   // If it, check we can get through.
4619   if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
4620     return nullptr;
4621 
4622   // Do not promote if the operand has been added by codegenprepare.
4623   // Otherwise, it means we are undoing an optimization that is likely to be
4624   // redone, thus causing potential infinite loop.
4625   if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
4626     return nullptr;
4627 
4628   // SExt or Trunc instructions.
4629   // Return the related handler.
4630   if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
4631       isa<ZExtInst>(ExtOpnd))
4632     return promoteOperandForTruncAndAnyExt;
4633 
4634   // Regular instruction.
4635   // Abort early if we will have to insert non-free instructions.
4636   if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
4637     return nullptr;
4638   return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
4639 }
4640 
4641 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
4642     Instruction *SExt, TypePromotionTransaction &TPT,
4643     InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4644     SmallVectorImpl<Instruction *> *Exts,
4645     SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4646   // By construction, the operand of SExt is an instruction. Otherwise we cannot
4647   // get through it and this method should not be called.
4648   Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
4649   Value *ExtVal = SExt;
4650   bool HasMergedNonFreeExt = false;
4651   if (isa<ZExtInst>(SExtOpnd)) {
4652     // Replace s|zext(zext(opnd))
4653     // => zext(opnd).
4654     HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
4655     Value *ZExt =
4656         TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
4657     TPT.replaceAllUsesWith(SExt, ZExt);
4658     TPT.eraseInstruction(SExt);
4659     ExtVal = ZExt;
4660   } else {
4661     // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
4662     // => z|sext(opnd).
4663     TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
4664   }
4665   CreatedInstsCost = 0;
4666 
4667   // Remove dead code.
4668   if (SExtOpnd->use_empty())
4669     TPT.eraseInstruction(SExtOpnd);
4670 
4671   // Check if the extension is still needed.
4672   Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
4673   if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
4674     if (ExtInst) {
4675       if (Exts)
4676         Exts->push_back(ExtInst);
4677       CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
4678     }
4679     return ExtVal;
4680   }
4681 
4682   // At this point we have: ext ty opnd to ty.
4683   // Reassign the uses of ExtInst to the opnd and remove ExtInst.
4684   Value *NextVal = ExtInst->getOperand(0);
4685   TPT.eraseInstruction(ExtInst, NextVal);
4686   return NextVal;
4687 }
4688 
4689 Value *TypePromotionHelper::promoteOperandForOther(
4690     Instruction *Ext, TypePromotionTransaction &TPT,
4691     InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4692     SmallVectorImpl<Instruction *> *Exts,
4693     SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
4694     bool IsSExt) {
4695   // By construction, the operand of Ext is an instruction. Otherwise we cannot
4696   // get through it and this method should not be called.
4697   Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
4698   CreatedInstsCost = 0;
4699   if (!ExtOpnd->hasOneUse()) {
4700     // ExtOpnd will be promoted.
4701     // All its uses, but Ext, will need to use a truncated value of the
4702     // promoted version.
4703     // Create the truncate now.
4704     Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
4705     if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
4706       // Insert it just after the definition.
4707       ITrunc->moveAfter(ExtOpnd);
4708       if (Truncs)
4709         Truncs->push_back(ITrunc);
4710     }
4711 
4712     TPT.replaceAllUsesWith(ExtOpnd, Trunc);
4713     // Restore the operand of Ext (which has been replaced by the previous call
4714     // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
4715     TPT.setOperand(Ext, 0, ExtOpnd);
4716   }
4717 
4718   // Get through the Instruction:
4719   // 1. Update its type.
4720   // 2. Replace the uses of Ext by Inst.
4721   // 3. Extend each operand that needs to be extended.
4722 
4723   // Remember the original type of the instruction before promotion.
4724   // This is useful to know that the high bits are sign extended bits.
4725   addPromotedInst(PromotedInsts, ExtOpnd, IsSExt);
4726   // Step #1.
4727   TPT.mutateType(ExtOpnd, Ext->getType());
4728   // Step #2.
4729   TPT.replaceAllUsesWith(Ext, ExtOpnd);
4730   // Step #3.
4731   LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n");
4732   for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
4733        ++OpIdx) {
4734     LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
4735     if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
4736         !shouldExtOperand(ExtOpnd, OpIdx)) {
4737       LLVM_DEBUG(dbgs() << "No need to propagate\n");
4738       continue;
4739     }
4740     // Check if we can statically extend the operand.
4741     Value *Opnd = ExtOpnd->getOperand(OpIdx);
4742     if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
4743       LLVM_DEBUG(dbgs() << "Statically extend\n");
4744       unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
4745       APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
4746                             : Cst->getValue().zext(BitWidth);
4747       TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
4748       continue;
4749     }
4750     // UndefValue are typed, so we have to statically sign extend them.
4751     if (isa<UndefValue>(Opnd)) {
4752       LLVM_DEBUG(dbgs() << "Statically extend\n");
4753       TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
4754       continue;
4755     }
4756 
4757     // Otherwise we have to explicitly sign extend the operand.
4758     Value *ValForExtOpnd = IsSExt
4759                                ? TPT.createSExt(ExtOpnd, Opnd, Ext->getType())
4760                                : TPT.createZExt(ExtOpnd, Opnd, Ext->getType());
4761     TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
4762     Instruction *InstForExtOpnd = dyn_cast<Instruction>(ValForExtOpnd);
4763     if (!InstForExtOpnd)
4764       continue;
4765 
4766     if (Exts)
4767       Exts->push_back(InstForExtOpnd);
4768 
4769     CreatedInstsCost += !TLI.isExtFree(InstForExtOpnd);
4770   }
4771   LLVM_DEBUG(dbgs() << "Extension is useless now\n");
4772   TPT.eraseInstruction(Ext);
4773   return ExtOpnd;
4774 }
4775 
4776 /// Check whether or not promoting an instruction to a wider type is profitable.
4777 /// \p NewCost gives the cost of extension instructions created by the
4778 /// promotion.
4779 /// \p OldCost gives the cost of extension instructions before the promotion
4780 /// plus the number of instructions that have been
4781 /// matched in the addressing mode the promotion.
4782 /// \p PromotedOperand is the value that has been promoted.
4783 /// \return True if the promotion is profitable, false otherwise.
4784 bool AddressingModeMatcher::isPromotionProfitable(
4785     unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
4786   LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost
4787                     << '\n');
4788   // The cost of the new extensions is greater than the cost of the
4789   // old extension plus what we folded.
4790   // This is not profitable.
4791   if (NewCost > OldCost)
4792     return false;
4793   if (NewCost < OldCost)
4794     return true;
4795   // The promotion is neutral but it may help folding the sign extension in
4796   // loads for instance.
4797   // Check that we did not create an illegal instruction.
4798   return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
4799 }
4800 
4801 /// Given an instruction or constant expr, see if we can fold the operation
4802 /// into the addressing mode. If so, update the addressing mode and return
4803 /// true, otherwise return false without modifying AddrMode.
4804 /// If \p MovedAway is not NULL, it contains the information of whether or
4805 /// not AddrInst has to be folded into the addressing mode on success.
4806 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing
4807 /// because it has been moved away.
4808 /// Thus AddrInst must not be added in the matched instructions.
4809 /// This state can happen when AddrInst is a sext, since it may be moved away.
4810 /// Therefore, AddrInst may not be valid when MovedAway is true and it must
4811 /// not be referenced anymore.
4812 bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
4813                                                unsigned Depth,
4814                                                bool *MovedAway) {
4815   // Avoid exponential behavior on extremely deep expression trees.
4816   if (Depth >= 5)
4817     return false;
4818 
4819   // By default, all matched instructions stay in place.
4820   if (MovedAway)
4821     *MovedAway = false;
4822 
4823   switch (Opcode) {
4824   case Instruction::PtrToInt:
4825     // PtrToInt is always a noop, as we know that the int type is pointer sized.
4826     return matchAddr(AddrInst->getOperand(0), Depth);
4827   case Instruction::IntToPtr: {
4828     auto AS = AddrInst->getType()->getPointerAddressSpace();
4829     auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
4830     // This inttoptr is a no-op if the integer type is pointer sized.
4831     if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
4832       return matchAddr(AddrInst->getOperand(0), Depth);
4833     return false;
4834   }
4835   case Instruction::BitCast:
4836     // BitCast is always a noop, and we can handle it as long as it is
4837     // int->int or pointer->pointer (we don't want int<->fp or something).
4838     if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() &&
4839         // Don't touch identity bitcasts.  These were probably put here by LSR,
4840         // and we don't want to mess around with them.  Assume it knows what it
4841         // is doing.
4842         AddrInst->getOperand(0)->getType() != AddrInst->getType())
4843       return matchAddr(AddrInst->getOperand(0), Depth);
4844     return false;
4845   case Instruction::AddrSpaceCast: {
4846     unsigned SrcAS =
4847         AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
4848     unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
4849     if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS))
4850       return matchAddr(AddrInst->getOperand(0), Depth);
4851     return false;
4852   }
4853   case Instruction::Add: {
4854     // Check to see if we can merge in one operand, then the other.  If so, we
4855     // win.
4856     ExtAddrMode BackupAddrMode = AddrMode;
4857     unsigned OldSize = AddrModeInsts.size();
4858     // Start a transaction at this point.
4859     // The LHS may match but not the RHS.
4860     // Therefore, we need a higher level restoration point to undo partially
4861     // matched operation.
4862     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4863         TPT.getRestorationPoint();
4864 
4865     // Try to match an integer constant second to increase its chance of ending
4866     // up in `BaseOffs`, resp. decrease its chance of ending up in `BaseReg`.
4867     int First = 0, Second = 1;
4868     if (isa<ConstantInt>(AddrInst->getOperand(First))
4869       && !isa<ConstantInt>(AddrInst->getOperand(Second)))
4870         std::swap(First, Second);
4871     AddrMode.InBounds = false;
4872     if (matchAddr(AddrInst->getOperand(First), Depth + 1) &&
4873         matchAddr(AddrInst->getOperand(Second), Depth + 1))
4874       return true;
4875 
4876     // Restore the old addr mode info.
4877     AddrMode = BackupAddrMode;
4878     AddrModeInsts.resize(OldSize);
4879     TPT.rollback(LastKnownGood);
4880 
4881     // Otherwise this was over-aggressive.  Try merging operands in the opposite
4882     // order.
4883     if (matchAddr(AddrInst->getOperand(Second), Depth + 1) &&
4884         matchAddr(AddrInst->getOperand(First), Depth + 1))
4885       return true;
4886 
4887     // Otherwise we definitely can't merge the ADD in.
4888     AddrMode = BackupAddrMode;
4889     AddrModeInsts.resize(OldSize);
4890     TPT.rollback(LastKnownGood);
4891     break;
4892   }
4893   // case Instruction::Or:
4894   //  TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
4895   // break;
4896   case Instruction::Mul:
4897   case Instruction::Shl: {
4898     // Can only handle X*C and X << C.
4899     AddrMode.InBounds = false;
4900     ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
4901     if (!RHS || RHS->getBitWidth() > 64)
4902       return false;
4903     int64_t Scale = Opcode == Instruction::Shl
4904                         ? 1LL << RHS->getLimitedValue(RHS->getBitWidth() - 1)
4905                         : RHS->getSExtValue();
4906 
4907     return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
4908   }
4909   case Instruction::GetElementPtr: {
4910     // Scan the GEP.  We check it if it contains constant offsets and at most
4911     // one variable offset.
4912     int VariableOperand = -1;
4913     unsigned VariableScale = 0;
4914 
4915     int64_t ConstantOffset = 0;
4916     gep_type_iterator GTI = gep_type_begin(AddrInst);
4917     for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
4918       if (StructType *STy = GTI.getStructTypeOrNull()) {
4919         const StructLayout *SL = DL.getStructLayout(STy);
4920         unsigned Idx =
4921             cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
4922         ConstantOffset += SL->getElementOffset(Idx);
4923       } else {
4924         TypeSize TS = GTI.getSequentialElementStride(DL);
4925         if (TS.isNonZero()) {
4926           // The optimisations below currently only work for fixed offsets.
4927           if (TS.isScalable())
4928             return false;
4929           int64_t TypeSize = TS.getFixedValue();
4930           if (ConstantInt *CI =
4931                   dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
4932             const APInt &CVal = CI->getValue();
4933             if (CVal.getSignificantBits() <= 64) {
4934               ConstantOffset += CVal.getSExtValue() * TypeSize;
4935               continue;
4936             }
4937           }
4938           // We only allow one variable index at the moment.
4939           if (VariableOperand != -1)
4940             return false;
4941 
4942           // Remember the variable index.
4943           VariableOperand = i;
4944           VariableScale = TypeSize;
4945         }
4946       }
4947     }
4948 
4949     // A common case is for the GEP to only do a constant offset.  In this case,
4950     // just add it to the disp field and check validity.
4951     if (VariableOperand == -1) {
4952       AddrMode.BaseOffs += ConstantOffset;
4953       if (matchAddr(AddrInst->getOperand(0), Depth + 1)) {
4954           if (!cast<GEPOperator>(AddrInst)->isInBounds())
4955             AddrMode.InBounds = false;
4956           return true;
4957       }
4958       AddrMode.BaseOffs -= ConstantOffset;
4959 
4960       if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) &&
4961           TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 &&
4962           ConstantOffset > 0) {
4963           // Record GEPs with non-zero offsets as candidates for splitting in
4964           // the event that the offset cannot fit into the r+i addressing mode.
4965           // Simple and common case that only one GEP is used in calculating the
4966           // address for the memory access.
4967           Value *Base = AddrInst->getOperand(0);
4968           auto *BaseI = dyn_cast<Instruction>(Base);
4969           auto *GEP = cast<GetElementPtrInst>(AddrInst);
4970           if (isa<Argument>(Base) || isa<GlobalValue>(Base) ||
4971               (BaseI && !isa<CastInst>(BaseI) &&
4972                !isa<GetElementPtrInst>(BaseI))) {
4973             // Make sure the parent block allows inserting non-PHI instructions
4974             // before the terminator.
4975             BasicBlock *Parent = BaseI ? BaseI->getParent()
4976                                        : &GEP->getFunction()->getEntryBlock();
4977             if (!Parent->getTerminator()->isEHPad())
4978             LargeOffsetGEP = std::make_pair(GEP, ConstantOffset);
4979           }
4980       }
4981 
4982       return false;
4983     }
4984 
4985     // Save the valid addressing mode in case we can't match.
4986     ExtAddrMode BackupAddrMode = AddrMode;
4987     unsigned OldSize = AddrModeInsts.size();
4988 
4989     // See if the scale and offset amount is valid for this target.
4990     AddrMode.BaseOffs += ConstantOffset;
4991     if (!cast<GEPOperator>(AddrInst)->isInBounds())
4992       AddrMode.InBounds = false;
4993 
4994     // Match the base operand of the GEP.
4995     if (!matchAddr(AddrInst->getOperand(0), Depth + 1)) {
4996       // If it couldn't be matched, just stuff the value in a register.
4997       if (AddrMode.HasBaseReg) {
4998         AddrMode = BackupAddrMode;
4999         AddrModeInsts.resize(OldSize);
5000         return false;
5001       }
5002       AddrMode.HasBaseReg = true;
5003       AddrMode.BaseReg = AddrInst->getOperand(0);
5004     }
5005 
5006     // Match the remaining variable portion of the GEP.
5007     if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
5008                           Depth)) {
5009       // If it couldn't be matched, try stuffing the base into a register
5010       // instead of matching it, and retrying the match of the scale.
5011       AddrMode = BackupAddrMode;
5012       AddrModeInsts.resize(OldSize);
5013       if (AddrMode.HasBaseReg)
5014         return false;
5015       AddrMode.HasBaseReg = true;
5016       AddrMode.BaseReg = AddrInst->getOperand(0);
5017       AddrMode.BaseOffs += ConstantOffset;
5018       if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
5019                             VariableScale, Depth)) {
5020         // If even that didn't work, bail.
5021         AddrMode = BackupAddrMode;
5022         AddrModeInsts.resize(OldSize);
5023         return false;
5024       }
5025     }
5026 
5027     return true;
5028   }
5029   case Instruction::SExt:
5030   case Instruction::ZExt: {
5031     Instruction *Ext = dyn_cast<Instruction>(AddrInst);
5032     if (!Ext)
5033       return false;
5034 
5035     // Try to move this ext out of the way of the addressing mode.
5036     // Ask for a method for doing so.
5037     TypePromotionHelper::Action TPH =
5038         TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
5039     if (!TPH)
5040       return false;
5041 
5042     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5043         TPT.getRestorationPoint();
5044     unsigned CreatedInstsCost = 0;
5045     unsigned ExtCost = !TLI.isExtFree(Ext);
5046     Value *PromotedOperand =
5047         TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
5048     // SExt has been moved away.
5049     // Thus either it will be rematched later in the recursive calls or it is
5050     // gone. Anyway, we must not fold it into the addressing mode at this point.
5051     // E.g.,
5052     // op = add opnd, 1
5053     // idx = ext op
5054     // addr = gep base, idx
5055     // is now:
5056     // promotedOpnd = ext opnd            <- no match here
5057     // op = promoted_add promotedOpnd, 1  <- match (later in recursive calls)
5058     // addr = gep base, op                <- match
5059     if (MovedAway)
5060       *MovedAway = true;
5061 
5062     assert(PromotedOperand &&
5063            "TypePromotionHelper should have filtered out those cases");
5064 
5065     ExtAddrMode BackupAddrMode = AddrMode;
5066     unsigned OldSize = AddrModeInsts.size();
5067 
5068     if (!matchAddr(PromotedOperand, Depth) ||
5069         // The total of the new cost is equal to the cost of the created
5070         // instructions.
5071         // The total of the old cost is equal to the cost of the extension plus
5072         // what we have saved in the addressing mode.
5073         !isPromotionProfitable(CreatedInstsCost,
5074                                ExtCost + (AddrModeInsts.size() - OldSize),
5075                                PromotedOperand)) {
5076       AddrMode = BackupAddrMode;
5077       AddrModeInsts.resize(OldSize);
5078       LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
5079       TPT.rollback(LastKnownGood);
5080       return false;
5081     }
5082     return true;
5083   }
5084   case Instruction::Call:
5085     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(AddrInst)) {
5086       if (II->getIntrinsicID() == Intrinsic::threadlocal_address) {
5087         GlobalValue &GV = cast<GlobalValue>(*II->getArgOperand(0));
5088         if (TLI.addressingModeSupportsTLS(GV))
5089           return matchAddr(AddrInst->getOperand(0), Depth);
5090       }
5091     }
5092     break;
5093   }
5094   return false;
5095 }
5096 
5097 /// If we can, try to add the value of 'Addr' into the current addressing mode.
5098 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode
5099 /// unmodified. This assumes that Addr is either a pointer type or intptr_t
5100 /// for the target.
5101 ///
5102 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
5103   // Start a transaction at this point that we will rollback if the matching
5104   // fails.
5105   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5106       TPT.getRestorationPoint();
5107   if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
5108     if (CI->getValue().isSignedIntN(64)) {
5109       // Fold in immediates if legal for the target.
5110       AddrMode.BaseOffs += CI->getSExtValue();
5111       if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5112         return true;
5113       AddrMode.BaseOffs -= CI->getSExtValue();
5114     }
5115   } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
5116     // If this is a global variable, try to fold it into the addressing mode.
5117     if (!AddrMode.BaseGV) {
5118       AddrMode.BaseGV = GV;
5119       if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5120         return true;
5121       AddrMode.BaseGV = nullptr;
5122     }
5123   } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
5124     ExtAddrMode BackupAddrMode = AddrMode;
5125     unsigned OldSize = AddrModeInsts.size();
5126 
5127     // Check to see if it is possible to fold this operation.
5128     bool MovedAway = false;
5129     if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
5130       // This instruction may have been moved away. If so, there is nothing
5131       // to check here.
5132       if (MovedAway)
5133         return true;
5134       // Okay, it's possible to fold this.  Check to see if it is actually
5135       // *profitable* to do so.  We use a simple cost model to avoid increasing
5136       // register pressure too much.
5137       if (I->hasOneUse() ||
5138           isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
5139         AddrModeInsts.push_back(I);
5140         return true;
5141       }
5142 
5143       // It isn't profitable to do this, roll back.
5144       AddrMode = BackupAddrMode;
5145       AddrModeInsts.resize(OldSize);
5146       TPT.rollback(LastKnownGood);
5147     }
5148   } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
5149     if (matchOperationAddr(CE, CE->getOpcode(), Depth))
5150       return true;
5151     TPT.rollback(LastKnownGood);
5152   } else if (isa<ConstantPointerNull>(Addr)) {
5153     // Null pointer gets folded without affecting the addressing mode.
5154     return true;
5155   }
5156 
5157   // Worse case, the target should support [reg] addressing modes. :)
5158   if (!AddrMode.HasBaseReg) {
5159     AddrMode.HasBaseReg = true;
5160     AddrMode.BaseReg = Addr;
5161     // Still check for legality in case the target supports [imm] but not [i+r].
5162     if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5163       return true;
5164     AddrMode.HasBaseReg = false;
5165     AddrMode.BaseReg = nullptr;
5166   }
5167 
5168   // If the base register is already taken, see if we can do [r+r].
5169   if (AddrMode.Scale == 0) {
5170     AddrMode.Scale = 1;
5171     AddrMode.ScaledReg = Addr;
5172     if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5173       return true;
5174     AddrMode.Scale = 0;
5175     AddrMode.ScaledReg = nullptr;
5176   }
5177   // Couldn't match.
5178   TPT.rollback(LastKnownGood);
5179   return false;
5180 }
5181 
5182 /// Check to see if all uses of OpVal by the specified inline asm call are due
5183 /// to memory operands. If so, return true, otherwise return false.
5184 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
5185                                     const TargetLowering &TLI,
5186                                     const TargetRegisterInfo &TRI) {
5187   const Function *F = CI->getFunction();
5188   TargetLowering::AsmOperandInfoVector TargetConstraints =
5189       TLI.ParseConstraints(F->getDataLayout(), &TRI, *CI);
5190 
5191   for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
5192     // Compute the constraint code and ConstraintType to use.
5193     TLI.ComputeConstraintToUse(OpInfo, SDValue());
5194 
5195     // If this asm operand is our Value*, and if it isn't an indirect memory
5196     // operand, we can't fold it!  TODO: Also handle C_Address?
5197     if (OpInfo.CallOperandVal == OpVal &&
5198         (OpInfo.ConstraintType != TargetLowering::C_Memory ||
5199          !OpInfo.isIndirect))
5200       return false;
5201   }
5202 
5203   return true;
5204 }
5205 
5206 /// Recursively walk all the uses of I until we find a memory use.
5207 /// If we find an obviously non-foldable instruction, return true.
5208 /// Add accessed addresses and types to MemoryUses.
5209 static bool FindAllMemoryUses(
5210     Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses,
5211     SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
5212     const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI,
5213     BlockFrequencyInfo *BFI, unsigned &SeenInsts) {
5214   // If we already considered this instruction, we're done.
5215   if (!ConsideredInsts.insert(I).second)
5216     return false;
5217 
5218   // If this is an obviously unfoldable instruction, bail out.
5219   if (!MightBeFoldableInst(I))
5220     return true;
5221 
5222   // Loop over all the uses, recursively processing them.
5223   for (Use &U : I->uses()) {
5224     // Conservatively return true if we're seeing a large number or a deep chain
5225     // of users. This avoids excessive compilation times in pathological cases.
5226     if (SeenInsts++ >= MaxAddressUsersToScan)
5227       return true;
5228 
5229     Instruction *UserI = cast<Instruction>(U.getUser());
5230     if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
5231       MemoryUses.push_back({&U, LI->getType()});
5232       continue;
5233     }
5234 
5235     if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
5236       if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
5237         return true; // Storing addr, not into addr.
5238       MemoryUses.push_back({&U, SI->getValueOperand()->getType()});
5239       continue;
5240     }
5241 
5242     if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
5243       if (U.getOperandNo() != AtomicRMWInst::getPointerOperandIndex())
5244         return true; // Storing addr, not into addr.
5245       MemoryUses.push_back({&U, RMW->getValOperand()->getType()});
5246       continue;
5247     }
5248 
5249     if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
5250       if (U.getOperandNo() != AtomicCmpXchgInst::getPointerOperandIndex())
5251         return true; // Storing addr, not into addr.
5252       MemoryUses.push_back({&U, CmpX->getCompareOperand()->getType()});
5253       continue;
5254     }
5255 
5256     if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
5257       if (CI->hasFnAttr(Attribute::Cold)) {
5258         // If this is a cold call, we can sink the addressing calculation into
5259         // the cold path.  See optimizeCallInst
5260         bool OptForSize =
5261             OptSize || llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
5262         if (!OptForSize)
5263           continue;
5264       }
5265 
5266       InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand());
5267       if (!IA)
5268         return true;
5269 
5270       // If this is a memory operand, we're cool, otherwise bail out.
5271       if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
5272         return true;
5273       continue;
5274     }
5275 
5276     if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
5277                           PSI, BFI, SeenInsts))
5278       return true;
5279   }
5280 
5281   return false;
5282 }
5283 
5284 static bool FindAllMemoryUses(
5285     Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses,
5286     const TargetLowering &TLI, const TargetRegisterInfo &TRI, bool OptSize,
5287     ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
5288   unsigned SeenInsts = 0;
5289   SmallPtrSet<Instruction *, 16> ConsideredInsts;
5290   return FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
5291                            PSI, BFI, SeenInsts);
5292 }
5293 
5294 
5295 /// Return true if Val is already known to be live at the use site that we're
5296 /// folding it into. If so, there is no cost to include it in the addressing
5297 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
5298 /// instruction already.
5299 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,
5300                                                    Value *KnownLive1,
5301                                                    Value *KnownLive2) {
5302   // If Val is either of the known-live values, we know it is live!
5303   if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
5304     return true;
5305 
5306   // All values other than instructions and arguments (e.g. constants) are live.
5307   if (!isa<Instruction>(Val) && !isa<Argument>(Val))
5308     return true;
5309 
5310   // If Val is a constant sized alloca in the entry block, it is live, this is
5311   // true because it is just a reference to the stack/frame pointer, which is
5312   // live for the whole function.
5313   if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
5314     if (AI->isStaticAlloca())
5315       return true;
5316 
5317   // Check to see if this value is already used in the memory instruction's
5318   // block.  If so, it's already live into the block at the very least, so we
5319   // can reasonably fold it.
5320   return Val->isUsedInBasicBlock(MemoryInst->getParent());
5321 }
5322 
5323 /// It is possible for the addressing mode of the machine to fold the specified
5324 /// instruction into a load or store that ultimately uses it.
5325 /// However, the specified instruction has multiple uses.
5326 /// Given this, it may actually increase register pressure to fold it
5327 /// into the load. For example, consider this code:
5328 ///
5329 ///     X = ...
5330 ///     Y = X+1
5331 ///     use(Y)   -> nonload/store
5332 ///     Z = Y+1
5333 ///     load Z
5334 ///
5335 /// In this case, Y has multiple uses, and can be folded into the load of Z
5336 /// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1" to
5337 /// be live at the use(Y) line.  If we don't fold Y into load Z, we use one
5338 /// fewer register.  Since Y can't be folded into "use(Y)" we don't increase the
5339 /// number of computations either.
5340 ///
5341 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
5342 /// X was live across 'load Z' for other reasons, we actually *would* want to
5343 /// fold the addressing mode in the Z case.  This would make Y die earlier.
5344 bool AddressingModeMatcher::isProfitableToFoldIntoAddressingMode(
5345     Instruction *I, ExtAddrMode &AMBefore, ExtAddrMode &AMAfter) {
5346   if (IgnoreProfitability)
5347     return true;
5348 
5349   // AMBefore is the addressing mode before this instruction was folded into it,
5350   // and AMAfter is the addressing mode after the instruction was folded.  Get
5351   // the set of registers referenced by AMAfter and subtract out those
5352   // referenced by AMBefore: this is the set of values which folding in this
5353   // address extends the lifetime of.
5354   //
5355   // Note that there are only two potential values being referenced here,
5356   // BaseReg and ScaleReg (global addresses are always available, as are any
5357   // folded immediates).
5358   Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
5359 
5360   // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
5361   // lifetime wasn't extended by adding this instruction.
5362   if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
5363     BaseReg = nullptr;
5364   if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
5365     ScaledReg = nullptr;
5366 
5367   // If folding this instruction (and it's subexprs) didn't extend any live
5368   // ranges, we're ok with it.
5369   if (!BaseReg && !ScaledReg)
5370     return true;
5371 
5372   // If all uses of this instruction can have the address mode sunk into them,
5373   // we can remove the addressing mode and effectively trade one live register
5374   // for another (at worst.)  In this context, folding an addressing mode into
5375   // the use is just a particularly nice way of sinking it.
5376   SmallVector<std::pair<Use *, Type *>, 16> MemoryUses;
5377   if (FindAllMemoryUses(I, MemoryUses, TLI, TRI, OptSize, PSI, BFI))
5378     return false; // Has a non-memory, non-foldable use!
5379 
5380   // Now that we know that all uses of this instruction are part of a chain of
5381   // computation involving only operations that could theoretically be folded
5382   // into a memory use, loop over each of these memory operation uses and see
5383   // if they could  *actually* fold the instruction.  The assumption is that
5384   // addressing modes are cheap and that duplicating the computation involved
5385   // many times is worthwhile, even on a fastpath. For sinking candidates
5386   // (i.e. cold call sites), this serves as a way to prevent excessive code
5387   // growth since most architectures have some reasonable small and fast way to
5388   // compute an effective address.  (i.e LEA on x86)
5389   SmallVector<Instruction *, 32> MatchedAddrModeInsts;
5390   for (const std::pair<Use *, Type *> &Pair : MemoryUses) {
5391     Value *Address = Pair.first->get();
5392     Instruction *UserI = cast<Instruction>(Pair.first->getUser());
5393     Type *AddressAccessTy = Pair.second;
5394     unsigned AS = Address->getType()->getPointerAddressSpace();
5395 
5396     // Do a match against the root of this address, ignoring profitability. This
5397     // will tell us if the addressing mode for the memory operation will
5398     // *actually* cover the shared instruction.
5399     ExtAddrMode Result;
5400     std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5401                                                                       0);
5402     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5403         TPT.getRestorationPoint();
5404     AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, LI, getDTFn,
5405                                   AddressAccessTy, AS, UserI, Result,
5406                                   InsertedInsts, PromotedInsts, TPT,
5407                                   LargeOffsetGEP, OptSize, PSI, BFI);
5408     Matcher.IgnoreProfitability = true;
5409     bool Success = Matcher.matchAddr(Address, 0);
5410     (void)Success;
5411     assert(Success && "Couldn't select *anything*?");
5412 
5413     // The match was to check the profitability, the changes made are not
5414     // part of the original matcher. Therefore, they should be dropped
5415     // otherwise the original matcher will not present the right state.
5416     TPT.rollback(LastKnownGood);
5417 
5418     // If the match didn't cover I, then it won't be shared by it.
5419     if (!is_contained(MatchedAddrModeInsts, I))
5420       return false;
5421 
5422     MatchedAddrModeInsts.clear();
5423   }
5424 
5425   return true;
5426 }
5427 
5428 /// Return true if the specified values are defined in a
5429 /// different basic block than BB.
5430 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
5431   if (Instruction *I = dyn_cast<Instruction>(V))
5432     return I->getParent() != BB;
5433   return false;
5434 }
5435 
5436 /// Sink addressing mode computation immediate before MemoryInst if doing so
5437 /// can be done without increasing register pressure.  The need for the
5438 /// register pressure constraint means this can end up being an all or nothing
5439 /// decision for all uses of the same addressing computation.
5440 ///
5441 /// Load and Store Instructions often have addressing modes that can do
5442 /// significant amounts of computation. As such, instruction selection will try
5443 /// to get the load or store to do as much computation as possible for the
5444 /// program. The problem is that isel can only see within a single block. As
5445 /// such, we sink as much legal addressing mode work into the block as possible.
5446 ///
5447 /// This method is used to optimize both load/store and inline asms with memory
5448 /// operands.  It's also used to sink addressing computations feeding into cold
5449 /// call sites into their (cold) basic block.
5450 ///
5451 /// The motivation for handling sinking into cold blocks is that doing so can
5452 /// both enable other address mode sinking (by satisfying the register pressure
5453 /// constraint above), and reduce register pressure globally (by removing the
5454 /// addressing mode computation from the fast path entirely.).
5455 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
5456                                         Type *AccessTy, unsigned AddrSpace) {
5457   Value *Repl = Addr;
5458 
5459   // Try to collapse single-value PHI nodes.  This is necessary to undo
5460   // unprofitable PRE transformations.
5461   SmallVector<Value *, 8> worklist;
5462   SmallPtrSet<Value *, 16> Visited;
5463   worklist.push_back(Addr);
5464 
5465   // Use a worklist to iteratively look through PHI and select nodes, and
5466   // ensure that the addressing mode obtained from the non-PHI/select roots of
5467   // the graph are compatible.
5468   bool PhiOrSelectSeen = false;
5469   SmallVector<Instruction *, 16> AddrModeInsts;
5470   const SimplifyQuery SQ(*DL, TLInfo);
5471   AddressingModeCombiner AddrModes(SQ, Addr);
5472   TypePromotionTransaction TPT(RemovedInsts);
5473   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5474       TPT.getRestorationPoint();
5475   while (!worklist.empty()) {
5476     Value *V = worklist.pop_back_val();
5477 
5478     // We allow traversing cyclic Phi nodes.
5479     // In case of success after this loop we ensure that traversing through
5480     // Phi nodes ends up with all cases to compute address of the form
5481     //    BaseGV + Base + Scale * Index + Offset
5482     // where Scale and Offset are constans and BaseGV, Base and Index
5483     // are exactly the same Values in all cases.
5484     // It means that BaseGV, Scale and Offset dominate our memory instruction
5485     // and have the same value as they had in address computation represented
5486     // as Phi. So we can safely sink address computation to memory instruction.
5487     if (!Visited.insert(V).second)
5488       continue;
5489 
5490     // For a PHI node, push all of its incoming values.
5491     if (PHINode *P = dyn_cast<PHINode>(V)) {
5492       append_range(worklist, P->incoming_values());
5493       PhiOrSelectSeen = true;
5494       continue;
5495     }
5496     // Similar for select.
5497     if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
5498       worklist.push_back(SI->getFalseValue());
5499       worklist.push_back(SI->getTrueValue());
5500       PhiOrSelectSeen = true;
5501       continue;
5502     }
5503 
5504     // For non-PHIs, determine the addressing mode being computed.  Note that
5505     // the result may differ depending on what other uses our candidate
5506     // addressing instructions might have.
5507     AddrModeInsts.clear();
5508     std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5509                                                                       0);
5510     // Defer the query (and possible computation of) the dom tree to point of
5511     // actual use.  It's expected that most address matches don't actually need
5512     // the domtree.
5513     auto getDTFn = [MemoryInst, this]() -> const DominatorTree & {
5514       Function *F = MemoryInst->getParent()->getParent();
5515       return this->getDT(*F);
5516     };
5517     ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
5518         V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI, getDTFn,
5519         *TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
5520         BFI.get());
5521 
5522     GetElementPtrInst *GEP = LargeOffsetGEP.first;
5523     if (GEP && !NewGEPBases.count(GEP)) {
5524       // If splitting the underlying data structure can reduce the offset of a
5525       // GEP, collect the GEP.  Skip the GEPs that are the new bases of
5526       // previously split data structures.
5527       LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
5528       LargeOffsetGEPID.insert(std::make_pair(GEP, LargeOffsetGEPID.size()));
5529     }
5530 
5531     NewAddrMode.OriginalValue = V;
5532     if (!AddrModes.addNewAddrMode(NewAddrMode))
5533       break;
5534   }
5535 
5536   // Try to combine the AddrModes we've collected. If we couldn't collect any,
5537   // or we have multiple but either couldn't combine them or combining them
5538   // wouldn't do anything useful, bail out now.
5539   if (!AddrModes.combineAddrModes()) {
5540     TPT.rollback(LastKnownGood);
5541     return false;
5542   }
5543   bool Modified = TPT.commit();
5544 
5545   // Get the combined AddrMode (or the only AddrMode, if we only had one).
5546   ExtAddrMode AddrMode = AddrModes.getAddrMode();
5547 
5548   // If all the instructions matched are already in this BB, don't do anything.
5549   // If we saw a Phi node then it is not local definitely, and if we saw a
5550   // select then we want to push the address calculation past it even if it's
5551   // already in this BB.
5552   if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
5553         return IsNonLocalValue(V, MemoryInst->getParent());
5554       })) {
5555     LLVM_DEBUG(dbgs() << "CGP: Found      local addrmode: " << AddrMode
5556                       << "\n");
5557     return Modified;
5558   }
5559 
5560   // Insert this computation right after this user.  Since our caller is
5561   // scanning from the top of the BB to the bottom, reuse of the expr are
5562   // guaranteed to happen later.
5563   IRBuilder<> Builder(MemoryInst);
5564 
5565   // Now that we determined the addressing expression we want to use and know
5566   // that we have to sink it into this block.  Check to see if we have already
5567   // done this for some other load/store instr in this block.  If so, reuse
5568   // the computation.  Before attempting reuse, check if the address is valid
5569   // as it may have been erased.
5570 
5571   WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];
5572 
5573   Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
5574   Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
5575   if (SunkAddr) {
5576     LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
5577                       << " for " << *MemoryInst << "\n");
5578     if (SunkAddr->getType() != Addr->getType()) {
5579       if (SunkAddr->getType()->getPointerAddressSpace() !=
5580               Addr->getType()->getPointerAddressSpace() &&
5581           !DL->isNonIntegralPointerType(Addr->getType())) {
5582         // There are two reasons the address spaces might not match: a no-op
5583         // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
5584         // ptrtoint/inttoptr pair to ensure we match the original semantics.
5585         // TODO: allow bitcast between different address space pointers with the
5586         // same size.
5587         SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
5588         SunkAddr =
5589             Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
5590       } else
5591         SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5592     }
5593   } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
5594                                    SubtargetInfo->addrSinkUsingGEPs())) {
5595     // By default, we use the GEP-based method when AA is used later. This
5596     // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
5597     LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5598                       << " for " << *MemoryInst << "\n");
5599     Value *ResultPtr = nullptr, *ResultIndex = nullptr;
5600 
5601     // First, find the pointer.
5602     if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
5603       ResultPtr = AddrMode.BaseReg;
5604       AddrMode.BaseReg = nullptr;
5605     }
5606 
5607     if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
5608       // We can't add more than one pointer together, nor can we scale a
5609       // pointer (both of which seem meaningless).
5610       if (ResultPtr || AddrMode.Scale != 1)
5611         return Modified;
5612 
5613       ResultPtr = AddrMode.ScaledReg;
5614       AddrMode.Scale = 0;
5615     }
5616 
5617     // It is only safe to sign extend the BaseReg if we know that the math
5618     // required to create it did not overflow before we extend it. Since
5619     // the original IR value was tossed in favor of a constant back when
5620     // the AddrMode was created we need to bail out gracefully if widths
5621     // do not match instead of extending it.
5622     //
5623     // (See below for code to add the scale.)
5624     if (AddrMode.Scale) {
5625       Type *ScaledRegTy = AddrMode.ScaledReg->getType();
5626       if (cast<IntegerType>(IntPtrTy)->getBitWidth() >
5627           cast<IntegerType>(ScaledRegTy)->getBitWidth())
5628         return Modified;
5629     }
5630 
5631     GlobalValue *BaseGV = AddrMode.BaseGV;
5632     if (BaseGV != nullptr) {
5633       if (ResultPtr)
5634         return Modified;
5635 
5636       if (BaseGV->isThreadLocal()) {
5637         ResultPtr = Builder.CreateThreadLocalAddress(BaseGV);
5638       } else {
5639         ResultPtr = BaseGV;
5640       }
5641     }
5642 
5643     // If the real base value actually came from an inttoptr, then the matcher
5644     // will look through it and provide only the integer value. In that case,
5645     // use it here.
5646     if (!DL->isNonIntegralPointerType(Addr->getType())) {
5647       if (!ResultPtr && AddrMode.BaseReg) {
5648         ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(),
5649                                            "sunkaddr");
5650         AddrMode.BaseReg = nullptr;
5651       } else if (!ResultPtr && AddrMode.Scale == 1) {
5652         ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(),
5653                                            "sunkaddr");
5654         AddrMode.Scale = 0;
5655       }
5656     }
5657 
5658     if (!ResultPtr && !AddrMode.BaseReg && !AddrMode.Scale &&
5659         !AddrMode.BaseOffs) {
5660       SunkAddr = Constant::getNullValue(Addr->getType());
5661     } else if (!ResultPtr) {
5662       return Modified;
5663     } else {
5664       Type *I8PtrTy =
5665           Builder.getPtrTy(Addr->getType()->getPointerAddressSpace());
5666 
5667       // Start with the base register. Do this first so that subsequent address
5668       // matching finds it last, which will prevent it from trying to match it
5669       // as the scaled value in case it happens to be a mul. That would be
5670       // problematic if we've sunk a different mul for the scale, because then
5671       // we'd end up sinking both muls.
5672       if (AddrMode.BaseReg) {
5673         Value *V = AddrMode.BaseReg;
5674         if (V->getType() != IntPtrTy)
5675           V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
5676 
5677         ResultIndex = V;
5678       }
5679 
5680       // Add the scale value.
5681       if (AddrMode.Scale) {
5682         Value *V = AddrMode.ScaledReg;
5683         if (V->getType() == IntPtrTy) {
5684           // done.
5685         } else {
5686           assert(cast<IntegerType>(IntPtrTy)->getBitWidth() <
5687                      cast<IntegerType>(V->getType())->getBitWidth() &&
5688                  "We can't transform if ScaledReg is too narrow");
5689           V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
5690         }
5691 
5692         if (AddrMode.Scale != 1)
5693           V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
5694                                 "sunkaddr");
5695         if (ResultIndex)
5696           ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
5697         else
5698           ResultIndex = V;
5699       }
5700 
5701       // Add in the Base Offset if present.
5702       if (AddrMode.BaseOffs) {
5703         Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
5704         if (ResultIndex) {
5705           // We need to add this separately from the scale above to help with
5706           // SDAG consecutive load/store merging.
5707           if (ResultPtr->getType() != I8PtrTy)
5708             ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
5709           ResultPtr = Builder.CreatePtrAdd(ResultPtr, ResultIndex, "sunkaddr",
5710                                            AddrMode.InBounds);
5711         }
5712 
5713         ResultIndex = V;
5714       }
5715 
5716       if (!ResultIndex) {
5717         SunkAddr = ResultPtr;
5718       } else {
5719         if (ResultPtr->getType() != I8PtrTy)
5720           ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
5721         SunkAddr = Builder.CreatePtrAdd(ResultPtr, ResultIndex, "sunkaddr",
5722                                         AddrMode.InBounds);
5723       }
5724 
5725       if (SunkAddr->getType() != Addr->getType()) {
5726         if (SunkAddr->getType()->getPointerAddressSpace() !=
5727                 Addr->getType()->getPointerAddressSpace() &&
5728             !DL->isNonIntegralPointerType(Addr->getType())) {
5729           // There are two reasons the address spaces might not match: a no-op
5730           // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
5731           // ptrtoint/inttoptr pair to ensure we match the original semantics.
5732           // TODO: allow bitcast between different address space pointers with
5733           // the same size.
5734           SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
5735           SunkAddr =
5736               Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
5737         } else
5738           SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5739       }
5740     }
5741   } else {
5742     // We'd require a ptrtoint/inttoptr down the line, which we can't do for
5743     // non-integral pointers, so in that case bail out now.
5744     Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr;
5745     Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr;
5746     PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy);
5747     PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy);
5748     if (DL->isNonIntegralPointerType(Addr->getType()) ||
5749         (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) ||
5750         (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) ||
5751         (AddrMode.BaseGV &&
5752          DL->isNonIntegralPointerType(AddrMode.BaseGV->getType())))
5753       return Modified;
5754 
5755     LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5756                       << " for " << *MemoryInst << "\n");
5757     Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
5758     Value *Result = nullptr;
5759 
5760     // Start with the base register. Do this first so that subsequent address
5761     // matching finds it last, which will prevent it from trying to match it
5762     // as the scaled value in case it happens to be a mul. That would be
5763     // problematic if we've sunk a different mul for the scale, because then
5764     // we'd end up sinking both muls.
5765     if (AddrMode.BaseReg) {
5766       Value *V = AddrMode.BaseReg;
5767       if (V->getType()->isPointerTy())
5768         V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
5769       if (V->getType() != IntPtrTy)
5770         V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
5771       Result = V;
5772     }
5773 
5774     // Add the scale value.
5775     if (AddrMode.Scale) {
5776       Value *V = AddrMode.ScaledReg;
5777       if (V->getType() == IntPtrTy) {
5778         // done.
5779       } else if (V->getType()->isPointerTy()) {
5780         V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
5781       } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
5782                  cast<IntegerType>(V->getType())->getBitWidth()) {
5783         V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
5784       } else {
5785         // It is only safe to sign extend the BaseReg if we know that the math
5786         // required to create it did not overflow before we extend it. Since
5787         // the original IR value was tossed in favor of a constant back when
5788         // the AddrMode was created we need to bail out gracefully if widths
5789         // do not match instead of extending it.
5790         Instruction *I = dyn_cast_or_null<Instruction>(Result);
5791         if (I && (Result != AddrMode.BaseReg))
5792           I->eraseFromParent();
5793         return Modified;
5794       }
5795       if (AddrMode.Scale != 1)
5796         V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
5797                               "sunkaddr");
5798       if (Result)
5799         Result = Builder.CreateAdd(Result, V, "sunkaddr");
5800       else
5801         Result = V;
5802     }
5803 
5804     // Add in the BaseGV if present.
5805     GlobalValue *BaseGV = AddrMode.BaseGV;
5806     if (BaseGV != nullptr) {
5807       Value *BaseGVPtr;
5808       if (BaseGV->isThreadLocal()) {
5809         BaseGVPtr = Builder.CreateThreadLocalAddress(BaseGV);
5810       } else {
5811         BaseGVPtr = BaseGV;
5812       }
5813       Value *V = Builder.CreatePtrToInt(BaseGVPtr, IntPtrTy, "sunkaddr");
5814       if (Result)
5815         Result = Builder.CreateAdd(Result, V, "sunkaddr");
5816       else
5817         Result = V;
5818     }
5819 
5820     // Add in the Base Offset if present.
5821     if (AddrMode.BaseOffs) {
5822       Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
5823       if (Result)
5824         Result = Builder.CreateAdd(Result, V, "sunkaddr");
5825       else
5826         Result = V;
5827     }
5828 
5829     if (!Result)
5830       SunkAddr = Constant::getNullValue(Addr->getType());
5831     else
5832       SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
5833   }
5834 
5835   MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
5836   // Store the newly computed address into the cache. In the case we reused a
5837   // value, this should be idempotent.
5838   SunkAddrs[Addr] = WeakTrackingVH(SunkAddr);
5839 
5840   // If we have no uses, recursively delete the value and all dead instructions
5841   // using it.
5842   if (Repl->use_empty()) {
5843     resetIteratorIfInvalidatedWhileCalling(CurInstIterator->getParent(), [&]() {
5844       RecursivelyDeleteTriviallyDeadInstructions(
5845           Repl, TLInfo, nullptr,
5846           [&](Value *V) { removeAllAssertingVHReferences(V); });
5847     });
5848   }
5849   ++NumMemoryInsts;
5850   return true;
5851 }
5852 
5853 /// Rewrite GEP input to gather/scatter to enable SelectionDAGBuilder to find
5854 /// a uniform base to use for ISD::MGATHER/MSCATTER. SelectionDAGBuilder can
5855 /// only handle a 2 operand GEP in the same basic block or a splat constant
5856 /// vector. The 2 operands to the GEP must have a scalar pointer and a vector
5857 /// index.
5858 ///
5859 /// If the existing GEP has a vector base pointer that is splat, we can look
5860 /// through the splat to find the scalar pointer. If we can't find a scalar
5861 /// pointer there's nothing we can do.
5862 ///
5863 /// If we have a GEP with more than 2 indices where the middle indices are all
5864 /// zeroes, we can replace it with 2 GEPs where the second has 2 operands.
5865 ///
5866 /// If the final index isn't a vector or is a splat, we can emit a scalar GEP
5867 /// followed by a GEP with an all zeroes vector index. This will enable
5868 /// SelectionDAGBuilder to use the scalar GEP as the uniform base and have a
5869 /// zero index.
5870 bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst,
5871                                                Value *Ptr) {
5872   Value *NewAddr;
5873 
5874   if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
5875     // Don't optimize GEPs that don't have indices.
5876     if (!GEP->hasIndices())
5877       return false;
5878 
5879     // If the GEP and the gather/scatter aren't in the same BB, don't optimize.
5880     // FIXME: We should support this by sinking the GEP.
5881     if (MemoryInst->getParent() != GEP->getParent())
5882       return false;
5883 
5884     SmallVector<Value *, 2> Ops(GEP->operands());
5885 
5886     bool RewriteGEP = false;
5887 
5888     if (Ops[0]->getType()->isVectorTy()) {
5889       Ops[0] = getSplatValue(Ops[0]);
5890       if (!Ops[0])
5891         return false;
5892       RewriteGEP = true;
5893     }
5894 
5895     unsigned FinalIndex = Ops.size() - 1;
5896 
5897     // Ensure all but the last index is 0.
5898     // FIXME: This isn't strictly required. All that's required is that they are
5899     // all scalars or splats.
5900     for (unsigned i = 1; i < FinalIndex; ++i) {
5901       auto *C = dyn_cast<Constant>(Ops[i]);
5902       if (!C)
5903         return false;
5904       if (isa<VectorType>(C->getType()))
5905         C = C->getSplatValue();
5906       auto *CI = dyn_cast_or_null<ConstantInt>(C);
5907       if (!CI || !CI->isZero())
5908         return false;
5909       // Scalarize the index if needed.
5910       Ops[i] = CI;
5911     }
5912 
5913     // Try to scalarize the final index.
5914     if (Ops[FinalIndex]->getType()->isVectorTy()) {
5915       if (Value *V = getSplatValue(Ops[FinalIndex])) {
5916         auto *C = dyn_cast<ConstantInt>(V);
5917         // Don't scalarize all zeros vector.
5918         if (!C || !C->isZero()) {
5919           Ops[FinalIndex] = V;
5920           RewriteGEP = true;
5921         }
5922       }
5923     }
5924 
5925     // If we made any changes or the we have extra operands, we need to generate
5926     // new instructions.
5927     if (!RewriteGEP && Ops.size() == 2)
5928       return false;
5929 
5930     auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
5931 
5932     IRBuilder<> Builder(MemoryInst);
5933 
5934     Type *SourceTy = GEP->getSourceElementType();
5935     Type *ScalarIndexTy = DL->getIndexType(Ops[0]->getType()->getScalarType());
5936 
5937     // If the final index isn't a vector, emit a scalar GEP containing all ops
5938     // and a vector GEP with all zeroes final index.
5939     if (!Ops[FinalIndex]->getType()->isVectorTy()) {
5940       NewAddr = Builder.CreateGEP(SourceTy, Ops[0], ArrayRef(Ops).drop_front());
5941       auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
5942       auto *SecondTy = GetElementPtrInst::getIndexedType(
5943           SourceTy, ArrayRef(Ops).drop_front());
5944       NewAddr =
5945           Builder.CreateGEP(SecondTy, NewAddr, Constant::getNullValue(IndexTy));
5946     } else {
5947       Value *Base = Ops[0];
5948       Value *Index = Ops[FinalIndex];
5949 
5950       // Create a scalar GEP if there are more than 2 operands.
5951       if (Ops.size() != 2) {
5952         // Replace the last index with 0.
5953         Ops[FinalIndex] =
5954             Constant::getNullValue(Ops[FinalIndex]->getType()->getScalarType());
5955         Base = Builder.CreateGEP(SourceTy, Base, ArrayRef(Ops).drop_front());
5956         SourceTy = GetElementPtrInst::getIndexedType(
5957             SourceTy, ArrayRef(Ops).drop_front());
5958       }
5959 
5960       // Now create the GEP with scalar pointer and vector index.
5961       NewAddr = Builder.CreateGEP(SourceTy, Base, Index);
5962     }
5963   } else if (!isa<Constant>(Ptr)) {
5964     // Not a GEP, maybe its a splat and we can create a GEP to enable
5965     // SelectionDAGBuilder to use it as a uniform base.
5966     Value *V = getSplatValue(Ptr);
5967     if (!V)
5968       return false;
5969 
5970     auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
5971 
5972     IRBuilder<> Builder(MemoryInst);
5973 
5974     // Emit a vector GEP with a scalar pointer and all 0s vector index.
5975     Type *ScalarIndexTy = DL->getIndexType(V->getType()->getScalarType());
5976     auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
5977     Type *ScalarTy;
5978     if (cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
5979         Intrinsic::masked_gather) {
5980       ScalarTy = MemoryInst->getType()->getScalarType();
5981     } else {
5982       assert(cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
5983              Intrinsic::masked_scatter);
5984       ScalarTy = MemoryInst->getOperand(0)->getType()->getScalarType();
5985     }
5986     NewAddr = Builder.CreateGEP(ScalarTy, V, Constant::getNullValue(IndexTy));
5987   } else {
5988     // Constant, SelectionDAGBuilder knows to check if its a splat.
5989     return false;
5990   }
5991 
5992   MemoryInst->replaceUsesOfWith(Ptr, NewAddr);
5993 
5994   // If we have no uses, recursively delete the value and all dead instructions
5995   // using it.
5996   if (Ptr->use_empty())
5997     RecursivelyDeleteTriviallyDeadInstructions(
5998         Ptr, TLInfo, nullptr,
5999         [&](Value *V) { removeAllAssertingVHReferences(V); });
6000 
6001   return true;
6002 }
6003 
6004 /// If there are any memory operands, use OptimizeMemoryInst to sink their
6005 /// address computing into the block when possible / profitable.
6006 bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
6007   bool MadeChange = false;
6008 
6009   const TargetRegisterInfo *TRI =
6010       TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo();
6011   TargetLowering::AsmOperandInfoVector TargetConstraints =
6012       TLI->ParseConstraints(*DL, TRI, *CS);
6013   unsigned ArgNo = 0;
6014   for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
6015     // Compute the constraint code and ConstraintType to use.
6016     TLI->ComputeConstraintToUse(OpInfo, SDValue());
6017 
6018     // TODO: Also handle C_Address?
6019     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
6020         OpInfo.isIndirect) {
6021       Value *OpVal = CS->getArgOperand(ArgNo++);
6022       MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
6023     } else if (OpInfo.Type == InlineAsm::isInput)
6024       ArgNo++;
6025   }
6026 
6027   return MadeChange;
6028 }
6029 
6030 /// Check if all the uses of \p Val are equivalent (or free) zero or
6031 /// sign extensions.
6032 static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) {
6033   assert(!Val->use_empty() && "Input must have at least one use");
6034   const Instruction *FirstUser = cast<Instruction>(*Val->user_begin());
6035   bool IsSExt = isa<SExtInst>(FirstUser);
6036   Type *ExtTy = FirstUser->getType();
6037   for (const User *U : Val->users()) {
6038     const Instruction *UI = cast<Instruction>(U);
6039     if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
6040       return false;
6041     Type *CurTy = UI->getType();
6042     // Same input and output types: Same instruction after CSE.
6043     if (CurTy == ExtTy)
6044       continue;
6045 
6046     // If IsSExt is true, we are in this situation:
6047     // a = Val
6048     // b = sext ty1 a to ty2
6049     // c = sext ty1 a to ty3
6050     // Assuming ty2 is shorter than ty3, this could be turned into:
6051     // a = Val
6052     // b = sext ty1 a to ty2
6053     // c = sext ty2 b to ty3
6054     // However, the last sext is not free.
6055     if (IsSExt)
6056       return false;
6057 
6058     // This is a ZExt, maybe this is free to extend from one type to another.
6059     // In that case, we would not account for a different use.
6060     Type *NarrowTy;
6061     Type *LargeTy;
6062     if (ExtTy->getScalarType()->getIntegerBitWidth() >
6063         CurTy->getScalarType()->getIntegerBitWidth()) {
6064       NarrowTy = CurTy;
6065       LargeTy = ExtTy;
6066     } else {
6067       NarrowTy = ExtTy;
6068       LargeTy = CurTy;
6069     }
6070 
6071     if (!TLI.isZExtFree(NarrowTy, LargeTy))
6072       return false;
6073   }
6074   // All uses are the same or can be derived from one another for free.
6075   return true;
6076 }
6077 
6078 /// Try to speculatively promote extensions in \p Exts and continue
6079 /// promoting through newly promoted operands recursively as far as doing so is
6080 /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts.
6081 /// When some promotion happened, \p TPT contains the proper state to revert
6082 /// them.
6083 ///
6084 /// \return true if some promotion happened, false otherwise.
6085 bool CodeGenPrepare::tryToPromoteExts(
6086     TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts,
6087     SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
6088     unsigned CreatedInstsCost) {
6089   bool Promoted = false;
6090 
6091   // Iterate over all the extensions to try to promote them.
6092   for (auto *I : Exts) {
6093     // Early check if we directly have ext(load).
6094     if (isa<LoadInst>(I->getOperand(0))) {
6095       ProfitablyMovedExts.push_back(I);
6096       continue;
6097     }
6098 
6099     // Check whether or not we want to do any promotion.  The reason we have
6100     // this check inside the for loop is to catch the case where an extension
6101     // is directly fed by a load because in such case the extension can be moved
6102     // up without any promotion on its operands.
6103     if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion)
6104       return false;
6105 
6106     // Get the action to perform the promotion.
6107     TypePromotionHelper::Action TPH =
6108         TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts);
6109     // Check if we can promote.
6110     if (!TPH) {
6111       // Save the current extension as we cannot move up through its operand.
6112       ProfitablyMovedExts.push_back(I);
6113       continue;
6114     }
6115 
6116     // Save the current state.
6117     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
6118         TPT.getRestorationPoint();
6119     SmallVector<Instruction *, 4> NewExts;
6120     unsigned NewCreatedInstsCost = 0;
6121     unsigned ExtCost = !TLI->isExtFree(I);
6122     // Promote.
6123     Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
6124                              &NewExts, nullptr, *TLI);
6125     assert(PromotedVal &&
6126            "TypePromotionHelper should have filtered out those cases");
6127 
6128     // We would be able to merge only one extension in a load.
6129     // Therefore, if we have more than 1 new extension we heuristically
6130     // cut this search path, because it means we degrade the code quality.
6131     // With exactly 2, the transformation is neutral, because we will merge
6132     // one extension but leave one. However, we optimistically keep going,
6133     // because the new extension may be removed too. Also avoid replacing a
6134     // single free extension with multiple extensions, as this increases the
6135     // number of IR instructions while not providing any savings.
6136     long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
6137     // FIXME: It would be possible to propagate a negative value instead of
6138     // conservatively ceiling it to 0.
6139     TotalCreatedInstsCost =
6140         std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
6141     if (!StressExtLdPromotion &&
6142         (TotalCreatedInstsCost > 1 ||
6143          !isPromotedInstructionLegal(*TLI, *DL, PromotedVal) ||
6144          (ExtCost == 0 && NewExts.size() > 1))) {
6145       // This promotion is not profitable, rollback to the previous state, and
6146       // save the current extension in ProfitablyMovedExts as the latest
6147       // speculative promotion turned out to be unprofitable.
6148       TPT.rollback(LastKnownGood);
6149       ProfitablyMovedExts.push_back(I);
6150       continue;
6151     }
6152     // Continue promoting NewExts as far as doing so is profitable.
6153     SmallVector<Instruction *, 2> NewlyMovedExts;
6154     (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
6155     bool NewPromoted = false;
6156     for (auto *ExtInst : NewlyMovedExts) {
6157       Instruction *MovedExt = cast<Instruction>(ExtInst);
6158       Value *ExtOperand = MovedExt->getOperand(0);
6159       // If we have reached to a load, we need this extra profitability check
6160       // as it could potentially be merged into an ext(load).
6161       if (isa<LoadInst>(ExtOperand) &&
6162           !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
6163             (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
6164         continue;
6165 
6166       ProfitablyMovedExts.push_back(MovedExt);
6167       NewPromoted = true;
6168     }
6169 
6170     // If none of speculative promotions for NewExts is profitable, rollback
6171     // and save the current extension (I) as the last profitable extension.
6172     if (!NewPromoted) {
6173       TPT.rollback(LastKnownGood);
6174       ProfitablyMovedExts.push_back(I);
6175       continue;
6176     }
6177     // The promotion is profitable.
6178     Promoted = true;
6179   }
6180   return Promoted;
6181 }
6182 
6183 /// Merging redundant sexts when one is dominating the other.
6184 bool CodeGenPrepare::mergeSExts(Function &F) {
6185   bool Changed = false;
6186   for (auto &Entry : ValToSExtendedUses) {
6187     SExts &Insts = Entry.second;
6188     SExts CurPts;
6189     for (Instruction *Inst : Insts) {
6190       if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
6191           Inst->getOperand(0) != Entry.first)
6192         continue;
6193       bool inserted = false;
6194       for (auto &Pt : CurPts) {
6195         if (getDT(F).dominates(Inst, Pt)) {
6196           replaceAllUsesWith(Pt, Inst, FreshBBs, IsHugeFunc);
6197           RemovedInsts.insert(Pt);
6198           Pt->removeFromParent();
6199           Pt = Inst;
6200           inserted = true;
6201           Changed = true;
6202           break;
6203         }
6204         if (!getDT(F).dominates(Pt, Inst))
6205           // Give up if we need to merge in a common dominator as the
6206           // experiments show it is not profitable.
6207           continue;
6208         replaceAllUsesWith(Inst, Pt, FreshBBs, IsHugeFunc);
6209         RemovedInsts.insert(Inst);
6210         Inst->removeFromParent();
6211         inserted = true;
6212         Changed = true;
6213         break;
6214       }
6215       if (!inserted)
6216         CurPts.push_back(Inst);
6217     }
6218   }
6219   return Changed;
6220 }
6221 
6222 // Splitting large data structures so that the GEPs accessing them can have
6223 // smaller offsets so that they can be sunk to the same blocks as their users.
6224 // For example, a large struct starting from %base is split into two parts
6225 // where the second part starts from %new_base.
6226 //
6227 // Before:
6228 // BB0:
6229 //   %base     =
6230 //
6231 // BB1:
6232 //   %gep0     = gep %base, off0
6233 //   %gep1     = gep %base, off1
6234 //   %gep2     = gep %base, off2
6235 //
6236 // BB2:
6237 //   %load1    = load %gep0
6238 //   %load2    = load %gep1
6239 //   %load3    = load %gep2
6240 //
6241 // After:
6242 // BB0:
6243 //   %base     =
6244 //   %new_base = gep %base, off0
6245 //
6246 // BB1:
6247 //   %new_gep0 = %new_base
6248 //   %new_gep1 = gep %new_base, off1 - off0
6249 //   %new_gep2 = gep %new_base, off2 - off0
6250 //
6251 // BB2:
6252 //   %load1    = load i32, i32* %new_gep0
6253 //   %load2    = load i32, i32* %new_gep1
6254 //   %load3    = load i32, i32* %new_gep2
6255 //
6256 // %new_gep1 and %new_gep2 can be sunk to BB2 now after the splitting because
6257 // their offsets are smaller enough to fit into the addressing mode.
6258 bool CodeGenPrepare::splitLargeGEPOffsets() {
6259   bool Changed = false;
6260   for (auto &Entry : LargeOffsetGEPMap) {
6261     Value *OldBase = Entry.first;
6262     SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>>
6263         &LargeOffsetGEPs = Entry.second;
6264     auto compareGEPOffset =
6265         [&](const std::pair<GetElementPtrInst *, int64_t> &LHS,
6266             const std::pair<GetElementPtrInst *, int64_t> &RHS) {
6267           if (LHS.first == RHS.first)
6268             return false;
6269           if (LHS.second != RHS.second)
6270             return LHS.second < RHS.second;
6271           return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first];
6272         };
6273     // Sorting all the GEPs of the same data structures based on the offsets.
6274     llvm::sort(LargeOffsetGEPs, compareGEPOffset);
6275     LargeOffsetGEPs.erase(llvm::unique(LargeOffsetGEPs), LargeOffsetGEPs.end());
6276     // Skip if all the GEPs have the same offsets.
6277     if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second)
6278       continue;
6279     GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first;
6280     int64_t BaseOffset = LargeOffsetGEPs.begin()->second;
6281     Value *NewBaseGEP = nullptr;
6282 
6283     auto createNewBase = [&](int64_t BaseOffset, Value *OldBase,
6284                              GetElementPtrInst *GEP) {
6285       LLVMContext &Ctx = GEP->getContext();
6286       Type *PtrIdxTy = DL->getIndexType(GEP->getType());
6287       Type *I8PtrTy =
6288           PointerType::get(Ctx, GEP->getType()->getPointerAddressSpace());
6289 
6290       BasicBlock::iterator NewBaseInsertPt;
6291       BasicBlock *NewBaseInsertBB;
6292       if (auto *BaseI = dyn_cast<Instruction>(OldBase)) {
6293         // If the base of the struct is an instruction, the new base will be
6294         // inserted close to it.
6295         NewBaseInsertBB = BaseI->getParent();
6296         if (isa<PHINode>(BaseI))
6297           NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
6298         else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) {
6299           NewBaseInsertBB =
6300               SplitEdge(NewBaseInsertBB, Invoke->getNormalDest(), DT.get(), LI);
6301           NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
6302         } else
6303           NewBaseInsertPt = std::next(BaseI->getIterator());
6304       } else {
6305         // If the current base is an argument or global value, the new base
6306         // will be inserted to the entry block.
6307         NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock();
6308         NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
6309       }
6310       IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
6311       // Create a new base.
6312       Value *BaseIndex = ConstantInt::get(PtrIdxTy, BaseOffset);
6313       NewBaseGEP = OldBase;
6314       if (NewBaseGEP->getType() != I8PtrTy)
6315         NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy);
6316       NewBaseGEP =
6317           NewBaseBuilder.CreatePtrAdd(NewBaseGEP, BaseIndex, "splitgep");
6318       NewGEPBases.insert(NewBaseGEP);
6319       return;
6320     };
6321 
6322     // Check whether all the offsets can be encoded with prefered common base.
6323     if (int64_t PreferBase = TLI->getPreferredLargeGEPBaseOffset(
6324             LargeOffsetGEPs.front().second, LargeOffsetGEPs.back().second)) {
6325       BaseOffset = PreferBase;
6326       // Create a new base if the offset of the BaseGEP can be decoded with one
6327       // instruction.
6328       createNewBase(BaseOffset, OldBase, BaseGEP);
6329     }
6330 
6331     auto *LargeOffsetGEP = LargeOffsetGEPs.begin();
6332     while (LargeOffsetGEP != LargeOffsetGEPs.end()) {
6333       GetElementPtrInst *GEP = LargeOffsetGEP->first;
6334       int64_t Offset = LargeOffsetGEP->second;
6335       if (Offset != BaseOffset) {
6336         TargetLowering::AddrMode AddrMode;
6337         AddrMode.HasBaseReg = true;
6338         AddrMode.BaseOffs = Offset - BaseOffset;
6339         // The result type of the GEP might not be the type of the memory
6340         // access.
6341         if (!TLI->isLegalAddressingMode(*DL, AddrMode,
6342                                         GEP->getResultElementType(),
6343                                         GEP->getAddressSpace())) {
6344           // We need to create a new base if the offset to the current base is
6345           // too large to fit into the addressing mode. So, a very large struct
6346           // may be split into several parts.
6347           BaseGEP = GEP;
6348           BaseOffset = Offset;
6349           NewBaseGEP = nullptr;
6350         }
6351       }
6352 
6353       // Generate a new GEP to replace the current one.
6354       Type *PtrIdxTy = DL->getIndexType(GEP->getType());
6355 
6356       if (!NewBaseGEP) {
6357         // Create a new base if we don't have one yet.  Find the insertion
6358         // pointer for the new base first.
6359         createNewBase(BaseOffset, OldBase, GEP);
6360       }
6361 
6362       IRBuilder<> Builder(GEP);
6363       Value *NewGEP = NewBaseGEP;
6364       if (Offset != BaseOffset) {
6365         // Calculate the new offset for the new GEP.
6366         Value *Index = ConstantInt::get(PtrIdxTy, Offset - BaseOffset);
6367         NewGEP = Builder.CreatePtrAdd(NewBaseGEP, Index);
6368       }
6369       replaceAllUsesWith(GEP, NewGEP, FreshBBs, IsHugeFunc);
6370       LargeOffsetGEPID.erase(GEP);
6371       LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP);
6372       GEP->eraseFromParent();
6373       Changed = true;
6374     }
6375   }
6376   return Changed;
6377 }
6378 
6379 bool CodeGenPrepare::optimizePhiType(
6380     PHINode *I, SmallPtrSetImpl<PHINode *> &Visited,
6381     SmallPtrSetImpl<Instruction *> &DeletedInstrs) {
6382   // We are looking for a collection on interconnected phi nodes that together
6383   // only use loads/bitcasts and are used by stores/bitcasts, and the bitcasts
6384   // are of the same type. Convert the whole set of nodes to the type of the
6385   // bitcast.
6386   Type *PhiTy = I->getType();
6387   Type *ConvertTy = nullptr;
6388   if (Visited.count(I) ||
6389       (!I->getType()->isIntegerTy() && !I->getType()->isFloatingPointTy()))
6390     return false;
6391 
6392   SmallVector<Instruction *, 4> Worklist;
6393   Worklist.push_back(cast<Instruction>(I));
6394   SmallPtrSet<PHINode *, 4> PhiNodes;
6395   SmallPtrSet<ConstantData *, 4> Constants;
6396   PhiNodes.insert(I);
6397   Visited.insert(I);
6398   SmallPtrSet<Instruction *, 4> Defs;
6399   SmallPtrSet<Instruction *, 4> Uses;
6400   // This works by adding extra bitcasts between load/stores and removing
6401   // existing bicasts. If we have a phi(bitcast(load)) or a store(bitcast(phi))
6402   // we can get in the situation where we remove a bitcast in one iteration
6403   // just to add it again in the next. We need to ensure that at least one
6404   // bitcast we remove are anchored to something that will not change back.
6405   bool AnyAnchored = false;
6406 
6407   while (!Worklist.empty()) {
6408     Instruction *II = Worklist.pop_back_val();
6409 
6410     if (auto *Phi = dyn_cast<PHINode>(II)) {
6411       // Handle Defs, which might also be PHI's
6412       for (Value *V : Phi->incoming_values()) {
6413         if (auto *OpPhi = dyn_cast<PHINode>(V)) {
6414           if (!PhiNodes.count(OpPhi)) {
6415             if (!Visited.insert(OpPhi).second)
6416               return false;
6417             PhiNodes.insert(OpPhi);
6418             Worklist.push_back(OpPhi);
6419           }
6420         } else if (auto *OpLoad = dyn_cast<LoadInst>(V)) {
6421           if (!OpLoad->isSimple())
6422             return false;
6423           if (Defs.insert(OpLoad).second)
6424             Worklist.push_back(OpLoad);
6425         } else if (auto *OpEx = dyn_cast<ExtractElementInst>(V)) {
6426           if (Defs.insert(OpEx).second)
6427             Worklist.push_back(OpEx);
6428         } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
6429           if (!ConvertTy)
6430             ConvertTy = OpBC->getOperand(0)->getType();
6431           if (OpBC->getOperand(0)->getType() != ConvertTy)
6432             return false;
6433           if (Defs.insert(OpBC).second) {
6434             Worklist.push_back(OpBC);
6435             AnyAnchored |= !isa<LoadInst>(OpBC->getOperand(0)) &&
6436                            !isa<ExtractElementInst>(OpBC->getOperand(0));
6437           }
6438         } else if (auto *OpC = dyn_cast<ConstantData>(V))
6439           Constants.insert(OpC);
6440         else
6441           return false;
6442       }
6443     }
6444 
6445     // Handle uses which might also be phi's
6446     for (User *V : II->users()) {
6447       if (auto *OpPhi = dyn_cast<PHINode>(V)) {
6448         if (!PhiNodes.count(OpPhi)) {
6449           if (Visited.count(OpPhi))
6450             return false;
6451           PhiNodes.insert(OpPhi);
6452           Visited.insert(OpPhi);
6453           Worklist.push_back(OpPhi);
6454         }
6455       } else if (auto *OpStore = dyn_cast<StoreInst>(V)) {
6456         if (!OpStore->isSimple() || OpStore->getOperand(0) != II)
6457           return false;
6458         Uses.insert(OpStore);
6459       } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
6460         if (!ConvertTy)
6461           ConvertTy = OpBC->getType();
6462         if (OpBC->getType() != ConvertTy)
6463           return false;
6464         Uses.insert(OpBC);
6465         AnyAnchored |=
6466             any_of(OpBC->users(), [](User *U) { return !isa<StoreInst>(U); });
6467       } else {
6468         return false;
6469       }
6470     }
6471   }
6472 
6473   if (!ConvertTy || !AnyAnchored ||
6474       !TLI->shouldConvertPhiType(PhiTy, ConvertTy))
6475     return false;
6476 
6477   LLVM_DEBUG(dbgs() << "Converting " << *I << "\n  and connected nodes to "
6478                     << *ConvertTy << "\n");
6479 
6480   // Create all the new phi nodes of the new type, and bitcast any loads to the
6481   // correct type.
6482   ValueToValueMap ValMap;
6483   for (ConstantData *C : Constants)
6484     ValMap[C] = ConstantExpr::getBitCast(C, ConvertTy);
6485   for (Instruction *D : Defs) {
6486     if (isa<BitCastInst>(D)) {
6487       ValMap[D] = D->getOperand(0);
6488       DeletedInstrs.insert(D);
6489     } else {
6490       BasicBlock::iterator insertPt = std::next(D->getIterator());
6491       ValMap[D] = new BitCastInst(D, ConvertTy, D->getName() + ".bc", insertPt);
6492     }
6493   }
6494   for (PHINode *Phi : PhiNodes)
6495     ValMap[Phi] = PHINode::Create(ConvertTy, Phi->getNumIncomingValues(),
6496                                   Phi->getName() + ".tc", Phi->getIterator());
6497   // Pipe together all the PhiNodes.
6498   for (PHINode *Phi : PhiNodes) {
6499     PHINode *NewPhi = cast<PHINode>(ValMap[Phi]);
6500     for (int i = 0, e = Phi->getNumIncomingValues(); i < e; i++)
6501       NewPhi->addIncoming(ValMap[Phi->getIncomingValue(i)],
6502                           Phi->getIncomingBlock(i));
6503     Visited.insert(NewPhi);
6504   }
6505   // And finally pipe up the stores and bitcasts
6506   for (Instruction *U : Uses) {
6507     if (isa<BitCastInst>(U)) {
6508       DeletedInstrs.insert(U);
6509       replaceAllUsesWith(U, ValMap[U->getOperand(0)], FreshBBs, IsHugeFunc);
6510     } else {
6511       U->setOperand(0, new BitCastInst(ValMap[U->getOperand(0)], PhiTy, "bc",
6512                                        U->getIterator()));
6513     }
6514   }
6515 
6516   // Save the removed phis to be deleted later.
6517   for (PHINode *Phi : PhiNodes)
6518     DeletedInstrs.insert(Phi);
6519   return true;
6520 }
6521 
6522 bool CodeGenPrepare::optimizePhiTypes(Function &F) {
6523   if (!OptimizePhiTypes)
6524     return false;
6525 
6526   bool Changed = false;
6527   SmallPtrSet<PHINode *, 4> Visited;
6528   SmallPtrSet<Instruction *, 4> DeletedInstrs;
6529 
6530   // Attempt to optimize all the phis in the functions to the correct type.
6531   for (auto &BB : F)
6532     for (auto &Phi : BB.phis())
6533       Changed |= optimizePhiType(&Phi, Visited, DeletedInstrs);
6534 
6535   // Remove any old phi's that have been converted.
6536   for (auto *I : DeletedInstrs) {
6537     replaceAllUsesWith(I, PoisonValue::get(I->getType()), FreshBBs, IsHugeFunc);
6538     I->eraseFromParent();
6539   }
6540 
6541   return Changed;
6542 }
6543 
6544 /// Return true, if an ext(load) can be formed from an extension in
6545 /// \p MovedExts.
6546 bool CodeGenPrepare::canFormExtLd(
6547     const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
6548     Instruction *&Inst, bool HasPromoted) {
6549   for (auto *MovedExtInst : MovedExts) {
6550     if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
6551       LI = cast<LoadInst>(MovedExtInst->getOperand(0));
6552       Inst = MovedExtInst;
6553       break;
6554     }
6555   }
6556   if (!LI)
6557     return false;
6558 
6559   // If they're already in the same block, there's nothing to do.
6560   // Make the cheap checks first if we did not promote.
6561   // If we promoted, we need to check if it is indeed profitable.
6562   if (!HasPromoted && LI->getParent() == Inst->getParent())
6563     return false;
6564 
6565   return TLI->isExtLoad(LI, Inst, *DL);
6566 }
6567 
6568 /// Move a zext or sext fed by a load into the same basic block as the load,
6569 /// unless conditions are unfavorable. This allows SelectionDAG to fold the
6570 /// extend into the load.
6571 ///
6572 /// E.g.,
6573 /// \code
6574 /// %ld = load i32* %addr
6575 /// %add = add nuw i32 %ld, 4
6576 /// %zext = zext i32 %add to i64
6577 // \endcode
6578 /// =>
6579 /// \code
6580 /// %ld = load i32* %addr
6581 /// %zext = zext i32 %ld to i64
6582 /// %add = add nuw i64 %zext, 4
6583 /// \encode
6584 /// Note that the promotion in %add to i64 is done in tryToPromoteExts(), which
6585 /// allow us to match zext(load i32*) to i64.
6586 ///
6587 /// Also, try to promote the computations used to obtain a sign extended
6588 /// value used into memory accesses.
6589 /// E.g.,
6590 /// \code
6591 /// a = add nsw i32 b, 3
6592 /// d = sext i32 a to i64
6593 /// e = getelementptr ..., i64 d
6594 /// \endcode
6595 /// =>
6596 /// \code
6597 /// f = sext i32 b to i64
6598 /// a = add nsw i64 f, 3
6599 /// e = getelementptr ..., i64 a
6600 /// \endcode
6601 ///
6602 /// \p Inst[in/out] the extension may be modified during the process if some
6603 /// promotions apply.
6604 bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
6605   bool AllowPromotionWithoutCommonHeader = false;
6606   /// See if it is an interesting sext operations for the address type
6607   /// promotion before trying to promote it, e.g., the ones with the right
6608   /// type and used in memory accesses.
6609   bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
6610       *Inst, AllowPromotionWithoutCommonHeader);
6611   TypePromotionTransaction TPT(RemovedInsts);
6612   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
6613       TPT.getRestorationPoint();
6614   SmallVector<Instruction *, 1> Exts;
6615   SmallVector<Instruction *, 2> SpeculativelyMovedExts;
6616   Exts.push_back(Inst);
6617 
6618   bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);
6619 
6620   // Look for a load being extended.
6621   LoadInst *LI = nullptr;
6622   Instruction *ExtFedByLoad;
6623 
6624   // Try to promote a chain of computation if it allows to form an extended
6625   // load.
6626   if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
6627     assert(LI && ExtFedByLoad && "Expect a valid load and extension");
6628     TPT.commit();
6629     // Move the extend into the same block as the load.
6630     ExtFedByLoad->moveAfter(LI);
6631     ++NumExtsMoved;
6632     Inst = ExtFedByLoad;
6633     return true;
6634   }
6635 
6636   // Continue promoting SExts if known as considerable depending on targets.
6637   if (ATPConsiderable &&
6638       performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
6639                                   HasPromoted, TPT, SpeculativelyMovedExts))
6640     return true;
6641 
6642   TPT.rollback(LastKnownGood);
6643   return false;
6644 }
6645 
6646 // Perform address type promotion if doing so is profitable.
6647 // If AllowPromotionWithoutCommonHeader == false, we should find other sext
6648 // instructions that sign extended the same initial value. However, if
6649 // AllowPromotionWithoutCommonHeader == true, we expect promoting the
6650 // extension is just profitable.
6651 bool CodeGenPrepare::performAddressTypePromotion(
6652     Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
6653     bool HasPromoted, TypePromotionTransaction &TPT,
6654     SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) {
6655   bool Promoted = false;
6656   SmallPtrSet<Instruction *, 1> UnhandledExts;
6657   bool AllSeenFirst = true;
6658   for (auto *I : SpeculativelyMovedExts) {
6659     Value *HeadOfChain = I->getOperand(0);
6660     DenseMap<Value *, Instruction *>::iterator AlreadySeen =
6661         SeenChainsForSExt.find(HeadOfChain);
6662     // If there is an unhandled SExt which has the same header, try to promote
6663     // it as well.
6664     if (AlreadySeen != SeenChainsForSExt.end()) {
6665       if (AlreadySeen->second != nullptr)
6666         UnhandledExts.insert(AlreadySeen->second);
6667       AllSeenFirst = false;
6668     }
6669   }
6670 
6671   if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader &&
6672                         SpeculativelyMovedExts.size() == 1)) {
6673     TPT.commit();
6674     if (HasPromoted)
6675       Promoted = true;
6676     for (auto *I : SpeculativelyMovedExts) {
6677       Value *HeadOfChain = I->getOperand(0);
6678       SeenChainsForSExt[HeadOfChain] = nullptr;
6679       ValToSExtendedUses[HeadOfChain].push_back(I);
6680     }
6681     // Update Inst as promotion happen.
6682     Inst = SpeculativelyMovedExts.pop_back_val();
6683   } else {
6684     // This is the first chain visited from the header, keep the current chain
6685     // as unhandled. Defer to promote this until we encounter another SExt
6686     // chain derived from the same header.
6687     for (auto *I : SpeculativelyMovedExts) {
6688       Value *HeadOfChain = I->getOperand(0);
6689       SeenChainsForSExt[HeadOfChain] = Inst;
6690     }
6691     return false;
6692   }
6693 
6694   if (!AllSeenFirst && !UnhandledExts.empty())
6695     for (auto *VisitedSExt : UnhandledExts) {
6696       if (RemovedInsts.count(VisitedSExt))
6697         continue;
6698       TypePromotionTransaction TPT(RemovedInsts);
6699       SmallVector<Instruction *, 1> Exts;
6700       SmallVector<Instruction *, 2> Chains;
6701       Exts.push_back(VisitedSExt);
6702       bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains);
6703       TPT.commit();
6704       if (HasPromoted)
6705         Promoted = true;
6706       for (auto *I : Chains) {
6707         Value *HeadOfChain = I->getOperand(0);
6708         // Mark this as handled.
6709         SeenChainsForSExt[HeadOfChain] = nullptr;
6710         ValToSExtendedUses[HeadOfChain].push_back(I);
6711       }
6712     }
6713   return Promoted;
6714 }
6715 
6716 bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
6717   BasicBlock *DefBB = I->getParent();
6718 
6719   // If the result of a {s|z}ext and its source are both live out, rewrite all
6720   // other uses of the source with result of extension.
6721   Value *Src = I->getOperand(0);
6722   if (Src->hasOneUse())
6723     return false;
6724 
6725   // Only do this xform if truncating is free.
6726   if (!TLI->isTruncateFree(I->getType(), Src->getType()))
6727     return false;
6728 
6729   // Only safe to perform the optimization if the source is also defined in
6730   // this block.
6731   if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
6732     return false;
6733 
6734   bool DefIsLiveOut = false;
6735   for (User *U : I->users()) {
6736     Instruction *UI = cast<Instruction>(U);
6737 
6738     // Figure out which BB this ext is used in.
6739     BasicBlock *UserBB = UI->getParent();
6740     if (UserBB == DefBB)
6741       continue;
6742     DefIsLiveOut = true;
6743     break;
6744   }
6745   if (!DefIsLiveOut)
6746     return false;
6747 
6748   // Make sure none of the uses are PHI nodes.
6749   for (User *U : Src->users()) {
6750     Instruction *UI = cast<Instruction>(U);
6751     BasicBlock *UserBB = UI->getParent();
6752     if (UserBB == DefBB)
6753       continue;
6754     // Be conservative. We don't want this xform to end up introducing
6755     // reloads just before load / store instructions.
6756     if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
6757       return false;
6758   }
6759 
6760   // InsertedTruncs - Only insert one trunc in each block once.
6761   DenseMap<BasicBlock *, Instruction *> InsertedTruncs;
6762 
6763   bool MadeChange = false;
6764   for (Use &U : Src->uses()) {
6765     Instruction *User = cast<Instruction>(U.getUser());
6766 
6767     // Figure out which BB this ext is used in.
6768     BasicBlock *UserBB = User->getParent();
6769     if (UserBB == DefBB)
6770       continue;
6771 
6772     // Both src and def are live in this block. Rewrite the use.
6773     Instruction *&InsertedTrunc = InsertedTruncs[UserBB];
6774 
6775     if (!InsertedTrunc) {
6776       BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
6777       assert(InsertPt != UserBB->end());
6778       InsertedTrunc = new TruncInst(I, Src->getType(), "");
6779       InsertedTrunc->insertBefore(*UserBB, InsertPt);
6780       InsertedInsts.insert(InsertedTrunc);
6781     }
6782 
6783     // Replace a use of the {s|z}ext source with a use of the result.
6784     U = InsertedTrunc;
6785     ++NumExtUses;
6786     MadeChange = true;
6787   }
6788 
6789   return MadeChange;
6790 }
6791 
6792 // Find loads whose uses only use some of the loaded value's bits.  Add an "and"
6793 // just after the load if the target can fold this into one extload instruction,
6794 // with the hope of eliminating some of the other later "and" instructions using
6795 // the loaded value.  "and"s that are made trivially redundant by the insertion
6796 // of the new "and" are removed by this function, while others (e.g. those whose
6797 // path from the load goes through a phi) are left for isel to potentially
6798 // remove.
6799 //
6800 // For example:
6801 //
6802 // b0:
6803 //   x = load i32
6804 //   ...
6805 // b1:
6806 //   y = and x, 0xff
6807 //   z = use y
6808 //
6809 // becomes:
6810 //
6811 // b0:
6812 //   x = load i32
6813 //   x' = and x, 0xff
6814 //   ...
6815 // b1:
6816 //   z = use x'
6817 //
6818 // whereas:
6819 //
6820 // b0:
6821 //   x1 = load i32
6822 //   ...
6823 // b1:
6824 //   x2 = load i32
6825 //   ...
6826 // b2:
6827 //   x = phi x1, x2
6828 //   y = and x, 0xff
6829 //
6830 // becomes (after a call to optimizeLoadExt for each load):
6831 //
6832 // b0:
6833 //   x1 = load i32
6834 //   x1' = and x1, 0xff
6835 //   ...
6836 // b1:
6837 //   x2 = load i32
6838 //   x2' = and x2, 0xff
6839 //   ...
6840 // b2:
6841 //   x = phi x1', x2'
6842 //   y = and x, 0xff
6843 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
6844   if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy())
6845     return false;
6846 
6847   // Skip loads we've already transformed.
6848   if (Load->hasOneUse() &&
6849       InsertedInsts.count(cast<Instruction>(*Load->user_begin())))
6850     return false;
6851 
6852   // Look at all uses of Load, looking through phis, to determine how many bits
6853   // of the loaded value are needed.
6854   SmallVector<Instruction *, 8> WorkList;
6855   SmallPtrSet<Instruction *, 16> Visited;
6856   SmallVector<Instruction *, 8> AndsToMaybeRemove;
6857   for (auto *U : Load->users())
6858     WorkList.push_back(cast<Instruction>(U));
6859 
6860   EVT LoadResultVT = TLI->getValueType(*DL, Load->getType());
6861   unsigned BitWidth = LoadResultVT.getSizeInBits();
6862   // If the BitWidth is 0, do not try to optimize the type
6863   if (BitWidth == 0)
6864     return false;
6865 
6866   APInt DemandBits(BitWidth, 0);
6867   APInt WidestAndBits(BitWidth, 0);
6868 
6869   while (!WorkList.empty()) {
6870     Instruction *I = WorkList.pop_back_val();
6871 
6872     // Break use-def graph loops.
6873     if (!Visited.insert(I).second)
6874       continue;
6875 
6876     // For a PHI node, push all of its users.
6877     if (auto *Phi = dyn_cast<PHINode>(I)) {
6878       for (auto *U : Phi->users())
6879         WorkList.push_back(cast<Instruction>(U));
6880       continue;
6881     }
6882 
6883     switch (I->getOpcode()) {
6884     case Instruction::And: {
6885       auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1));
6886       if (!AndC)
6887         return false;
6888       APInt AndBits = AndC->getValue();
6889       DemandBits |= AndBits;
6890       // Keep track of the widest and mask we see.
6891       if (AndBits.ugt(WidestAndBits))
6892         WidestAndBits = AndBits;
6893       if (AndBits == WidestAndBits && I->getOperand(0) == Load)
6894         AndsToMaybeRemove.push_back(I);
6895       break;
6896     }
6897 
6898     case Instruction::Shl: {
6899       auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1));
6900       if (!ShlC)
6901         return false;
6902       uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
6903       DemandBits.setLowBits(BitWidth - ShiftAmt);
6904       break;
6905     }
6906 
6907     case Instruction::Trunc: {
6908       EVT TruncVT = TLI->getValueType(*DL, I->getType());
6909       unsigned TruncBitWidth = TruncVT.getSizeInBits();
6910       DemandBits.setLowBits(TruncBitWidth);
6911       break;
6912     }
6913 
6914     default:
6915       return false;
6916     }
6917   }
6918 
6919   uint32_t ActiveBits = DemandBits.getActiveBits();
6920   // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the
6921   // target even if isLoadExtLegal says an i1 EXTLOAD is valid.  For example,
6922   // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but
6923   // (and (load x) 1) is not matched as a single instruction, rather as a LDR
6924   // followed by an AND.
6925   // TODO: Look into removing this restriction by fixing backends to either
6926   // return false for isLoadExtLegal for i1 or have them select this pattern to
6927   // a single instruction.
6928   //
6929   // Also avoid hoisting if we didn't see any ands with the exact DemandBits
6930   // mask, since these are the only ands that will be removed by isel.
6931   if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) ||
6932       WidestAndBits != DemandBits)
6933     return false;
6934 
6935   LLVMContext &Ctx = Load->getType()->getContext();
6936   Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits);
6937   EVT TruncVT = TLI->getValueType(*DL, TruncTy);
6938 
6939   // Reject cases that won't be matched as extloads.
6940   if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() ||
6941       !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT))
6942     return false;
6943 
6944   IRBuilder<> Builder(Load->getNextNonDebugInstruction());
6945   auto *NewAnd = cast<Instruction>(
6946       Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits)));
6947   // Mark this instruction as "inserted by CGP", so that other
6948   // optimizations don't touch it.
6949   InsertedInsts.insert(NewAnd);
6950 
6951   // Replace all uses of load with new and (except for the use of load in the
6952   // new and itself).
6953   replaceAllUsesWith(Load, NewAnd, FreshBBs, IsHugeFunc);
6954   NewAnd->setOperand(0, Load);
6955 
6956   // Remove any and instructions that are now redundant.
6957   for (auto *And : AndsToMaybeRemove)
6958     // Check that the and mask is the same as the one we decided to put on the
6959     // new and.
6960     if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) {
6961       replaceAllUsesWith(And, NewAnd, FreshBBs, IsHugeFunc);
6962       if (&*CurInstIterator == And)
6963         CurInstIterator = std::next(And->getIterator());
6964       And->eraseFromParent();
6965       ++NumAndUses;
6966     }
6967 
6968   ++NumAndsAdded;
6969   return true;
6970 }
6971 
6972 /// Check if V (an operand of a select instruction) is an expensive instruction
6973 /// that is only used once.
6974 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) {
6975   auto *I = dyn_cast<Instruction>(V);
6976   // If it's safe to speculatively execute, then it should not have side
6977   // effects; therefore, it's safe to sink and possibly *not* execute.
6978   return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) &&
6979          TTI->isExpensiveToSpeculativelyExecute(I);
6980 }
6981 
6982 /// Returns true if a SelectInst should be turned into an explicit branch.
6983 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI,
6984                                                 const TargetLowering *TLI,
6985                                                 SelectInst *SI) {
6986   // If even a predictable select is cheap, then a branch can't be cheaper.
6987   if (!TLI->isPredictableSelectExpensive())
6988     return false;
6989 
6990   // FIXME: This should use the same heuristics as IfConversion to determine
6991   // whether a select is better represented as a branch.
6992 
6993   // If metadata tells us that the select condition is obviously predictable,
6994   // then we want to replace the select with a branch.
6995   uint64_t TrueWeight, FalseWeight;
6996   if (extractBranchWeights(*SI, TrueWeight, FalseWeight)) {
6997     uint64_t Max = std::max(TrueWeight, FalseWeight);
6998     uint64_t Sum = TrueWeight + FalseWeight;
6999     if (Sum != 0) {
7000       auto Probability = BranchProbability::getBranchProbability(Max, Sum);
7001       if (Probability > TTI->getPredictableBranchThreshold())
7002         return true;
7003     }
7004   }
7005 
7006   CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
7007 
7008   // If a branch is predictable, an out-of-order CPU can avoid blocking on its
7009   // comparison condition. If the compare has more than one use, there's
7010   // probably another cmov or setcc around, so it's not worth emitting a branch.
7011   if (!Cmp || !Cmp->hasOneUse())
7012     return false;
7013 
7014   // If either operand of the select is expensive and only needed on one side
7015   // of the select, we should form a branch.
7016   if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
7017       sinkSelectOperand(TTI, SI->getFalseValue()))
7018     return true;
7019 
7020   return false;
7021 }
7022 
7023 /// If \p isTrue is true, return the true value of \p SI, otherwise return
7024 /// false value of \p SI. If the true/false value of \p SI is defined by any
7025 /// select instructions in \p Selects, look through the defining select
7026 /// instruction until the true/false value is not defined in \p Selects.
7027 static Value *
7028 getTrueOrFalseValue(SelectInst *SI, bool isTrue,
7029                     const SmallPtrSet<const Instruction *, 2> &Selects) {
7030   Value *V = nullptr;
7031 
7032   for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI);
7033        DefSI = dyn_cast<SelectInst>(V)) {
7034     assert(DefSI->getCondition() == SI->getCondition() &&
7035            "The condition of DefSI does not match with SI");
7036     V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
7037   }
7038 
7039   assert(V && "Failed to get select true/false value");
7040   return V;
7041 }
7042 
7043 bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) {
7044   assert(Shift->isShift() && "Expected a shift");
7045 
7046   // If this is (1) a vector shift, (2) shifts by scalars are cheaper than
7047   // general vector shifts, and (3) the shift amount is a select-of-splatted
7048   // values, hoist the shifts before the select:
7049   //   shift Op0, (select Cond, TVal, FVal) -->
7050   //   select Cond, (shift Op0, TVal), (shift Op0, FVal)
7051   //
7052   // This is inverting a generic IR transform when we know that the cost of a
7053   // general vector shift is more than the cost of 2 shift-by-scalars.
7054   // We can't do this effectively in SDAG because we may not be able to
7055   // determine if the select operands are splats from within a basic block.
7056   Type *Ty = Shift->getType();
7057   if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty))
7058     return false;
7059   Value *Cond, *TVal, *FVal;
7060   if (!match(Shift->getOperand(1),
7061              m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
7062     return false;
7063   if (!isSplatValue(TVal) || !isSplatValue(FVal))
7064     return false;
7065 
7066   IRBuilder<> Builder(Shift);
7067   BinaryOperator::BinaryOps Opcode = Shift->getOpcode();
7068   Value *NewTVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), TVal);
7069   Value *NewFVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), FVal);
7070   Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
7071   replaceAllUsesWith(Shift, NewSel, FreshBBs, IsHugeFunc);
7072   Shift->eraseFromParent();
7073   return true;
7074 }
7075 
7076 bool CodeGenPrepare::optimizeFunnelShift(IntrinsicInst *Fsh) {
7077   Intrinsic::ID Opcode = Fsh->getIntrinsicID();
7078   assert((Opcode == Intrinsic::fshl || Opcode == Intrinsic::fshr) &&
7079          "Expected a funnel shift");
7080 
7081   // If this is (1) a vector funnel shift, (2) shifts by scalars are cheaper
7082   // than general vector shifts, and (3) the shift amount is select-of-splatted
7083   // values, hoist the funnel shifts before the select:
7084   //   fsh Op0, Op1, (select Cond, TVal, FVal) -->
7085   //   select Cond, (fsh Op0, Op1, TVal), (fsh Op0, Op1, FVal)
7086   //
7087   // This is inverting a generic IR transform when we know that the cost of a
7088   // general vector shift is more than the cost of 2 shift-by-scalars.
7089   // We can't do this effectively in SDAG because we may not be able to
7090   // determine if the select operands are splats from within a basic block.
7091   Type *Ty = Fsh->getType();
7092   if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty))
7093     return false;
7094   Value *Cond, *TVal, *FVal;
7095   if (!match(Fsh->getOperand(2),
7096              m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
7097     return false;
7098   if (!isSplatValue(TVal) || !isSplatValue(FVal))
7099     return false;
7100 
7101   IRBuilder<> Builder(Fsh);
7102   Value *X = Fsh->getOperand(0), *Y = Fsh->getOperand(1);
7103   Value *NewTVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, TVal});
7104   Value *NewFVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, FVal});
7105   Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
7106   replaceAllUsesWith(Fsh, NewSel, FreshBBs, IsHugeFunc);
7107   Fsh->eraseFromParent();
7108   return true;
7109 }
7110 
7111 /// If we have a SelectInst that will likely profit from branch prediction,
7112 /// turn it into a branch.
7113 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
7114   if (DisableSelectToBranch)
7115     return false;
7116 
7117   // If the SelectOptimize pass is enabled, selects have already been optimized.
7118   if (!getCGPassBuilderOption().DisableSelectOptimize)
7119     return false;
7120 
7121   // Find all consecutive select instructions that share the same condition.
7122   SmallVector<SelectInst *, 2> ASI;
7123   ASI.push_back(SI);
7124   for (BasicBlock::iterator It = ++BasicBlock::iterator(SI);
7125        It != SI->getParent()->end(); ++It) {
7126     SelectInst *I = dyn_cast<SelectInst>(&*It);
7127     if (I && SI->getCondition() == I->getCondition()) {
7128       ASI.push_back(I);
7129     } else {
7130       break;
7131     }
7132   }
7133 
7134   SelectInst *LastSI = ASI.back();
7135   // Increment the current iterator to skip all the rest of select instructions
7136   // because they will be either "not lowered" or "all lowered" to branch.
7137   CurInstIterator = std::next(LastSI->getIterator());
7138   // Examine debug-info attached to the consecutive select instructions. They
7139   // won't be individually optimised by optimizeInst, so we need to perform
7140   // DbgVariableRecord maintenence here instead.
7141   for (SelectInst *SI : ArrayRef(ASI).drop_front())
7142     fixupDbgVariableRecordsOnInst(*SI);
7143 
7144   bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);
7145 
7146   // Can we convert the 'select' to CF ?
7147   if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable))
7148     return false;
7149 
7150   TargetLowering::SelectSupportKind SelectKind;
7151   if (SI->getType()->isVectorTy())
7152     SelectKind = TargetLowering::ScalarCondVectorVal;
7153   else
7154     SelectKind = TargetLowering::ScalarValSelect;
7155 
7156   if (TLI->isSelectSupported(SelectKind) &&
7157       (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) || OptSize ||
7158        llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get())))
7159     return false;
7160 
7161   // The DominatorTree needs to be rebuilt by any consumers after this
7162   // transformation. We simply reset here rather than setting the ModifiedDT
7163   // flag to avoid restarting the function walk in runOnFunction for each
7164   // select optimized.
7165   DT.reset();
7166 
7167   // Transform a sequence like this:
7168   //    start:
7169   //       %cmp = cmp uge i32 %a, %b
7170   //       %sel = select i1 %cmp, i32 %c, i32 %d
7171   //
7172   // Into:
7173   //    start:
7174   //       %cmp = cmp uge i32 %a, %b
7175   //       %cmp.frozen = freeze %cmp
7176   //       br i1 %cmp.frozen, label %select.true, label %select.false
7177   //    select.true:
7178   //       br label %select.end
7179   //    select.false:
7180   //       br label %select.end
7181   //    select.end:
7182   //       %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ]
7183   //
7184   // %cmp should be frozen, otherwise it may introduce undefined behavior.
7185   // In addition, we may sink instructions that produce %c or %d from
7186   // the entry block into the destination(s) of the new branch.
7187   // If the true or false blocks do not contain a sunken instruction, that
7188   // block and its branch may be optimized away. In that case, one side of the
7189   // first branch will point directly to select.end, and the corresponding PHI
7190   // predecessor block will be the start block.
7191 
7192   // Collect values that go on the true side and the values that go on the false
7193   // side.
7194   SmallVector<Instruction *> TrueInstrs, FalseInstrs;
7195   for (SelectInst *SI : ASI) {
7196     if (Value *V = SI->getTrueValue(); sinkSelectOperand(TTI, V))
7197       TrueInstrs.push_back(cast<Instruction>(V));
7198     if (Value *V = SI->getFalseValue(); sinkSelectOperand(TTI, V))
7199       FalseInstrs.push_back(cast<Instruction>(V));
7200   }
7201 
7202   // Split the select block, according to how many (if any) values go on each
7203   // side.
7204   BasicBlock *StartBlock = SI->getParent();
7205   BasicBlock::iterator SplitPt = std::next(BasicBlock::iterator(LastSI));
7206   // We should split before any debug-info.
7207   SplitPt.setHeadBit(true);
7208 
7209   IRBuilder<> IB(SI);
7210   auto *CondFr = IB.CreateFreeze(SI->getCondition(), SI->getName() + ".frozen");
7211 
7212   BasicBlock *TrueBlock = nullptr;
7213   BasicBlock *FalseBlock = nullptr;
7214   BasicBlock *EndBlock = nullptr;
7215   BranchInst *TrueBranch = nullptr;
7216   BranchInst *FalseBranch = nullptr;
7217   if (TrueInstrs.size() == 0) {
7218     FalseBranch = cast<BranchInst>(SplitBlockAndInsertIfElse(
7219         CondFr, SplitPt, false, nullptr, nullptr, LI));
7220     FalseBlock = FalseBranch->getParent();
7221     EndBlock = cast<BasicBlock>(FalseBranch->getOperand(0));
7222   } else if (FalseInstrs.size() == 0) {
7223     TrueBranch = cast<BranchInst>(SplitBlockAndInsertIfThen(
7224         CondFr, SplitPt, false, nullptr, nullptr, LI));
7225     TrueBlock = TrueBranch->getParent();
7226     EndBlock = cast<BasicBlock>(TrueBranch->getOperand(0));
7227   } else {
7228     Instruction *ThenTerm = nullptr;
7229     Instruction *ElseTerm = nullptr;
7230     SplitBlockAndInsertIfThenElse(CondFr, SplitPt, &ThenTerm, &ElseTerm,
7231                                   nullptr, nullptr, LI);
7232     TrueBranch = cast<BranchInst>(ThenTerm);
7233     FalseBranch = cast<BranchInst>(ElseTerm);
7234     TrueBlock = TrueBranch->getParent();
7235     FalseBlock = FalseBranch->getParent();
7236     EndBlock = cast<BasicBlock>(TrueBranch->getOperand(0));
7237   }
7238 
7239   EndBlock->setName("select.end");
7240   if (TrueBlock)
7241     TrueBlock->setName("select.true.sink");
7242   if (FalseBlock)
7243     FalseBlock->setName(FalseInstrs.size() == 0 ? "select.false"
7244                                                 : "select.false.sink");
7245 
7246   if (IsHugeFunc) {
7247     if (TrueBlock)
7248       FreshBBs.insert(TrueBlock);
7249     if (FalseBlock)
7250       FreshBBs.insert(FalseBlock);
7251     FreshBBs.insert(EndBlock);
7252   }
7253 
7254   BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock));
7255 
7256   static const unsigned MD[] = {
7257       LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
7258       LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
7259   StartBlock->getTerminator()->copyMetadata(*SI, MD);
7260 
7261   // Sink expensive instructions into the conditional blocks to avoid executing
7262   // them speculatively.
7263   for (Instruction *I : TrueInstrs)
7264     I->moveBefore(TrueBranch);
7265   for (Instruction *I : FalseInstrs)
7266     I->moveBefore(FalseBranch);
7267 
7268   // If we did not create a new block for one of the 'true' or 'false' paths
7269   // of the condition, it means that side of the branch goes to the end block
7270   // directly and the path originates from the start block from the point of
7271   // view of the new PHI.
7272   if (TrueBlock == nullptr)
7273     TrueBlock = StartBlock;
7274   else if (FalseBlock == nullptr)
7275     FalseBlock = StartBlock;
7276 
7277   SmallPtrSet<const Instruction *, 2> INS;
7278   INS.insert(ASI.begin(), ASI.end());
7279   // Use reverse iterator because later select may use the value of the
7280   // earlier select, and we need to propagate value through earlier select
7281   // to get the PHI operand.
7282   for (SelectInst *SI : llvm::reverse(ASI)) {
7283     // The select itself is replaced with a PHI Node.
7284     PHINode *PN = PHINode::Create(SI->getType(), 2, "");
7285     PN->insertBefore(EndBlock->begin());
7286     PN->takeName(SI);
7287     PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock);
7288     PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock);
7289     PN->setDebugLoc(SI->getDebugLoc());
7290 
7291     replaceAllUsesWith(SI, PN, FreshBBs, IsHugeFunc);
7292     SI->eraseFromParent();
7293     INS.erase(SI);
7294     ++NumSelectsExpanded;
7295   }
7296 
7297   // Instruct OptimizeBlock to skip to the next block.
7298   CurInstIterator = StartBlock->end();
7299   return true;
7300 }
7301 
7302 /// Some targets only accept certain types for splat inputs. For example a VDUP
7303 /// in MVE takes a GPR (integer) register, and the instruction that incorporate
7304 /// a VDUP (such as a VADD qd, qm, rm) also require a gpr register.
7305 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
7306   // Accept shuf(insertelem(undef/poison, val, 0), undef/poison, <0,0,..>) only
7307   if (!match(SVI, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
7308                             m_Undef(), m_ZeroMask())))
7309     return false;
7310   Type *NewType = TLI->shouldConvertSplatType(SVI);
7311   if (!NewType)
7312     return false;
7313 
7314   auto *SVIVecType = cast<FixedVectorType>(SVI->getType());
7315   assert(!NewType->isVectorTy() && "Expected a scalar type!");
7316   assert(NewType->getScalarSizeInBits() == SVIVecType->getScalarSizeInBits() &&
7317          "Expected a type of the same size!");
7318   auto *NewVecType =
7319       FixedVectorType::get(NewType, SVIVecType->getNumElements());
7320 
7321   // Create a bitcast (shuffle (insert (bitcast(..))))
7322   IRBuilder<> Builder(SVI->getContext());
7323   Builder.SetInsertPoint(SVI);
7324   Value *BC1 = Builder.CreateBitCast(
7325       cast<Instruction>(SVI->getOperand(0))->getOperand(1), NewType);
7326   Value *Shuffle = Builder.CreateVectorSplat(NewVecType->getNumElements(), BC1);
7327   Value *BC2 = Builder.CreateBitCast(Shuffle, SVIVecType);
7328 
7329   replaceAllUsesWith(SVI, BC2, FreshBBs, IsHugeFunc);
7330   RecursivelyDeleteTriviallyDeadInstructions(
7331       SVI, TLInfo, nullptr,
7332       [&](Value *V) { removeAllAssertingVHReferences(V); });
7333 
7334   // Also hoist the bitcast up to its operand if it they are not in the same
7335   // block.
7336   if (auto *BCI = dyn_cast<Instruction>(BC1))
7337     if (auto *Op = dyn_cast<Instruction>(BCI->getOperand(0)))
7338       if (BCI->getParent() != Op->getParent() && !isa<PHINode>(Op) &&
7339           !Op->isTerminator() && !Op->isEHPad())
7340         BCI->moveAfter(Op);
7341 
7342   return true;
7343 }
7344 
7345 bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
7346   // If the operands of I can be folded into a target instruction together with
7347   // I, duplicate and sink them.
7348   SmallVector<Use *, 4> OpsToSink;
7349   if (!TLI->shouldSinkOperands(I, OpsToSink))
7350     return false;
7351 
7352   // OpsToSink can contain multiple uses in a use chain (e.g.
7353   // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating
7354   // uses must come first, so we process the ops in reverse order so as to not
7355   // create invalid IR.
7356   BasicBlock *TargetBB = I->getParent();
7357   bool Changed = false;
7358   SmallVector<Use *, 4> ToReplace;
7359   Instruction *InsertPoint = I;
7360   DenseMap<const Instruction *, unsigned long> InstOrdering;
7361   unsigned long InstNumber = 0;
7362   for (const auto &I : *TargetBB)
7363     InstOrdering[&I] = InstNumber++;
7364 
7365   for (Use *U : reverse(OpsToSink)) {
7366     auto *UI = cast<Instruction>(U->get());
7367     if (isa<PHINode>(UI))
7368       continue;
7369     if (UI->getParent() == TargetBB) {
7370       if (InstOrdering[UI] < InstOrdering[InsertPoint])
7371         InsertPoint = UI;
7372       continue;
7373     }
7374     ToReplace.push_back(U);
7375   }
7376 
7377   SetVector<Instruction *> MaybeDead;
7378   DenseMap<Instruction *, Instruction *> NewInstructions;
7379   for (Use *U : ToReplace) {
7380     auto *UI = cast<Instruction>(U->get());
7381     Instruction *NI = UI->clone();
7382 
7383     if (IsHugeFunc) {
7384       // Now we clone an instruction, its operands' defs may sink to this BB
7385       // now. So we put the operands defs' BBs into FreshBBs to do optimization.
7386       for (Value *Op : NI->operands())
7387         if (auto *OpDef = dyn_cast<Instruction>(Op))
7388           FreshBBs.insert(OpDef->getParent());
7389     }
7390 
7391     NewInstructions[UI] = NI;
7392     MaybeDead.insert(UI);
7393     LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n");
7394     NI->insertBefore(InsertPoint);
7395     InsertPoint = NI;
7396     InsertedInsts.insert(NI);
7397 
7398     // Update the use for the new instruction, making sure that we update the
7399     // sunk instruction uses, if it is part of a chain that has already been
7400     // sunk.
7401     Instruction *OldI = cast<Instruction>(U->getUser());
7402     if (NewInstructions.count(OldI))
7403       NewInstructions[OldI]->setOperand(U->getOperandNo(), NI);
7404     else
7405       U->set(NI);
7406     Changed = true;
7407   }
7408 
7409   // Remove instructions that are dead after sinking.
7410   for (auto *I : MaybeDead) {
7411     if (!I->hasNUsesOrMore(1)) {
7412       LLVM_DEBUG(dbgs() << "Removing dead instruction: " << *I << "\n");
7413       I->eraseFromParent();
7414     }
7415   }
7416 
7417   return Changed;
7418 }
7419 
7420 bool CodeGenPrepare::optimizeSwitchType(SwitchInst *SI) {
7421   Value *Cond = SI->getCondition();
7422   Type *OldType = Cond->getType();
7423   LLVMContext &Context = Cond->getContext();
7424   EVT OldVT = TLI->getValueType(*DL, OldType);
7425   MVT RegType = TLI->getPreferredSwitchConditionType(Context, OldVT);
7426   unsigned RegWidth = RegType.getSizeInBits();
7427 
7428   if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
7429     return false;
7430 
7431   // If the register width is greater than the type width, expand the condition
7432   // of the switch instruction and each case constant to the width of the
7433   // register. By widening the type of the switch condition, subsequent
7434   // comparisons (for case comparisons) will not need to be extended to the
7435   // preferred register width, so we will potentially eliminate N-1 extends,
7436   // where N is the number of cases in the switch.
7437   auto *NewType = Type::getIntNTy(Context, RegWidth);
7438 
7439   // Extend the switch condition and case constants using the target preferred
7440   // extend unless the switch condition is a function argument with an extend
7441   // attribute. In that case, we can avoid an unnecessary mask/extension by
7442   // matching the argument extension instead.
7443   Instruction::CastOps ExtType = Instruction::ZExt;
7444   // Some targets prefer SExt over ZExt.
7445   if (TLI->isSExtCheaperThanZExt(OldVT, RegType))
7446     ExtType = Instruction::SExt;
7447 
7448   if (auto *Arg = dyn_cast<Argument>(Cond)) {
7449     if (Arg->hasSExtAttr())
7450       ExtType = Instruction::SExt;
7451     if (Arg->hasZExtAttr())
7452       ExtType = Instruction::ZExt;
7453   }
7454 
7455   auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
7456   ExtInst->insertBefore(SI);
7457   ExtInst->setDebugLoc(SI->getDebugLoc());
7458   SI->setCondition(ExtInst);
7459   for (auto Case : SI->cases()) {
7460     const APInt &NarrowConst = Case.getCaseValue()->getValue();
7461     APInt WideConst = (ExtType == Instruction::ZExt)
7462                           ? NarrowConst.zext(RegWidth)
7463                           : NarrowConst.sext(RegWidth);
7464     Case.setValue(ConstantInt::get(Context, WideConst));
7465   }
7466 
7467   return true;
7468 }
7469 
7470 bool CodeGenPrepare::optimizeSwitchPhiConstants(SwitchInst *SI) {
7471   // The SCCP optimization tends to produce code like this:
7472   //   switch(x) { case 42: phi(42, ...) }
7473   // Materializing the constant for the phi-argument needs instructions; So we
7474   // change the code to:
7475   //   switch(x) { case 42: phi(x, ...) }
7476 
7477   Value *Condition = SI->getCondition();
7478   // Avoid endless loop in degenerate case.
7479   if (isa<ConstantInt>(*Condition))
7480     return false;
7481 
7482   bool Changed = false;
7483   BasicBlock *SwitchBB = SI->getParent();
7484   Type *ConditionType = Condition->getType();
7485 
7486   for (const SwitchInst::CaseHandle &Case : SI->cases()) {
7487     ConstantInt *CaseValue = Case.getCaseValue();
7488     BasicBlock *CaseBB = Case.getCaseSuccessor();
7489     // Set to true if we previously checked that `CaseBB` is only reached by
7490     // a single case from this switch.
7491     bool CheckedForSinglePred = false;
7492     for (PHINode &PHI : CaseBB->phis()) {
7493       Type *PHIType = PHI.getType();
7494       // If ZExt is free then we can also catch patterns like this:
7495       //   switch((i32)x) { case 42: phi((i64)42, ...); }
7496       // and replace `(i64)42` with `zext i32 %x to i64`.
7497       bool TryZExt =
7498           PHIType->isIntegerTy() &&
7499           PHIType->getIntegerBitWidth() > ConditionType->getIntegerBitWidth() &&
7500           TLI->isZExtFree(ConditionType, PHIType);
7501       if (PHIType == ConditionType || TryZExt) {
7502         // Set to true to skip this case because of multiple preds.
7503         bool SkipCase = false;
7504         Value *Replacement = nullptr;
7505         for (unsigned I = 0, E = PHI.getNumIncomingValues(); I != E; I++) {
7506           Value *PHIValue = PHI.getIncomingValue(I);
7507           if (PHIValue != CaseValue) {
7508             if (!TryZExt)
7509               continue;
7510             ConstantInt *PHIValueInt = dyn_cast<ConstantInt>(PHIValue);
7511             if (!PHIValueInt ||
7512                 PHIValueInt->getValue() !=
7513                     CaseValue->getValue().zext(PHIType->getIntegerBitWidth()))
7514               continue;
7515           }
7516           if (PHI.getIncomingBlock(I) != SwitchBB)
7517             continue;
7518           // We cannot optimize if there are multiple case labels jumping to
7519           // this block.  This check may get expensive when there are many
7520           // case labels so we test for it last.
7521           if (!CheckedForSinglePred) {
7522             CheckedForSinglePred = true;
7523             if (SI->findCaseDest(CaseBB) == nullptr) {
7524               SkipCase = true;
7525               break;
7526             }
7527           }
7528 
7529           if (Replacement == nullptr) {
7530             if (PHIValue == CaseValue) {
7531               Replacement = Condition;
7532             } else {
7533               IRBuilder<> Builder(SI);
7534               Replacement = Builder.CreateZExt(Condition, PHIType);
7535             }
7536           }
7537           PHI.setIncomingValue(I, Replacement);
7538           Changed = true;
7539         }
7540         if (SkipCase)
7541           break;
7542       }
7543     }
7544   }
7545   return Changed;
7546 }
7547 
7548 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
7549   bool Changed = optimizeSwitchType(SI);
7550   Changed |= optimizeSwitchPhiConstants(SI);
7551   return Changed;
7552 }
7553 
7554 namespace {
7555 
7556 /// Helper class to promote a scalar operation to a vector one.
7557 /// This class is used to move downward extractelement transition.
7558 /// E.g.,
7559 /// a = vector_op <2 x i32>
7560 /// b = extractelement <2 x i32> a, i32 0
7561 /// c = scalar_op b
7562 /// store c
7563 ///
7564 /// =>
7565 /// a = vector_op <2 x i32>
7566 /// c = vector_op a (equivalent to scalar_op on the related lane)
7567 /// * d = extractelement <2 x i32> c, i32 0
7568 /// * store d
7569 /// Assuming both extractelement and store can be combine, we get rid of the
7570 /// transition.
7571 class VectorPromoteHelper {
7572   /// DataLayout associated with the current module.
7573   const DataLayout &DL;
7574 
7575   /// Used to perform some checks on the legality of vector operations.
7576   const TargetLowering &TLI;
7577 
7578   /// Used to estimated the cost of the promoted chain.
7579   const TargetTransformInfo &TTI;
7580 
7581   /// The transition being moved downwards.
7582   Instruction *Transition;
7583 
7584   /// The sequence of instructions to be promoted.
7585   SmallVector<Instruction *, 4> InstsToBePromoted;
7586 
7587   /// Cost of combining a store and an extract.
7588   unsigned StoreExtractCombineCost;
7589 
7590   /// Instruction that will be combined with the transition.
7591   Instruction *CombineInst = nullptr;
7592 
7593   /// The instruction that represents the current end of the transition.
7594   /// Since we are faking the promotion until we reach the end of the chain
7595   /// of computation, we need a way to get the current end of the transition.
7596   Instruction *getEndOfTransition() const {
7597     if (InstsToBePromoted.empty())
7598       return Transition;
7599     return InstsToBePromoted.back();
7600   }
7601 
7602   /// Return the index of the original value in the transition.
7603   /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
7604   /// c, is at index 0.
7605   unsigned getTransitionOriginalValueIdx() const {
7606     assert(isa<ExtractElementInst>(Transition) &&
7607            "Other kind of transitions are not supported yet");
7608     return 0;
7609   }
7610 
7611   /// Return the index of the index in the transition.
7612   /// E.g., for "extractelement <2 x i32> c, i32 0" the index
7613   /// is at index 1.
7614   unsigned getTransitionIdx() const {
7615     assert(isa<ExtractElementInst>(Transition) &&
7616            "Other kind of transitions are not supported yet");
7617     return 1;
7618   }
7619 
7620   /// Get the type of the transition.
7621   /// This is the type of the original value.
7622   /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
7623   /// transition is <2 x i32>.
7624   Type *getTransitionType() const {
7625     return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
7626   }
7627 
7628   /// Promote \p ToBePromoted by moving \p Def downward through.
7629   /// I.e., we have the following sequence:
7630   /// Def = Transition <ty1> a to <ty2>
7631   /// b = ToBePromoted <ty2> Def, ...
7632   /// =>
7633   /// b = ToBePromoted <ty1> a, ...
7634   /// Def = Transition <ty1> ToBePromoted to <ty2>
7635   void promoteImpl(Instruction *ToBePromoted);
7636 
7637   /// Check whether or not it is profitable to promote all the
7638   /// instructions enqueued to be promoted.
7639   bool isProfitableToPromote() {
7640     Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
7641     unsigned Index = isa<ConstantInt>(ValIdx)
7642                          ? cast<ConstantInt>(ValIdx)->getZExtValue()
7643                          : -1;
7644     Type *PromotedType = getTransitionType();
7645 
7646     StoreInst *ST = cast<StoreInst>(CombineInst);
7647     unsigned AS = ST->getPointerAddressSpace();
7648     // Check if this store is supported.
7649     if (!TLI.allowsMisalignedMemoryAccesses(
7650             TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
7651             ST->getAlign())) {
7652       // If this is not supported, there is no way we can combine
7653       // the extract with the store.
7654       return false;
7655     }
7656 
7657     // The scalar chain of computation has to pay for the transition
7658     // scalar to vector.
7659     // The vector chain has to account for the combining cost.
7660     enum TargetTransformInfo::TargetCostKind CostKind =
7661         TargetTransformInfo::TCK_RecipThroughput;
7662     InstructionCost ScalarCost =
7663         TTI.getVectorInstrCost(*Transition, PromotedType, CostKind, Index);
7664     InstructionCost VectorCost = StoreExtractCombineCost;
7665     for (const auto &Inst : InstsToBePromoted) {
7666       // Compute the cost.
7667       // By construction, all instructions being promoted are arithmetic ones.
7668       // Moreover, one argument is a constant that can be viewed as a splat
7669       // constant.
7670       Value *Arg0 = Inst->getOperand(0);
7671       bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
7672                             isa<ConstantFP>(Arg0);
7673       TargetTransformInfo::OperandValueInfo Arg0Info, Arg1Info;
7674       if (IsArg0Constant)
7675         Arg0Info.Kind = TargetTransformInfo::OK_UniformConstantValue;
7676       else
7677         Arg1Info.Kind = TargetTransformInfo::OK_UniformConstantValue;
7678 
7679       ScalarCost += TTI.getArithmeticInstrCost(
7680           Inst->getOpcode(), Inst->getType(), CostKind, Arg0Info, Arg1Info);
7681       VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
7682                                                CostKind, Arg0Info, Arg1Info);
7683     }
7684     LLVM_DEBUG(
7685         dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
7686                << ScalarCost << "\nVector: " << VectorCost << '\n');
7687     return ScalarCost > VectorCost;
7688   }
7689 
7690   /// Generate a constant vector with \p Val with the same
7691   /// number of elements as the transition.
7692   /// \p UseSplat defines whether or not \p Val should be replicated
7693   /// across the whole vector.
7694   /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
7695   /// otherwise we generate a vector with as many undef as possible:
7696   /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
7697   /// used at the index of the extract.
7698   Value *getConstantVector(Constant *Val, bool UseSplat) const {
7699     unsigned ExtractIdx = std::numeric_limits<unsigned>::max();
7700     if (!UseSplat) {
7701       // If we cannot determine where the constant must be, we have to
7702       // use a splat constant.
7703       Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
7704       if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
7705         ExtractIdx = CstVal->getSExtValue();
7706       else
7707         UseSplat = true;
7708     }
7709 
7710     ElementCount EC = cast<VectorType>(getTransitionType())->getElementCount();
7711     if (UseSplat)
7712       return ConstantVector::getSplat(EC, Val);
7713 
7714     if (!EC.isScalable()) {
7715       SmallVector<Constant *, 4> ConstVec;
7716       UndefValue *UndefVal = UndefValue::get(Val->getType());
7717       for (unsigned Idx = 0; Idx != EC.getKnownMinValue(); ++Idx) {
7718         if (Idx == ExtractIdx)
7719           ConstVec.push_back(Val);
7720         else
7721           ConstVec.push_back(UndefVal);
7722       }
7723       return ConstantVector::get(ConstVec);
7724     } else
7725       llvm_unreachable(
7726           "Generate scalable vector for non-splat is unimplemented");
7727   }
7728 
7729   /// Check if promoting to a vector type an operand at \p OperandIdx
7730   /// in \p Use can trigger undefined behavior.
7731   static bool canCauseUndefinedBehavior(const Instruction *Use,
7732                                         unsigned OperandIdx) {
7733     // This is not safe to introduce undef when the operand is on
7734     // the right hand side of a division-like instruction.
7735     if (OperandIdx != 1)
7736       return false;
7737     switch (Use->getOpcode()) {
7738     default:
7739       return false;
7740     case Instruction::SDiv:
7741     case Instruction::UDiv:
7742     case Instruction::SRem:
7743     case Instruction::URem:
7744       return true;
7745     case Instruction::FDiv:
7746     case Instruction::FRem:
7747       return !Use->hasNoNaNs();
7748     }
7749     llvm_unreachable(nullptr);
7750   }
7751 
7752 public:
7753   VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
7754                       const TargetTransformInfo &TTI, Instruction *Transition,
7755                       unsigned CombineCost)
7756       : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
7757         StoreExtractCombineCost(CombineCost) {
7758     assert(Transition && "Do not know how to promote null");
7759   }
7760 
7761   /// Check if we can promote \p ToBePromoted to \p Type.
7762   bool canPromote(const Instruction *ToBePromoted) const {
7763     // We could support CastInst too.
7764     return isa<BinaryOperator>(ToBePromoted);
7765   }
7766 
7767   /// Check if it is profitable to promote \p ToBePromoted
7768   /// by moving downward the transition through.
7769   bool shouldPromote(const Instruction *ToBePromoted) const {
7770     // Promote only if all the operands can be statically expanded.
7771     // Indeed, we do not want to introduce any new kind of transitions.
7772     for (const Use &U : ToBePromoted->operands()) {
7773       const Value *Val = U.get();
7774       if (Val == getEndOfTransition()) {
7775         // If the use is a division and the transition is on the rhs,
7776         // we cannot promote the operation, otherwise we may create a
7777         // division by zero.
7778         if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
7779           return false;
7780         continue;
7781       }
7782       if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
7783           !isa<ConstantFP>(Val))
7784         return false;
7785     }
7786     // Check that the resulting operation is legal.
7787     int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
7788     if (!ISDOpcode)
7789       return false;
7790     return StressStoreExtract ||
7791            TLI.isOperationLegalOrCustom(
7792                ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
7793   }
7794 
7795   /// Check whether or not \p Use can be combined
7796   /// with the transition.
7797   /// I.e., is it possible to do Use(Transition) => AnotherUse?
7798   bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }
7799 
7800   /// Record \p ToBePromoted as part of the chain to be promoted.
7801   void enqueueForPromotion(Instruction *ToBePromoted) {
7802     InstsToBePromoted.push_back(ToBePromoted);
7803   }
7804 
7805   /// Set the instruction that will be combined with the transition.
7806   void recordCombineInstruction(Instruction *ToBeCombined) {
7807     assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
7808     CombineInst = ToBeCombined;
7809   }
7810 
7811   /// Promote all the instructions enqueued for promotion if it is
7812   /// is profitable.
7813   /// \return True if the promotion happened, false otherwise.
7814   bool promote() {
7815     // Check if there is something to promote.
7816     // Right now, if we do not have anything to combine with,
7817     // we assume the promotion is not profitable.
7818     if (InstsToBePromoted.empty() || !CombineInst)
7819       return false;
7820 
7821     // Check cost.
7822     if (!StressStoreExtract && !isProfitableToPromote())
7823       return false;
7824 
7825     // Promote.
7826     for (auto &ToBePromoted : InstsToBePromoted)
7827       promoteImpl(ToBePromoted);
7828     InstsToBePromoted.clear();
7829     return true;
7830   }
7831 };
7832 
7833 } // end anonymous namespace
7834 
7835 void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
7836   // At this point, we know that all the operands of ToBePromoted but Def
7837   // can be statically promoted.
7838   // For Def, we need to use its parameter in ToBePromoted:
7839   // b = ToBePromoted ty1 a
7840   // Def = Transition ty1 b to ty2
7841   // Move the transition down.
7842   // 1. Replace all uses of the promoted operation by the transition.
7843   // = ... b => = ... Def.
7844   assert(ToBePromoted->getType() == Transition->getType() &&
7845          "The type of the result of the transition does not match "
7846          "the final type");
7847   ToBePromoted->replaceAllUsesWith(Transition);
7848   // 2. Update the type of the uses.
7849   // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
7850   Type *TransitionTy = getTransitionType();
7851   ToBePromoted->mutateType(TransitionTy);
7852   // 3. Update all the operands of the promoted operation with promoted
7853   // operands.
7854   // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
7855   for (Use &U : ToBePromoted->operands()) {
7856     Value *Val = U.get();
7857     Value *NewVal = nullptr;
7858     if (Val == Transition)
7859       NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
7860     else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
7861              isa<ConstantFP>(Val)) {
7862       // Use a splat constant if it is not safe to use undef.
7863       NewVal = getConstantVector(
7864           cast<Constant>(Val),
7865           isa<UndefValue>(Val) ||
7866               canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
7867     } else
7868       llvm_unreachable("Did you modified shouldPromote and forgot to update "
7869                        "this?");
7870     ToBePromoted->setOperand(U.getOperandNo(), NewVal);
7871   }
7872   Transition->moveAfter(ToBePromoted);
7873   Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
7874 }
7875 
7876 /// Some targets can do store(extractelement) with one instruction.
7877 /// Try to push the extractelement towards the stores when the target
7878 /// has this feature and this is profitable.
7879 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
7880   unsigned CombineCost = std::numeric_limits<unsigned>::max();
7881   if (DisableStoreExtract ||
7882       (!StressStoreExtract &&
7883        !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
7884                                        Inst->getOperand(1), CombineCost)))
7885     return false;
7886 
7887   // At this point we know that Inst is a vector to scalar transition.
7888   // Try to move it down the def-use chain, until:
7889   // - We can combine the transition with its single use
7890   //   => we got rid of the transition.
7891   // - We escape the current basic block
7892   //   => we would need to check that we are moving it at a cheaper place and
7893   //      we do not do that for now.
7894   BasicBlock *Parent = Inst->getParent();
7895   LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
7896   VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
7897   // If the transition has more than one use, assume this is not going to be
7898   // beneficial.
7899   while (Inst->hasOneUse()) {
7900     Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
7901     LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');
7902 
7903     if (ToBePromoted->getParent() != Parent) {
7904       LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block ("
7905                         << ToBePromoted->getParent()->getName()
7906                         << ") than the transition (" << Parent->getName()
7907                         << ").\n");
7908       return false;
7909     }
7910 
7911     if (VPH.canCombine(ToBePromoted)) {
7912       LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n'
7913                         << "will be combined with: " << *ToBePromoted << '\n');
7914       VPH.recordCombineInstruction(ToBePromoted);
7915       bool Changed = VPH.promote();
7916       NumStoreExtractExposed += Changed;
7917       return Changed;
7918     }
7919 
7920     LLVM_DEBUG(dbgs() << "Try promoting.\n");
7921     if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
7922       return false;
7923 
7924     LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");
7925 
7926     VPH.enqueueForPromotion(ToBePromoted);
7927     Inst = ToBePromoted;
7928   }
7929   return false;
7930 }
7931 
7932 /// For the instruction sequence of store below, F and I values
7933 /// are bundled together as an i64 value before being stored into memory.
7934 /// Sometimes it is more efficient to generate separate stores for F and I,
7935 /// which can remove the bitwise instructions or sink them to colder places.
7936 ///
7937 ///   (store (or (zext (bitcast F to i32) to i64),
7938 ///              (shl (zext I to i64), 32)), addr)  -->
7939 ///   (store F, addr) and (store I, addr+4)
7940 ///
7941 /// Similarly, splitting for other merged store can also be beneficial, like:
7942 /// For pair of {i32, i32}, i64 store --> two i32 stores.
7943 /// For pair of {i32, i16}, i64 store --> two i32 stores.
7944 /// For pair of {i16, i16}, i32 store --> two i16 stores.
7945 /// For pair of {i16, i8},  i32 store --> two i16 stores.
7946 /// For pair of {i8, i8},   i16 store --> two i8 stores.
7947 ///
7948 /// We allow each target to determine specifically which kind of splitting is
7949 /// supported.
7950 ///
7951 /// The store patterns are commonly seen from the simple code snippet below
7952 /// if only std::make_pair(...) is sroa transformed before inlined into hoo.
7953 ///   void goo(const std::pair<int, float> &);
7954 ///   hoo() {
7955 ///     ...
7956 ///     goo(std::make_pair(tmp, ftmp));
7957 ///     ...
7958 ///   }
7959 ///
7960 /// Although we already have similar splitting in DAG Combine, we duplicate
7961 /// it in CodeGenPrepare to catch the case in which pattern is across
7962 /// multiple BBs. The logic in DAG Combine is kept to catch case generated
7963 /// during code expansion.
7964 static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
7965                                 const TargetLowering &TLI) {
7966   // Handle simple but common cases only.
7967   Type *StoreType = SI.getValueOperand()->getType();
7968 
7969   // The code below assumes shifting a value by <number of bits>,
7970   // whereas scalable vectors would have to be shifted by
7971   // <2log(vscale) + number of bits> in order to store the
7972   // low/high parts. Bailing out for now.
7973   if (StoreType->isScalableTy())
7974     return false;
7975 
7976   if (!DL.typeSizeEqualsStoreSize(StoreType) ||
7977       DL.getTypeSizeInBits(StoreType) == 0)
7978     return false;
7979 
7980   unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
7981   Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
7982   if (!DL.typeSizeEqualsStoreSize(SplitStoreType))
7983     return false;
7984 
7985   // Don't split the store if it is volatile.
7986   if (SI.isVolatile())
7987     return false;
7988 
7989   // Match the following patterns:
7990   // (store (or (zext LValue to i64),
7991   //            (shl (zext HValue to i64), 32)), HalfValBitSize)
7992   //  or
7993   // (store (or (shl (zext HValue to i64), 32)), HalfValBitSize)
7994   //            (zext LValue to i64),
7995   // Expect both operands of OR and the first operand of SHL have only
7996   // one use.
7997   Value *LValue, *HValue;
7998   if (!match(SI.getValueOperand(),
7999              m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
8000                     m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
8001                                    m_SpecificInt(HalfValBitSize))))))
8002     return false;
8003 
8004   // Check LValue and HValue are int with size less or equal than 32.
8005   if (!LValue->getType()->isIntegerTy() ||
8006       DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
8007       !HValue->getType()->isIntegerTy() ||
8008       DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
8009     return false;
8010 
8011   // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
8012   // as the input of target query.
8013   auto *LBC = dyn_cast<BitCastInst>(LValue);
8014   auto *HBC = dyn_cast<BitCastInst>(HValue);
8015   EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
8016                   : EVT::getEVT(LValue->getType());
8017   EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
8018                    : EVT::getEVT(HValue->getType());
8019   if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
8020     return false;
8021 
8022   // Start to split store.
8023   IRBuilder<> Builder(SI.getContext());
8024   Builder.SetInsertPoint(&SI);
8025 
8026   // If LValue/HValue is a bitcast in another BB, create a new one in current
8027   // BB so it may be merged with the splitted stores by dag combiner.
8028   if (LBC && LBC->getParent() != SI.getParent())
8029     LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
8030   if (HBC && HBC->getParent() != SI.getParent())
8031     HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());
8032 
8033   bool IsLE = SI.getDataLayout().isLittleEndian();
8034   auto CreateSplitStore = [&](Value *V, bool Upper) {
8035     V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
8036     Value *Addr = SI.getPointerOperand();
8037     Align Alignment = SI.getAlign();
8038     const bool IsOffsetStore = (IsLE && Upper) || (!IsLE && !Upper);
8039     if (IsOffsetStore) {
8040       Addr = Builder.CreateGEP(
8041           SplitStoreType, Addr,
8042           ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
8043 
8044       // When splitting the store in half, naturally one half will retain the
8045       // alignment of the original wider store, regardless of whether it was
8046       // over-aligned or not, while the other will require adjustment.
8047       Alignment = commonAlignment(Alignment, HalfValBitSize / 8);
8048     }
8049     Builder.CreateAlignedStore(V, Addr, Alignment);
8050   };
8051 
8052   CreateSplitStore(LValue, false);
8053   CreateSplitStore(HValue, true);
8054 
8055   // Delete the old store.
8056   SI.eraseFromParent();
8057   return true;
8058 }
8059 
8060 // Return true if the GEP has two operands, the first operand is of a sequential
8061 // type, and the second operand is a constant.
8062 static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) {
8063   gep_type_iterator I = gep_type_begin(*GEP);
8064   return GEP->getNumOperands() == 2 && I.isSequential() &&
8065          isa<ConstantInt>(GEP->getOperand(1));
8066 }
8067 
8068 // Try unmerging GEPs to reduce liveness interference (register pressure) across
8069 // IndirectBr edges. Since IndirectBr edges tend to touch on many blocks,
8070 // reducing liveness interference across those edges benefits global register
8071 // allocation. Currently handles only certain cases.
8072 //
8073 // For example, unmerge %GEPI and %UGEPI as below.
8074 //
8075 // ---------- BEFORE ----------
8076 // SrcBlock:
8077 //   ...
8078 //   %GEPIOp = ...
8079 //   ...
8080 //   %GEPI = gep %GEPIOp, Idx
8081 //   ...
8082 //   indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ]
8083 //   (* %GEPI is alive on the indirectbr edges due to other uses ahead)
8084 //   (* %GEPIOp is alive on the indirectbr edges only because of it's used by
8085 //   %UGEPI)
8086 //
8087 // DstB0: ... (there may be a gep similar to %UGEPI to be unmerged)
8088 // DstB1: ... (there may be a gep similar to %UGEPI to be unmerged)
8089 // ...
8090 //
8091 // DstBi:
8092 //   ...
8093 //   %UGEPI = gep %GEPIOp, UIdx
8094 // ...
8095 // ---------------------------
8096 //
8097 // ---------- AFTER ----------
8098 // SrcBlock:
8099 //   ... (same as above)
8100 //    (* %GEPI is still alive on the indirectbr edges)
8101 //    (* %GEPIOp is no longer alive on the indirectbr edges as a result of the
8102 //    unmerging)
8103 // ...
8104 //
8105 // DstBi:
8106 //   ...
8107 //   %UGEPI = gep %GEPI, (UIdx-Idx)
8108 //   ...
8109 // ---------------------------
8110 //
8111 // The register pressure on the IndirectBr edges is reduced because %GEPIOp is
8112 // no longer alive on them.
8113 //
8114 // We try to unmerge GEPs here in CodGenPrepare, as opposed to limiting merging
8115 // of GEPs in the first place in InstCombiner::visitGetElementPtrInst() so as
8116 // not to disable further simplications and optimizations as a result of GEP
8117 // merging.
8118 //
8119 // Note this unmerging may increase the length of the data flow critical path
8120 // (the path from %GEPIOp to %UGEPI would go through %GEPI), which is a tradeoff
8121 // between the register pressure and the length of data-flow critical
8122 // path. Restricting this to the uncommon IndirectBr case would minimize the
8123 // impact of potentially longer critical path, if any, and the impact on compile
8124 // time.
8125 static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
8126                                              const TargetTransformInfo *TTI) {
8127   BasicBlock *SrcBlock = GEPI->getParent();
8128   // Check that SrcBlock ends with an IndirectBr. If not, give up. The common
8129   // (non-IndirectBr) cases exit early here.
8130   if (!isa<IndirectBrInst>(SrcBlock->getTerminator()))
8131     return false;
8132   // Check that GEPI is a simple gep with a single constant index.
8133   if (!GEPSequentialConstIndexed(GEPI))
8134     return false;
8135   ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1));
8136   // Check that GEPI is a cheap one.
8137   if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType(),
8138                          TargetTransformInfo::TCK_SizeAndLatency) >
8139       TargetTransformInfo::TCC_Basic)
8140     return false;
8141   Value *GEPIOp = GEPI->getOperand(0);
8142   // Check that GEPIOp is an instruction that's also defined in SrcBlock.
8143   if (!isa<Instruction>(GEPIOp))
8144     return false;
8145   auto *GEPIOpI = cast<Instruction>(GEPIOp);
8146   if (GEPIOpI->getParent() != SrcBlock)
8147     return false;
8148   // Check that GEP is used outside the block, meaning it's alive on the
8149   // IndirectBr edge(s).
8150   if (llvm::none_of(GEPI->users(), [&](User *Usr) {
8151         if (auto *I = dyn_cast<Instruction>(Usr)) {
8152           if (I->getParent() != SrcBlock) {
8153             return true;
8154           }
8155         }
8156         return false;
8157       }))
8158     return false;
8159   // The second elements of the GEP chains to be unmerged.
8160   std::vector<GetElementPtrInst *> UGEPIs;
8161   // Check each user of GEPIOp to check if unmerging would make GEPIOp not alive
8162   // on IndirectBr edges.
8163   for (User *Usr : GEPIOp->users()) {
8164     if (Usr == GEPI)
8165       continue;
8166     // Check if Usr is an Instruction. If not, give up.
8167     if (!isa<Instruction>(Usr))
8168       return false;
8169     auto *UI = cast<Instruction>(Usr);
8170     // Check if Usr in the same block as GEPIOp, which is fine, skip.
8171     if (UI->getParent() == SrcBlock)
8172       continue;
8173     // Check if Usr is a GEP. If not, give up.
8174     if (!isa<GetElementPtrInst>(Usr))
8175       return false;
8176     auto *UGEPI = cast<GetElementPtrInst>(Usr);
8177     // Check if UGEPI is a simple gep with a single constant index and GEPIOp is
8178     // the pointer operand to it. If so, record it in the vector. If not, give
8179     // up.
8180     if (!GEPSequentialConstIndexed(UGEPI))
8181       return false;
8182     if (UGEPI->getOperand(0) != GEPIOp)
8183       return false;
8184     if (UGEPI->getSourceElementType() != GEPI->getSourceElementType())
8185       return false;
8186     if (GEPIIdx->getType() !=
8187         cast<ConstantInt>(UGEPI->getOperand(1))->getType())
8188       return false;
8189     ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
8190     if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType(),
8191                            TargetTransformInfo::TCK_SizeAndLatency) >
8192         TargetTransformInfo::TCC_Basic)
8193       return false;
8194     UGEPIs.push_back(UGEPI);
8195   }
8196   if (UGEPIs.size() == 0)
8197     return false;
8198   // Check the materializing cost of (Uidx-Idx).
8199   for (GetElementPtrInst *UGEPI : UGEPIs) {
8200     ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
8201     APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue();
8202     InstructionCost ImmCost = TTI->getIntImmCost(
8203         NewIdx, GEPIIdx->getType(), TargetTransformInfo::TCK_SizeAndLatency);
8204     if (ImmCost > TargetTransformInfo::TCC_Basic)
8205       return false;
8206   }
8207   // Now unmerge between GEPI and UGEPIs.
8208   for (GetElementPtrInst *UGEPI : UGEPIs) {
8209     UGEPI->setOperand(0, GEPI);
8210     ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
8211     Constant *NewUGEPIIdx = ConstantInt::get(
8212         GEPIIdx->getType(), UGEPIIdx->getValue() - GEPIIdx->getValue());
8213     UGEPI->setOperand(1, NewUGEPIIdx);
8214     // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not
8215     // inbounds to avoid UB.
8216     if (!GEPI->isInBounds()) {
8217       UGEPI->setIsInBounds(false);
8218     }
8219   }
8220   // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not
8221   // alive on IndirectBr edges).
8222   assert(llvm::none_of(GEPIOp->users(),
8223                        [&](User *Usr) {
8224                          return cast<Instruction>(Usr)->getParent() != SrcBlock;
8225                        }) &&
8226          "GEPIOp is used outside SrcBlock");
8227   return true;
8228 }
8229 
8230 static bool optimizeBranch(BranchInst *Branch, const TargetLowering &TLI,
8231                            SmallSet<BasicBlock *, 32> &FreshBBs,
8232                            bool IsHugeFunc) {
8233   // Try and convert
8234   //  %c = icmp ult %x, 8
8235   //  br %c, bla, blb
8236   //  %tc = lshr %x, 3
8237   // to
8238   //  %tc = lshr %x, 3
8239   //  %c = icmp eq %tc, 0
8240   //  br %c, bla, blb
8241   // Creating the cmp to zero can be better for the backend, especially if the
8242   // lshr produces flags that can be used automatically.
8243   if (!TLI.preferZeroCompareBranch() || !Branch->isConditional())
8244     return false;
8245 
8246   ICmpInst *Cmp = dyn_cast<ICmpInst>(Branch->getCondition());
8247   if (!Cmp || !isa<ConstantInt>(Cmp->getOperand(1)) || !Cmp->hasOneUse())
8248     return false;
8249 
8250   Value *X = Cmp->getOperand(0);
8251   APInt CmpC = cast<ConstantInt>(Cmp->getOperand(1))->getValue();
8252 
8253   for (auto *U : X->users()) {
8254     Instruction *UI = dyn_cast<Instruction>(U);
8255     // A quick dominance check
8256     if (!UI ||
8257         (UI->getParent() != Branch->getParent() &&
8258          UI->getParent() != Branch->getSuccessor(0) &&
8259          UI->getParent() != Branch->getSuccessor(1)) ||
8260         (UI->getParent() != Branch->getParent() &&
8261          !UI->getParent()->getSinglePredecessor()))
8262       continue;
8263 
8264     if (CmpC.isPowerOf2() && Cmp->getPredicate() == ICmpInst::ICMP_ULT &&
8265         match(UI, m_Shr(m_Specific(X), m_SpecificInt(CmpC.logBase2())))) {
8266       IRBuilder<> Builder(Branch);
8267       if (UI->getParent() != Branch->getParent())
8268         UI->moveBefore(Branch);
8269       UI->dropPoisonGeneratingFlags();
8270       Value *NewCmp = Builder.CreateCmp(ICmpInst::ICMP_EQ, UI,
8271                                         ConstantInt::get(UI->getType(), 0));
8272       LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
8273       LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n");
8274       replaceAllUsesWith(Cmp, NewCmp, FreshBBs, IsHugeFunc);
8275       return true;
8276     }
8277     if (Cmp->isEquality() &&
8278         (match(UI, m_Add(m_Specific(X), m_SpecificInt(-CmpC))) ||
8279          match(UI, m_Sub(m_Specific(X), m_SpecificInt(CmpC))))) {
8280       IRBuilder<> Builder(Branch);
8281       if (UI->getParent() != Branch->getParent())
8282         UI->moveBefore(Branch);
8283       UI->dropPoisonGeneratingFlags();
8284       Value *NewCmp = Builder.CreateCmp(Cmp->getPredicate(), UI,
8285                                         ConstantInt::get(UI->getType(), 0));
8286       LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
8287       LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n");
8288       replaceAllUsesWith(Cmp, NewCmp, FreshBBs, IsHugeFunc);
8289       return true;
8290     }
8291   }
8292   return false;
8293 }
8294 
8295 bool CodeGenPrepare::optimizeInst(Instruction *I, ModifyDT &ModifiedDT) {
8296   bool AnyChange = false;
8297   AnyChange = fixupDbgVariableRecordsOnInst(*I);
8298 
8299   // Bail out if we inserted the instruction to prevent optimizations from
8300   // stepping on each other's toes.
8301   if (InsertedInsts.count(I))
8302     return AnyChange;
8303 
8304   // TODO: Move into the switch on opcode below here.
8305   if (PHINode *P = dyn_cast<PHINode>(I)) {
8306     // It is possible for very late stage optimizations (such as SimplifyCFG)
8307     // to introduce PHI nodes too late to be cleaned up.  If we detect such a
8308     // trivial PHI, go ahead and zap it here.
8309     if (Value *V = simplifyInstruction(P, {*DL, TLInfo})) {
8310       LargeOffsetGEPMap.erase(P);
8311       replaceAllUsesWith(P, V, FreshBBs, IsHugeFunc);
8312       P->eraseFromParent();
8313       ++NumPHIsElim;
8314       return true;
8315     }
8316     return AnyChange;
8317   }
8318 
8319   if (CastInst *CI = dyn_cast<CastInst>(I)) {
8320     // If the source of the cast is a constant, then this should have
8321     // already been constant folded.  The only reason NOT to constant fold
8322     // it is if something (e.g. LSR) was careful to place the constant
8323     // evaluation in a block other than then one that uses it (e.g. to hoist
8324     // the address of globals out of a loop).  If this is the case, we don't
8325     // want to forward-subst the cast.
8326     if (isa<Constant>(CI->getOperand(0)))
8327       return AnyChange;
8328 
8329     if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
8330       return true;
8331 
8332     if ((isa<UIToFPInst>(I) || isa<SIToFPInst>(I) || isa<FPToUIInst>(I) ||
8333          isa<TruncInst>(I)) &&
8334         TLI->optimizeExtendOrTruncateConversion(
8335             I, LI->getLoopFor(I->getParent()), *TTI))
8336       return true;
8337 
8338     if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
8339       /// Sink a zext or sext into its user blocks if the target type doesn't
8340       /// fit in one register
8341       if (TLI->getTypeAction(CI->getContext(),
8342                              TLI->getValueType(*DL, CI->getType())) ==
8343           TargetLowering::TypeExpandInteger) {
8344         return SinkCast(CI);
8345       } else {
8346         if (TLI->optimizeExtendOrTruncateConversion(
8347                 I, LI->getLoopFor(I->getParent()), *TTI))
8348           return true;
8349 
8350         bool MadeChange = optimizeExt(I);
8351         return MadeChange | optimizeExtUses(I);
8352       }
8353     }
8354     return AnyChange;
8355   }
8356 
8357   if (auto *Cmp = dyn_cast<CmpInst>(I))
8358     if (optimizeCmp(Cmp, ModifiedDT))
8359       return true;
8360 
8361   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
8362     LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
8363     bool Modified = optimizeLoadExt(LI);
8364     unsigned AS = LI->getPointerAddressSpace();
8365     Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
8366     return Modified;
8367   }
8368 
8369   if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
8370     if (splitMergedValStore(*SI, *DL, *TLI))
8371       return true;
8372     SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
8373     unsigned AS = SI->getPointerAddressSpace();
8374     return optimizeMemoryInst(I, SI->getOperand(1),
8375                               SI->getOperand(0)->getType(), AS);
8376   }
8377 
8378   if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
8379     unsigned AS = RMW->getPointerAddressSpace();
8380     return optimizeMemoryInst(I, RMW->getPointerOperand(), RMW->getType(), AS);
8381   }
8382 
8383   if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
8384     unsigned AS = CmpX->getPointerAddressSpace();
8385     return optimizeMemoryInst(I, CmpX->getPointerOperand(),
8386                               CmpX->getCompareOperand()->getType(), AS);
8387   }
8388 
8389   BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);
8390 
8391   if (BinOp && BinOp->getOpcode() == Instruction::And && EnableAndCmpSinking &&
8392       sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts))
8393     return true;
8394 
8395   // TODO: Move this into the switch on opcode - it handles shifts already.
8396   if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
8397                 BinOp->getOpcode() == Instruction::LShr)) {
8398     ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
8399     if (CI && TLI->hasExtractBitsInsn())
8400       if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
8401         return true;
8402   }
8403 
8404   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
8405     if (GEPI->hasAllZeroIndices()) {
8406       /// The GEP operand must be a pointer, so must its result -> BitCast
8407       Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
8408                                         GEPI->getName(), GEPI->getIterator());
8409       NC->setDebugLoc(GEPI->getDebugLoc());
8410       replaceAllUsesWith(GEPI, NC, FreshBBs, IsHugeFunc);
8411       RecursivelyDeleteTriviallyDeadInstructions(
8412           GEPI, TLInfo, nullptr,
8413           [&](Value *V) { removeAllAssertingVHReferences(V); });
8414       ++NumGEPsElim;
8415       optimizeInst(NC, ModifiedDT);
8416       return true;
8417     }
8418     if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
8419       return true;
8420     }
8421   }
8422 
8423   if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) {
8424     // freeze(icmp a, const)) -> icmp (freeze a), const
8425     // This helps generate efficient conditional jumps.
8426     Instruction *CmpI = nullptr;
8427     if (ICmpInst *II = dyn_cast<ICmpInst>(FI->getOperand(0)))
8428       CmpI = II;
8429     else if (FCmpInst *F = dyn_cast<FCmpInst>(FI->getOperand(0)))
8430       CmpI = F->getFastMathFlags().none() ? F : nullptr;
8431 
8432     if (CmpI && CmpI->hasOneUse()) {
8433       auto Op0 = CmpI->getOperand(0), Op1 = CmpI->getOperand(1);
8434       bool Const0 = isa<ConstantInt>(Op0) || isa<ConstantFP>(Op0) ||
8435                     isa<ConstantPointerNull>(Op0);
8436       bool Const1 = isa<ConstantInt>(Op1) || isa<ConstantFP>(Op1) ||
8437                     isa<ConstantPointerNull>(Op1);
8438       if (Const0 || Const1) {
8439         if (!Const0 || !Const1) {
8440           auto *F = new FreezeInst(Const0 ? Op1 : Op0, "", CmpI->getIterator());
8441           F->takeName(FI);
8442           CmpI->setOperand(Const0 ? 1 : 0, F);
8443         }
8444         replaceAllUsesWith(FI, CmpI, FreshBBs, IsHugeFunc);
8445         FI->eraseFromParent();
8446         return true;
8447       }
8448     }
8449     return AnyChange;
8450   }
8451 
8452   if (tryToSinkFreeOperands(I))
8453     return true;
8454 
8455   switch (I->getOpcode()) {
8456   case Instruction::Shl:
8457   case Instruction::LShr:
8458   case Instruction::AShr:
8459     return optimizeShiftInst(cast<BinaryOperator>(I));
8460   case Instruction::Call:
8461     return optimizeCallInst(cast<CallInst>(I), ModifiedDT);
8462   case Instruction::Select:
8463     return optimizeSelectInst(cast<SelectInst>(I));
8464   case Instruction::ShuffleVector:
8465     return optimizeShuffleVectorInst(cast<ShuffleVectorInst>(I));
8466   case Instruction::Switch:
8467     return optimizeSwitchInst(cast<SwitchInst>(I));
8468   case Instruction::ExtractElement:
8469     return optimizeExtractElementInst(cast<ExtractElementInst>(I));
8470   case Instruction::Br:
8471     return optimizeBranch(cast<BranchInst>(I), *TLI, FreshBBs, IsHugeFunc);
8472   }
8473 
8474   return AnyChange;
8475 }
8476 
8477 /// Given an OR instruction, check to see if this is a bitreverse
8478 /// idiom. If so, insert the new intrinsic and return true.
8479 bool CodeGenPrepare::makeBitReverse(Instruction &I) {
8480   if (!I.getType()->isIntegerTy() ||
8481       !TLI->isOperationLegalOrCustom(ISD::BITREVERSE,
8482                                      TLI->getValueType(*DL, I.getType(), true)))
8483     return false;
8484 
8485   SmallVector<Instruction *, 4> Insts;
8486   if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
8487     return false;
8488   Instruction *LastInst = Insts.back();
8489   replaceAllUsesWith(&I, LastInst, FreshBBs, IsHugeFunc);
8490   RecursivelyDeleteTriviallyDeadInstructions(
8491       &I, TLInfo, nullptr,
8492       [&](Value *V) { removeAllAssertingVHReferences(V); });
8493   return true;
8494 }
8495 
8496 // In this pass we look for GEP and cast instructions that are used
8497 // across basic blocks and rewrite them to improve basic-block-at-a-time
8498 // selection.
8499 bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT) {
8500   SunkAddrs.clear();
8501   bool MadeChange = false;
8502 
8503   do {
8504     CurInstIterator = BB.begin();
8505     ModifiedDT = ModifyDT::NotModifyDT;
8506     while (CurInstIterator != BB.end()) {
8507       MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
8508       if (ModifiedDT != ModifyDT::NotModifyDT) {
8509         // For huge function we tend to quickly go though the inner optmization
8510         // opportunities in the BB. So we go back to the BB head to re-optimize
8511         // each instruction instead of go back to the function head.
8512         if (IsHugeFunc) {
8513           DT.reset();
8514           getDT(*BB.getParent());
8515           break;
8516         } else {
8517           return true;
8518         }
8519       }
8520     }
8521   } while (ModifiedDT == ModifyDT::ModifyInstDT);
8522 
8523   bool MadeBitReverse = true;
8524   while (MadeBitReverse) {
8525     MadeBitReverse = false;
8526     for (auto &I : reverse(BB)) {
8527       if (makeBitReverse(I)) {
8528         MadeBitReverse = MadeChange = true;
8529         break;
8530       }
8531     }
8532   }
8533   MadeChange |= dupRetToEnableTailCallOpts(&BB, ModifiedDT);
8534 
8535   return MadeChange;
8536 }
8537 
8538 // Some CGP optimizations may move or alter what's computed in a block. Check
8539 // whether a dbg.value intrinsic could be pointed at a more appropriate operand.
8540 bool CodeGenPrepare::fixupDbgValue(Instruction *I) {
8541   assert(isa<DbgValueInst>(I));
8542   DbgValueInst &DVI = *cast<DbgValueInst>(I);
8543 
8544   // Does this dbg.value refer to a sunk address calculation?
8545   bool AnyChange = false;
8546   SmallDenseSet<Value *> LocationOps(DVI.location_ops().begin(),
8547                                      DVI.location_ops().end());
8548   for (Value *Location : LocationOps) {
8549     WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
8550     Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
8551     if (SunkAddr) {
8552       // Point dbg.value at locally computed address, which should give the best
8553       // opportunity to be accurately lowered. This update may change the type
8554       // of pointer being referred to; however this makes no difference to
8555       // debugging information, and we can't generate bitcasts that may affect
8556       // codegen.
8557       DVI.replaceVariableLocationOp(Location, SunkAddr);
8558       AnyChange = true;
8559     }
8560   }
8561   return AnyChange;
8562 }
8563 
8564 bool CodeGenPrepare::fixupDbgVariableRecordsOnInst(Instruction &I) {
8565   bool AnyChange = false;
8566   for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange()))
8567     AnyChange |= fixupDbgVariableRecord(DVR);
8568   return AnyChange;
8569 }
8570 
8571 // FIXME: should updating debug-info really cause the "changed" flag to fire,
8572 // which can cause a function to be reprocessed?
8573 bool CodeGenPrepare::fixupDbgVariableRecord(DbgVariableRecord &DVR) {
8574   if (DVR.Type != DbgVariableRecord::LocationType::Value &&
8575       DVR.Type != DbgVariableRecord::LocationType::Assign)
8576     return false;
8577 
8578   // Does this DbgVariableRecord refer to a sunk address calculation?
8579   bool AnyChange = false;
8580   SmallDenseSet<Value *> LocationOps(DVR.location_ops().begin(),
8581                                      DVR.location_ops().end());
8582   for (Value *Location : LocationOps) {
8583     WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
8584     Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
8585     if (SunkAddr) {
8586       // Point dbg.value at locally computed address, which should give the best
8587       // opportunity to be accurately lowered. This update may change the type
8588       // of pointer being referred to; however this makes no difference to
8589       // debugging information, and we can't generate bitcasts that may affect
8590       // codegen.
8591       DVR.replaceVariableLocationOp(Location, SunkAddr);
8592       AnyChange = true;
8593     }
8594   }
8595   return AnyChange;
8596 }
8597 
8598 static void DbgInserterHelper(DbgValueInst *DVI, Instruction *VI) {
8599   DVI->removeFromParent();
8600   if (isa<PHINode>(VI))
8601     DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
8602   else
8603     DVI->insertAfter(VI);
8604 }
8605 
8606 static void DbgInserterHelper(DbgVariableRecord *DVR, Instruction *VI) {
8607   DVR->removeFromParent();
8608   BasicBlock *VIBB = VI->getParent();
8609   if (isa<PHINode>(VI))
8610     VIBB->insertDbgRecordBefore(DVR, VIBB->getFirstInsertionPt());
8611   else
8612     VIBB->insertDbgRecordAfter(DVR, VI);
8613 }
8614 
8615 // A llvm.dbg.value may be using a value before its definition, due to
8616 // optimizations in this pass and others. Scan for such dbg.values, and rescue
8617 // them by moving the dbg.value to immediately after the value definition.
8618 // FIXME: Ideally this should never be necessary, and this has the potential
8619 // to re-order dbg.value intrinsics.
8620 bool CodeGenPrepare::placeDbgValues(Function &F) {
8621   bool MadeChange = false;
8622   DominatorTree DT(F);
8623 
8624   auto DbgProcessor = [&](auto *DbgItem, Instruction *Position) {
8625     SmallVector<Instruction *, 4> VIs;
8626     for (Value *V : DbgItem->location_ops())
8627       if (Instruction *VI = dyn_cast_or_null<Instruction>(V))
8628         VIs.push_back(VI);
8629 
8630     // This item may depend on multiple instructions, complicating any
8631     // potential sink. This block takes the defensive approach, opting to
8632     // "undef" the item if it has more than one instruction and any of them do
8633     // not dominate iem.
8634     for (Instruction *VI : VIs) {
8635       if (VI->isTerminator())
8636         continue;
8637 
8638       // If VI is a phi in a block with an EHPad terminator, we can't insert
8639       // after it.
8640       if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
8641         continue;
8642 
8643       // If the defining instruction dominates the dbg.value, we do not need
8644       // to move the dbg.value.
8645       if (DT.dominates(VI, Position))
8646         continue;
8647 
8648       // If we depend on multiple instructions and any of them doesn't
8649       // dominate this DVI, we probably can't salvage it: moving it to
8650       // after any of the instructions could cause us to lose the others.
8651       if (VIs.size() > 1) {
8652         LLVM_DEBUG(
8653             dbgs()
8654             << "Unable to find valid location for Debug Value, undefing:\n"
8655             << *DbgItem);
8656         DbgItem->setKillLocation();
8657         break;
8658       }
8659 
8660       LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n"
8661                         << *DbgItem << ' ' << *VI);
8662       DbgInserterHelper(DbgItem, VI);
8663       MadeChange = true;
8664       ++NumDbgValueMoved;
8665     }
8666   };
8667 
8668   for (BasicBlock &BB : F) {
8669     for (Instruction &Insn : llvm::make_early_inc_range(BB)) {
8670       // Process dbg.value intrinsics.
8671       DbgValueInst *DVI = dyn_cast<DbgValueInst>(&Insn);
8672       if (DVI) {
8673         DbgProcessor(DVI, DVI);
8674         continue;
8675       }
8676 
8677       // If this isn't a dbg.value, process any attached DbgVariableRecord
8678       // records attached to this instruction.
8679       for (DbgVariableRecord &DVR : llvm::make_early_inc_range(
8680                filterDbgVars(Insn.getDbgRecordRange()))) {
8681         if (DVR.Type != DbgVariableRecord::LocationType::Value)
8682           continue;
8683         DbgProcessor(&DVR, &Insn);
8684       }
8685     }
8686   }
8687 
8688   return MadeChange;
8689 }
8690 
8691 // Group scattered pseudo probes in a block to favor SelectionDAG. Scattered
8692 // probes can be chained dependencies of other regular DAG nodes and block DAG
8693 // combine optimizations.
8694 bool CodeGenPrepare::placePseudoProbes(Function &F) {
8695   bool MadeChange = false;
8696   for (auto &Block : F) {
8697     // Move the rest probes to the beginning of the block.
8698     auto FirstInst = Block.getFirstInsertionPt();
8699     while (FirstInst != Block.end() && FirstInst->isDebugOrPseudoInst())
8700       ++FirstInst;
8701     BasicBlock::iterator I(FirstInst);
8702     I++;
8703     while (I != Block.end()) {
8704       if (auto *II = dyn_cast<PseudoProbeInst>(I++)) {
8705         II->moveBefore(&*FirstInst);
8706         MadeChange = true;
8707       }
8708     }
8709   }
8710   return MadeChange;
8711 }
8712 
8713 /// Scale down both weights to fit into uint32_t.
8714 static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
8715   uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
8716   uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
8717   NewTrue = NewTrue / Scale;
8718   NewFalse = NewFalse / Scale;
8719 }
8720 
8721 /// Some targets prefer to split a conditional branch like:
8722 /// \code
8723 ///   %0 = icmp ne i32 %a, 0
8724 ///   %1 = icmp ne i32 %b, 0
8725 ///   %or.cond = or i1 %0, %1
8726 ///   br i1 %or.cond, label %TrueBB, label %FalseBB
8727 /// \endcode
8728 /// into multiple branch instructions like:
8729 /// \code
8730 ///   bb1:
8731 ///     %0 = icmp ne i32 %a, 0
8732 ///     br i1 %0, label %TrueBB, label %bb2
8733 ///   bb2:
8734 ///     %1 = icmp ne i32 %b, 0
8735 ///     br i1 %1, label %TrueBB, label %FalseBB
8736 /// \endcode
8737 /// This usually allows instruction selection to do even further optimizations
8738 /// and combine the compare with the branch instruction. Currently this is
8739 /// applied for targets which have "cheap" jump instructions.
8740 ///
8741 /// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
8742 ///
8743 bool CodeGenPrepare::splitBranchCondition(Function &F, ModifyDT &ModifiedDT) {
8744   if (!TM->Options.EnableFastISel || TLI->isJumpExpensive())
8745     return false;
8746 
8747   bool MadeChange = false;
8748   for (auto &BB : F) {
8749     // Does this BB end with the following?
8750     //   %cond1 = icmp|fcmp|binary instruction ...
8751     //   %cond2 = icmp|fcmp|binary instruction ...
8752     //   %cond.or = or|and i1 %cond1, cond2
8753     //   br i1 %cond.or label %dest1, label %dest2"
8754     Instruction *LogicOp;
8755     BasicBlock *TBB, *FBB;
8756     if (!match(BB.getTerminator(),
8757                m_Br(m_OneUse(m_Instruction(LogicOp)), TBB, FBB)))
8758       continue;
8759 
8760     auto *Br1 = cast<BranchInst>(BB.getTerminator());
8761     if (Br1->getMetadata(LLVMContext::MD_unpredictable))
8762       continue;
8763 
8764     // The merging of mostly empty BB can cause a degenerate branch.
8765     if (TBB == FBB)
8766       continue;
8767 
8768     unsigned Opc;
8769     Value *Cond1, *Cond2;
8770     if (match(LogicOp,
8771               m_LogicalAnd(m_OneUse(m_Value(Cond1)), m_OneUse(m_Value(Cond2)))))
8772       Opc = Instruction::And;
8773     else if (match(LogicOp, m_LogicalOr(m_OneUse(m_Value(Cond1)),
8774                                         m_OneUse(m_Value(Cond2)))))
8775       Opc = Instruction::Or;
8776     else
8777       continue;
8778 
8779     auto IsGoodCond = [](Value *Cond) {
8780       return match(
8781           Cond,
8782           m_CombineOr(m_Cmp(), m_CombineOr(m_LogicalAnd(m_Value(), m_Value()),
8783                                            m_LogicalOr(m_Value(), m_Value()))));
8784     };
8785     if (!IsGoodCond(Cond1) || !IsGoodCond(Cond2))
8786       continue;
8787 
8788     LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());
8789 
8790     // Create a new BB.
8791     auto *TmpBB =
8792         BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
8793                            BB.getParent(), BB.getNextNode());
8794     if (IsHugeFunc)
8795       FreshBBs.insert(TmpBB);
8796 
8797     // Update original basic block by using the first condition directly by the
8798     // branch instruction and removing the no longer needed and/or instruction.
8799     Br1->setCondition(Cond1);
8800     LogicOp->eraseFromParent();
8801 
8802     // Depending on the condition we have to either replace the true or the
8803     // false successor of the original branch instruction.
8804     if (Opc == Instruction::And)
8805       Br1->setSuccessor(0, TmpBB);
8806     else
8807       Br1->setSuccessor(1, TmpBB);
8808 
8809     // Fill in the new basic block.
8810     auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
8811     if (auto *I = dyn_cast<Instruction>(Cond2)) {
8812       I->removeFromParent();
8813       I->insertBefore(Br2);
8814     }
8815 
8816     // Update PHI nodes in both successors. The original BB needs to be
8817     // replaced in one successor's PHI nodes, because the branch comes now from
8818     // the newly generated BB (NewBB). In the other successor we need to add one
8819     // incoming edge to the PHI nodes, because both branch instructions target
8820     // now the same successor. Depending on the original branch condition
8821     // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
8822     // we perform the correct update for the PHI nodes.
8823     // This doesn't change the successor order of the just created branch
8824     // instruction (or any other instruction).
8825     if (Opc == Instruction::Or)
8826       std::swap(TBB, FBB);
8827 
8828     // Replace the old BB with the new BB.
8829     TBB->replacePhiUsesWith(&BB, TmpBB);
8830 
8831     // Add another incoming edge from the new BB.
8832     for (PHINode &PN : FBB->phis()) {
8833       auto *Val = PN.getIncomingValueForBlock(&BB);
8834       PN.addIncoming(Val, TmpBB);
8835     }
8836 
8837     // Update the branch weights (from SelectionDAGBuilder::
8838     // FindMergedConditions).
8839     if (Opc == Instruction::Or) {
8840       // Codegen X | Y as:
8841       // BB1:
8842       //   jmp_if_X TBB
8843       //   jmp TmpBB
8844       // TmpBB:
8845       //   jmp_if_Y TBB
8846       //   jmp FBB
8847       //
8848 
8849       // We have flexibility in setting Prob for BB1 and Prob for NewBB.
8850       // The requirement is that
8851       //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
8852       //     = TrueProb for original BB.
8853       // Assuming the original weights are A and B, one choice is to set BB1's
8854       // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
8855       // assumes that
8856       //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
8857       // Another choice is to assume TrueProb for BB1 equals to TrueProb for
8858       // TmpBB, but the math is more complicated.
8859       uint64_t TrueWeight, FalseWeight;
8860       if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) {
8861         uint64_t NewTrueWeight = TrueWeight;
8862         uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
8863         scaleWeights(NewTrueWeight, NewFalseWeight);
8864         Br1->setMetadata(LLVMContext::MD_prof,
8865                          MDBuilder(Br1->getContext())
8866                              .createBranchWeights(TrueWeight, FalseWeight,
8867                                                   hasBranchWeightOrigin(*Br1)));
8868 
8869         NewTrueWeight = TrueWeight;
8870         NewFalseWeight = 2 * FalseWeight;
8871         scaleWeights(NewTrueWeight, NewFalseWeight);
8872         Br2->setMetadata(LLVMContext::MD_prof,
8873                          MDBuilder(Br2->getContext())
8874                              .createBranchWeights(TrueWeight, FalseWeight));
8875       }
8876     } else {
8877       // Codegen X & Y as:
8878       // BB1:
8879       //   jmp_if_X TmpBB
8880       //   jmp FBB
8881       // TmpBB:
8882       //   jmp_if_Y TBB
8883       //   jmp FBB
8884       //
8885       //  This requires creation of TmpBB after CurBB.
8886 
8887       // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
8888       // The requirement is that
8889       //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
8890       //     = FalseProb for original BB.
8891       // Assuming the original weights are A and B, one choice is to set BB1's
8892       // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
8893       // assumes that
8894       //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
8895       uint64_t TrueWeight, FalseWeight;
8896       if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) {
8897         uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
8898         uint64_t NewFalseWeight = FalseWeight;
8899         scaleWeights(NewTrueWeight, NewFalseWeight);
8900         Br1->setMetadata(LLVMContext::MD_prof,
8901                          MDBuilder(Br1->getContext())
8902                              .createBranchWeights(TrueWeight, FalseWeight));
8903 
8904         NewTrueWeight = 2 * TrueWeight;
8905         NewFalseWeight = FalseWeight;
8906         scaleWeights(NewTrueWeight, NewFalseWeight);
8907         Br2->setMetadata(LLVMContext::MD_prof,
8908                          MDBuilder(Br2->getContext())
8909                              .createBranchWeights(TrueWeight, FalseWeight));
8910       }
8911     }
8912 
8913     ModifiedDT = ModifyDT::ModifyBBDT;
8914     MadeChange = true;
8915 
8916     LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
8917                TmpBB->dump());
8918   }
8919   return MadeChange;
8920 }
8921