//===-- AMDGPUCodeGenPrepare.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass does misc. AMDGPU optimizations on IR before instruction
/// selection.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "SIModeRegisterDefaults.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/UniformityAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/Utils/IntegerDivision.h"
#include "llvm/Transforms/Utils/Local.h"

#define DEBUG_TYPE "amdgpu-codegenprepare"

using namespace llvm;
using namespace llvm::PatternMatch;

namespace {

static cl::opt<bool> WidenLoads(
  "amdgpu-codegenprepare-widen-constant-loads",
  cl::desc("Widen sub-dword constant address space loads in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(false));

static cl::opt<bool> Widen16BitOps(
  "amdgpu-codegenprepare-widen-16-bit-ops",
  cl::desc("Widen uniform 16-bit instructions to 32-bit in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool>
    ScalarizeLargePHIs("amdgpu-codegenprepare-break-large-phis",
                       cl::desc("Break large PHI nodes for DAGISel"),
                       cl::ReallyHidden, cl::init(true));

static cl::opt<bool>
    ForceScalarizeLargePHIs("amdgpu-codegenprepare-force-break-large-phis",
                            cl::desc("For testing purposes, always break large "
                                     "PHIs even if it isn't profitable."),
                            cl::ReallyHidden, cl::init(false));

static cl::opt<unsigned> ScalarizeLargePHIsThreshold(
    "amdgpu-codegenprepare-break-large-phis-threshold",
    cl::desc("Minimum type size in bits for breaking large PHI nodes"),
    cl::ReallyHidden, cl::init(32));

static cl::opt<bool> UseMul24Intrin(
  "amdgpu-codegenprepare-mul24",
  cl::desc("Introduce mul24 intrinsics in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(true));

// Legalize 64-bit division by using the generic IR expansion.
static cl::opt<bool> ExpandDiv64InIR(
  "amdgpu-codegenprepare-expand-div64",
  cl::desc("Expand 64-bit division in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(false));

// Leave all division operations as they are. This supersedes ExpandDiv64InIR
// and is used for testing the legalizer.
static cl::opt<bool> DisableIDivExpand(
  "amdgpu-codegenprepare-disable-idiv-expansion",
  cl::desc("Prevent expanding integer division in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(false));

// Disable processing of fdiv so we can better test the backend implementations.
static cl::opt<bool> DisableFDivExpand(
  "amdgpu-codegenprepare-disable-fdiv-expansion",
  cl::desc("Prevent expanding floating point division in AMDGPUCodeGenPrepare"),
  cl::ReallyHidden,
  cl::init(false));

class AMDGPUCodeGenPrepareImpl
    : public InstVisitor<AMDGPUCodeGenPrepareImpl, bool> {
public:
  const GCNSubtarget *ST = nullptr;
  const TargetLibraryInfo *TLInfo = nullptr;
  AssumptionCache *AC = nullptr;
  DominatorTree *DT = nullptr;
  UniformityInfo *UA = nullptr;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;
  bool HasUnsafeFPMath = false;
  bool HasFP32DenormalFlush = false;
  bool FlowChanged = false;

  DenseMap<const PHINode *, bool> BreakPhiNodesCache;

  bool canBreakPHINode(const PHINode &I);

  /// \returns \p T's base element bit width.
  unsigned getBaseElementBitWidth(const Type *T) const;

  /// \returns Equivalent 32 bit integer type for given type \p T. For example,
  /// if \p T is i7, then i32 is returned; if \p T is <3 x i12>, then <3 x i32>
  /// is returned.
  Type *getI32Ty(IRBuilder<> &B, const Type *T) const;

  /// \returns True if binary operation \p I is a signed binary operation, false
  /// otherwise.
  bool isSigned(const BinaryOperator &I) const;

  /// \returns True if the condition of 'select' operation \p I comes from a
  /// signed 'icmp' operation, false otherwise.
  bool isSigned(const SelectInst &I) const;

  /// \returns True if type \p T needs to be promoted to 32 bit integer type,
  /// false otherwise.
  bool needsPromotionToI32(const Type *T) const;

  /// Return true if \p T is a legal scalar floating point type.
  bool isLegalFloatingTy(const Type *T) const;

  /// Wrapper to pass all the arguments to computeKnownFPClass.
  KnownFPClass computeKnownFPClass(const Value *V, FPClassTest Interested,
                                   const Instruction *CtxI) const {
    return llvm::computeKnownFPClass(V, *DL, Interested, 0, TLInfo, AC, CtxI,
                                     DT);
  }

  bool canIgnoreDenormalInput(const Value *V, const Instruction *CtxI) const {
    return HasFP32DenormalFlush ||
           computeKnownFPClass(V, fcSubnormal, CtxI).isKnownNeverSubnormal();
  }

  /// Promotes uniform binary operation \p I to the equivalent 32 bit binary
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with the equivalent 32 bit binary operation,
  /// and truncating the result of the 32 bit binary operation back to \p I's
  /// original type. Division operations are not promoted.
  ///
  /// \returns True if \p I is promoted to the equivalent 32 bit binary
  /// operation, false otherwise.
  bool promoteUniformOpToI32(BinaryOperator &I) const;

  /// Promotes uniform 'icmp' operation \p I to a 32 bit 'icmp' operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, and replacing \p I with a 32 bit 'icmp' operation.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(ICmpInst &I) const;

  /// Promotes uniform 'select' operation \p I to a 32 bit 'select'
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with a 32 bit 'select' operation, and
  /// truncating the result of the 32 bit 'select' operation back to \p I's
  /// original type.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(SelectInst &I) const;

  /// Promotes uniform 'bitreverse' intrinsic \p I to the 32 bit 'bitreverse'
  /// intrinsic.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by zero extending the operand to 32
  /// bits, replacing \p I with the 32 bit 'bitreverse' intrinsic, shifting the
  /// result of the 32 bit 'bitreverse' intrinsic to the right with zero fill
  /// (the shift amount is 32 minus \p I's base element bit width), and
  /// truncating the result of the shift operation back to \p I's original type.
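  ///
  /// For example, for i16: bitreverse(zext32(x)) leaves the reversed bits in
  /// the upper 16 bits of the i32 result, and the lshr by 16 moves them back
  /// down before the truncate.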
  ///
  /// \returns True.
  bool promoteUniformBitreverseToI32(IntrinsicInst &I) const;

  /// \returns The minimum number of bits needed to store the value of \p Op as
  /// an unsigned integer. Truncating to this size and then zero-extending to
  /// the original size will not change the value.
  unsigned numBitsUnsigned(Value *Op) const;

  /// \returns The minimum number of bits needed to store the value of \p Op as
  /// a signed integer. Truncating to this size and then sign-extending to
  /// the original size will not change the value.
  unsigned numBitsSigned(Value *Op) const;

  /// Replace mul instructions with llvm.amdgcn.mul.u24 or llvm.amdgcn.mul.s24.
  /// SelectionDAG has an issue where an 'and' asserting that the bits are
  /// known can obscure this pattern, so the replacement is done on IR instead.
  bool replaceMulWithMul24(BinaryOperator &I) const;

  /// Perform the same fold as the equivalently named function in DAGCombiner.
  /// Since we expand some divisions here, we need to perform this before the
  /// expansion obscures the select.
  bool foldBinOpIntoSelect(BinaryOperator &I) const;

  bool divHasSpecialOptimization(BinaryOperator &I,
                                 Value *Num, Value *Den) const;
  int getDivNumBits(BinaryOperator &I,
                    Value *Num, Value *Den,
                    unsigned AtLeast, bool Signed) const;

  /// Expands 24 bit div or rem.
  Value* expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den,
                        bool IsDiv, bool IsSigned) const;

  Value *expandDivRem24Impl(IRBuilder<> &Builder, BinaryOperator &I,
                            Value *Num, Value *Den, unsigned NumBits,
                            bool IsDiv, bool IsSigned) const;

  /// Expands 32 bit div or rem.
  Value* expandDivRem32(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den) const;

  Value *shrinkDivRem64(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den) const;
  void expandDivRem64(BinaryOperator &I) const;

  /// Check whether a scalar extending load can be widened.
  ///
  /// \details Uniform, sub-dword loads from constant memory can be widened to
  /// a full 32 bits and then truncated, allowing a scalar load instead of a
  /// vector load.
  ///
  /// \returns True if the load can be widened.
  bool canWidenScalarExtLoad(LoadInst &I) const;

  Value *matchFractPat(IntrinsicInst &I);
  Value *applyFractPat(IRBuilder<> &Builder, Value *FractArg);

  bool canOptimizeWithRsq(const FPMathOperator *SqrtOp, FastMathFlags DivFMF,
                          FastMathFlags SqrtFMF) const;

  Value *optimizeWithRsq(IRBuilder<> &Builder, Value *Num, Value *Den,
                         FastMathFlags DivFMF, FastMathFlags SqrtFMF,
                         const Instruction *CtxI) const;

  Value *optimizeWithRcp(IRBuilder<> &Builder, Value *Num, Value *Den,
                         FastMathFlags FMF, const Instruction *CtxI) const;
  Value *optimizeWithFDivFast(IRBuilder<> &Builder, Value *Num, Value *Den,
                              float ReqdAccuracy) const;

  Value *visitFDivElement(IRBuilder<> &Builder, Value *Num, Value *Den,
                          FastMathFlags DivFMF, FastMathFlags SqrtFMF,
                          Value *RsqOp, const Instruction *FDiv,
                          float ReqdAccuracy) const;

  std::pair<Value *, Value *> getFrexpResults(IRBuilder<> &Builder,
                                              Value *Src) const;

  Value *emitRcpIEEE1ULP(IRBuilder<> &Builder, Value *Src,
                         bool IsNegative) const;
  Value *emitFrexpDiv(IRBuilder<> &Builder, Value *LHS, Value *RHS,
                      FastMathFlags FMF) const;

public:
  bool visitFDiv(BinaryOperator &I);

  bool visitInstruction(Instruction &I) { return false; }
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoadInst(LoadInst &I);
  bool visitICmpInst(ICmpInst &I);
  bool visitSelectInst(SelectInst &I);
  bool visitPHINode(PHINode &I);

  bool visitIntrinsicInst(IntrinsicInst &I);
  bool visitBitreverseIntrinsicInst(IntrinsicInst &I);
  bool visitMinNum(IntrinsicInst &I);
  bool run(Function &F);
};

class AMDGPUCodeGenPrepare : public FunctionPass {
private:
  AMDGPUCodeGenPrepareImpl Impl;

public:
  static char ID;
  AMDGPUCodeGenPrepare() : FunctionPass(ID) {
    initializeAMDGPUCodeGenPreparePass(*PassRegistry::getPassRegistry());
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<UniformityInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();

    // FIXME: Division expansion needs to preserve the dominator tree.
    if (!ExpandDiv64InIR)
      AU.setPreservesAll();
  }
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  StringRef getPassName() const override { return "AMDGPU IR optimizations"; }
};

} // end anonymous namespace

bool AMDGPUCodeGenPrepareImpl::run(Function &F) {
  bool MadeChange = false;

  Function::iterator NextBB;
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; FI = NextBB) {
    BasicBlock *BB = &*FI;
    NextBB = std::next(FI);

    BasicBlock::iterator Next;
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
         I = Next) {
      Next = std::next(I);

      MadeChange |= visit(*I);

      if (Next != E) { // Control flow changed
        BasicBlock *NextInstBB = Next->getParent();
        if (NextInstBB != BB) {
          BB = NextInstBB;
          E = BB->end();
          FE = F.end();
        }
      }
    }
  }
  return MadeChange;
}

unsigned AMDGPUCodeGenPrepareImpl::getBaseElementBitWidth(const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return T->getIntegerBitWidth();
  return cast<VectorType>(T)->getElementType()->getIntegerBitWidth();
}

Type *AMDGPUCodeGenPrepareImpl::getI32Ty(IRBuilder<> &B, const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return B.getInt32Ty();
  return FixedVectorType::get(B.getInt32Ty(), cast<FixedVectorType>(T));
}

bool AMDGPUCodeGenPrepareImpl::isSigned(const BinaryOperator &I) const {
  return I.getOpcode() == Instruction::AShr ||
      I.getOpcode() == Instruction::SDiv || I.getOpcode() == Instruction::SRem;
}

bool AMDGPUCodeGenPrepareImpl::isSigned(const SelectInst &I) const {
  return isa<ICmpInst>(I.getOperand(0)) ?
      cast<ICmpInst>(I.getOperand(0))->isSigned() : false;
}

bool AMDGPUCodeGenPrepareImpl::needsPromotionToI32(const Type *T) const {
  if (!Widen16BitOps)
    return false;

  const IntegerType *IntTy = dyn_cast<IntegerType>(T);
  if (IntTy && IntTy->getBitWidth() > 1 && IntTy->getBitWidth() <= 16)
    return true;

  if (const VectorType *VT = dyn_cast<VectorType>(T)) {
    // TODO: The set of packed operations is more limited, so may want to
    // promote some anyway.
    if (ST->hasVOP3PInsts())
      return false;

    return needsPromotionToI32(VT->getElementType());
  }

  return false;
}

bool AMDGPUCodeGenPrepareImpl::isLegalFloatingTy(const Type *Ty) const {
  return Ty->isFloatTy() || Ty->isDoubleTy() ||
         (Ty->isHalfTy() && ST->has16BitInsts());
}

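// When a uniform i16 op is widened to i32, the extended operands are known to
// be narrow, so wrap flags can often be added even if the original op had
// none. For example, an i16 add is promoted by zero-extending both operands,
// so each is at most 0xFFFF and the 32-bit sum is at most 0x1FFFE, which can
// wrap neither the signed nor the unsigned i32 range.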
// Return true if the op promoted to i32 should have nsw set.
static bool promotedOpIsNSW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Sub:
    return true;
  case Instruction::Mul:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}

// Return true if the op promoted to i32 should have nuw set.
static bool promotedOpIsNUW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Mul:
    return true;
  case Instruction::Sub:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}

bool AMDGPUCodeGenPrepareImpl::canWidenScalarExtLoad(LoadInst &I) const {
  Type *Ty = I.getType();
  const DataLayout &DL = Mod->getDataLayout();
  int TySize = DL.getTypeSizeInBits(Ty);
  Align Alignment = DL.getValueOrABITypeAlignment(I.getAlign(), Ty);

  return I.isSimple() && TySize < 32 && Alignment >= 4 && UA->isUniform(&I);
}

bool AMDGPUCodeGenPrepareImpl::promoteUniformOpToI32(BinaryOperator &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  if (I.getOpcode() == Instruction::SDiv ||
      I.getOpcode() == Instruction::UDiv ||
      I.getOpcode() == Instruction::SRem ||
      I.getOpcode() == Instruction::URem)
    return false;

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }

  ExtRes = Builder.CreateBinOp(I.getOpcode(), ExtOp0, ExtOp1);
  if (Instruction *Inst = dyn_cast<Instruction>(ExtRes)) {
    if (promotedOpIsNSW(cast<Instruction>(I)))
      Inst->setHasNoSignedWrap();

    if (promotedOpIsNUW(cast<Instruction>(I)))
      Inst->setHasNoUnsignedWrap();

    if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
      Inst->setIsExact(ExactOp->isExact());
  }

  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepareImpl::promoteUniformOpToI32(ICmpInst &I) const {
  assert(needsPromotionToI32(I.getOperand(0)->getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getOperand(0)->getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *NewICmp = nullptr;

  if (I.isSigned()) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }
  NewICmp = Builder.CreateICmp(I.getPredicate(), ExtOp0, ExtOp1);

  I.replaceAllUsesWith(NewICmp);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepareImpl::promoteUniformOpToI32(SelectInst &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp1 = nullptr;
  Value *ExtOp2 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateSExt(I.getOperand(2), I32Ty);
  } else {
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateZExt(I.getOperand(2), I32Ty);
  }
  ExtRes = Builder.CreateSelect(I.getOperand(0), ExtOp1, ExtOp2);
  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepareImpl::promoteUniformBitreverseToI32(
    IntrinsicInst &I) const {
  assert(I.getIntrinsicID() == Intrinsic::bitreverse &&
         "I must be bitreverse intrinsic");
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Function *I32 =
      Intrinsic::getDeclaration(Mod, Intrinsic::bitreverse, { I32Ty });
  Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
  Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
  Value *LShrOp =
      Builder.CreateLShr(ExtRes, 32 - getBaseElementBitWidth(I.getType()));
  Value *TruncRes =
      Builder.CreateTrunc(LShrOp, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

unsigned AMDGPUCodeGenPrepareImpl::numBitsUnsigned(Value *Op) const {
  return computeKnownBits(Op, *DL, 0, AC).countMaxActiveBits();
}

unsigned AMDGPUCodeGenPrepareImpl::numBitsSigned(Value *Op) const {
  return ComputeMaxSignificantBits(Op, *DL, 0, AC);
}

static void extractValues(IRBuilder<> &Builder,
                          SmallVectorImpl<Value *> &Values, Value *V) {
  auto *VT = dyn_cast<FixedVectorType>(V->getType());
  if (!VT) {
    Values.push_back(V);
    return;
  }

  for (int I = 0, E = VT->getNumElements(); I != E; ++I)
    Values.push_back(Builder.CreateExtractElement(V, I));
}

static Value *insertValues(IRBuilder<> &Builder,
                           Type *Ty,
                           SmallVectorImpl<Value *> &Values) {
  if (!Ty->isVectorTy()) {
    assert(Values.size() == 1);
    return Values[0];
  }

  Value *NewVal = PoisonValue::get(Ty);
  for (int I = 0, E = Values.size(); I != E; ++I)
    NewVal = Builder.CreateInsertElement(NewVal, Values[I], I);

  return NewVal;
}

// Returns a 24-bit or 48-bit (as per `NumBits` and `Size`) mul of `LHS` and
// `RHS`. `NumBits` is the number of known bits of the result, and `Size` is
// the bit width of the original destination.
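// In the 48-bit case, mul24 returns the low 32 bits of the product and
// mulhi24 returns bits [32, 47], so the full result is reassembled as
// zext(lo) | (zext(hi) << 32).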
static Value *getMul24(IRBuilder<> &Builder, Value *LHS, Value *RHS,
                       unsigned Size, unsigned NumBits, bool IsSigned) {
  if (Size <= 32 || NumBits <= 32) {
    Intrinsic::ID ID =
        IsSigned ? Intrinsic::amdgcn_mul_i24 : Intrinsic::amdgcn_mul_u24;
    return Builder.CreateIntrinsic(ID, {}, {LHS, RHS});
  }

  assert(NumBits <= 48);

  Intrinsic::ID LoID =
      IsSigned ? Intrinsic::amdgcn_mul_i24 : Intrinsic::amdgcn_mul_u24;
  Intrinsic::ID HiID =
      IsSigned ? Intrinsic::amdgcn_mulhi_i24 : Intrinsic::amdgcn_mulhi_u24;

  Value *Lo = Builder.CreateIntrinsic(LoID, {}, {LHS, RHS});
  Value *Hi = Builder.CreateIntrinsic(HiID, {}, {LHS, RHS});

  IntegerType *I64Ty = Builder.getInt64Ty();
  Lo = Builder.CreateZExtOrTrunc(Lo, I64Ty);
  Hi = Builder.CreateZExtOrTrunc(Hi, I64Ty);

  return Builder.CreateOr(Lo, Builder.CreateShl(Hi, 32));
}

bool AMDGPUCodeGenPrepareImpl::replaceMulWithMul24(BinaryOperator &I) const {
  if (I.getOpcode() != Instruction::Mul)
    return false;

  Type *Ty = I.getType();
  unsigned Size = Ty->getScalarSizeInBits();
  if (Size <= 16 && ST->has16BitInsts())
    return false;

  // Prefer scalar if this could be s_mul_i32
  if (UA->isUniform(&I))
    return false;

  Value *LHS = I.getOperand(0);
  Value *RHS = I.getOperand(1);
  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  unsigned LHSBits = 0, RHSBits = 0;
  bool IsSigned = false;

  if (ST->hasMulU24() && (LHSBits = numBitsUnsigned(LHS)) <= 24 &&
      (RHSBits = numBitsUnsigned(RHS)) <= 24) {
    IsSigned = false;

  } else if (ST->hasMulI24() && (LHSBits = numBitsSigned(LHS)) <= 24 &&
             (RHSBits = numBitsSigned(RHS)) <= 24) {
    IsSigned = true;

  } else
    return false;

  SmallVector<Value *, 4> LHSVals;
  SmallVector<Value *, 4> RHSVals;
  SmallVector<Value *, 4> ResultVals;
  extractValues(Builder, LHSVals, LHS);
  extractValues(Builder, RHSVals, RHS);

  IntegerType *I32Ty = Builder.getInt32Ty();
  for (int I = 0, E = LHSVals.size(); I != E; ++I) {
    Value *LHS, *RHS;
    if (IsSigned) {
      LHS = Builder.CreateSExtOrTrunc(LHSVals[I], I32Ty);
      RHS = Builder.CreateSExtOrTrunc(RHSVals[I], I32Ty);
    } else {
      LHS = Builder.CreateZExtOrTrunc(LHSVals[I], I32Ty);
      RHS = Builder.CreateZExtOrTrunc(RHSVals[I], I32Ty);
    }

    Value *Result =
        getMul24(Builder, LHS, RHS, Size, LHSBits + RHSBits, IsSigned);

    if (IsSigned) {
      ResultVals.push_back(
          Builder.CreateSExtOrTrunc(Result, LHSVals[I]->getType()));
    } else {
      ResultVals.push_back(
          Builder.CreateZExtOrTrunc(Result, LHSVals[I]->getType()));
    }
  }

  Value *NewVal = insertValues(Builder, Ty, ResultVals);
  NewVal->takeName(&I);
  I.replaceAllUsesWith(NewVal);
  I.eraseFromParent();

  return true;
}

// Find a select instruction, which may have been cast. This is mostly to deal
// with cases where i16 selects were promoted here to i32.
static SelectInst *findSelectThroughCast(Value *V, CastInst *&Cast) {
  Cast = nullptr;
  if (SelectInst *Sel = dyn_cast<SelectInst>(V))
    return Sel;

  if ((Cast = dyn_cast<CastInst>(V))) {
    if (SelectInst *Sel = dyn_cast<SelectInst>(Cast->getOperand(0)))
      return Sel;
  }

  return nullptr;
}

bool AMDGPUCodeGenPrepareImpl::foldBinOpIntoSelect(BinaryOperator &BO) const {
  // Don't do this unless the old select is going away. We want to eliminate the
  // binary operator, not replace a binop with a select.
  int SelOpNo = 0;

  CastInst *CastOp;

  // TODO: Should probably try to handle some cases with multiple
  // users. Duplicating the select may be profitable for division.
  SelectInst *Sel = findSelectThroughCast(BO.getOperand(0), CastOp);
  if (!Sel || !Sel->hasOneUse()) {
    SelOpNo = 1;
    Sel = findSelectThroughCast(BO.getOperand(1), CastOp);
  }

  if (!Sel || !Sel->hasOneUse())
    return false;

  Constant *CT = dyn_cast<Constant>(Sel->getTrueValue());
  Constant *CF = dyn_cast<Constant>(Sel->getFalseValue());
  Constant *CBO = dyn_cast<Constant>(BO.getOperand(SelOpNo ^ 1));
  if (!CBO || !CT || !CF)
    return false;

  if (CastOp) {
    if (!CastOp->hasOneUse())
      return false;
    CT = ConstantFoldCastOperand(CastOp->getOpcode(), CT, BO.getType(), *DL);
    CF = ConstantFoldCastOperand(CastOp->getOpcode(), CF, BO.getType(), *DL);
  }

  // TODO: Handle special 0/-1 cases DAG combine does, although we only really
  // need to handle divisions here.
  Constant *FoldedT = SelOpNo ?
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CT, *DL) :
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CT, CBO, *DL);
  if (!FoldedT || isa<ConstantExpr>(FoldedT))
    return false;

  Constant *FoldedF = SelOpNo ?
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CF, *DL) :
    ConstantFoldBinaryOpOperands(BO.getOpcode(), CF, CBO, *DL);
  if (!FoldedF || isa<ConstantExpr>(FoldedF))
    return false;

  IRBuilder<> Builder(&BO);
  Builder.SetCurrentDebugLocation(BO.getDebugLoc());
  if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&BO))
    Builder.setFastMathFlags(FPOp->getFastMathFlags());

  Value *NewSelect = Builder.CreateSelect(Sel->getCondition(),
                                          FoldedT, FoldedF);
  NewSelect->takeName(&BO);
  BO.replaceAllUsesWith(NewSelect);
  BO.eraseFromParent();
  if (CastOp)
    CastOp->eraseFromParent();
  Sel->eraseFromParent();
  return true;
}

std::pair<Value *, Value *>
AMDGPUCodeGenPrepareImpl::getFrexpResults(IRBuilder<> &Builder,
                                          Value *Src) const {
  Type *Ty = Src->getType();
  Value *Frexp = Builder.CreateIntrinsic(Intrinsic::frexp,
                                         {Ty, Builder.getInt32Ty()}, Src);
  Value *FrexpMant = Builder.CreateExtractValue(Frexp, {0});

  // Bypass the bug workaround for the exponent result since it doesn't matter.
  // TODO: Does the bug workaround even really need to consider the exponent
  // result? It's unspecified by the spec.

  Value *FrexpExp =
      ST->hasFractBug()
          ? Builder.CreateIntrinsic(Intrinsic::amdgcn_frexp_exp,
                                    {Builder.getInt32Ty(), Ty}, Src)
          : Builder.CreateExtractValue(Frexp, {1});
  return {FrexpMant, FrexpExp};
}

/// Emit an expansion of 1.0 / Src good for 1ulp that supports denormals.
Value *AMDGPUCodeGenPrepareImpl::emitRcpIEEE1ULP(IRBuilder<> &Builder,
                                                 Value *Src,
                                                 bool IsNegative) const {
  // Same as for 1.0, but expand the sign out of the constant.
  // -1.0 / x -> rcp (fneg x)
  if (IsNegative)
    Src = Builder.CreateFNeg(Src);

  // The rcp instruction doesn't support denormals, so scale the input
  // out of the denormal range and convert at the end.
  //
  // Expand as 2^-n * (1.0 / (x * 2^n))

  // TODO: Skip scaling if input is known never denormal and the input
  // range won't underflow to denormal. The hard part is knowing the
  // result. We need a range check, the result could be denormal for
  // 0x1p+126 < den <= 0x1p+127.

  Type *Ty = Src->getType();

  auto [FrexpMant, FrexpExp] = getFrexpResults(Builder, Src);
  Value *ScaleFactor = Builder.CreateNeg(FrexpExp);
  Value *Rcp = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rcp, FrexpMant);
  return Builder.CreateIntrinsic(Intrinsic::ldexp, {Ty, Builder.getInt32Ty()},
                                 {Rcp, ScaleFactor});
}

/// Emit a 2ulp expansion for fdiv by using frexp for input scaling.
Value *AMDGPUCodeGenPrepareImpl::emitFrexpDiv(IRBuilder<> &Builder, Value *LHS,
                                              Value *RHS,
                                              FastMathFlags FMF) const {
  // If we have to work around the fract/frexp bug, we're worse off than
  // using the fdiv.fast expansion. The full safe expansion is faster if we have
  // fast FMA.
  if (HasFP32DenormalFlush && ST->hasFractBug() && !ST->hasFastFMAF32() &&
      (!FMF.noNaNs() || !FMF.noInfs()))
    return nullptr;

  // We're scaling the LHS to avoid a denormal input, and scaling the
  // denominator to avoid large values underflowing the result.
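  //
  // With frexp, LHS = MantL * 2^ExpL and RHS = MantR * 2^ExpR, with both
  // mantissas in [0.5, 1), so LHS / RHS = (MantL * rcp(MantR)) * 2^(ExpL - ExpR).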
  Type *Ty = LHS->getType();

  auto [FrexpMantRHS, FrexpExpRHS] = getFrexpResults(Builder, RHS);

  Value *Rcp =
      Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rcp, FrexpMantRHS);

  auto [FrexpMantLHS, FrexpExpLHS] = getFrexpResults(Builder, LHS);
  Value *Mul = Builder.CreateFMul(FrexpMantLHS, Rcp);

  // We multiplied by 2^N/2^M, so we need to multiply by 2^(N-M) to scale the
  // result.
  Value *ExpDiff = Builder.CreateSub(FrexpExpLHS, FrexpExpRHS);
  return Builder.CreateIntrinsic(Intrinsic::ldexp, {Ty, Builder.getInt32Ty()},
                                 {Mul, ExpDiff});
}

/// Emit an expansion of 1.0 / sqrt(Src) good for 1ulp that supports denormals.
static Value *emitRsqIEEE1ULP(IRBuilder<> &Builder, Value *Src,
                              bool IsNegative) {
  // bool need_scale = x < 0x1p-126f;
  // float input_scale = need_scale ? 0x1.0p+24f : 1.0f;
  // float output_scale = need_scale ? 0x1.0p+12f : 1.0f;
  // rsq(x * input_scale) * output_scale;
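  //
  // Scaling the input by 2^24 keeps it out of the denormal range, and since
  // rsq(x * 2^24) = rsq(x) * 2^-12, multiplying the result by 2^12 undoes it.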

  Type *Ty = Src->getType();
  APFloat SmallestNormal =
      APFloat::getSmallestNormalized(Ty->getFltSemantics());
  Value *NeedScale =
      Builder.CreateFCmpOLT(Src, ConstantFP::get(Ty, SmallestNormal));
  Constant *One = ConstantFP::get(Ty, 1.0);
  Constant *InputScale = ConstantFP::get(Ty, 0x1.0p+24);
  Constant *OutputScale =
      ConstantFP::get(Ty, IsNegative ? -0x1.0p+12 : 0x1.0p+12);

  Value *InputScaleFactor = Builder.CreateSelect(NeedScale, InputScale, One);

  Value *ScaledInput = Builder.CreateFMul(Src, InputScaleFactor);
  Value *Rsq = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rsq, ScaledInput);
  Value *OutputScaleFactor = Builder.CreateSelect(
      NeedScale, OutputScale, IsNegative ? ConstantFP::get(Ty, -1.0) : One);

  return Builder.CreateFMul(Rsq, OutputScaleFactor);
}

bool AMDGPUCodeGenPrepareImpl::canOptimizeWithRsq(const FPMathOperator *SqrtOp,
                                                  FastMathFlags DivFMF,
                                                  FastMathFlags SqrtFMF) const {
  // The rsqrt contraction increases accuracy from ~2ulp to ~1ulp.
  if (!DivFMF.allowContract() || !SqrtFMF.allowContract())
    return false;

  // v_rsq_f32 gives 1ulp
  return SqrtFMF.approxFunc() || HasUnsafeFPMath ||
         SqrtOp->getFPAccuracy() >= 1.0f;
}

Value *AMDGPUCodeGenPrepareImpl::optimizeWithRsq(
    IRBuilder<> &Builder, Value *Num, Value *Den, FastMathFlags DivFMF,
    FastMathFlags SqrtFMF, const Instruction *CtxI) const {
  // The rsqrt contraction increases accuracy from ~2ulp to ~1ulp.
  assert(DivFMF.allowContract() && SqrtFMF.allowContract());

  // rsq_f16 is accurate to 0.51 ulp.
  // rsq_f32 is accurate for !fpmath >= 1.0ulp and denormals are flushed.
  // rsq_f64 is never accurate.
  const ConstantFP *CLHS = dyn_cast<ConstantFP>(Num);
  if (!CLHS)
    return nullptr;

  assert(Den->getType()->isFloatTy());

  bool IsNegative = false;

  // TODO: Handle other numerator values with arcp.
  if (CLHS->isExactlyValue(1.0) || (IsNegative = CLHS->isExactlyValue(-1.0))) {
    // Add in the sqrt flags.
    IRBuilder<>::FastMathFlagGuard Guard(Builder);
    DivFMF |= SqrtFMF;
    Builder.setFastMathFlags(DivFMF);

    if ((DivFMF.approxFunc() && SqrtFMF.approxFunc()) ||
        canIgnoreDenormalInput(Den, CtxI)) {
      Value *Result = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rsq, Den);
      // -1.0 / sqrt(x) -> fneg(rsq(x))
      return IsNegative ? Builder.CreateFNeg(Result) : Result;
    }

    return emitRsqIEEE1ULP(Builder, Den, IsNegative);
  }

  return nullptr;
}

// Optimize fdiv with rcp:
//
// 1/x -> rcp(x) when rcp is sufficiently accurate, or when an inaccurate rcp
//               is allowed with unsafe-fp-math or afn.
//
// a/b -> a*rcp(b) when arcp is allowed, and we only need to provide 1.0 ULP.
Value *
AMDGPUCodeGenPrepareImpl::optimizeWithRcp(IRBuilder<> &Builder, Value *Num,
                                          Value *Den, FastMathFlags FMF,
                                          const Instruction *CtxI) const {
  // rcp_f16 is accurate to 0.51 ulp.
  // rcp_f32 is accurate for !fpmath >= 1.0ulp and denormals are flushed.
  // rcp_f64 is never accurate.
  assert(Den->getType()->isFloatTy());

  if (const ConstantFP *CLHS = dyn_cast<ConstantFP>(Num)) {
    bool IsNegative = false;
    if (CLHS->isExactlyValue(1.0) ||
        (IsNegative = CLHS->isExactlyValue(-1.0))) {
      Value *Src = Den;

      if (HasFP32DenormalFlush || FMF.approxFunc()) {
        // -1.0 / x -> 1.0 / fneg(x)
        if (IsNegative)
          Src = Builder.CreateFNeg(Src);

        // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
        // the CI documentation have a worst case error of 1 ulp.
        // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK
        // to use it as long as we aren't trying to use denormals.
        //
        // v_rcp_f16 and v_rsq_f16 DO support denormals.

        // NOTE: v_sqrt and v_rcp will be combined to v_rsq later. So we don't
        //       insert the rsq intrinsic here.

        // 1.0 / x -> rcp(x)
        return Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rcp, Src);
      }

      // TODO: If the input isn't denormal, and we know the input exponent isn't
      // big enough to introduce a denormal, we can avoid the scaling.
      return emitRcpIEEE1ULP(Builder, Src, IsNegative);
    }
  }

  if (FMF.allowReciprocal()) {
    // x / y -> x * (1.0 / y)

    // TODO: Could avoid denormal scaling and use raw rcp if we knew the output
    // will never underflow.
    if (HasFP32DenormalFlush || FMF.approxFunc()) {
      Value *Recip = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rcp, Den);
      return Builder.CreateFMul(Num, Recip);
    }

    Value *Recip = emitRcpIEEE1ULP(Builder, Den, false);
    return Builder.CreateFMul(Num, Recip);
  }

  return nullptr;
}

// Optimize with fdiv.fast:
//
// a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
//
// 1/x -> fdiv.fast(1,x)  when !fpmath >= 2.5ulp.
//
// NOTE: optimizeWithRcp should be tried first because rcp is the preference.
Value *AMDGPUCodeGenPrepareImpl::optimizeWithFDivFast(
    IRBuilder<> &Builder, Value *Num, Value *Den, float ReqdAccuracy) const {
  // fdiv.fast can achieve 2.5 ULP accuracy.
  if (ReqdAccuracy < 2.5f)
    return nullptr;

  // Only have fdiv.fast for f32.
  Type *Ty = Den->getType();
  assert(Ty->isFloatTy());

  bool NumIsOne = false;
  if (const ConstantFP *CNum = dyn_cast<ConstantFP>(Num)) {
    if (CNum->isExactlyValue(+1.0) || CNum->isExactlyValue(-1.0))
      NumIsOne = true;
  }

  // fdiv.fast does not support denormals, but 1.0/x is always fine to use.
  //
  // TODO: This works for any value with a specific known exponent range, don't
  // just limit to constant 1.
  if (!HasFP32DenormalFlush && !NumIsOne)
    return nullptr;

  return Builder.CreateIntrinsic(Intrinsic::amdgcn_fdiv_fast, {}, {Num, Den});
}

Value *AMDGPUCodeGenPrepareImpl::visitFDivElement(
    IRBuilder<> &Builder, Value *Num, Value *Den, FastMathFlags DivFMF,
    FastMathFlags SqrtFMF, Value *RsqOp, const Instruction *FDivInst,
    float ReqdDivAccuracy) const {
  if (RsqOp) {
    Value *Rsq =
        optimizeWithRsq(Builder, Num, RsqOp, DivFMF, SqrtFMF, FDivInst);
    if (Rsq)
      return Rsq;
  }

  Value *Rcp = optimizeWithRcp(Builder, Num, Den, DivFMF, FDivInst);
  if (Rcp)
    return Rcp;

  // In the basic case fdiv_fast has the same instruction count as the frexp div
  // expansion. Slightly prefer fdiv_fast since it ends in an fmul that can
  // potentially be fused into a user. Also, materialization of the constants
  // can be reused for multiple instances.
  Value *FDivFast = optimizeWithFDivFast(Builder, Num, Den, ReqdDivAccuracy);
  if (FDivFast)
    return FDivFast;

  return emitFrexpDiv(Builder, Num, Den, DivFMF);
}

// Optimization is performed based on fpmath, fast math flags, as well as
// denormals, to optimize fdiv with either rcp or fdiv.fast.
//
// With rcp:
//   1/x -> rcp(x) when rcp is sufficiently accurate, or when an inaccurate
//                 rcp is allowed with unsafe-fp-math or afn.
//
//   a/b -> a*rcp(b) when an inaccurate rcp is allowed with unsafe-fp-math or
//                   afn.
//
// With fdiv.fast:
//   a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
//
//   1/x -> fdiv.fast(1,x)  when !fpmath >= 2.5ulp.
//
// NOTE: rcp is the preference in cases that both are legal.
bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
  if (DisableFDivExpand)
    return false;

  Type *Ty = FDiv.getType()->getScalarType();
  if (!Ty->isFloatTy())
    return false;

  // The f64 rcp/rsq approximations are pretty inaccurate. We can do an
  // expansion around them in codegen. f16 is good enough to always use.

  const FPMathOperator *FPOp = cast<const FPMathOperator>(&FDiv);
  const FastMathFlags DivFMF = FPOp->getFastMathFlags();
  const float ReqdAccuracy = FPOp->getFPAccuracy();

  // Inaccurate rcp is allowed with unsafe-fp-math or afn.
  //
  // Defer to codegen to handle this.
  //
  // TODO: Decide on an interpretation for interactions between afn + arcp +
  // !fpmath, and make it consistent between here and codegen. For now, defer
  // expansion of afn to codegen. The current interpretation is so aggressive we
  // don't need any pre-consideration here when we have better information. A
  // more conservative interpretation could use handling here.
  const bool AllowInaccurateRcp = HasUnsafeFPMath || DivFMF.approxFunc();
  if (AllowInaccurateRcp)
    return false;

  // Defer the correct implementations to codegen.
  if (ReqdAccuracy < 1.0f)
    return false;

  FastMathFlags SqrtFMF;

  Value *Num = FDiv.getOperand(0);
  Value *Den = FDiv.getOperand(1);

  Value *RsqOp = nullptr;
  auto *DenII = dyn_cast<IntrinsicInst>(Den);
  if (DenII && DenII->getIntrinsicID() == Intrinsic::sqrt &&
      DenII->hasOneUse()) {
    const auto *SqrtOp = cast<FPMathOperator>(DenII);
    SqrtFMF = SqrtOp->getFastMathFlags();
    if (canOptimizeWithRsq(SqrtOp, DivFMF, SqrtFMF))
      RsqOp = SqrtOp->getOperand(0);
  }

  IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()));
  Builder.setFastMathFlags(DivFMF);
  Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());

  SmallVector<Value *, 4> NumVals;
  SmallVector<Value *, 4> DenVals;
  SmallVector<Value *, 4> RsqDenVals;
  extractValues(Builder, NumVals, Num);
  extractValues(Builder, DenVals, Den);

  if (RsqOp)
    extractValues(Builder, RsqDenVals, RsqOp);

  SmallVector<Value *, 4> ResultVals(NumVals.size());
  for (int I = 0, E = NumVals.size(); I != E; ++I) {
    Value *NumElt = NumVals[I];
    Value *DenElt = DenVals[I];
    Value *RsqDenElt = RsqOp ? RsqDenVals[I] : nullptr;

    Value *NewElt =
        visitFDivElement(Builder, NumElt, DenElt, DivFMF, SqrtFMF, RsqDenElt,
                         cast<Instruction>(FPOp), ReqdAccuracy);
    if (!NewElt) {
      // Keep the original, but scalarized.

      // This has the unfortunate side effect of sometimes scalarizing when
      // we're not going to do anything.
      NewElt = Builder.CreateFDiv(NumElt, DenElt);
      if (auto *NewEltInst = dyn_cast<Instruction>(NewElt))
        NewEltInst->copyMetadata(FDiv);
    }

    ResultVals[I] = NewElt;
  }

  Value *NewVal = insertValues(Builder, FDiv.getType(), ResultVals);

  if (NewVal) {
    FDiv.replaceAllUsesWith(NewVal);
    NewVal->takeName(&FDiv);
    RecursivelyDeleteTriviallyDeadInstructions(&FDiv, TLInfo);
  }

  return true;
}

static bool hasUnsafeFPMath(const Function &F) {
  Attribute Attr = F.getFnAttribute("unsafe-fp-math");
  return Attr.getValueAsBool();
}

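// Compute the full 64-bit product of two u32 values and return the low and
// high 32-bit halves.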
static std::pair<Value*, Value*> getMul64(IRBuilder<> &Builder,
                                          Value *LHS, Value *RHS) {
  Type *I32Ty = Builder.getInt32Ty();
  Type *I64Ty = Builder.getInt64Ty();

  Value *LHS_EXT64 = Builder.CreateZExt(LHS, I64Ty);
  Value *RHS_EXT64 = Builder.CreateZExt(RHS, I64Ty);
  Value *MUL64 = Builder.CreateMul(LHS_EXT64, RHS_EXT64);
  Value *Lo = Builder.CreateTrunc(MUL64, I32Ty);
  Value *Hi = Builder.CreateLShr(MUL64, Builder.getInt64(32));
  Hi = Builder.CreateTrunc(Hi, I32Ty);
  return std::pair(Lo, Hi);
}

static Value* getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
  return getMul64(Builder, LHS, RHS).second;
}

/// Figure out how many bits are really needed for this division. \p AtLeast is
/// an optimization hint to bypass the second ComputeNumSignBits call if the
/// first one is insufficient. Returns -1 on failure.
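/// For example, a 64-bit sdiv whose operands each have at least 41 sign bits
/// carries only 23 significant bits plus a sign bit, so 24 bits suffice.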
int AMDGPUCodeGenPrepareImpl::getDivNumBits(BinaryOperator &I, Value *Num,
                                            Value *Den, unsigned AtLeast,
                                            bool IsSigned) const {
  const DataLayout &DL = Mod->getDataLayout();
  unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
  if (LHSSignBits < AtLeast)
    return -1;

  unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
  if (RHSSignBits < AtLeast)
    return -1;

  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
  unsigned DivBits = Num->getType()->getScalarSizeInBits() - SignBits;
  if (IsSigned)
    ++DivBits;
  return DivBits;
}

// The fractional part of a float is enough to accurately represent up to
// a 24-bit signed integer.
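// (A float has a 24-bit significand: 23 stored bits plus the implicit leading
// one, so every integer of magnitude less than 2^24 is exactly representable.)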
Value *AMDGPUCodeGenPrepareImpl::expandDivRem24(IRBuilder<> &Builder,
                                                BinaryOperator &I, Value *Num,
                                                Value *Den, bool IsDiv,
                                                bool IsSigned) const {
  int DivBits = getDivNumBits(I, Num, Den, 9, IsSigned);
  if (DivBits == -1)
    return nullptr;
  return expandDivRem24Impl(Builder, I, Num, Den, DivBits, IsDiv, IsSigned);
}

Value *AMDGPUCodeGenPrepareImpl::expandDivRem24Impl(
    IRBuilder<> &Builder, BinaryOperator &I, Value *Num, Value *Den,
    unsigned DivBits, bool IsDiv, bool IsSigned) const {
  Type *I32Ty = Builder.getInt32Ty();
  Num = Builder.CreateTrunc(Num, I32Ty);
  Den = Builder.CreateTrunc(Den, I32Ty);

  Type *F32Ty = Builder.getFloatTy();
  ConstantInt *One = Builder.getInt32(1);
  Value *JQ = One;

  if (IsSigned) {
    // char|short jq = ia ^ ib;
    JQ = Builder.CreateXor(Num, Den);

    // jq = jq >> (bitsize - 2)
    JQ = Builder.CreateAShr(JQ, Builder.getInt32(30));

    // jq = jq | 0x1
    JQ = Builder.CreateOr(JQ, One);
  }

  // int ia = (int)LHS;
  Value *IA = Num;

  // int ib = (int)RHS;
  Value *IB = Den;

  // float fa = (float)ia;
  Value *FA = IsSigned ? Builder.CreateSIToFP(IA, F32Ty)
                       : Builder.CreateUIToFP(IA, F32Ty);

  // float fb = (float)ib;
  Value *FB = IsSigned ? Builder.CreateSIToFP(IB, F32Ty)
                       : Builder.CreateUIToFP(IB, F32Ty);

  Function *RcpDecl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_rcp,
                                                Builder.getFloatTy());
  Value *RCP = Builder.CreateCall(RcpDecl, { FB });
  Value *FQM = Builder.CreateFMul(FA, RCP);

  // fq = trunc(fqm);
  CallInst *FQ = Builder.CreateUnaryIntrinsic(Intrinsic::trunc, FQM);
  FQ->copyFastMathFlags(Builder.getFastMathFlags());

  // float fqneg = -fq;
  Value *FQNeg = Builder.CreateFNeg(FQ);

  // float fr = mad(fqneg, fb, fa);
  auto FMAD = !ST->hasMadMacF32Insts()
                  ? Intrinsic::fma
                  : (Intrinsic::ID)Intrinsic::amdgcn_fmad_ftz;
  Value *FR = Builder.CreateIntrinsic(FMAD,
                                      {FQNeg->getType()}, {FQNeg, FB, FA}, FQ);

  // int iq = (int)fq;
  Value *IQ = IsSigned ? Builder.CreateFPToSI(FQ, I32Ty)
                       : Builder.CreateFPToUI(FQ, I32Ty);

  // fr = fabs(fr);
  FR = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FR, FQ);

  // fb = fabs(fb);
  FB = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FB, FQ);

  // int cv = fr >= fb;
  Value *CV = Builder.CreateFCmpOGE(FR, FB);

  // jq = (cv ? jq : 0);
  JQ = Builder.CreateSelect(CV, JQ, Builder.getInt32(0));

  // dst = iq + jq;
  Value *Div = Builder.CreateAdd(IQ, JQ);

  Value *Res = Div;
  if (!IsDiv) {
    // Rem needs compensation; it's easier to recompute it.
    Value *Rem = Builder.CreateMul(Div, Den);
    Res = Builder.CreateSub(Num, Rem);
  }

  if (DivBits != 0 && DivBits < 32) {
    // Extend in register from the number of bits this divide really is.
    if (IsSigned) {
      int InRegBits = 32 - DivBits;

      Res = Builder.CreateShl(Res, InRegBits);
      Res = Builder.CreateAShr(Res, InRegBits);
    } else {
      ConstantInt *TruncMask
        = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
      Res = Builder.CreateAnd(Res, TruncMask);
    }
  }

  return Res;
}

// Try to recognize special cases for which the DAG will emit special, better
// expansions than the general expansion we do here.

// TODO: It would be better to just directly handle those optimizations here.
bool AMDGPUCodeGenPrepareImpl::divHasSpecialOptimization(BinaryOperator &I,
                                                         Value *Num,
                                                         Value *Den) const {
  if (Constant *C = dyn_cast<Constant>(Den)) {
    // Arbitrary constants get a better expansion as long as a wider mulhi is
    // legal.
    if (C->getType()->getScalarSizeInBits() <= 32)
      return true;

    // TODO: The DAG's sdiv path checks for not-exact for some reason.

    // If there's no wider mulhi, there's only a better expansion for powers of
    // two.
    // TODO: Should really know for each vector element.
    if (isKnownToBeAPowerOfTwo(C, *DL, true, 0, AC, &I, DT))
      return true;

    return false;
  }

  if (BinaryOperator *BinOpDen = dyn_cast<BinaryOperator>(Den)) {
    // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
    if (BinOpDen->getOpcode() == Instruction::Shl &&
        isa<Constant>(BinOpDen->getOperand(0)) &&
        isKnownToBeAPowerOfTwo(BinOpDen->getOperand(0), *DL, true,
                               0, AC, &I, DT)) {
      return true;
    }
  }

  return false;
}

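// Materialize sign(V) for a 32-bit value as all-ones (negative) or zero
// (non-negative), folding to a constant when known bits already decide it.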
static Value *getSign32(Value *V, IRBuilder<> &Builder, const DataLayout *DL) {
  // Check whether the sign can be determined statically.
  KnownBits Known = computeKnownBits(V, *DL);
  if (Known.isNegative())
    return Constant::getAllOnesValue(V->getType());
  if (Known.isNonNegative())
    return Constant::getNullValue(V->getType());
  return Builder.CreateAShr(V, Builder.getInt32(31));
}

Value *AMDGPUCodeGenPrepareImpl::expandDivRem32(IRBuilder<> &Builder,
                                                BinaryOperator &I, Value *X,
                                                Value *Y) const {
  Instruction::BinaryOps Opc = I.getOpcode();
  assert(Opc == Instruction::URem || Opc == Instruction::UDiv ||
         Opc == Instruction::SRem || Opc == Instruction::SDiv);

  FastMathFlags FMF;
  FMF.setFast();
  Builder.setFastMathFlags(FMF);

  if (divHasSpecialOptimization(I, X, Y))
    return nullptr;  // Keep it for later optimization.

  bool IsDiv = Opc == Instruction::UDiv || Opc == Instruction::SDiv;
  bool IsSigned = Opc == Instruction::SRem || Opc == Instruction::SDiv;

  Type *Ty = X->getType();
  Type *I32Ty = Builder.getInt32Ty();
  Type *F32Ty = Builder.getFloatTy();

  if (Ty->getScalarSizeInBits() < 32) {
    if (IsSigned) {
      X = Builder.CreateSExt(X, I32Ty);
      Y = Builder.CreateSExt(Y, I32Ty);
    } else {
      X = Builder.CreateZExt(X, I32Ty);
      Y = Builder.CreateZExt(Y, I32Ty);
    }
  }

  if (Value *Res = expandDivRem24(Builder, I, X, Y, IsDiv, IsSigned)) {
    return IsSigned ? Builder.CreateSExtOrTrunc(Res, Ty) :
                      Builder.CreateZExtOrTrunc(Res, Ty);
  }

  ConstantInt *Zero = Builder.getInt32(0);
  ConstantInt *One = Builder.getInt32(1);

  Value *Sign = nullptr;
  if (IsSigned) {
    Value *SignX = getSign32(X, Builder, DL);
    Value *SignY = getSign32(Y, Builder, DL);
    // Remainder sign is the same as LHS
    Sign = IsDiv ? Builder.CreateXor(SignX, SignY) : SignX;

    X = Builder.CreateAdd(X, SignX);
    Y = Builder.CreateAdd(Y, SignY);

    X = Builder.CreateXor(X, SignX);
    Y = Builder.CreateXor(Y, SignY);
  }

  // The algorithm here is based on ideas from "Software Integer Division", Tom
  // Rodeheffer, August 2008.
  //
  // unsigned udiv(unsigned x, unsigned y) {
  //   // Initial estimate of inv(y). The constant is less than 2^32 to ensure
  //   // that this is a lower bound on inv(y), even if some of the calculations
  //   // round up.
  //   unsigned z = (unsigned)((4294967296.0 - 512.0) * v_rcp_f32((float)y));
  //
  //   // One round of UNR (Unsigned integer Newton-Raphson) to improve z.
  //   // Empirically this is guaranteed to give a "two-y" lower bound on
  //   // inv(y).
  //   z += umulh(z, -y * z);
  //
  //   // Quotient/remainder estimate.
  //   unsigned q = umulh(x, z);
  //   unsigned r = x - q * y;
  //
  //   // Two rounds of quotient/remainder refinement.
  //   if (r >= y) {
  //     ++q;
  //     r -= y;
  //   }
  //   if (r >= y) {
  //     ++q;
  //     r -= y;
  //   }
  //
  //   return q;
  // }

  // Initial estimate of inv(y).
  Value *FloatY = Builder.CreateUIToFP(Y, F32Ty);
  Function *Rcp = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_rcp, F32Ty);
  Value *RcpY = Builder.CreateCall(Rcp, {FloatY});
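  // 0x4F7FFFFE is (2^32 - 2^9) = 4294967296.0 - 512.0 as a float, matching
  // the constant in the pseudocode above.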
1449   Constant *Scale = ConstantFP::get(F32Ty, llvm::bit_cast<float>(0x4F7FFFFE));
1450   Value *ScaledY = Builder.CreateFMul(RcpY, Scale);
1451   Value *Z = Builder.CreateFPToUI(ScaledY, I32Ty);
1452 
1453   // One round of UNR.
1454   Value *NegY = Builder.CreateSub(Zero, Y);
1455   Value *NegYZ = Builder.CreateMul(NegY, Z);
1456   Z = Builder.CreateAdd(Z, getMulHu(Builder, Z, NegYZ));
1457 
1458   // Quotient/remainder estimate.
1459   Value *Q = getMulHu(Builder, X, Z);
1460   Value *R = Builder.CreateSub(X, Builder.CreateMul(Q, Y));
1461 
1462   // First quotient/remainder refinement.
1463   Value *Cond = Builder.CreateICmpUGE(R, Y);
1464   if (IsDiv)
1465     Q = Builder.CreateSelect(Cond, Builder.CreateAdd(Q, One), Q);
1466   R = Builder.CreateSelect(Cond, Builder.CreateSub(R, Y), R);
1467 
1468   // Second quotient/remainder refinement.
1469   Cond = Builder.CreateICmpUGE(R, Y);
1470   Value *Res;
1471   if (IsDiv)
1472     Res = Builder.CreateSelect(Cond, Builder.CreateAdd(Q, One), Q);
1473   else
1474     Res = Builder.CreateSelect(Cond, Builder.CreateSub(R, Y), R);
1475 
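       // Undo the sign folding: (Res ^ Sign) - Sign is Res when Sign == 0 and
       // -Res when Sign == -1.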
1476   if (IsSigned) {
1477     Res = Builder.CreateXor(Res, Sign);
1478     Res = Builder.CreateSub(Res, Sign);
1479   }
1480 
1481   Res = Builder.CreateTrunc(Res, Ty);
1482 
1483   return Res;
1484 }
1485 
1486 Value *AMDGPUCodeGenPrepareImpl::shrinkDivRem64(IRBuilder<> &Builder,
1487                                                 BinaryOperator &I, Value *Num,
1488                                                 Value *Den) const {
1489   if (!ExpandDiv64InIR && divHasSpecialOptimization(I, Num, Den))
1490     return nullptr;  // Keep it for later optimization.
1491 
1492   Instruction::BinaryOps Opc = I.getOpcode();
1493 
1494   bool IsDiv = Opc == Instruction::SDiv || Opc == Instruction::UDiv;
1495   bool IsSigned = Opc == Instruction::SDiv || Opc == Instruction::SRem;
1496 
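       // How many low bits do the operands actually need? -1 means they can't
       // be shown to fit in 32 bits, and the 64-bit operation is kept as-is.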
1497   int NumDivBits = getDivNumBits(I, Num, Den, 32, IsSigned);
1498   if (NumDivBits == -1)
1499     return nullptr;
1500 
1501   Value *Narrowed = nullptr;
1502   if (NumDivBits <= 24) {
1503     Narrowed = expandDivRem24Impl(Builder, I, Num, Den, NumDivBits,
1504                                   IsDiv, IsSigned);
1505   } else if (NumDivBits <= 32) {
1506     Narrowed = expandDivRem32(Builder, I, Num, Den);
1507   }
1508 
1509   if (Narrowed) {
1510     return IsSigned ? Builder.CreateSExt(Narrowed, Num->getType()) :
1511                       Builder.CreateZExt(Narrowed, Num->getType());
1512   }
1513 
1514   return nullptr;
1515 }
1516 
1517 void AMDGPUCodeGenPrepareImpl::expandDivRem64(BinaryOperator &I) const {
1518   Instruction::BinaryOps Opc = I.getOpcode();
1519   // Do the general expansion.
1520   if (Opc == Instruction::UDiv || Opc == Instruction::SDiv) {
1521     expandDivisionUpTo64Bits(&I);
1522     return;
1523   }
1524 
1525   if (Opc == Instruction::URem || Opc == Instruction::SRem) {
1526     expandRemainderUpTo64Bits(&I);
1527     return;
1528   }
1529 
1530   llvm_unreachable("not a division");
1531 }
1532 
1533 bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
1534   if (foldBinOpIntoSelect(I))
1535     return true;
1536 
1537   if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
1538       UA->isUniform(&I) && promoteUniformOpToI32(I))
1539     return true;
1540 
1541   if (UseMul24Intrin && replaceMulWithMul24(I))
1542     return true;
1543 
1544   bool Changed = false;
1545   Instruction::BinaryOps Opc = I.getOpcode();
1546   Type *Ty = I.getType();
1547   Value *NewDiv = nullptr;
1548   unsigned ScalarSize = Ty->getScalarSizeInBits();
1549 
1550   SmallVector<BinaryOperator *, 8> Div64ToExpand;
1551 
1552   if ((Opc == Instruction::URem || Opc == Instruction::UDiv ||
1553        Opc == Instruction::SRem || Opc == Instruction::SDiv) &&
1554       ScalarSize <= 64 &&
1555       !DisableIDivExpand) {
1556     Value *Num = I.getOperand(0);
1557     Value *Den = I.getOperand(1);
1558     IRBuilder<> Builder(&I);
1559     Builder.SetCurrentDebugLocation(I.getDebugLoc());
1560 
1561     if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
1562       NewDiv = PoisonValue::get(VT);
1563 
1564       for (unsigned N = 0, E = VT->getNumElements(); N != E; ++N) {
1565         Value *NumEltN = Builder.CreateExtractElement(Num, N);
1566         Value *DenEltN = Builder.CreateExtractElement(Den, N);
1567 
1568         Value *NewElt;
1569         if (ScalarSize <= 32) {
1570           NewElt = expandDivRem32(Builder, I, NumEltN, DenEltN);
1571           if (!NewElt)
1572             NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
1573         } else {
1574           // See if this 64-bit division can be shrunk to 32/24-bits before
1575           // producing the general expansion.
1576           NewElt = shrinkDivRem64(Builder, I, NumEltN, DenEltN);
1577           if (!NewElt) {
1578             // The general 64-bit expansion introduces control flow and doesn't
1579             // return the new value. Just insert a scalar copy and defer
1580             // expanding it.
1581             NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
1582             Div64ToExpand.push_back(cast<BinaryOperator>(NewElt));
1583           }
1584         }
1585 
1586         NewDiv = Builder.CreateInsertElement(NewDiv, NewElt, N);
1587       }
1588     } else {
1589       if (ScalarSize <= 32)
1590         NewDiv = expandDivRem32(Builder, I, Num, Den);
1591       else {
1592         NewDiv = shrinkDivRem64(Builder, I, Num, Den);
1593         if (!NewDiv)
1594           Div64ToExpand.push_back(&I);
1595       }
1596     }
1597 
1598     if (NewDiv) {
1599       I.replaceAllUsesWith(NewDiv);
1600       I.eraseFromParent();
1601       Changed = true;
1602     }
1603   }
1604 
1605   if (ExpandDiv64InIR) {
1606     // TODO: We get much worse code in specially handled constant cases.
1607     for (BinaryOperator *Div : Div64ToExpand) {
1608       expandDivRem64(*Div);
1609       FlowChanged = true;
1610       Changed = true;
1611     }
1612   }
1613 
1614   return Changed;
1615 }
1616 
1617 bool AMDGPUCodeGenPrepareImpl::visitLoadInst(LoadInst &I) {
1618   if (!WidenLoads)
1619     return false;
1620 
1621   if ((I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
1622        I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
1623       canWidenScalarExtLoad(I)) {
1624     IRBuilder<> Builder(&I);
1625     Builder.SetCurrentDebugLocation(I.getDebugLoc());
1626 
1627     Type *I32Ty = Builder.getInt32Ty();
1628     LoadInst *WidenLoad = Builder.CreateLoad(I32Ty, I.getPointerOperand());
1629     WidenLoad->copyMetadata(I);
1630 
1631     // If we have range metadata, we need to convert the type, and not make
1632     // assumptions about the high bits.
1633     if (auto *Range = WidenLoad->getMetadata(LLVMContext::MD_range)) {
1634       ConstantInt *Lower =
1635         mdconst::extract<ConstantInt>(Range->getOperand(0));
1636 
1637       if (Lower->isNullValue()) {
1638         WidenLoad->setMetadata(LLVMContext::MD_range, nullptr);
1639       } else {
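             // !range describes a half-open [Lo, Hi) interval that is allowed
             // to wrap, so an upper bound of 0 keeps only the guarantee that
             // the widened value is at least Lower.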
1640         Metadata *LowAndHigh[] = {
1641           ConstantAsMetadata::get(ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
1642           // Don't make assumptions about the high bits.
1643           ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))
1644         };
1645 
1646         WidenLoad->setMetadata(LLVMContext::MD_range,
1647                                MDNode::get(Mod->getContext(), LowAndHigh));
1648       }
1649     }
1650 
1651     int TySize = Mod->getDataLayout().getTypeSizeInBits(I.getType());
1652     Type *IntNTy = Builder.getIntNTy(TySize);
1653     Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy);
1654     Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType());
1655     I.replaceAllUsesWith(ValOrig);
1656     I.eraseFromParent();
1657     return true;
1658   }
1659 
1660   return false;
1661 }
1662 
1663 bool AMDGPUCodeGenPrepareImpl::visitICmpInst(ICmpInst &I) {
1664   bool Changed = false;
1665 
1666   if (ST->has16BitInsts() && needsPromotionToI32(I.getOperand(0)->getType()) &&
1667       UA->isUniform(&I))
1668     Changed |= promoteUniformOpToI32(I);
1669 
1670   return Changed;
1671 }
1672 
1673 bool AMDGPUCodeGenPrepareImpl::visitSelectInst(SelectInst &I) {
1674   Value *Cond = I.getCondition();
1675   Value *TrueVal = I.getTrueValue();
1676   Value *FalseVal = I.getFalseValue();
1677   Value *CmpVal;
1678   FCmpInst::Predicate Pred;
1679 
1680   if (ST->has16BitInsts() && needsPromotionToI32(I.getType())) {
1681     if (UA->isUniform(&I))
1682       return promoteUniformOpToI32(I);
1683     return false;
1684   }
1685 
1686   // Match fract pattern with nan check.
1687   if (!match(Cond, m_FCmp(Pred, m_Value(CmpVal), m_NonNaN())))
1688     return false;
1689 
1690   FPMathOperator *FPOp = dyn_cast<FPMathOperator>(&I);
1691   if (!FPOp)
1692     return false;
1693 
1694   IRBuilder<> Builder(&I);
1695   Builder.setFastMathFlags(FPOp->getFastMathFlags());
1696 
1697   auto *IITrue = dyn_cast<IntrinsicInst>(TrueVal);
1698   auto *IIFalse = dyn_cast<IntrinsicInst>(FalseVal);
1699 
1700   Value *Fract = nullptr;
1701   if (Pred == FCmpInst::FCMP_UNO && TrueVal == CmpVal && IIFalse &&
1702       CmpVal == matchFractPat(*IIFalse)) {
1703     // isnan(x) ? x : fract(x)
1704     Fract = applyFractPat(Builder, CmpVal);
1705   } else if (Pred == FCmpInst::FCMP_ORD && FalseVal == CmpVal && IITrue &&
1706              CmpVal == matchFractPat(*IITrue)) {
1707     // !isnan(x) ? fract(x) : x
1708     Fract = applyFractPat(Builder, CmpVal);
1709   } else
1710     return false;
1711 
1712   Fract->takeName(&I);
1713   I.replaceAllUsesWith(Fract);
1714   RecursivelyDeleteTriviallyDeadInstructions(&I, TLInfo);
1715   return true;
1716 }
1717 
1718 static bool areInSameBB(const Value *A, const Value *B) {
1719   const auto *IA = dyn_cast<Instruction>(A);
1720   const auto *IB = dyn_cast<Instruction>(B);
1721   return IA && IB && IA->getParent() == IB->getParent();
1722 }
1723 
1724 // Helper for breaking large PHIs that returns true when an extractelement on V
1725 // is likely to be folded away by the DAG combiner.
1726 static bool isInterestingPHIIncomingValue(const Value *V) {
1727   const auto *FVT = dyn_cast<FixedVectorType>(V->getType());
1728   if (!FVT)
1729     return false;
1730 
1731   const Value *CurVal = V;
1732 
1733   // Check for insertelements, keeping track of the elements covered.
1734   BitVector EltsCovered(FVT->getNumElements());
1735   while (const auto *IE = dyn_cast<InsertElementInst>(CurVal)) {
1736     const auto *Idx = dyn_cast<ConstantInt>(IE->getOperand(2));
1737 
1738     // Non-constant or out-of-bounds index -> folding is unlikely.
1739     // The latter is more of a sanity check because canonical IR should just
1740     // have replaced those with poison.
1741     if (!Idx || Idx->getSExtValue() >= FVT->getNumElements())
1742       return false;
1743 
1744     const auto *VecSrc = IE->getOperand(0);
1745 
1746     // If the vector source is another instruction, it must be in the same basic
1747     // block. Otherwise, the DAGCombiner won't see the whole thing and is
1748     // unlikely to be able to do anything interesting here.
1749     if (isa<Instruction>(VecSrc) && !areInSameBB(VecSrc, IE))
1750       return false;
1751 
1752     CurVal = VecSrc;
1753     EltsCovered.set(Idx->getSExtValue());
1754 
1755     // All elements covered.
1756     if (EltsCovered.all())
1757       return true;
1758   }
1759 
1760   // We either didn't find a single insertelement, or the insertelement chain
1761   // ended before all elements were covered. Check for other interesting values.
1762 
1763   // Constants are always interesting because we can just constant fold the
1764   // extractelements.
1765   if (isa<Constant>(CurVal))
1766     return true;
1767 
1768   // shufflevector is likely to be profitable if either operand is a constant,
1769   // or if either source is in the same block.
1770   // This is because shufflevector is most often lowered as a series of
1771   // insert/extract elements anyway.
1772   if (const auto *SV = dyn_cast<ShuffleVectorInst>(CurVal)) {
1773     return isa<Constant>(SV->getOperand(1)) ||
1774            areInSameBB(SV, SV->getOperand(0)) ||
1775            areInSameBB(SV, SV->getOperand(1));
1776   }
1777 
1778   return false;
1779 }
1780 
1781 bool AMDGPUCodeGenPrepareImpl::canBreakPHINode(const PHINode &I) {
1782   // Check in the cache, or add an entry for this node.
1783   //
1784   // We initialize with false because we consider all PHI nodes unbreakable
1785   // until we reach a conclusion. Doing the opposite (assuming they're
1786   // breakable until proven otherwise) can be harmful in some pathological
1787   // cases, so we're conservative for now.
1788   const auto [It, DidInsert] = BreakPhiNodesCache.insert({&I, false});
1789   if (!DidInsert)
1790     return It->second;
1791 
1792   // This function may recurse, so to guard against infinite looping, this PHI
1793   // is conservatively considered unbreakable until we reach a conclusion.
1794 
1795   // Don't break PHIs that have no interesting incoming values. That is, where
1796   // there is no clear opportunity to fold the "extractelement" instructions we
1797   // would add.
1798   //
1799   // Note: InstCombine does not run after this pass, so we're only interested
1800   // in the foldings that the DAG combiner can do.
1801   if (none_of(I.incoming_values(),
1802               [&](Value *V) { return isInterestingPHIIncomingValue(V); }))
1803     return false;
1804 
1805   // Now, check users for unbreakable PHI nodes. If we have an unbreakable PHI
1806   // node as user, we don't want to break this PHI either because it's unlikely
1807   // to be beneficial. We would just explode the vector and reassemble it
1808   // directly, wasting instructions.
1809   //
1810   // In the case where multiple users are PHI nodes, we want at least half of
1811   // them to be breakable.
1812   int Score = 0;
1813   for (const Value *U : I.users()) {
1814     if (const auto *PU = dyn_cast<PHINode>(U))
1815       Score += canBreakPHINode(*PU) ? 1 : -1;
1816   }
1817 
1818   if (Score < 0)
1819     return false;
1820 
1821   return BreakPhiNodesCache[&I] = true;
1822 }
1823 
1824 /// Helper class for "break large PHIs" (visitPHINode).
1825 ///
1826 /// This represents a slice of a PHI's incoming value, which is made up of:
1827 ///   - The type of the slice (Ty)
1828 ///   - The index in the incoming value's vector where the slice starts (Idx)
1829 ///   - The number of elements in the slice (NumElts).
1830 /// It also keeps track of the NewPHI node inserted for this particular slice.
1831 ///
1832 /// Slice examples:
1833 ///   <4 x i64> -> Split into four i64 slices.
1834 ///     -> [i64, 0, 1], [i64, 1, 1], [i64, 2, 1], [i64, 3, 1]
1835 ///   <5 x i16> -> Split into 2 <2 x i16> slices + a i16 tail.
1836 ///     -> [<2 x i16>, 0, 2], [<2 x i16>, 2, 2], [i16, 4, 1]
1837 class VectorSlice {
1838 public:
1839   VectorSlice(Type *Ty, unsigned Idx, unsigned NumElts)
1840       : Ty(Ty), Idx(Idx), NumElts(NumElts) {}
1841 
1842   Type *Ty = nullptr;
1843   unsigned Idx = 0;
1844   unsigned NumElts = 0;
1845   PHINode *NewPHI = nullptr;
1846 
1847   /// Slice \p Inc according to the information contained within this slice.
1848   /// This is cached, so if called multiple times for the same \p BB & \p Inc
1849   /// pair, it returns the same Sliced value as well.
1850   ///
1851   /// Note this *intentionally* does not return the same value for, say,
1852   /// [%bb.0, %0] & [%bb.1, %0] as:
1853   ///   - It could cause issues with dominance (e.g. if bb.1 is seen first, the
1854   ///   value created in bb.1 may not be available in bb.0 if bb.1 is bb.0's
1855   ///   predecessor).
1856   ///   - We also want to make our extract instructions as local as possible so
1857   ///   the DAG has better chances of folding them out. Duplicating them like
1858   ///   that is beneficial in that regard.
1859   ///
1860   /// This is both a minor optimization to avoid creating duplicate
1861   /// instructions, but also a requirement for correctness. It is not forbidden
1862   /// for a PHI node to have the same [BB, Val] pair multiple times. If we
1863   /// returned a new value each time, those previously identical pairs would all
1864   /// have different incoming values (from the same block) and it'd cause a "PHI
1865   /// node has multiple entries for the same basic block with different incoming
1866   /// values!" verifier error.
1867   Value *getSlicedVal(BasicBlock *BB, Value *Inc, StringRef NewValName) {
1868     Value *&Res = SlicedVals[{BB, Inc}];
1869     if (Res)
1870       return Res;
1871 
1872     IRBuilder<> B(BB->getTerminator());
1873     if (Instruction *IncInst = dyn_cast<Instruction>(Inc))
1874       B.SetCurrentDebugLocation(IncInst->getDebugLoc());
1875 
1876     if (NumElts > 1) {
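           // E.g. Idx = 2, NumElts = 2 builds the mask {2, 3}, extracting the
           // second two-element slice of Inc.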
1877       SmallVector<int, 4> Mask;
1878       for (unsigned K = Idx; K < (Idx + NumElts); ++K)
1879         Mask.push_back(K);
1880       Res = B.CreateShuffleVector(Inc, Mask, NewValName);
1881     } else
1882       Res = B.CreateExtractElement(Inc, Idx, NewValName);
1883 
1884     return Res;
1885   }
1886 
1887 private:
1888   SmallDenseMap<std::pair<BasicBlock *, Value *>, Value *> SlicedVals;
1889 };
1890 
1891 bool AMDGPUCodeGenPrepareImpl::visitPHINode(PHINode &I) {
1892   // Break up fixed-vector PHIs into smaller pieces.
1893   // Default threshold is 32, so it breaks up any vector that's >32 bits into
1894   // its elements, or into 32-bit pieces (for 8/16 bit elts).
1895   //
1896   // This is only helpful for DAGISel because it doesn't handle large PHIs as
1897   // well as GlobalISel. DAGISel lowers PHIs by using CopyToReg/CopyFromReg.
1898   // With large, odd-sized PHIs we may end up needing many `build_vector`
1899   // operations with most elements being "undef". This inhibits a lot of
1900   // optimization opportunities and can result in unreasonably high register
1901   // pressure and the inevitable stack spilling.
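       //
       // For example, with the default 32-bit threshold a
       //   %v = phi <4 x i32> [ %a, %bb0 ], [ %b, %bb1 ]
       // becomes four i32 PHIs, one per element, fed by extractelements in the
       // incoming blocks and recombined with insertelement below.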
1902   if (!ScalarizeLargePHIs || getCGPassBuilderOption().EnableGlobalISelOption)
1903     return false;
1904 
1905   FixedVectorType *FVT = dyn_cast<FixedVectorType>(I.getType());
1906   if (!FVT || DL->getTypeSizeInBits(FVT) <= ScalarizeLargePHIsThreshold)
1907     return false;
1908 
1909   if (!ForceScalarizeLargePHIs && !canBreakPHINode(I))
1910     return false;
1911 
1912   std::vector<VectorSlice> Slices;
1913 
1914   Type *EltTy = FVT->getElementType();
1915   {
1916     unsigned Idx = 0;
1917     // For 8/16-bit element types, don't scalarize fully; break the vector
1918     // into as many 32-bit slices as we can, and scalarize the tail.
1919     const unsigned EltSize = DL->getTypeSizeInBits(EltTy);
1920     const unsigned NumElts = FVT->getNumElements();
1921     if (EltSize == 8 || EltSize == 16) {
1922       const unsigned SubVecSize = (32 / EltSize);
1923       Type *SubVecTy = FixedVectorType::get(EltTy, SubVecSize);
1924       for (unsigned End = alignDown(NumElts, SubVecSize); Idx < End;
1925            Idx += SubVecSize)
1926         Slices.emplace_back(SubVecTy, Idx, SubVecSize);
1927     }
1928 
1929     // Scalarize all remaining elements.
1930     for (; Idx < NumElts; ++Idx)
1931       Slices.emplace_back(EltTy, Idx, 1);
1932   }
1933 
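       // A single slice (e.g. for a <1 x i64>) means nothing was broken up.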
1934   if (Slices.size() == 1)
1935     return false;
1936 
1937   // Create one PHI per vector piece. The "VectorSlice" class takes care of
1938   // creating the necessary instruction to extract the relevant slices of each
1939   // incoming value.
1940   IRBuilder<> B(I.getParent());
1941   B.SetCurrentDebugLocation(I.getDebugLoc());
1942 
1943   unsigned IncNameSuffix = 0;
1944   for (VectorSlice &S : Slices) {
1945     // We need to reset the builder on each iteration, because getSlicedVal
1946     // may have inserted something into I's BB.
1947     B.SetInsertPoint(I.getParent()->getFirstNonPHI());
1948     S.NewPHI = B.CreatePHI(S.Ty, I.getNumIncomingValues());
1949 
1950     for (const auto &[Idx, BB] : enumerate(I.blocks())) {
1951       S.NewPHI->addIncoming(S.getSlicedVal(BB, I.getIncomingValue(Idx),
1952                                            "largephi.extractslice" +
1953                                                std::to_string(IncNameSuffix++)),
1954                             BB);
1955     }
1956   }
1957 
1958   // And replace this PHI with a vector of all the previous PHI values.
1959   Value *Vec = PoisonValue::get(FVT);
1960   unsigned NameSuffix = 0;
1961   for (VectorSlice &S : Slices) {
1962     const auto ValName = "largephi.insertslice" + std::to_string(NameSuffix++);
1963     if (S.NumElts > 1)
1964       Vec =
1965           B.CreateInsertVector(FVT, Vec, S.NewPHI, B.getInt64(S.Idx), ValName);
1966     else
1967       Vec = B.CreateInsertElement(Vec, S.NewPHI, S.Idx, ValName);
1968   }
1969 
1970   I.replaceAllUsesWith(Vec);
1971   I.eraseFromParent();
1972   return true;
1973 }
1974 
1975 bool AMDGPUCodeGenPrepareImpl::visitIntrinsicInst(IntrinsicInst &I) {
1976   switch (I.getIntrinsicID()) {
1977   case Intrinsic::bitreverse:
1978     return visitBitreverseIntrinsicInst(I);
1979   case Intrinsic::minnum:
1980     return visitMinNum(I);
1981   default:
1982     return false;
1983   }
1984 }
1985 
1986 bool AMDGPUCodeGenPrepareImpl::visitBitreverseIntrinsicInst(IntrinsicInst &I) {
1987   bool Changed = false;
1988 
1989   if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
1990       UA->isUniform(&I))
1991     Changed |= promoteUniformBitreverseToI32(I);
1992 
1993   return Changed;
1994 }
1995 
1996 /// Match the non-nan fract pattern:
1997 ///   minnum(fsub(x, floor(x)), nextafter(1.0, -1.0))
1998 ///
1999 /// Matched only if fract is a useful instruction for the subtarget. Does not
2000 /// account for nan handling; the instruction has a nan check on the input value.
2001 Value *AMDGPUCodeGenPrepareImpl::matchFractPat(IntrinsicInst &I) {
2002   if (ST->hasFractBug())
2003     return nullptr;
2004 
2005   if (I.getIntrinsicID() != Intrinsic::minnum)
2006     return nullptr;
2007 
2008   Type *Ty = I.getType();
2009   if (!isLegalFloatingTy(Ty->getScalarType()))
2010     return nullptr;
2011 
2012   Value *Arg0 = I.getArgOperand(0);
2013   Value *Arg1 = I.getArgOperand(1);
2014 
2015   const APFloat *C;
2016   if (!match(Arg1, m_APFloat(C)))
2017     return nullptr;
2018 
2019   APFloat One(1.0);
2020   bool LosesInfo;
2021   One.convert(C->getSemantics(), APFloat::rmNearestTiesToEven, &LosesInfo);
2022 
2023   // Match nextafter(1.0, -1)
2024   One.next(true);
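       // For f32 this is 0x3F7FFFFF (0.99999994...), the largest float < 1.0.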
2025   if (One != *C)
2026     return nullptr;
2027 
2028   Value *FloorSrc;
2029   if (match(Arg0, m_FSub(m_Value(FloorSrc),
2030                          m_Intrinsic<Intrinsic::floor>(m_Deferred(FloorSrc)))))
2031     return FloorSrc;
2032   return nullptr;
2033 }
2034 
2035 Value *AMDGPUCodeGenPrepareImpl::applyFractPat(IRBuilder<> &Builder,
2036                                                Value *FractArg) {
2037   SmallVector<Value *, 4> FractVals;
2038   extractValues(Builder, FractVals, FractArg);
2039 
2040   SmallVector<Value *, 4> ResultVals(FractVals.size());
2041 
2042   Type *Ty = FractArg->getType()->getScalarType();
2043   for (unsigned I = 0, E = FractVals.size(); I != E; ++I) {
2044     ResultVals[I] =
2045         Builder.CreateIntrinsic(Intrinsic::amdgcn_fract, {Ty}, {FractVals[I]});
2046   }
2047 
2048   return insertValues(Builder, FractArg->getType(), ResultVals);
2049 }
2050 
2051 bool AMDGPUCodeGenPrepareImpl::visitMinNum(IntrinsicInst &I) {
2052   Value *FractArg = matchFractPat(I);
2053   if (!FractArg)
2054     return false;
2055 
2056   // Match the fract pattern in contexts where the nan check has been optimized
2057   // out (and hope the knowledge that the source can't be nan wasn't lost).
2058   if (!I.hasNoNaNs() && !isKnownNeverNaN(FractArg, *DL, TLInfo))
2059     return false;
2060 
2061   IRBuilder<> Builder(&I);
2062   FastMathFlags FMF = I.getFastMathFlags();
2063   FMF.setNoNaNs();
2064   Builder.setFastMathFlags(FMF);
2065 
2066   Value *Fract = applyFractPat(Builder, FractArg);
2067   Fract->takeName(&I);
2068   I.replaceAllUsesWith(Fract);
2069 
2070   RecursivelyDeleteTriviallyDeadInstructions(&I, TLInfo);
2071   return true;
2072 }
2073 
2074 bool AMDGPUCodeGenPrepare::doInitialization(Module &M) {
2075   Impl.Mod = &M;
2076   Impl.DL = &Impl.Mod->getDataLayout();
2077   return false;
2078 }
2079 
2080 bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
2081   if (skipFunction(F))
2082     return false;
2083 
2084   auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
2085   if (!TPC)
2086     return false;
2087 
2088   const AMDGPUTargetMachine &TM = TPC->getTM<AMDGPUTargetMachine>();
2089   Impl.TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
2090   Impl.ST = &TM.getSubtarget<GCNSubtarget>(F);
2091   Impl.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2092   Impl.UA = &getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();
2093   auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
2094   Impl.DT = DTWP ? &DTWP->getDomTree() : nullptr;
2095   Impl.HasUnsafeFPMath = hasUnsafeFPMath(F);
2096   SIModeRegisterDefaults Mode(F);
2097   Impl.HasFP32DenormalFlush =
2098       Mode.FP32Denormals == DenormalMode::getPreserveSign();
2099   return Impl.run(F);
2100 }
2101 
2102 PreservedAnalyses AMDGPUCodeGenPreparePass::run(Function &F,
2103                                                 FunctionAnalysisManager &FAM) {
2104   AMDGPUCodeGenPrepareImpl Impl;
2105   Impl.Mod = F.getParent();
2106   Impl.DL = &Impl.Mod->getDataLayout();
2107   Impl.TLInfo = &FAM.getResult<TargetLibraryAnalysis>(F);
2108   Impl.ST = &TM.getSubtarget<GCNSubtarget>(F);
2109   Impl.AC = &FAM.getResult<AssumptionAnalysis>(F);
2110   Impl.UA = &FAM.getResult<UniformityInfoAnalysis>(F);
2111   Impl.DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
2112   Impl.HasUnsafeFPMath = hasUnsafeFPMath(F);
2113   SIModeRegisterDefaults Mode(F);
2114   Impl.HasFP32DenormalFlush =
2115       Mode.FP32Denormals == DenormalMode::getPreserveSign();
2116   // Run the rewrite first: FlowChanged is only set during run(), so it
2117   // must not be read before then.
2118   const bool Changed = Impl.run(F);
2119   PreservedAnalyses PA = PreservedAnalyses::none();
       if (!Impl.FlowChanged)
         PA.preserveSet<CFGAnalyses>();
       return Changed ? PA : PreservedAnalyses::all();
2120 }
2121 
2122 INITIALIZE_PASS_BEGIN(AMDGPUCodeGenPrepare, DEBUG_TYPE,
2123                       "AMDGPU IR optimizations", false, false)
2124 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
2125 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2126 INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
2127 INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
2128                     false, false)
2129 
2130 char AMDGPUCodeGenPrepare::ID = 0;
2131 
2132 FunctionPass *llvm::createAMDGPUCodeGenPreparePass() {
2133   return new AMDGPUCodeGenPrepare();
2134 }
2135