//===- AMDGPULibCalls.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file does AMD library function optimizations.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPULibFunc.h"
#include "GCNSubtarget.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include <cmath>

#define DEBUG_TYPE "amdgpu-simplifylib"

using namespace llvm;
using namespace llvm::PatternMatch;

static cl::opt<bool> EnablePreLink("amdgpu-prelink",
  cl::desc("Enable pre-link mode optimizations"),
  cl::init(false),
  cl::Hidden);

static cl::list<std::string> UseNative("amdgpu-use-native",
  cl::desc("Comma separated list of functions to replace with native, or all"),
  cl::CommaSeparated, cl::ValueOptional,
  cl::Hidden);

#define MATH_PI      numbers::pi
#define MATH_E       numbers::e
#define MATH_SQRT2   numbers::sqrt2
#define MATH_SQRT1_2 numbers::inv_sqrt2

namespace llvm {

class AMDGPULibCalls {
private:
  const TargetLibraryInfo *TLInfo = nullptr;
  AssumptionCache *AC = nullptr;
  DominatorTree *DT = nullptr;

  using FuncInfo = llvm::AMDGPULibFunc;

  bool UnsafeFPMath = false;

  // -fuse-native.
  bool AllNative = false;

  bool useNativeFunc(const StringRef F) const;

  // Return a pointer (pointer expr) to the function if a function definition
  // with the name "FuncName" exists. It may create a new function prototype in
  // pre-link mode.
  FunctionCallee getFunction(Module *M, const FuncInfo &fInfo);

  bool parseFunctionName(const StringRef &FMangledName, FuncInfo &FInfo);

  bool TDOFold(CallInst *CI, const FuncInfo &FInfo);

  /* Specialized optimizations */

  // pow/powr/pown
  bool fold_pow(FPMathOperator *FPOp, IRBuilder<> &B, const FuncInfo &FInfo);

  // rootn
  bool fold_rootn(FPMathOperator *FPOp, IRBuilder<> &B, const FuncInfo &FInfo);

  // -fuse-native for sincos
  bool sincosUseNative(CallInst *aCI, const FuncInfo &FInfo);

  // Evaluate calls whose arguments are constants.
  bool evaluateScalarMathFunc(const FuncInfo &FInfo, double &Res0, double &Res1,
                              Constant *copr0, Constant *copr1);
  bool evaluateCall(CallInst *aCI, const FuncInfo &FInfo);

  /// Insert a call to the sincos function \p Fsincos. Returns (value of sin,
  /// value of cos, sincos call).
  std::tuple<Value *, Value *, Value *> insertSinCos(Value *Arg,
                                                     FastMathFlags FMF,
                                                     IRBuilder<> &B,
                                                     FunctionCallee Fsincos);

  // sin/cos
  bool fold_sincos(FPMathOperator *FPOp, IRBuilder<> &B, const FuncInfo &FInfo);

  // __read_pipe/__write_pipe
  bool fold_read_write_pipe(CallInst *CI, IRBuilder<> &B,
                            const FuncInfo &FInfo);

  // Get a scalar native builtin single argument FP function
  FunctionCallee getNativeFunction(Module *M, const FuncInfo &FInfo);

  /// Substitute a call to a known libcall with an intrinsic call. If \p
  /// AllowMinSizeF32 is true, allow the replacement in a minsize function.
  bool shouldReplaceLibcallWithIntrinsic(const CallInst *CI,
                                         bool AllowMinSizeF32 = false,
                                         bool AllowF64 = false,
                                         bool AllowStrictFP = false);
  void replaceLibCallWithSimpleIntrinsic(IRBuilder<> &B, CallInst *CI,
                                         Intrinsic::ID IntrID);

  bool tryReplaceLibcallWithSimpleIntrinsic(IRBuilder<> &B, CallInst *CI,
                                            Intrinsic::ID IntrID,
                                            bool AllowMinSizeF32 = false,
                                            bool AllowF64 = false,
                                            bool AllowStrictFP = false);

protected:
  bool isUnsafeMath(const FPMathOperator *FPOp) const;
  bool isUnsafeFiniteOnlyMath(const FPMathOperator *FPOp) const;

  bool canIncreasePrecisionOfConstantFold(const FPMathOperator *FPOp) const;

  static void replaceCall(Instruction *I, Value *With) {
    I->replaceAllUsesWith(With);
    I->eraseFromParent();
  }

  static void replaceCall(FPMathOperator *I, Value *With) {
    replaceCall(cast<Instruction>(I), With);
  }

public:
  AMDGPULibCalls() = default;

  bool fold(CallInst *CI);

  void initFunction(Function &F, FunctionAnalysisManager &FAM);
  void initNativeFuncs();

  // Replace a normal math function call with the native version.
  bool useNative(CallInst *CI);
};

} // end namespace llvm

template <typename IRB>
static CallInst *CreateCallEx(IRB &B, FunctionCallee Callee, Value *Arg,
                              const Twine &Name = "") {
  CallInst *R = B.CreateCall(Callee, Arg, Name);
  if (Function *F = dyn_cast<Function>(Callee.getCallee()))
    R->setCallingConv(F->getCallingConv());
  return R;
}

template <typename IRB>
static CallInst *CreateCallEx2(IRB &B, FunctionCallee Callee, Value *Arg1,
                               Value *Arg2, const Twine &Name = "") {
  CallInst *R = B.CreateCall(Callee, {Arg1, Arg2}, Name);
  if (Function *F = dyn_cast<Function>(Callee.getCallee()))
    R->setCallingConv(F->getCallingConv());
  return R;
}

static FunctionType *getPownType(FunctionType *FT) {
  Type *PowNExpTy = Type::getInt32Ty(FT->getContext());
  if (VectorType *VecTy = dyn_cast<VectorType>(FT->getReturnType()))
    PowNExpTy = VectorType::get(PowNExpTy, VecTy->getElementCount());

  return FunctionType::get(FT->getReturnType(),
                           {FT->getParamType(0), PowNExpTy}, false);
}
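
// For example, getPownType maps a vector pow signature
//   <2 x float> (<2 x float>, <2 x float>)
// to the corresponding pown signature with an integer exponent,
//   <2 x float> (<2 x float>, <2 x i32>)
// and a scalar float (float, float) to float (float, i32) (illustrative).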

//  Data structures for table-driven optimizations.
//  FuncTbl works for both f32 and f64 functions with one input argument.

struct TableEntry {
  double   result;
  double   input;
};

/* a list of {result, input} */
static const TableEntry tbl_acos[] = {
  {MATH_PI / 2.0, 0.0},
  {MATH_PI / 2.0, -0.0},
  {0.0, 1.0},
  {MATH_PI, -1.0}
};
static const TableEntry tbl_acosh[] = {
  {0.0, 1.0}
};
static const TableEntry tbl_acospi[] = {
  {0.5, 0.0},
  {0.5, -0.0},
  {0.0, 1.0},
  {1.0, -1.0}
};
static const TableEntry tbl_asin[] = {
  {0.0, 0.0},
  {-0.0, -0.0},
  {MATH_PI / 2.0, 1.0},
  {-MATH_PI / 2.0, -1.0}
};
static const TableEntry tbl_asinh[] = {
  {0.0, 0.0},
  {-0.0, -0.0}
};
static const TableEntry tbl_asinpi[] = {
  {0.0, 0.0},
  {-0.0, -0.0},
  {0.5, 1.0},
  {-0.5, -1.0}
};
static const TableEntry tbl_atan[] = {
  {0.0, 0.0},
  {-0.0, -0.0},
  {MATH_PI / 4.0, 1.0},
  {-MATH_PI / 4.0, -1.0}
};
static const TableEntry tbl_atanh[] = {
  {0.0, 0.0},
  {-0.0, -0.0}
};
static const TableEntry tbl_atanpi[] = {
  {0.0, 0.0},
  {-0.0, -0.0},
  {0.25, 1.0},
  {-0.25, -1.0}
};
static const TableEntry tbl_cbrt[] = {
  {0.0, 0.0},
  {-0.0, -0.0},
  {1.0, 1.0},
  {-1.0, -1.0},
};
static const TableEntry tbl_cos[] = {
  {1.0, 0.0},
  {1.0, -0.0}
};
static const TableEntry tbl_cosh[] = {
  {1.0, 0.0},
  {1.0, -0.0}
};
static const TableEntry tbl_cospi[] = {
  {1.0, 0.0},
  {1.0, -0.0}
};
static const TableEntry tbl_erfc[] = {
  {1.0, 0.0},
  {1.0, -0.0}
};
static const TableEntry tbl_erf[] = {
  {0.0, 0.0},
  {-0.0, -0.0}
};
static const TableEntry tbl_exp[] = {
  {1.0, 0.0},
  {1.0, -0.0},
  {MATH_E, 1.0}
};
static const TableEntry tbl_exp2[] = {
  {1.0, 0.0},
  {1.0, -0.0},
  {2.0, 1.0}
};
static const TableEntry tbl_exp10[] = {
  {1.0, 0.0},
  {1.0, -0.0},
  {10.0, 1.0}
};
static const TableEntry tbl_expm1[] = {
  {0.0, 0.0},
  {-0.0, -0.0}
};
static const TableEntry tbl_log[] = {
  {0.0, 1.0},
  {1.0, MATH_E}
};
static const TableEntry tbl_log2[] = {
  {0.0, 1.0},
  {1.0, 2.0}
};
static const TableEntry tbl_log10[] = {
  {0.0, 1.0},
  {1.0, 10.0}
};
static const TableEntry tbl_rsqrt[] = {
  {1.0, 1.0},
  {MATH_SQRT1_2, 2.0}
};
static const TableEntry tbl_sin[] = {
  {0.0, 0.0},
  {-0.0, -0.0}
};
static const TableEntry tbl_sinh[] = {
  {0.0, 0.0},
  {-0.0, -0.0}
};
static const TableEntry tbl_sinpi[] = {
  {0.0, 0.0},
  {-0.0, -0.0}
};
static const TableEntry tbl_sqrt[] = {
  {0.0, 0.0},
  {1.0, 1.0},
  {MATH_SQRT2, 2.0}
};
static const TableEntry tbl_tan[] = {
  {0.0, 0.0},
  {-0.0, -0.0}
};
static const TableEntry tbl_tanh[] = {
  {0.0, 0.0},
  {-0.0, -0.0}
};
static const TableEntry tbl_tanpi[] = {
  {0.0, 0.0},
  {-0.0, -0.0}
};
static const TableEntry tbl_tgamma[] = {
  {1.0, 1.0},
  {1.0, 2.0},
  {2.0, 3.0},
  {6.0, 4.0}
};

static bool HasNative(AMDGPULibFunc::EFuncId id) {
  switch(id) {
  case AMDGPULibFunc::EI_DIVIDE:
  case AMDGPULibFunc::EI_COS:
  case AMDGPULibFunc::EI_EXP:
  case AMDGPULibFunc::EI_EXP2:
  case AMDGPULibFunc::EI_EXP10:
  case AMDGPULibFunc::EI_LOG:
  case AMDGPULibFunc::EI_LOG2:
  case AMDGPULibFunc::EI_LOG10:
  case AMDGPULibFunc::EI_POWR:
  case AMDGPULibFunc::EI_RECIP:
  case AMDGPULibFunc::EI_RSQRT:
  case AMDGPULibFunc::EI_SIN:
  case AMDGPULibFunc::EI_SINCOS:
  case AMDGPULibFunc::EI_SQRT:
  case AMDGPULibFunc::EI_TAN:
    return true;
  default:;
  }
  return false;
}

using TableRef = ArrayRef<TableEntry>;

static TableRef getOptTable(AMDGPULibFunc::EFuncId id) {
  switch(id) {
  case AMDGPULibFunc::EI_ACOS:    return TableRef(tbl_acos);
  case AMDGPULibFunc::EI_ACOSH:   return TableRef(tbl_acosh);
  case AMDGPULibFunc::EI_ACOSPI:  return TableRef(tbl_acospi);
  case AMDGPULibFunc::EI_ASIN:    return TableRef(tbl_asin);
  case AMDGPULibFunc::EI_ASINH:   return TableRef(tbl_asinh);
  case AMDGPULibFunc::EI_ASINPI:  return TableRef(tbl_asinpi);
  case AMDGPULibFunc::EI_ATAN:    return TableRef(tbl_atan);
  case AMDGPULibFunc::EI_ATANH:   return TableRef(tbl_atanh);
  case AMDGPULibFunc::EI_ATANPI:  return TableRef(tbl_atanpi);
  case AMDGPULibFunc::EI_CBRT:    return TableRef(tbl_cbrt);
  case AMDGPULibFunc::EI_NCOS:
  case AMDGPULibFunc::EI_COS:     return TableRef(tbl_cos);
  case AMDGPULibFunc::EI_COSH:    return TableRef(tbl_cosh);
  case AMDGPULibFunc::EI_COSPI:   return TableRef(tbl_cospi);
  case AMDGPULibFunc::EI_ERFC:    return TableRef(tbl_erfc);
  case AMDGPULibFunc::EI_ERF:     return TableRef(tbl_erf);
  case AMDGPULibFunc::EI_EXP:     return TableRef(tbl_exp);
  case AMDGPULibFunc::EI_NEXP2:
  case AMDGPULibFunc::EI_EXP2:    return TableRef(tbl_exp2);
  case AMDGPULibFunc::EI_EXP10:   return TableRef(tbl_exp10);
  case AMDGPULibFunc::EI_EXPM1:   return TableRef(tbl_expm1);
  case AMDGPULibFunc::EI_LOG:     return TableRef(tbl_log);
  case AMDGPULibFunc::EI_NLOG2:
  case AMDGPULibFunc::EI_LOG2:    return TableRef(tbl_log2);
  case AMDGPULibFunc::EI_LOG10:   return TableRef(tbl_log10);
  case AMDGPULibFunc::EI_NRSQRT:
  case AMDGPULibFunc::EI_RSQRT:   return TableRef(tbl_rsqrt);
  case AMDGPULibFunc::EI_NSIN:
  case AMDGPULibFunc::EI_SIN:     return TableRef(tbl_sin);
  case AMDGPULibFunc::EI_SINH:    return TableRef(tbl_sinh);
  case AMDGPULibFunc::EI_SINPI:   return TableRef(tbl_sinpi);
  case AMDGPULibFunc::EI_NSQRT:
  case AMDGPULibFunc::EI_SQRT:    return TableRef(tbl_sqrt);
  case AMDGPULibFunc::EI_TAN:     return TableRef(tbl_tan);
  case AMDGPULibFunc::EI_TANH:    return TableRef(tbl_tanh);
  case AMDGPULibFunc::EI_TANPI:   return TableRef(tbl_tanpi);
  case AMDGPULibFunc::EI_TGAMMA:  return TableRef(tbl_tgamma);
  default:;
  }
  return TableRef();
}

static inline int getVecSize(const AMDGPULibFunc& FInfo) {
  return FInfo.getLeads()[0].VectorSize;
}

static inline AMDGPULibFunc::EType getArgType(const AMDGPULibFunc& FInfo) {
  return (AMDGPULibFunc::EType)FInfo.getLeads()[0].ArgType;
}

FunctionCallee AMDGPULibCalls::getFunction(Module *M, const FuncInfo &fInfo) {
  // If we are doing PreLinkOpt, the function is external. So it is safe to
  // use getOrInsertFunction() at this stage.

  return EnablePreLink ? AMDGPULibFunc::getOrInsertFunction(M, fInfo)
                       : AMDGPULibFunc::getFunction(M, fInfo);
}

bool AMDGPULibCalls::parseFunctionName(const StringRef &FMangledName,
                                       FuncInfo &FInfo) {
  return AMDGPULibFunc::parse(FMangledName, FInfo);
}

bool AMDGPULibCalls::isUnsafeMath(const FPMathOperator *FPOp) const {
  return UnsafeFPMath || FPOp->isFast();
}

bool AMDGPULibCalls::isUnsafeFiniteOnlyMath(const FPMathOperator *FPOp) const {
  return UnsafeFPMath ||
         (FPOp->hasApproxFunc() && FPOp->hasNoNaNs() && FPOp->hasNoInfs());
}

bool AMDGPULibCalls::canIncreasePrecisionOfConstantFold(
    const FPMathOperator *FPOp) const {
  // TODO: Refine to approxFunc or contract
  return isUnsafeMath(FPOp);
}

void AMDGPULibCalls::initFunction(Function &F, FunctionAnalysisManager &FAM) {
  UnsafeFPMath = F.getFnAttribute("unsafe-fp-math").getValueAsBool();
  AC = &FAM.getResult<AssumptionAnalysis>(F);
  TLInfo = &FAM.getResult<TargetLibraryAnalysis>(F);
  DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
}

bool AMDGPULibCalls::useNativeFunc(const StringRef F) const {
  return AllNative || llvm::is_contained(UseNative, F);
}

void AMDGPULibCalls::initNativeFuncs() {
  AllNative = useNativeFunc("all") ||
              (UseNative.getNumOccurrences() && UseNative.size() == 1 &&
               UseNative.begin()->empty());
}
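
// Illustrative usage of the flag above: -amdgpu-use-native=sin,cos marks only
// sin and cos for the native_* replacement, while -amdgpu-use-native=all or a
// bare -amdgpu-use-native occurrence (one empty value, per the check in
// initNativeFuncs()) sets AllNative and covers every function HasNative()
// accepts.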

bool AMDGPULibCalls::sincosUseNative(CallInst *aCI, const FuncInfo &FInfo) {
  bool native_sin = useNativeFunc("sin");
  bool native_cos = useNativeFunc("cos");

  if (native_sin && native_cos) {
    Module *M = aCI->getModule();
    Value *opr0 = aCI->getArgOperand(0);

    AMDGPULibFunc nf;
    nf.getLeads()[0].ArgType = FInfo.getLeads()[0].ArgType;
    nf.getLeads()[0].VectorSize = FInfo.getLeads()[0].VectorSize;

    nf.setPrefix(AMDGPULibFunc::NATIVE);
    nf.setId(AMDGPULibFunc::EI_SIN);
    FunctionCallee sinExpr = getFunction(M, nf);

    nf.setPrefix(AMDGPULibFunc::NATIVE);
    nf.setId(AMDGPULibFunc::EI_COS);
    FunctionCallee cosExpr = getFunction(M, nf);
    if (sinExpr && cosExpr) {
      Value *sinval =
          CallInst::Create(sinExpr, opr0, "splitsin", aCI->getIterator());
      Value *cosval =
          CallInst::Create(cosExpr, opr0, "splitcos", aCI->getIterator());
      new StoreInst(cosval, aCI->getArgOperand(1), aCI->getIterator());

      DEBUG_WITH_TYPE("usenative", dbgs() << "<useNative> replace " << *aCI
                                          << " with native version of sin/cos");

      replaceCall(aCI, sinval);
      return true;
    }
  }
  return false;
}

bool AMDGPULibCalls::useNative(CallInst *aCI) {
  Function *Callee = aCI->getCalledFunction();
  if (!Callee || aCI->isNoBuiltin())
    return false;

  FuncInfo FInfo;
  if (!parseFunctionName(Callee->getName(), FInfo) || !FInfo.isMangled() ||
      FInfo.getPrefix() != AMDGPULibFunc::NOPFX ||
      getArgType(FInfo) == AMDGPULibFunc::F64 || !HasNative(FInfo.getId()) ||
      !(AllNative || useNativeFunc(FInfo.getName()))) {
    return false;
  }

  if (FInfo.getId() == AMDGPULibFunc::EI_SINCOS)
    return sincosUseNative(aCI, FInfo);

  FInfo.setPrefix(AMDGPULibFunc::NATIVE);
  FunctionCallee F = getFunction(aCI->getModule(), FInfo);
  if (!F)
    return false;

  aCI->setCalledFunction(F);
  DEBUG_WITH_TYPE("usenative", dbgs() << "<useNative> replace " << *aCI
                                      << " with native version");
  return true;
}

// Clang emits calls to __read_pipe_2 or __read_pipe_4 for the OpenCL read_pipe
// builtin, with appended type size and alignment arguments, where 2 or 4
// indicates the original number of arguments. The library has optimized
// versions of __read_pipe_2/__read_pipe_4 when the type size and alignment are
// the same power-of-2 value. This function transforms __read_pipe_2 into
// __read_pipe_2_N for such cases, where N is the size in bytes of the type
// (N = 1, 2, 4, 8, ..., 128). The same applies to __read_pipe_4,
// __write_pipe_2, and __write_pipe_4.
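//
// For example (illustrative IR), reading a 4-byte, 4-byte-aligned packet,
//   %r = call i32 @__read_pipe_2(ptr addrspace(1) %p, ptr %dst, i32 4, i32 4)
// becomes the specialized form with the size/alignment arguments dropped:
//   %r = call i32 @__read_pipe_2_4(ptr addrspace(1) %p, ptr %dst)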
bool AMDGPULibCalls::fold_read_write_pipe(CallInst *CI, IRBuilder<> &B,
                                          const FuncInfo &FInfo) {
  auto *Callee = CI->getCalledFunction();
  if (!Callee->isDeclaration())
    return false;

  assert(Callee->hasName() && "Invalid read_pipe/write_pipe function");
  auto *M = Callee->getParent();
  std::string Name = std::string(Callee->getName());
  auto NumArg = CI->arg_size();
  if (NumArg != 4 && NumArg != 6)
    return false;
  ConstantInt *PacketSize =
      dyn_cast<ConstantInt>(CI->getArgOperand(NumArg - 2));
  ConstantInt *PacketAlign =
      dyn_cast<ConstantInt>(CI->getArgOperand(NumArg - 1));
  if (!PacketSize || !PacketAlign)
    return false;

  unsigned Size = PacketSize->getZExtValue();
  Align Alignment = PacketAlign->getAlignValue();
  if (Alignment != Size)
    return false;

  unsigned PtrArgLoc = CI->arg_size() - 3;
  Value *PtrArg = CI->getArgOperand(PtrArgLoc);
  Type *PtrTy = PtrArg->getType();

  SmallVector<llvm::Type *, 6> ArgTys;
  for (unsigned I = 0; I != PtrArgLoc; ++I)
    ArgTys.push_back(CI->getArgOperand(I)->getType());
  ArgTys.push_back(PtrTy);

  Name = Name + "_" + std::to_string(Size);
  auto *FTy = FunctionType::get(Callee->getReturnType(),
                                ArrayRef<Type *>(ArgTys), false);
  AMDGPULibFunc NewLibFunc(Name, FTy);
  FunctionCallee F = AMDGPULibFunc::getOrInsertFunction(M, NewLibFunc);
  if (!F)
    return false;

  SmallVector<Value *, 6> Args;
  for (unsigned I = 0; I != PtrArgLoc; ++I)
    Args.push_back(CI->getArgOperand(I));
  Args.push_back(PtrArg);

  auto *NCI = B.CreateCall(F, Args);
  NCI->setAttributes(CI->getAttributes());
  CI->replaceAllUsesWith(NCI);
  CI->dropAllReferences();
  CI->eraseFromParent();

  return true;
}

static bool isKnownIntegral(const Value *V, const DataLayout &DL,
                            FastMathFlags FMF) {
  if (isa<PoisonValue>(V))
    return true;
  if (isa<UndefValue>(V))
    return false;

  if (const ConstantFP *CF = dyn_cast<ConstantFP>(V))
    return CF->getValueAPF().isInteger();

  auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
  const Constant *CV = dyn_cast<Constant>(V);
  if (VFVTy && CV) {
    unsigned NumElts = VFVTy->getNumElements();
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *Elt = CV->getAggregateElement(i);
      if (!Elt)
        return false;
      if (isa<PoisonValue>(Elt))
        continue;

      const ConstantFP *CFP = dyn_cast<ConstantFP>(Elt);
      if (!CFP || !CFP->getValue().isInteger())
        return false;
    }

    return true;
  }

  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  switch (I->getOpcode()) {
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    // TODO: Could check nofpclass(inf) on incoming argument
    if (FMF.noInfs())
      return true;

    // Need to check that the integer size cannot produce infinity, which
    // computeKnownFPClass already knows how to do.
    return isKnownNeverInfinity(I, /*Depth=*/0, SimplifyQuery(DL));
  case Instruction::Call: {
    const CallInst *CI = cast<CallInst>(I);
    switch (CI->getIntrinsicID()) {
    case Intrinsic::trunc:
    case Intrinsic::floor:
    case Intrinsic::ceil:
    case Intrinsic::rint:
    case Intrinsic::nearbyint:
    case Intrinsic::round:
    case Intrinsic::roundeven:
      return (FMF.noInfs() && FMF.noNaNs()) ||
             isKnownNeverInfOrNaN(I, /*Depth=*/0, SimplifyQuery(DL));
    default:
      break;
    }

    break;
  }
  default:
    break;
  }

  return false;
}
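
// For example, isKnownIntegral accepts the constant 4.0, a vector constant
// such as <float 2.0, float poison>, and values produced by sitofp/uitofp or
// the rounding intrinsics (trunc, floor, ceil, ...) once fast-math flags or
// value tracking rule out infinities (and NaNs, for the rounding cases).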

// This function returns false if there is no change; true otherwise.
bool AMDGPULibCalls::fold(CallInst *CI) {
  Function *Callee = CI->getCalledFunction();
  // Ignore indirect calls.
  if (!Callee || Callee->isIntrinsic() || CI->isNoBuiltin())
    return false;

  FuncInfo FInfo;
  if (!parseFunctionName(Callee->getName(), FInfo))
    return false;

  // Further check the number of arguments to see if they match.
  // TODO: Check calling convention matches too
  if (!FInfo.isCompatibleSignature(*Callee->getParent(), CI->getFunctionType()))
    return false;

  LLVM_DEBUG(dbgs() << "AMDIC: try folding " << *CI << '\n');

  if (TDOFold(CI, FInfo))
    return true;

  IRBuilder<> B(CI);
  if (CI->isStrictFP())
    B.setIsFPConstrained(true);

  if (FPMathOperator *FPOp = dyn_cast<FPMathOperator>(CI)) {
    // Under unsafe-math, evaluate calls if possible.
    // According to Brian Sumner, we can do this for all f32 function calls
    // using host's double function calls.
    if (canIncreasePrecisionOfConstantFold(FPOp) && evaluateCall(CI, FInfo))
      return true;

    // Copy fast flags from the original call.
    FastMathFlags FMF = FPOp->getFastMathFlags();
    B.setFastMathFlags(FMF);

    // Specialized optimizations for each function call.
    //
    // TODO: Handle native functions
    switch (FInfo.getId()) {
    case AMDGPULibFunc::EI_EXP:
      if (FMF.none())
        return false;
      return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::exp,
                                                  FMF.approxFunc());
    case AMDGPULibFunc::EI_EXP2:
      if (FMF.none())
        return false;
      return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::exp2,
                                                  FMF.approxFunc());
    case AMDGPULibFunc::EI_LOG:
      if (FMF.none())
        return false;
      return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::log,
                                                  FMF.approxFunc());
    case AMDGPULibFunc::EI_LOG2:
      if (FMF.none())
        return false;
      return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::log2,
                                                  FMF.approxFunc());
    case AMDGPULibFunc::EI_LOG10:
      if (FMF.none())
        return false;
      return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::log10,
                                                  FMF.approxFunc());
    case AMDGPULibFunc::EI_FMIN:
      return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::minnum,
                                                  true, true);
    case AMDGPULibFunc::EI_FMAX:
      return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::maxnum,
                                                  true, true);
    case AMDGPULibFunc::EI_FMA:
      return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::fma, true,
                                                  true);
    case AMDGPULibFunc::EI_MAD:
      return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::fmuladd,
                                                  true, true);
    case AMDGPULibFunc::EI_FABS:
      return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::fabs, true,
                                                  true, true);
    case AMDGPULibFunc::EI_COPYSIGN:
      return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::copysign,
                                                  true, true, true);
    case AMDGPULibFunc::EI_FLOOR:
      return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::floor, true,
                                                  true);
    case AMDGPULibFunc::EI_CEIL:
      return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::ceil, true,
                                                  true);
    case AMDGPULibFunc::EI_TRUNC:
      return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::trunc, true,
                                                  true);
    case AMDGPULibFunc::EI_RINT:
      return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::rint, true,
                                                  true);
    case AMDGPULibFunc::EI_ROUND:
      return tryReplaceLibcallWithSimpleIntrinsic(B, CI, Intrinsic::round, true,
                                                  true);
    case AMDGPULibFunc::EI_LDEXP: {
      if (!shouldReplaceLibcallWithIntrinsic(CI, true, true))
        return false;

      Value *Arg1 = CI->getArgOperand(1);
      if (VectorType *VecTy = dyn_cast<VectorType>(CI->getType());
          VecTy && !isa<VectorType>(Arg1->getType())) {
        Value *SplatArg1 = B.CreateVectorSplat(VecTy->getElementCount(), Arg1);
        CI->setArgOperand(1, SplatArg1);
      }

      CI->setCalledFunction(Intrinsic::getOrInsertDeclaration(
          CI->getModule(), Intrinsic::ldexp,
          {CI->getType(), CI->getArgOperand(1)->getType()}));
      return true;
    }
    case AMDGPULibFunc::EI_POW: {
      Module *M = Callee->getParent();
      AMDGPULibFunc PowrInfo(AMDGPULibFunc::EI_POWR, FInfo);
      FunctionCallee PowrFunc = getFunction(M, PowrInfo);
      CallInst *Call = cast<CallInst>(FPOp);

      // pow(x, y) -> powr(x, y) for x >= -0.0
      // TODO: Account for flags on current call
      if (PowrFunc &&
          cannotBeOrderedLessThanZero(
              FPOp->getOperand(0), /*Depth=*/0,
              SimplifyQuery(M->getDataLayout(), TLInfo, DT, AC, Call))) {
        Call->setCalledFunction(PowrFunc);
        return fold_pow(FPOp, B, PowrInfo) || true;
      }

      // pow(x, y) -> pown(x, y) for known integral y
      if (isKnownIntegral(FPOp->getOperand(1), M->getDataLayout(),
                          FPOp->getFastMathFlags())) {
        FunctionType *PownType = getPownType(CI->getFunctionType());
        AMDGPULibFunc PownInfo(AMDGPULibFunc::EI_POWN, PownType, true);
        FunctionCallee PownFunc = getFunction(M, PownInfo);
        if (PownFunc) {
          // TODO: If the incoming integral value is an sitofp/uitofp, it won't
          // fold out without a known range. We can probably take the source
          // value directly.
          Value *CastedArg =
              B.CreateFPToSI(FPOp->getOperand(1), PownType->getParamType(1));
          // Have to drop any nofpclass attributes on the original call site.
          Call->removeParamAttrs(
              1, AttributeFuncs::typeIncompatible(CastedArg->getType(),
                                                  Call->getParamAttributes(1)));
          Call->setCalledFunction(PownFunc);
          Call->setArgOperand(1, CastedArg);
          return fold_pow(FPOp, B, PownInfo) || true;
        }
      }

      return fold_pow(FPOp, B, FInfo);
    }
    case AMDGPULibFunc::EI_POWR:
    case AMDGPULibFunc::EI_POWN:
      return fold_pow(FPOp, B, FInfo);
    case AMDGPULibFunc::EI_ROOTN:
      return fold_rootn(FPOp, B, FInfo);
    case AMDGPULibFunc::EI_SQRT:
      // TODO: Allow with strictfp + constrained intrinsic
      return tryReplaceLibcallWithSimpleIntrinsic(
          B, CI, Intrinsic::sqrt, true, true, /*AllowStrictFP=*/false);
    case AMDGPULibFunc::EI_COS:
    case AMDGPULibFunc::EI_SIN:
      return fold_sincos(FPOp, B, FInfo);
    default:
      break;
    }
  } else {
    // Specialized optimizations for each function call
    switch (FInfo.getId()) {
    case AMDGPULibFunc::EI_READ_PIPE_2:
    case AMDGPULibFunc::EI_READ_PIPE_4:
    case AMDGPULibFunc::EI_WRITE_PIPE_2:
    case AMDGPULibFunc::EI_WRITE_PIPE_4:
      return fold_read_write_pipe(CI, B, FInfo);
    default:
      break;
    }
  }

  return false;
}

bool AMDGPULibCalls::TDOFold(CallInst *CI, const FuncInfo &FInfo) {
  // Table-Driven optimization
  const TableRef tr = getOptTable(FInfo.getId());
  if (tr.empty())
    return false;

  int const sz = (int)tr.size();
  Value *opr0 = CI->getArgOperand(0);

  if (getVecSize(FInfo) > 1) {
    if (ConstantDataVector *CV = dyn_cast<ConstantDataVector>(opr0)) {
      SmallVector<double, 0> DVal;
      for (int eltNo = 0; eltNo < getVecSize(FInfo); ++eltNo) {
        ConstantFP *eltval = dyn_cast<ConstantFP>(
                               CV->getElementAsConstant((unsigned)eltNo));
        assert(eltval && "Non-FP arguments in math function!");
        bool found = false;
        for (int i = 0; i < sz; ++i) {
          if (eltval->isExactlyValue(tr[i].input)) {
            DVal.push_back(tr[i].result);
            found = true;
            break;
          }
        }
        if (!found) {
          // This vector constant is not handled yet.
          return false;
        }
      }
      LLVMContext &context = CI->getParent()->getParent()->getContext();
      Constant *nval;
      if (getArgType(FInfo) == AMDGPULibFunc::F32) {
        SmallVector<float, 0> FVal;
        for (double D : DVal)
          FVal.push_back((float)D);
        ArrayRef<float> tmp(FVal);
        nval = ConstantDataVector::get(context, tmp);
      } else { // F64
        ArrayRef<double> tmp(DVal);
        nval = ConstantDataVector::get(context, tmp);
      }
      LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *nval << "\n");
      replaceCall(CI, nval);
      return true;
    }
  } else {
    // Scalar version
    if (ConstantFP *CF = dyn_cast<ConstantFP>(opr0)) {
      for (int i = 0; i < sz; ++i) {
        if (CF->isExactlyValue(tr[i].input)) {
          Value *nval = ConstantFP::get(CF->getType(), tr[i].result);
          LLVM_DEBUG(errs() << "AMDIC: " << *CI << " ---> " << *nval << "\n");
          replaceCall(CI, nval);
          return true;
        }
      }
    }
  }

  return false;
}
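
// As an illustrative example of the table-driven fold above, a call such as
//   %v = call float @_Z3cosf(float 0.0)
// matches tbl_cos's {1.0, 0.0} entry and is folded to the constant 1.0 with
// no call emitted (mangled name shown for illustration only).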

namespace llvm {
static double log2(double V) {
#if _XOPEN_SOURCE >= 600 || defined(_ISOC99_SOURCE) || _POSIX_C_SOURCE >= 200112L
  return ::log2(V);
#else
  return log(V) / numbers::ln2;
#endif
}
} // namespace llvm

bool AMDGPULibCalls::fold_pow(FPMathOperator *FPOp, IRBuilder<> &B,
                              const FuncInfo &FInfo) {
  assert((FInfo.getId() == AMDGPULibFunc::EI_POW ||
          FInfo.getId() == AMDGPULibFunc::EI_POWR ||
          FInfo.getId() == AMDGPULibFunc::EI_POWN) &&
         "fold_pow: encounter a wrong function call");

  Module *M = B.GetInsertBlock()->getModule();
  Type *eltType = FPOp->getType()->getScalarType();
  Value *opr0 = FPOp->getOperand(0);
  Value *opr1 = FPOp->getOperand(1);

  const APFloat *CF = nullptr;
  const APInt *CINT = nullptr;
  if (!match(opr1, m_APFloatAllowPoison(CF)))
    match(opr1, m_APIntAllowPoison(CINT));

  // 0x1111111 means that we don't do anything for this call.
  int ci_opr1 = (CINT ? (int)CINT->getSExtValue() : 0x1111111);

  if ((CF && CF->isZero()) || (CINT && ci_opr1 == 0)) {
    //  pow/powr/pown(x, 0) == 1
    LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> 1\n");
    Constant *cnval = ConstantFP::get(eltType, 1.0);
    if (getVecSize(FInfo) > 1) {
      cnval = ConstantDataVector::getSplat(getVecSize(FInfo), cnval);
    }
    replaceCall(FPOp, cnval);
    return true;
  }
  if ((CF && CF->isExactlyValue(1.0)) || (CINT && ci_opr1 == 1)) {
    // pow/powr/pown(x, 1.0) = x
    LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> " << *opr0 << "\n");
    replaceCall(FPOp, opr0);
    return true;
  }
  if ((CF && CF->isExactlyValue(2.0)) || (CINT && ci_opr1 == 2)) {
    // pow/powr/pown(x, 2.0) = x*x
    LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> " << *opr0 << " * "
                      << *opr0 << "\n");
    Value *nval = B.CreateFMul(opr0, opr0, "__pow2");
    replaceCall(FPOp, nval);
    return true;
  }
  if ((CF && CF->isExactlyValue(-1.0)) || (CINT && ci_opr1 == -1)) {
    // pow/powr/pown(x, -1.0) = 1.0/x
    LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> 1 / " << *opr0 << "\n");
    Constant *cnval = ConstantFP::get(eltType, 1.0);
    if (getVecSize(FInfo) > 1) {
      cnval = ConstantDataVector::getSplat(getVecSize(FInfo), cnval);
    }
    Value *nval = B.CreateFDiv(cnval, opr0, "__powrecip");
    replaceCall(FPOp, nval);
    return true;
  }

  if (CF && (CF->isExactlyValue(0.5) || CF->isExactlyValue(-0.5))) {
    // pow[r](x, [-]0.5) = sqrt(x)
    bool issqrt = CF->isExactlyValue(0.5);
    if (FunctionCallee FPExpr =
            getFunction(M, AMDGPULibFunc(issqrt ? AMDGPULibFunc::EI_SQRT
                                                : AMDGPULibFunc::EI_RSQRT,
                                         FInfo))) {
      LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> " << FInfo.getName()
                        << '(' << *opr0 << ")\n");
      Value *nval = CreateCallEx(B, FPExpr, opr0,
                                 issqrt ? "__pow2sqrt" : "__pow2rsqrt");
      replaceCall(FPOp, nval);
      return true;
    }
  }

  if (!isUnsafeFiniteOnlyMath(FPOp))
    return false;

  // Unsafe Math optimization

  // Remember that ci_opr1 is set if opr1 is integral
  if (CF) {
    double dval = (getArgType(FInfo) == AMDGPULibFunc::F32)
                      ? (double)CF->convertToFloat()
                      : CF->convertToDouble();
    int ival = (int)dval;
    if ((double)ival == dval) {
      ci_opr1 = ival;
    } else
      ci_opr1 = 0x11111111;
  }

  // pow/powr/pown(x, c) = [1/](x*x*..x); where
  //   trunc(c) == c && the number of x == c && |c| <= 12
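  //
  // For example, pow(x, 11) expands by square-and-multiply: valx2 steps
  // through x, x^2, x^4, x^8 while the set bits of 11 (binary 1011) select
  // nval = x * x^2 * x^8; a negative exponent takes one final reciprocal
  // (illustrative expansion of the loop below).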
  unsigned abs_opr1 = (ci_opr1 < 0) ? -ci_opr1 : ci_opr1;
  if (abs_opr1 <= 12) {
    Constant *cnval;
    Value *nval;
    if (abs_opr1 == 0) {
      cnval = ConstantFP::get(eltType, 1.0);
      if (getVecSize(FInfo) > 1) {
        cnval = ConstantDataVector::getSplat(getVecSize(FInfo), cnval);
      }
      nval = cnval;
    } else {
      Value *valx2 = nullptr;
      nval = nullptr;
      while (abs_opr1 > 0) {
        valx2 = valx2 ? B.CreateFMul(valx2, valx2, "__powx2") : opr0;
        if (abs_opr1 & 1) {
          nval = nval ? B.CreateFMul(nval, valx2, "__powprod") : valx2;
        }
        abs_opr1 >>= 1;
      }
    }

    if (ci_opr1 < 0) {
      cnval = ConstantFP::get(eltType, 1.0);
      if (getVecSize(FInfo) > 1) {
        cnval = ConstantDataVector::getSplat(getVecSize(FInfo), cnval);
      }
      nval = B.CreateFDiv(cnval, nval, "__1powprod");
    }
    LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> "
                      << ((ci_opr1 < 0) ? "1/prod(" : "prod(") << *opr0
                      << ")\n");
    replaceCall(FPOp, nval);
    return true;
  }

  // Whether we should use the generic intrinsic instead of emitting a libcall
  const bool ShouldUseIntrinsic = eltType->isFloatTy() || eltType->isHalfTy();

  // powr ---> exp2(y * log2(x))
  // pown/pow ---> powr(fabs(x), y) | (x & ((int)y << 31))
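  //
  // For example (illustrative), pown(-2.0, 3) computes exp2(3 * log2(2.0))
  // = 8.0 and ORs in the sign bit (bitcast(-2.0) & (3 << 31)), giving -8.0;
  // an even exponent shifts a 0 into the sign position, so the result stays
  // positive.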
  FunctionCallee ExpExpr;
  if (ShouldUseIntrinsic)
    ExpExpr = Intrinsic::getOrInsertDeclaration(M, Intrinsic::exp2,
                                                {FPOp->getType()});
  else {
    ExpExpr = getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_EXP2, FInfo));
    if (!ExpExpr)
      return false;
  }

  bool needlog = false;
  bool needabs = false;
  bool needcopysign = false;
  Constant *cnval = nullptr;
  if (getVecSize(FInfo) == 1) {
    CF = nullptr;
    match(opr0, m_APFloatAllowPoison(CF));

    if (CF) {
      double V = (getArgType(FInfo) == AMDGPULibFunc::F32)
                     ? (double)CF->convertToFloat()
                     : CF->convertToDouble();

      V = log2(std::abs(V));
      cnval = ConstantFP::get(eltType, V);
      needcopysign = (FInfo.getId() != AMDGPULibFunc::EI_POWR) &&
                     CF->isNegative();
    } else {
      needlog = true;
      needcopysign = needabs = FInfo.getId() != AMDGPULibFunc::EI_POWR;
    }
  } else {
    ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(opr0);

    if (!CDV) {
      needlog = true;
      needcopysign = needabs = FInfo.getId() != AMDGPULibFunc::EI_POWR;
    } else {
      assert((int)CDV->getNumElements() == getVecSize(FInfo) &&
             "Wrong vector size detected");

      SmallVector<double, 0> DVal;
      for (int i = 0; i < getVecSize(FInfo); ++i) {
        double V = CDV->getElementAsAPFloat(i).convertToDouble();
        if (V < 0.0)
          needcopysign = true;
        V = log2(std::abs(V));
        DVal.push_back(V);
      }
      if (getArgType(FInfo) == AMDGPULibFunc::F32) {
        SmallVector<float, 0> FVal;
        for (double D : DVal)
          FVal.push_back((float)D);
        ArrayRef<float> tmp(FVal);
        cnval = ConstantDataVector::get(M->getContext(), tmp);
      } else {
        ArrayRef<double> tmp(DVal);
        cnval = ConstantDataVector::get(M->getContext(), tmp);
      }
    }
  }

  if (needcopysign && (FInfo.getId() == AMDGPULibFunc::EI_POW)) {
    // We cannot handle corner cases for a general pow() function, give up
    // unless y is a constant integral value. Then proceed as if it were pown.
    if (!isKnownIntegral(opr1, M->getDataLayout(), FPOp->getFastMathFlags()))
      return false;
  }

  Value *nval;
  if (needabs) {
    nval = B.CreateUnaryIntrinsic(Intrinsic::fabs, opr0, nullptr, "__fabs");
  } else {
    nval = cnval ? cnval : opr0;
  }
  if (needlog) {
    FunctionCallee LogExpr;
    if (ShouldUseIntrinsic) {
      LogExpr = Intrinsic::getOrInsertDeclaration(M, Intrinsic::log2,
                                                  {FPOp->getType()});
    } else {
      LogExpr = getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_LOG2, FInfo));
      if (!LogExpr)
        return false;
    }

    nval = CreateCallEx(B, LogExpr, nval, "__log2");
  }

  if (FInfo.getId() == AMDGPULibFunc::EI_POWN) {
    // convert int(32) to fp(f32 or f64)
    opr1 = B.CreateSIToFP(opr1, nval->getType(), "pownI2F");
  }
  nval = B.CreateFMul(opr1, nval, "__ylogx");
  nval = CreateCallEx(B, ExpExpr, nval, "__exp2");

  if (needcopysign) {
    Type *nTyS = B.getIntNTy(eltType->getPrimitiveSizeInBits());
    Type *nTy = FPOp->getType()->getWithNewType(nTyS);
    unsigned size = nTy->getScalarSizeInBits();
    Value *opr_n = FPOp->getOperand(1);
    if (opr_n->getType()->getScalarType()->isIntegerTy())
      opr_n = B.CreateZExtOrTrunc(opr_n, nTy, "__ytou");
    else
      opr_n = B.CreateFPToSI(opr1, nTy, "__ytou");

    Value *sign = B.CreateShl(opr_n, size - 1, "__yeven");
    sign = B.CreateAnd(B.CreateBitCast(opr0, nTy), sign, "__pow_sign");
    nval = B.CreateOr(B.CreateBitCast(nval, nTy), sign);
    nval = B.CreateBitCast(nval, opr0->getType());
  }

  LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> "
                    << "exp2(" << *opr1 << " * log2(" << *opr0 << "))\n");
  replaceCall(FPOp, nval);

  return true;
}

bool AMDGPULibCalls::fold_rootn(FPMathOperator *FPOp, IRBuilder<> &B,
                                const FuncInfo &FInfo) {
  Value *opr0 = FPOp->getOperand(0);
  Value *opr1 = FPOp->getOperand(1);

  const APInt *CINT = nullptr;
  if (!match(opr1, m_APIntAllowPoison(CINT)))
    return false;

  Function *Parent = B.GetInsertBlock()->getParent();

  int ci_opr1 = (int)CINT->getSExtValue();
  if (ci_opr1 == 1 && !Parent->hasFnAttribute(Attribute::StrictFP)) {
    // rootn(x, 1) = x
    //
    // TODO: Insert constrained canonicalize for strictfp case.
    LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> " << *opr0 << '\n');
    replaceCall(FPOp, opr0);
    return true;
  }

  Module *M = B.GetInsertBlock()->getModule();

  CallInst *CI = cast<CallInst>(FPOp);
  if (ci_opr1 == 2 &&
      shouldReplaceLibcallWithIntrinsic(CI,
                                        /*AllowMinSizeF32=*/true,
                                        /*AllowF64=*/true)) {
    // rootn(x, 2) = sqrt(x)
    LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> sqrt(" << *opr0 << ")\n");

    CallInst *NewCall = B.CreateUnaryIntrinsic(Intrinsic::sqrt, opr0, CI);
    NewCall->takeName(CI);

    // OpenCL rootn has a looser ulp of 2 requirement than sqrt, so add some
    // metadata.
    MDBuilder MDHelper(M->getContext());
    MDNode *FPMD = MDHelper.createFPMath(std::max(FPOp->getFPAccuracy(), 2.0f));
    NewCall->setMetadata(LLVMContext::MD_fpmath, FPMD);

    replaceCall(CI, NewCall);
    return true;
  }

  if (ci_opr1 == 3) { // rootn(x, 3) = cbrt(x)
    if (FunctionCallee FPExpr =
            getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_CBRT, FInfo))) {
      LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> cbrt(" << *opr0
                        << ")\n");
      Value *nval = CreateCallEx(B, FPExpr, opr0, "__rootn2cbrt");
      replaceCall(FPOp, nval);
      return true;
    }
  } else if (ci_opr1 == -1) { // rootn(x, -1) = 1.0/x
    LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> 1.0 / " << *opr0 << "\n");
    Value *nval = B.CreateFDiv(ConstantFP::get(opr0->getType(), 1.0),
                               opr0,
                               "__rootn2div");
    replaceCall(FPOp, nval);
    return true;
  }

  if (ci_opr1 == -2 &&
      shouldReplaceLibcallWithIntrinsic(CI,
                                        /*AllowMinSizeF32=*/true,
                                        /*AllowF64=*/true)) {
    // rootn(x, -2) = rsqrt(x)

    // The original rootn had looser ulp requirements than the resultant sqrt
    // and fdiv.
    MDBuilder MDHelper(M->getContext());
    MDNode *FPMD = MDHelper.createFPMath(std::max(FPOp->getFPAccuracy(), 2.0f));

    // TODO: Could handle strictfp but need to fix strict sqrt emission
    FastMathFlags FMF = FPOp->getFastMathFlags();
    FMF.setAllowContract(true);

    CallInst *Sqrt = B.CreateUnaryIntrinsic(Intrinsic::sqrt, opr0, CI);
    Instruction *RSqrt = cast<Instruction>(
        B.CreateFDiv(ConstantFP::get(opr0->getType(), 1.0), Sqrt));
    Sqrt->setFastMathFlags(FMF);
    RSqrt->setFastMathFlags(FMF);
    RSqrt->setMetadata(LLVMContext::MD_fpmath, FPMD);

    LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> rsqrt(" << *opr0
                      << ")\n");
    replaceCall(CI, RSqrt);
    return true;
  }

  return false;
}

// Get a scalar native builtin single argument FP function
FunctionCallee AMDGPULibCalls::getNativeFunction(Module *M,
                                                 const FuncInfo &FInfo) {
  if (getArgType(FInfo) == AMDGPULibFunc::F64 || !HasNative(FInfo.getId()))
    return nullptr;
  FuncInfo nf = FInfo;
  nf.setPrefix(AMDGPULibFunc::NATIVE);
  return getFunction(M, nf);
}

// Some library calls are just wrappers around llvm intrinsics, but compiled
// conservatively. Preserve the flags from the original call site by
// replacing them with direct intrinsic calls carrying all the flags.
bool AMDGPULibCalls::shouldReplaceLibcallWithIntrinsic(const CallInst *CI,
                                                       bool AllowMinSizeF32,
                                                       bool AllowF64,
                                                       bool AllowStrictFP) {
  Type *FltTy = CI->getType()->getScalarType();
  const bool IsF32 = FltTy->isFloatTy();

  // f64 intrinsics aren't implemented for most operations.
  if (!IsF32 && !FltTy->isHalfTy() && (!AllowF64 || !FltTy->isDoubleTy()))
    return false;

  // We're implicitly inlining by replacing the libcall with the intrinsic, so
  // don't do it for noinline call sites.
  if (CI->isNoInline())
    return false;

  const Function *ParentF = CI->getFunction();
  // TODO: Handle strictfp
  if (!AllowStrictFP && ParentF->hasFnAttribute(Attribute::StrictFP))
    return false;

  if (IsF32 && !AllowMinSizeF32 && ParentF->hasMinSize())
    return false;
  return true;
}

void AMDGPULibCalls::replaceLibCallWithSimpleIntrinsic(IRBuilder<> &B,
                                                       CallInst *CI,
                                                       Intrinsic::ID IntrID) {
  if (CI->arg_size() == 2) {
    Value *Arg0 = CI->getArgOperand(0);
    Value *Arg1 = CI->getArgOperand(1);
    VectorType *Arg0VecTy = dyn_cast<VectorType>(Arg0->getType());
    VectorType *Arg1VecTy = dyn_cast<VectorType>(Arg1->getType());
    if (Arg0VecTy && !Arg1VecTy) {
      Value *SplatRHS = B.CreateVectorSplat(Arg0VecTy->getElementCount(), Arg1);
      CI->setArgOperand(1, SplatRHS);
    } else if (!Arg0VecTy && Arg1VecTy) {
      Value *SplatLHS = B.CreateVectorSplat(Arg1VecTy->getElementCount(), Arg0);
      CI->setArgOperand(0, SplatLHS);
    }
  }

  CI->setCalledFunction(Intrinsic::getOrInsertDeclaration(
      CI->getModule(), IntrID, {CI->getType()}));
}

bool AMDGPULibCalls::tryReplaceLibcallWithSimpleIntrinsic(
    IRBuilder<> &B, CallInst *CI, Intrinsic::ID IntrID, bool AllowMinSizeF32,
    bool AllowF64, bool AllowStrictFP) {
  if (!shouldReplaceLibcallWithIntrinsic(CI, AllowMinSizeF32, AllowF64,
                                         AllowStrictFP))
    return false;
  replaceLibCallWithSimpleIntrinsic(B, CI, IntrID);
  return true;
}

std::tuple<Value *, Value *, Value *>
AMDGPULibCalls::insertSinCos(Value *Arg, FastMathFlags FMF, IRBuilder<> &B,
                             FunctionCallee Fsincos) {
  DebugLoc DL = B.getCurrentDebugLocation();
  Function *F = B.GetInsertBlock()->getParent();
  B.SetInsertPointPastAllocas(F);

  AllocaInst *Alloc = B.CreateAlloca(Arg->getType(), nullptr, "__sincos_");

  if (Instruction *ArgInst = dyn_cast<Instruction>(Arg)) {
    // If the argument is an instruction, it must dominate all uses so put our
    // sincos call there. Otherwise, right after the allocas works well enough
    // if it's an argument or constant.

    B.SetInsertPoint(ArgInst->getParent(), ++ArgInst->getIterator());

    // SetInsertPoint unwelcomely always tries to set the debug loc.
    B.SetCurrentDebugLocation(DL);
  }

  Type *CosPtrTy = Fsincos.getFunctionType()->getParamType(1);

  // The alloca instruction allocates memory in the private address space,
  // which needs to be addrspacecasted to the address space of the cos pointer
  // type. In OpenCL 2.0 that is generic, while in 1.2 it is private.
  Value *CastAlloc = B.CreateAddrSpaceCast(Alloc, CosPtrTy);

  CallInst *SinCos = CreateCallEx2(B, Fsincos, Arg, CastAlloc);

  // TODO: Is it worth trying to preserve the location for the cos calls for
  // the load?

  LoadInst *LoadCos = B.CreateLoad(Alloc->getAllocatedType(), Alloc);
  return {SinCos, LoadCos, SinCos};
}

// fold sin, cos -> sincos.
bool AMDGPULibCalls::fold_sincos(FPMathOperator *FPOp, IRBuilder<> &B,
                                 const FuncInfo &fInfo) {
  assert(fInfo.getId() == AMDGPULibFunc::EI_SIN ||
         fInfo.getId() == AMDGPULibFunc::EI_COS);

  if ((getArgType(fInfo) != AMDGPULibFunc::F32 &&
       getArgType(fInfo) != AMDGPULibFunc::F64) ||
      fInfo.getPrefix() != AMDGPULibFunc::NOPFX)
    return false;

  bool const isSin = fInfo.getId() == AMDGPULibFunc::EI_SIN;

  Value *CArgVal = FPOp->getOperand(0);
  CallInst *CI = cast<CallInst>(FPOp);

  Function *F = B.GetInsertBlock()->getParent();
  Module *M = F->getParent();

  // Merge the sin and cos. For OpenCL 2.0, there may only be a generic pointer
  // implementation. Prefer the private form if available.
  AMDGPULibFunc SinCosLibFuncPrivate(AMDGPULibFunc::EI_SINCOS, fInfo);
  SinCosLibFuncPrivate.getLeads()[0].PtrKind =
      AMDGPULibFunc::getEPtrKindFromAddrSpace(AMDGPUAS::PRIVATE_ADDRESS);

  AMDGPULibFunc SinCosLibFuncGeneric(AMDGPULibFunc::EI_SINCOS, fInfo);
  SinCosLibFuncGeneric.getLeads()[0].PtrKind =
      AMDGPULibFunc::getEPtrKindFromAddrSpace(AMDGPUAS::FLAT_ADDRESS);

  FunctionCallee FSinCosPrivate = getFunction(M, SinCosLibFuncPrivate);
  FunctionCallee FSinCosGeneric = getFunction(M, SinCosLibFuncGeneric);
  FunctionCallee FSinCos = FSinCosPrivate ? FSinCosPrivate : FSinCosGeneric;
  if (!FSinCos)
    return false;

  SmallVector<CallInst *> SinCalls;
  SmallVector<CallInst *> CosCalls;
  SmallVector<CallInst *> SinCosCalls;
  FuncInfo PartnerInfo(isSin ? AMDGPULibFunc::EI_COS : AMDGPULibFunc::EI_SIN,
                       fInfo);
  const std::string PairName = PartnerInfo.mangle();

  StringRef SinName = isSin ? CI->getCalledFunction()->getName() : PairName;
  StringRef CosName = isSin ? PairName : CI->getCalledFunction()->getName();
  const std::string SinCosPrivateName = SinCosLibFuncPrivate.mangle();
  const std::string SinCosGenericName = SinCosLibFuncGeneric.mangle();

  // Intersect the two sets of flags.
  FastMathFlags FMF = FPOp->getFastMathFlags();
  MDNode *FPMath = CI->getMetadata(LLVMContext::MD_fpmath);

  SmallVector<DILocation *> MergeDbgLocs = {CI->getDebugLoc()};

  for (User *U : CArgVal->users()) {
    CallInst *XI = dyn_cast<CallInst>(U);
    if (!XI || XI->getFunction() != F || XI->isNoBuiltin())
      continue;

    Function *UCallee = XI->getCalledFunction();
    if (!UCallee)
      continue;

    bool Handled = true;

    if (UCallee->getName() == SinName)
      SinCalls.push_back(XI);
    else if (UCallee->getName() == CosName)
      CosCalls.push_back(XI);
    else if (UCallee->getName() == SinCosPrivateName ||
             UCallee->getName() == SinCosGenericName)
      SinCosCalls.push_back(XI);
    else
      Handled = false;

    if (Handled) {
      MergeDbgLocs.push_back(XI->getDebugLoc());
      auto *OtherOp = cast<FPMathOperator>(XI);
      FMF &= OtherOp->getFastMathFlags();
      FPMath = MDNode::getMostGenericFPMath(
          FPMath, XI->getMetadata(LLVMContext::MD_fpmath));
    }
  }

  if (SinCalls.empty() || CosCalls.empty())
    return false;

  B.setFastMathFlags(FMF);
  B.setDefaultFPMathTag(FPMath);
  DILocation *DbgLoc = DILocation::getMergedLocations(MergeDbgLocs);
  B.SetCurrentDebugLocation(DbgLoc);

  auto [Sin, Cos, SinCos] = insertSinCos(CArgVal, FMF, B, FSinCos);

  auto replaceTrigInsts = [](ArrayRef<CallInst *> Calls, Value *Res) {
    for (CallInst *C : Calls)
      C->replaceAllUsesWith(Res);

    // Leave the other dead instructions to avoid clobbering iterators.
  };

  replaceTrigInsts(SinCalls, Sin);
  replaceTrigInsts(CosCalls, Cos);
  replaceTrigInsts(SinCosCalls, SinCos);

  // It's safe to delete the original now.
  CI->eraseFromParent();
  return true;
}
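
// As an illustrative example of the merge above, a sin/cos pair on the same
// argument,
//   %s = call float @sin(float %x)
//   %c = call float @cos(float %x)
// collapses to one sincos call writing cos through a private alloca:
//   %a = alloca float, addrspace(5)
//   %s = call float @sincos(float %x, ptr addrspace(5) %a)
//   %c = load float, ptr addrspace(5) %a
// (unmangled names used for illustration only).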

bool AMDGPULibCalls::evaluateScalarMathFunc(const FuncInfo &FInfo, double &Res0,
                                            double &Res1, Constant *copr0,
                                            Constant *copr1) {
  // By default, opr0/opr1 hold values of float/double type.
  // If they are not float/double, each function has to convert
  // its operands separately.
1465   double opr0 = 0.0, opr1 = 0.0;
1466   ConstantFP *fpopr0 = dyn_cast_or_null<ConstantFP>(copr0);
1467   ConstantFP *fpopr1 = dyn_cast_or_null<ConstantFP>(copr1);
1468   if (fpopr0) {
1469     opr0 = (getArgType(FInfo) == AMDGPULibFunc::F64)
1470              ? fpopr0->getValueAPF().convertToDouble()
1471              : (double)fpopr0->getValueAPF().convertToFloat();
1472   }
1473 
1474   if (fpopr1) {
1475     opr1 = (getArgType(FInfo) == AMDGPULibFunc::F64)
1476              ? fpopr1->getValueAPF().convertToDouble()
1477              : (double)fpopr1->getValueAPF().convertToFloat();
1478   }
1479 
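  // Every case computes in double precision; the caller narrows the result
  // back to the call's own float/double (scalar or vector) type.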
  switch (FInfo.getId()) {
  default: return false;

  case AMDGPULibFunc::EI_ACOS:
    Res0 = acos(opr0);
    return true;

  case AMDGPULibFunc::EI_ACOSH:
    // acosh(x) == log(x + sqrt(x*x - 1))
    Res0 = log(opr0 + sqrt(opr0*opr0 - 1.0));
    return true;

  case AMDGPULibFunc::EI_ACOSPI:
    Res0 = acos(opr0) / MATH_PI;
    return true;

  case AMDGPULibFunc::EI_ASIN:
    Res0 = asin(opr0);
    return true;

  case AMDGPULibFunc::EI_ASINH:
    // asinh(x) == log(x + sqrt(x*x + 1))
    Res0 = log(opr0 + sqrt(opr0*opr0 + 1.0));
    return true;

  case AMDGPULibFunc::EI_ASINPI:
    Res0 = asin(opr0) / MATH_PI;
    return true;

  case AMDGPULibFunc::EI_ATAN:
    Res0 = atan(opr0);
    return true;

  case AMDGPULibFunc::EI_ATANH:
    // atanh(x) == (log(x+1) - log(x-1))/2
    Res0 = (log(opr0 + 1.0) - log(opr0 - 1.0))/2.0;
    return true;

  case AMDGPULibFunc::EI_ATANPI:
    Res0 = atan(opr0) / MATH_PI;
    return true;

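  // pow(x, 1/3) is NaN for negative x, so route the sign around the pow.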
  case AMDGPULibFunc::EI_CBRT:
    Res0 = (opr0 < 0.0) ? -pow(-opr0, 1.0/3.0) : pow(opr0, 1.0/3.0);
    return true;

  case AMDGPULibFunc::EI_COS:
    Res0 = cos(opr0);
    return true;

  case AMDGPULibFunc::EI_COSH:
    Res0 = cosh(opr0);
    return true;

  case AMDGPULibFunc::EI_COSPI:
    Res0 = cos(MATH_PI * opr0);
    return true;

  case AMDGPULibFunc::EI_EXP:
    Res0 = exp(opr0);
    return true;

  case AMDGPULibFunc::EI_EXP2:
    Res0 = pow(2.0, opr0);
    return true;

  case AMDGPULibFunc::EI_EXP10:
    Res0 = pow(10.0, opr0);
    return true;

  case AMDGPULibFunc::EI_LOG:
    Res0 = log(opr0);
    return true;

  case AMDGPULibFunc::EI_LOG2:
    Res0 = log(opr0) / log(2.0);
    return true;

  case AMDGPULibFunc::EI_LOG10:
    Res0 = log(opr0) / log(10.0);
    return true;

  case AMDGPULibFunc::EI_RSQRT:
    Res0 = 1.0 / sqrt(opr0);
    return true;

  case AMDGPULibFunc::EI_SIN:
    Res0 = sin(opr0);
    return true;

  case AMDGPULibFunc::EI_SINH:
    Res0 = sinh(opr0);
    return true;

  case AMDGPULibFunc::EI_SINPI:
    Res0 = sin(MATH_PI * opr0);
    return true;

  case AMDGPULibFunc::EI_TAN:
    Res0 = tan(opr0);
    return true;

  case AMDGPULibFunc::EI_TANH:
    Res0 = tanh(opr0);
    return true;

  case AMDGPULibFunc::EI_TANPI:
    Res0 = tan(MATH_PI * opr0);
    return true;

  // two-arg functions
  case AMDGPULibFunc::EI_POW:
  case AMDGPULibFunc::EI_POWR:
    Res0 = pow(opr0, opr1);
    return true;

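  // pown(x, n) == x^n; only foldable when the integer exponent is a constant.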
  case AMDGPULibFunc::EI_POWN: {
    if (ConstantInt *iopr1 = dyn_cast_or_null<ConstantInt>(copr1)) {
      double val = (double)iopr1->getSExtValue();
      Res0 = pow(opr0, val);
      return true;
    }
    return false;
  }

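  // rootn(x, n) == x^(1/n), again requiring a constant integer n.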
  case AMDGPULibFunc::EI_ROOTN: {
    if (ConstantInt *iopr1 = dyn_cast_or_null<ConstantInt>(copr1)) {
      double val = (double)iopr1->getSExtValue();
      Res0 = pow(opr0, 1.0 / val);
      return true;
    }
    return false;
  }

  // with ptr arg
  case AMDGPULibFunc::EI_SINCOS:
    Res0 = sin(opr0);
    Res1 = cos(opr0);
    return true;
  }

  return false;
}

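// Fold a call whose arguments are all constant into a literal result,
// handling scalar and vector variants. A minimal sketch of the effect on the
// IR (the mangled name and constant are illustrative, not taken from a real
// module):
//
//   %c = call float @_Z3cosf(float 0.0)
//
// becomes the constant 1.0 in every use of %c.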
bool AMDGPULibCalls::evaluateCall(CallInst *aCI, const FuncInfo &FInfo) {
  int numArgs = (int)aCI->arg_size();
  if (numArgs > 3)
    return false;

  Constant *copr0 = nullptr;
  Constant *copr1 = nullptr;
  if (numArgs > 0) {
    if ((copr0 = dyn_cast<Constant>(aCI->getArgOperand(0))) == nullptr)
      return false;
  }

  if (numArgs > 1) {
    if ((copr1 = dyn_cast<Constant>(aCI->getArgOperand(1))) == nullptr) {
      if (FInfo.getId() != AMDGPULibFunc::EI_SINCOS)
        return false;
    }
  }

  // At this point, all arguments to aCI are constants, except that sincos's
  // pointer result argument is allowed to be non-constant.

  // Max vector size is 16, and sincos generates two results.
  double DVal0[16], DVal1[16];
  int FuncVecSize = getVecSize(FInfo);
  bool hasTwoResults = (FInfo.getId() == AMDGPULibFunc::EI_SINCOS);
  if (FuncVecSize == 1) {
    if (!evaluateScalarMathFunc(FInfo, DVal0[0], DVal1[0], copr0, copr1)) {
      return false;
    }
  } else {
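    // Vector case: fold each lane independently and give up if any lane
    // fails to fold.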
    ConstantDataVector *CDV0 = dyn_cast_or_null<ConstantDataVector>(copr0);
    ConstantDataVector *CDV1 = dyn_cast_or_null<ConstantDataVector>(copr1);
    for (int i = 0; i < FuncVecSize; ++i) {
      Constant *celt0 = CDV0 ? CDV0->getElementAsConstant(i) : nullptr;
      Constant *celt1 = CDV1 ? CDV1->getElementAsConstant(i) : nullptr;
      if (!evaluateScalarMathFunc(FInfo, DVal0[i], DVal1[i], celt0, celt1)) {
        return false;
      }
    }
  }

  LLVMContext &context = aCI->getContext();
  Constant *nval0 = nullptr, *nval1 = nullptr;
  if (FuncVecSize == 1) {
    nval0 = ConstantFP::get(aCI->getType(), DVal0[0]);
    if (hasTwoResults)
      nval1 = ConstantFP::get(aCI->getType(), DVal1[0]);
  } else {
    if (getArgType(FInfo) == AMDGPULibFunc::F32) {
      SmallVector<float, 0> FVal0, FVal1;
      for (int i = 0; i < FuncVecSize; ++i)
        FVal0.push_back((float)DVal0[i]);
      ArrayRef<float> tmp0(FVal0);
      nval0 = ConstantDataVector::get(context, tmp0);
      if (hasTwoResults) {
        for (int i = 0; i < FuncVecSize; ++i)
          FVal1.push_back((float)DVal1[i]);
        ArrayRef<float> tmp1(FVal1);
        nval1 = ConstantDataVector::get(context, tmp1);
      }
    } else {
      // Only use the first FuncVecSize lanes of the 16-element scratch array.
      ArrayRef<double> tmp0(DVal0, FuncVecSize);
      nval0 = ConstantDataVector::get(context, tmp0);
      if (hasTwoResults) {
        ArrayRef<double> tmp1(DVal1, FuncVecSize);
        nval1 = ConstantDataVector::get(context, tmp1);
      }
    }
  }

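  // For sincos the second result is returned through the pointer argument:
  // store the cos value there and let the sin value replace the call itself.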
  if (hasTwoResults) {
    // sincos
    assert(FInfo.getId() == AMDGPULibFunc::EI_SINCOS &&
           "math function with ptr arg not supported yet");
    new StoreInst(nval1, aCI->getArgOperand(1), aCI->getIterator());
  }

  replaceCall(aCI, nval0);
  return true;
}

PreservedAnalyses AMDGPUSimplifyLibCallsPass::run(Function &F,
                                                  FunctionAnalysisManager &AM) {
  AMDGPULibCalls Simplifier;
  Simplifier.initNativeFuncs();
  Simplifier.initFunction(F, AM);

  bool Changed = false;

  LLVM_DEBUG(dbgs() << "AMDIC: process function ";
             F.printAsOperand(dbgs(), false, F.getParent()); dbgs() << '\n';);

  for (auto &BB : F) {
    for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E;) {
      // Ignore non-calls.
      CallInst *CI = dyn_cast<CallInst>(I);
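      // Advance the iterator before folding: a successful fold may erase CI
      // from the block.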
      ++I;

      if (CI) {
        if (Simplifier.fold(CI))
          Changed = true;
      }
    }
  }
  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}

PreservedAnalyses AMDGPUUseNativeCallsPass::run(Function &F,
                                                FunctionAnalysisManager &AM) {
  if (UseNative.empty())
    return PreservedAnalyses::all();

  AMDGPULibCalls Simplifier;
  Simplifier.initNativeFuncs();
  Simplifier.initFunction(F, AM);

  bool Changed = false;
  for (auto &BB : F) {
    for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E;) {
      // Ignore non-calls.
      CallInst *CI = dyn_cast<CallInst>(I);
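      // Advance the iterator first; useNative() may rewrite or replace CI.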
      ++I;
      if (CI && Simplifier.useNative(CI))
        Changed = true;
    }
  }
  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}