xref: /freebsd-src/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp (revision 647cbc5de815c5651677bf8582797f716ec7b48d)
1 //===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This is the parent TargetLowering class for hardware code gen
11 /// targets.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "AMDGPUISelLowering.h"
16 #include "AMDGPU.h"
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPUMachineFunction.h"
19 #include "SIMachineFunctionInfo.h"
20 #include "llvm/CodeGen/Analysis.h"
21 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/IR/DiagnosticInfo.h"
24 #include "llvm/IR/IntrinsicsAMDGPU.h"
25 #include "llvm/IR/PatternMatch.h"
26 #include "llvm/Support/CommandLine.h"
27 #include "llvm/Support/KnownBits.h"
28 #include "llvm/Target/TargetMachine.h"
29 
30 using namespace llvm;
31 
32 #include "AMDGPUGenCallingConv.inc"
33 
34 static cl::opt<bool> AMDGPUBypassSlowDiv(
35   "amdgpu-bypass-slow-div",
36   cl::desc("Skip 64-bit divide for dynamic 32-bit values"),
37   cl::init(true));
38 
39 // Find a larger type to do a load / store of a vector with.
40 EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
41   unsigned StoreSize = VT.getStoreSizeInBits();
42   if (StoreSize <= 32)
43     return EVT::getIntegerVT(Ctx, StoreSize);
44 
45   assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
46   return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
47 }
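// For example, v4i8 (32 bits) maps to i32 and v8i16 (128 bits) maps to v4i32;
// store sizes that are not a multiple of 32 bits would trip the assert above.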
48 
49 unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
50   return DAG.computeKnownBits(Op).countMaxActiveBits();
51 }
52 
53 unsigned AMDGPUTargetLowering::numBitsSigned(SDValue Op, SelectionDAG &DAG) {
54   // In order for this to be a signed 24-bit value, bit 23 must
55   // be a sign bit.
56   return DAG.ComputeMaxSignificantBits(Op);
57 }
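// A rough sketch of how these are typically used (LHS/RHS are placeholders):
// a multiply can take the 24-bit path when both operands are known to be
// narrow enough, e.g.
//   if (numBitsUnsigned(LHS, DAG) <= 24 && numBitsUnsigned(RHS, DAG) <= 24)
//     ... use the unsigned 24-bit multiply (v_mul_u32_u24) ...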
58 
59 AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
60                                            const AMDGPUSubtarget &STI)
61     : TargetLowering(TM), Subtarget(&STI) {
62   // Lower floating point store/load to integer store/load to reduce the number
63   // of patterns in tablegen.
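  // For example, with the f32 pair below an f32 load is legalized as an i32
  // load whose result is bitcast back to f32, so only the integer load
  // patterns are needed.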
64   setOperationAction(ISD::LOAD, MVT::f32, Promote);
65   AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);
66 
67   setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
68   AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);
69 
70   setOperationAction(ISD::LOAD, MVT::v3f32, Promote);
71   AddPromotedToType(ISD::LOAD, MVT::v3f32, MVT::v3i32);
72 
73   setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
74   AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);
75 
76   setOperationAction(ISD::LOAD, MVT::v5f32, Promote);
77   AddPromotedToType(ISD::LOAD, MVT::v5f32, MVT::v5i32);
78 
79   setOperationAction(ISD::LOAD, MVT::v6f32, Promote);
80   AddPromotedToType(ISD::LOAD, MVT::v6f32, MVT::v6i32);
81 
82   setOperationAction(ISD::LOAD, MVT::v7f32, Promote);
83   AddPromotedToType(ISD::LOAD, MVT::v7f32, MVT::v7i32);
84 
85   setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
86   AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);
87 
88   setOperationAction(ISD::LOAD, MVT::v9f32, Promote);
89   AddPromotedToType(ISD::LOAD, MVT::v9f32, MVT::v9i32);
90 
91   setOperationAction(ISD::LOAD, MVT::v10f32, Promote);
92   AddPromotedToType(ISD::LOAD, MVT::v10f32, MVT::v10i32);
93 
94   setOperationAction(ISD::LOAD, MVT::v11f32, Promote);
95   AddPromotedToType(ISD::LOAD, MVT::v11f32, MVT::v11i32);
96 
97   setOperationAction(ISD::LOAD, MVT::v12f32, Promote);
98   AddPromotedToType(ISD::LOAD, MVT::v12f32, MVT::v12i32);
99 
100   setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
101   AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);
102 
103   setOperationAction(ISD::LOAD, MVT::v32f32, Promote);
104   AddPromotedToType(ISD::LOAD, MVT::v32f32, MVT::v32i32);
105 
106   setOperationAction(ISD::LOAD, MVT::i64, Promote);
107   AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
108 
109   setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
110   AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);
111 
112   setOperationAction(ISD::LOAD, MVT::f64, Promote);
113   AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);
114 
115   setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
116   AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);
117 
118   setOperationAction(ISD::LOAD, MVT::v3i64, Promote);
119   AddPromotedToType(ISD::LOAD, MVT::v3i64, MVT::v6i32);
120 
121   setOperationAction(ISD::LOAD, MVT::v4i64, Promote);
122   AddPromotedToType(ISD::LOAD, MVT::v4i64, MVT::v8i32);
123 
124   setOperationAction(ISD::LOAD, MVT::v3f64, Promote);
125   AddPromotedToType(ISD::LOAD, MVT::v3f64, MVT::v6i32);
126 
127   setOperationAction(ISD::LOAD, MVT::v4f64, Promote);
128   AddPromotedToType(ISD::LOAD, MVT::v4f64, MVT::v8i32);
129 
130   setOperationAction(ISD::LOAD, MVT::v8i64, Promote);
131   AddPromotedToType(ISD::LOAD, MVT::v8i64, MVT::v16i32);
132 
133   setOperationAction(ISD::LOAD, MVT::v8f64, Promote);
134   AddPromotedToType(ISD::LOAD, MVT::v8f64, MVT::v16i32);
135 
136   setOperationAction(ISD::LOAD, MVT::v16i64, Promote);
137   AddPromotedToType(ISD::LOAD, MVT::v16i64, MVT::v32i32);
138 
139   setOperationAction(ISD::LOAD, MVT::v16f64, Promote);
140   AddPromotedToType(ISD::LOAD, MVT::v16f64, MVT::v32i32);
141 
142   setOperationAction(ISD::LOAD, MVT::i128, Promote);
143   AddPromotedToType(ISD::LOAD, MVT::i128, MVT::v4i32);
144 
145   // There are no 64-bit extloads. These should be done as a 32-bit extload and
146   // an extension to 64-bit.
147   for (MVT VT : MVT::integer_valuetypes())
148     setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::i64, VT,
149                      Expand);
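  // e.g. (i64 (zextload i16)) becomes a 32-bit zextload followed by a zero
  // extension of the result:
  //   (i64 (zero_extend (i32 (zextload i16))))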
150 
151   for (MVT VT : MVT::integer_valuetypes()) {
152     if (VT == MVT::i64)
153       continue;
154 
155     for (auto Op : {ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}) {
156       setLoadExtAction(Op, VT, MVT::i1, Promote);
157       setLoadExtAction(Op, VT, MVT::i8, Legal);
158       setLoadExtAction(Op, VT, MVT::i16, Legal);
159       setLoadExtAction(Op, VT, MVT::i32, Expand);
160     }
161   }
162 
163   for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
164     for (auto MemVT :
165          {MVT::v2i8, MVT::v4i8, MVT::v2i16, MVT::v3i16, MVT::v4i16})
166       setLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}, VT, MemVT,
167                        Expand);
168 
169   setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
170   setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::bf16, Expand);
171   setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
172   setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2bf16, Expand);
173   setLoadExtAction(ISD::EXTLOAD, MVT::v3f32, MVT::v3f16, Expand);
174   setLoadExtAction(ISD::EXTLOAD, MVT::v3f32, MVT::v3bf16, Expand);
175   setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
176   setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4bf16, Expand);
177   setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);
178   setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8bf16, Expand);
179   setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Expand);
180   setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16bf16, Expand);
181   setLoadExtAction(ISD::EXTLOAD, MVT::v32f32, MVT::v32f16, Expand);
182   setLoadExtAction(ISD::EXTLOAD, MVT::v32f32, MVT::v32bf16, Expand);
183 
184   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
185   setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
186   setLoadExtAction(ISD::EXTLOAD, MVT::v3f64, MVT::v3f32, Expand);
187   setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
188   setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);
189   setLoadExtAction(ISD::EXTLOAD, MVT::v16f64, MVT::v16f32, Expand);
190 
191   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
192   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::bf16, Expand);
193   setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
194   setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2bf16, Expand);
195   setLoadExtAction(ISD::EXTLOAD, MVT::v3f64, MVT::v3f16, Expand);
196   setLoadExtAction(ISD::EXTLOAD, MVT::v3f64, MVT::v3bf16, Expand);
197   setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
198   setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4bf16, Expand);
199   setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);
200   setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8bf16, Expand);
201   setLoadExtAction(ISD::EXTLOAD, MVT::v16f64, MVT::v16f16, Expand);
202   setLoadExtAction(ISD::EXTLOAD, MVT::v16f64, MVT::v16bf16, Expand);
203 
204   setOperationAction(ISD::STORE, MVT::f32, Promote);
205   AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);
206 
207   setOperationAction(ISD::STORE, MVT::v2f32, Promote);
208   AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);
209 
210   setOperationAction(ISD::STORE, MVT::v3f32, Promote);
211   AddPromotedToType(ISD::STORE, MVT::v3f32, MVT::v3i32);
212 
213   setOperationAction(ISD::STORE, MVT::v4f32, Promote);
214   AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);
215 
216   setOperationAction(ISD::STORE, MVT::v5f32, Promote);
217   AddPromotedToType(ISD::STORE, MVT::v5f32, MVT::v5i32);
218 
219   setOperationAction(ISD::STORE, MVT::v6f32, Promote);
220   AddPromotedToType(ISD::STORE, MVT::v6f32, MVT::v6i32);
221 
222   setOperationAction(ISD::STORE, MVT::v7f32, Promote);
223   AddPromotedToType(ISD::STORE, MVT::v7f32, MVT::v7i32);
224 
225   setOperationAction(ISD::STORE, MVT::v8f32, Promote);
226   AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);
227 
228   setOperationAction(ISD::STORE, MVT::v9f32, Promote);
229   AddPromotedToType(ISD::STORE, MVT::v9f32, MVT::v9i32);
230 
231   setOperationAction(ISD::STORE, MVT::v10f32, Promote);
232   AddPromotedToType(ISD::STORE, MVT::v10f32, MVT::v10i32);
233 
234   setOperationAction(ISD::STORE, MVT::v11f32, Promote);
235   AddPromotedToType(ISD::STORE, MVT::v11f32, MVT::v11i32);
236 
237   setOperationAction(ISD::STORE, MVT::v12f32, Promote);
238   AddPromotedToType(ISD::STORE, MVT::v12f32, MVT::v12i32);
239 
240   setOperationAction(ISD::STORE, MVT::v16f32, Promote);
241   AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);
242 
243   setOperationAction(ISD::STORE, MVT::v32f32, Promote);
244   AddPromotedToType(ISD::STORE, MVT::v32f32, MVT::v32i32);
245 
246   setOperationAction(ISD::STORE, MVT::i64, Promote);
247   AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
248 
249   setOperationAction(ISD::STORE, MVT::v2i64, Promote);
250   AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);
251 
252   setOperationAction(ISD::STORE, MVT::f64, Promote);
253   AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);
254 
255   setOperationAction(ISD::STORE, MVT::v2f64, Promote);
256   AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);
257 
258   setOperationAction(ISD::STORE, MVT::v3i64, Promote);
259   AddPromotedToType(ISD::STORE, MVT::v3i64, MVT::v6i32);
260 
261   setOperationAction(ISD::STORE, MVT::v3f64, Promote);
262   AddPromotedToType(ISD::STORE, MVT::v3f64, MVT::v6i32);
263 
264   setOperationAction(ISD::STORE, MVT::v4i64, Promote);
265   AddPromotedToType(ISD::STORE, MVT::v4i64, MVT::v8i32);
266 
267   setOperationAction(ISD::STORE, MVT::v4f64, Promote);
268   AddPromotedToType(ISD::STORE, MVT::v4f64, MVT::v8i32);
269 
270   setOperationAction(ISD::STORE, MVT::v8i64, Promote);
271   AddPromotedToType(ISD::STORE, MVT::v8i64, MVT::v16i32);
272 
273   setOperationAction(ISD::STORE, MVT::v8f64, Promote);
274   AddPromotedToType(ISD::STORE, MVT::v8f64, MVT::v16i32);
275 
276   setOperationAction(ISD::STORE, MVT::v16i64, Promote);
277   AddPromotedToType(ISD::STORE, MVT::v16i64, MVT::v32i32);
278 
279   setOperationAction(ISD::STORE, MVT::v16f64, Promote);
280   AddPromotedToType(ISD::STORE, MVT::v16f64, MVT::v32i32);
281 
282   setOperationAction(ISD::STORE, MVT::i128, Promote);
283   AddPromotedToType(ISD::STORE, MVT::i128, MVT::v4i32);
284 
285   setTruncStoreAction(MVT::i64, MVT::i1, Expand);
286   setTruncStoreAction(MVT::i64, MVT::i8, Expand);
287   setTruncStoreAction(MVT::i64, MVT::i16, Expand);
288   setTruncStoreAction(MVT::i64, MVT::i32, Expand);
289 
290   setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
291   setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
292   setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
293   setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);
294 
295   setTruncStoreAction(MVT::f32, MVT::bf16, Expand);
296   setTruncStoreAction(MVT::f32, MVT::f16, Expand);
297   setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
298   setTruncStoreAction(MVT::v3f32, MVT::v3f16, Expand);
299   setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
300   setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);
301   setTruncStoreAction(MVT::v16f32, MVT::v16f16, Expand);
302   setTruncStoreAction(MVT::v32f32, MVT::v32f16, Expand);
303 
304   setTruncStoreAction(MVT::f64, MVT::bf16, Expand);
305   setTruncStoreAction(MVT::f64, MVT::f16, Expand);
306   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
307 
308   setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
309   setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);
310 
311   setTruncStoreAction(MVT::v3i64, MVT::v3i32, Expand);
312   setTruncStoreAction(MVT::v3i64, MVT::v3i16, Expand);
313   setTruncStoreAction(MVT::v3f64, MVT::v3f32, Expand);
314   setTruncStoreAction(MVT::v3f64, MVT::v3f16, Expand);
315 
316   setTruncStoreAction(MVT::v4i64, MVT::v4i32, Expand);
317   setTruncStoreAction(MVT::v4i64, MVT::v4i16, Expand);
318   setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
319   setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);
320 
321   setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
322   setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);
323 
324   setTruncStoreAction(MVT::v16f64, MVT::v16f32, Expand);
325   setTruncStoreAction(MVT::v16f64, MVT::v16f16, Expand);
326   setTruncStoreAction(MVT::v16i64, MVT::v16i16, Expand);
327   setTruncStoreAction(MVT::v16i64, MVT::v16i16, Expand);
328   setTruncStoreAction(MVT::v16i64, MVT::v16i8, Expand);
329   setTruncStoreAction(MVT::v16i64, MVT::v16i8, Expand);
330   setTruncStoreAction(MVT::v16i64, MVT::v16i1, Expand);
331 
332   setOperationAction(ISD::Constant, {MVT::i32, MVT::i64}, Legal);
333   setOperationAction(ISD::ConstantFP, {MVT::f32, MVT::f64}, Legal);
334 
335   setOperationAction({ISD::BR_JT, ISD::BRIND}, MVT::Other, Expand);
336 
337   // For R600, this is totally unsupported; just custom lower to produce an
338   // error.
339   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
340 
341   // Library functions.  These default to Expand, but we have instructions
342   // for them.
343   setOperationAction({ISD::FCEIL, ISD::FPOW, ISD::FABS, ISD::FFLOOR,
344                       ISD::FROUNDEVEN, ISD::FTRUNC, ISD::FMINNUM, ISD::FMAXNUM},
345                      MVT::f32, Legal);
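  // e.g. an f32 floor therefore selects directly to the hardware instruction
  // (v_floor_f32 on GCN targets) rather than expanding to a libcall.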
346 
347   setOperationAction(ISD::FLOG2, MVT::f32, Custom);
348   setOperationAction(ISD::FROUND, {MVT::f32, MVT::f64}, Custom);
349 
350   setOperationAction(
351       {ISD::FLOG, ISD::FLOG10, ISD::FEXP, ISD::FEXP2, ISD::FEXP10}, MVT::f32,
352       Custom);
353 
354   setOperationAction(ISD::FNEARBYINT, {MVT::f16, MVT::f32, MVT::f64}, Custom);
355 
356   setOperationAction(ISD::FRINT, {MVT::f16, MVT::f32, MVT::f64}, Custom);
357 
358   setOperationAction(ISD::FREM, {MVT::f16, MVT::f32, MVT::f64}, Custom);
359 
360   if (Subtarget->has16BitInsts())
361     setOperationAction(ISD::IS_FPCLASS, {MVT::f16, MVT::f32, MVT::f64}, Legal);
362   else {
363     setOperationAction(ISD::IS_FPCLASS, {MVT::f32, MVT::f64}, Legal);
364     setOperationAction({ISD::FLOG2, ISD::FEXP2}, MVT::f16, Custom);
365   }
366 
367   setOperationAction({ISD::FLOG10, ISD::FLOG, ISD::FEXP, ISD::FEXP10}, MVT::f16,
368                      Custom);
369 
370   // FIXME: These IS_FPCLASS vector fp types are marked custom so they reach the
371   // scalarization code. This can be removed once the IS_FPCLASS expansion is no
372   // longer called by default unless marked custom/legal.
373   setOperationAction(
374       ISD::IS_FPCLASS,
375       {MVT::v2f16, MVT::v3f16, MVT::v4f16, MVT::v16f16, MVT::v2f32, MVT::v3f32,
376        MVT::v4f32, MVT::v5f32, MVT::v6f32, MVT::v7f32, MVT::v8f32, MVT::v16f32,
377        MVT::v2f64, MVT::v3f64, MVT::v4f64, MVT::v8f64, MVT::v16f64},
378       Custom);
379 
380   // Expand to fneg + fadd.
381   setOperationAction(ISD::FSUB, MVT::f64, Expand);
382 
383   setOperationAction(ISD::CONCAT_VECTORS,
384                      {MVT::v3i32,  MVT::v3f32,  MVT::v4i32,  MVT::v4f32,
385                       MVT::v5i32,  MVT::v5f32,  MVT::v6i32,  MVT::v6f32,
386                       MVT::v7i32,  MVT::v7f32,  MVT::v8i32,  MVT::v8f32,
387                       MVT::v9i32,  MVT::v9f32,  MVT::v10i32, MVT::v10f32,
388                       MVT::v11i32, MVT::v11f32, MVT::v12i32, MVT::v12f32},
389                      Custom);
390   setOperationAction(
391       ISD::EXTRACT_SUBVECTOR,
392       {MVT::v2f16,  MVT::v2i16,  MVT::v4f16,  MVT::v4i16,  MVT::v2f32,
393        MVT::v2i32,  MVT::v3f32,  MVT::v3i32,  MVT::v4f32,  MVT::v4i32,
394        MVT::v5f32,  MVT::v5i32,  MVT::v6f32,  MVT::v6i32,  MVT::v7f32,
395        MVT::v7i32,  MVT::v8f32,  MVT::v8i32,  MVT::v9f32,  MVT::v9i32,
396        MVT::v10i32, MVT::v10f32, MVT::v11i32, MVT::v11f32, MVT::v12i32,
397        MVT::v12f32, MVT::v16f16, MVT::v16i16, MVT::v16f32, MVT::v16i32,
398        MVT::v32f32, MVT::v32i32, MVT::v2f64,  MVT::v2i64,  MVT::v3f64,
399        MVT::v3i64,  MVT::v4f64,  MVT::v4i64,  MVT::v8f64,  MVT::v8i64,
400        MVT::v16f64, MVT::v16i64, MVT::v32i16, MVT::v32f16},
401       Custom);
402 
403   setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
404   setOperationAction(ISD::FP_TO_FP16, {MVT::f64, MVT::f32}, Custom);
405 
406   const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
407   for (MVT VT : ScalarIntVTs) {
408     // These should use [SU]DIVREM, so set them to expand
409     setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM}, VT,
410                        Expand);
411 
412     // The GPU does not have a divrem instruction for signed or unsigned.
413     setOperationAction({ISD::SDIVREM, ISD::UDIVREM}, VT, Custom);
414 
415     // The GPU does not have [S|U]MUL_LOHI as a single instruction.
416     setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand);
417 
418     setOperationAction({ISD::BSWAP, ISD::CTTZ, ISD::CTLZ}, VT, Expand);
419 
420     // AMDGPU uses ADDC/SUBC/ADDE/SUBE
421     setOperationAction({ISD::ADDC, ISD::SUBC, ISD::ADDE, ISD::SUBE}, VT, Legal);
422   }
423 
424   // The hardware supports 32-bit FSHR, but not FSHL.
425   setOperationAction(ISD::FSHR, MVT::i32, Legal);
426 
427   // The hardware supports 32-bit ROTR, but not ROTL.
428   setOperationAction(ISD::ROTL, {MVT::i32, MVT::i64}, Expand);
429   setOperationAction(ISD::ROTR, MVT::i64, Expand);
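  // e.g. a 32-bit rotl is rewritten in terms of the supported rotr:
  //   rotl(x, n) -> rotr(x, 32 - n)   (modulo the bit width)
  // and 32-bit fshr/rotr can be implemented with v_alignbit_b32.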
430 
431   setOperationAction({ISD::MULHU, ISD::MULHS}, MVT::i16, Expand);
432 
433   setOperationAction({ISD::MUL, ISD::MULHU, ISD::MULHS}, MVT::i64, Expand);
434   setOperationAction(
435       {ISD::UINT_TO_FP, ISD::SINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
436       MVT::i64, Custom);
437   setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
438 
439   setOperationAction({ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX}, MVT::i32,
440                      Legal);
441 
442   setOperationAction(
443       {ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF},
444       MVT::i64, Custom);
445 
446   static const MVT::SimpleValueType VectorIntTypes[] = {
447       MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32, MVT::v6i32, MVT::v7i32,
448       MVT::v9i32, MVT::v10i32, MVT::v11i32, MVT::v12i32};
449 
450   for (MVT VT : VectorIntTypes) {
451     // Expand the following operations for the current type by default.
452     setOperationAction({ISD::ADD,        ISD::AND,     ISD::FP_TO_SINT,
453                         ISD::FP_TO_UINT, ISD::MUL,     ISD::MULHU,
454                         ISD::MULHS,      ISD::OR,      ISD::SHL,
455                         ISD::SRA,        ISD::SRL,     ISD::ROTL,
456                         ISD::ROTR,       ISD::SUB,     ISD::SINT_TO_FP,
457                         ISD::UINT_TO_FP, ISD::SDIV,    ISD::UDIV,
458                         ISD::SREM,       ISD::UREM,    ISD::SMUL_LOHI,
459                         ISD::UMUL_LOHI,  ISD::SDIVREM, ISD::UDIVREM,
460                         ISD::SELECT,     ISD::VSELECT, ISD::SELECT_CC,
461                         ISD::XOR,        ISD::BSWAP,   ISD::CTPOP,
462                         ISD::CTTZ,       ISD::CTLZ,    ISD::VECTOR_SHUFFLE,
463                         ISD::SETCC},
464                        VT, Expand);
465   }
466 
467   static const MVT::SimpleValueType FloatVectorTypes[] = {
468       MVT::v2f32, MVT::v3f32,  MVT::v4f32, MVT::v5f32, MVT::v6f32, MVT::v7f32,
469       MVT::v9f32, MVT::v10f32, MVT::v11f32, MVT::v12f32};
470 
471   for (MVT VT : FloatVectorTypes) {
472     setOperationAction(
473         {ISD::FABS,          ISD::FMINNUM,        ISD::FMAXNUM,
474          ISD::FADD,          ISD::FCEIL,          ISD::FCOS,
475          ISD::FDIV,          ISD::FEXP2,          ISD::FEXP,
476          ISD::FEXP10,        ISD::FLOG2,          ISD::FREM,
477          ISD::FLOG,          ISD::FLOG10,         ISD::FPOW,
478          ISD::FFLOOR,        ISD::FTRUNC,         ISD::FMUL,
479          ISD::FMA,           ISD::FRINT,          ISD::FNEARBYINT,
480          ISD::FSQRT,         ISD::FSIN,           ISD::FSUB,
481          ISD::FNEG,          ISD::VSELECT,        ISD::SELECT_CC,
482          ISD::FCOPYSIGN,     ISD::VECTOR_SHUFFLE, ISD::SETCC,
483          ISD::FCANONICALIZE, ISD::FROUNDEVEN},
484         VT, Expand);
485   }
486 
487   // This causes an unrolled select operation to be used rather than expansion
488   // with bit operations. This is in general better, but the alternative using
489   // BFI instructions may be better if the select sources are SGPRs.
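  // For example, with the pairs below a v2f32 select becomes a v2i32 select,
  // which is then unrolled into one 32-bit select per element (each typically
  // a v_cndmask_b32).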
490   setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
491   AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);
492 
493   setOperationAction(ISD::SELECT, MVT::v3f32, Promote);
494   AddPromotedToType(ISD::SELECT, MVT::v3f32, MVT::v3i32);
495 
496   setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
497   AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);
498 
499   setOperationAction(ISD::SELECT, MVT::v5f32, Promote);
500   AddPromotedToType(ISD::SELECT, MVT::v5f32, MVT::v5i32);
501 
502   setOperationAction(ISD::SELECT, MVT::v6f32, Promote);
503   AddPromotedToType(ISD::SELECT, MVT::v6f32, MVT::v6i32);
504 
505   setOperationAction(ISD::SELECT, MVT::v7f32, Promote);
506   AddPromotedToType(ISD::SELECT, MVT::v7f32, MVT::v7i32);
507 
508   setOperationAction(ISD::SELECT, MVT::v9f32, Promote);
509   AddPromotedToType(ISD::SELECT, MVT::v9f32, MVT::v9i32);
510 
511   setOperationAction(ISD::SELECT, MVT::v10f32, Promote);
512   AddPromotedToType(ISD::SELECT, MVT::v10f32, MVT::v10i32);
513 
514   setOperationAction(ISD::SELECT, MVT::v11f32, Promote);
515   AddPromotedToType(ISD::SELECT, MVT::v11f32, MVT::v11i32);
516 
517   setOperationAction(ISD::SELECT, MVT::v12f32, Promote);
518   AddPromotedToType(ISD::SELECT, MVT::v12f32, MVT::v12i32);
519 
520   // Disable most libcalls.
521   for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I) {
522     if (I < RTLIB::ATOMIC_LOAD || I > RTLIB::ATOMIC_FETCH_NAND_16)
523       setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);
524   }
525 
526   setSchedulingPreference(Sched::RegPressure);
527   setJumpIsExpensive(true);
528 
529   // FIXME: This is only partially true. If we have to do vector compares, any
530   // SGPR pair can be a condition register. If we have a uniform condition, we
531   // are better off doing SALU operations, where there is only one SCC. For now,
532   // we don't have a way of knowing during instruction selection if a condition
533   // will be uniform and we always use vector compares. Assume we are using
534   // vector compares until that is fixed.
535   setHasMultipleConditionRegisters(true);
536 
537   setMinCmpXchgSizeInBits(32);
538   setSupportsUnalignedAtomics(false);
539 
540   PredictableSelectIsExpensive = false;
541 
542   // We want to find all load dependencies for long chains of stores to enable
543   // merging into very wide vectors. The problem is with vectors with > 4
544   // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
545   // vectors are a legal type, even though we have to split the loads
546   // usually. When we can more precisely specify load legality per address
547   // space, we should be able to make FindBetterChain/MergeConsecutiveStores
548   // smarter so that they can figure out what to do in 2 iterations without all
549   // N > 4 stores on the same chain.
550   GatherAllAliasesMaxDepth = 16;
551 
552   // memcpy/memmove/memset are expanded in the IR, so we shouldn't need to worry
553   // about these during lowering.
554   MaxStoresPerMemcpy  = 0xffffffff;
555   MaxStoresPerMemmove = 0xffffffff;
556   MaxStoresPerMemset  = 0xffffffff;
557 
558   // The expansion for 64-bit division is enormous.
559   if (AMDGPUBypassSlowDiv)
560     addBypassSlowDiv(64, 32);
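  // addBypassSlowDiv inserts a runtime check: when both 64-bit operands
  // actually fit in 32 bits, the much cheaper 32-bit division is used instead
  // and its result is extended back to 64 bits.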
561 
562   setTargetDAGCombine({ISD::BITCAST,    ISD::SHL,
563                        ISD::SRA,        ISD::SRL,
564                        ISD::TRUNCATE,   ISD::MUL,
565                        ISD::SMUL_LOHI,  ISD::UMUL_LOHI,
566                        ISD::MULHU,      ISD::MULHS,
567                        ISD::SELECT,     ISD::SELECT_CC,
568                        ISD::STORE,      ISD::FADD,
569                        ISD::FSUB,       ISD::FNEG,
570                        ISD::FABS,       ISD::AssertZext,
571                        ISD::AssertSext, ISD::INTRINSIC_WO_CHAIN});
572 
573   setMaxAtomicSizeInBitsSupported(64);
574 }
575 
576 bool AMDGPUTargetLowering::mayIgnoreSignedZero(SDValue Op) const {
577   if (getTargetMachine().Options.NoSignedZerosFPMath)
578     return true;
579 
580   const auto Flags = Op.getNode()->getFlags();
581   if (Flags.hasNoSignedZeros())
582     return true;
583 
584   return false;
585 }
586 
587 //===----------------------------------------------------------------------===//
588 // Target Information
589 //===----------------------------------------------------------------------===//
590 
591 LLVM_READNONE
592 static bool fnegFoldsIntoOpcode(unsigned Opc) {
593   switch (Opc) {
594   case ISD::FADD:
595   case ISD::FSUB:
596   case ISD::FMUL:
597   case ISD::FMA:
598   case ISD::FMAD:
599   case ISD::FMINNUM:
600   case ISD::FMAXNUM:
601   case ISD::FMINNUM_IEEE:
602   case ISD::FMAXNUM_IEEE:
603   case ISD::FMINIMUM:
604   case ISD::FMAXIMUM:
605   case ISD::SELECT:
606   case ISD::FSIN:
607   case ISD::FTRUNC:
608   case ISD::FRINT:
609   case ISD::FNEARBYINT:
610   case ISD::FROUNDEVEN:
611   case ISD::FCANONICALIZE:
612   case AMDGPUISD::RCP:
613   case AMDGPUISD::RCP_LEGACY:
614   case AMDGPUISD::RCP_IFLAG:
615   case AMDGPUISD::SIN_HW:
616   case AMDGPUISD::FMUL_LEGACY:
617   case AMDGPUISD::FMIN_LEGACY:
618   case AMDGPUISD::FMAX_LEGACY:
619   case AMDGPUISD::FMED3:
620     // TODO: handle llvm.amdgcn.fma.legacy
621     return true;
622   case ISD::BITCAST:
623     llvm_unreachable("bitcast is special cased");
624   default:
625     return false;
626   }
627 }
628 
629 static bool fnegFoldsIntoOp(const SDNode *N) {
630   unsigned Opc = N->getOpcode();
631   if (Opc == ISD::BITCAST) {
632     // TODO: Is there a benefit to checking the conditions performFNegCombine
633     // does? We don't for the other cases.
634     SDValue BCSrc = N->getOperand(0);
635     if (BCSrc.getOpcode() == ISD::BUILD_VECTOR) {
636       return BCSrc.getNumOperands() == 2 &&
637              BCSrc.getOperand(1).getValueSizeInBits() == 32;
638     }
639 
640     return BCSrc.getOpcode() == ISD::SELECT && BCSrc.getValueType() == MVT::f32;
641   }
642 
643   return fnegFoldsIntoOpcode(Opc);
644 }
645 
646 /// \returns true if the operation will definitely need to use a 64-bit
647 /// encoding, and thus will use a VOP3 encoding regardless of the source
648 /// modifiers.
649 LLVM_READONLY
650 static bool opMustUseVOP3Encoding(const SDNode *N, MVT VT) {
651   return (N->getNumOperands() > 2 && N->getOpcode() != ISD::SELECT) ||
652          VT == MVT::f64;
653 }
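// For example, FMA and FMAD take three operands and therefore always require
// the 64-bit VOP3 encoding, so applying a source modifier to one of their
// operands does not grow the instruction.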
654 
655 /// Return true if v_cndmask_b32 will support fabs/fneg source modifiers for
656 /// the given type when lowering ISD::SELECT.
657 LLVM_READONLY
658 static bool selectSupportsSourceMods(const SDNode *N) {
659   // TODO: Only applies if select will be vector
660   return N->getValueType(0) == MVT::f32;
661 }
662 
663 // Most FP instructions support source modifiers, but this could be refined
664 // slightly.
665 LLVM_READONLY
666 static bool hasSourceMods(const SDNode *N) {
667   if (isa<MemSDNode>(N))
668     return false;
669 
670   switch (N->getOpcode()) {
671   case ISD::CopyToReg:
672   case ISD::FDIV:
673   case ISD::FREM:
674   case ISD::INLINEASM:
675   case ISD::INLINEASM_BR:
676   case AMDGPUISD::DIV_SCALE:
677   case ISD::INTRINSIC_W_CHAIN:
678 
679   // TODO: Should really be looking at the users of the bitcast. These are
680   // problematic because bitcasts are used to legalize all stores to integer
681   // types.
682   case ISD::BITCAST:
683     return false;
684   case ISD::INTRINSIC_WO_CHAIN: {
685     switch (N->getConstantOperandVal(0)) {
686     case Intrinsic::amdgcn_interp_p1:
687     case Intrinsic::amdgcn_interp_p2:
688     case Intrinsic::amdgcn_interp_mov:
689     case Intrinsic::amdgcn_interp_p1_f16:
690     case Intrinsic::amdgcn_interp_p2_f16:
691       return false;
692     default:
693       return true;
694     }
695   }
696   case ISD::SELECT:
697     return selectSupportsSourceMods(N);
698   default:
699     return true;
700   }
701 }
702 
703 bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N,
704                                                  unsigned CostThreshold) {
705   // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
706   // a source modifier is truly free for them in all cases. If there are
707   // multiple users and each one would be forced into a VOP3 encoding, there
708   // will be a code size increase. Try to avoid increasing code size unless we
709   // know it will save on the instruction count.
710   unsigned NumMayIncreaseSize = 0;
711   MVT VT = N->getValueType(0).getScalarType().getSimpleVT();
712 
713   assert(!N->use_empty());
714 
715   // XXX - Should this limit number of uses to check?
716   for (const SDNode *U : N->uses()) {
717     if (!hasSourceMods(U))
718       return false;
719 
720     if (!opMustUseVOP3Encoding(U, VT)) {
721       if (++NumMayIncreaseSize > CostThreshold)
722         return false;
723     }
724   }
725 
726   return true;
727 }
728 
729 EVT AMDGPUTargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
730                                               ISD::NodeType ExtendKind) const {
731   assert(!VT.isVector() && "only scalar expected");
732 
733   // Round to the next multiple of 32-bits.
734   unsigned Size = VT.getSizeInBits();
735   if (Size <= 32)
736     return MVT::i32;
737   return EVT::getIntegerVT(Context, 32 * ((Size + 31) / 32));
738 }
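// For example, an i1/i8/i16 return value is widened to i32, while an i48
// return value would be rounded up to i64.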
739 
740 MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
741   return MVT::i32;
742 }
743 
744 bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
745   return true;
746 }
747 
748 // The backend supports 32 and 64 bit floating point immediates.
749 // FIXME: Why are we reporting vectors of FP immediates as legal?
750 bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
751                                         bool ForCodeSize) const {
752   EVT ScalarVT = VT.getScalarType();
753   return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
754          (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
755 }
756 
757 // We don't want to shrink f64 / f32 constants.
758 bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
759   EVT ScalarVT = VT.getScalarType();
760   return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
761 }
762 
763 bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
764                                                  ISD::LoadExtType ExtTy,
765                                                  EVT NewVT) const {
766   // TODO: This may be worth removing. Check regression tests for diffs.
767   if (!TargetLoweringBase::shouldReduceLoadWidth(N, ExtTy, NewVT))
768     return false;
769 
770   unsigned NewSize = NewVT.getStoreSizeInBits();
771 
772   // If we are reducing to a 32-bit load or a smaller multi-dword load,
773   // this is always better.
774   if (NewSize >= 32)
775     return true;
776 
777   EVT OldVT = N->getValueType(0);
778   unsigned OldSize = OldVT.getStoreSizeInBits();
779 
780   MemSDNode *MN = cast<MemSDNode>(N);
781   unsigned AS = MN->getAddressSpace();
782   // Do not shrink an aligned scalar load to sub-dword.
783   // Scalar engine cannot do sub-dword loads.
784   if (OldSize >= 32 && NewSize < 32 && MN->getAlign() >= Align(4) &&
785       (AS == AMDGPUAS::CONSTANT_ADDRESS ||
786        AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
787        (isa<LoadSDNode>(N) && AS == AMDGPUAS::GLOBAL_ADDRESS &&
788         MN->isInvariant())) &&
789       AMDGPUInstrInfo::isUniformMMO(MN->getMemOperand()))
790     return false;
791 
792   // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
793   // extloads, so doing one requires using a buffer_load. In cases where we
794   // still couldn't use a scalar load, using the wider load shouldn't really
795   // hurt anything.
796 
797   // If the old size already had to be an extload, there's no harm in continuing
798   // to reduce the width.
799   return (OldSize < 32);
800 }
801 
802 bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy, EVT CastTy,
803                                                    const SelectionDAG &DAG,
804                                                    const MachineMemOperand &MMO) const {
805 
806   assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());
807 
808   if (LoadTy.getScalarType() == MVT::i32)
809     return false;
810 
811   unsigned LScalarSize = LoadTy.getScalarSizeInBits();
812   unsigned CastScalarSize = CastTy.getScalarSizeInBits();
813 
814   if ((LScalarSize >= CastScalarSize) && (CastScalarSize < 32))
815     return false;
816 
817   unsigned Fast = 0;
818   return allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
819                                         CastTy, MMO, &Fast) &&
820          Fast;
821 }
822 
823 // SI+ has cttz / ctlz instructions for 32-bit values. This is probably also
824 // profitable with the 64-bit expansion, since it's generally good to
825 // speculate things.
826 bool AMDGPUTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
827   return true;
828 }
829 
830 bool AMDGPUTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
831   return true;
832 }
833 
834 bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode *N) const {
835   switch (N->getOpcode()) {
836   case ISD::EntryToken:
837   case ISD::TokenFactor:
838     return true;
839   case ISD::INTRINSIC_WO_CHAIN: {
840     unsigned IntrID = N->getConstantOperandVal(0);
841     switch (IntrID) {
842     case Intrinsic::amdgcn_readfirstlane:
843     case Intrinsic::amdgcn_readlane:
844       return true;
845     }
846     return false;
847   }
848   case ISD::LOAD:
849     if (cast<LoadSDNode>(N)->getMemOperand()->getAddrSpace() ==
850         AMDGPUAS::CONSTANT_ADDRESS_32BIT)
851       return true;
852     return false;
853   case AMDGPUISD::SETCC: // ballot-style instruction
854     return true;
855   }
856   return false;
857 }
858 
859 SDValue AMDGPUTargetLowering::getNegatedExpression(
860     SDValue Op, SelectionDAG &DAG, bool LegalOperations, bool ForCodeSize,
861     NegatibleCost &Cost, unsigned Depth) const {
862 
863   switch (Op.getOpcode()) {
864   case ISD::FMA:
865   case ISD::FMAD: {
866     // Negating a fma is not free if it has users without source mods.
867     if (!allUsesHaveSourceMods(Op.getNode()))
868       return SDValue();
869     break;
870   }
871   case AMDGPUISD::RCP: {
872     SDValue Src = Op.getOperand(0);
873     EVT VT = Op.getValueType();
874     SDLoc SL(Op);
875 
876     SDValue NegSrc = getNegatedExpression(Src, DAG, LegalOperations,
877                                           ForCodeSize, Cost, Depth + 1);
878     if (NegSrc)
879       return DAG.getNode(AMDGPUISD::RCP, SL, VT, NegSrc, Op->getFlags());
880     return SDValue();
881   }
882   default:
883     break;
884   }
885 
886   return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
887                                               ForCodeSize, Cost, Depth);
888 }
889 
890 //===---------------------------------------------------------------------===//
891 // Target Properties
892 //===---------------------------------------------------------------------===//
893 
894 bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
895   assert(VT.isFloatingPoint());
896 
897   // Packed operations do not have a fabs modifier.
898   return VT == MVT::f32 || VT == MVT::f64 ||
899          (Subtarget->has16BitInsts() && VT == MVT::f16);
900 }
901 
902 bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
903   assert(VT.isFloatingPoint());
904   // Report this based on the end legalized type.
905   VT = VT.getScalarType();
906   return VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f16;
907 }
908 
909 bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,
910                                                         unsigned NumElem,
911                                                         unsigned AS) const {
912   return true;
913 }
914 
915 bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
916   // There are few operations which truly have vector input operands. Any vector
917   // operation is going to involve operations on each component, and a
918   // build_vector will be a copy per element, so it always makes sense to use a
919   // build_vector input in place of the extracted element to avoid a copy into a
920   // super register.
921   //
922   // We should probably only do this if all users are extracts only, but this
923   // should be the common case.
924   return true;
925 }
926 
927 bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
928   // Truncate is just accessing a subregister.
929 
930   unsigned SrcSize = Source.getSizeInBits();
931   unsigned DestSize = Dest.getSizeInBits();
932 
933   return DestSize < SrcSize && DestSize % 32 == 0;
934 }
935 
936 bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
937   // Truncate is just accessing a subregister.
938 
939   unsigned SrcSize = Source->getScalarSizeInBits();
940   unsigned DestSize = Dest->getScalarSizeInBits();
941 
942   if (DestSize == 16 && Subtarget->has16BitInsts())
943     return SrcSize >= 32;
944 
945   return DestSize < SrcSize && DestSize % 32 == 0;
946 }
947 
948 bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
949   unsigned SrcSize = Src->getScalarSizeInBits();
950   unsigned DestSize = Dest->getScalarSizeInBits();
951 
952   if (SrcSize == 16 && Subtarget->has16BitInsts())
953     return DestSize >= 32;
954 
955   return SrcSize == 32 && DestSize == 64;
956 }
957 
958 bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
959   // Any register load of a 64-bit value really requires 2 32-bit moves. For all
960   // practical purposes, the extra mov 0 needed to form the 64-bit value is
961   // free. As used, this will enable reducing 64-bit operations to 32-bit ones,
962   // which is always good.
963 
964   if (Src == MVT::i16)
965     return Dest == MVT::i32 || Dest == MVT::i64;
966 
967   return Src == MVT::i32 && Dest == MVT::i64;
968 }
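// For example, treating (zext i32 x to i64) as free lets combines narrow
// 64-bit operations to 32 bits and simply pair the result with a zero high
// half.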
969 
970 bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
971   // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
972   // limited number of native 64-bit operations. Shrinking an operation to fit
973   // in a single 32-bit register should always be helpful. As currently used,
974   // this is much less general than the name suggests, and is only used in
975   // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
976   // not profitable, and may actually be harmful.
977   return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
978 }
979 
980 bool AMDGPUTargetLowering::isDesirableToCommuteWithShift(
981     const SDNode* N, CombineLevel Level) const {
982   assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
983           N->getOpcode() == ISD::SRL) &&
984          "Expected shift op");
985   // Always commute pre-type legalization and right shifts.
986   // We're looking for shl(or(x,y),z) patterns.
987   if (Level < CombineLevel::AfterLegalizeTypes ||
988       N->getOpcode() != ISD::SHL || N->getOperand(0).getOpcode() != ISD::OR)
989     return true;
990 
991   // If the only user is an i32 right-shift, then don't destroy a BFE pattern.
992   if (N->getValueType(0) == MVT::i32 && N->use_size() == 1 &&
993       (N->use_begin()->getOpcode() == ISD::SRA ||
994        N->use_begin()->getOpcode() == ISD::SRL))
995     return false;
996 
997   // Don't destroy or(shl(load_zext(),c), load_zext()) patterns.
998   auto IsShiftAndLoad = [](SDValue LHS, SDValue RHS) {
999     if (LHS.getOpcode() != ISD::SHL)
1000       return false;
1001     auto *RHSLd = dyn_cast<LoadSDNode>(RHS);
1002     auto *LHS0 = dyn_cast<LoadSDNode>(LHS.getOperand(0));
1003     auto *LHS1 = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
1004     return LHS0 && LHS1 && RHSLd && LHS0->getExtensionType() == ISD::ZEXTLOAD &&
1005            LHS1->getAPIntValue() == LHS0->getMemoryVT().getScalarSizeInBits() &&
1006            RHSLd->getExtensionType() == ISD::ZEXTLOAD;
1007   };
1008   SDValue LHS = N->getOperand(0).getOperand(0);
1009   SDValue RHS = N->getOperand(0).getOperand(1);
1010   return !(IsShiftAndLoad(LHS, RHS) || IsShiftAndLoad(RHS, LHS));
1011 }
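// For example, (srl (shl x, 8), 24) on i32 can be selected as a single
// bitfield extract (v_bfe_u32); commuting the shl through an inner or would
// break that pattern, which is why such cases are rejected above.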
1012 
1013 //===---------------------------------------------------------------------===//
1014 // TargetLowering Callbacks
1015 //===---------------------------------------------------------------------===//
1016 
1017 CCAssignFn *AMDGPUCallLowering::CCAssignFnForCall(CallingConv::ID CC,
1018                                                   bool IsVarArg) {
1019   switch (CC) {
1020   case CallingConv::AMDGPU_VS:
1021   case CallingConv::AMDGPU_GS:
1022   case CallingConv::AMDGPU_PS:
1023   case CallingConv::AMDGPU_CS:
1024   case CallingConv::AMDGPU_HS:
1025   case CallingConv::AMDGPU_ES:
1026   case CallingConv::AMDGPU_LS:
1027     return CC_AMDGPU;
1028   case CallingConv::AMDGPU_CS_Chain:
1029   case CallingConv::AMDGPU_CS_ChainPreserve:
1030     return CC_AMDGPU_CS_CHAIN;
1031   case CallingConv::C:
1032   case CallingConv::Fast:
1033   case CallingConv::Cold:
1034     return CC_AMDGPU_Func;
1035   case CallingConv::AMDGPU_Gfx:
1036     return CC_SI_Gfx;
1037   case CallingConv::AMDGPU_KERNEL:
1038   case CallingConv::SPIR_KERNEL:
1039   default:
1040     report_fatal_error("Unsupported calling convention for call");
1041   }
1042 }
1043 
1044 CCAssignFn *AMDGPUCallLowering::CCAssignFnForReturn(CallingConv::ID CC,
1045                                                     bool IsVarArg) {
1046   switch (CC) {
1047   case CallingConv::AMDGPU_KERNEL:
1048   case CallingConv::SPIR_KERNEL:
1049     llvm_unreachable("kernels should not be handled here");
1050   case CallingConv::AMDGPU_VS:
1051   case CallingConv::AMDGPU_GS:
1052   case CallingConv::AMDGPU_PS:
1053   case CallingConv::AMDGPU_CS:
1054   case CallingConv::AMDGPU_CS_Chain:
1055   case CallingConv::AMDGPU_CS_ChainPreserve:
1056   case CallingConv::AMDGPU_HS:
1057   case CallingConv::AMDGPU_ES:
1058   case CallingConv::AMDGPU_LS:
1059     return RetCC_SI_Shader;
1060   case CallingConv::AMDGPU_Gfx:
1061     return RetCC_SI_Gfx;
1062   case CallingConv::C:
1063   case CallingConv::Fast:
1064   case CallingConv::Cold:
1065     return RetCC_AMDGPU_Func;
1066   default:
1067     report_fatal_error("Unsupported calling convention.");
1068   }
1069 }
1070 
1071 /// The SelectionDAGBuilder will automatically promote function arguments
1072 /// with illegal types.  However, this does not work for the AMDGPU targets
1073 /// since the function arguments are stored in memory as these illegal types.
1074 /// In order to handle this properly we need to get the original type sizes
1075 /// from the LLVM IR Function and fix up the ISD::InputArg values before
1076 /// passing them to AnalyzeFormalArguments().
1077 
1078 /// When the SelectionDAGBuilder computes the Ins, it takes care of splitting
1079 /// input values across multiple registers.  Each item in the Ins array
1080 /// represents a single value that will be stored in registers.  Ins[x].VT is
1081 /// the value type of the value that will be stored in the register, so
1082 /// whatever SDNode we lower the argument to needs to be this type.
1083 ///
1084 /// In order to correctly lower the arguments we need to know the size of each
1085 /// argument.  Since Ins[x].VT gives us the size of the register that will
1086 /// hold the value, we need to look at Ins[x].ArgVT to see the 'real' type
1087 /// for the original function argument so that we can deduce the correct memory
1088 /// type to use for Ins[x].  In most cases the correct memory type will be
1089 /// Ins[x].ArgVT.  However, this will not always be the case.  If, for example,
1090 /// we have a kernel argument of type v8i8, this argument will be split into
1091 /// 8 parts and each part will be represented by its own item in the Ins array.
1092 /// For each part, Ins[x].ArgVT will be v8i8, which is the full type of
1093 /// the argument before it was split.  From this, we deduce that the memory type
1094 /// for each individual part is i8.  We pass the memory type as LocVT to the
1095 /// calling convention analysis function and the register type (Ins[x].VT) as
1096 /// the ValVT.
1097 void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
1098   CCState &State,
1099   const SmallVectorImpl<ISD::InputArg> &Ins) const {
1100   const MachineFunction &MF = State.getMachineFunction();
1101   const Function &Fn = MF.getFunction();
1102   LLVMContext &Ctx = Fn.getParent()->getContext();
1103   const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
1104   const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset();
1105   CallingConv::ID CC = Fn.getCallingConv();
1106 
1107   Align MaxAlign = Align(1);
1108   uint64_t ExplicitArgOffset = 0;
1109   const DataLayout &DL = Fn.getParent()->getDataLayout();
1110 
1111   unsigned InIndex = 0;
1112 
1113   for (const Argument &Arg : Fn.args()) {
1114     const bool IsByRef = Arg.hasByRefAttr();
1115     Type *BaseArgTy = Arg.getType();
1116     Type *MemArgTy = IsByRef ? Arg.getParamByRefType() : BaseArgTy;
1117     Align Alignment = DL.getValueOrABITypeAlignment(
1118         IsByRef ? Arg.getParamAlign() : std::nullopt, MemArgTy);
1119     MaxAlign = std::max(Alignment, MaxAlign);
1120     uint64_t AllocSize = DL.getTypeAllocSize(MemArgTy);
1121 
1122     uint64_t ArgOffset = alignTo(ExplicitArgOffset, Alignment) + ExplicitOffset;
1123     ExplicitArgOffset = alignTo(ExplicitArgOffset, Alignment) + AllocSize;
1124 
1125     // We're basically throwing away everything passed into us and starting over
1126     // to get accurate in-memory offsets. The "PartOffset" is completely useless
1127     // to us as computed in Ins.
1128     //
1129     // We also need to figure out what type legalization is trying to do to get
1130     // the correct memory offsets.
1131 
1132     SmallVector<EVT, 16> ValueVTs;
1133     SmallVector<uint64_t, 16> Offsets;
1134     ComputeValueVTs(*this, DL, BaseArgTy, ValueVTs, &Offsets, ArgOffset);
1135 
1136     for (unsigned Value = 0, NumValues = ValueVTs.size();
1137          Value != NumValues; ++Value) {
1138       uint64_t BasePartOffset = Offsets[Value];
1139 
1140       EVT ArgVT = ValueVTs[Value];
1141       EVT MemVT = ArgVT;
1142       MVT RegisterVT = getRegisterTypeForCallingConv(Ctx, CC, ArgVT);
1143       unsigned NumRegs = getNumRegistersForCallingConv(Ctx, CC, ArgVT);
1144 
1145       if (NumRegs == 1) {
1146         // This argument is not split, so the IR type is the memory type.
1147         if (ArgVT.isExtended()) {
1148           // We have an extended type, like i24, so we should just use the
1149           // register type.
1150           MemVT = RegisterVT;
1151         } else {
1152           MemVT = ArgVT;
1153         }
1154       } else if (ArgVT.isVector() && RegisterVT.isVector() &&
1155                  ArgVT.getScalarType() == RegisterVT.getScalarType()) {
1156         assert(ArgVT.getVectorNumElements() > RegisterVT.getVectorNumElements());
1157         // We have a vector value which has been split into a vector with
1158         // the same scalar type, but fewer elements.  This should handle
1159         // all the floating-point vector types.
1160         MemVT = RegisterVT;
1161       } else if (ArgVT.isVector() &&
1162                  ArgVT.getVectorNumElements() == NumRegs) {
1163         // This arg has been split so that each element is stored in a separate
1164         // register.
1165         MemVT = ArgVT.getScalarType();
1166       } else if (ArgVT.isExtended()) {
1167         // We have an extended type, like i65.
1168         MemVT = RegisterVT;
1169       } else {
1170         unsigned MemoryBits = ArgVT.getStoreSizeInBits() / NumRegs;
1171         assert(ArgVT.getStoreSizeInBits() % NumRegs == 0);
1172         if (RegisterVT.isInteger()) {
1173           MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
1174         } else if (RegisterVT.isVector()) {
1175           assert(!RegisterVT.getScalarType().isFloatingPoint());
1176           unsigned NumElements = RegisterVT.getVectorNumElements();
1177           assert(MemoryBits % NumElements == 0);
1178           // This vector type has been split into another vector type with
1179           // a different element size.
1180           EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
1181                                            MemoryBits / NumElements);
1182           MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
1183         } else {
1184           llvm_unreachable("cannot deduce memory type.");
1185         }
1186       }
1187 
1188       // Convert one element vectors to scalar.
1189       if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
1190         MemVT = MemVT.getScalarType();
1191 
1192       // Round up vec3/vec5 argument.
1193       if (MemVT.isVector() && !MemVT.isPow2VectorType()) {
1194         assert(MemVT.getVectorNumElements() == 3 ||
1195                MemVT.getVectorNumElements() == 5 ||
1196                (MemVT.getVectorNumElements() >= 9 &&
1197                 MemVT.getVectorNumElements() <= 12));
1198         MemVT = MemVT.getPow2VectorType(State.getContext());
1199       } else if (!MemVT.isSimple() && !MemVT.isVector()) {
1200         MemVT = MemVT.getRoundIntegerType(State.getContext());
1201       }
1202 
1203       unsigned PartOffset = 0;
1204       for (unsigned i = 0; i != NumRegs; ++i) {
1205         State.addLoc(CCValAssign::getCustomMem(InIndex++, RegisterVT,
1206                                                BasePartOffset + PartOffset,
1207                                                MemVT.getSimpleVT(),
1208                                                CCValAssign::Full));
1209         PartOffset += MemVT.getStoreSize();
1210       }
1211     }
1212   }
1213 }
1214 
1215 SDValue AMDGPUTargetLowering::LowerReturn(
1216   SDValue Chain, CallingConv::ID CallConv,
1217   bool isVarArg,
1218   const SmallVectorImpl<ISD::OutputArg> &Outs,
1219   const SmallVectorImpl<SDValue> &OutVals,
1220   const SDLoc &DL, SelectionDAG &DAG) const {
1221   // FIXME: Fails for r600 tests
1222   //assert(!isVarArg && Outs.empty() && OutVals.empty() &&
1223   // "wave terminate should not have return values");
1224   return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
1225 }
1226 
1227 //===---------------------------------------------------------------------===//
1228 // Target specific lowering
1229 //===---------------------------------------------------------------------===//
1230 
1231 /// Selects the correct CCAssignFn for a given CallingConvention value.
1232 CCAssignFn *AMDGPUTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
1233                                                     bool IsVarArg) {
1234   return AMDGPUCallLowering::CCAssignFnForCall(CC, IsVarArg);
1235 }
1236 
1237 CCAssignFn *AMDGPUTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
1238                                                       bool IsVarArg) {
1239   return AMDGPUCallLowering::CCAssignFnForReturn(CC, IsVarArg);
1240 }
1241 
1242 SDValue AMDGPUTargetLowering::addTokenForArgument(SDValue Chain,
1243                                                   SelectionDAG &DAG,
1244                                                   MachineFrameInfo &MFI,
1245                                                   int ClobberedFI) const {
1246   SmallVector<SDValue, 8> ArgChains;
1247   int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
1248   int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;
1249 
1250   // Include the original chain at the beginning of the list. When this is
1251   // used by target LowerCall hooks, this helps legalization find the
1252   // CALLSEQ_BEGIN node.
1253   ArgChains.push_back(Chain);
1254 
1255   // Add a chain value for each stack argument load that overlaps ClobberedFI.
1256   for (SDNode *U : DAG.getEntryNode().getNode()->uses()) {
1257     if (LoadSDNode *L = dyn_cast<LoadSDNode>(U)) {
1258       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) {
1259         if (FI->getIndex() < 0) {
1260           int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
1261           int64_t InLastByte = InFirstByte;
1262           InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;
1263 
1264           if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
1265               (FirstByte <= InFirstByte && InFirstByte <= LastByte))
1266             ArgChains.push_back(SDValue(L, 1));
1267         }
1268       }
1269     }
1270   }
1271 
1272   // Build a tokenfactor for all the chains.
1273   return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
1274 }
1275 
1276 SDValue AMDGPUTargetLowering::lowerUnhandledCall(CallLoweringInfo &CLI,
1277                                                  SmallVectorImpl<SDValue> &InVals,
1278                                                  StringRef Reason) const {
1279   SDValue Callee = CLI.Callee;
1280   SelectionDAG &DAG = CLI.DAG;
1281 
1282   const Function &Fn = DAG.getMachineFunction().getFunction();
1283 
1284   StringRef FuncName("<unknown>");
1285 
1286   if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
1287     FuncName = G->getSymbol();
1288   else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1289     FuncName = G->getGlobal()->getName();
1290 
1291   DiagnosticInfoUnsupported NoCalls(
1292     Fn, Reason + FuncName, CLI.DL.getDebugLoc());
1293   DAG.getContext()->diagnose(NoCalls);
1294 
1295   if (!CLI.IsTailCall) {
1296     for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
1297       InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
1298   }
1299 
1300   return DAG.getEntryNode();
1301 }
1302 
1303 SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
1304                                         SmallVectorImpl<SDValue> &InVals) const {
1305   return lowerUnhandledCall(CLI, InVals, "unsupported call to function ");
1306 }
1307 
1308 SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
1309                                                       SelectionDAG &DAG) const {
1310   const Function &Fn = DAG.getMachineFunction().getFunction();
1311 
1312   DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
1313                                             SDLoc(Op).getDebugLoc());
1314   DAG.getContext()->diagnose(NoDynamicAlloca);
1315   auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
1316   return DAG.getMergeValues(Ops, SDLoc());
1317 }
1318 
1319 SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
1320                                              SelectionDAG &DAG) const {
1321   switch (Op.getOpcode()) {
1322   default:
1323     Op->print(errs(), &DAG);
1324     llvm_unreachable("Custom lowering code for this "
1325                      "instruction is not implemented yet!");
1326     break;
1327   case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
1328   case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
1329   case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
1330   case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
1331   case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
1332   case ISD::FREM: return LowerFREM(Op, DAG);
1333   case ISD::FCEIL: return LowerFCEIL(Op, DAG);
1334   case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
1335   case ISD::FRINT: return LowerFRINT(Op, DAG);
1336   case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
1337   case ISD::FROUNDEVEN:
1338     return LowerFROUNDEVEN(Op, DAG);
1339   case ISD::FROUND: return LowerFROUND(Op, DAG);
1340   case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
1341   case ISD::FLOG2:
1342     return LowerFLOG2(Op, DAG);
1343   case ISD::FLOG:
1344   case ISD::FLOG10:
1345     return LowerFLOGCommon(Op, DAG);
1346   case ISD::FEXP:
1347   case ISD::FEXP10:
1348     return lowerFEXP(Op, DAG);
1349   case ISD::FEXP2:
1350     return lowerFEXP2(Op, DAG);
1351   case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
1352   case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
1353   case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
1354   case ISD::FP_TO_SINT:
1355   case ISD::FP_TO_UINT:
1356     return LowerFP_TO_INT(Op, DAG);
1357   case ISD::CTTZ:
1358   case ISD::CTTZ_ZERO_UNDEF:
1359   case ISD::CTLZ:
1360   case ISD::CTLZ_ZERO_UNDEF:
1361     return LowerCTLZ_CTTZ(Op, DAG);
1362   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
1363   }
1364   return Op;
1365 }
1366 
1367 void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
1368                                               SmallVectorImpl<SDValue> &Results,
1369                                               SelectionDAG &DAG) const {
1370   switch (N->getOpcode()) {
1371   case ISD::SIGN_EXTEND_INREG:
1372     // Different parts of legalization disagree about which type of a
1373     // sign_extend_inreg should be checked for custom lowering. The type being
1374     // extended from is what really matters, but some places check for custom
1375     // lowering of the result type instead. That can end up asking
1376     // ReplaceNodeResults to sext_in_reg to an illegal type, so just do nothing
1377     // here and let the illegal result integer be handled normally.
1378     return;
1379   case ISD::FLOG2:
1380     if (SDValue Lowered = LowerFLOG2(SDValue(N, 0), DAG))
1381       Results.push_back(Lowered);
1382     return;
1383   case ISD::FLOG:
1384   case ISD::FLOG10:
1385     if (SDValue Lowered = LowerFLOGCommon(SDValue(N, 0), DAG))
1386       Results.push_back(Lowered);
1387     return;
1388   case ISD::FEXP2:
1389     if (SDValue Lowered = lowerFEXP2(SDValue(N, 0), DAG))
1390       Results.push_back(Lowered);
1391     return;
1392   case ISD::FEXP:
1393   case ISD::FEXP10:
1394     if (SDValue Lowered = lowerFEXP(SDValue(N, 0), DAG))
1395       Results.push_back(Lowered);
1396     return;
1397   default:
1398     return;
1399   }
1400 }
1401 
1402 SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
1403                                                  SDValue Op,
1404                                                  SelectionDAG &DAG) const {
1405 
1406   const DataLayout &DL = DAG.getDataLayout();
1407   GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
1408   const GlobalValue *GV = G->getGlobal();
1409 
1410   if (!MFI->isModuleEntryFunction()) {
1411     if (std::optional<uint32_t> Address =
1412             AMDGPUMachineFunction::getLDSAbsoluteAddress(*GV)) {
1413       return DAG.getConstant(*Address, SDLoc(Op), Op.getValueType());
1414     }
1415   }
1416 
1417   if (G->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
1418       G->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) {
1419     if (!MFI->isModuleEntryFunction() &&
1420         !GV->getName().equals("llvm.amdgcn.module.lds")) {
1421       SDLoc DL(Op);
1422       const Function &Fn = DAG.getMachineFunction().getFunction();
1423       DiagnosticInfoUnsupported BadLDSDecl(
1424         Fn, "local memory global used by non-kernel function",
1425         DL.getDebugLoc(), DS_Warning);
1426       DAG.getContext()->diagnose(BadLDSDecl);
1427 
1428       // We currently don't have a way to correctly allocate LDS objects that
1429       // aren't directly associated with a kernel. We do force inlining of
1430       // functions that use local objects. However, if these dead functions are
1431       // not eliminated, we don't want a compile time error. Just emit a warning
1432       // and a trap, since there should be no callable path here.
1433       SDValue Trap = DAG.getNode(ISD::TRAP, DL, MVT::Other, DAG.getEntryNode());
1434       SDValue OutputChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
1435                                         Trap, DAG.getRoot());
1436       DAG.setRoot(OutputChain);
1437       return DAG.getUNDEF(Op.getValueType());
1438     }
1439 
1440     // XXX: What does the value of G->getOffset() mean?
1441     assert(G->getOffset() == 0 &&
1442          "Do not know what to do with an non-zero offset");
1443 
1444     // TODO: We could emit code to handle the initialization somewhere.
1445     // We ignore the initializer for now and legalize it to allow selection.
1446     // The initializer is diagnosed as an error during assembly emission anyway.
1447     unsigned Offset = MFI->allocateLDSGlobal(DL, *cast<GlobalVariable>(GV));
1448     return DAG.getConstant(Offset, SDLoc(Op), Op.getValueType());
1449   }
1450   return SDValue();
1451 }
1452 
1453 SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
1454                                                   SelectionDAG &DAG) const {
1455   SmallVector<SDValue, 8> Args;
1456   SDLoc SL(Op);
1457 
1458   EVT VT = Op.getValueType();
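       // For sub-32-bit elements, try to concatenate whole 32-bit registers
       // instead. For example, (concat_vectors v2f16:a, v2f16:b) is rebuilt as a
       // v2i32 build_vector of the bitcast operands and then bitcast back to
       // v4f16.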
1459   if (VT.getVectorElementType().getSizeInBits() < 32) {
1460     unsigned OpBitSize = Op.getOperand(0).getValueType().getSizeInBits();
1461     if (OpBitSize >= 32 && OpBitSize % 32 == 0) {
1462       unsigned NewNumElt = OpBitSize / 32;
1463       EVT NewEltVT = (NewNumElt == 1) ? MVT::i32
1464                                       : EVT::getVectorVT(*DAG.getContext(),
1465                                                          MVT::i32, NewNumElt);
1466       for (const SDUse &U : Op->ops()) {
1467         SDValue In = U.get();
1468         SDValue NewIn = DAG.getNode(ISD::BITCAST, SL, NewEltVT, In);
1469         if (NewNumElt > 1)
1470           DAG.ExtractVectorElements(NewIn, Args);
1471         else
1472           Args.push_back(NewIn);
1473       }
1474 
1475       EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
1476                                    NewNumElt * Op.getNumOperands());
1477       SDValue BV = DAG.getBuildVector(NewVT, SL, Args);
1478       return DAG.getNode(ISD::BITCAST, SL, VT, BV);
1479     }
1480   }
1481 
1482   for (const SDUse &U : Op->ops())
1483     DAG.ExtractVectorElements(U.get(), Args);
1484 
1485   return DAG.getBuildVector(Op.getValueType(), SL, Args);
1486 }
1487 
1488 SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
1489                                                      SelectionDAG &DAG) const {
1490   SDLoc SL(Op);
1491   SmallVector<SDValue, 8> Args;
1492   unsigned Start = Op.getConstantOperandVal(1);
1493   EVT VT = Op.getValueType();
1494   EVT SrcVT = Op.getOperand(0).getValueType();
1495 
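       // For 16-bit elements with an even start index, extract whole 32-bit
       // registers. For example, extracting v2f16 at index 2 from v8f16 becomes a
       // single i32 element extract from the bitcast v4i32 source, which is then
       // bitcast back to v2f16.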
1496   if (VT.getScalarSizeInBits() == 16 && Start % 2 == 0) {
1497     unsigned NumElt = VT.getVectorNumElements();
1498     unsigned NumSrcElt = SrcVT.getVectorNumElements();
1499     assert(NumElt % 2 == 0 && NumSrcElt % 2 == 0 && "expect legal types");
1500 
1501     // Extract 32-bit registers one at a time.
1502     EVT NewSrcVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumSrcElt / 2);
1503     EVT NewVT = NumElt == 2
1504                     ? MVT::i32
1505                     : EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElt / 2);
1506     SDValue Tmp = DAG.getNode(ISD::BITCAST, SL, NewSrcVT, Op.getOperand(0));
1507 
1508     DAG.ExtractVectorElements(Tmp, Args, Start / 2, NumElt / 2);
1509     if (NumElt == 2)
1510       Tmp = Args[0];
1511     else
1512       Tmp = DAG.getBuildVector(NewVT, SL, Args);
1513 
1514     return DAG.getNode(ISD::BITCAST, SL, VT, Tmp);
1515   }
1516 
1517   DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
1518                             VT.getVectorNumElements());
1519 
1520   return DAG.getBuildVector(Op.getValueType(), SL, Args);
1521 }
1522 
1523 // TODO: Handle fabs too
1524 static SDValue peekFNeg(SDValue Val) {
1525   if (Val.getOpcode() == ISD::FNEG)
1526     return Val.getOperand(0);
1527 
1528   return Val;
1529 }
1530 
1531 static SDValue peekFPSignOps(SDValue Val) {
1532   if (Val.getOpcode() == ISD::FNEG)
1533     Val = Val.getOperand(0);
1534   if (Val.getOpcode() == ISD::FABS)
1535     Val = Val.getOperand(0);
1536   if (Val.getOpcode() == ISD::FCOPYSIGN)
1537     Val = Val.getOperand(0);
1538   return Val;
1539 }
1540 
1541 SDValue AMDGPUTargetLowering::combineFMinMaxLegacyImpl(
1542     const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, SDValue True,
1543     SDValue False, SDValue CC, DAGCombinerInfo &DCI) const {
1544   SelectionDAG &DAG = DCI.DAG;
1545   ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
1546   switch (CCOpcode) {
1547   case ISD::SETOEQ:
1548   case ISD::SETONE:
1549   case ISD::SETUNE:
1550   case ISD::SETNE:
1551   case ISD::SETUEQ:
1552   case ISD::SETEQ:
1553   case ISD::SETFALSE:
1554   case ISD::SETFALSE2:
1555   case ISD::SETTRUE:
1556   case ISD::SETTRUE2:
1557   case ISD::SETUO:
1558   case ISD::SETO:
1559     break;
1560   case ISD::SETULE:
1561   case ISD::SETULT: {
1562     if (LHS == True)
1563       return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
1564     return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
1565   }
1566   case ISD::SETOLE:
1567   case ISD::SETOLT:
1568   case ISD::SETLE:
1569   case ISD::SETLT: {
1570     // Ordered. Assume ordered for undefined.
1571 
1572     // Only do this after legalization to avoid interfering with other combines
1573     // which might occur.
1574     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
1575         !DCI.isCalledByLegalizer())
1576       return SDValue();
1577 
1578     // We need to permute the operands to get the correct NaN behavior. The
1579     // selected operand is the second one based on the failing compare with NaN,
1580     // so permute it based on the compare type the hardware uses.
1581     if (LHS == True)
1582       return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
1583     return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
1584   }
1585   case ISD::SETUGE:
1586   case ISD::SETUGT: {
1587     if (LHS == True)
1588       return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
1589     return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
1590   }
1591   case ISD::SETGT:
1592   case ISD::SETGE:
1593   case ISD::SETOGE:
1594   case ISD::SETOGT: {
1595     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
1596         !DCI.isCalledByLegalizer())
1597       return SDValue();
1598 
1599     if (LHS == True)
1600       return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
1601     return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
1602   }
1603   case ISD::SETCC_INVALID:
1604     llvm_unreachable("Invalid setcc condcode!");
1605   }
1606   return SDValue();
1607 }
1608 
1609 /// Generate Min/Max node
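     /// For example, (select (setcc olt x, y), x, y) becomes (fmin_legacy x, y),
     /// while the unordered forms swap the operands so that the legacy NaN
     /// behavior (the second operand is selected when the compare fails) matches
     /// the original select.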
1610 SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT,
1611                                                    SDValue LHS, SDValue RHS,
1612                                                    SDValue True, SDValue False,
1613                                                    SDValue CC,
1614                                                    DAGCombinerInfo &DCI) const {
1615   if ((LHS == True && RHS == False) || (LHS == False && RHS == True))
1616     return combineFMinMaxLegacyImpl(DL, VT, LHS, RHS, True, False, CC, DCI);
1617 
1618   SelectionDAG &DAG = DCI.DAG;
1619 
1620   // If we can't directly match this, try to see if we can fold an fneg to
1621   // match.
1622 
1623   ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
1624   ConstantFPSDNode *CFalse = dyn_cast<ConstantFPSDNode>(False);
1625   SDValue NegTrue = peekFNeg(True);
1626 
1627   // Undo the combine foldFreeOpFromSelect does if it helps us match the
1628   // fmin/fmax.
1629   //
1630   // select (fcmp olt (lhs, K)), (fneg lhs), -K
1631   // -> fneg (fmin_legacy lhs, K)
1632   //
1633   // TODO: Use getNegatedExpression
1634   if (LHS == NegTrue && CFalse && CRHS) {
1635     APFloat NegRHS = neg(CRHS->getValueAPF());
1636     if (NegRHS == CFalse->getValueAPF()) {
1637       SDValue Combined =
1638           combineFMinMaxLegacyImpl(DL, VT, LHS, RHS, NegTrue, False, CC, DCI);
1639       if (Combined)
1640         return DAG.getNode(ISD::FNEG, DL, VT, Combined);
1641       return SDValue();
1642     }
1643   }
1644 
1645   return SDValue();
1646 }
1647 
1648 std::pair<SDValue, SDValue>
1649 AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
1650   SDLoc SL(Op);
1651 
1652   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1653 
1654   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1655   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1656 
1657   SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1658   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1659 
1660   return std::pair(Lo, Hi);
1661 }
1662 
1663 SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
1664   SDLoc SL(Op);
1665 
1666   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1667   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1668   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1669 }
1670 
1671 SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
1672   SDLoc SL(Op);
1673 
1674   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1675   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1676   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1677 }
1678 
1679 // Split a vector type into two parts. The first part is a power of two vector.
1680 // The second part is whatever is left over, and is a scalar if it would
1681 // otherwise be a 1-vector.
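     // For example, v7i32 splits into (v4i32, v3i32) and v3f32 into (v2f32, f32).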
1682 std::pair<EVT, EVT>
1683 AMDGPUTargetLowering::getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const {
1684   EVT LoVT, HiVT;
1685   EVT EltVT = VT.getVectorElementType();
1686   unsigned NumElts = VT.getVectorNumElements();
1687   unsigned LoNumElts = PowerOf2Ceil((NumElts + 1) / 2);
1688   LoVT = EVT::getVectorVT(*DAG.getContext(), EltVT, LoNumElts);
1689   HiVT = NumElts - LoNumElts == 1
1690              ? EltVT
1691              : EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts - LoNumElts);
1692   return std::pair(LoVT, HiVT);
1693 }
1694 
1695 // Split a vector value into two parts of types LoVT and HiVT. HiVT could be
1696 // scalar.
1697 std::pair<SDValue, SDValue>
1698 AMDGPUTargetLowering::splitVector(const SDValue &N, const SDLoc &DL,
1699                                   const EVT &LoVT, const EVT &HiVT,
1700                                   SelectionDAG &DAG) const {
1701   assert(LoVT.getVectorNumElements() +
1702                  (HiVT.isVector() ? HiVT.getVectorNumElements() : 1) <=
1703              N.getValueType().getVectorNumElements() &&
1704          "More vector elements requested than available!");
1705   SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
1706                            DAG.getVectorIdxConstant(0, DL));
1707   SDValue Hi = DAG.getNode(
1708       HiVT.isVector() ? ISD::EXTRACT_SUBVECTOR : ISD::EXTRACT_VECTOR_ELT, DL,
1709       HiVT, N, DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), DL));
1710   return std::pair(Lo, Hi);
1711 }
1712 
1713 SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
1714                                               SelectionDAG &DAG) const {
1715   LoadSDNode *Load = cast<LoadSDNode>(Op);
1716   EVT VT = Op.getValueType();
1717   SDLoc SL(Op);
1718 
1719 
1720   // If this is a 2 element vector, we really want to scalarize and not create
1721   // weird 1 element vectors.
1722   if (VT.getVectorNumElements() == 2) {
1723     SDValue Ops[2];
1724     std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
1725     return DAG.getMergeValues(Ops, SL);
1726   }
1727 
1728   SDValue BasePtr = Load->getBasePtr();
1729   EVT MemVT = Load->getMemoryVT();
1730 
1731   const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1732 
1733   EVT LoVT, HiVT;
1734   EVT LoMemVT, HiMemVT;
1735   SDValue Lo, Hi;
1736 
1737   std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
1738   std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
1739   std::tie(Lo, Hi) = splitVector(Op, SL, LoVT, HiVT, DAG);
1740 
1741   unsigned Size = LoMemVT.getStoreSize();
1742   Align BaseAlign = Load->getAlign();
1743   Align HiAlign = commonAlignment(BaseAlign, Size);
1744 
1745   SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
1746                                   Load->getChain(), BasePtr, SrcValue, LoMemVT,
1747                                   BaseAlign, Load->getMemOperand()->getFlags());
1748   SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::getFixed(Size));
1749   SDValue HiLoad =
1750       DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
1751                      HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
1752                      HiMemVT, HiAlign, Load->getMemOperand()->getFlags());
1753 
1754   SDValue Join;
1755   if (LoVT == HiVT) {
1756     // This is the case where the vector length is a power of two, so it was
1757     // split evenly.
1757     Join = DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad);
1758   } else {
1759     Join = DAG.getNode(ISD::INSERT_SUBVECTOR, SL, VT, DAG.getUNDEF(VT), LoLoad,
1760                        DAG.getVectorIdxConstant(0, SL));
1761     Join = DAG.getNode(
1762         HiVT.isVector() ? ISD::INSERT_SUBVECTOR : ISD::INSERT_VECTOR_ELT, SL,
1763         VT, Join, HiLoad,
1764         DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), SL));
1765   }
1766 
1767   SDValue Ops[] = {Join, DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
1768                                      LoLoad.getValue(1), HiLoad.getValue(1))};
1769 
1770   return DAG.getMergeValues(Ops, SL);
1771 }
1772 
1773 SDValue AMDGPUTargetLowering::WidenOrSplitVectorLoad(SDValue Op,
1774                                                      SelectionDAG &DAG) const {
1775   LoadSDNode *Load = cast<LoadSDNode>(Op);
1776   EVT VT = Op.getValueType();
1777   SDValue BasePtr = Load->getBasePtr();
1778   EVT MemVT = Load->getMemoryVT();
1779   SDLoc SL(Op);
1780   const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1781   Align BaseAlign = Load->getAlign();
1782   unsigned NumElements = MemVT.getVectorNumElements();
1783 
1784   // Widen from vec3 to vec4 when the load is at least 8-byte aligned
1785   // or 16-byte fully dereferenceable. Otherwise, split the vector load.
1786   if (NumElements != 3 ||
1787       (BaseAlign < Align(8) &&
1788        !SrcValue.isDereferenceable(16, *DAG.getContext(), DAG.getDataLayout())))
1789     return SplitVectorLoad(Op, DAG);
1790 
1791   assert(NumElements == 3);
1792 
1793   EVT WideVT =
1794       EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4);
1795   EVT WideMemVT =
1796       EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), 4);
1797   SDValue WideLoad = DAG.getExtLoad(
1798       Load->getExtensionType(), SL, WideVT, Load->getChain(), BasePtr, SrcValue,
1799       WideMemVT, BaseAlign, Load->getMemOperand()->getFlags());
1800   return DAG.getMergeValues(
1801       {DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, VT, WideLoad,
1802                    DAG.getVectorIdxConstant(0, SL)),
1803        WideLoad.getValue(1)},
1804       SL);
1805 }
1806 
1807 SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
1808                                                SelectionDAG &DAG) const {
1809   StoreSDNode *Store = cast<StoreSDNode>(Op);
1810   SDValue Val = Store->getValue();
1811   EVT VT = Val.getValueType();
1812 
1813   // If this is a 2 element vector, we really want to scalarize and not create
1814   // weird 1 element vectors.
1815   if (VT.getVectorNumElements() == 2)
1816     return scalarizeVectorStore(Store, DAG);
1817 
1818   EVT MemVT = Store->getMemoryVT();
1819   SDValue Chain = Store->getChain();
1820   SDValue BasePtr = Store->getBasePtr();
1821   SDLoc SL(Op);
1822 
1823   EVT LoVT, HiVT;
1824   EVT LoMemVT, HiMemVT;
1825   SDValue Lo, Hi;
1826 
1827   std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
1828   std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
1829   std::tie(Lo, Hi) = splitVector(Val, SL, LoVT, HiVT, DAG);
1830 
1831   SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, LoMemVT.getStoreSize());
1832 
1833   const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
1834   Align BaseAlign = Store->getAlign();
1835   unsigned Size = LoMemVT.getStoreSize();
1836   Align HiAlign = commonAlignment(BaseAlign, Size);
1837 
1838   SDValue LoStore =
1839       DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
1840                         Store->getMemOperand()->getFlags());
1841   SDValue HiStore =
1842       DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size),
1843                         HiMemVT, HiAlign, Store->getMemOperand()->getFlags());
1844 
1845   return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
1846 }
1847 
1848 // This is a shortcut for integer division because we have fast i32<->f32
1849 // conversions, and fast f32 reciprocal instructions. The 24-bit significand of
1850 // an f32 is enough to accurately represent up to a 24-bit signed integer.
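     // Roughly, for the unsigned case:
     //   float fa = (float)a, fb = (float)b;
     //   float fq = trunc(fa * rcp(fb));            // quotient estimate
     //   float fr = fabs(fma(-fq, fb, fa));         // remainder estimate
     //   uint  q  = (uint)fq + (fr >= fabs(fb) ? 1 : 0);
     //   uint  r  = a - q * b;
     // The signed case uses a +/-1 correction term (jq) taken from the sign of
     // a ^ b, and the results are truncated back to DivBits bits at the end.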
1851 SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG,
1852                                             bool Sign) const {
1853   SDLoc DL(Op);
1854   EVT VT = Op.getValueType();
1855   SDValue LHS = Op.getOperand(0);
1856   SDValue RHS = Op.getOperand(1);
1857   MVT IntVT = MVT::i32;
1858   MVT FltVT = MVT::f32;
1859 
1860   unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS);
1861   if (LHSSignBits < 9)
1862     return SDValue();
1863 
1864   unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS);
1865   if (RHSSignBits < 9)
1866     return SDValue();
1867 
1868   unsigned BitSize = VT.getSizeInBits();
1869   unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
1870   unsigned DivBits = BitSize - SignBits;
1871   if (Sign)
1872     ++DivBits;
1873 
1874   ISD::NodeType ToFp = Sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
1875   ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
1876 
1877   SDValue jq = DAG.getConstant(1, DL, IntVT);
1878 
1879   if (Sign) {
1880     // char|short jq = ia ^ ib;
1881     jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
1882 
1883     // jq = jq >> (bitsize - 2)
1884     jq = DAG.getNode(ISD::SRA, DL, VT, jq,
1885                      DAG.getConstant(BitSize - 2, DL, VT));
1886 
1887     // jq = jq | 0x1
1888     jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
1889   }
1890 
1891   // int ia = (int)LHS;
1892   SDValue ia = LHS;
1893 
1894   // int ib = (int)RHS;
1895   SDValue ib = RHS;
1896 
1897   // float fa = (float)ia;
1898   SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);
1899 
1900   // float fb = (float)ib;
1901   SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);
1902 
1903   SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
1904                            fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));
1905 
1906   // fq = trunc(fq);
1907   fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);
1908 
1909   // float fqneg = -fq;
1910   SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);
1911 
1912   MachineFunction &MF = DAG.getMachineFunction();
1913 
1914   bool UseFmadFtz = false;
1915   if (Subtarget->isGCN()) {
1916     const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1917     UseFmadFtz =
1918         MFI->getMode().FP32Denormals != DenormalMode::getPreserveSign();
1919   }
1920 
1921   // float fr = mad(fqneg, fb, fa);
1922   unsigned OpCode = !Subtarget->hasMadMacF32Insts() ? (unsigned)ISD::FMA
1923                     : UseFmadFtz ? (unsigned)AMDGPUISD::FMAD_FTZ
1924                                  : (unsigned)ISD::FMAD;
1925   SDValue fr = DAG.getNode(OpCode, DL, FltVT, fqneg, fb, fa);
1926 
1927   // int iq = (int)fq;
1928   SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);
1929 
1930   // fr = fabs(fr);
1931   fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);
1932 
1933   // fb = fabs(fb);
1934   fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);
1935 
1936   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
1937 
1938   // int cv = fr >= fb;
1939   SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);
1940 
1941   // jq = (cv ? jq : 0);
1942   jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));
1943 
1944   // dst = iq + jq;
1945   SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);
1946 
1947   // Rem needs compensation; it's easier to recompute it.
1948   SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
1949   Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);
1950 
1951   // Truncate to the number of bits this divide actually produces.
1952   if (Sign) {
1953     SDValue InRegSize
1954       = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits));
1955     Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize);
1956     Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize);
1957   } else {
1958     SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT);
1959     Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask);
1960     Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask);
1961   }
1962 
1963   return DAG.getMergeValues({ Div, Rem }, DL);
1964 }
1965 
1966 void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
1967                                       SelectionDAG &DAG,
1968                                       SmallVectorImpl<SDValue> &Results) const {
1969   SDLoc DL(Op);
1970   EVT VT = Op.getValueType();
1971 
1972   assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64");
1973 
1974   EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1975 
1976   SDValue One = DAG.getConstant(1, DL, HalfVT);
1977   SDValue Zero = DAG.getConstant(0, DL, HalfVT);
1978 
1979   // HiLo split
1980   SDValue LHS_Lo, LHS_Hi;
1981   SDValue LHS = Op.getOperand(0);
1982   std::tie(LHS_Lo, LHS_Hi) = DAG.SplitScalar(LHS, DL, HalfVT, HalfVT);
1983 
1984   SDValue RHS_Lo, RHS_Hi;
1985   SDValue RHS = Op.getOperand(1);
1986   std::tie(RHS_Lo, RHS_Hi) = DAG.SplitScalar(RHS, DL, HalfVT, HalfVT);
1987 
1988   if (DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
1989       DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {
1990 
1991     SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1992                               LHS_Lo, RHS_Lo);
1993 
1994     SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), Zero});
1995     SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), Zero});
1996 
1997     Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV));
1998     Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM));
1999     return;
2000   }
2001 
2002   if (isTypeLegal(MVT::i64)) {
2003     // The algorithm here is based on ideas from "Software Integer Division",
2004     // Tom Rodeheffer, August 2008.
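         // Roughly, the expansion below computes:
         //   rcp64 ~= 2^64 / rhs   (seeded from an f32 reciprocal and refined with
         //                          two Newton-Raphson steps:
         //                          rcp' = rcp + mulhu(rcp, -rhs * rcp))
         //   q0 = mulhu(lhs, rcp64)           // initial quotient estimate
         //   r0 = lhs - q0 * rhs              // may still be >= rhs
         //   two conditional corrections (C3 and C6 below) each add 1 to the
         //   quotient and subtract rhs from the remainder.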
2005 
2006     MachineFunction &MF = DAG.getMachineFunction();
2007     const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
2008 
2009     // Compute denominator reciprocal.
2010     unsigned FMAD =
2011         !Subtarget->hasMadMacF32Insts() ? (unsigned)ISD::FMA
2012         : MFI->getMode().FP32Denormals == DenormalMode::getPreserveSign()
2013             ? (unsigned)ISD::FMAD
2014             : (unsigned)AMDGPUISD::FMAD_FTZ;
2015 
2016     SDValue Cvt_Lo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Lo);
2017     SDValue Cvt_Hi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Hi);
2018     SDValue Mad1 = DAG.getNode(FMAD, DL, MVT::f32, Cvt_Hi,
2019       DAG.getConstantFP(APInt(32, 0x4f800000).bitsToFloat(), DL, MVT::f32),
2020       Cvt_Lo);
2021     SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, DL, MVT::f32, Mad1);
2022     SDValue Mul1 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Rcp,
2023       DAG.getConstantFP(APInt(32, 0x5f7ffffc).bitsToFloat(), DL, MVT::f32));
2024     SDValue Mul2 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Mul1,
2025       DAG.getConstantFP(APInt(32, 0x2f800000).bitsToFloat(), DL, MVT::f32));
2026     SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, MVT::f32, Mul2);
2027     SDValue Mad2 = DAG.getNode(FMAD, DL, MVT::f32, Trunc,
2028       DAG.getConstantFP(APInt(32, 0xcf800000).bitsToFloat(), DL, MVT::f32),
2029       Mul1);
2030     SDValue Rcp_Lo = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Mad2);
2031     SDValue Rcp_Hi = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Trunc);
2032     SDValue Rcp64 = DAG.getBitcast(VT,
2033                         DAG.getBuildVector(MVT::v2i32, DL, {Rcp_Lo, Rcp_Hi}));
2034 
2035     SDValue Zero64 = DAG.getConstant(0, DL, VT);
2036     SDValue One64  = DAG.getConstant(1, DL, VT);
2037     SDValue Zero1 = DAG.getConstant(0, DL, MVT::i1);
2038     SDVTList HalfCarryVT = DAG.getVTList(HalfVT, MVT::i1);
2039 
2040     // First round of UNR (Unsigned integer Newton-Raphson).
2041     SDValue Neg_RHS = DAG.getNode(ISD::SUB, DL, VT, Zero64, RHS);
2042     SDValue Mullo1 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Rcp64);
2043     SDValue Mulhi1 = DAG.getNode(ISD::MULHU, DL, VT, Rcp64, Mullo1);
2044     SDValue Mulhi1_Lo, Mulhi1_Hi;
2045     std::tie(Mulhi1_Lo, Mulhi1_Hi) =
2046         DAG.SplitScalar(Mulhi1, DL, HalfVT, HalfVT);
2047     SDValue Add1_Lo = DAG.getNode(ISD::UADDO_CARRY, DL, HalfCarryVT, Rcp_Lo,
2048                                   Mulhi1_Lo, Zero1);
2049     SDValue Add1_Hi = DAG.getNode(ISD::UADDO_CARRY, DL, HalfCarryVT, Rcp_Hi,
2050                                   Mulhi1_Hi, Add1_Lo.getValue(1));
2051     SDValue Add1 = DAG.getBitcast(VT,
2052                         DAG.getBuildVector(MVT::v2i32, DL, {Add1_Lo, Add1_Hi}));
2053 
2054     // Second round of UNR.
2055     SDValue Mullo2 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Add1);
2056     SDValue Mulhi2 = DAG.getNode(ISD::MULHU, DL, VT, Add1, Mullo2);
2057     SDValue Mulhi2_Lo, Mulhi2_Hi;
2058     std::tie(Mulhi2_Lo, Mulhi2_Hi) =
2059         DAG.SplitScalar(Mulhi2, DL, HalfVT, HalfVT);
2060     SDValue Add2_Lo = DAG.getNode(ISD::UADDO_CARRY, DL, HalfCarryVT, Add1_Lo,
2061                                   Mulhi2_Lo, Zero1);
2062     SDValue Add2_Hi = DAG.getNode(ISD::UADDO_CARRY, DL, HalfCarryVT, Add1_Hi,
2063                                   Mulhi2_Hi, Add2_Lo.getValue(1));
2064     SDValue Add2 = DAG.getBitcast(VT,
2065                         DAG.getBuildVector(MVT::v2i32, DL, {Add2_Lo, Add2_Hi}));
2066 
2067     SDValue Mulhi3 = DAG.getNode(ISD::MULHU, DL, VT, LHS, Add2);
2068 
2069     SDValue Mul3 = DAG.getNode(ISD::MUL, DL, VT, RHS, Mulhi3);
2070 
2071     SDValue Mul3_Lo, Mul3_Hi;
2072     std::tie(Mul3_Lo, Mul3_Hi) = DAG.SplitScalar(Mul3, DL, HalfVT, HalfVT);
2073     SDValue Sub1_Lo = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, LHS_Lo,
2074                                   Mul3_Lo, Zero1);
2075     SDValue Sub1_Hi = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, LHS_Hi,
2076                                   Mul3_Hi, Sub1_Lo.getValue(1));
2077     SDValue Sub1_Mi = DAG.getNode(ISD::SUB, DL, HalfVT, LHS_Hi, Mul3_Hi);
2078     SDValue Sub1 = DAG.getBitcast(VT,
2079                         DAG.getBuildVector(MVT::v2i32, DL, {Sub1_Lo, Sub1_Hi}));
2080 
2081     SDValue MinusOne = DAG.getConstant(0xffffffffu, DL, HalfVT);
2082     SDValue C1 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, MinusOne, Zero,
2083                                  ISD::SETUGE);
2084     SDValue C2 = DAG.getSelectCC(DL, Sub1_Lo, RHS_Lo, MinusOne, Zero,
2085                                  ISD::SETUGE);
2086     SDValue C3 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, C2, C1, ISD::SETEQ);
2087 
2088     // TODO: Here and below, portions of the code could be enclosed in if/endif
2089     // blocks. Currently the control flow is unconditional, and we have 4
2090     // selects after the potential endif to substitute for PHIs.
2091 
2092     // if C3 != 0 ...
2093     SDValue Sub2_Lo = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, Sub1_Lo,
2094                                   RHS_Lo, Zero1);
2095     SDValue Sub2_Mi = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, Sub1_Mi,
2096                                   RHS_Hi, Sub1_Lo.getValue(1));
2097     SDValue Sub2_Hi = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, Sub2_Mi,
2098                                   Zero, Sub2_Lo.getValue(1));
2099     SDValue Sub2 = DAG.getBitcast(VT,
2100                         DAG.getBuildVector(MVT::v2i32, DL, {Sub2_Lo, Sub2_Hi}));
2101 
2102     SDValue Add3 = DAG.getNode(ISD::ADD, DL, VT, Mulhi3, One64);
2103 
2104     SDValue C4 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, MinusOne, Zero,
2105                                  ISD::SETUGE);
2106     SDValue C5 = DAG.getSelectCC(DL, Sub2_Lo, RHS_Lo, MinusOne, Zero,
2107                                  ISD::SETUGE);
2108     SDValue C6 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, C5, C4, ISD::SETEQ);
2109 
2110     // if (C6 != 0)
2111     SDValue Add4 = DAG.getNode(ISD::ADD, DL, VT, Add3, One64);
2112 
2113     SDValue Sub3_Lo = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, Sub2_Lo,
2114                                   RHS_Lo, Zero1);
2115     SDValue Sub3_Mi = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, Sub2_Mi,
2116                                   RHS_Hi, Sub2_Lo.getValue(1));
2117     SDValue Sub3_Hi = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, Sub3_Mi,
2118                                   Zero, Sub3_Lo.getValue(1));
2119     SDValue Sub3 = DAG.getBitcast(VT,
2120                         DAG.getBuildVector(MVT::v2i32, DL, {Sub3_Lo, Sub3_Hi}));
2121 
2122     // endif C6
2123     // endif C3
2124 
2125     SDValue Sel1 = DAG.getSelectCC(DL, C6, Zero, Add4, Add3, ISD::SETNE);
2126     SDValue Div  = DAG.getSelectCC(DL, C3, Zero, Sel1, Mulhi3, ISD::SETNE);
2127 
2128     SDValue Sel2 = DAG.getSelectCC(DL, C6, Zero, Sub3, Sub2, ISD::SETNE);
2129     SDValue Rem  = DAG.getSelectCC(DL, C3, Zero, Sel2, Sub1, ISD::SETNE);
2130 
2131     Results.push_back(Div);
2132     Results.push_back(Rem);
2133 
2134     return;
2135   }
2136 
2137   // R600 expansion.
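       // The speculative DIV/REM values below handle the high half of the
       // dividend when RHS fits in 32 bits; the loop then performs restoring long
       // division over the low 32 bits, one bit per iteration:
       //   REM = (REM << 1) | next bit of LHS_Lo;
       //   if (REM >= RHS) { DIV_Lo |= 1 << bitPos; REM -= RHS; }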
2138   // Get Speculative values
2139   SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
2140   SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
2141 
2142   SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, Zero, REM_Part, LHS_Hi, ISD::SETEQ);
2143   SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, Zero});
2144   REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);
2145 
2146   SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, Zero, DIV_Part, Zero, ISD::SETEQ);
2147   SDValue DIV_Lo = Zero;
2148 
2149   const unsigned halfBitWidth = HalfVT.getSizeInBits();
2150 
2151   for (unsigned i = 0; i < halfBitWidth; ++i) {
2152     const unsigned bitPos = halfBitWidth - i - 1;
2153     SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
2154     // Get value of high bit
2155     SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
2156     HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, One);
2157     HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
2158 
2159     // Shift
2160     REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
2161     // Add LHS high bit
2162     REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
2163 
2164     SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
2165     SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, Zero, ISD::SETUGE);
2166 
2167     DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
2168 
2169     // Update REM
2170     SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
2171     REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
2172   }
2173 
2174   SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
2175   DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
2176   Results.push_back(DIV);
2177   Results.push_back(REM);
2178 }
2179 
2180 SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
2181                                            SelectionDAG &DAG) const {
2182   SDLoc DL(Op);
2183   EVT VT = Op.getValueType();
2184 
2185   if (VT == MVT::i64) {
2186     SmallVector<SDValue, 2> Results;
2187     LowerUDIVREM64(Op, DAG, Results);
2188     return DAG.getMergeValues(Results, DL);
2189   }
2190 
2191   if (VT == MVT::i32) {
2192     if (SDValue Res = LowerDIVREM24(Op, DAG, false))
2193       return Res;
2194   }
2195 
2196   SDValue X = Op.getOperand(0);
2197   SDValue Y = Op.getOperand(1);
2198 
2199   // See AMDGPUCodeGenPrepare::expandDivRem32 for a description of the
2200   // algorithm used here.
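       // In short:
       //   z  = urecip(y)                    // fixed-point estimate of 1/y
       //   z += mulhu(z, -y * z)             // one Newton-Raphson refinement step
       //   q  = mulhu(x, z);  r = x - q * y  // initial quotient/remainder
       //   then twice: if (r >= y) { q += 1; r -= y; }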
2201 
2202   // Initial estimate of inv(y).
2203   SDValue Z = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Y);
2204 
2205   // One round of UNR.
2206   SDValue NegY = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Y);
2207   SDValue NegYZ = DAG.getNode(ISD::MUL, DL, VT, NegY, Z);
2208   Z = DAG.getNode(ISD::ADD, DL, VT, Z,
2209                   DAG.getNode(ISD::MULHU, DL, VT, Z, NegYZ));
2210 
2211   // Quotient/remainder estimate.
2212   SDValue Q = DAG.getNode(ISD::MULHU, DL, VT, X, Z);
2213   SDValue R =
2214       DAG.getNode(ISD::SUB, DL, VT, X, DAG.getNode(ISD::MUL, DL, VT, Q, Y));
2215 
2216   // First quotient/remainder refinement.
2217   EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2218   SDValue One = DAG.getConstant(1, DL, VT);
2219   SDValue Cond = DAG.getSetCC(DL, CCVT, R, Y, ISD::SETUGE);
2220   Q = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2221                   DAG.getNode(ISD::ADD, DL, VT, Q, One), Q);
2222   R = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2223                   DAG.getNode(ISD::SUB, DL, VT, R, Y), R);
2224 
2225   // Second quotient/remainder refinement.
2226   Cond = DAG.getSetCC(DL, CCVT, R, Y, ISD::SETUGE);
2227   Q = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2228                   DAG.getNode(ISD::ADD, DL, VT, Q, One), Q);
2229   R = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2230                   DAG.getNode(ISD::SUB, DL, VT, R, Y), R);
2231 
2232   return DAG.getMergeValues({Q, R}, DL);
2233 }
2234 
2235 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
2236                                            SelectionDAG &DAG) const {
2237   SDLoc DL(Op);
2238   EVT VT = Op.getValueType();
2239 
2240   SDValue LHS = Op.getOperand(0);
2241   SDValue RHS = Op.getOperand(1);
2242 
2243   SDValue Zero = DAG.getConstant(0, DL, VT);
2244   SDValue NegOne = DAG.getConstant(-1, DL, VT);
2245 
2246   if (VT == MVT::i32) {
2247     if (SDValue Res = LowerDIVREM24(Op, DAG, true))
2248       return Res;
2249   }
2250 
2251   if (VT == MVT::i64 &&
2252       DAG.ComputeNumSignBits(LHS) > 32 &&
2253       DAG.ComputeNumSignBits(RHS) > 32) {
2254     EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
2255 
2256     // HiLo split
2257     SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
2258     SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
2259     SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
2260                                  LHS_Lo, RHS_Lo);
2261     SDValue Res[2] = {
2262       DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
2263       DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
2264     };
2265     return DAG.getMergeValues(Res, DL);
2266   }
2267 
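       // General case: compute |lhs| and |rhs| with the (x + sign) ^ sign trick
       // (sign is 0 or -1), do an unsigned divrem, and then restore the signs the
       // same way: the quotient gets sign(lhs) ^ sign(rhs), the remainder gets
       // sign(lhs).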
2268   SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
2269   SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
2270   SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
2271   SDValue RSign = LHSign; // Remainder sign is the same as LHS
2272 
2273   LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
2274   RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);
2275 
2276   LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
2277   RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);
2278 
2279   SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
2280   SDValue Rem = Div.getValue(1);
2281 
2282   Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
2283   Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);
2284 
2285   Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
2286   Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);
2287 
2288   SDValue Res[2] = {
2289     Div,
2290     Rem
2291   };
2292   return DAG.getMergeValues(Res, DL);
2293 }
2294 
2295 // (frem x, y) -> (fma (fneg (ftrunc (fdiv x, y))), y, x)
2296 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
2297   SDLoc SL(Op);
2298   EVT VT = Op.getValueType();
2299   auto Flags = Op->getFlags();
2300   SDValue X = Op.getOperand(0);
2301   SDValue Y = Op.getOperand(1);
2302 
2303   SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y, Flags);
2304   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, VT, Div, Flags);
2305   SDValue Neg = DAG.getNode(ISD::FNEG, SL, VT, Trunc, Flags);
2306   // TODO: For f32 use FMAD instead if !hasFastFMA32?
2307   return DAG.getNode(ISD::FMA, SL, VT, Neg, Y, X, Flags);
2308 }
2309 
2310 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
2311   SDLoc SL(Op);
2312   SDValue Src = Op.getOperand(0);
2313 
2314   // result = trunc(src)
2315   // if (src > 0.0 && src != result)
2316   //   result += 1.0
2317 
2318   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2319 
2320   const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2321   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
2322 
2323   EVT SetCCVT =
2324       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2325 
2326   SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
2327   SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2328   SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2329 
2330   SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
2331   // TODO: Should this propagate fast-math-flags?
2332   return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2333 }
2334 
2335 static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL,
2336                                   SelectionDAG &DAG) {
2337   const unsigned FractBits = 52;
2338   const unsigned ExpBits = 11;
2339 
2340   SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
2341                                 Hi,
2342                                 DAG.getConstant(FractBits - 32, SL, MVT::i32),
2343                                 DAG.getConstant(ExpBits, SL, MVT::i32));
2344   SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
2345                             DAG.getConstant(1023, SL, MVT::i32));
2346 
2347   return Exp;
2348 }
2349 
2350 SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
2351   SDLoc SL(Op);
2352   SDValue Src = Op.getOperand(0);
2353 
2354   assert(Op.getValueType() == MVT::f64);
2355 
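       // Integer implementation of trunc for f64: with unbiased exponent e,
       //   e < 0   -> the result is just the sign bit (+/-0.0)
       //   e > 51  -> the value is already integral, return it unchanged
       //   else    -> clear the low (52 - e) fraction bits.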
2356   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2357 
2358   // Extract the upper half, since this is where we will find the sign and
2359   // exponent.
2360   SDValue Hi = getHiHalf64(Src, DAG);
2361 
2362   SDValue Exp = extractF64Exponent(Hi, SL, DAG);
2363 
2364   const unsigned FractBits = 52;
2365 
2366   // Extract the sign bit.
2367   const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
2368   SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
2369 
2370   // Extend back to 64-bits.
2371   SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit});
2372   SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
2373 
2374   SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
2375   const SDValue FractMask
2376     = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
2377 
2378   SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
2379   SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
2380   SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);
2381 
2382   EVT SetCCVT =
2383       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
2384 
2385   const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);
2386 
2387   SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
2388   SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
2389 
2390   SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
2391   SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);
2392 
2393   return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
2394 }
2395 
2396 SDValue AMDGPUTargetLowering::LowerFROUNDEVEN(SDValue Op,
2397                                               SelectionDAG &DAG) const {
2398   SDLoc SL(Op);
2399   SDValue Src = Op.getOperand(0);
2400 
2401   assert(Op.getValueType() == MVT::f64);
2402 
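       // Round to nearest even with the magic-number trick: adding and then
       // subtracting copysign(2^52, src) drops the fractional bits because the
       // intermediate sum is rounded at an integer ulp. Values with a magnitude
       // above 0x1.fffffffffffffp+51 are already integral and pass through
       // unchanged.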
2403   APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
2404   SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
2405   SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);
2406 
2407   // TODO: Should this propagate fast-math-flags?
2408 
2409   SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
2410   SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
2411 
2412   SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
2413 
2414   APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");
2415   SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);
2416 
2417   EVT SetCCVT =
2418       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2419   SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
2420 
2421   return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
2422 }
2423 
2424 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op,
2425                                               SelectionDAG &DAG) const {
2426   // FNEARBYINT and FRINT are the same, except in their handling of FP
2427   // exceptions. Those aren't really meaningful for us, and OpenCL only has
2428   // rint, so just treat them as equivalent.
2429   return DAG.getNode(ISD::FROUNDEVEN, SDLoc(Op), Op.getValueType(),
2430                      Op.getOperand(0));
2431 }
2432 
2433 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
2434   auto VT = Op.getValueType();
2435   auto Arg = Op.getOperand(0u);
2436   return DAG.getNode(ISD::FROUNDEVEN, SDLoc(Op), VT, Arg);
2437 }
2438 
2439 // XXX - May require not supporting f32 denormals?
2440 
2441 // Don't handle v2f16. The extra instructions to scalarize and repack around the
2442 // compare and vselect end up producing worse code than scalarizing the whole
2443 // operation.
2444 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
2445   SDLoc SL(Op);
2446   SDValue X = Op.getOperand(0);
2447   EVT VT = Op.getValueType();
2448 
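       // round(x) = trunc(x) + copysign(|x - trunc(x)| >= 0.5 ? 1.0 : 0.0, x),
       // i.e. round-half-away-from-zero built from ftrunc/fabs/select/fcopysign.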
2449   SDValue T = DAG.getNode(ISD::FTRUNC, SL, VT, X);
2450 
2451   // TODO: Should this propagate fast-math-flags?
2452 
2453   SDValue Diff = DAG.getNode(ISD::FSUB, SL, VT, X, T);
2454 
2455   SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, VT, Diff);
2456 
2457   const SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
2458   const SDValue One = DAG.getConstantFP(1.0, SL, VT);
2459 
2460   EVT SetCCVT =
2461       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2462 
2463   const SDValue Half = DAG.getConstantFP(0.5, SL, VT);
2464   SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);
2465   SDValue OneOrZeroFP = DAG.getNode(ISD::SELECT, SL, VT, Cmp, One, Zero);
2466 
2467   SDValue SignedOffset = DAG.getNode(ISD::FCOPYSIGN, SL, VT, OneOrZeroFP, X);
2468   return DAG.getNode(ISD::FADD, SL, VT, T, SignedOffset);
2469 }
2470 
2471 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
2472   SDLoc SL(Op);
2473   SDValue Src = Op.getOperand(0);
2474 
2475   // result = trunc(src);
2476   // if (src < 0.0 && src != result)
2477   //   result += -1.0.
2478 
2479   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2480 
2481   const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2482   const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);
2483 
2484   EVT SetCCVT =
2485       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2486 
2487   SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
2488   SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2489   SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2490 
2491   SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
2492   // TODO: Should this propagate fast-math-flags?
2493   return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2494 }
2495 
2496 /// Return true if it's known that \p Src can never be an f32 denormal value.
2497 static bool valueIsKnownNeverF32Denorm(SDValue Src) {
2498   switch (Src.getOpcode()) {
2499   case ISD::FP_EXTEND:
2500     return Src.getOperand(0).getValueType() == MVT::f16;
2501   case ISD::FP16_TO_FP:
2502   case ISD::FFREXP:
2503     return true;
2504   case ISD::INTRINSIC_WO_CHAIN: {
2505     unsigned IntrinsicID = Src.getConstantOperandVal(0);
2506     switch (IntrinsicID) {
2507     case Intrinsic::amdgcn_frexp_mant:
2508       return true;
2509     default:
2510       return false;
2511     }
2512   }
2513   default:
2514     return false;
2515   }
2516 
2517   llvm_unreachable("covered opcode switch");
2518 }
2519 
2520 bool AMDGPUTargetLowering::allowApproxFunc(const SelectionDAG &DAG,
2521                                            SDNodeFlags Flags) {
2522   if (Flags.hasApproximateFuncs())
2523     return true;
2524   auto &Options = DAG.getTarget().Options;
2525   return Options.UnsafeFPMath || Options.ApproxFuncFPMath;
2526 }
2527 
2528 bool AMDGPUTargetLowering::needsDenormHandlingF32(const SelectionDAG &DAG,
2529                                                   SDValue Src,
2530                                                   SDNodeFlags Flags) {
2531   return !valueIsKnownNeverF32Denorm(Src) &&
2532          DAG.getMachineFunction()
2533                  .getDenormalMode(APFloat::IEEEsingle())
2534                  .Input != DenormalMode::PreserveSign;
2535 }
2536 
2537 SDValue AMDGPUTargetLowering::getIsLtSmallestNormal(SelectionDAG &DAG,
2538                                                     SDValue Src,
2539                                                     SDNodeFlags Flags) const {
2540   SDLoc SL(Src);
2541   EVT VT = Src.getValueType();
2542   const fltSemantics &Semantics = SelectionDAG::EVTToAPFloatSemantics(VT);
2543   SDValue SmallestNormal =
2544       DAG.getConstantFP(APFloat::getSmallestNormalized(Semantics), SL, VT);
2545 
2546   // Want to scale denormals up, but negatives and 0 work just as well on the
2547   // scaled path.
2548   SDValue IsLtSmallestNormal = DAG.getSetCC(
2549       SL, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), Src,
2550       SmallestNormal, ISD::SETOLT);
2551 
2552   return IsLtSmallestNormal;
2553 }
2554 
2555 SDValue AMDGPUTargetLowering::getIsFinite(SelectionDAG &DAG, SDValue Src,
2556                                           SDNodeFlags Flags) const {
2557   SDLoc SL(Src);
2558   EVT VT = Src.getValueType();
2559   const fltSemantics &Semantics = SelectionDAG::EVTToAPFloatSemantics(VT);
2560   SDValue Inf = DAG.getConstantFP(APFloat::getInf(Semantics), SL, VT);
2561 
2562   SDValue Fabs = DAG.getNode(ISD::FABS, SL, VT, Src, Flags);
2563   SDValue IsFinite = DAG.getSetCC(
2564       SL, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), Fabs,
2565       Inf, ISD::SETOLT);
2566   return IsFinite;
2567 }
2568 
2569 /// If denormal handling is required return the scaled input to FLOG2, and the
2570 /// check for denormal range. Otherwise, return null values.
2571 std::pair<SDValue, SDValue>
2572 AMDGPUTargetLowering::getScaledLogInput(SelectionDAG &DAG, const SDLoc SL,
2573                                         SDValue Src, SDNodeFlags Flags) const {
2574   if (!needsDenormHandlingF32(DAG, Src, Flags))
2575     return {};
2576 
2577   MVT VT = MVT::f32;
2578   const fltSemantics &Semantics = APFloat::IEEEsingle();
2579   SDValue SmallestNormal =
2580       DAG.getConstantFP(APFloat::getSmallestNormalized(Semantics), SL, VT);
2581 
2582   SDValue IsLtSmallestNormal = DAG.getSetCC(
2583       SL, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), Src,
2584       SmallestNormal, ISD::SETOLT);
2585 
2586   SDValue Scale32 = DAG.getConstantFP(0x1.0p+32, SL, VT);
2587   SDValue One = DAG.getConstantFP(1.0, SL, VT);
2588   SDValue ScaleFactor =
2589       DAG.getNode(ISD::SELECT, SL, VT, IsLtSmallestNormal, Scale32, One, Flags);
2590 
2591   SDValue ScaledInput = DAG.getNode(ISD::FMUL, SL, VT, Src, ScaleFactor, Flags);
2592   return {ScaledInput, IsLtSmallestNormal};
2593 }
2594 
2595 SDValue AMDGPUTargetLowering::LowerFLOG2(SDValue Op, SelectionDAG &DAG) const {
2596   // v_log_f32 is good enough for OpenCL, except it doesn't handle denormals.
2597   // If we have to handle denormals, scale up the input and adjust the result.
2598 
2599   // scaled = x * (is_denormal ? 0x1.0p+32 : 1.0)
2600   // log2 = amdgpu_log2 - (is_denormal ? 32.0 : 0.0)
2601 
2602   SDLoc SL(Op);
2603   EVT VT = Op.getValueType();
2604   SDValue Src = Op.getOperand(0);
2605   SDNodeFlags Flags = Op->getFlags();
2606 
2607   if (VT == MVT::f16) {
2608     // Nothing in half is a denormal when promoted to f32.
2609     assert(!Subtarget->has16BitInsts());
2610     SDValue Ext = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src, Flags);
2611     SDValue Log = DAG.getNode(AMDGPUISD::LOG, SL, MVT::f32, Ext, Flags);
2612     return DAG.getNode(ISD::FP_ROUND, SL, VT, Log,
2613                        DAG.getTargetConstant(0, SL, MVT::i32), Flags);
2614   }
2615 
2616   auto [ScaledInput, IsLtSmallestNormal] =
2617       getScaledLogInput(DAG, SL, Src, Flags);
2618   if (!ScaledInput)
2619     return DAG.getNode(AMDGPUISD::LOG, SL, VT, Src, Flags);
2620 
2621   SDValue Log2 = DAG.getNode(AMDGPUISD::LOG, SL, VT, ScaledInput, Flags);
2622 
2623   SDValue ThirtyTwo = DAG.getConstantFP(32.0, SL, VT);
2624   SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
2625   SDValue ResultOffset =
2626       DAG.getNode(ISD::SELECT, SL, VT, IsLtSmallestNormal, ThirtyTwo, Zero);
2627   return DAG.getNode(ISD::FSUB, SL, VT, Log2, ResultOffset, Flags);
2628 }
2629 
2630 static SDValue getMad(SelectionDAG &DAG, const SDLoc &SL, EVT VT, SDValue X,
2631                       SDValue Y, SDValue C, SDNodeFlags Flags = SDNodeFlags()) {
2632   SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, X, Y, Flags);
2633   return DAG.getNode(ISD::FADD, SL, VT, Mul, C, Flags);
2634 }
2635 
2636 SDValue AMDGPUTargetLowering::LowerFLOGCommon(SDValue Op,
2637                                               SelectionDAG &DAG) const {
2638   SDValue X = Op.getOperand(0);
2639   EVT VT = Op.getValueType();
2640   SDNodeFlags Flags = Op->getFlags();
2641   SDLoc DL(Op);
2642 
2643   const bool IsLog10 = Op.getOpcode() == ISD::FLOG10;
2644   assert(IsLog10 || Op.getOpcode() == ISD::FLOG);
2645 
2646   const auto &Options = getTargetMachine().Options;
2647   if (VT == MVT::f16 || Flags.hasApproximateFuncs() ||
2648       Options.ApproxFuncFPMath || Options.UnsafeFPMath) {
2649 
2650     if (VT == MVT::f16 && !Subtarget->has16BitInsts()) {
2651       // Log and multiply in f32 is good enough for f16.
2652       X = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, X, Flags);
2653     }
2654 
2655     SDValue Lowered = LowerFLOGUnsafe(X, DL, DAG, IsLog10, Flags);
2656     if (VT == MVT::f16 && !Subtarget->has16BitInsts()) {
2657       return DAG.getNode(ISD::FP_ROUND, DL, VT, Lowered,
2658                          DAG.getTargetConstant(0, DL, MVT::i32), Flags);
2659     }
2660 
2661     return Lowered;
2662   }
2663 
2664   auto [ScaledInput, IsScaled] = getScaledLogInput(DAG, DL, X, Flags);
2665   if (ScaledInput)
2666     X = ScaledInput;
2667 
2668   SDValue Y = DAG.getNode(AMDGPUISD::LOG, DL, VT, X, Flags);
2669 
2670   SDValue R;
2671   if (Subtarget->hasFastFMAF32()) {
2672     // c+cc are ln(2)/ln(10) to more than 49 bits
2673     const float c_log10 = 0x1.344134p-2f;
2674     const float cc_log10 = 0x1.09f79ep-26f;
2675 
2676     // c + cc is ln(2) to more than 49 bits
2677     const float c_log = 0x1.62e42ep-1f;
2678     const float cc_log = 0x1.efa39ep-25f;
2679 
2680     SDValue C = DAG.getConstantFP(IsLog10 ? c_log10 : c_log, DL, VT);
2681     SDValue CC = DAG.getConstantFP(IsLog10 ? cc_log10 : cc_log, DL, VT);
2682 
2683     R = DAG.getNode(ISD::FMUL, DL, VT, Y, C, Flags);
2684     SDValue NegR = DAG.getNode(ISD::FNEG, DL, VT, R, Flags);
2685     SDValue FMA0 = DAG.getNode(ISD::FMA, DL, VT, Y, C, NegR, Flags);
2686     SDValue FMA1 = DAG.getNode(ISD::FMA, DL, VT, Y, CC, FMA0, Flags);
2687     R = DAG.getNode(ISD::FADD, DL, VT, R, FMA1, Flags);
2688   } else {
2689     // ch+ct is ln(2)/ln(10) to more than 36 bits
2690     const float ch_log10 = 0x1.344000p-2f;
2691     const float ct_log10 = 0x1.3509f6p-18f;
2692 
2693     // ch + ct is ln(2) to more than 36 bits
2694     const float ch_log = 0x1.62e000p-1f;
2695     const float ct_log = 0x1.0bfbe8p-15f;
2696 
2697     SDValue CH = DAG.getConstantFP(IsLog10 ? ch_log10 : ch_log, DL, VT);
2698     SDValue CT = DAG.getConstantFP(IsLog10 ? ct_log10 : ct_log, DL, VT);
2699 
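         // Split Y into a high part with the low 12 mantissa bits cleared and a
         // low-order remainder; the product of YH with the similarly truncated
         // constant CH then fits in an f32 significand, so that FMUL is exact.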
2700     SDValue YAsInt = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Y);
2701     SDValue MaskConst = DAG.getConstant(0xfffff000, DL, MVT::i32);
2702     SDValue YHInt = DAG.getNode(ISD::AND, DL, MVT::i32, YAsInt, MaskConst);
2703     SDValue YH = DAG.getNode(ISD::BITCAST, DL, MVT::f32, YHInt);
2704     SDValue YT = DAG.getNode(ISD::FSUB, DL, VT, Y, YH, Flags);
2705 
2706     SDValue YTCT = DAG.getNode(ISD::FMUL, DL, VT, YT, CT, Flags);
2707     SDValue Mad0 = getMad(DAG, DL, VT, YH, CT, YTCT, Flags);
2708     SDValue Mad1 = getMad(DAG, DL, VT, YT, CH, Mad0, Flags);
2709     R = getMad(DAG, DL, VT, YH, CH, Mad1);
2710   }
2711 
2712   const bool IsFiniteOnly = (Flags.hasNoNaNs() || Options.NoNaNsFPMath) &&
2713                             (Flags.hasNoInfs() || Options.NoInfsFPMath);
2714 
2715   // TODO: Check if known finite from source value.
2716   if (!IsFiniteOnly) {
2717     SDValue IsFinite = getIsFinite(DAG, Y, Flags);
2718     R = DAG.getNode(ISD::SELECT, DL, VT, IsFinite, R, Y, Flags);
2719   }
2720 
2721   if (IsScaled) {
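         // If the input was scaled by 2^32, undo it by subtracting 32*log10(2)
         // (0x1.344136p+3) or 32*ln(2) (0x1.62e430p+4) from the result; the
         // select picks 0.0 when no scaling was applied.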
2722     SDValue Zero = DAG.getConstantFP(0.0f, DL, VT);
2723     SDValue ShiftK =
2724         DAG.getConstantFP(IsLog10 ? 0x1.344136p+3f : 0x1.62e430p+4f, DL, VT);
2725     SDValue Shift =
2726         DAG.getNode(ISD::SELECT, DL, VT, IsScaled, ShiftK, Zero, Flags);
2727     R = DAG.getNode(ISD::FSUB, DL, VT, R, Shift, Flags);
2728   }
2729 
2730   return R;
2731 }
2732 
2733 SDValue AMDGPUTargetLowering::LowerFLOG10(SDValue Op, SelectionDAG &DAG) const {
2734   return LowerFLOGCommon(Op, DAG);
2735 }
2736 
2737 // Do f32 fast math expansion for flog2 or flog10. This is accurate enough for a
2738 // promoted f16 operation.
2739 SDValue AMDGPUTargetLowering::LowerFLOGUnsafe(SDValue Src, const SDLoc &SL,
2740                                               SelectionDAG &DAG, bool IsLog10,
2741                                               SDNodeFlags Flags) const {
2742   EVT VT = Src.getValueType();
2743   unsigned LogOp =
2744       VT == MVT::f32 ? (unsigned)AMDGPUISD::LOG : (unsigned)ISD::FLOG2;
2745 
2746   double Log2BaseInverted =
2747       IsLog10 ? numbers::ln2 / numbers::ln10 : numbers::ln2;
2748 
2749   if (VT == MVT::f32) {
2750     auto [ScaledInput, IsScaled] = getScaledLogInput(DAG, SL, Src, Flags);
2751     if (ScaledInput) {
2752       SDValue LogSrc = DAG.getNode(AMDGPUISD::LOG, SL, VT, ScaledInput, Flags);
2753       SDValue ScaledResultOffset =
2754           DAG.getConstantFP(-32.0 * Log2BaseInverted, SL, VT);
2755 
2756       SDValue Zero = DAG.getConstantFP(0.0f, SL, VT);
2757 
2758       SDValue ResultOffset = DAG.getNode(ISD::SELECT, SL, VT, IsScaled,
2759                                          ScaledResultOffset, Zero, Flags);
2760 
2761       SDValue Log2Inv = DAG.getConstantFP(Log2BaseInverted, SL, VT);
2762 
2763       if (Subtarget->hasFastFMAF32())
2764         return DAG.getNode(ISD::FMA, SL, VT, LogSrc, Log2Inv, ResultOffset,
2765                            Flags);
2766       SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, LogSrc, Log2Inv, Flags);
2767       return DAG.getNode(ISD::FADD, SL, VT, Mul, ResultOffset);
2768     }
2769   }
2770 
2771   SDValue Log2Operand = DAG.getNode(LogOp, SL, VT, Src, Flags);
2772   SDValue Log2BaseInvertedOperand = DAG.getConstantFP(Log2BaseInverted, SL, VT);
2773 
2774   return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand,
2775                      Flags);
2776 }
2777 
2778 SDValue AMDGPUTargetLowering::lowerFEXP2(SDValue Op, SelectionDAG &DAG) const {
2779   // v_exp_f32 is good enough for OpenCL, except it doesn't handle denormals.
2780   // If we have to handle denormals, scale up the input and adjust the result.
2781 
2782   SDLoc SL(Op);
2783   EVT VT = Op.getValueType();
2784   SDValue Src = Op.getOperand(0);
2785   SDNodeFlags Flags = Op->getFlags();
2786 
2787   if (VT == MVT::f16) {
2788     // Nothing in half is a denormal when promoted to f32.
2789     assert(!Subtarget->has16BitInsts());
2790     SDValue Ext = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src, Flags);
2791     SDValue Log = DAG.getNode(AMDGPUISD::EXP, SL, MVT::f32, Ext, Flags);
2792     return DAG.getNode(ISD::FP_ROUND, SL, VT, Log,
2793                        DAG.getTargetConstant(0, SL, MVT::i32), Flags);
2794   }
2795 
2796   assert(VT == MVT::f32);
2797 
2798   if (!needsDenormHandlingF32(DAG, Src, Flags))
2799     return DAG.getNode(AMDGPUISD::EXP, SL, MVT::f32, Src, Flags);
2800 
2801   // bool needs_scaling = x < -0x1.f80000p+6f;
2802   // v_exp_f32(x + (s ? 0x1.0p+6f : 0.0f)) * (s ? 0x1.0p-64f : 1.0f);
2803 
2804   // -nextafter(128.0, -1)
2805   SDValue RangeCheckConst = DAG.getConstantFP(-0x1.f80000p+6f, SL, VT);
2806 
2807   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2808 
2809   SDValue NeedsScaling =
2810       DAG.getSetCC(SL, SetCCVT, Src, RangeCheckConst, ISD::SETOLT);
2811 
2812   SDValue SixtyFour = DAG.getConstantFP(0x1.0p+6f, SL, VT);
2813   SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
2814 
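       // exp2(x + 64) * 2^-64 == exp2(x); adding 64 before the hardware exp keeps
       // the intermediate result out of the denormal range, and the 2^-64 factor
       // undoes the shift afterwards.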
2815   SDValue AddOffset =
2816       DAG.getNode(ISD::SELECT, SL, VT, NeedsScaling, SixtyFour, Zero);
2817 
2818   SDValue AddInput = DAG.getNode(ISD::FADD, SL, VT, Src, AddOffset, Flags);
2819   SDValue Exp2 = DAG.getNode(AMDGPUISD::EXP, SL, VT, AddInput, Flags);
2820 
2821   SDValue TwoExpNeg64 = DAG.getConstantFP(0x1.0p-64f, SL, VT);
2822   SDValue One = DAG.getConstantFP(1.0, SL, VT);
2823   SDValue ResultScale =
2824       DAG.getNode(ISD::SELECT, SL, VT, NeedsScaling, TwoExpNeg64, One);
2825 
2826   return DAG.getNode(ISD::FMUL, SL, VT, Exp2, ResultScale, Flags);
2827 }
2828 
2829 SDValue AMDGPUTargetLowering::lowerFEXPUnsafe(SDValue X, const SDLoc &SL,
2830                                               SelectionDAG &DAG,
2831                                               SDNodeFlags Flags) const {
2832   EVT VT = X.getValueType();
2833   const SDValue Log2E = DAG.getConstantFP(numbers::log2e, SL, VT);
2834 
2835   if (VT != MVT::f32 || !needsDenormHandlingF32(DAG, X, Flags)) {
2836     // exp2(M_LOG2E_F * f);
2837     SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, X, Log2E, Flags);
2838     return DAG.getNode(VT == MVT::f32 ? (unsigned)AMDGPUISD::EXP
2839                                       : (unsigned)ISD::FEXP2,
2840                        SL, VT, Mul, Flags);
2841   }
2842 
2843   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2844 
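       // Threshold is roughly ln(2^-126) ~= -87.34: below it exp(x) would be an
       // f32 denormal, so compute e^(x+64) instead and multiply by e^-64
       // (0x1.969d48p-93f) afterwards.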
2845   SDValue Threshold = DAG.getConstantFP(-0x1.5d58a0p+6f, SL, VT);
2846   SDValue NeedsScaling = DAG.getSetCC(SL, SetCCVT, X, Threshold, ISD::SETOLT);
2847 
2848   SDValue ScaleOffset = DAG.getConstantFP(0x1.0p+6f, SL, VT);
2849 
2850   SDValue ScaledX = DAG.getNode(ISD::FADD, SL, VT, X, ScaleOffset, Flags);
2851 
2852   SDValue AdjustedX =
2853       DAG.getNode(ISD::SELECT, SL, VT, NeedsScaling, ScaledX, X);
2854 
2855   SDValue ExpInput = DAG.getNode(ISD::FMUL, SL, VT, AdjustedX, Log2E, Flags);
2856 
2857   SDValue Exp2 = DAG.getNode(AMDGPUISD::EXP, SL, VT, ExpInput, Flags);
2858 
2859   SDValue ResultScaleFactor = DAG.getConstantFP(0x1.969d48p-93f, SL, VT);
2860   SDValue AdjustedResult =
2861       DAG.getNode(ISD::FMUL, SL, VT, Exp2, ResultScaleFactor, Flags);
2862 
2863   return DAG.getNode(ISD::SELECT, SL, VT, NeedsScaling, AdjustedResult, Exp2,
2864                      Flags);
2865 }
2866 
2867 /// Emit an approx-funcs-appropriate lowering for exp10. Inf/NaN inputs should
2868 /// still be handled correctly.
2869 SDValue AMDGPUTargetLowering::lowerFEXP10Unsafe(SDValue X, const SDLoc &SL,
2870                                                 SelectionDAG &DAG,
2871                                                 SDNodeFlags Flags) const {
2872   const EVT VT = X.getValueType();
2873   const unsigned Exp2Op = VT == MVT::f32 ? AMDGPUISD::EXP : ISD::FEXP2;
2874 
2875   if (VT != MVT::f32 || !needsDenormHandlingF32(DAG, X, Flags)) {
2876     // exp2(x * 0x1.a92000p+1f) * exp2(x * 0x1.4f0978p-11f);
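         // K0 + K1 is a hi/lo split of log2(10), so the product of the two exp2
         // results reconstructs 2^(x * log2(10)) == 10^x without dropping the
         // low-order bits of the constant.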
2877     SDValue K0 = DAG.getConstantFP(0x1.a92000p+1f, SL, VT);
2878     SDValue K1 = DAG.getConstantFP(0x1.4f0978p-11f, SL, VT);
2879 
2880     SDValue Mul0 = DAG.getNode(ISD::FMUL, SL, VT, X, K0, Flags);
2881     SDValue Exp2_0 = DAG.getNode(Exp2Op, SL, VT, Mul0, Flags);
2882     SDValue Mul1 = DAG.getNode(ISD::FMUL, SL, VT, X, K1, Flags);
2883     SDValue Exp2_1 = DAG.getNode(Exp2Op, SL, VT, Mul1, Flags);
2884     return DAG.getNode(ISD::FMUL, SL, VT, Exp2_0, Exp2_1);
2885   }
2886 
2887   // bool s = x < -0x1.2f7030p+5f;
2888   // x += s ? 0x1.0p+5f : 0.0f;
2889   // exp10 = exp2(x * 0x1.a92000p+1f) *
2890   //        exp2(x * 0x1.4f0978p-11f) *
2891   //        (s ? 0x1.9f623ep-107f : 1.0f);
2892 
2893   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2894 
2895   SDValue Threshold = DAG.getConstantFP(-0x1.2f7030p+5f, SL, VT);
2896   SDValue NeedsScaling = DAG.getSetCC(SL, SetCCVT, X, Threshold, ISD::SETOLT);
2897 
2898   SDValue ScaleOffset = DAG.getConstantFP(0x1.0p+5f, SL, VT);
2899   SDValue ScaledX = DAG.getNode(ISD::FADD, SL, VT, X, ScaleOffset, Flags);
2900   SDValue AdjustedX =
2901       DAG.getNode(ISD::SELECT, SL, VT, NeedsScaling, ScaledX, X);
2902 
2903   SDValue K0 = DAG.getConstantFP(0x1.a92000p+1f, SL, VT);
2904   SDValue K1 = DAG.getConstantFP(0x1.4f0978p-11f, SL, VT);
2905 
2906   SDValue Mul0 = DAG.getNode(ISD::FMUL, SL, VT, AdjustedX, K0, Flags);
2907   SDValue Exp2_0 = DAG.getNode(Exp2Op, SL, VT, Mul0, Flags);
2908   SDValue Mul1 = DAG.getNode(ISD::FMUL, SL, VT, AdjustedX, K1, Flags);
2909   SDValue Exp2_1 = DAG.getNode(Exp2Op, SL, VT, Mul1, Flags);
2910 
2911   SDValue MulExps = DAG.getNode(ISD::FMUL, SL, VT, Exp2_0, Exp2_1, Flags);
2912 
2913   SDValue ResultScaleFactor = DAG.getConstantFP(0x1.9f623ep-107f, SL, VT);
2914   SDValue AdjustedResult =
2915       DAG.getNode(ISD::FMUL, SL, VT, MulExps, ResultScaleFactor, Flags);
2916 
2917   return DAG.getNode(ISD::SELECT, SL, VT, NeedsScaling, AdjustedResult, MulExps,
2918                      Flags);
2919 }
2920 
2921 SDValue AMDGPUTargetLowering::lowerFEXP(SDValue Op, SelectionDAG &DAG) const {
2922   EVT VT = Op.getValueType();
2923   SDLoc SL(Op);
2924   SDValue X = Op.getOperand(0);
2925   SDNodeFlags Flags = Op->getFlags();
2926   const bool IsExp10 = Op.getOpcode() == ISD::FEXP10;
2927 
2928   if (VT.getScalarType() == MVT::f16) {
2929     // v_exp_f16 (fmul x, log2e)
2930     if (allowApproxFunc(DAG, Flags)) // TODO: Does this really require fast?
2931       return lowerFEXPUnsafe(X, SL, DAG, Flags);
2932 
2933     if (VT.isVector())
2934       return SDValue();
2935 
2936     // exp(f16 x) ->
2937     //   fptrunc (v_exp_f32 (fmul (fpext x), log2e))
2938 
2939     // Nothing in half is a denormal when promoted to f32.
2940     SDValue Ext = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, X, Flags);
2941     SDValue Lowered = lowerFEXPUnsafe(Ext, SL, DAG, Flags);
2942     return DAG.getNode(ISD::FP_ROUND, SL, VT, Lowered,
2943                        DAG.getTargetConstant(0, SL, MVT::i32), Flags);
2944   }
2945 
2946   assert(VT == MVT::f32);
2947 
2948   // TODO: Interpret allowApproxFunc as ignoring DAZ. This is currently copying
2949   // library behavior. Also, is known-not-daz source sufficient?
2950   if (allowApproxFunc(DAG, Flags)) {
2951     return IsExp10 ? lowerFEXP10Unsafe(X, SL, DAG, Flags)
2952                    : lowerFEXPUnsafe(X, SL, DAG, Flags);
2953   }
2954 
2955   //    Algorithm:
2956   //
2957   //    e^x = 2^(x/ln(2)) = 2^(x*(64/ln(2))/64)
2958   //
2959   //    x*(64/ln(2)) = n + f, |f| <= 0.5, n is integer
2960   //    n = 64*m + j,   0 <= j < 64
2961   //
2962   //    e^x = 2^((64*m + j + f)/64)
2963   //        = (2^m) * (2^(j/64)) * 2^(f/64)
2964   //        = (2^m) * (2^(j/64)) * e^(f*(ln(2)/64))
2965   //
2966   //    f = x*(64/ln(2)) - n
2967   //    r = f*(ln(2)/64) = x - n*(ln(2)/64)
2968   //
2969   //    e^x = (2^m) * (2^(j/64)) * e^r
2970   //
2971   //    (2^(j/64)) is precomputed
2972   //
2973   //    e^r = 1 + r + (r^2)/2! + (r^3)/3! + (r^4)/4! + (r^5)/5!
2974   //    e^r = 1 + q
2975   //
2976   //    q = r + (r^2)/2! + (r^3)/3! + (r^4)/4! + (r^5)/5!
2977   //
2978   //    e^x = (2^m) * ( (2^(j/64)) + q*(2^(j/64)) )
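       //
       // The lowering below follows this idea without the table: PH + PL
       // approximates x * log2(e) (or x * log2(10) for exp10), E = roundeven(PH)
       // is the integer part, and the result is ldexp(exp2(PH - E + PL), E).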
2979   SDNodeFlags FlagsNoContract = Flags;
2980   FlagsNoContract.setAllowContract(false);
2981 
2982   SDValue PH, PL;
2983   if (Subtarget->hasFastFMAF32()) {
2984     const float c_exp = numbers::log2ef;
2985     const float cc_exp = 0x1.4ae0bep-26f; // c+cc are 49 bits
2986     const float c_exp10 = 0x1.a934f0p+1f;
2987     const float cc_exp10 = 0x1.2f346ep-24f;
2988 
2989     SDValue C = DAG.getConstantFP(IsExp10 ? c_exp10 : c_exp, SL, VT);
2990     SDValue CC = DAG.getConstantFP(IsExp10 ? cc_exp10 : cc_exp, SL, VT);
2991 
2992     PH = DAG.getNode(ISD::FMUL, SL, VT, X, C, Flags);
2993     SDValue NegPH = DAG.getNode(ISD::FNEG, SL, VT, PH, Flags);
2994     SDValue FMA0 = DAG.getNode(ISD::FMA, SL, VT, X, C, NegPH, Flags);
2995     PL = DAG.getNode(ISD::FMA, SL, VT, X, CC, FMA0, Flags);
2996   } else {
2997     const float ch_exp = 0x1.714000p+0f;
2998     const float cl_exp = 0x1.47652ap-12f; // ch + cl are 36 bits
2999 
3000     const float ch_exp10 = 0x1.a92000p+1f;
3001     const float cl_exp10 = 0x1.4f0978p-11f;
3002 
3003     SDValue CH = DAG.getConstantFP(IsExp10 ? ch_exp10 : ch_exp, SL, VT);
3004     SDValue CL = DAG.getConstantFP(IsExp10 ? cl_exp10 : cl_exp, SL, VT);
3005 
3006     SDValue XAsInt = DAG.getNode(ISD::BITCAST, SL, MVT::i32, X);
3007     SDValue MaskConst = DAG.getConstant(0xfffff000, SL, MVT::i32);
3008     SDValue XHAsInt = DAG.getNode(ISD::AND, SL, MVT::i32, XAsInt, MaskConst);
3009     SDValue XH = DAG.getNode(ISD::BITCAST, SL, VT, XHAsInt);
3010     SDValue XL = DAG.getNode(ISD::FSUB, SL, VT, X, XH, Flags);
3011 
3012     PH = DAG.getNode(ISD::FMUL, SL, VT, XH, CH, Flags);
3013 
3014     SDValue XLCL = DAG.getNode(ISD::FMUL, SL, VT, XL, CL, Flags);
3015     SDValue Mad0 = getMad(DAG, SL, VT, XL, CH, XLCL, Flags);
3016     PL = getMad(DAG, SL, VT, XH, CL, Mad0, Flags);
3017   }
3018 
3019   SDValue E = DAG.getNode(ISD::FROUNDEVEN, SL, VT, PH, Flags);
3020 
3021   // It is unsafe to contract this fsub into the PH multiply.
3022   SDValue PHSubE = DAG.getNode(ISD::FSUB, SL, VT, PH, E, FlagsNoContract);
3023 
3024   SDValue A = DAG.getNode(ISD::FADD, SL, VT, PHSubE, PL, Flags);
3025   SDValue IntE = DAG.getNode(ISD::FP_TO_SINT, SL, MVT::i32, E);
3026   SDValue Exp2 = DAG.getNode(AMDGPUISD::EXP, SL, VT, A, Flags);
3027 
3028   SDValue R = DAG.getNode(ISD::FLDEXP, SL, VT, Exp2, IntE, Flags);
3029 
3030   SDValue UnderflowCheckConst =
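       // The underflow threshold is roughly log(2^-149) (log10(2^-149) for
       // exp10), the smallest f32 denormal: below it the result is flushed to
       // zero. The overflow check below clamps inputs above log(2^128) to +inf.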
3031       DAG.getConstantFP(IsExp10 ? -0x1.66d3e8p+5f : -0x1.9d1da0p+6f, SL, VT);
3032 
3033   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
3034   SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
3035   SDValue Underflow =
3036       DAG.getSetCC(SL, SetCCVT, X, UnderflowCheckConst, ISD::SETOLT);
3037 
3038   R = DAG.getNode(ISD::SELECT, SL, VT, Underflow, Zero, R);
3039   const auto &Options = getTargetMachine().Options;
3040 
3041   if (!Flags.hasNoInfs() && !Options.NoInfsFPMath) {
3042     SDValue OverflowCheckConst =
3043         DAG.getConstantFP(IsExp10 ? 0x1.344136p+5f : 0x1.62e430p+6f, SL, VT);
3044     SDValue Overflow =
3045         DAG.getSetCC(SL, SetCCVT, X, OverflowCheckConst, ISD::SETOGT);
3046     SDValue Inf =
3047         DAG.getConstantFP(APFloat::getInf(APFloat::IEEEsingle()), SL, VT);
3048     R = DAG.getNode(ISD::SELECT, SL, VT, Overflow, Inf, R);
3049   }
3050 
3051   return R;
3052 }
3053 
3054 static bool isCtlzOpc(unsigned Opc) {
3055   return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
3056 }
3057 
3058 static bool isCttzOpc(unsigned Opc) {
3059   return Opc == ISD::CTTZ || Opc == ISD::CTTZ_ZERO_UNDEF;
3060 }
3061 
3062 SDValue AMDGPUTargetLowering::LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const {
3063   SDLoc SL(Op);
3064   SDValue Src = Op.getOperand(0);
3065 
3066   assert(isCtlzOpc(Op.getOpcode()) || isCttzOpc(Op.getOpcode()));
3067   bool Ctlz = isCtlzOpc(Op.getOpcode());
3068   unsigned NewOpc = Ctlz ? AMDGPUISD::FFBH_U32 : AMDGPUISD::FFBL_B32;
3069 
3070   bool ZeroUndef = Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF ||
3071                    Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF;
3072   bool Is64BitScalar = !Src->isDivergent() && Src.getValueType() == MVT::i64;
3073 
3074   if (Src.getValueType() == MVT::i32 || Is64BitScalar) {
3075     // (ctlz hi:lo) -> (umin (ffbh src), 32)
3076     // (cttz hi:lo) -> (umin (ffbl src), 32)
3077     // (ctlz_zero_undef src) -> (ffbh src)
3078     // (cttz_zero_undef src) -> (ffbl src)
3079 
3080     // The 64-bit scalar version produces a 32-bit result.
3081     // (ctlz hi:lo) -> (umin (S_FLBIT_I32_B64 src), 64)
3082     // (cttz hi:lo) -> (umin (S_FF1_I32_B64 src), 64)
3083     // (ctlz_zero_undef src) -> (S_FLBIT_I32_B64 src)
3084     // (cttz_zero_undef src) -> (S_FF1_I32_B64 src)
3085     SDValue NewOpr = DAG.getNode(NewOpc, SL, MVT::i32, Src);
3086     if (!ZeroUndef) {
3087       const SDValue ConstVal = DAG.getConstant(
3088           Op.getValueType().getScalarSizeInBits(), SL, MVT::i32);
3089       NewOpr = DAG.getNode(ISD::UMIN, SL, MVT::i32, NewOpr, ConstVal);
3090     }
3091     return DAG.getNode(ISD::ZERO_EXTEND, SL, Src.getValueType(), NewOpr);
3092   }
3093 
3094   SDValue Lo, Hi;
3095   std::tie(Lo, Hi) = split64BitValue(Src, DAG);
3096 
3097   SDValue OprLo = DAG.getNode(NewOpc, SL, MVT::i32, Lo);
3098   SDValue OprHi = DAG.getNode(NewOpc, SL, MVT::i32, Hi);
3099 
3100   // (ctlz hi:lo) -> (umin3 (ffbh hi), (uaddsat (ffbh lo), 32), 64)
3101   // (cttz hi:lo) -> (umin3 (uaddsat (ffbl hi), 32), (ffbl lo), 64)
3102   // (ctlz_zero_undef hi:lo) -> (umin (ffbh hi), (add (ffbh lo), 32))
3103   // (cttz_zero_undef hi:lo) -> (umin (add (ffbl hi), 32), (ffbl lo))
3104 
3105   unsigned AddOpc = ZeroUndef ? ISD::ADD : ISD::UADDSAT;
3106   const SDValue Const32 = DAG.getConstant(32, SL, MVT::i32);
3107   if (Ctlz)
3108     OprLo = DAG.getNode(AddOpc, SL, MVT::i32, OprLo, Const32);
3109   else
3110     OprHi = DAG.getNode(AddOpc, SL, MVT::i32, OprHi, Const32);
3111 
3112   SDValue NewOpr;
3113   NewOpr = DAG.getNode(ISD::UMIN, SL, MVT::i32, OprLo, OprHi);
3114   if (!ZeroUndef) {
3115     const SDValue Const64 = DAG.getConstant(64, SL, MVT::i32);
3116     NewOpr = DAG.getNode(ISD::UMIN, SL, MVT::i32, NewOpr, Const64);
3117   }
3118 
3119   return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewOpr);
3120 }
3121 
3122 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
3123                                                bool Signed) const {
3124   // The regular method of converting a 64-bit integer to float roughly consists
3125   // of two steps: normalization and rounding. In fact, after normalization, the
3126   // conversion from a 64-bit integer to a float is essentially the same as the
3127   // one from a 32-bit integer. The only difference is that it has more
3128   // trailing bits to be rounded. To leverage the native 32-bit conversion, a
3129   // 64-bit integer could be preprocessed and fit into a 32-bit integer then
3130   // converted into the correct float number. The basic steps for the unsigned
3131   // conversion are illustrated in the following pseudo code:
3132   //
3133   // f32 uitofp(i64 u) {
3134   //   i32 hi, lo = split(u);
3135   //   // Only count the leading zeros in hi as we have native support of the
3136   //   // conversion from i32 to f32. If hi is all 0s, the conversion is
3137   //   // reduced to a 32-bit one automatically.
3138   //   i32 shamt = clz(hi); // Return 32 if hi is all 0s.
3139   //   u <<= shamt;
3140   //   hi, lo = split(u);
3141   //   hi |= (lo != 0) ? 1 : 0; // Adjust rounding bit in hi based on lo.
3142   //   // convert it as a 32-bit integer and scale the result back.
3143   //   return uitofp(hi) * 2^(32 - shamt);
3144   // }
3145   //
3146   // The signed one follows the same principle but uses 'ffbh_i32' to count its
3147   // sign bits instead. If 'ffbh_i32' is not available, its absolute value is
3148   // converted instead, followed by negation based on its sign bit.
3149 
3150   SDLoc SL(Op);
3151   SDValue Src = Op.getOperand(0);
3152 
3153   SDValue Lo, Hi;
3154   std::tie(Lo, Hi) = split64BitValue(Src, DAG);
3155   SDValue Sign;
3156   SDValue ShAmt;
3157   if (Signed && Subtarget->isGCN()) {
3158     // We also need to consider the sign bit in Lo if Hi has just sign bits,
3159     // i.e. Hi is 0 or -1. However, that only needs to take the MSB into
3160     // account. That is, the maximal shift is
3161     // - 32 if Lo and Hi have opposite signs;
3162     // - 33 if Lo and Hi have the same sign.
3163     //
3164     // Or, MaxShAmt = 33 + OppositeSign, where
3165     //
3166     // OppositeSign is defined as ((Lo ^ Hi) >> 31), which is
3167     // - -1 if Lo and Hi have opposite signs; and
3168     // -  0 otherwise.
3169     //
3170     // All in all, ShAmt is calculated as
3171     //
3172     //  umin(sffbh(Hi), 33 + (Lo^Hi)>>31) - 1.
3173     //
3174     // or
3175     //
3176     //  umin(sffbh(Hi) - 1, 32 + (Lo^Hi)>>31).
3177     //
3178     // to reduce the critical path.
3179     SDValue OppositeSign = DAG.getNode(
3180         ISD::SRA, SL, MVT::i32, DAG.getNode(ISD::XOR, SL, MVT::i32, Lo, Hi),
3181         DAG.getConstant(31, SL, MVT::i32));
3182     SDValue MaxShAmt =
3183         DAG.getNode(ISD::ADD, SL, MVT::i32, DAG.getConstant(32, SL, MVT::i32),
3184                     OppositeSign);
3185     // Count the leading sign bits.
3186     ShAmt = DAG.getNode(AMDGPUISD::FFBH_I32, SL, MVT::i32, Hi);
3187     // Different from unsigned conversion, the shift should be one bit less to
3188     // preserve the sign bit.
3189     ShAmt = DAG.getNode(ISD::SUB, SL, MVT::i32, ShAmt,
3190                         DAG.getConstant(1, SL, MVT::i32));
3191     ShAmt = DAG.getNode(ISD::UMIN, SL, MVT::i32, ShAmt, MaxShAmt);
3192   } else {
3193     if (Signed) {
3194       // Without 'ffbh_i32', only leading zeros could be counted. Take the
3195       // absolute value first.
3196       Sign = DAG.getNode(ISD::SRA, SL, MVT::i64, Src,
3197                          DAG.getConstant(63, SL, MVT::i64));
3198       SDValue Abs =
3199           DAG.getNode(ISD::XOR, SL, MVT::i64,
3200                       DAG.getNode(ISD::ADD, SL, MVT::i64, Src, Sign), Sign);
3201       std::tie(Lo, Hi) = split64BitValue(Abs, DAG);
3202     }
3203     // Count the leading zeros.
3204     ShAmt = DAG.getNode(ISD::CTLZ, SL, MVT::i32, Hi);
3205     // The shift amount for signed integers is [0, 32].
3206   }
3207   // Normalize the given 64-bit integer.
3208   SDValue Norm = DAG.getNode(ISD::SHL, SL, MVT::i64, Src, ShAmt);
3209   // Split it again.
3210   std::tie(Lo, Hi) = split64BitValue(Norm, DAG);
3211   // Calculate the adjust bit for rounding.
3212   // (lo != 0) ? 1 : 0 => (lo >= 1) ? 1 : 0 => umin(1, lo)
3213   SDValue Adjust = DAG.getNode(ISD::UMIN, SL, MVT::i32,
3214                                DAG.getConstant(1, SL, MVT::i32), Lo);
3215   // Get the 32-bit normalized integer.
3216   Norm = DAG.getNode(ISD::OR, SL, MVT::i32, Hi, Adjust);
3217   // Convert the normalized 32-bit integer into f32.
3218   unsigned Opc =
3219       (Signed && Subtarget->isGCN()) ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
3220   SDValue FVal = DAG.getNode(Opc, SL, MVT::f32, Norm);
3221 
3222   // Finally, need to scale back the converted floating number as the original
3223   // 64-bit integer is converted as a 32-bit one.
3224   ShAmt = DAG.getNode(ISD::SUB, SL, MVT::i32, DAG.getConstant(32, SL, MVT::i32),
3225                       ShAmt);
3226   // On GCN, use LDEXP directly.
3227   if (Subtarget->isGCN())
3228     return DAG.getNode(ISD::FLDEXP, SL, MVT::f32, FVal, ShAmt);
3229 
3230   // Otherwise, align 'ShAmt' to the exponent part and add it into the exponent
3231   // part directly to emulate the multiplication of 2^ShAmt. That 8-bit
3232   // exponent is enough to avoid overflowing into the sign bit.
3233   SDValue Exp = DAG.getNode(ISD::SHL, SL, MVT::i32, ShAmt,
3234                             DAG.getConstant(23, SL, MVT::i32));
3235   SDValue IVal =
3236       DAG.getNode(ISD::ADD, SL, MVT::i32,
3237                   DAG.getNode(ISD::BITCAST, SL, MVT::i32, FVal), Exp);
3238   if (Signed) {
3239     // Set the sign bit.
3240     Sign = DAG.getNode(ISD::SHL, SL, MVT::i32,
3241                        DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Sign),
3242                        DAG.getConstant(31, SL, MVT::i32));
3243     IVal = DAG.getNode(ISD::OR, SL, MVT::i32, IVal, Sign);
3244   }
3245   return DAG.getNode(ISD::BITCAST, SL, MVT::f32, IVal);
3246 }
3247 
3248 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
3249                                                bool Signed) const {
3250   SDLoc SL(Op);
3251   SDValue Src = Op.getOperand(0);
3252 
3253   SDValue Lo, Hi;
3254   std::tie(Lo, Hi) = split64BitValue(Src, DAG);
3255 
3256   SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
3257                               SL, MVT::f64, Hi);
3258 
3259   SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);
3260 
3261   SDValue LdExp = DAG.getNode(ISD::FLDEXP, SL, MVT::f64, CvtHi,
3262                               DAG.getConstant(32, SL, MVT::i32));
3263   // TODO: Should this propagate fast-math-flags?
3264   return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
3265 }
3266 
3267 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
3268                                                SelectionDAG &DAG) const {
3269   // TODO: Factor out code common with LowerSINT_TO_FP.
3270   EVT DestVT = Op.getValueType();
3271   SDValue Src = Op.getOperand(0);
3272   EVT SrcVT = Src.getValueType();
3273 
3274   if (SrcVT == MVT::i16) {
3275     if (DestVT == MVT::f16)
3276       return Op;
3277     SDLoc DL(Op);
3278 
3279     // Promote src to i32
3280     SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Src);
3281     return DAG.getNode(ISD::UINT_TO_FP, DL, DestVT, Ext);
3282   }
3283 
3284   assert(SrcVT == MVT::i64 && "operation should be legal");
3285 
3286   if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
3287     SDLoc DL(Op);
3288 
3289     SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
3290     SDValue FPRoundFlag =
3291         DAG.getIntPtrConstant(0, SDLoc(Op), /*isTarget=*/true);
3292     SDValue FPRound =
3293         DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
3294 
3295     return FPRound;
3296   }
3297 
3298   if (DestVT == MVT::f32)
3299     return LowerINT_TO_FP32(Op, DAG, false);
3300 
3301   assert(DestVT == MVT::f64);
3302   return LowerINT_TO_FP64(Op, DAG, false);
3303 }
3304 
3305 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
3306                                               SelectionDAG &DAG) const {
3307   EVT DestVT = Op.getValueType();
3308 
3309   SDValue Src = Op.getOperand(0);
3310   EVT SrcVT = Src.getValueType();
3311 
3312   if (SrcVT == MVT::i16) {
3313     if (DestVT == MVT::f16)
3314       return Op;
3315 
3316     SDLoc DL(Op);
3317     // Promote src to i32
3318     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32, Src);
3319     return DAG.getNode(ISD::SINT_TO_FP, DL, DestVT, Ext);
3320   }
3321 
3322   assert(SrcVT == MVT::i64 && "operation should be legal");
3323 
3324   // TODO: Factor out code common with LowerUINT_TO_FP.
3325 
3326   if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
3327     SDLoc DL(Op);
3328     SDValue Src = Op.getOperand(0);
3329 
3330     SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
3331     SDValue FPRoundFlag =
3332         DAG.getIntPtrConstant(0, SDLoc(Op), /*isTarget=*/true);
3333     SDValue FPRound =
3334         DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
3335 
3336     return FPRound;
3337   }
3338 
3339   if (DestVT == MVT::f32)
3340     return LowerINT_TO_FP32(Op, DAG, true);
3341 
3342   assert(DestVT == MVT::f64);
3343   return LowerINT_TO_FP64(Op, DAG, true);
3344 }
3345 
3346 SDValue AMDGPUTargetLowering::LowerFP_TO_INT64(SDValue Op, SelectionDAG &DAG,
3347                                                bool Signed) const {
3348   SDLoc SL(Op);
3349 
3350   SDValue Src = Op.getOperand(0);
3351   EVT SrcVT = Src.getValueType();
3352 
3353   assert(SrcVT == MVT::f32 || SrcVT == MVT::f64);
3354 
3355   // The basic idea of converting a floating point number into a pair of 32-bit
3356   // integers is illustrated as follows:
3357   //
3358   //     tf := trunc(val);
3359   //    hif := floor(tf * 2^-32);
3360   //    lof := tf - hif * 2^32; // lof is always positive due to floor.
3361   //     hi := fptoi(hif);
3362   //     lo := fptoi(lof);
3363   //
3364   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, SrcVT, Src);
3365   SDValue Sign;
3366   if (Signed && SrcVT == MVT::f32) {
3367     // However, a 32-bit floating point number has only a 23-bit mantissa,
3368     // which is not enough to hold all the significant bits of `lof` if val is
3369     // negative. To avoid the loss of precision, we take the absolute value
3370     // after truncating and flip the result back based on the original
3371     // signedness.
3372     Sign = DAG.getNode(ISD::SRA, SL, MVT::i32,
3373                        DAG.getNode(ISD::BITCAST, SL, MVT::i32, Trunc),
3374                        DAG.getConstant(31, SL, MVT::i32));
3375     Trunc = DAG.getNode(ISD::FABS, SL, SrcVT, Trunc);
3376   }
3377 
3378   SDValue K0, K1;
3379   if (SrcVT == MVT::f64) {
3380     K0 = DAG.getConstantFP(
3381         llvm::bit_cast<double>(UINT64_C(/*2^-32*/ 0x3df0000000000000)), SL,
3382         SrcVT);
3383     K1 = DAG.getConstantFP(
3384         llvm::bit_cast<double>(UINT64_C(/*-2^32*/ 0xc1f0000000000000)), SL,
3385         SrcVT);
3386   } else {
3387     K0 = DAG.getConstantFP(
3388         llvm::bit_cast<float>(UINT32_C(/*2^-32*/ 0x2f800000)), SL, SrcVT);
3389     K1 = DAG.getConstantFP(
3390         llvm::bit_cast<float>(UINT32_C(/*-2^32*/ 0xcf800000)), SL, SrcVT);
3391   }
3392   // TODO: Should this propagate fast-math-flags?
3393   SDValue Mul = DAG.getNode(ISD::FMUL, SL, SrcVT, Trunc, K0);
3394 
3395   SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, SrcVT, Mul);
3396 
3397   SDValue Fma = DAG.getNode(ISD::FMA, SL, SrcVT, FloorMul, K1, Trunc);
3398 
3399   SDValue Hi = DAG.getNode((Signed && SrcVT == MVT::f64) ? ISD::FP_TO_SINT
3400                                                          : ISD::FP_TO_UINT,
3401                            SL, MVT::i32, FloorMul);
3402   SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
3403 
3404   SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i64,
3405                                DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi}));
3406 
3407   if (Signed && SrcVT == MVT::f32) {
3408     assert(Sign);
3409     // Flip the result based on the signedness, which is either all 0s or 1s.
3410     Sign = DAG.getNode(ISD::BITCAST, SL, MVT::i64,
3411                        DAG.getBuildVector(MVT::v2i32, SL, {Sign, Sign}));
3412     // r := xor(r, sign) - sign;
3413     Result =
3414         DAG.getNode(ISD::SUB, SL, MVT::i64,
3415                     DAG.getNode(ISD::XOR, SL, MVT::i64, Result, Sign), Sign);
3416   }
3417 
3418   return Result;
3419 }
3420 
3421 SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const {
3422   SDLoc DL(Op);
3423   SDValue N0 = Op.getOperand(0);
3424 
3425   // Convert to target node to get known bits
3426   if (N0.getValueType() == MVT::f32)
3427     return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0);
3428 
3429   if (getTargetMachine().Options.UnsafeFPMath) {
3430     // There is a generic expand for FP_TO_FP16 with unsafe fast math.
3431     return SDValue();
3432   }
3433 
3434   assert(N0.getSimpleValueType() == MVT::f64);
3435 
3436   // f64 -> f16 conversion using round-to-nearest-even rounding mode.
3437   const unsigned ExpMask = 0x7ff;
3438   const unsigned ExpBiasf64 = 1023;
3439   const unsigned ExpBiasf16 = 15;
3440   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
3441   SDValue One = DAG.getConstant(1, DL, MVT::i32);
3442   SDValue U = DAG.getNode(ISD::BITCAST, DL, MVT::i64, N0);
3443   SDValue UH = DAG.getNode(ISD::SRL, DL, MVT::i64, U,
3444                            DAG.getConstant(32, DL, MVT::i64));
3445   UH = DAG.getZExtOrTrunc(UH, DL, MVT::i32);
3446   U = DAG.getZExtOrTrunc(U, DL, MVT::i32);
3447   SDValue E = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
3448                           DAG.getConstant(20, DL, MVT::i64));
3449   E = DAG.getNode(ISD::AND, DL, MVT::i32, E,
3450                   DAG.getConstant(ExpMask, DL, MVT::i32));
3451   // Subtract the fp64 exponent bias (1023) to get the real exponent and
3452   // add the f16 bias (15) to get the biased exponent for the f16 format.
3453   E = DAG.getNode(ISD::ADD, DL, MVT::i32, E,
3454                   DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32));
3455 
3456   SDValue M = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
3457                           DAG.getConstant(8, DL, MVT::i32));
3458   M = DAG.getNode(ISD::AND, DL, MVT::i32, M,
3459                   DAG.getConstant(0xffe, DL, MVT::i32));
3460 
3461   SDValue MaskedSig = DAG.getNode(ISD::AND, DL, MVT::i32, UH,
3462                                   DAG.getConstant(0x1ff, DL, MVT::i32));
3463   MaskedSig = DAG.getNode(ISD::OR, DL, MVT::i32, MaskedSig, U);
3464 
3465   SDValue Lo40Set = DAG.getSelectCC(DL, MaskedSig, Zero, Zero, One, ISD::SETEQ);
3466   M = DAG.getNode(ISD::OR, DL, MVT::i32, M, Lo40Set);
3467 
3468   // (M != 0 ? 0x0200 : 0) | 0x7c00;
3469   SDValue I = DAG.getNode(ISD::OR, DL, MVT::i32,
3470       DAG.getSelectCC(DL, M, Zero, DAG.getConstant(0x0200, DL, MVT::i32),
3471                       Zero, ISD::SETNE), DAG.getConstant(0x7c00, DL, MVT::i32));
3472 
3473   // N = M | (E << 12);
3474   SDValue N = DAG.getNode(ISD::OR, DL, MVT::i32, M,
3475       DAG.getNode(ISD::SHL, DL, MVT::i32, E,
3476                   DAG.getConstant(12, DL, MVT::i32)));
3477 
3478   // B = clamp(1-E, 0, 13);
3479   SDValue OneSubExp = DAG.getNode(ISD::SUB, DL, MVT::i32,
3480                                   One, E);
3481   SDValue B = DAG.getNode(ISD::SMAX, DL, MVT::i32, OneSubExp, Zero);
3482   B = DAG.getNode(ISD::SMIN, DL, MVT::i32, B,
3483                   DAG.getConstant(13, DL, MVT::i32));
3484 
3485   SDValue SigSetHigh = DAG.getNode(ISD::OR, DL, MVT::i32, M,
3486                                    DAG.getConstant(0x1000, DL, MVT::i32));
3487 
3488   SDValue D = DAG.getNode(ISD::SRL, DL, MVT::i32, SigSetHigh, B);
3489   SDValue D0 = DAG.getNode(ISD::SHL, DL, MVT::i32, D, B);
3490   SDValue D1 = DAG.getSelectCC(DL, D0, SigSetHigh, One, Zero, ISD::SETNE);
3491   D = DAG.getNode(ISD::OR, DL, MVT::i32, D, D1);
3492 
3493   SDValue V = DAG.getSelectCC(DL, E, One, D, N, ISD::SETLT);
3494   SDValue VLow3 = DAG.getNode(ISD::AND, DL, MVT::i32, V,
3495                               DAG.getConstant(0x7, DL, MVT::i32));
3496   V = DAG.getNode(ISD::SRL, DL, MVT::i32, V,
3497                   DAG.getConstant(2, DL, MVT::i32));
3498   SDValue V0 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(3, DL, MVT::i32),
3499                                One, Zero, ISD::SETEQ);
3500   SDValue V1 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(5, DL, MVT::i32),
3501                                One, Zero, ISD::SETGT);
3502   V1 = DAG.getNode(ISD::OR, DL, MVT::i32, V0, V1);
3503   V = DAG.getNode(ISD::ADD, DL, MVT::i32, V, V1);
3504 
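       // E > 30 overflows the f16 exponent range, so produce infinity (0x7c00).
       // E == 1039 means the f64 exponent field was all ones (2047 - 1023 + 15),
       // i.e. the input was Inf/NaN, so use the Inf/NaN pattern I instead.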
3505   V = DAG.getSelectCC(DL, E, DAG.getConstant(30, DL, MVT::i32),
3506                       DAG.getConstant(0x7c00, DL, MVT::i32), V, ISD::SETGT);
3507   V = DAG.getSelectCC(DL, E, DAG.getConstant(1039, DL, MVT::i32),
3508                       I, V, ISD::SETEQ);
3509 
3510   // Extract the sign bit.
3511   SDValue Sign = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
3512                             DAG.getConstant(16, DL, MVT::i32));
3513   Sign = DAG.getNode(ISD::AND, DL, MVT::i32, Sign,
3514                      DAG.getConstant(0x8000, DL, MVT::i32));
3515 
3516   V = DAG.getNode(ISD::OR, DL, MVT::i32, Sign, V);
3517   return DAG.getZExtOrTrunc(V, DL, Op.getValueType());
3518 }
3519 
3520 SDValue AMDGPUTargetLowering::LowerFP_TO_INT(SDValue Op,
3521                                              SelectionDAG &DAG) const {
3522   SDValue Src = Op.getOperand(0);
3523   unsigned OpOpcode = Op.getOpcode();
3524   EVT SrcVT = Src.getValueType();
3525   EVT DestVT = Op.getValueType();
3526 
3527   // Will be selected natively
3528   if (SrcVT == MVT::f16 && DestVT == MVT::i16)
3529     return Op;
3530 
3531   // Promote i16 to i32
3532   if (DestVT == MVT::i16 && (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
3533     SDLoc DL(Op);
3534 
3535     SDValue FpToInt32 = DAG.getNode(OpOpcode, DL, MVT::i32, Src);
3536     return DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToInt32);
3537   }
3538 
3539   if (SrcVT == MVT::f16 ||
3540       (SrcVT == MVT::f32 && Src.getOpcode() == ISD::FP16_TO_FP)) {
3541     SDLoc DL(Op);
3542 
3543     SDValue FpToInt32 = DAG.getNode(OpOpcode, DL, MVT::i32, Src);
3544     unsigned Ext =
3545         OpOpcode == ISD::FP_TO_SINT ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3546     return DAG.getNode(Ext, DL, MVT::i64, FpToInt32);
3547   }
3548 
3549   if (DestVT == MVT::i64 && (SrcVT == MVT::f32 || SrcVT == MVT::f64))
3550     return LowerFP_TO_INT64(Op, DAG, OpOpcode == ISD::FP_TO_SINT);
3551 
3552   return SDValue();
3553 }
3554 
3555 SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
3556                                                      SelectionDAG &DAG) const {
3557   EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3558   MVT VT = Op.getSimpleValueType();
3559   MVT ScalarVT = VT.getScalarType();
3560 
3561   assert(VT.isVector());
3562 
3563   SDValue Src = Op.getOperand(0);
3564   SDLoc DL(Op);
3565 
3566   // TODO: Don't scalarize on Evergreen?
3567   unsigned NElts = VT.getVectorNumElements();
3568   SmallVector<SDValue, 8> Args;
3569   DAG.ExtractVectorElements(Src, Args, 0, NElts);
3570 
3571   SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
3572   for (unsigned I = 0; I < NElts; ++I)
3573     Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
3574 
3575   return DAG.getBuildVector(VT, DL, Args);
3576 }
3577 
3578 //===----------------------------------------------------------------------===//
3579 // Custom DAG optimizations
3580 //===----------------------------------------------------------------------===//
3581 
3582 static bool isU24(SDValue Op, SelectionDAG &DAG) {
3583   return AMDGPUTargetLowering::numBitsUnsigned(Op, DAG) <= 24;
3584 }
3585 
3586 static bool isI24(SDValue Op, SelectionDAG &DAG) {
3587   EVT VT = Op.getValueType();
3588   return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
3589                                      // as unsigned 24-bit values.
3590          AMDGPUTargetLowering::numBitsSigned(Op, DAG) <= 24;
3591 }
3592 
3593 static SDValue simplifyMul24(SDNode *Node24,
3594                              TargetLowering::DAGCombinerInfo &DCI) {
3595   SelectionDAG &DAG = DCI.DAG;
3596   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3597   bool IsIntrin = Node24->getOpcode() == ISD::INTRINSIC_WO_CHAIN;
3598 
3599   SDValue LHS = IsIntrin ? Node24->getOperand(1) : Node24->getOperand(0);
3600   SDValue RHS = IsIntrin ? Node24->getOperand(2) : Node24->getOperand(1);
3601   unsigned NewOpcode = Node24->getOpcode();
3602   if (IsIntrin) {
3603     unsigned IID = Node24->getConstantOperandVal(0);
3604     switch (IID) {
3605     case Intrinsic::amdgcn_mul_i24:
3606       NewOpcode = AMDGPUISD::MUL_I24;
3607       break;
3608     case Intrinsic::amdgcn_mul_u24:
3609       NewOpcode = AMDGPUISD::MUL_U24;
3610       break;
3611     case Intrinsic::amdgcn_mulhi_i24:
3612       NewOpcode = AMDGPUISD::MULHI_I24;
3613       break;
3614     case Intrinsic::amdgcn_mulhi_u24:
3615       NewOpcode = AMDGPUISD::MULHI_U24;
3616       break;
3617     default:
3618       llvm_unreachable("Expected 24-bit mul intrinsic");
3619     }
3620   }
3621 
3622   APInt Demanded = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 24);
3623 
3624   // First try to simplify using SimplifyMultipleUseDemandedBits which allows
3625   // the operands to have other uses, but will only perform simplifications that
3626   // involve bypassing some nodes for this user.
3627   SDValue DemandedLHS = TLI.SimplifyMultipleUseDemandedBits(LHS, Demanded, DAG);
3628   SDValue DemandedRHS = TLI.SimplifyMultipleUseDemandedBits(RHS, Demanded, DAG);
3629   if (DemandedLHS || DemandedRHS)
3630     return DAG.getNode(NewOpcode, SDLoc(Node24), Node24->getVTList(),
3631                        DemandedLHS ? DemandedLHS : LHS,
3632                        DemandedRHS ? DemandedRHS : RHS);
3633 
3634   // Now try SimplifyDemandedBits which can simplify the nodes used by our
3635   // operands if this node is the only user.
3636   if (TLI.SimplifyDemandedBits(LHS, Demanded, DCI))
3637     return SDValue(Node24, 0);
3638   if (TLI.SimplifyDemandedBits(RHS, Demanded, DCI))
3639     return SDValue(Node24, 0);
3640 
3641   return SDValue();
3642 }
3643 
3644 template <typename IntTy>
3645 static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset,
3646                                uint32_t Width, const SDLoc &DL) {
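       // Shift the field up to the top of the 32-bit value and back down so the
       // extract sign- or zero-extends according to the signedness of IntTy.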
3647   if (Width + Offset < 32) {
3648     uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
3649     IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
3650     return DAG.getConstant(Result, DL, MVT::i32);
3651   }
3652 
3653   return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
3654 }
3655 
3656 static bool hasVolatileUser(SDNode *Val) {
3657   for (SDNode *U : Val->uses()) {
3658     if (MemSDNode *M = dyn_cast<MemSDNode>(U)) {
3659       if (M->isVolatile())
3660         return true;
3661     }
3662   }
3663 
3664   return false;
3665 }
3666 
3667 bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const {
3668   // i32 vectors are the canonical memory type.
3669   if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT))
3670     return false;
3671 
3672   if (!VT.isByteSized())
3673     return false;
3674 
3675   unsigned Size = VT.getStoreSize();
3676 
3677   if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector())
3678     return false;
3679 
3680   if (Size == 3 || (Size > 4 && (Size % 4 != 0)))
3681     return false;
3682 
3683   return true;
3684 }
3685 
3686 // Replace a load of an illegal type with a load of a friendlier type plus a
3687 // bitcast back to the original type.
3688 SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N,
3689                                                  DAGCombinerInfo &DCI) const {
3690   if (!DCI.isBeforeLegalize())
3691     return SDValue();
3692 
3693   LoadSDNode *LN = cast<LoadSDNode>(N);
3694   if (!LN->isSimple() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN))
3695     return SDValue();
3696 
3697   SDLoc SL(N);
3698   SelectionDAG &DAG = DCI.DAG;
3699   EVT VT = LN->getMemoryVT();
3700 
3701   unsigned Size = VT.getStoreSize();
3702   Align Alignment = LN->getAlign();
3703   if (Alignment < Size && isTypeLegal(VT)) {
3704     unsigned IsFast;
3705     unsigned AS = LN->getAddressSpace();
3706 
3707     // Expand unaligned loads earlier than legalization. Due to visitation order
3708     // problems during legalization, the emitted instructions to pack and unpack
3709     // the bytes again are not eliminated in the case of an unaligned copy.
3710     if (!allowsMisalignedMemoryAccesses(
3711             VT, AS, Alignment, LN->getMemOperand()->getFlags(), &IsFast)) {
3712       if (VT.isVector())
3713         return SplitVectorLoad(SDValue(LN, 0), DAG);
3714 
3715       SDValue Ops[2];
3716       std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
3717 
3718       return DAG.getMergeValues(Ops, SDLoc(N));
3719     }
3720 
3721     if (!IsFast)
3722       return SDValue();
3723   }
3724 
3725   if (!shouldCombineMemoryType(VT))
3726     return SDValue();
3727 
3728   EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
3729 
3730   SDValue NewLoad
3731     = DAG.getLoad(NewVT, SL, LN->getChain(),
3732                   LN->getBasePtr(), LN->getMemOperand());
3733 
3734   SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad);
3735   DCI.CombineTo(N, BC, NewLoad.getValue(1));
3736   return SDValue(N, 0);
3737 }
3738 
3739 // Replace store of an illegal type with a store of a bitcast to a friendlier
3740 // type.
3741 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
3742                                                   DAGCombinerInfo &DCI) const {
3743   if (!DCI.isBeforeLegalize())
3744     return SDValue();
3745 
3746   StoreSDNode *SN = cast<StoreSDNode>(N);
3747   if (!SN->isSimple() || !ISD::isNormalStore(SN))
3748     return SDValue();
3749 
3750   EVT VT = SN->getMemoryVT();
3751   unsigned Size = VT.getStoreSize();
3752 
3753   SDLoc SL(N);
3754   SelectionDAG &DAG = DCI.DAG;
3755   Align Alignment = SN->getAlign();
3756   if (Alignment < Size && isTypeLegal(VT)) {
3757     unsigned IsFast;
3758     unsigned AS = SN->getAddressSpace();
3759 
3760     // Expand unaligned stores earlier than legalization. Due to visitation
3761     // order problems during legalization, the emitted instructions to pack and
3762     // unpack the bytes again are not eliminated in the case of an unaligned
3763     // copy.
3764     if (!allowsMisalignedMemoryAccesses(
3765             VT, AS, Alignment, SN->getMemOperand()->getFlags(), &IsFast)) {
3766       if (VT.isVector())
3767         return SplitVectorStore(SDValue(SN, 0), DAG);
3768 
3769       return expandUnalignedStore(SN, DAG);
3770     }
3771 
3772     if (!IsFast)
3773       return SDValue();
3774   }
3775 
3776   if (!shouldCombineMemoryType(VT))
3777     return SDValue();
3778 
3779   EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
3780   SDValue Val = SN->getValue();
3781 
3782   //DCI.AddToWorklist(Val.getNode());
3783 
3784   bool OtherUses = !Val.hasOneUse();
3785   SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val);
3786   if (OtherUses) {
3787     SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal);
3788     DAG.ReplaceAllUsesOfValueWith(Val, CastBack);
3789   }
3790 
3791   return DAG.getStore(SN->getChain(), SL, CastVal,
3792                       SN->getBasePtr(), SN->getMemOperand());
3793 }
3794 
3795 // FIXME: This should go in generic DAG combiner with an isTruncateFree check,
3796 // but isTruncateFree is inaccurate for i16 now because of SALU vs. VALU
3797 // issues.
3798 SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N,
3799                                                         DAGCombinerInfo &DCI) const {
3800   SelectionDAG &DAG = DCI.DAG;
3801   SDValue N0 = N->getOperand(0);
3802 
3803   // (vt2 (assertzext (truncate vt0:x), vt1)) ->
3804   //     (vt2 (truncate (assertzext vt0:x, vt1)))
3805   if (N0.getOpcode() == ISD::TRUNCATE) {
3806     SDValue N1 = N->getOperand(1);
3807     EVT ExtVT = cast<VTSDNode>(N1)->getVT();
3808     SDLoc SL(N);
3809 
3810     SDValue Src = N0.getOperand(0);
3811     EVT SrcVT = Src.getValueType();
3812     if (SrcVT.bitsGE(ExtVT)) {
3813       SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1);
3814       return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg);
3815     }
3816   }
3817 
3818   return SDValue();
3819 }
3820 
3821 SDValue AMDGPUTargetLowering::performIntrinsicWOChainCombine(
3822   SDNode *N, DAGCombinerInfo &DCI) const {
3823   unsigned IID = N->getConstantOperandVal(0);
3824   switch (IID) {
3825   case Intrinsic::amdgcn_mul_i24:
3826   case Intrinsic::amdgcn_mul_u24:
3827   case Intrinsic::amdgcn_mulhi_i24:
3828   case Intrinsic::amdgcn_mulhi_u24:
3829     return simplifyMul24(N, DCI);
3830   case Intrinsic::amdgcn_fract:
3831   case Intrinsic::amdgcn_rsq:
3832   case Intrinsic::amdgcn_rcp_legacy:
3833   case Intrinsic::amdgcn_rsq_legacy:
3834   case Intrinsic::amdgcn_rsq_clamp: {
3835     // FIXME: This is probably wrong. If src is an sNaN, it won't be quieted
3836     SDValue Src = N->getOperand(1);
3837     return Src.isUndef() ? Src : SDValue();
3838   }
3839   case Intrinsic::amdgcn_frexp_exp: {
3840     // frexp_exp (fneg x) -> frexp_exp x
3841     // frexp_exp (fabs x) -> frexp_exp x
3842     // frexp_exp (fneg (fabs x)) -> frexp_exp x
3843     SDValue Src = N->getOperand(1);
3844     SDValue PeekSign = peekFPSignOps(Src);
3845     if (PeekSign == Src)
3846       return SDValue();
3847     return SDValue(DCI.DAG.UpdateNodeOperands(N, N->getOperand(0), PeekSign),
3848                    0);
3849   }
3850   default:
3851     return SDValue();
3852   }
3853 }
3854 
3855 /// Split the 64-bit value \p LHS into two 32-bit components, and perform the
3856 /// binary operation \p Opc on each half with the corresponding constant operand.
3857 SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl(
3858   DAGCombinerInfo &DCI, const SDLoc &SL,
3859   unsigned Opc, SDValue LHS,
3860   uint32_t ValLo, uint32_t ValHi) const {
3861   SelectionDAG &DAG = DCI.DAG;
3862   SDValue Lo, Hi;
3863   std::tie(Lo, Hi) = split64BitValue(LHS, DAG);
3864 
3865   SDValue LoRHS = DAG.getConstant(ValLo, SL, MVT::i32);
3866   SDValue HiRHS = DAG.getConstant(ValHi, SL, MVT::i32);
3867 
3868   SDValue LoAnd = DAG.getNode(Opc, SL, MVT::i32, Lo, LoRHS);
3869   SDValue HiAnd = DAG.getNode(Opc, SL, MVT::i32, Hi, HiRHS);
3870 
3871   // Re-visit the ands. It's possible we eliminated one of them and it could
3872   // simplify the vector.
3873   DCI.AddToWorklist(Lo.getNode());
3874   DCI.AddToWorklist(Hi.getNode());
3875 
3876   SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
3877   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3878 }
3879 
3880 SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
3881                                                 DAGCombinerInfo &DCI) const {
3882   EVT VT = N->getValueType(0);
3883 
3884   ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3885   if (!RHS)
3886     return SDValue();
3887 
3888   SDValue LHS = N->getOperand(0);
3889   unsigned RHSVal = RHS->getZExtValue();
3890   if (!RHSVal)
3891     return LHS;
3892 
3893   SDLoc SL(N);
3894   SelectionDAG &DAG = DCI.DAG;
3895 
3896   switch (LHS->getOpcode()) {
3897   default:
3898     break;
3899   case ISD::ZERO_EXTEND:
3900   case ISD::SIGN_EXTEND:
3901   case ISD::ANY_EXTEND: {
3902     SDValue X = LHS->getOperand(0);
3903 
3904     if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
3905         isOperationLegal(ISD::BUILD_VECTOR, MVT::v2i16)) {
3906       // Prefer build_vector as the canonical form if packed types are legal.
3907       // (shl ([asz]ext i16:x), 16) -> build_vector 0, x
3908       SDValue Vec = DAG.getBuildVector(MVT::v2i16, SL,
3909        { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) });
3910       return DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
3911     }
3912 
3913     // shl (ext x) => zext (shl x), if shift does not overflow int
3914     if (VT != MVT::i64)
3915       break;
3916     KnownBits Known = DAG.computeKnownBits(X);
3917     unsigned LZ = Known.countMinLeadingZeros();
3918     if (LZ < RHSVal)
3919       break;
3920     EVT XVT = X.getValueType();
3921     SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
3922     return DAG.getZExtOrTrunc(Shl, SL, VT);
3923   }
3924   }
3925 
3926   if (VT != MVT::i64)
3927     return SDValue();
3928 
3929   // i64 (shl x, C) -> (build_pair 0, (shl x, C -32))
3930 
3931   // On some subtargets, 64-bit shift is a quarter rate instruction. In the
3932   // common case, splitting this into a move and a 32-bit shift is faster and
3933   // the same code size.
3934   if (RHSVal < 32)
3935     return SDValue();
3936 
3937   SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);
3938 
3939   SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
3940   SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
3941 
3942   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3943 
3944   SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
3945   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3946 }
3947 
3948 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
3949                                                 DAGCombinerInfo &DCI) const {
3950   if (N->getValueType(0) != MVT::i64)
3951     return SDValue();
3952 
3953   const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3954   if (!RHS)
3955     return SDValue();
3956 
3957   SelectionDAG &DAG = DCI.DAG;
3958   SDLoc SL(N);
3959   unsigned RHSVal = RHS->getZExtValue();
3960 
3961   // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31)
3962   if (RHSVal == 32) {
3963     SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3964     SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3965                                    DAG.getConstant(31, SL, MVT::i32));
3966 
3967     SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift});
3968     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3969   }
3970 
3971   // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
3972   if (RHSVal == 63) {
3973     SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3974     SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3975                                    DAG.getConstant(31, SL, MVT::i32));
3976     SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift});
3977     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3978   }
3979 
3980   return SDValue();
3981 }
3982 
3983 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
3984                                                 DAGCombinerInfo &DCI) const {
3985   auto *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3986   if (!RHS)
3987     return SDValue();
3988 
3989   EVT VT = N->getValueType(0);
3990   SDValue LHS = N->getOperand(0);
3991   unsigned ShiftAmt = RHS->getZExtValue();
3992   SelectionDAG &DAG = DCI.DAG;
3993   SDLoc SL(N);
3994 
3995   // fold (srl (and x, (c1 << c2)), c2) -> (and (srl x, c2), c1)
3996   // This improves the ability to match BFE patterns in isel.
3997   if (LHS.getOpcode() == ISD::AND) {
3998     if (auto *Mask = dyn_cast<ConstantSDNode>(LHS.getOperand(1))) {
3999       unsigned MaskIdx, MaskLen;
4000       if (Mask->getAPIntValue().isShiftedMask(MaskIdx, MaskLen) &&
4001           MaskIdx == ShiftAmt) {
4002         return DAG.getNode(
4003             ISD::AND, SL, VT,
4004             DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(0), N->getOperand(1)),
4005             DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(1), N->getOperand(1)));
4006       }
4007     }
4008   }
4009 
4010   if (VT != MVT::i64)
4011     return SDValue();
4012 
4013   if (ShiftAmt < 32)
4014     return SDValue();
4015 
4016   // srl i64:x, C for C >= 32
4017   // =>
4018   //   build_pair (srl hi_32(x), C - 32), 0
4019   SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
4020 
4021   SDValue Hi = getHiHalf64(LHS, DAG);
4022 
4023   SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
4024   SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);
4025 
4026   SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});
4027 
4028   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
4029 }
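
// Illustrative sketch (not from the upstream source) of the two folds above:
//   srl (and x, 0x00ff0000), 16  ->  and (srl x, 16), 0x00ff
//     (0x00ff0000 is a shifted mask with MaskIdx == 16 == ShiftAmt, so shifting the
//      mask down exposes a BFE-style extract of bits [16, 23]).
//   srl i64:x, 40  ->  build_pair (srl hi_32(x), 8), 0
//     e.g. x = 0x1122334455667788: hi_32(x) >> 8 == 0x00112233, giving
//     0x0000000000112233, which matches the 64-bit shift.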
4030 
4031 SDValue AMDGPUTargetLowering::performTruncateCombine(
4032   SDNode *N, DAGCombinerInfo &DCI) const {
4033   SDLoc SL(N);
4034   SelectionDAG &DAG = DCI.DAG;
4035   EVT VT = N->getValueType(0);
4036   SDValue Src = N->getOperand(0);
4037 
4038   // vt1 (truncate (bitcast (build_vector vt0:x, ...))) -> vt1 (bitcast vt0:x)
4039   if (Src.getOpcode() == ISD::BITCAST && !VT.isVector()) {
4040     SDValue Vec = Src.getOperand(0);
4041     if (Vec.getOpcode() == ISD::BUILD_VECTOR) {
4042       SDValue Elt0 = Vec.getOperand(0);
4043       EVT EltVT = Elt0.getValueType();
4044       if (VT.getFixedSizeInBits() <= EltVT.getFixedSizeInBits()) {
4045         if (EltVT.isFloatingPoint()) {
4046           Elt0 = DAG.getNode(ISD::BITCAST, SL,
4047                              EltVT.changeTypeToInteger(), Elt0);
4048         }
4049 
4050         return DAG.getNode(ISD::TRUNCATE, SL, VT, Elt0);
4051       }
4052     }
4053   }
4054 
4055   // Equivalent of above for accessing the high element of a vector as an
4056   // integer operation.
4057   // trunc (srl (bitcast (build_vector x, y))), 16 -> trunc (bitcast y)
4058   if (Src.getOpcode() == ISD::SRL && !VT.isVector()) {
4059     if (auto K = isConstOrConstSplat(Src.getOperand(1))) {
4060       if (2 * K->getZExtValue() == Src.getValueType().getScalarSizeInBits()) {
4061         SDValue BV = stripBitcast(Src.getOperand(0));
4062         if (BV.getOpcode() == ISD::BUILD_VECTOR &&
4063             BV.getValueType().getVectorNumElements() == 2) {
4064           SDValue SrcElt = BV.getOperand(1);
4065           EVT SrcEltVT = SrcElt.getValueType();
4066           if (SrcEltVT.isFloatingPoint()) {
4067             SrcElt = DAG.getNode(ISD::BITCAST, SL,
4068                                  SrcEltVT.changeTypeToInteger(), SrcElt);
4069           }
4070 
4071           return DAG.getNode(ISD::TRUNCATE, SL, VT, SrcElt);
4072         }
4073       }
4074     }
4075   }
4076 
4077   // Partially shrink 64-bit shifts to 32-bit if reduced to 16-bit.
4078   //
4079   // i16 (trunc (srl i64:x, K)), K <= 16 ->
4080   //     i16 (trunc (srl (i32 (trunc x)), K))
4081   if (VT.getScalarSizeInBits() < 32) {
4082     EVT SrcVT = Src.getValueType();
4083     if (SrcVT.getScalarSizeInBits() > 32 &&
4084         (Src.getOpcode() == ISD::SRL ||
4085          Src.getOpcode() == ISD::SRA ||
4086          Src.getOpcode() == ISD::SHL)) {
4087       SDValue Amt = Src.getOperand(1);
4088       KnownBits Known = DAG.computeKnownBits(Amt);
4089 
4090       // - For left shifts, do the transform as long as the shift
4091       //   amount is still legal for i32, so when ShiftAmt < 32 (<= 31)
4092       // - For right shift, do it if ShiftAmt <= (32 - Size) to avoid
4093       //   losing information stored in the high bits when truncating.
4094       const unsigned MaxCstSize =
4095           (Src.getOpcode() == ISD::SHL) ? 31 : (32 - VT.getScalarSizeInBits());
4096       if (Known.getMaxValue().ule(MaxCstSize)) {
4097         EVT MidVT = VT.isVector() ?
4098           EVT::getVectorVT(*DAG.getContext(), MVT::i32,
4099                            VT.getVectorNumElements()) : MVT::i32;
4100 
4101         EVT NewShiftVT = getShiftAmountTy(MidVT, DAG.getDataLayout());
4102         SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MidVT,
4103                                     Src.getOperand(0));
4104         DCI.AddToWorklist(Trunc.getNode());
4105 
4106         if (Amt.getValueType() != NewShiftVT) {
4107           Amt = DAG.getZExtOrTrunc(Amt, SL, NewShiftVT);
4108           DCI.AddToWorklist(Amt.getNode());
4109         }
4110 
4111         SDValue ShrunkShift = DAG.getNode(Src.getOpcode(), SL, MidVT,
4112                                           Trunc, Amt);
4113         return DAG.getNode(ISD::TRUNCATE, SL, VT, ShrunkShift);
4114       }
4115     }
4116   }
4117 
4118   return SDValue();
4119 }
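
// Worked example (illustrative only) for the shift-narrowing fold above:
//   i16 (trunc (srl i64:x, 8)) with x = 0x1122334455667788
//     direct:   (x >> 8) == 0x0011223344556677, low 16 bits == 0x6677
//     narrowed: i32 (trunc x) == 0x55667788; >> 8 == 0x00556677; low 16 bits == 0x6677
// The results agree because, for right shifts with amount <= 32 - result width, the
// truncated result only depends on bits that survive the initial trunc to i32.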
4120 
4121 // We need to specifically handle i64 mul here to avoid unnecessary conversion
4122 // instructions. If we only match on the legalized i64 mul expansion,
4123 // SimplifyDemandedBits will be unable to remove them because there will be
4124 // multiple uses due to the separate mul + mulh[su].
4125 static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL,
4126                         SDValue N0, SDValue N1, unsigned Size, bool Signed) {
4127   if (Size <= 32) {
4128     unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
4129     return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
4130   }
4131 
4132   unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
4133   unsigned MulHiOpc = Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;
4134 
4135   SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1);
4136   SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1);
4137 
4138   return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64, MulLo, MulHi);
4139 }
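
// Illustrative note (not from the upstream source): two 24-bit operands produce at
// most a 48-bit product, so for Size > 32 the {MUL, MULHI} pair reconstructs the
// full result. E.g. with a = b = 0xffffff (u24 max):
//   a * b              == 0x0000ffff'fe000001
//   MUL_U24(a, b)      == 0xfe000001   (low 32 bits of the product)
//   MULHI_U24(a, b)    == 0x0000ffff   (high 32 bits, i.e. bits [32, 63] of the product)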
4140 
4141 /// If \p V is an add of a constant 1, returns the other operand. Otherwise
4142 /// returns SDValue().
4143 static SDValue getAddOneOp(const SDNode *V) {
4144   if (V->getOpcode() != ISD::ADD)
4145     return SDValue();
4146 
4147   return isOneConstant(V->getOperand(1)) ? V->getOperand(0) : SDValue();
4148 }
4149 
4150 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
4151                                                 DAGCombinerInfo &DCI) const {
4152   EVT VT = N->getValueType(0);
4153 
4154   // Don't generate 24-bit multiplies on values that are in SGPRs, since
4155   // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
4156   // unnecessarily). isDivergent() is used as an approximation of whether the
4157   // value is in an SGPR.
4158   if (!N->isDivergent())
4159     return SDValue();
4160 
4161   unsigned Size = VT.getSizeInBits();
4162   if (VT.isVector() || Size > 64)
4163     return SDValue();
4164 
4165   SelectionDAG &DAG = DCI.DAG;
4166   SDLoc DL(N);
4167 
4168   SDValue N0 = N->getOperand(0);
4169   SDValue N1 = N->getOperand(1);
4170 
4171   // Undo InstCombine canonicalize X * (Y + 1) -> X * Y + X to enable mad
4172   // matching.
4173 
4174   // mul x, (add y, 1) -> add (mul x, y), x
4175   auto IsFoldableAdd = [](SDValue V) -> SDValue {
4176     SDValue AddOp = getAddOneOp(V.getNode());
4177     if (!AddOp)
4178       return SDValue();
4179 
4180     if (V.hasOneUse() || all_of(V->uses(), [](const SDNode *U) -> bool {
4181           return U->getOpcode() == ISD::MUL;
4182         }))
4183       return AddOp;
4184 
4185     return SDValue();
4186   };
4187 
4188   // FIXME: The selection pattern does not properly check for commuted
4189   // operands, so we have to place the mul on the LHS.
4190   if (SDValue MulOper = IsFoldableAdd(N0)) {
4191     SDValue MulVal = DAG.getNode(N->getOpcode(), DL, VT, N1, MulOper);
4192     return DAG.getNode(ISD::ADD, DL, VT, MulVal, N1);
4193   }
4194 
4195   if (SDValue MulOper = IsFoldableAdd(N1)) {
4196     SDValue MulVal = DAG.getNode(N->getOpcode(), DL, VT, N0, MulOper);
4197     return DAG.getNode(ISD::ADD, DL, VT, MulVal, N0);
4198   }
4199 
4200   // Skip if already mul24.
4201   if (N->getOpcode() != ISD::MUL)
4202     return SDValue();
4203 
4204   // There are i16 integer mul/mad.
4205   if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16))
4206     return SDValue();
4207 
4208   // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
4209   // in the source into any_extends if the result of the mul is truncated. Since
4210   // we can assume the high bits are whatever we want, use the underlying value
4211   // to keep the unknown high bits from interfering.
4212   if (N0.getOpcode() == ISD::ANY_EXTEND)
4213     N0 = N0.getOperand(0);
4214 
4215   if (N1.getOpcode() == ISD::ANY_EXTEND)
4216     N1 = N1.getOperand(0);
4217 
4218   SDValue Mul;
4219 
4220   if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
4221     N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
4222     N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
4223     Mul = getMul24(DAG, DL, N0, N1, Size, false);
4224   } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
4225     N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
4226     N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
4227     Mul = getMul24(DAG, DL, N0, N1, Size, true);
4228   } else {
4229     return SDValue();
4230   }
4231 
4232   // We need to use sext even for MUL_U24, because MUL_U24 is used
4233   // for signed multiplies of 8- and 16-bit types.
4234   return DAG.getSExtOrTrunc(Mul, DL, VT);
4235 }
4236 
4237 SDValue
4238 AMDGPUTargetLowering::performMulLoHiCombine(SDNode *N,
4239                                             DAGCombinerInfo &DCI) const {
4240   if (N->getValueType(0) != MVT::i32)
4241     return SDValue();
4242 
4243   SelectionDAG &DAG = DCI.DAG;
4244   SDLoc DL(N);
4245 
4246   SDValue N0 = N->getOperand(0);
4247   SDValue N1 = N->getOperand(1);
4248 
4249   // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
4250   // in the source into any_extends if the result of the mul is truncated. Since
4251   // we can assume the high bits are whatever we want, use the underlying value
4252   // to keep the unknown high bits from interfering.
4253   if (N0.getOpcode() == ISD::ANY_EXTEND)
4254     N0 = N0.getOperand(0);
4255   if (N1.getOpcode() == ISD::ANY_EXTEND)
4256     N1 = N1.getOperand(0);
4257 
4258   // Try to use two fast 24-bit multiplies (one for each half of the result)
4259   // instead of one slow extending multiply.
4260   unsigned LoOpcode, HiOpcode;
4261   if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
4262     N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
4263     N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
4264     LoOpcode = AMDGPUISD::MUL_U24;
4265     HiOpcode = AMDGPUISD::MULHI_U24;
4266   } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
4267     N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
4268     N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
4269     LoOpcode = AMDGPUISD::MUL_I24;
4270     HiOpcode = AMDGPUISD::MULHI_I24;
4271   } else {
4272     return SDValue();
4273   }
4274 
4275   SDValue Lo = DAG.getNode(LoOpcode, DL, MVT::i32, N0, N1);
4276   SDValue Hi = DAG.getNode(HiOpcode, DL, MVT::i32, N0, N1);
4277   DCI.CombineTo(N, Lo, Hi);
4278   return SDValue(N, 0);
4279 }
4280 
4281 SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N,
4282                                                   DAGCombinerInfo &DCI) const {
4283   EVT VT = N->getValueType(0);
4284 
4285   if (!Subtarget->hasMulI24() || VT.isVector())
4286     return SDValue();
4287 
4288   // Don't generate 24-bit multiplies on values that are in SGPRs, since
4289   // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
4290   // unnecessarily). isDivergent() is used as an approximation of whether the
4291   // value is in an SGPR.
4292   // This doesn't apply if no s_mul_hi is available (since we'll end up with a
4293   // VALU op anyway)
4294   if (Subtarget->hasSMulHi() && !N->isDivergent())
4295     return SDValue();
4296 
4297   SelectionDAG &DAG = DCI.DAG;
4298   SDLoc DL(N);
4299 
4300   SDValue N0 = N->getOperand(0);
4301   SDValue N1 = N->getOperand(1);
4302 
4303   if (!isI24(N0, DAG) || !isI24(N1, DAG))
4304     return SDValue();
4305 
4306   N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
4307   N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
4308 
4309   SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1);
4310   DCI.AddToWorklist(Mulhi.getNode());
4311   return DAG.getSExtOrTrunc(Mulhi, DL, VT);
4312 }
4313 
4314 SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N,
4315                                                   DAGCombinerInfo &DCI) const {
4316   EVT VT = N->getValueType(0);
4317 
4318   if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32)
4319     return SDValue();
4320 
4321   // Don't generate 24-bit multiplies on values that are in SGPRs, since
4322   // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
4323   // unnecessarily). isDivergent() is used as an approximation of whether the
4324   // value is in an SGPR.
4325   // This doesn't apply if no s_mul_hi is available (since we'll end up with a
4326   // VALU op anyway)
4327   if (Subtarget->hasSMulHi() && !N->isDivergent())
4328     return SDValue();
4329 
4330   SelectionDAG &DAG = DCI.DAG;
4331   SDLoc DL(N);
4332 
4333   SDValue N0 = N->getOperand(0);
4334   SDValue N1 = N->getOperand(1);
4335 
4336   if (!isU24(N0, DAG) || !isU24(N1, DAG))
4337     return SDValue();
4338 
4339   N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
4340   N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
4341 
4342   SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1);
4343   DCI.AddToWorklist(Mulhi.getNode());
4344   return DAG.getZExtOrTrunc(Mulhi, DL, VT);
4345 }
4346 
4347 SDValue AMDGPUTargetLowering::getFFBX_U32(SelectionDAG &DAG,
4348                                           SDValue Op,
4349                                           const SDLoc &DL,
4350                                           unsigned Opc) const {
4351   EVT VT = Op.getValueType();
4352   EVT LegalVT = getTypeToTransformTo(*DAG.getContext(), VT);
4353   if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() &&
4354                               LegalVT != MVT::i16))
4355     return SDValue();
4356 
4357   if (VT != MVT::i32)
4358     Op = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Op);
4359 
4360   SDValue FFBX = DAG.getNode(Opc, DL, MVT::i32, Op);
4361   if (VT != MVT::i32)
4362     FFBX = DAG.getNode(ISD::TRUNCATE, DL, VT, FFBX);
4363 
4364   return FFBX;
4365 }
4366 
4367 // The native instructions return -1 on 0 input. Optimize out a select that
4368 // produces -1 on 0.
4369 //
4370 // TODO: If zero is not undef, we could also do this if the output is compared
4371 // against the bitwidth.
4372 //
4373 // TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
4374 SDValue AMDGPUTargetLowering::performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond,
4375                                                  SDValue LHS, SDValue RHS,
4376                                                  DAGCombinerInfo &DCI) const {
4377   if (!isNullConstant(Cond.getOperand(1)))
4378     return SDValue();
4379 
4380   SelectionDAG &DAG = DCI.DAG;
4381   ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
4382   SDValue CmpLHS = Cond.getOperand(0);
4383 
4384   // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
4385   // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_u32 x
4386   if (CCOpcode == ISD::SETEQ &&
4387       (isCtlzOpc(RHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) &&
4388       RHS.getOperand(0) == CmpLHS && isAllOnesConstant(LHS)) {
4389     unsigned Opc =
4390         isCttzOpc(RHS.getOpcode()) ? AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
4391     return getFFBX_U32(DAG, CmpLHS, SL, Opc);
4392   }
4393 
4394   // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
4395   // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_u32 x
4396   if (CCOpcode == ISD::SETNE &&
4397       (isCtlzOpc(LHS.getOpcode()) || isCttzOpc(LHS.getOpcode())) &&
4398       LHS.getOperand(0) == CmpLHS && isAllOnesConstant(RHS)) {
4399     unsigned Opc =
4400         isCttzOpc(LHS.getOpcode()) ? AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
4401 
4402     return getFFBX_U32(DAG, CmpLHS, SL, Opc);
4403   }
4404 
4405   return SDValue();
4406 }
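
// Sketch of why the select is redundant (illustrative only): ffbh_u32 / ffbl_b32
// already return -1 (all ones) for a zero input, so
//   select (x == 0), -1, (ctlz_zero_undef x)
// computes exactly ffbh_u32(x) for every x, and the compare can be dropped.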
4407 
4408 static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI,
4409                                          unsigned Op,
4410                                          const SDLoc &SL,
4411                                          SDValue Cond,
4412                                          SDValue N1,
4413                                          SDValue N2) {
4414   SelectionDAG &DAG = DCI.DAG;
4415   EVT VT = N1.getValueType();
4416 
4417   SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, Cond,
4418                                   N1.getOperand(0), N2.getOperand(0));
4419   DCI.AddToWorklist(NewSelect.getNode());
4420   return DAG.getNode(Op, SL, VT, NewSelect);
4421 }
4422 
4423 // Pull a free FP operation out of a select so it may fold into uses.
4424 //
4425 // select c, (fneg x), (fneg y) -> fneg (select c, x, y)
4426 // select c, (fneg x), k -> fneg (select c, x, (fneg k))
4427 //
4428 // select c, (fabs x), (fabs y) -> fabs (select c, x, y)
4429 // select c, (fabs x), +k -> fabs (select c, x, k)
4430 SDValue
4431 AMDGPUTargetLowering::foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
4432                                            SDValue N) const {
4433   SelectionDAG &DAG = DCI.DAG;
4434   SDValue Cond = N.getOperand(0);
4435   SDValue LHS = N.getOperand(1);
4436   SDValue RHS = N.getOperand(2);
4437 
4438   EVT VT = N.getValueType();
4439   if ((LHS.getOpcode() == ISD::FABS && RHS.getOpcode() == ISD::FABS) ||
4440       (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG)) {
4441     if (!AMDGPUTargetLowering::allUsesHaveSourceMods(N.getNode()))
4442       return SDValue();
4443 
4444     return distributeOpThroughSelect(DCI, LHS.getOpcode(),
4445                                      SDLoc(N), Cond, LHS, RHS);
4446   }
4447 
4448   bool Inv = false;
4449   if (RHS.getOpcode() == ISD::FABS || RHS.getOpcode() == ISD::FNEG) {
4450     std::swap(LHS, RHS);
4451     Inv = true;
4452   }
4453 
4454   // TODO: Support vector constants.
4455   ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
4456   if ((LHS.getOpcode() == ISD::FNEG || LHS.getOpcode() == ISD::FABS) && CRHS &&
4457       !selectSupportsSourceMods(N.getNode())) {
4458     SDLoc SL(N);
4459     // If one side is an fneg/fabs and the other is a constant, we can push the
4460     // fneg/fabs down. If it's an fabs, the constant needs to be non-negative.
4461     SDValue NewLHS = LHS.getOperand(0);
4462     SDValue NewRHS = RHS;
4463 
4464     // Careful: if the neg can be folded up, don't try to pull it back down.
4465     bool ShouldFoldNeg = true;
4466 
4467     if (NewLHS.hasOneUse()) {
4468       unsigned Opc = NewLHS.getOpcode();
4469       if (LHS.getOpcode() == ISD::FNEG && fnegFoldsIntoOp(NewLHS.getNode()))
4470         ShouldFoldNeg = false;
4471       if (LHS.getOpcode() == ISD::FABS && Opc == ISD::FMUL)
4472         ShouldFoldNeg = false;
4473     }
4474 
4475     if (ShouldFoldNeg) {
4476       if (LHS.getOpcode() == ISD::FABS && CRHS->isNegative())
4477         return SDValue();
4478 
4479       // We're going to be forced to use a source modifier anyway, so there's no
4480       // point in pulling the negate out unless we can get a size reduction by
4481       // negating the constant.
4482       //
4483       // TODO: Generalize to use getCheaperNegatedExpression which doesn't know
4484       // about cheaper constants.
4485       if (NewLHS.getOpcode() == ISD::FABS &&
4486           getConstantNegateCost(CRHS) != NegatibleCost::Cheaper)
4487         return SDValue();
4488 
4489       if (!AMDGPUTargetLowering::allUsesHaveSourceMods(N.getNode()))
4490         return SDValue();
4491 
4492       if (LHS.getOpcode() == ISD::FNEG)
4493         NewRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
4494 
4495       if (Inv)
4496         std::swap(NewLHS, NewRHS);
4497 
4498       SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT,
4499                                       Cond, NewLHS, NewRHS);
4500       DCI.AddToWorklist(NewSelect.getNode());
4501       return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect);
4502     }
4503   }
4504 
4505   return SDValue();
4506 }
4507 
4508 SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
4509                                                    DAGCombinerInfo &DCI) const {
4510   if (SDValue Folded = foldFreeOpFromSelect(DCI, SDValue(N, 0)))
4511     return Folded;
4512 
4513   SDValue Cond = N->getOperand(0);
4514   if (Cond.getOpcode() != ISD::SETCC)
4515     return SDValue();
4516 
4517   EVT VT = N->getValueType(0);
4518   SDValue LHS = Cond.getOperand(0);
4519   SDValue RHS = Cond.getOperand(1);
4520   SDValue CC = Cond.getOperand(2);
4521 
4522   SDValue True = N->getOperand(1);
4523   SDValue False = N->getOperand(2);
4524 
4525   if (Cond.hasOneUse()) { // TODO: Look for multiple select uses.
4526     SelectionDAG &DAG = DCI.DAG;
4527     if (DAG.isConstantValueOfAnyType(True) &&
4528         !DAG.isConstantValueOfAnyType(False)) {
4529       // Swap cmp + select pair to move constant to false input.
4530       // This will allow using VOPC cndmasks more often.
4531       // select (setcc x, y), k, x -> select (setccinv x, y), x, k
4532 
4533       SDLoc SL(N);
4534       ISD::CondCode NewCC =
4535           getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), LHS.getValueType());
4536 
4537       SDValue NewCond = DAG.getSetCC(SL, Cond.getValueType(), LHS, RHS, NewCC);
4538       return DAG.getNode(ISD::SELECT, SL, VT, NewCond, False, True);
4539     }
4540 
4541     if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
4542       SDValue MinMax
4543         = combineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
4544       // Revisit this node so we can catch min3/max3/med3 patterns.
4545       //DCI.AddToWorklist(MinMax.getNode());
4546       return MinMax;
4547     }
4548   }
4549 
4550   // There's no reason not to do this if the condition has other uses.
4551   return performCtlz_CttzCombine(SDLoc(N), Cond, True, False, DCI);
4552 }
4553 
4554 static bool isInv2Pi(const APFloat &APF) {
4555   static const APFloat KF16(APFloat::IEEEhalf(), APInt(16, 0x3118));
4556   static const APFloat KF32(APFloat::IEEEsingle(), APInt(32, 0x3e22f983));
4557   static const APFloat KF64(APFloat::IEEEdouble(), APInt(64, 0x3fc45f306dc9c882));
4558 
4559   return APF.bitwiseIsEqual(KF16) ||
4560          APF.bitwiseIsEqual(KF32) ||
4561          APF.bitwiseIsEqual(KF64);
4562 }
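
// For reference (illustrative only): the bit patterns above are 1/(2*pi) ~= 0.15915494
// encoded as IEEE-754 half (0x3118), single (0x3e22f983) and double
// (0x3fc45f306dc9c882) precision values.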
4563 
4564 // -0.0 and -1.0 / (2.0 * pi) do not have inline immediates, so there is an
4565 // additional cost to negate +0.0 and +1.0 / (2.0 * pi).
4566 TargetLowering::NegatibleCost
4567 AMDGPUTargetLowering::getConstantNegateCost(const ConstantFPSDNode *C) const {
4568   if (C->isZero())
4569     return C->isNegative() ? NegatibleCost::Cheaper : NegatibleCost::Expensive;
4570 
4571   if (Subtarget->hasInv2PiInlineImm() && isInv2Pi(C->getValueAPF()))
4572     return C->isNegative() ? NegatibleCost::Cheaper : NegatibleCost::Expensive;
4573 
4574   return NegatibleCost::Neutral;
4575 }
4576 
4577 bool AMDGPUTargetLowering::isConstantCostlierToNegate(SDValue N) const {
4578   if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N))
4579     return getConstantNegateCost(C) == NegatibleCost::Expensive;
4580   return false;
4581 }
4582 
4583 bool AMDGPUTargetLowering::isConstantCheaperToNegate(SDValue N) const {
4584   if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N))
4585     return getConstantNegateCost(C) == NegatibleCost::Cheaper;
4586   return false;
4587 }
4588 
4589 static unsigned inverseMinMax(unsigned Opc) {
4590   switch (Opc) {
4591   case ISD::FMAXNUM:
4592     return ISD::FMINNUM;
4593   case ISD::FMINNUM:
4594     return ISD::FMAXNUM;
4595   case ISD::FMAXNUM_IEEE:
4596     return ISD::FMINNUM_IEEE;
4597   case ISD::FMINNUM_IEEE:
4598     return ISD::FMAXNUM_IEEE;
4599   case ISD::FMAXIMUM:
4600     return ISD::FMINIMUM;
4601   case ISD::FMINIMUM:
4602     return ISD::FMAXIMUM;
4603   case AMDGPUISD::FMAX_LEGACY:
4604     return AMDGPUISD::FMIN_LEGACY;
4605   case AMDGPUISD::FMIN_LEGACY:
4606     return  AMDGPUISD::FMAX_LEGACY;
4607   default:
4608     llvm_unreachable("invalid min/max opcode");
4609   }
4610 }
4611 
4612 /// \return true if it's profitable to try to push an fneg into its source
4613 /// instruction.
4614 bool AMDGPUTargetLowering::shouldFoldFNegIntoSrc(SDNode *N, SDValue N0) {
4615   // If the input has multiple uses and we can either fold the negate down, or
4616   // the other uses cannot, give up. This both prevents unprofitable
4617   // transformations and infinite loops: we won't repeatedly try to fold around
4618   // a negate that has no 'good' form.
4619   if (N0.hasOneUse()) {
4620     // This may be able to fold into the source, but at a code size cost. Don't
4621     // fold if the fold into the user is free.
4622     if (allUsesHaveSourceMods(N, 0))
4623       return false;
4624   } else {
4625     if (fnegFoldsIntoOp(N0.getNode()) &&
4626         (allUsesHaveSourceMods(N) || !allUsesHaveSourceMods(N0.getNode())))
4627       return false;
4628   }
4629 
4630   return true;
4631 }
4632 
4633 SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
4634                                                  DAGCombinerInfo &DCI) const {
4635   SelectionDAG &DAG = DCI.DAG;
4636   SDValue N0 = N->getOperand(0);
4637   EVT VT = N->getValueType(0);
4638 
4639   unsigned Opc = N0.getOpcode();
4640 
4641   if (!shouldFoldFNegIntoSrc(N, N0))
4642     return SDValue();
4643 
4644   SDLoc SL(N);
4645   switch (Opc) {
4646   case ISD::FADD: {
4647     if (!mayIgnoreSignedZero(N0))
4648       return SDValue();
4649 
4650     // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y))
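    // Note (illustrative, not from the upstream source): the signed-zero check above
    // is required because the distribution is not exact for signed zeros, e.g. with
    // x == 2.0 and y == -2.0:
    //   fneg (fadd 2.0, -2.0)        == -(+0.0) == -0.0
    //   fadd (fneg 2.0), (fneg -2.0) == -2.0 + 2.0 == +0.0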
4651     SDValue LHS = N0.getOperand(0);
4652     SDValue RHS = N0.getOperand(1);
4653 
4654     if (LHS.getOpcode() != ISD::FNEG)
4655       LHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
4656     else
4657       LHS = LHS.getOperand(0);
4658 
4659     if (RHS.getOpcode() != ISD::FNEG)
4660       RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
4661     else
4662       RHS = RHS.getOperand(0);
4663 
4664     SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags());
4665     if (Res.getOpcode() != ISD::FADD)
4666       return SDValue(); // Op got folded away.
4667     if (!N0.hasOneUse())
4668       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
4669     return Res;
4670   }
4671   case ISD::FMUL:
4672   case AMDGPUISD::FMUL_LEGACY: {
4673     // (fneg (fmul x, y)) -> (fmul x, (fneg y))
4674     // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y))
4675     SDValue LHS = N0.getOperand(0);
4676     SDValue RHS = N0.getOperand(1);
4677 
4678     if (LHS.getOpcode() == ISD::FNEG)
4679       LHS = LHS.getOperand(0);
4680     else if (RHS.getOpcode() == ISD::FNEG)
4681       RHS = RHS.getOperand(0);
4682     else
4683       RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
4684 
4685     SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags());
4686     if (Res.getOpcode() != Opc)
4687       return SDValue(); // Op got folded away.
4688     if (!N0.hasOneUse())
4689       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
4690     return Res;
4691   }
4692   case ISD::FMA:
4693   case ISD::FMAD: {
4694     // TODO: handle llvm.amdgcn.fma.legacy
4695     if (!mayIgnoreSignedZero(N0))
4696       return SDValue();
4697 
4698     // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z))
4699     SDValue LHS = N0.getOperand(0);
4700     SDValue MHS = N0.getOperand(1);
4701     SDValue RHS = N0.getOperand(2);
4702 
4703     if (LHS.getOpcode() == ISD::FNEG)
4704       LHS = LHS.getOperand(0);
4705     else if (MHS.getOpcode() == ISD::FNEG)
4706       MHS = MHS.getOperand(0);
4707     else
4708       MHS = DAG.getNode(ISD::FNEG, SL, VT, MHS);
4709 
4710     if (RHS.getOpcode() != ISD::FNEG)
4711       RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
4712     else
4713       RHS = RHS.getOperand(0);
4714 
4715     SDValue Res = DAG.getNode(Opc, SL, VT, LHS, MHS, RHS);
4716     if (Res.getOpcode() != Opc)
4717       return SDValue(); // Op got folded away.
4718     if (!N0.hasOneUse())
4719       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
4720     return Res;
4721   }
4722   case ISD::FMAXNUM:
4723   case ISD::FMINNUM:
4724   case ISD::FMAXNUM_IEEE:
4725   case ISD::FMINNUM_IEEE:
4726   case ISD::FMINIMUM:
4727   case ISD::FMAXIMUM:
4728   case AMDGPUISD::FMAX_LEGACY:
4729   case AMDGPUISD::FMIN_LEGACY: {
4730     // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y)
4731     // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y)
4732     // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y)
4733     // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y)
4734 
4735     SDValue LHS = N0.getOperand(0);
4736     SDValue RHS = N0.getOperand(1);
4737 
4738     // 0 doesn't have a negated inline immediate.
4739     // TODO: This constant check should be generalized to other operations.
4740     if (isConstantCostlierToNegate(RHS))
4741       return SDValue();
4742 
4743     SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
4744     SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
4745     unsigned Opposite = inverseMinMax(Opc);
4746 
4747     SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags());
4748     if (Res.getOpcode() != Opposite)
4749       return SDValue(); // Op got folded away.
4750     if (!N0.hasOneUse())
4751       DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
4752     return Res;
4753   }
4754   case AMDGPUISD::FMED3: {
4755     SDValue Ops[3];
4756     for (unsigned I = 0; I < 3; ++I)
4757       Ops[I] = DAG.getNode(ISD::FNEG, SL, VT, N0->getOperand(I), N0->getFlags());
4758 
4759     SDValue Res = DAG.getNode(AMDGPUISD::FMED3, SL, VT, Ops, N0->getFlags());
4760     if (Res.getOpcode() != AMDGPUISD::FMED3)
4761       return SDValue(); // Op got folded away.
4762 
4763     if (!N0.hasOneUse()) {
4764       SDValue Neg = DAG.getNode(ISD::FNEG, SL, VT, Res);
4765       DAG.ReplaceAllUsesWith(N0, Neg);
4766 
4767       for (SDNode *U : Neg->uses())
4768         DCI.AddToWorklist(U);
4769     }
4770 
4771     return Res;
4772   }
4773   case ISD::FP_EXTEND:
4774   case ISD::FTRUNC:
4775   case ISD::FRINT:
4776   case ISD::FNEARBYINT: // XXX - Should fround be handled?
4777   case ISD::FROUNDEVEN:
4778   case ISD::FSIN:
4779   case ISD::FCANONICALIZE:
4780   case AMDGPUISD::RCP:
4781   case AMDGPUISD::RCP_LEGACY:
4782   case AMDGPUISD::RCP_IFLAG:
4783   case AMDGPUISD::SIN_HW: {
4784     SDValue CvtSrc = N0.getOperand(0);
4785     if (CvtSrc.getOpcode() == ISD::FNEG) {
4786       // (fneg (fp_extend (fneg x))) -> (fp_extend x)
4787       // (fneg (rcp (fneg x))) -> (rcp x)
4788       return DAG.getNode(Opc, SL, VT, CvtSrc.getOperand(0));
4789     }
4790 
4791     if (!N0.hasOneUse())
4792       return SDValue();
4793 
4794     // (fneg (fp_extend x)) -> (fp_extend (fneg x))
4795     // (fneg (rcp x)) -> (rcp (fneg x))
4796     SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
4797     return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags());
4798   }
4799   case ISD::FP_ROUND: {
4800     SDValue CvtSrc = N0.getOperand(0);
4801 
4802     if (CvtSrc.getOpcode() == ISD::FNEG) {
4803       // (fneg (fp_round (fneg x))) -> (fp_round x)
4804       return DAG.getNode(ISD::FP_ROUND, SL, VT,
4805                          CvtSrc.getOperand(0), N0.getOperand(1));
4806     }
4807 
4808     if (!N0.hasOneUse())
4809       return SDValue();
4810 
4811     // (fneg (fp_round x)) -> (fp_round (fneg x))
4812     SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
4813     return DAG.getNode(ISD::FP_ROUND, SL, VT, Neg, N0.getOperand(1));
4814   }
4815   case ISD::FP16_TO_FP: {
4816     // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal
4817     // f16, but legalization of f16 fneg ends up pulling it out of the source.
4818     // Put the fneg back as a legal source operation that can be matched later.
4819     SDLoc SL(N);
4820 
4821     SDValue Src = N0.getOperand(0);
4822     EVT SrcVT = Src.getValueType();
4823 
4824     // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000)
4825     SDValue IntFNeg = DAG.getNode(ISD::XOR, SL, SrcVT, Src,
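    // (Illustrative note: bit 15 is the IEEE-754 half-precision sign bit, so xor with
    // 0x8000 flips the sign of the eventual f32 result without needing a legal f16
    // fneg, e.g. 0x3c00 (1.0h) ^ 0x8000 == 0xbc00 (-1.0h).)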
4826                                   DAG.getConstant(0x8000, SL, SrcVT));
4827     return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg);
4828   }
4829   case ISD::SELECT: {
4830     // fneg (select c, a, b) -> select c, (fneg a), (fneg b)
4831     // TODO: Invert conditions of foldFreeOpFromSelect
4832     return SDValue();
4833   }
4834   case ISD::BITCAST: {
4835     SDLoc SL(N);
4836     SDValue BCSrc = N0.getOperand(0);
4837     if (BCSrc.getOpcode() == ISD::BUILD_VECTOR) {
4838       SDValue HighBits = BCSrc.getOperand(BCSrc.getNumOperands() - 1);
4839       if (HighBits.getValueType().getSizeInBits() != 32 ||
4840           !fnegFoldsIntoOp(HighBits.getNode()))
4841         return SDValue();
4842 
4843       // f64 fneg only really needs to operate on the high half of the
4844       // register, so try to force it to an f32 operation to help make use of
4845       // source modifiers.
4846       //
4847       //
4848       // fneg (f64 (bitcast (build_vector x, y))) ->
4849       // f64 (bitcast (build_vector (bitcast i32:x to f32),
4850       //                            (fneg (bitcast i32:y to f32))))
4851 
4852       SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::f32, HighBits);
4853       SDValue NegHi = DAG.getNode(ISD::FNEG, SL, MVT::f32, CastHi);
4854       SDValue CastBack =
4855           DAG.getNode(ISD::BITCAST, SL, HighBits.getValueType(), NegHi);
4856 
4857       SmallVector<SDValue, 8> Ops(BCSrc->op_begin(), BCSrc->op_end());
4858       Ops.back() = CastBack;
4859       DCI.AddToWorklist(NegHi.getNode());
4860       SDValue Build =
4861           DAG.getNode(ISD::BUILD_VECTOR, SL, BCSrc.getValueType(), Ops);
4862       SDValue Result = DAG.getNode(ISD::BITCAST, SL, VT, Build);
4863 
4864       if (!N0.hasOneUse())
4865         DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Result));
4866       return Result;
4867     }
4868 
4869     if (BCSrc.getOpcode() == ISD::SELECT && VT == MVT::f32 &&
4870         BCSrc.hasOneUse()) {
4871       // fneg (bitcast (f32 (select cond, i32:lhs, i32:rhs))) ->
4872       //   select cond, (bitcast i32:lhs to f32), (bitcast i32:rhs to f32)
4873 
4874       // TODO: Cast back result for multiple uses is beneficial in some cases.
4875 
4876       SDValue LHS =
4877           DAG.getNode(ISD::BITCAST, SL, MVT::f32, BCSrc.getOperand(1));
4878       SDValue RHS =
4879           DAG.getNode(ISD::BITCAST, SL, MVT::f32, BCSrc.getOperand(2));
4880 
4881       SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, MVT::f32, LHS);
4882       SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, MVT::f32, RHS);
4883 
4884       return DAG.getNode(ISD::SELECT, SL, MVT::f32, BCSrc.getOperand(0), NegLHS,
4885                          NegRHS);
4886     }
4887 
4888     return SDValue();
4889   }
4890   default:
4891     return SDValue();
4892   }
4893 }
4894 
4895 SDValue AMDGPUTargetLowering::performFAbsCombine(SDNode *N,
4896                                                  DAGCombinerInfo &DCI) const {
4897   SelectionDAG &DAG = DCI.DAG;
4898   SDValue N0 = N->getOperand(0);
4899 
4900   if (!N0.hasOneUse())
4901     return SDValue();
4902 
4903   switch (N0.getOpcode()) {
4904   case ISD::FP16_TO_FP: {
4905     assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal");
4906     SDLoc SL(N);
4907     SDValue Src = N0.getOperand(0);
4908     EVT SrcVT = Src.getValueType();
4909 
4910     // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff)
4911     SDValue IntFAbs = DAG.getNode(ISD::AND, SL, SrcVT, Src,
4912                                   DAG.getConstant(0x7fff, SL, SrcVT));
4913     return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs);
4914   }
4915   default:
4916     return SDValue();
4917   }
4918 }
4919 
4920 SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N,
4921                                                 DAGCombinerInfo &DCI) const {
4922   const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
4923   if (!CFP)
4924     return SDValue();
4925 
4926   // XXX - Should this flush denormals?
4927   const APFloat &Val = CFP->getValueAPF();
4928   APFloat One(Val.getSemantics(), "1.0");
4929   return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
4930 }
4931 
4932 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
4933                                                 DAGCombinerInfo &DCI) const {
4934   SelectionDAG &DAG = DCI.DAG;
4935   SDLoc DL(N);
4936 
4937   switch(N->getOpcode()) {
4938   default:
4939     break;
4940   case ISD::BITCAST: {
4941     EVT DestVT = N->getValueType(0);
4942 
4943     // Push casts through vector builds. This helps avoid emitting a large
4944     // number of copies when materializing floating point vector constants.
4945     //
4946     // vNt1 bitcast (vNt0 (build_vector t0:x, t0:y)) =>
4947     //   vNt1 = build_vector (t1 (bitcast t0:x)), (t1 (bitcast t0:y))
4948     if (DestVT.isVector()) {
4949       SDValue Src = N->getOperand(0);
4950       if (Src.getOpcode() == ISD::BUILD_VECTOR) {
4951         EVT SrcVT = Src.getValueType();
4952         unsigned NElts = DestVT.getVectorNumElements();
4953 
4954         if (SrcVT.getVectorNumElements() == NElts) {
4955           EVT DestEltVT = DestVT.getVectorElementType();
4956 
4957           SmallVector<SDValue, 8> CastedElts;
4958           SDLoc SL(N);
4959           for (unsigned I = 0, E = SrcVT.getVectorNumElements(); I != E; ++I) {
4960             SDValue Elt = Src.getOperand(I);
4961             CastedElts.push_back(DAG.getNode(ISD::BITCAST, DL, DestEltVT, Elt));
4962           }
4963 
4964           return DAG.getBuildVector(DestVT, SL, CastedElts);
4965         }
4966       }
4967     }
4968 
4969     if (DestVT.getSizeInBits() != 64 || !DestVT.isVector())
4970       break;
4971 
4972     // Fold bitcasts of constants.
4973     //
4974     // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
4975     // TODO: Generalize and move to DAGCombiner
4976     SDValue Src = N->getOperand(0);
4977     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
4978       SDLoc SL(N);
4979       uint64_t CVal = C->getZExtValue();
4980       SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
4981                                DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
4982                                DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
4983       return DAG.getNode(ISD::BITCAST, SL, DestVT, BV);
4984     }
4985 
4986     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
4987       const APInt &Val = C->getValueAPF().bitcastToAPInt();
4988       SDLoc SL(N);
4989       uint64_t CVal = Val.getZExtValue();
4990       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
4991                                 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
4992                                 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
4993 
4994       return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
4995     }
4996 
4997     break;
4998   }
4999   case ISD::SHL: {
5000     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
5001       break;
5002 
5003     return performShlCombine(N, DCI);
5004   }
5005   case ISD::SRL: {
5006     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
5007       break;
5008 
5009     return performSrlCombine(N, DCI);
5010   }
5011   case ISD::SRA: {
5012     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
5013       break;
5014 
5015     return performSraCombine(N, DCI);
5016   }
5017   case ISD::TRUNCATE:
5018     return performTruncateCombine(N, DCI);
5019   case ISD::MUL:
5020     return performMulCombine(N, DCI);
5021   case AMDGPUISD::MUL_U24:
5022   case AMDGPUISD::MUL_I24: {
5023     if (SDValue Simplified = simplifyMul24(N, DCI))
5024       return Simplified;
5025     return performMulCombine(N, DCI);
5026   }
5027   case AMDGPUISD::MULHI_I24:
5028   case AMDGPUISD::MULHI_U24:
5029     return simplifyMul24(N, DCI);
5030   case ISD::SMUL_LOHI:
5031   case ISD::UMUL_LOHI:
5032     return performMulLoHiCombine(N, DCI);
5033   case ISD::MULHS:
5034     return performMulhsCombine(N, DCI);
5035   case ISD::MULHU:
5036     return performMulhuCombine(N, DCI);
5037   case ISD::SELECT:
5038     return performSelectCombine(N, DCI);
5039   case ISD::FNEG:
5040     return performFNegCombine(N, DCI);
5041   case ISD::FABS:
5042     return performFAbsCombine(N, DCI);
5043   case AMDGPUISD::BFE_I32:
5044   case AMDGPUISD::BFE_U32: {
5045     assert(!N->getValueType(0).isVector() &&
5046            "Vector handling of BFE not implemented");
5047     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
5048     if (!Width)
5049       break;
5050 
5051     uint32_t WidthVal = Width->getZExtValue() & 0x1f;
5052     if (WidthVal == 0)
5053       return DAG.getConstant(0, DL, MVT::i32);
5054 
5055     ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
5056     if (!Offset)
5057       break;
5058 
5059     SDValue BitsFrom = N->getOperand(0);
5060     uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
5061 
5062     bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
5063 
5064     if (OffsetVal == 0) {
5065       // This is already sign / zero extended, so try to fold away extra BFEs.
5066       unsigned SignBits =  Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
5067 
5068       unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
5069       if (OpSignBits >= SignBits)
5070         return BitsFrom;
5071 
5072       EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
5073       if (Signed) {
5074         // This is a sign_extend_inreg. Replace it to take advantage of existing
5075         // DAG Combines. If not eliminated, we will match back to BFE during
5076         // selection.
5077 
5078         // TODO: The sext_inreg of extended types ends up split into multiple
5079         // operations, although we could handle them in a single BFE.
5080         return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
5081                            DAG.getValueType(SmallVT));
5082       }
5083 
5084       return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
5085     }
5086 
5087     if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
5088       if (Signed) {
5089         return constantFoldBFE<int32_t>(DAG,
5090                                         CVal->getSExtValue(),
5091                                         OffsetVal,
5092                                         WidthVal,
5093                                         DL);
5094       }
5095 
5096       return constantFoldBFE<uint32_t>(DAG,
5097                                        CVal->getZExtValue(),
5098                                        OffsetVal,
5099                                        WidthVal,
5100                                        DL);
5101     }
5102 
5103     if ((OffsetVal + WidthVal) >= 32 &&
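    // Illustrative example (not from the upstream source): when the field reaches the
    // top bit, the BFE degenerates to a plain shift, e.g.
    //   bfe_u32 x, 24, 8  ==  srl x, 24   (offset 24 + width 8 == 32)
    //   bfe_i32 x, 24, 8  ==  sra x, 24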
5104         !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) {
5105       SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
5106       return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
5107                          BitsFrom, ShiftVal);
5108     }
5109 
5110     if (BitsFrom.hasOneUse()) {
5111       APInt Demanded = APInt::getBitsSet(32,
5112                                          OffsetVal,
5113                                          OffsetVal + WidthVal);
5114 
5115       KnownBits Known;
5116       TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
5117                                             !DCI.isBeforeLegalizeOps());
5118       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5119       if (TLI.ShrinkDemandedConstant(BitsFrom, Demanded, TLO) ||
5120           TLI.SimplifyDemandedBits(BitsFrom, Demanded, Known, TLO)) {
5121         DCI.CommitTargetLoweringOpt(TLO);
5122       }
5123     }
5124 
5125     break;
5126   }
5127   case ISD::LOAD:
5128     return performLoadCombine(N, DCI);
5129   case ISD::STORE:
5130     return performStoreCombine(N, DCI);
5131   case AMDGPUISD::RCP:
5132   case AMDGPUISD::RCP_IFLAG:
5133     return performRcpCombine(N, DCI);
5134   case ISD::AssertZext:
5135   case ISD::AssertSext:
5136     return performAssertSZExtCombine(N, DCI);
5137   case ISD::INTRINSIC_WO_CHAIN:
5138     return performIntrinsicWOChainCombine(N, DCI);
5139   case AMDGPUISD::FMAD_FTZ: {
5140     SDValue N0 = N->getOperand(0);
5141     SDValue N1 = N->getOperand(1);
5142     SDValue N2 = N->getOperand(2);
5143     EVT VT = N->getValueType(0);
5144 
5145     // FMAD_FTZ is an FMAD + flush denormals to zero.
5146     // We flush the inputs, the intermediate step, and the output.
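    // For example (illustrative only), with f32 inputs any nonzero magnitude below the
    // smallest normal (2^-126) is treated as +/-0.0 before the multiply, after the
    // multiply, and after the final add.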
5147     ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
5148     ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
5149     ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
5150     if (N0CFP && N1CFP && N2CFP) {
5151       const auto FTZ = [](const APFloat &V) {
5152         if (V.isDenormal()) {
5153           APFloat Zero(V.getSemantics(), 0);
5154           return V.isNegative() ? -Zero : Zero;
5155         }
5156         return V;
5157       };
5158 
5159       APFloat V0 = FTZ(N0CFP->getValueAPF());
5160       APFloat V1 = FTZ(N1CFP->getValueAPF());
5161       APFloat V2 = FTZ(N2CFP->getValueAPF());
5162       V0.multiply(V1, APFloat::rmNearestTiesToEven);
5163       V0 = FTZ(V0);
5164       V0.add(V2, APFloat::rmNearestTiesToEven);
5165       return DAG.getConstantFP(FTZ(V0), DL, VT);
5166     }
5167     break;
5168   }
5169   }
5170   return SDValue();
5171 }
5172 
5173 //===----------------------------------------------------------------------===//
5174 // Helper functions
5175 //===----------------------------------------------------------------------===//
5176 
5177 SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
5178                                                    const TargetRegisterClass *RC,
5179                                                    Register Reg, EVT VT,
5180                                                    const SDLoc &SL,
5181                                                    bool RawReg) const {
5182   MachineFunction &MF = DAG.getMachineFunction();
5183   MachineRegisterInfo &MRI = MF.getRegInfo();
5184   Register VReg;
5185 
5186   if (!MRI.isLiveIn(Reg)) {
5187     VReg = MRI.createVirtualRegister(RC);
5188     MRI.addLiveIn(Reg, VReg);
5189   } else {
5190     VReg = MRI.getLiveInVirtReg(Reg);
5191   }
5192 
5193   if (RawReg)
5194     return DAG.getRegister(VReg, VT);
5195 
5196   return DAG.getCopyFromReg(DAG.getEntryNode(), SL, VReg, VT);
5197 }
5198 
5199 // This may be called multiple times, and nothing prevents creating multiple
5200 // objects at the same offset. See if we already defined this object.
5201 static int getOrCreateFixedStackObject(MachineFrameInfo &MFI, unsigned Size,
5202                                        int64_t Offset) {
5203   for (int I = MFI.getObjectIndexBegin(); I < 0; ++I) {
5204     if (MFI.getObjectOffset(I) == Offset) {
5205       assert(MFI.getObjectSize(I) == Size);
5206       return I;
5207     }
5208   }
5209 
5210   return MFI.CreateFixedObject(Size, Offset, true);
5211 }
5212 
5213 SDValue AMDGPUTargetLowering::loadStackInputValue(SelectionDAG &DAG,
5214                                                   EVT VT,
5215                                                   const SDLoc &SL,
5216                                                   int64_t Offset) const {
5217   MachineFunction &MF = DAG.getMachineFunction();
5218   MachineFrameInfo &MFI = MF.getFrameInfo();
5219   int FI = getOrCreateFixedStackObject(MFI, VT.getStoreSize(), Offset);
5220 
5221   auto SrcPtrInfo = MachinePointerInfo::getStack(MF, Offset);
5222   SDValue Ptr = DAG.getFrameIndex(FI, MVT::i32);
5223 
5224   return DAG.getLoad(VT, SL, DAG.getEntryNode(), Ptr, SrcPtrInfo, Align(4),
5225                      MachineMemOperand::MODereferenceable |
5226                          MachineMemOperand::MOInvariant);
5227 }
5228 
5229 SDValue AMDGPUTargetLowering::storeStackInputValue(SelectionDAG &DAG,
5230                                                    const SDLoc &SL,
5231                                                    SDValue Chain,
5232                                                    SDValue ArgVal,
5233                                                    int64_t Offset) const {
5234   MachineFunction &MF = DAG.getMachineFunction();
5235   MachinePointerInfo DstInfo = MachinePointerInfo::getStack(MF, Offset);
5236   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
5237 
5238   SDValue Ptr = DAG.getConstant(Offset, SL, MVT::i32);
5239   // Stores to the argument stack area are relative to the stack pointer.
5240   SDValue SP =
5241       DAG.getCopyFromReg(Chain, SL, Info->getStackPtrOffsetReg(), MVT::i32);
5242   Ptr = DAG.getNode(ISD::ADD, SL, MVT::i32, SP, Ptr);
5243   SDValue Store = DAG.getStore(Chain, SL, ArgVal, Ptr, DstInfo, Align(4),
5244                                MachineMemOperand::MODereferenceable);
5245   return Store;
5246 }
5247 
5248 SDValue AMDGPUTargetLowering::loadInputValue(SelectionDAG &DAG,
5249                                              const TargetRegisterClass *RC,
5250                                              EVT VT, const SDLoc &SL,
5251                                              const ArgDescriptor &Arg) const {
5252   assert(Arg && "Attempting to load missing argument");
5253 
5254   SDValue V = Arg.isRegister() ?
5255     CreateLiveInRegister(DAG, RC, Arg.getRegister(), VT, SL) :
5256     loadStackInputValue(DAG, VT, SL, Arg.getStackOffset());
5257 
5258   if (!Arg.isMasked())
5259     return V;
5260 
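  // Illustrative example (not from the upstream source): for a packed argument with
  // Mask == 0x000ffc00 (bits 10..19), Shift == 10, so the value is recovered as
  //   (V >> 10) & 0x3ff
  // i.e. shift the field down and mask with the shifted-down mask.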
5261   unsigned Mask = Arg.getMask();
5262   unsigned Shift = llvm::countr_zero<unsigned>(Mask);
5263   V = DAG.getNode(ISD::SRL, SL, VT, V,
5264                   DAG.getShiftAmountConstant(Shift, VT, SL));
5265   return DAG.getNode(ISD::AND, SL, VT, V,
5266                      DAG.getConstant(Mask >> Shift, SL, VT));
5267 }
5268 
5269 uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
5270     uint64_t ExplicitKernArgSize, const ImplicitParameter Param) const {
5271   unsigned ExplicitArgOffset = Subtarget->getExplicitKernelArgOffset();
5272   const Align Alignment = Subtarget->getAlignmentForImplicitArgPtr();
5273   uint64_t ArgOffset =
5274       alignTo(ExplicitKernArgSize, Alignment) + ExplicitArgOffset;
5275   switch (Param) {
5276   case FIRST_IMPLICIT:
5277     return ArgOffset;
5278   case PRIVATE_BASE:
5279     return ArgOffset + AMDGPU::ImplicitArg::PRIVATE_BASE_OFFSET;
5280   case SHARED_BASE:
5281     return ArgOffset + AMDGPU::ImplicitArg::SHARED_BASE_OFFSET;
5282   case QUEUE_PTR:
5283     return ArgOffset + AMDGPU::ImplicitArg::QUEUE_PTR_OFFSET;
5284   }
5285   llvm_unreachable("unexpected implicit parameter type");
5286 }
5287 
5288 uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
5289     const MachineFunction &MF, const ImplicitParameter Param) const {
5290   const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
5291   return getImplicitParameterOffset(MFI->getExplicitKernArgSize(), Param);
5292 }
5293 
5294 #define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;
5295 
5296 const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
5297   switch ((AMDGPUISD::NodeType)Opcode) {
5298   case AMDGPUISD::FIRST_NUMBER: break;
5299   // AMDIL DAG nodes
5300   NODE_NAME_CASE(UMUL);
5301   NODE_NAME_CASE(BRANCH_COND);
5302 
5303   // AMDGPU DAG nodes
5304   NODE_NAME_CASE(IF)
5305   NODE_NAME_CASE(ELSE)
5306   NODE_NAME_CASE(LOOP)
5307   NODE_NAME_CASE(CALL)
5308   NODE_NAME_CASE(TC_RETURN)
5309   NODE_NAME_CASE(TC_RETURN_GFX)
5310   NODE_NAME_CASE(TC_RETURN_CHAIN)
5311   NODE_NAME_CASE(TRAP)
5312   NODE_NAME_CASE(RET_GLUE)
5313   NODE_NAME_CASE(WAVE_ADDRESS)
5314   NODE_NAME_CASE(RETURN_TO_EPILOG)
5315   NODE_NAME_CASE(ENDPGM)
5316   NODE_NAME_CASE(ENDPGM_TRAP)
5317   NODE_NAME_CASE(DWORDADDR)
5318   NODE_NAME_CASE(FRACT)
5319   NODE_NAME_CASE(SETCC)
5320   NODE_NAME_CASE(SETREG)
5321   NODE_NAME_CASE(DENORM_MODE)
5322   NODE_NAME_CASE(FMA_W_CHAIN)
5323   NODE_NAME_CASE(FMUL_W_CHAIN)
5324   NODE_NAME_CASE(CLAMP)
5325   NODE_NAME_CASE(COS_HW)
5326   NODE_NAME_CASE(SIN_HW)
5327   NODE_NAME_CASE(FMAX_LEGACY)
5328   NODE_NAME_CASE(FMIN_LEGACY)
5329   NODE_NAME_CASE(FMAX3)
5330   NODE_NAME_CASE(SMAX3)
5331   NODE_NAME_CASE(UMAX3)
5332   NODE_NAME_CASE(FMIN3)
5333   NODE_NAME_CASE(SMIN3)
5334   NODE_NAME_CASE(UMIN3)
5335   NODE_NAME_CASE(FMED3)
5336   NODE_NAME_CASE(SMED3)
5337   NODE_NAME_CASE(UMED3)
5338   NODE_NAME_CASE(FMAXIMUM3)
5339   NODE_NAME_CASE(FMINIMUM3)
5340   NODE_NAME_CASE(FDOT2)
5341   NODE_NAME_CASE(URECIP)
5342   NODE_NAME_CASE(DIV_SCALE)
5343   NODE_NAME_CASE(DIV_FMAS)
5344   NODE_NAME_CASE(DIV_FIXUP)
5345   NODE_NAME_CASE(FMAD_FTZ)
5346   NODE_NAME_CASE(RCP)
5347   NODE_NAME_CASE(RSQ)
5348   NODE_NAME_CASE(RCP_LEGACY)
5349   NODE_NAME_CASE(RCP_IFLAG)
5350   NODE_NAME_CASE(LOG)
5351   NODE_NAME_CASE(EXP)
5352   NODE_NAME_CASE(FMUL_LEGACY)
5353   NODE_NAME_CASE(RSQ_CLAMP)
5354   NODE_NAME_CASE(FP_CLASS)
5355   NODE_NAME_CASE(DOT4)
5356   NODE_NAME_CASE(CARRY)
5357   NODE_NAME_CASE(BORROW)
5358   NODE_NAME_CASE(BFE_U32)
5359   NODE_NAME_CASE(BFE_I32)
5360   NODE_NAME_CASE(BFI)
5361   NODE_NAME_CASE(BFM)
5362   NODE_NAME_CASE(FFBH_U32)
5363   NODE_NAME_CASE(FFBH_I32)
5364   NODE_NAME_CASE(FFBL_B32)
5365   NODE_NAME_CASE(MUL_U24)
5366   NODE_NAME_CASE(MUL_I24)
5367   NODE_NAME_CASE(MULHI_U24)
5368   NODE_NAME_CASE(MULHI_I24)
5369   NODE_NAME_CASE(MAD_U24)
5370   NODE_NAME_CASE(MAD_I24)
5371   NODE_NAME_CASE(MAD_I64_I32)
5372   NODE_NAME_CASE(MAD_U64_U32)
5373   NODE_NAME_CASE(PERM)
5374   NODE_NAME_CASE(TEXTURE_FETCH)
5375   NODE_NAME_CASE(R600_EXPORT)
5376   NODE_NAME_CASE(CONST_ADDRESS)
5377   NODE_NAME_CASE(REGISTER_LOAD)
5378   NODE_NAME_CASE(REGISTER_STORE)
5379   NODE_NAME_CASE(SAMPLE)
5380   NODE_NAME_CASE(SAMPLEB)
5381   NODE_NAME_CASE(SAMPLED)
5382   NODE_NAME_CASE(SAMPLEL)
5383   NODE_NAME_CASE(CVT_F32_UBYTE0)
5384   NODE_NAME_CASE(CVT_F32_UBYTE1)
5385   NODE_NAME_CASE(CVT_F32_UBYTE2)
5386   NODE_NAME_CASE(CVT_F32_UBYTE3)
5387   NODE_NAME_CASE(CVT_PKRTZ_F16_F32)
5388   NODE_NAME_CASE(CVT_PKNORM_I16_F32)
5389   NODE_NAME_CASE(CVT_PKNORM_U16_F32)
5390   NODE_NAME_CASE(CVT_PK_I16_I32)
5391   NODE_NAME_CASE(CVT_PK_U16_U32)
5392   NODE_NAME_CASE(FP_TO_FP16)
5393   NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
5394   NODE_NAME_CASE(CONST_DATA_PTR)
5395   NODE_NAME_CASE(PC_ADD_REL_OFFSET)
5396   NODE_NAME_CASE(LDS)
5397   NODE_NAME_CASE(FPTRUNC_ROUND_UPWARD)
5398   NODE_NAME_CASE(FPTRUNC_ROUND_DOWNWARD)
5399   NODE_NAME_CASE(DUMMY_CHAIN)
5400   case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
5401   NODE_NAME_CASE(LOAD_D16_HI)
5402   NODE_NAME_CASE(LOAD_D16_LO)
5403   NODE_NAME_CASE(LOAD_D16_HI_I8)
5404   NODE_NAME_CASE(LOAD_D16_HI_U8)
5405   NODE_NAME_CASE(LOAD_D16_LO_I8)
5406   NODE_NAME_CASE(LOAD_D16_LO_U8)
5407   NODE_NAME_CASE(STORE_MSKOR)
5408   NODE_NAME_CASE(LOAD_CONSTANT)
5409   NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
5410   NODE_NAME_CASE(TBUFFER_STORE_FORMAT_D16)
5411   NODE_NAME_CASE(TBUFFER_LOAD_FORMAT)
5412   NODE_NAME_CASE(TBUFFER_LOAD_FORMAT_D16)
5413   NODE_NAME_CASE(DS_ORDERED_COUNT)
5414   NODE_NAME_CASE(ATOMIC_CMP_SWAP)
5415   NODE_NAME_CASE(ATOMIC_LOAD_FMIN)
5416   NODE_NAME_CASE(ATOMIC_LOAD_FMAX)
5417   NODE_NAME_CASE(BUFFER_LOAD)
5418   NODE_NAME_CASE(BUFFER_LOAD_UBYTE)
5419   NODE_NAME_CASE(BUFFER_LOAD_USHORT)
5420   NODE_NAME_CASE(BUFFER_LOAD_BYTE)
5421   NODE_NAME_CASE(BUFFER_LOAD_SHORT)
5422   NODE_NAME_CASE(BUFFER_LOAD_FORMAT)
5423   NODE_NAME_CASE(BUFFER_LOAD_FORMAT_TFE)
5424   NODE_NAME_CASE(BUFFER_LOAD_FORMAT_D16)
5425   NODE_NAME_CASE(SBUFFER_LOAD)
5426   NODE_NAME_CASE(BUFFER_STORE)
5427   NODE_NAME_CASE(BUFFER_STORE_BYTE)
5428   NODE_NAME_CASE(BUFFER_STORE_SHORT)
5429   NODE_NAME_CASE(BUFFER_STORE_FORMAT)
5430   NODE_NAME_CASE(BUFFER_STORE_FORMAT_D16)
5431   NODE_NAME_CASE(BUFFER_ATOMIC_SWAP)
5432   NODE_NAME_CASE(BUFFER_ATOMIC_ADD)
5433   NODE_NAME_CASE(BUFFER_ATOMIC_SUB)
5434   NODE_NAME_CASE(BUFFER_ATOMIC_SMIN)
5435   NODE_NAME_CASE(BUFFER_ATOMIC_UMIN)
5436   NODE_NAME_CASE(BUFFER_ATOMIC_SMAX)
5437   NODE_NAME_CASE(BUFFER_ATOMIC_UMAX)
5438   NODE_NAME_CASE(BUFFER_ATOMIC_AND)
5439   NODE_NAME_CASE(BUFFER_ATOMIC_OR)
5440   NODE_NAME_CASE(BUFFER_ATOMIC_XOR)
5441   NODE_NAME_CASE(BUFFER_ATOMIC_INC)
5442   NODE_NAME_CASE(BUFFER_ATOMIC_DEC)
5443   NODE_NAME_CASE(BUFFER_ATOMIC_CMPSWAP)
5444   NODE_NAME_CASE(BUFFER_ATOMIC_CSUB)
5445   NODE_NAME_CASE(BUFFER_ATOMIC_FADD)
5446   NODE_NAME_CASE(BUFFER_ATOMIC_FMIN)
5447   NODE_NAME_CASE(BUFFER_ATOMIC_FMAX)
5448 
5449   case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
5450   }
5451   return nullptr;
5452 }
5453 
5454 SDValue AMDGPUTargetLowering::getSqrtEstimate(SDValue Operand,
5455                                               SelectionDAG &DAG, int Enabled,
5456                                               int &RefinementSteps,
5457                                               bool &UseOneConstNR,
5458                                               bool Reciprocal) const {
5459   EVT VT = Operand.getValueType();
5460 
5461   if (VT == MVT::f32) {
5462     RefinementSteps = 0;
5463     return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
5464   }
5465 
5466   // TODO: There is also an f64 rsq instruction, but the documentation is less
5467   // clear on its precision.
5468 
5469   return SDValue();
5470 }
5471 
5472 SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
5473                                                SelectionDAG &DAG, int Enabled,
5474                                                int &RefinementSteps) const {
5475   EVT VT = Operand.getValueType();
5476 
5477   if (VT == MVT::f32) {
5478     // Reciprocal, < 1 ulp error.
5479     //
5480     // This reciprocal approximation converges to < 0.5 ulp error with one
5481     // Newton-Raphson iteration performed with two fused multiply-adds (FMAs).
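         // For reference, a single Newton-Raphson refinement of an estimate
         // x0 ~= 1/a can be written with two FMAs:
         //   e  = fma(-a, x0, 1.0);  // residual error
         //   x1 = fma(e, x0, x0);    // i.e. x1 = x0 * (2 - a * x0)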
5482 
5483     RefinementSteps = 0;
5484     return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
5485   }
5486 
5487   // TODO: There is also an f64 rcp instruction, but the documentation is less
5488   // clear on its precision.
5489 
5490   return SDValue();
5491 }
5492 
5493 static unsigned workitemIntrinsicDim(unsigned ID) {
5494   switch (ID) {
5495   case Intrinsic::amdgcn_workitem_id_x:
5496     return 0;
5497   case Intrinsic::amdgcn_workitem_id_y:
5498     return 1;
5499   case Intrinsic::amdgcn_workitem_id_z:
5500     return 2;
5501   default:
5502     llvm_unreachable("not a workitem intrinsic");
5503   }
5504 }
5505 
5506 void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
5507     const SDValue Op, KnownBits &Known,
5508     const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
5509 
5510   Known.resetAll(); // Don't know anything.
5511 
5512   unsigned Opc = Op.getOpcode();
5513 
5514   switch (Opc) {
5515   default:
5516     break;
5517   case AMDGPUISD::CARRY:
5518   case AMDGPUISD::BORROW: {
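         // These nodes produce a carry/borrow value of 0 or 1, so everything
         // above the low bit is known zero.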
5519     Known.Zero = APInt::getHighBitsSet(32, 31);
5520     break;
5521   }
5522 
5523   case AMDGPUISD::BFE_I32:
5524   case AMDGPUISD::BFE_U32: {
5525     ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
5526     if (!CWidth)
5527       return;
5528 
5529     uint32_t Width = CWidth->getZExtValue() & 0x1f;
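         // e.g. a BFE_U32 with Width == 8 extracts an unsigned byte, so bits
         // [31:8] of the result are known zero.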
5530 
5531     if (Opc == AMDGPUISD::BFE_U32)
5532       Known.Zero = APInt::getHighBitsSet(32, 32 - Width);
5533 
5534     break;
5535   }
5536   case AMDGPUISD::FP_TO_FP16: {
5537     unsigned BitWidth = Known.getBitWidth();
5538 
5539     // High bits are zero.
5540     Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
5541     break;
5542   }
5543   case AMDGPUISD::MUL_U24:
5544   case AMDGPUISD::MUL_I24: {
5545     KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
5546     KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
5547     unsigned TrailZ = LHSKnown.countMinTrailingZeros() +
5548                       RHSKnown.countMinTrailingZeros();
5549     Known.Zero.setLowBits(std::min(TrailZ, 32u));
5550     // Skip the extra checks if all bits are known to be zero.
5551     if (TrailZ >= 32)
5552       break;
5553 
5554     // Truncate to 24 bits.
5555     LHSKnown = LHSKnown.trunc(24);
5556     RHSKnown = RHSKnown.trunc(24);
5557 
5558     if (Opc == AMDGPUISD::MUL_I24) {
5559       unsigned LHSValBits = LHSKnown.countMaxSignificantBits();
5560       unsigned RHSValBits = RHSKnown.countMaxSignificantBits();
5561       unsigned MaxValBits = LHSValBits + RHSValBits;
5562       if (MaxValBits > 32)
5563         break;
5564       unsigned SignBits = 32 - MaxValBits + 1;
5565       bool LHSNegative = LHSKnown.isNegative();
5566       bool LHSNonNegative = LHSKnown.isNonNegative();
5567       bool LHSPositive = LHSKnown.isStrictlyPositive();
5568       bool RHSNegative = RHSKnown.isNegative();
5569       bool RHSNonNegative = RHSKnown.isNonNegative();
5570       bool RHSPositive = RHSKnown.isStrictlyPositive();
5571 
5572       if ((LHSNonNegative && RHSNonNegative) || (LHSNegative && RHSNegative))
5573         Known.Zero.setHighBits(SignBits);
5574       else if ((LHSNegative && RHSPositive) || (LHSPositive && RHSNegative))
5575         Known.One.setHighBits(SignBits);
5576     } else {
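           // Unsigned case: a product of values with m and n active bits needs
           // at most m + n bits, e.g. two 10-bit operands give at most a 20-bit
           // result, leaving bits [31:20] known zero.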
5577       unsigned LHSValBits = LHSKnown.countMaxActiveBits();
5578       unsigned RHSValBits = RHSKnown.countMaxActiveBits();
5579       unsigned MaxValBits = LHSValBits + RHSValBits;
5580       if (MaxValBits >= 32)
5581         break;
5582       Known.Zero.setBitsFrom(MaxValBits);
5583     }
5584     break;
5585   }
5586   case AMDGPUISD::PERM: {
5587     ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Op.getOperand(2));
5588     if (!CMask)
5589       return;
5590 
5591     KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
5592     KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
5593     unsigned Sel = CMask->getZExtValue();
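         // Each byte of Sel selects one byte of the result. The loop below
         // models the cases it understands: selectors 0-3 take a byte from
         // operand 1, selectors 4-6 take a byte from operand 0, 0x0c yields a
         // known-zero byte, and anything above 0x0c yields a known-0xff byte;
         // other selector values are left unknown.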
5594 
5595     for (unsigned I = 0; I < 32; I += 8) {
5596       unsigned SelBits = Sel & 0xff;
5597       if (SelBits < 4) {
5598         SelBits *= 8;
5599         Known.One |= ((RHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
5600         Known.Zero |= ((RHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
5601       } else if (SelBits < 7) {
5602         SelBits = (SelBits & 3) * 8;
5603         Known.One |= ((LHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
5604         Known.Zero |= ((LHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
5605       } else if (SelBits == 0x0c) {
5606         Known.Zero |= 0xFFull << I;
5607       } else if (SelBits > 0x0c) {
5608         Known.One |= 0xFFull << I;
5609       }
5610       Sel >>= 8;
5611     }
5612     break;
5613   }
5614   case AMDGPUISD::BUFFER_LOAD_UBYTE: {
5615     Known.Zero.setHighBits(24);
5616     break;
5617   }
5618   case AMDGPUISD::BUFFER_LOAD_USHORT: {
5619     Known.Zero.setHighBits(16);
5620     break;
5621   }
5622   case AMDGPUISD::LDS: {
5623     auto GA = cast<GlobalAddressSDNode>(Op.getOperand(0).getNode());
5624     Align Alignment = GA->getGlobal()->getPointerAlignment(DAG.getDataLayout());
5625 
5626     Known.Zero.setHighBits(16);
5627     Known.Zero.setLowBits(Log2(Alignment));
5628     break;
5629   }
5630   case AMDGPUISD::SMIN3:
5631   case AMDGPUISD::SMAX3:
5632   case AMDGPUISD::SMED3:
5633   case AMDGPUISD::UMIN3:
5634   case AMDGPUISD::UMAX3:
5635   case AMDGPUISD::UMED3: {
5636     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(2), Depth + 1);
5637     if (Known2.isUnknown())
5638       break;
5639 
5640     KnownBits Known1 = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
5641     if (Known1.isUnknown())
5642       break;
5643 
5644     KnownBits Known0 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
5645     if (Known0.isUnknown())
5646       break;
5647 
5648     // TODO: Handle LeadZero/LeadOne from UMIN/UMAX handling.
5649     Known.Zero = Known0.Zero & Known1.Zero & Known2.Zero;
5650     Known.One = Known0.One & Known1.One & Known2.One;
5651     break;
5652   }
5653   case ISD::INTRINSIC_WO_CHAIN: {
5654     unsigned IID = Op.getConstantOperandVal(0);
5655     switch (IID) {
5656     case Intrinsic::amdgcn_workitem_id_x:
5657     case Intrinsic::amdgcn_workitem_id_y:
5658     case Intrinsic::amdgcn_workitem_id_z: {
5659       unsigned MaxValue = Subtarget->getMaxWorkitemID(
5660           DAG.getMachineFunction().getFunction(), workitemIntrinsicDim(IID));
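           // Any bit that can never be set in MaxValue is known zero in the ID,
           // e.g. countl_zero(1023) == 22, so a 10-bit ID range clears the top
           // 22 bits.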
5661       Known.Zero.setHighBits(llvm::countl_zero(MaxValue));
5662       break;
5663     }
5664     default:
5665       break;
5666     }
5667   }
5668   }
5669 }
5670 
5671 unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
5672     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
5673     unsigned Depth) const {
5674   switch (Op.getOpcode()) {
5675   case AMDGPUISD::BFE_I32: {
5676     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
5677     if (!Width)
5678       return 1;
5679 
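         // A Width-bit field sign-extended to 32 bits has at least
         // 32 - Width + 1 sign bits, e.g. Width == 8 gives at least 25.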
5680     unsigned SignBits = 32 - Width->getZExtValue() + 1;
5681     if (!isNullConstant(Op.getOperand(1)))
5682       return SignBits;
5683 
5684     // TODO: Could probably figure something out with non-0 offsets.
5685     unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
5686     return std::max(SignBits, Op0SignBits);
5687   }
5688 
5689   case AMDGPUISD::BFE_U32: {
5690     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
5691     return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
5692   }
5693 
5694   case AMDGPUISD::CARRY:
5695   case AMDGPUISD::BORROW:
5696     return 31;
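       // Extending buffer loads: a sign-extended i8 has at least 32 - 8 + 1 = 25
       // sign bits and a zero-extended i8 has 24; likewise 17 and 16 for i16.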
5697   case AMDGPUISD::BUFFER_LOAD_BYTE:
5698     return 25;
5699   case AMDGPUISD::BUFFER_LOAD_SHORT:
5700     return 17;
5701   case AMDGPUISD::BUFFER_LOAD_UBYTE:
5702     return 24;
5703   case AMDGPUISD::BUFFER_LOAD_USHORT:
5704     return 16;
5705   case AMDGPUISD::FP_TO_FP16:
5706     return 16;
5707   case AMDGPUISD::SMIN3:
5708   case AMDGPUISD::SMAX3:
5709   case AMDGPUISD::SMED3:
5710   case AMDGPUISD::UMIN3:
5711   case AMDGPUISD::UMAX3:
5712   case AMDGPUISD::UMED3: {
5713     unsigned Tmp2 = DAG.ComputeNumSignBits(Op.getOperand(2), Depth + 1);
5714     if (Tmp2 == 1)
5715       return 1; // Early out.
5716 
5717     unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth + 1);
5718     if (Tmp1 == 1)
5719       return 1; // Early out.
5720 
5721     unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
5722     if (Tmp0 == 1)
5723       return 1; // Early out.
5724 
5725     return std::min(Tmp0, std::min(Tmp1, Tmp2));
5726   }
5727   default:
5728     return 1;
5729   }
5730 }
5731 
5732 unsigned AMDGPUTargetLowering::computeNumSignBitsForTargetInstr(
5733   GISelKnownBits &Analysis, Register R,
5734   const APInt &DemandedElts, const MachineRegisterInfo &MRI,
5735   unsigned Depth) const {
5736   const MachineInstr *MI = MRI.getVRegDef(R);
5737   if (!MI)
5738     return 1;
5739 
5740   // TODO: Check range metadata on MMO.
5741   switch (MI->getOpcode()) {
5742   case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE:
5743     return 25;
5744   case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT:
5745     return 17;
5746   case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE:
5747     return 24;
5748   case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT:
5749     return 16;
5750   case AMDGPU::G_AMDGPU_SMED3:
5751   case AMDGPU::G_AMDGPU_UMED3: {
5752     auto [Dst, Src0, Src1, Src2] = MI->getFirst4Regs();
5753     unsigned Tmp2 = Analysis.computeNumSignBits(Src2, DemandedElts, Depth + 1);
5754     if (Tmp2 == 1)
5755       return 1;
5756     unsigned Tmp1 = Analysis.computeNumSignBits(Src1, DemandedElts, Depth + 1);
5757     if (Tmp1 == 1)
5758       return 1;
5759     unsigned Tmp0 = Analysis.computeNumSignBits(Src0, DemandedElts, Depth + 1);
5760     if (Tmp0 == 1)
5761       return 1;
5762     return std::min(Tmp0, std::min(Tmp1, Tmp2));
5763   }
5764   default:
5765     return 1;
5766   }
5767 }
5768 
5769 bool AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
5770                                                         const SelectionDAG &DAG,
5771                                                         bool SNaN,
5772                                                         unsigned Depth) const {
5773   unsigned Opcode = Op.getOpcode();
5774   switch (Opcode) {
5775   case AMDGPUISD::FMIN_LEGACY:
5776   case AMDGPUISD::FMAX_LEGACY: {
5777     if (SNaN)
5778       return true;
5779 
5780     // TODO: Can check no nans on one of the operands for each one, but which
5781     // one?
5782     return false;
5783   }
5784   case AMDGPUISD::FMUL_LEGACY:
5785   case AMDGPUISD::CVT_PKRTZ_F16_F32: {
5786     if (SNaN)
5787       return true;
5788     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
5789            DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
5790   }
5791   case AMDGPUISD::FMED3:
5792   case AMDGPUISD::FMIN3:
5793   case AMDGPUISD::FMAX3:
5794   case AMDGPUISD::FMINIMUM3:
5795   case AMDGPUISD::FMAXIMUM3:
5796   case AMDGPUISD::FMAD_FTZ: {
5797     if (SNaN)
5798       return true;
5799     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
5800            DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
5801            DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
5802   }
5803   case AMDGPUISD::CVT_F32_UBYTE0:
5804   case AMDGPUISD::CVT_F32_UBYTE1:
5805   case AMDGPUISD::CVT_F32_UBYTE2:
5806   case AMDGPUISD::CVT_F32_UBYTE3:
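         // Converting an unsigned byte to f32 always yields a small,
         // non-negative integer value, never a NaN.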
5807     return true;
5808 
5809   case AMDGPUISD::RCP:
5810   case AMDGPUISD::RSQ:
5811   case AMDGPUISD::RCP_LEGACY:
5812   case AMDGPUISD::RSQ_CLAMP: {
5813     if (SNaN)
5814       return true;
5815 
5816     // TODO: Need an is-known-positive check.
5817     return false;
5818   }
5819   case ISD::FLDEXP:
5820   case AMDGPUISD::FRACT: {
5821     if (SNaN)
5822       return true;
5823     return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
5824   }
5825   case AMDGPUISD::DIV_SCALE:
5826   case AMDGPUISD::DIV_FMAS:
5827   case AMDGPUISD::DIV_FIXUP:
5828     // TODO: Refine on operands.
5829     return SNaN;
5830   case AMDGPUISD::SIN_HW:
5831   case AMDGPUISD::COS_HW: {
5832     // TODO: Need a check for infinity.
5833     return SNaN;
5834   }
5835   case ISD::INTRINSIC_WO_CHAIN: {
5836     unsigned IntrinsicID = Op.getConstantOperandVal(0);
5837     // TODO: Handle more intrinsics
5838     switch (IntrinsicID) {
5839     case Intrinsic::amdgcn_cubeid:
5840       return true;
5841 
5842     case Intrinsic::amdgcn_frexp_mant: {
5843       if (SNaN)
5844         return true;
5845       return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
5846     }
5847     case Intrinsic::amdgcn_cvt_pkrtz: {
5848       if (SNaN)
5849         return true;
5850       return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
5851              DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
5852     }
5853     case Intrinsic::amdgcn_rcp:
5854     case Intrinsic::amdgcn_rsq:
5855     case Intrinsic::amdgcn_rcp_legacy:
5856     case Intrinsic::amdgcn_rsq_legacy:
5857     case Intrinsic::amdgcn_rsq_clamp: {
5858       if (SNaN)
5859         return true;
5860 
5861       // TODO: Need an is-known-positive check.
5862       return false;
5863     }
5864     case Intrinsic::amdgcn_trig_preop:
5865     case Intrinsic::amdgcn_fdot2:
5866       // TODO: Refine on operand
5867       return SNaN;
5868     case Intrinsic::amdgcn_fma_legacy:
5869       if (SNaN)
5870         return true;
5871       return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
5872              DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1) &&
5873              DAG.isKnownNeverNaN(Op.getOperand(3), SNaN, Depth + 1);
5874     default:
5875       return false;
5876     }
5877   }
5878   default:
5879     return false;
5880   }
5881 }
5882 
5883 bool AMDGPUTargetLowering::isReassocProfitable(MachineRegisterInfo &MRI,
5884                                                Register N0, Register N1) const {
5885   return MRI.hasOneNonDBGUse(N0); // FIXME: handle regbanks
5886 }
5887 
5888 TargetLowering::AtomicExpansionKind
5889 AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
5890   switch (RMW->getOperation()) {
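       // Operations without a direct lowering here are expanded to a
       // compare-and-swap loop; plain 32- and 64-bit integer RMW operations
       // are kept as-is (subtargets may refine this further).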
5891   case AtomicRMWInst::Nand:
5892   case AtomicRMWInst::FAdd:
5893   case AtomicRMWInst::FSub:
5894   case AtomicRMWInst::FMax:
5895   case AtomicRMWInst::FMin:
5896     return AtomicExpansionKind::CmpXChg;
5897   default: {
5898     if (auto *IntTy = dyn_cast<IntegerType>(RMW->getType())) {
5899       unsigned Size = IntTy->getBitWidth();
5900       if (Size == 32 || Size == 64)
5901         return AtomicExpansionKind::None;
5902     }
5903 
5904     return AtomicExpansionKind::CmpXChg;
5905   }
5906   }
5907 }
5908 
5909 /// Whether it is profitable to sink the operands of an
5910 /// Instruction I to the basic block of I.
5911 /// This helps use modifiers (like abs and neg) more often.
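     /// For example, an fneg or fabs whose only use is in another block can be
     /// sunk next to that use, where instruction selection may fold it into a
     /// source modifier on the user.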
5912 bool AMDGPUTargetLowering::shouldSinkOperands(
5913     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
5914   using namespace PatternMatch;
5915 
5916   for (auto &Op : I->operands()) {
5917     // Ensure we are not already sinking this operand.
5918     if (any_of(Ops, [&](Use *U) { return U->get() == Op.get(); }))
5919       continue;
5920 
5921     if (match(&Op, m_FAbs(m_Value())) || match(&Op, m_FNeg(m_Value())))
5922       Ops.push_back(&Op);
5923   }
5924 
5925   return !Ops.empty();
5926 }
5927