; Imported from llvm/test/Transforms/InstSimplify/call.ll (revision 17ef436e3df231fa45aa6010bf8ed41189380679)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instsimplify -S | FileCheck %s

declare {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.usub.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.ssub.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.umul.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.smul.with.overflow.i8(i8 %a, i8 %b)

define i1 @test_uadd1() {
; CHECK-LABEL: @test_uadd1(
; CHECK-NEXT:    ret i1 true
;
  %x = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 254, i8 3)
  %overflow = extractvalue {i8, i1} %x, 1
  ret i1 %overflow
}

define i8 @test_uadd2() {
; CHECK-LABEL: @test_uadd2(
; CHECK-NEXT:    ret i8 42
;
  %x = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 254, i8 44)
  %result = extractvalue {i8, i1} %x, 0
  ret i8 %result
}

define {i8, i1} @test_uadd3(i8 %v) {
; CHECK-LABEL: @test_uadd3(
; CHECK-NEXT:    ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %v, i8 undef)
  ret {i8, i1} %result
}

define {i8, i1} @test_uadd3_poison(i8 %v) {
; CHECK-LABEL: @test_uadd3_poison(
; CHECK-NEXT:    ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %v, i8 poison)
  ret {i8, i1} %result
}

define {i8, i1} @test_uadd4(i8 %v) {
; CHECK-LABEL: @test_uadd4(
; CHECK-NEXT:    ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 undef, i8 %v)
  ret {i8, i1} %result
}

define {i8, i1} @test_uadd4_poison(i8 %v) {
; CHECK-LABEL: @test_uadd4_poison(
; CHECK-NEXT:    ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 poison, i8 %v)
  ret {i8, i1} %result
}

define i1 @test_sadd1() {
; CHECK-LABEL: @test_sadd1(
; CHECK-NEXT:    ret i1 true
;
  %x = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 126, i8 3)
  %overflow = extractvalue {i8, i1} %x, 1
  ret i1 %overflow
}

define i8 @test_sadd2() {
; CHECK-LABEL: @test_sadd2(
; CHECK-NEXT:    ret i8 -86
;
  %x = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 126, i8 44)
  %result = extractvalue {i8, i1} %x, 0
  ret i8 %result
}

define {i8, i1} @test_sadd3(i8 %v) {
; CHECK-LABEL: @test_sadd3(
; CHECK-NEXT:    ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v, i8 undef)
  ret {i8, i1} %result
}

define {i8, i1} @test_sadd3_poison(i8 %v) {
; CHECK-LABEL: @test_sadd3_poison(
; CHECK-NEXT:    ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v, i8 poison)
  ret {i8, i1} %result
}

define {i8, i1} @test_sadd4(i8 %v) {
; CHECK-LABEL: @test_sadd4(
; CHECK-NEXT:    ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 undef, i8 %v)
  ret {i8, i1} %result
}

define {i8, i1} @test_sadd4_poison(i8 %v) {
; CHECK-LABEL: @test_sadd4_poison(
; CHECK-NEXT:    ret { i8, i1 } { i8 -1, i1 false }
;
  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 poison, i8 %v)
  ret {i8, i1} %result
}

define {i8, i1} @test_usub1(i8 %V) {
; CHECK-LABEL: @test_usub1(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %V, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_usub2(i8 %V) {
; CHECK-LABEL: @test_usub2(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_usub2_poison(i8 %V) {
; CHECK-LABEL: @test_usub2_poison(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %V, i8 poison)
  ret {i8, i1} %x
}

define {i8, i1} @test_usub3(i8 %V) {
; CHECK-LABEL: @test_usub3(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_usub3_poison(i8 %V) {
; CHECK-LABEL: @test_usub3_poison(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 poison, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub1(i8 %V) {
; CHECK-LABEL: @test_ssub1(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %V, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub2(i8 %V) {
; CHECK-LABEL: @test_ssub2(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub2_poison(i8 %V) {
; CHECK-LABEL: @test_ssub2_poison(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %V, i8 poison)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub3(i8 %V) {
; CHECK-LABEL: @test_ssub3(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub3_poison(i8 %V) {
; CHECK-LABEL: @test_ssub3_poison(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 poison, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul1(i8 %V) {
; CHECK-LABEL: @test_umul1(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %V, i8 0)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul2(i8 %V) {
; CHECK-LABEL: @test_umul2(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul2_poison(i8 %V) {
; CHECK-LABEL: @test_umul2_poison(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %V, i8 poison)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul3(i8 %V) {
; CHECK-LABEL: @test_umul3(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 0, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul4(i8 %V) {
; CHECK-LABEL: @test_umul4(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul4_poison(i8 %V) {
; CHECK-LABEL: @test_umul4_poison(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 poison, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul1(i8 %V) {
; CHECK-LABEL: @test_smul1(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %V, i8 0)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul2(i8 %V) {
; CHECK-LABEL: @test_smul2(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul2_poison(i8 %V) {
; CHECK-LABEL: @test_smul2_poison(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %V, i8 poison)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul3(i8 %V) {
; CHECK-LABEL: @test_smul3(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 0, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul4(i8 %V) {
; CHECK-LABEL: @test_smul4(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul4_poison(i8 %V) {
; CHECK-LABEL: @test_smul4_poison(
; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 poison, i8 %V)
  ret {i8, i1} %x
}

; Test a non-intrinsic that we know about as a library call.
declare float @fabsf(float %x)

define float @test_fabs_libcall() {
; CHECK-LABEL: @test_fabs_libcall(
; CHECK-NEXT:    [[X:%.*]] = call float @fabsf(float -4.200000e+01)
; CHECK-NEXT:    ret float 4.200000e+01
;

  %x = call float @fabsf(float -42.0)
; This is still a real function call, so instsimplify won't nuke it -- other
; passes have to do that.

  ret float %x
}


declare float @llvm.fabs.f32(float) nounwind readnone
declare float @llvm.floor.f32(float) nounwind readnone
declare float @llvm.ceil.f32(float) nounwind readnone
declare float @llvm.trunc.f32(float) nounwind readnone
declare float @llvm.rint.f32(float) nounwind readnone
declare float @llvm.nearbyint.f32(float) nounwind readnone
declare float @llvm.canonicalize.f32(float) nounwind readnone
declare float @llvm.arithmetic.fence.f32(float) nounwind readnone

; Test idempotent intrinsics
define float @test_idempotence(float %a) {
; CHECK-LABEL: @test_idempotence(
; CHECK-NEXT:    [[A0:%.*]] = call float @llvm.fabs.f32(float [[A:%.*]])
; CHECK-NEXT:    [[B0:%.*]] = call float @llvm.floor.f32(float [[A]])
; CHECK-NEXT:    [[C0:%.*]] = call float @llvm.ceil.f32(float [[A]])
; CHECK-NEXT:    [[D0:%.*]] = call float @llvm.trunc.f32(float [[A]])
; CHECK-NEXT:    [[E0:%.*]] = call float @llvm.rint.f32(float [[A]])
; CHECK-NEXT:    [[F0:%.*]] = call float @llvm.nearbyint.f32(float [[A]])
; CHECK-NEXT:    [[G0:%.*]] = call float @llvm.canonicalize.f32(float [[A]])
; CHECK-NEXT:    [[H0:%.*]] = call float @llvm.arithmetic.fence.f32(float [[A]])
; CHECK-NEXT:    [[R0:%.*]] = fadd float [[A0]], [[B0]]
; CHECK-NEXT:    [[R1:%.*]] = fadd float [[R0]], [[C0]]
; CHECK-NEXT:    [[R2:%.*]] = fadd float [[R1]], [[D0]]
; CHECK-NEXT:    [[R3:%.*]] = fadd float [[R2]], [[E0]]
; CHECK-NEXT:    [[R4:%.*]] = fadd float [[R3]], [[F0]]
; CHECK-NEXT:    [[R5:%.*]] = fadd float [[R4]], [[G0]]
; CHECK-NEXT:    [[R6:%.*]] = fadd float [[R5]], [[H0]]
; CHECK-NEXT:    ret float [[R6]]
;

  %a0 = call float @llvm.fabs.f32(float %a)
  %a1 = call float @llvm.fabs.f32(float %a0)

  %b0 = call float @llvm.floor.f32(float %a)
  %b1 = call float @llvm.floor.f32(float %b0)

  %c0 = call float @llvm.ceil.f32(float %a)
  %c1 = call float @llvm.ceil.f32(float %c0)

  %d0 = call float @llvm.trunc.f32(float %a)
  %d1 = call float @llvm.trunc.f32(float %d0)

  %e0 = call float @llvm.rint.f32(float %a)
  %e1 = call float @llvm.rint.f32(float %e0)

  %f0 = call float @llvm.nearbyint.f32(float %a)
  %f1 = call float @llvm.nearbyint.f32(float %f0)

  %g0 = call float @llvm.canonicalize.f32(float %a)
  %g1 = call float @llvm.canonicalize.f32(float %g0)

  %h0 = call float @llvm.arithmetic.fence.f32(float %a)
  %h1 = call float @llvm.arithmetic.fence.f32(float %h0)

  %r0 = fadd float %a1, %b1
  %r1 = fadd float %r0, %c1
  %r2 = fadd float %r1, %d1
  %r3 = fadd float %r2, %e1
  %r4 = fadd float %r3, %f1
  %r5 = fadd float %r4, %g1
  %r6 = fadd float %r5, %h1

  ret float %r6
}

define ptr @operator_new() {
; CHECK-LABEL: @operator_new(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CALL:%.*]] = tail call noalias ptr @_Znwm(i64 8)
; CHECK-NEXT:    br i1 false, label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]]
; CHECK:       cast.notnull:
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 4
; CHECK-NEXT:    br label [[CAST_END]]
; CHECK:       cast.end:
; CHECK-NEXT:    [[CAST_RESULT:%.*]] = phi ptr [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
; CHECK-NEXT:    ret ptr [[CAST_RESULT]]
;
entry:
  %call = tail call noalias ptr @_Znwm(i64 8)
  %cmp = icmp eq ptr %call, null
  br i1 %cmp, label %cast.end, label %cast.notnull

cast.notnull:                                     ; preds = %entry
  %add.ptr = getelementptr inbounds i8, ptr %call, i64 4
  br label %cast.end

cast.end:                                         ; preds = %cast.notnull, %entry
  %cast.result = phi ptr [ %add.ptr, %cast.notnull ], [ null, %entry ]
  ret ptr %cast.result

}

declare nonnull noalias ptr @_Znwm(i64)

%"struct.std::nothrow_t" = type { i8 }
@_ZSt7nothrow = external global %"struct.std::nothrow_t"

define ptr @operator_new_nothrow_t() {
; CHECK-LABEL: @operator_new_nothrow_t(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CALL:%.*]] = tail call noalias ptr @_ZnamRKSt9nothrow_t(i64 8, ptr @_ZSt7nothrow)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[CALL]], null
; CHECK-NEXT:    br i1 [[CMP]], label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]]
; CHECK:       cast.notnull:
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 4
; CHECK-NEXT:    br label [[CAST_END]]
; CHECK:       cast.end:
; CHECK-NEXT:    [[CAST_RESULT:%.*]] = phi ptr [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
; CHECK-NEXT:    ret ptr [[CAST_RESULT]]
;
entry:
  %call = tail call noalias ptr @_ZnamRKSt9nothrow_t(i64 8, ptr @_ZSt7nothrow)
  %cmp = icmp eq ptr %call, null
  br i1 %cmp, label %cast.end, label %cast.notnull

cast.notnull:                                     ; preds = %entry
  %add.ptr = getelementptr inbounds i8, ptr %call, i64 4
  br label %cast.end

cast.end:                                         ; preds = %cast.notnull, %entry
  %cast.result = phi ptr [ %add.ptr, %cast.notnull ], [ null, %entry ]
  ret ptr %cast.result

}

declare ptr @_ZnamRKSt9nothrow_t(i64, ptr) nounwind

define ptr @malloc_can_return_null() {
; CHECK-LABEL: @malloc_can_return_null(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CALL:%.*]] = tail call noalias ptr @malloc(i64 8)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[CALL]], null
; CHECK-NEXT:    br i1 [[CMP]], label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]]
; CHECK:       cast.notnull:
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[CALL]], i64 4
; CHECK-NEXT:    br label [[CAST_END]]
; CHECK:       cast.end:
; CHECK-NEXT:    [[CAST_RESULT:%.*]] = phi ptr [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
; CHECK-NEXT:    ret ptr [[CAST_RESULT]]
;
entry:
  %call = tail call noalias ptr @malloc(i64 8)
  %cmp = icmp eq ptr %call, null
  br i1 %cmp, label %cast.end, label %cast.notnull

cast.notnull:                                     ; preds = %entry
  %add.ptr = getelementptr inbounds i8, ptr %call, i64 4
  br label %cast.end

cast.end:                                         ; preds = %cast.notnull, %entry
  %cast.result = phi ptr [ %add.ptr, %cast.notnull ], [ null, %entry ]
  ret ptr %cast.result

}

define i32 @call_null() {
; CHECK-LABEL: @call_null(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CALL:%.*]] = call i32 null()
; CHECK-NEXT:    ret i32 poison
;
entry:
  %call = call i32 null()
  ret i32 %call
}

define i32 @call_undef() {
; CHECK-LABEL: @call_undef(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CALL:%.*]] = call i32 undef()
; CHECK-NEXT:    ret i32 poison
;
entry:
  %call = call i32 undef()
  ret i32 %call
}

@GV = private constant [8 x i32] [i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49]

define <8 x i32> @partial_masked_load() {
; CHECK-LABEL: @partial_masked_load(
; CHECK-NEXT:    ret <8 x i32> <i32 undef, i32 undef, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
;
  %masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr getelementptr ([8 x i32], ptr @GV, i64 0, i64 -2), i32 4, <8 x i1> <i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
  ret <8 x i32> %masked.load
}

define <8 x i32> @masked_load_undef_mask(ptr %V) {
; CHECK-LABEL: @masked_load_undef_mask(
; CHECK-NEXT:    ret <8 x i32> <i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0>
;
  %masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr %V, i32 4, <8 x i1> undef, <8 x i32> <i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0>)
  ret <8 x i32> %masked.load
}

declare noalias ptr @malloc(i64)

declare <8 x i32> @llvm.masked.load.v8i32.p0(ptr, i32, <8 x i1>, <8 x i32>)

declare double @llvm.powi.f64.i16(double, i16)
declare <2 x double> @llvm.powi.v2f64.i16(<2 x double>, i16)
declare double @llvm.powi.f64.i32(double, i32)
declare <2 x double> @llvm.powi.v2f64.i32(<2 x double>, i32)

define double @constant_fold_powi() {
; CHECK-LABEL: @constant_fold_powi(
; CHECK-NEXT:    ret double 9.000000e+00
;
  %t0 = call double @llvm.powi.f64.i32(double 3.00000e+00, i32 2)
  ret double %t0
}

define double @constant_fold_powi_i16() {
; CHECK-LABEL: @constant_fold_powi_i16(
; CHECK-NEXT:    ret double 9.000000e+00
;
  %t0 = call double @llvm.powi.f64.i16(double 3.00000e+00, i16 2)
  ret double %t0
}

define <2 x double> @constant_fold_powi_vec() {
; CHECK-LABEL: @constant_fold_powi_vec(
; CHECK-NEXT:    ret <2 x double> <double 9.000000e+00, double 2.500000e+01>
;
  %t0 = call <2 x double> @llvm.powi.v2f64.i32(<2 x double> <double 3.00000e+00, double 5.00000e+00>, i32 2)
  ret <2 x double> %t0
}

define <2 x double> @constant_fold_powi_vec_i16() {
; CHECK-LABEL: @constant_fold_powi_vec_i16(
; CHECK-NEXT:    ret <2 x double> <double 9.000000e+00, double 2.500000e+01>
;
  %t0 = call <2 x double> @llvm.powi.v2f64.i16(<2 x double> <double 3.00000e+00, double 5.00000e+00>, i16 2)
  ret <2 x double> %t0
}

declare i8 @llvm.fshl.i8(i8, i8, i8)
declare i9 @llvm.fshr.i9(i9, i9, i9)
declare <2 x i7> @llvm.fshl.v2i7(<2 x i7>, <2 x i7>, <2 x i7>)
declare <2 x i8> @llvm.fshr.v2i8(<2 x i8>, <2 x i8>, <2 x i8>)

define i8 @fshl_no_shift(i8 %x, i8 %y) {
; CHECK-LABEL: @fshl_no_shift(
; CHECK-NEXT:    ret i8 [[X:%.*]]
;
  %z = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 0)
  ret i8 %z
}

define i9 @fshr_no_shift(i9 %x, i9 %y) {
; CHECK-LABEL: @fshr_no_shift(
; CHECK-NEXT:    ret i9 [[Y:%.*]]
;
  %z = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 0)
  ret i9 %z
}

define i8 @fshl_no_shift_modulo_bitwidth(i8 %x, i8 %y) {
; CHECK-LABEL: @fshl_no_shift_modulo_bitwidth(
; CHECK-NEXT:    ret i8 [[X:%.*]]
;
  %z = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 40)
  ret i8 %z
}

define i9 @fshr_no_shift_modulo_bitwidth(i9 %x, i9 %y) {
; CHECK-LABEL: @fshr_no_shift_modulo_bitwidth(
; CHECK-NEXT:    ret i9 [[Y:%.*]]
;
  %z = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 189)
  ret i9 %z
}

define <2 x i7> @fshl_no_shift_modulo_bitwidth_splat(<2 x i7> %x, <2 x i7> %y) {
; CHECK-LABEL: @fshl_no_shift_modulo_bitwidth_splat(
; CHECK-NEXT:    ret <2 x i7> [[X:%.*]]
;
  %z = call <2 x i7> @llvm.fshl.v2i7(<2 x i7> %x, <2 x i7> %y, <2 x i7> <i7 21, i7 21>)
  ret <2 x i7> %z
}

define <2 x i8> @fshr_no_shift_modulo_bitwidth_splat(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @fshr_no_shift_modulo_bitwidth_splat(
; CHECK-NEXT:    ret <2 x i8> [[Y:%.*]]
;
  %z = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> %x, <2 x i8> %y, <2 x i8> <i8 72, i8 72>)
  ret <2 x i8> %z
}

; If y is poison, eliminating the guard is not safe.

define i8 @fshl_zero_shift_guard(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard(
; CHECK-NEXT:    [[C:%.*]] = icmp eq i8 [[SH:%.*]], 0
; CHECK-NEXT:    [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[SH]])
; CHECK-NEXT:    [[S:%.*]] = select i1 [[C]], i8 [[X]], i8 [[F]]
; CHECK-NEXT:    ret i8 [[S]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %x, i8 %f
  ret i8 %s
}

; If y is poison, eliminating the guard is not safe.

define i8 @fshl_zero_shift_guard_swapped(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard_swapped(
; CHECK-NEXT:    [[C:%.*]] = icmp ne i8 [[SH:%.*]], 0
; CHECK-NEXT:    [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[SH]])
; CHECK-NEXT:    [[S:%.*]] = select i1 [[C]], i8 [[F]], i8 [[X]]
; CHECK-NEXT:    ret i8 [[S]]
;
  %c = icmp ne i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %f, i8 %x
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted.

define i8 @fshl_zero_shift_guard_inverted(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard_inverted(
; CHECK-NEXT:    ret i8 [[X:%.*]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %f, i8 %x
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted.

define i8 @fshl_zero_shift_guard_inverted_swapped(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard_inverted_swapped(
; CHECK-NEXT:    ret i8 [[X:%.*]]
;
  %c = icmp ne i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %x, i8 %f
  ret i8 %s
}

; If x is poison, eliminating the guard is not safe.

define i9 @fshr_zero_shift_guard(i9 %x, i9 %y, i9 %sh) {
; CHECK-LABEL: @fshr_zero_shift_guard(
; CHECK-NEXT:    [[C:%.*]] = icmp eq i9 [[SH:%.*]], 0
; CHECK-NEXT:    [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[Y:%.*]], i9 [[SH]])
; CHECK-NEXT:    [[S:%.*]] = select i1 [[C]], i9 [[Y]], i9 [[F]]
; CHECK-NEXT:    ret i9 [[S]]
;
  %c = icmp eq i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh)
  %s = select i1 %c, i9 %y, i9 %f
  ret i9 %s
}

; If x is poison, eliminating the guard is not safe.

define i9 @fshr_zero_shift_guard_swapped(i9 %x, i9 %y, i9 %sh) {
; CHECK-LABEL: @fshr_zero_shift_guard_swapped(
; CHECK-NEXT:    [[C:%.*]] = icmp ne i9 [[SH:%.*]], 0
; CHECK-NEXT:    [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[Y:%.*]], i9 [[SH]])
; CHECK-NEXT:    [[S:%.*]] = select i1 [[C]], i9 [[F]], i9 [[Y]]
; CHECK-NEXT:    ret i9 [[S]]
;
  %c = icmp ne i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh)
  %s = select i1 %c, i9 %f, i9 %y
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (y), so everything is deleted.

define i9 @fshr_zero_shift_guard_inverted(i9 %x, i9 %y, i9 %sh) {
; CHECK-LABEL: @fshr_zero_shift_guard_inverted(
; CHECK-NEXT:    ret i9 [[Y:%.*]]
;
  %c = icmp eq i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh)
  %s = select i1 %c, i9 %f, i9 %y
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (y), so everything is deleted.

define i9 @fshr_zero_shift_guard_inverted_swapped(i9 %x, i9 %y, i9 %sh) {
; CHECK-LABEL: @fshr_zero_shift_guard_inverted_swapped(
; CHECK-NEXT:    ret i9 [[Y:%.*]]
;
  %c = icmp ne i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh)
  %s = select i1 %c, i9 %y, i9 %f
  ret i9 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so the guard is not needed.

define i8 @rotl_zero_shift_guard(i8 %x, i8 %sh) {
; CHECK-LABEL: @rotl_zero_shift_guard(
; CHECK-NEXT:    [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[SH:%.*]])
; CHECK-NEXT:    ret i8 [[F]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh)
  %s = select i1 %c, i8 %x, i8 %f
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so the guard is not needed.

define i8 @rotl_zero_shift_guard_swapped(i8 %x, i8 %sh) {
; CHECK-LABEL: @rotl_zero_shift_guard_swapped(
; CHECK-NEXT:    [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[SH:%.*]])
; CHECK-NEXT:    ret i8 [[F]]
;
  %c = icmp ne i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh)
  %s = select i1 %c, i8 %f, i8 %x
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted.

define i8 @rotl_zero_shift_guard_inverted(i8 %x, i8 %sh) {
; CHECK-LABEL: @rotl_zero_shift_guard_inverted(
; CHECK-NEXT:    ret i8 [[X:%.*]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh)
  %s = select i1 %c, i8 %f, i8 %x
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted.

define i8 @rotl_zero_shift_guard_inverted_swapped(i8 %x, i8 %sh) {
; CHECK-LABEL: @rotl_zero_shift_guard_inverted_swapped(
; CHECK-NEXT:    ret i8 [[X:%.*]]
;
  %c = icmp ne i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh)
  %s = select i1 %c, i8 %x, i8 %f
  ret i8 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (x), so the guard is not needed.

define i9 @rotr_zero_shift_guard(i9 %x, i9 %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard(
; CHECK-NEXT:    [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[X]], i9 [[SH:%.*]])
; CHECK-NEXT:    ret i9 [[F]]
;
  %c = icmp eq i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh)
  %s = select i1 %c, i9 %x, i9 %f
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (x), so the guard is not needed.

define i9 @rotr_zero_shift_guard_swapped(i9 %x, i9 %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard_swapped(
; CHECK-NEXT:    [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[X]], i9 [[SH:%.*]])
; CHECK-NEXT:    ret i9 [[F]]
;
  %c = icmp ne i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh)
  %s = select i1 %c, i9 %f, i9 %x
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (x), so everything is deleted.

define i9 @rotr_zero_shift_guard_inverted(i9 %x, i9 %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard_inverted(
; CHECK-NEXT:    ret i9 [[X:%.*]]
;
  %c = icmp eq i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh)
  %s = select i1 %c, i9 %f, i9 %x
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (x), so everything is deleted.

define i9 @rotr_zero_shift_guard_inverted_swapped(i9 %x, i9 %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard_inverted_swapped(
; CHECK-NEXT:    ret i9 [[X:%.*]]
;
  %c = icmp ne i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh)
  %s = select i1 %c, i9 %x, i9 %f
  ret i9 %s
}

; Negative test - make sure we're matching the correct parameter of fshl.

define i8 @fshl_zero_shift_guard_wrong_select_op(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard_wrong_select_op(
; CHECK-NEXT:    [[C:%.*]] = icmp eq i8 [[SH:%.*]], 0
; CHECK-NEXT:    [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[SH]])
; CHECK-NEXT:    [[S:%.*]] = select i1 [[C]], i8 [[Y]], i8 [[F]]
; CHECK-NEXT:    ret i8 [[S]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %y, i8 %f
  ret i8 %s
}

; Vector types work too.

define <2 x i8> @rotr_zero_shift_guard_splat(<2 x i8> %x, <2 x i8> %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard_splat(
; CHECK-NEXT:    [[F:%.*]] = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> [[X:%.*]], <2 x i8> [[X]], <2 x i8> [[SH:%.*]])
; CHECK-NEXT:    ret <2 x i8> [[F]]
;
  %c = icmp eq <2 x i8> %sh, zeroinitializer
  %f = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> %x, <2 x i8> %x, <2 x i8> %sh)
  %s = select <2 x i1> %c, <2 x i8> %x, <2 x i8> %f
  ret <2 x i8> %s
}

; If first two operands of funnel shift are undef, the result is undef

define i8 @fshl_ops_undef(i8 %shamt) {
; CHECK-LABEL: @fshl_ops_undef(
; CHECK-NEXT:    ret i8 undef
;
  %r = call i8 @llvm.fshl.i8(i8 undef, i8 undef, i8 %shamt)
  ret i8 %r
}

define i9 @fshr_ops_undef(i9 %shamt) {
; CHECK-LABEL: @fshr_ops_undef(
; CHECK-NEXT:    ret i9 undef
;
  %r = call i9 @llvm.fshr.i9(i9 undef, i9 undef, i9 %shamt)
  ret i9 %r
}

; If shift amount is undef, treat it as zero, returning operand 0 or 1

define i8 @fshl_shift_undef(i8 %x, i8 %y) {
; CHECK-LABEL: @fshl_shift_undef(
; CHECK-NEXT:    ret i8 [[X:%.*]]
;
  %r = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 undef)
  ret i8 %r
}

define i9 @fshr_shift_undef(i9 %x, i9 %y) {
; CHECK-LABEL: @fshr_shift_undef(
; CHECK-NEXT:    ret i9 [[Y:%.*]]
;
  %r = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 undef)
  ret i9 %r
}

; If one of operands is poison, the result is poison
; TODO: these should be poison
define i8 @fshl_ops_poison(i8 %b, i8 %shamt) {
; CHECK-LABEL: @fshl_ops_poison(
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.fshl.i8(i8 poison, i8 [[B:%.*]], i8 [[SHAMT:%.*]])
; CHECK-NEXT:    ret i8 [[R]]
;
  %r = call i8 @llvm.fshl.i8(i8 poison, i8 %b, i8 %shamt)
  ret i8 %r
}

define i8 @fshl_ops_poison2(i8 %shamt) {
; CHECK-LABEL: @fshl_ops_poison2(
; CHECK-NEXT:    ret i8 undef
;
  %r = call i8 @llvm.fshl.i8(i8 poison, i8 undef, i8 %shamt)
  ret i8 %r
}

define i8 @fshl_ops_poison3(i8 %a, i8 %shamt) {
; CHECK-LABEL: @fshl_ops_poison3(
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.fshl.i8(i8 [[A:%.*]], i8 poison, i8 [[SHAMT:%.*]])
; CHECK-NEXT:    ret i8 [[R]]
;
  %r = call i8 @llvm.fshl.i8(i8 %a, i8 poison, i8 %shamt)
  ret i8 %r
}

define i8 @fshl_ops_poison4(i8 %shamt) {
; CHECK-LABEL: @fshl_ops_poison4(
; CHECK-NEXT:    ret i8 undef
;
  %r = call i8 @llvm.fshl.i8(i8 undef, i8 poison, i8 %shamt)
  ret i8 %r
}

define i8 @fshl_ops_poison5(i8 %a, i8 %b) {
; CHECK-LABEL: @fshl_ops_poison5(
; CHECK-NEXT:    ret i8 [[A:%.*]]
;
  %r = call i8 @llvm.fshl.i8(i8 %a, i8 %b, i8 poison)
  ret i8 %r
}

define i8 @fshl_ops_poison6() {
; CHECK-LABEL: @fshl_ops_poison6(
; CHECK-NEXT:    ret i8 undef
;
  %r = call i8 @llvm.fshl.i8(i8 undef, i8 undef, i8 poison)
  ret i8 %r
}

define i9 @fshr_ops_poison(i9 %b, i9 %shamt) {
; CHECK-LABEL: @fshr_ops_poison(
; CHECK-NEXT:    [[R:%.*]] = call i9 @llvm.fshr.i9(i9 poison, i9 [[B:%.*]], i9 [[SHAMT:%.*]])
; CHECK-NEXT:    ret i9 [[R]]
;
  %r = call i9 @llvm.fshr.i9(i9 poison, i9 %b, i9 %shamt)
  ret i9 %r
}

define i9 @fshr_ops_poison2(i9 %shamt) {
; CHECK-LABEL: @fshr_ops_poison2(
; CHECK-NEXT:    ret i9 undef
;
  %r = call i9 @llvm.fshr.i9(i9 poison, i9 undef, i9 %shamt)
  ret i9 %r
}

define i9 @fshr_ops_poison3(i9 %a, i9 %shamt) {
; CHECK-LABEL: @fshr_ops_poison3(
; CHECK-NEXT:    [[R:%.*]] = call i9 @llvm.fshr.i9(i9 [[A:%.*]], i9 poison, i9 [[SHAMT:%.*]])
; CHECK-NEXT:    ret i9 [[R]]
;
  %r = call i9 @llvm.fshr.i9(i9 %a, i9 poison, i9 %shamt)
  ret i9 %r
}

define i9 @fshr_ops_poison4(i9 %shamt) {
; CHECK-LABEL: @fshr_ops_poison4(
; CHECK-NEXT:    ret i9 undef
;
  %r = call i9 @llvm.fshr.i9(i9 undef, i9 poison, i9 %shamt)
  ret i9 %r
}

define i9 @fshr_ops_poison5(i9 %a, i9 %b) {
; CHECK-LABEL: @fshr_ops_poison5(
; CHECK-NEXT:    ret i9 [[B:%.*]]
;
  %r = call i9 @llvm.fshr.i9(i9 %a, i9 %b, i9 poison)
  ret i9 %r
}

define i9 @fshr_ops_poison6() {
; CHECK-LABEL: @fshr_ops_poison6(
; CHECK-NEXT:    ret i9 undef
;
  %r = call i9 @llvm.fshr.i9(i9 undef, i9 undef, i9 poison)
  ret i9 %r
}

; Funnel shift of constant all-zeros (or all-ones) data operands folds to that
; constant regardless of the shift amount. The vector variants show that
; poison lanes in the constant operands do not block the fold.

define i8 @fshl_zero(i8 %shamt) {
; CHECK-LABEL: @fshl_zero(
; CHECK-NEXT:    ret i8 0
;
  %r = call i8 @llvm.fshl.i8(i8 0, i8 0, i8 %shamt)
  ret i8 %r
}

define <2 x i8> @fshr_zero_vec(<2 x i8> %shamt) {
; CHECK-LABEL: @fshr_zero_vec(
; CHECK-NEXT:    ret <2 x i8> zeroinitializer
;
  %r = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> zeroinitializer, <2 x i8> <i8 0, i8 poison>, <2 x i8> %shamt)
  ret <2 x i8> %r
}

define <2 x i7> @fshl_ones_vec(<2 x i7> %shamt) {
; CHECK-LABEL: @fshl_ones_vec(
; CHECK-NEXT:    ret <2 x i7> splat (i7 -1)
;
  %r = call <2 x i7> @llvm.fshl.v2i7(<2 x i7> <i7 poison, i7 -1>, <2 x i7> <i7 -1, i7 poison>, <2 x i7> %shamt)
  ret <2 x i7> %r
}

define i9 @fshr_ones(i9 %shamt) {
; CHECK-LABEL: @fshr_ones(
; CHECK-NEXT:    ret i9 -1
;
  %r = call i9 @llvm.fshr.i9(i9 -1, i9 -1, i9 %shamt)
  ret i9 %r
}
998
declare double @llvm.fma.f64(double,double,double)
declare double @llvm.fmuladd.f64(double,double,double)

; fma/fmuladd with an undef operand folds to a quiet NaN (0x7FF8000000000000);
; with a poison operand the result is poison, and poison wins when both are
; present. Each operand position is exercised for both intrinsics.

define double @fma_undef_op0(double %x, double %y) {
; CHECK-LABEL: @fma_undef_op0(
; CHECK-NEXT:    ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double undef, double %x, double %y)
  ret double %r
}

define double @fma_poison_op0(double %x, double %y) {
; CHECK-LABEL: @fma_poison_op0(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fma.f64(double poison, double %x, double %y)
  ret double %r
}

define double @fma_undef_op1(double %x, double %y) {
; CHECK-LABEL: @fma_undef_op1(
; CHECK-NEXT:    ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double %x, double undef, double %y)
  ret double %r
}

define double @fma_poison_op1(double %x, double %y) {
; CHECK-LABEL: @fma_poison_op1(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fma.f64(double %x, double poison, double %y)
  ret double %r
}

define double @fma_undef_op2(double %x, double %y) {
; CHECK-LABEL: @fma_undef_op2(
; CHECK-NEXT:    ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double %x, double %y, double undef)
  ret double %r
}

define double @fma_poison_op2(double %x, double %y) {
; CHECK-LABEL: @fma_poison_op2(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fma.f64(double %x, double %y, double poison)
  ret double %r
}

; Mixed undef + poison: poison takes precedence over the NaN fold.
define double @fma_undef_op0_poison_op1(double %x) {
; CHECK-LABEL: @fma_undef_op0_poison_op1(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fma.f64(double undef, double poison, double %x)
  ret double %r
}

define double @fma_undef_op0_poison_op2(double %x) {
; CHECK-LABEL: @fma_undef_op0_poison_op2(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fma.f64(double undef, double %x, double poison)
  ret double %r
}

define double @fmuladd_undef_op0(double %x, double %y) {
; CHECK-LABEL: @fmuladd_undef_op0(
; CHECK-NEXT:    ret double 0x7FF8000000000000
;
  %r = call double @llvm.fmuladd.f64(double undef, double %x, double %y)
  ret double %r
}

define double @fmuladd_poison_op0(double %x, double %y) {
; CHECK-LABEL: @fmuladd_poison_op0(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fmuladd.f64(double poison, double %x, double %y)
  ret double %r
}

define double @fmuladd_undef_op1(double %x, double %y) {
; CHECK-LABEL: @fmuladd_undef_op1(
; CHECK-NEXT:    ret double 0x7FF8000000000000
;
  %r = call double @llvm.fmuladd.f64(double %x, double undef, double %y)
  ret double %r
}

define double @fmuladd_poison_op1(double %x, double %y) {
; CHECK-LABEL: @fmuladd_poison_op1(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fmuladd.f64(double %x, double poison, double %y)
  ret double %r
}

define double @fmuladd_undef_op2(double %x, double %y) {
; CHECK-LABEL: @fmuladd_undef_op2(
; CHECK-NEXT:    ret double 0x7FF8000000000000
;
  %r = call double @llvm.fmuladd.f64(double %x, double %y, double undef)
  ret double %r
}

define double @fmuladd_poison_op2(double %x, double %y) {
; CHECK-LABEL: @fmuladd_poison_op2(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fmuladd.f64(double %x, double %y, double poison)
  ret double %r
}

; Mixed constant NaN + poison: still folds to poison.
define double @fmuladd_nan_op0_poison_op1(double %x) {
; CHECK-LABEL: @fmuladd_nan_op0_poison_op1(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fmuladd.f64(double 0x7ff8000000000000, double poison, double %x)
  ret double %r
}

define double @fmuladd_nan_op1_poison_op2(double %x) {
; CHECK-LABEL: @fmuladd_nan_op1_poison_op2(
; CHECK-NEXT:    ret double poison
;
  %r = call double @llvm.fmuladd.f64(double %x, double 0x7ff8000000000000, double poison)
  ret double %r
}
1129
; fma/fmuladd with a constant NaN operand folds to a NaN. The distinct
; payloads (...0001, ...1234, ...dead, etc.) let the CHECK lines verify which
; NaN constant is propagated to the result.

define double @fma_nan_op0(double %x, double %y) {
; CHECK-LABEL: @fma_nan_op0(
; CHECK-NEXT:    ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double 0x7ff8000000000000, double %x, double %y)
  ret double %r
}

define double @fma_nan_op1(double %x, double %y) {
; CHECK-LABEL: @fma_nan_op1(
; CHECK-NEXT:    ret double 0x7FF8000000000001
;
  %r = call double @llvm.fma.f64(double %x, double 0x7ff8000000000001, double %y)
  ret double %r
}

define double @fma_nan_op2(double %x, double %y) {
; CHECK-LABEL: @fma_nan_op2(
; CHECK-NEXT:    ret double 0x7FF8000000000002
;
  %r = call double @llvm.fma.f64(double %x, double %y, double 0x7ff8000000000002)
  ret double %r
}

; Two constant NaN operands: the result is the earlier operand's NaN.
define double @fmuladd_nan_op0_op1(double %x) {
; CHECK-LABEL: @fmuladd_nan_op0_op1(
; CHECK-NEXT:    ret double 0x7FF8000000001234
;
  %r = call double @llvm.fmuladd.f64(double 0x7ff8000000001234, double 0x7ff800000000dead, double %x)
  ret double %r
}

define double @fmuladd_nan_op0_op2(double %x) {
; CHECK-LABEL: @fmuladd_nan_op0_op2(
; CHECK-NEXT:    ret double 0x7FF8000000005678
;
  %r = call double @llvm.fmuladd.f64(double 0x7ff8000000005678, double %x, double 0x7ff800000000dead)
  ret double %r
}

define double @fmuladd_nan_op1_op2(double %x) {
; CHECK-LABEL: @fmuladd_nan_op1_op2(
; CHECK-NEXT:    ret double 0x7FF80000AAAAAAAA
;
  %r = call double @llvm.fmuladd.f64(double %x, double 0x7ff80000aaaaaaaa, double 0x7ff800000000dead)
  ret double %r
}
1177
; Negative tests: operands that would CREATE a NaN (inf * 0, or inf + -inf
; with a guaranteed-not-NaN multiplicand) must not be constant-folded by
; instsimplify - the CHECK lines verify the calls are left in place.

define double @fma_nan_multiplicand_inf_zero(double %x) {
; CHECK-LABEL: @fma_nan_multiplicand_inf_zero(
; CHECK-NEXT:    [[R:%.*]] = call double @llvm.fma.f64(double 0x7FF0000000000000, double 0.000000e+00, double [[X:%.*]])
; CHECK-NEXT:    ret double [[R]]
;
  %r = call double @llvm.fma.f64(double 0x7ff0000000000000, double 0.0, double %x)
  ret double %r
}

define double @fma_nan_multiplicand_zero_inf(double %x) {
; CHECK-LABEL: @fma_nan_multiplicand_zero_inf(
; CHECK-NEXT:    [[R:%.*]] = call double @llvm.fma.f64(double 0.000000e+00, double 0x7FF0000000000000, double [[X:%.*]])
; CHECK-NEXT:    ret double [[R]]
;
  %r = call double @llvm.fma.f64(double 0.0, double 0x7ff0000000000000, double %x)
  ret double %r
}

; uitofp produces a value that cannot be NaN, so inf * notnan is +/-inf and
; adding -inf may give NaN: no fold.
define double @fma_nan_addend_inf_neginf(double %x, i32 %y) {
; CHECK-LABEL: @fma_nan_addend_inf_neginf(
; CHECK-NEXT:    [[NOTNAN:%.*]] = uitofp i32 [[Y:%.*]] to double
; CHECK-NEXT:    [[R:%.*]] = call double @llvm.fma.f64(double 0x7FF0000000000000, double [[NOTNAN]], double 0xFFF0000000000000)
; CHECK-NEXT:    ret double [[R]]
;
  %notnan = uitofp i32 %y to double
  %r = call double @llvm.fma.f64(double 0x7ff0000000000000, double %notnan, double 0xfff0000000000000)
  ret double %r
}

; A select of two finite constants is also known-not-NaN; still no fold.
define double @fma_nan_addend_neginf_inf(double %x, i1 %y) {
; CHECK-LABEL: @fma_nan_addend_neginf_inf(
; CHECK-NEXT:    [[NOTNAN:%.*]] = select i1 [[Y:%.*]], double 4.200000e+01, double -1.000000e-01
; CHECK-NEXT:    [[R:%.*]] = call double @llvm.fma.f64(double [[NOTNAN]], double 0xFFF0000000000000, double 0x7FF0000000000000)
; CHECK-NEXT:    ret double [[R]]
;
  %notnan = select i1 %y, double 42.0, double -0.1
  %r = call double @llvm.fma.f64(double %notnan, double 0xfff0000000000000, double 0x7ff0000000000000)
  ret double %r
}

define double @fmuladd_nan_multiplicand_neginf_zero(double %x) {
; CHECK-LABEL: @fmuladd_nan_multiplicand_neginf_zero(
; CHECK-NEXT:    [[R:%.*]] = call double @llvm.fmuladd.f64(double 0xFFF0000000000000, double 0.000000e+00, double [[X:%.*]])
; CHECK-NEXT:    ret double [[R]]
;
  %r = call double @llvm.fmuladd.f64(double 0xfff0000000000000, double 0.0, double %x)
  ret double %r
}

define double @fmuladd_nan_multiplicand_negzero_inf(double %x) {
; CHECK-LABEL: @fmuladd_nan_multiplicand_negzero_inf(
; CHECK-NEXT:    [[R:%.*]] = call double @llvm.fmuladd.f64(double -0.000000e+00, double 0x7FF0000000000000, double [[X:%.*]])
; CHECK-NEXT:    ret double [[R]]
;
  %r = call double @llvm.fmuladd.f64(double -0.0, double 0x7ff0000000000000, double %x)
  ret double %r
}

define double @fmuladd_nan_addend_inf_neginf(double %x, i32 %y) {
; CHECK-LABEL: @fmuladd_nan_addend_inf_neginf(
; CHECK-NEXT:    [[NOTNAN:%.*]] = sitofp i32 [[Y:%.*]] to double
; CHECK-NEXT:    [[R:%.*]] = call double @llvm.fmuladd.f64(double 0x7FF0000000000000, double [[NOTNAN]], double 0xFFF0000000000000)
; CHECK-NEXT:    ret double [[R]]
;
  %notnan = sitofp i32 %y to double
  %r = call double @llvm.fmuladd.f64(double 0x7ff0000000000000, double %notnan, double 0xfff0000000000000)
  ret double %r
}

define double @fmuladd_nan_addend_neginf_inf(double %x, i1 %y) {
; CHECK-LABEL: @fmuladd_nan_addend_neginf_inf(
; CHECK-NEXT:    [[NOTNAN:%.*]] = select i1 [[Y:%.*]], double 4.200000e+01, double -1.000000e-01
; CHECK-NEXT:    [[R:%.*]] = call double @llvm.fmuladd.f64(double [[NOTNAN]], double 0xFFF0000000000000, double 0x7FF0000000000000)
; CHECK-NEXT:    ret double [[R]]
;
  %notnan = select i1 %y, double 42.0, double -0.1
  %r = call double @llvm.fmuladd.f64(double %notnan, double 0xfff0000000000000, double 0x7ff0000000000000)
  ret double %r
}
1257
declare float @llvm.copysign.f32(float, float)
declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>)

; copysign folds: copysign(x, x) -> x; copysign(x, -x) -> -x;
; copysign(-x, x) -> x. The mixed fast-math flags on the fneg/fsub and the
; call show the folds do not depend on any particular flag.

define float @copysign_same_operand(float %x) {
; CHECK-LABEL: @copysign_same_operand(
; CHECK-NEXT:    ret float [[X:%.*]]
;
  %r = call float @llvm.copysign.f32(float %x, float %x)
  ret float %r
}

define <2 x double> @copysign_same_operand_vec(<2 x double> %x) {
; CHECK-LABEL: @copysign_same_operand_vec(
; CHECK-NEXT:    ret <2 x double> [[X:%.*]]
;
  %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %x, <2 x double> %x)
  ret <2 x double> %r
}

define float @negated_sign_arg(float %x) {
; CHECK-LABEL: @negated_sign_arg(
; CHECK-NEXT:    [[NEGX:%.*]] = fsub ninf float -0.000000e+00, [[X:%.*]]
; CHECK-NEXT:    ret float [[NEGX]]
;
  %negx = fsub ninf float -0.0, %x
  %r = call arcp float @llvm.copysign.f32(float %x, float %negx)
  ret float %r
}

define <2 x double> @negated_sign_arg_vec(<2 x double> %x) {
; CHECK-LABEL: @negated_sign_arg_vec(
; CHECK-NEXT:    [[NEGX:%.*]] = fneg afn <2 x double> [[X:%.*]]
; CHECK-NEXT:    ret <2 x double> [[NEGX]]
;
  %negx = fneg afn <2 x double> %x
  %r = call arcp <2 x double> @llvm.copysign.v2f64(<2 x double> %x, <2 x double> %negx)
  ret <2 x double> %r
}

define float @negated_mag_arg(float %x) {
; CHECK-LABEL: @negated_mag_arg(
; CHECK-NEXT:    ret float [[X:%.*]]
;
  %negx = fneg nnan float %x
  %r = call ninf float @llvm.copysign.f32(float %negx, float %x)
  ret float %r
}

define <2 x double> @negated_mag_arg_vec(<2 x double> %x) {
; CHECK-LABEL: @negated_mag_arg_vec(
; CHECK-NEXT:    ret <2 x double> [[X:%.*]]
;
  %negx = fneg afn <2 x double> %x
  %r = call arcp <2 x double> @llvm.copysign.v2f64(<2 x double> %negx, <2 x double> %x)
  ret <2 x double> %r
}
1314
; We handle the "returned" attribute only in InstCombine, because the fact
; that this simplification may replace one call with another may cause issues
; for call graph passes.

declare i32 @passthru_i32(i32 returned)
declare ptr @passthru_p8(ptr returned)

; All of the tests below verify InstSimplify does NOT forward the argument of
; a call with a "returned" parameter - each call must remain untouched.

define i32 @returned_const_int_arg() {
; CHECK-LABEL: @returned_const_int_arg(
; CHECK-NEXT:    [[X:%.*]] = call i32 @passthru_i32(i32 42)
; CHECK-NEXT:    ret i32 [[X]]
;
  %x = call i32 @passthru_i32(i32 42)
  ret i32 %x
}

define ptr @returned_const_ptr_arg() {
; CHECK-LABEL: @returned_const_ptr_arg(
; CHECK-NEXT:    [[X:%.*]] = call ptr @passthru_p8(ptr null)
; CHECK-NEXT:    ret ptr [[X]]
;
  %x = call ptr @passthru_p8(ptr null)
  ret ptr %x
}

define i32 @returned_var_arg(i32 %arg) {
; CHECK-LABEL: @returned_var_arg(
; CHECK-NEXT:    [[X:%.*]] = call i32 @passthru_i32(i32 [[ARG:%.*]])
; CHECK-NEXT:    ret i32 [[X]]
;
  %x = call i32 @passthru_i32(i32 %arg)
  ret i32 %x
}

; musttail calls must never be removed, even if the result were simplifiable.
define i32 @returned_const_int_arg_musttail(i32 %arg) {
; CHECK-LABEL: @returned_const_int_arg_musttail(
; CHECK-NEXT:    [[X:%.*]] = musttail call i32 @passthru_i32(i32 42)
; CHECK-NEXT:    ret i32 [[X]]
;
  %x = musttail call i32 @passthru_i32(i32 42)
  ret i32 %x
}

define i32 @returned_var_arg_musttail(i32 %arg) {
; CHECK-LABEL: @returned_var_arg_musttail(
; CHECK-NEXT:    [[X:%.*]] = musttail call i32 @passthru_i32(i32 [[ARG:%.*]])
; CHECK-NEXT:    ret i32 [[X]]
;
  %x = musttail call i32 @passthru_i32(i32 %arg)
  ret i32 %x
}

define i32 @call_undef_musttail() {
; CHECK-LABEL: @call_undef_musttail(
; CHECK-NEXT:    [[X:%.*]] = musttail call i32 undef()
; CHECK-NEXT:    ret i32 [[X]]
;
  %x = musttail call i32 undef()
  ret i32 %x
}
1375
; This is not the builtin fmax, so we don't know anything about its behavior.

declare float @fmaxf(float, float)

; The nobuiltin attribute (#0, defined at the end of the file) prevents
; treating @fmaxf as the libm function, so the fabs of its result must stay.
define float @nobuiltin_fmax() {
; CHECK-LABEL: @nobuiltin_fmax(
; CHECK-NEXT:    [[M:%.*]] = call float @fmaxf(float 0.000000e+00, float 1.000000e+00) #[[ATTR3:[0-9]+]]
; CHECK-NEXT:    [[R:%.*]] = call float @llvm.fabs.f32(float [[M]])
; CHECK-NEXT:    ret float [[R]]
;
  %m = call float @fmaxf(float 0.0, float 1.0) #0
  %r = call float @llvm.fabs.f32(float %m)
  ret float %r
}
1390
1391
declare i32 @llvm.ctpop.i32(i32)
declare <3 x i33> @llvm.ctpop.v3i33(<3 x i33>)
declare i1 @llvm.ctpop.i1(i1)
declare i1 @llvm.ctlz.i1(i1, i1)
declare i1 @llvm.cttz.i1(i1, i1)

; ctpop of a value known to have at most one set bit in the lowest or highest
; position is just that bit's value; other single-bit positions are not
; handled here (instcombine territory).

define i32 @ctpop_lowbit(i32 %x) {
; CHECK-LABEL: @ctpop_lowbit(
; CHECK-NEXT:    [[B:%.*]] = and i32 [[X:%.*]], 1
; CHECK-NEXT:    ret i32 [[B]]
;
  %b = and i32 %x, 1
  %r = call i32 @llvm.ctpop.i32(i32 %b)
  ret i32 %r
}

; Negative test - only low bit allowed
; This could be reduced by instcombine to and+shift.

define i32 @ctpop_pow2(i32 %x) {
; CHECK-LABEL: @ctpop_pow2(
; CHECK-NEXT:    [[B:%.*]] = and i32 [[X:%.*]], 4
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.ctpop.i32(i32 [[B]])
; CHECK-NEXT:    ret i32 [[R]]
;
  %b = and i32 %x, 4
  %r = call i32 @llvm.ctpop.i32(i32 %b)
  ret i32 %r
}

define <3 x i33> @ctpop_signbit(<3 x i33> %x) {
; CHECK-LABEL: @ctpop_signbit(
; CHECK-NEXT:    [[B:%.*]] = lshr <3 x i33> [[X:%.*]], splat (i33 32)
; CHECK-NEXT:    ret <3 x i33> [[B]]
;
  %b = lshr <3 x i33> %x, <i33 32, i33 32, i33 32>
  %r = tail call <3 x i33> @llvm.ctpop.v3i33(<3 x i33> %b)
  ret <3 x i33> %r
}

; Negative test - only 1 bit allowed

define <3 x i33> @ctpop_notsignbit(<3 x i33> %x) {
; CHECK-LABEL: @ctpop_notsignbit(
; CHECK-NEXT:    [[B:%.*]] = lshr <3 x i33> [[X:%.*]], splat (i33 31)
; CHECK-NEXT:    [[R:%.*]] = tail call <3 x i33> @llvm.ctpop.v3i33(<3 x i33> [[B]])
; CHECK-NEXT:    ret <3 x i33> [[R]]
;
  %b = lshr <3 x i33> %x, <i33 31, i33 31, i33 31>
  %r = tail call <3 x i33> @llvm.ctpop.v3i33(<3 x i33> %b)
  ret <3 x i33> %r
}

; ctpop of a 1-bit value is the identity.
define i1 @ctpop_bool(i1 %x) {
; CHECK-LABEL: @ctpop_bool(
; CHECK-NEXT:    ret i1 [[X:%.*]]
;
  %r = tail call i1 @llvm.ctpop.i1(i1 %x)
  ret i1 %r
}
1452
declare i32 @llvm.cttz.i32(i32, i1)
declare <3 x i33> @llvm.cttz.v3i33(<3 x i33>, i1)

; cttz(1 << x) == x, for either value of the is-zero-poison flag; the vector
; variant shows a poison lane in the constant 1 does not block the fold.

define i32 @cttz_shl1(i32 %x) {
; CHECK-LABEL: @cttz_shl1(
; CHECK-NEXT:    ret i32 [[X:%.*]]
;
  %s = shl i32 1, %x
  %r = call i32 @llvm.cttz.i32(i32 %s, i1 true)
  ret i32 %r
}

define <3 x i33> @cttz_shl1_vec(<3 x i33> %x) {
; CHECK-LABEL: @cttz_shl1_vec(
; CHECK-NEXT:    ret <3 x i33> [[X:%.*]]
;
  %s = shl <3 x i33> <i33 1, i33 1, i33 poison>, %x
  %r = call <3 x i33> @llvm.cttz.v3i33(<3 x i33> %s, i1 false)
  ret <3 x i33> %r
}

; Negative test - this could be generalized in instcombine though.

define i32 @cttz_shl_not_low_bit(i32 %x) {
; CHECK-LABEL: @cttz_shl_not_low_bit(
; CHECK-NEXT:    [[S:%.*]] = shl i32 2, [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.cttz.i32(i32 [[S]], i1 true)
; CHECK-NEXT:    ret i32 [[R]]
;
  %s = shl i32 2, %x
  %r = call i32 @llvm.cttz.i32(i32 %s, i1 true)
  ret i32 %r
}
1486
declare i32 @llvm.ctlz.i32(i32, i1)
declare <3 x i33> @llvm.ctlz.v3i33(<3 x i33>, i1)

; ctlz(negative-constant >> x) folds: lshr of a constant with the sign bit
; set leaves exactly x leading zeros, so the result is x; ashr of such a
; constant keeps the sign bit set, so the result is 0.

define i32 @ctlz_lshr_sign_bit(i32 %x) {
; CHECK-LABEL: @ctlz_lshr_sign_bit(
; CHECK-NEXT:    ret i32 [[X:%.*]]
;
  %s = lshr i32 2147483648, %x
  %r = call i32 @llvm.ctlz.i32(i32 %s, i1 true)
  ret i32 %r
}

define i32 @ctlz_lshr_negative(i32 %x) {
; CHECK-LABEL: @ctlz_lshr_negative(
; CHECK-NEXT:    ret i32 [[X:%.*]]
;
  %s = lshr i32 -42, %x
  %r = call i32 @llvm.ctlz.i32(i32 %s, i1 true)
  ret i32 %r
}

define <3 x i33> @ctlz_lshr_sign_bit_vec(<3 x i33> %x) {
; CHECK-LABEL: @ctlz_lshr_sign_bit_vec(
; CHECK-NEXT:    ret <3 x i33> [[X:%.*]]
;
  %s = lshr <3 x i33> <i33 poison, i33 4294967296, i33 4294967296>, %x
  %r = call <3 x i33> @llvm.ctlz.v3i33(<3 x i33> %s, i1 false)
  ret <3 x i33> %r
}

; Negative test - this could be generalized in instcombine though.

define i32 @ctlz_lshr_not_negative(i32 %x) {
; CHECK-LABEL: @ctlz_lshr_not_negative(
; CHECK-NEXT:    [[S:%.*]] = lshr i32 42, [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.ctlz.i32(i32 [[S]], i1 true)
; CHECK-NEXT:    ret i32 [[R]]
;
  %s = lshr i32 42, %x
  %r = call i32 @llvm.ctlz.i32(i32 %s, i1 true)
  ret i32 %r
}

define i32 @ctlz_ashr_sign_bit(i32 %x) {
; CHECK-LABEL: @ctlz_ashr_sign_bit(
; CHECK-NEXT:    ret i32 0
;
  %s = ashr i32 2147483648, %x
  %r = call i32 @llvm.ctlz.i32(i32 %s, i1 false)
  ret i32 %r
}

define i32 @ctlz_ashr_negative(i32 %x) {
; CHECK-LABEL: @ctlz_ashr_negative(
; CHECK-NEXT:    ret i32 0
;
  %s = ashr i32 -42, %x
  %r = call i32 @llvm.ctlz.i32(i32 %s, i1 false)
  ret i32 %r
}

define <3 x i33> @ctlz_ashr_sign_bit_vec(<3 x i33> %x) {
; CHECK-LABEL: @ctlz_ashr_sign_bit_vec(
; CHECK-NEXT:    ret <3 x i33> zeroinitializer
;
  %s = ashr <3 x i33> <i33 4294967296, i33 poison, i33 4294967296>, %x
  %r = call <3 x i33> @llvm.ctlz.v3i33(<3 x i33> %s, i1 true)
  ret <3 x i33> %r
}
1556
declare ptr @llvm.ptrmask.p0.i64(ptr , i64)

; The icmp of a noalias malloc result against its own ptrmask must not be
; folded - the CHECK lines verify all three instructions survive.
define i1 @capture_vs_recurse(i64 %mask) {
; CHECK-LABEL: @capture_vs_recurse(
; CHECK-NEXT:    [[A:%.*]] = call noalias ptr @malloc(i64 8)
; CHECK-NEXT:    [[B:%.*]] = call nonnull ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 [[MASK:%.*]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq ptr [[A]], [[B]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %a = call noalias ptr @malloc(i64 8)
  %b = call nonnull ptr @llvm.ptrmask.p0.i64(ptr %a, i64 %mask)
  %cmp = icmp eq ptr %a, %b
  ret i1 %cmp
}
1571
; i1-width bit-counting intrinsics compared against false. When the
; is-zero-poison flag is true, a zero input would be poison, so the result is
; known to be the input bit itself and the icmp folds to true; with the flag
; false, the call must remain. ctpop.i1 (no flag) simplifies to its operand,
; leaving just the icmp.

define i1 @ctlz_i1_non_poison_eq_false(i1 %x) {
; CHECK-LABEL: @ctlz_i1_non_poison_eq_false(
; CHECK-NEXT:    [[CT:%.*]] = call i1 @llvm.ctlz.i1(i1 [[X:%.*]], i1 false)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i1 [[CT]], false
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %ct = call i1 @llvm.ctlz.i1(i1 %x, i1 false)
  %cmp = icmp eq i1 %ct, false
  ret i1 %cmp
}

define i1 @ctlz_i1_poison_eq_false(i1 %x) {
; CHECK-LABEL: @ctlz_i1_poison_eq_false(
; CHECK-NEXT:    ret i1 true
;
  %ct = call i1 @llvm.ctlz.i1(i1 %x, i1 true)
  %cmp = icmp eq i1 %ct, false
  ret i1 %cmp
}

define i1 @cttz_i1_non_poison_eq_false(i1 %x) {
; CHECK-LABEL: @cttz_i1_non_poison_eq_false(
; CHECK-NEXT:    [[CT:%.*]] = call i1 @llvm.cttz.i1(i1 [[X:%.*]], i1 false)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i1 [[CT]], false
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %ct = call i1 @llvm.cttz.i1(i1 %x, i1 false)
  %cmp = icmp eq i1 %ct, false
  ret i1 %cmp
}

define i1 @cttz_i1_poison_eq_false(i1 %x) {
; CHECK-LABEL: @cttz_i1_poison_eq_false(
; CHECK-NEXT:    ret i1 true
;
  %ct = call i1 @llvm.cttz.i1(i1 %x, i1 true)
  %cmp = icmp eq i1 %ct, false
  ret i1 %cmp
}

define i1 @ctpop_i1_non_poison_eq_false(i1 %x) {
; CHECK-LABEL: @ctpop_i1_non_poison_eq_false(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i1 [[X:%.*]], false
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %ct = call i1 @llvm.ctpop.i1(i1 %x)
  %cmp = icmp eq i1 %ct, false
  ret i1 %cmp
}
1621
1622attributes #0 = { nobuiltin readnone }
1623