xref: /llvm-project/llvm/test/CodeGen/AArch64/GlobalISel/call-lowering-zeroext.ll (revision 31d6a572579a5d1d9ae14a1a9d4ffbdb1b098e49)
1; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
2; RUN: llc -mtriple=aarch64 -global-isel -stop-after=irtranslator -verify-machineinstrs -o - %s | FileCheck %s
3
4; Verify that we generate G_ASSERT_ZEXT for zeroext parameters.
5
; An i8 argument is widened into $w0; because of the zeroext attribute the
; IRTranslator marks the incoming copy with G_ASSERT_ZEXT (width 8) before
; truncating back down to s8.
6define i8 @zeroext_param_i8(i8 zeroext %x) {
7  ; CHECK-LABEL: name: zeroext_param_i8
8  ; CHECK: bb.1 (%ir-block.0):
9  ; CHECK:   liveins: $w0
10  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
11  ; CHECK:   [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY]], 8
12  ; CHECK:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_ZEXT]](s32)
13  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s8)
14  ; CHECK:   $w0 = COPY [[ANYEXT]](s32)
15  ; CHECK:   RET_ReallyLR implicit $w0
16  ret i8 %x
17}
18
; Negative test: without the zeroext attribute there is no G_ASSERT_ZEXT;
; the copy from $w0 is truncated directly.
19define i8 @no_zeroext_param(i8 %x) {
20  ; CHECK-LABEL: name: no_zeroext_param
21  ; CHECK: bb.1 (%ir-block.0):
22  ; CHECK:   liveins: $w0
23  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
24  ; CHECK:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
25  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s8)
26  ; CHECK:   $w0 = COPY [[ANYEXT]](s32)
27  ; CHECK:   RET_ReallyLR implicit $w0
28  ret i8 %x
29}
30
31; Don't need G_ASSERT_ZEXT here. The sizes match.
; s32 occupies all of $w0, so no widening happens and no G_ASSERT_ZEXT is
; emitted: the incoming copy is returned as-is.
32define i32 @zeroext_param_i32(i32 zeroext %x) {
33  ; CHECK-LABEL: name: zeroext_param_i32
34  ; CHECK: bb.1 (%ir-block.0):
35  ; CHECK:   liveins: $w0
36  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
37  ; CHECK:   $w0 = COPY [[COPY]](s32)
38  ; CHECK:   RET_ReallyLR implicit $w0
39  ret i32 %x
40}
41
42; Zeroext param is passed on the stack. We should still get a G_ASSERT_ZEXT.
; The first 8 i64 args consume $x0-$x7, pushing %h, %i and the i1 zeroext %j
; onto the stack. %j comes in via G_ZEXTLOAD from %fixed-stack.0 and is still
; tagged with G_ASSERT_ZEXT (width 1).
43define i32 @zeroext_param_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f,
44  ; CHECK-LABEL: name: zeroext_param_stack
45  ; CHECK: bb.1 (%ir-block.0):
46  ; CHECK:   liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
47  ; CHECK:   [[COPY:%[0-9]+]]:_(s64) = COPY $x0
48  ; CHECK:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
49  ; CHECK:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
50  ; CHECK:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
51  ; CHECK:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
52  ; CHECK:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x5
53  ; CHECK:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x6
54  ; CHECK:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x7
55  ; CHECK:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
56  ; CHECK:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.1, align 16)
57  ; CHECK:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
58  ; CHECK:   [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[FRAME_INDEX1]](p0) :: (invariant load (s8) from %fixed-stack.0, align 8)
59  ; CHECK:   [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[ZEXTLOAD]], 1
60  ; CHECK:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ASSERT_ZEXT]](s32)
61  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s1)
62  ; CHECK:   $w0 = COPY [[ZEXT]](s32)
63  ; CHECK:   RET_ReallyLR implicit $w0
64                                i64 %g, i64 %h, i64 %i, i1 zeroext %j) {
65  %v = zext i1 %j to i32
66  ret i32 %v
67}
68
69; The zeroext parameter is a s32, so there's no extension required.
; Stack-passed i32 zeroext arg: loaded as a full s32 with a plain G_LOAD, so
; no G_ZEXTLOAD and no G_ASSERT_ZEXT are needed.
70define i32 @dont_need_assert_zext_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e,
71  ; CHECK-LABEL: name: dont_need_assert_zext_stack
72  ; CHECK: bb.1 (%ir-block.0):
73  ; CHECK:   liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
74  ; CHECK:   [[COPY:%[0-9]+]]:_(s64) = COPY $x0
75  ; CHECK:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
76  ; CHECK:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
77  ; CHECK:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
78  ; CHECK:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
79  ; CHECK:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x5
80  ; CHECK:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x6
81  ; CHECK:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x7
82  ; CHECK:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
83  ; CHECK:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.1, align 16)
84  ; CHECK:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
85  ; CHECK:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (s32) from %fixed-stack.0, align 8)
86  ; CHECK:   $w0 = COPY [[LOAD1]](s32)
87  ; CHECK:   RET_ReallyLR implicit $w0
88                                        i64 %f, i64 %g, i64 %h, i64 %i,
89                                        i32 zeroext %j) {
90  ret i32 %j
91}
92
93; s8 requires extension to s32, so we should get a G_ASSERT_ZEXT here.
; Same stack layout as above but with an i8 zeroext arg: the G_ZEXTLOAD of
; (s8) is followed by G_ASSERT_ZEXT (width 8) before truncating to s8.
94define i8 @s8_assert_zext_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e,
95  ; CHECK-LABEL: name: s8_assert_zext_stack
96  ; CHECK: bb.1 (%ir-block.0):
97  ; CHECK:   liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
98  ; CHECK:   [[COPY:%[0-9]+]]:_(s64) = COPY $x0
99  ; CHECK:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
100  ; CHECK:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
101  ; CHECK:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
102  ; CHECK:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
103  ; CHECK:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x5
104  ; CHECK:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x6
105  ; CHECK:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x7
106  ; CHECK:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
107  ; CHECK:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.1, align 16)
108  ; CHECK:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
109  ; CHECK:   [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[FRAME_INDEX1]](p0) :: (invariant load (s8) from %fixed-stack.0, align 8)
110  ; CHECK:   [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[ZEXTLOAD]], 8
111  ; CHECK:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_ZEXT]](s32)
112  ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s8)
113  ; CHECK:   $w0 = COPY [[ANYEXT]](s32)
114  ; CHECK:   RET_ReallyLR implicit $w0
115                                        i64 %f, i64 %g, i64 %h, i64 %i,
116                                        i8 zeroext %j) {
117  ret i8 %j
118}
119
; Register-passed i1 zeroext arg: the copy from $w0 gets G_ASSERT_ZEXT
; (width 1). This test uses CHECK-NEXT for stricter line-by-line matching.
120define i32 @callee_zeroext_i1(i1 zeroext %0) {
121  ; CHECK-LABEL: name: callee_zeroext_i1
122  ; CHECK: bb.1 (%ir-block.1):
123  ; CHECK-NEXT:   liveins: $w0
124  ; CHECK-NEXT: {{  $}}
125  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
126  ; CHECK-NEXT:   [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY]], 1
127  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ASSERT_ZEXT]](s32)
128  ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s1)
129  ; CHECK-NEXT:   $w0 = COPY [[ZEXT]](s32)
130  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
131  %r = zext i1 %0 to i32
132  ret i32 %r
133}
134
; Caller side of an i1 zeroext argument: before the call, the i1 constant is
; zero-extended (s1 -> s8 -> s32) so the full $w0 holds a zero-extended value.
135define i32 @caller_zeroext_i1() {
136  ; CHECK-LABEL: name: caller_zeroext_i1
137  ; CHECK: bb.1 (%ir-block.0):
138  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
139  ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
140  ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s8) = G_ZEXT [[C]](s1)
141  ; CHECK-NEXT:   [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[ZEXT]](s8)
142  ; CHECK-NEXT:   $w0 = COPY [[ZEXT1]](s32)
143  ; CHECK-NEXT:   BL @callee_zeroext_i1, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0, implicit-def $w0
144  ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
145  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
146  ; CHECK-NEXT:   $w0 = COPY [[COPY]](s32)
147  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
148  %r = call i32 @callee_zeroext_i1(i1 zeroext true)
149  ret i32 %r
150}
151
; zeroext on the *return* value: the returned i1 is zero-extended to s32
; before being copied into $w0.
152define zeroext i1 @ret_zeroext_i1() {
153  ; CHECK-LABEL: name: ret_zeroext_i1
154  ; CHECK: bb.1 (%ir-block.0):
155  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
156  ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s1)
157  ; CHECK-NEXT:   $w0 = COPY [[ZEXT]](s32)
158  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
159  ret i1 true
160}
161