; RUN: llc < %s -mtriple=sparcv9 -mattr=+popc -disable-sparc-delay-filler -disable-sparc-leaf-proc | FileCheck %s
; RUN: llc < %s -mtriple=sparcv9 -mattr=+popc | FileCheck %s -check-prefix=OPT
; RUN: llc %s -mtriple=sparcv9 -mattr=+popc -filetype=null

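; The first RUN line disables the delay-slot filler and the leaf-procedure
; optimization, so every function gets a register window and arguments arrive
; in %i registers. The OPT run leaves both on: leaf functions stay in the
; caller's window, use %o registers, and return with retl, and the final
; instruction is checked after retl because it sits in the delay slot.
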
; CHECK-LABEL: ret2:
; CHECK: mov %i1, %i0

; OPT-LABEL: ret2:
; OPT: retl
; OPT: mov %o1, %o0
define i64 @ret2(i64 %a, i64 %b) {
  ret i64 %b
}

; CHECK-LABEL: shl_imm:
; CHECK: sllx %i0, 7, %i0

; OPT-LABEL: shl_imm:
; OPT: retl
; OPT: sllx %o0, 7, %o0
define i64 @shl_imm(i64 %a) {
  %x = shl i64 %a, 7
  ret i64 %x
}

; CHECK-LABEL: sra_reg:
; CHECK: srax %i0, %i1, %i0

; OPT-LABEL: sra_reg:
; OPT: retl
; OPT: srax %o0, %o1, %o0
define i64 @sra_reg(i64 %a, i64 %b) {
  %x = ashr i64 %a, %b
  ret i64 %x
}

; Immediate materialization. Many of these patterns could actually be merged
; into the restore instruction:
;
;     restore %g0, %g0, %o0
;
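; restore reads its sources in the old window and writes its destination in
; the restored window, so the same instruction that tears down the frame can
; produce a small return value for free.
;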
; CHECK-LABEL: ret_imm0:
; CHECK: mov %g0, %i0

; OPT-LABEL: ret_imm0:
; OPT: retl
; OPT: mov %g0, %o0
define i64 @ret_imm0() {
  ret i64 0
}

; CHECK-LABEL: ret_simm13:
; CHECK: mov -4096, %i0

; OPT-LABEL: ret_simm13:
; OPT: retl
; OPT: mov -4096, %o0
define i64 @ret_simm13() {
  ret i64 -4096
}

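; -4096 above is the most negative simm13, so it still fits mov's 13-bit
; signed immediate. 4096 no longer does, so it is built with sethi, which
; places its 22-bit immediate in bits 31:10 (sethi 4 == 4 << 10 == 4096).
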
; CHECK-LABEL: ret_sethi:
; CHECK: sethi 4, %i0
; CHECK-NOT: or
; CHECK: restore

; OPT-LABEL: ret_sethi:
; OPT: retl
; OPT: sethi 4, %o0
define i64 @ret_sethi() {
  ret i64 4096
}

; CHECK-LABEL: ret_sethi_or:
; CHECK: sethi 4, [[R:%[goli][0-7]]]
; CHECK: or [[R]], 1, %i0

; OPT-LABEL: ret_sethi_or:
; OPT: sethi 4, [[R:%[go][0-7]]]
; OPT: retl
; OPT: or [[R]], 1, %o0

define i64 @ret_sethi_or() {
  ret i64 4097
}

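; -4100 is 0xffff...effc. sethi 4 produces 0x1000, and xor with the
; sign-extended simm13 -4 (0xffff...fffc) flips in all the high bits, so a
; sign-extended 33-bit constant still costs only two instructions.
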
; CHECK-LABEL: ret_nimm33:
; CHECK: sethi 4, [[R:%[goli][0-7]]]
; CHECK: xor [[R]], -4, %i0

; OPT-LABEL: ret_nimm33:
; OPT: sethi 4, [[R:%[go][0-7]]]
; OPT: retl
; OPT: xor [[R]], -4, %o0

define i64 @ret_nimm33() {
  ret i64 -4100
}

; CHECK-LABEL: ret_bigimm:
; CHECK: sethi
; CHECK: sethi
define i64 @ret_bigimm() {
  ret i64 6800754272627607872
}

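; 0x4000000000000000 needs only one sethi: 1048576 << 10 == 0x40000000, and a
; single shift can then move that pattern into the high word, so no second
; sethi (as in ret_bigimm above) should be emitted.
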
; CHECK-LABEL: ret_bigimm2:
; CHECK: sethi 1048576
define i64 @ret_bigimm2() {
  ret i64 4611686018427387904 ; 0x4000000000000000
}

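; xor %x, -1 is a bitwise not, and the and+not pair should be selected as the
; single andn instruction.
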
; CHECK-LABEL: reg_reg_alu:
; CHECK: add %i0, %i1, [[R0:%[goli][0-7]]]
; CHECK: sub [[R0]], %i2, [[R1:%[goli][0-7]]]
; CHECK: andn [[R1]], %i0, %i0
define i64 @reg_reg_alu(i64 %x, i64 %y, i64 %z) {
  %a = add i64 %x, %y
  %b = sub i64 %a, %z
  %c = xor i64 %x, -1
  %d = and i64 %b, %c
  ret i64 %d
}

; CHECK-LABEL: reg_imm_alu:
; CHECK: add %i0, -5, [[R0:%[goli][0-7]]]
; CHECK: xor [[R0]], 2, %i0
define i64 @reg_imm_alu(i64 %x, i64 %y, i64 %z) {
  %a = add i64 %x, -5
  %b = xor i64 %a, 2
  ret i64 %b
}

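; The extension folds into the load itself: ld (lduw) zero-extends a word,
; ldsw sign-extends it, ldsh sign-extends a halfword, and ldub zero-extends a
; byte, so no separate extend instructions should appear.
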
; CHECK-LABEL: loads:
; CHECK: ldx [%i0]
; CHECK: stx %
; CHECK: ld [%i1]
; CHECK: st %
; CHECK: ldsw [%i2]
; CHECK: stx %
; CHECK: ldsh [%i3]
; CHECK: sth %
define i64 @loads(ptr %p, ptr %q, ptr %r, ptr %s) {
  %a = load i64, ptr %p
  %ai = add i64 1, %a
  store i64 %ai, ptr %p
  %b = load i32, ptr %q
  %b2 = zext i32 %b to i64
  %bi = trunc i64 %ai to i32
  store i32 %bi, ptr %q
  %c = load i32, ptr %r
  %c2 = sext i32 %c to i64
  store i64 %ai, ptr %p
  %d = load i16, ptr %s
  %d2 = sext i16 %d to i64
  %di = trunc i64 %ai to i16
  store i16 %di, ptr %s

  %x1 = add i64 %a, %b2
  %x2 = add i64 %c2, %d2
  %x3 = add i64 %x1, %x2
  ret i64 %x3
}

; CHECK-LABEL: load_bool:
; CHECK: ldub [%i0], %i0
define i64 @load_bool(ptr %p) {
  %a = load i1, ptr %p
  %b = zext i1 %a to i64
  ret i64 %b
}

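; [reg+imm] addressing takes a signed 13-bit offset, so every offset below
; folds into the memory operand of the truncating st/sth/stb.
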
; CHECK-LABEL: stores:
; CHECK: ldx [%i0+8], [[R:%[goli][0-7]]]
; CHECK: stx [[R]], [%i0+16]
; CHECK: st [[R]], [%i1+-8]
; CHECK: sth [[R]], [%i2+40]
; CHECK: stb [[R]], [%i3+-20]
define void @stores(ptr %p, ptr %q, ptr %r, ptr %s) {
  %p1 = getelementptr i64, ptr %p, i64 1
  %p2 = getelementptr i64, ptr %p, i64 2
  %pv = load i64, ptr %p1
  store i64 %pv, ptr %p2

  %q2 = getelementptr i32, ptr %q, i32 -2
  %qv = trunc i64 %pv to i32
  store i32 %qv, ptr %q2

  %r2 = getelementptr i16, ptr %r, i16 20
  %rv = trunc i64 %pv to i16
  store i16 %rv, ptr %r2

  %s2 = getelementptr i8, ptr %s, i8 -20
  %sv = trunc i64 %pv to i8
  store i8 %sv, ptr %s2

  ret void
}

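; The two loads of the same byte CSE into a single ldub that feeds both the
; shift value and the shift amount; i8 shifts are promoted, so a 32-bit sll
; is used and the returned i8 lives in its low bits.
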
; CHECK-LABEL: promote_shifts:
; CHECK: ldub [%i0], [[R:%[goli][0-7]]]
; CHECK: sll [[R]], [[R]], %i0
define i8 @promote_shifts(ptr %p) {
  %L24 = load i8, ptr %p
  %L32 = load i8, ptr %p
  %B36 = shl i8 %L24, %L32
  ret i8 %B36
}

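; v9 has full 64-bit mulx/sdivx/udivx, so none of these need a library call
; or a 32-bit %y-register sequence.
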
; CHECK-LABEL: multiply:
; CHECK: mulx %i0, %i1, %i0
define i64 @multiply(i64 %a, i64 %b) {
  %r = mul i64 %a, %b
  ret i64 %r
}

; CHECK-LABEL: signed_divide:
; CHECK: sdivx %i0, %i1, %i0
define i64 @signed_divide(i64 %a, i64 %b) {
  %r = sdiv i64 %a, %b
  ret i64 %r
}

; CHECK-LABEL: unsigned_divide:
; CHECK: udivx %i0, %i1, %i0
define i64 @unsigned_divide(i64 %a, i64 %b) {
  %r = udiv i64 %a, %b
  ret i64 %r
}

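; No FileCheck patterns: this only has to compile without crashing on the
; frame-index access (the -filetype=null RUN line covers the same ground).
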
define void @access_fi() {
entry:
  %b = alloca [32 x i8], align 1
  %arraydecay = getelementptr inbounds [32 x i8], ptr %b, i64 0, i64 0
  call void @g(ptr %arraydecay)
  ret void
}

declare void @g(ptr)

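; 1 - zext(a <=s 0) is equivalent to zext(a >s 0), so the compare, zext, and
; sub collapse into a single movrgz, the v9 conditional move keyed directly
; on a register's value.
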
; CHECK-LABEL: expand_setcc:
; CHECK: movrgz %i0, 1,
define i32 @expand_setcc(i64 %a) {
  %cond = icmp sle i64 %a, 0
  %cast2 = zext i1 %cond to i32
  %RV = sub i32 1, %cast2
  ret i32 %RV
}

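; The inline asm clobbers every allocatable register, so %x must survive in a
; stack slot across it, and a 64-bit spill/reload has to use stx/ldx.
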
; CHECK-LABEL: spill_i64:
; CHECK: stx
; CHECK: ldx
define i64 @spill_i64(i64 %x) {
  call void asm sideeffect "", "~{i0},~{i1},~{i2},~{i3},~{i4},~{i5},~{o0},~{o1},~{o2},~{o3},~{o4},~{o5},~{o7},~{l0},~{l1},~{l2},~{l3},~{l4},~{l5},~{l6},~{l7},~{g1},~{g2},~{g3},~{g4},~{g5},~{g6},~{g7}"()
  ret i64 %x
}

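; With no direct move between the integer and floating-point register files
; at this feature level, bitcasts round-trip through a stack slot: std/ldx in
; one direction, stx/ldd in the other.
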
; CHECK-LABEL: bitcast_i64_f64:
; CHECK: std
; CHECK: ldx
define i64 @bitcast_i64_f64(double %x) {
  %y = bitcast double %x to i64
  ret i64 %y
}

; CHECK-LABEL: bitcast_f64_i64:
; CHECK: stx
; CHECK: ldd
define double @bitcast_f64_i64(i64 %x) {
  %y = bitcast i64 %x to double
  ret double %y
}

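; %g0 always reads as zero, so storing a zero constant needs no
; materialization at all.
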
; CHECK-LABEL: store_zero:
; CHECK: stx %g0, [%i0]
; CHECK: stx %g0, [%i1+8]

; OPT-LABEL: store_zero:
; OPT: stx %g0, [%o0]
; OPT: stx %g0, [%o1+8]
define i64 @store_zero(ptr nocapture %a, ptr nocapture %b) {
entry:
  store i64 0, ptr %a, align 8
  %0 = getelementptr inbounds i64, ptr %b, i32 1
  store i64 0, ptr %0, align 8
  ret i64 0
}

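; ctpop maps directly to popc (the reason for -mattr=+popc in the RUN lines);
; ctlz, cttz, and bswap have no single v9 instruction and are expanded into
; longer sequences, so only popc is pinned down here.
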
; CHECK-LABEL: bit_ops:
; CHECK:       popc

; OPT-LABEL: bit_ops:
; OPT:       popc

define i64 @bit_ops(i64 %arg) {
entry:
  %0 = tail call i64 @llvm.ctpop.i64(i64 %arg)
  %1 = tail call i64 @llvm.ctlz.i64(i64 %arg, i1 true)
  %2 = tail call i64 @llvm.cttz.i64(i64 %arg, i1 true)
  %3 = tail call i64 @llvm.bswap.i64(i64 %arg)
  %4 = add i64 %0, %1
  %5 = add i64 %2, %3
  %6 = add i64 %4, %5
  ret i64 %6
}

declare i64 @llvm.ctpop.i64(i64) nounwind readnone
declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
declare i64 @llvm.cttz.i64(i64, i1) nounwind readnone
declare i64 @llvm.bswap.i64(i64) nounwind readnone