; RUN: llc -mtriple=mips-elf -mattr=+msa,+fp64,+mips32r5 < %s | FileCheck %s
; RUN: llc -mtriple=mipsel-elf -mattr=+msa,+fp64,+mips32r5 < %s | FileCheck %s
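
; Test the handling of frame indexes by MSA vector loads and stores. The MSA
; ld/st instructions take a 10-bit signed immediate offset scaled by the
; element size, so frame offsets beyond that range (512 bytes for .b, 1024
; for .h, 2048 for .w, 4096 for .d) must first materialize the base address:
; with addiu when the offset fits in a signed 16-bit immediate, and with
; ori+addu when it does not.
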
define void @loadstore_v16i8_near() nounwind {
  ; CHECK: loadstore_v16i8_near:

  %1 = alloca <16 x i8>
  %2 = load volatile <16 x i8>, ptr %1
  ; CHECK: ld.b [[R1:\$w[0-9]+]], 0($sp)
  store volatile <16 x i8> %2, ptr %1
  ; CHECK: st.b [[R1]], 0($sp)

  ret void
  ; CHECK: .size loadstore_v16i8_near
}

define void @loadstore_v16i8_just_under_simm10() nounwind {
  ; CHECK: loadstore_v16i8_just_under_simm10:

  %1 = alloca <16 x i8>
  %2 = alloca [492 x i8] ; Push the frame--accounting for the emergency spill
                         ; slot--right up to 512 bytes

  %3 = load volatile <16 x i8>, ptr %1
  ; CHECK: ld.b [[R1:\$w[0-9]+]], 496($sp)
  store volatile <16 x i8> %3, ptr %1
  ; CHECK: st.b [[R1]], 496($sp)

  ret void
  ; CHECK: .size loadstore_v16i8_just_under_simm10
}

define void @loadstore_v16i8_just_over_simm10() nounwind {
  ; CHECK: loadstore_v16i8_just_over_simm10:

  %1 = alloca <16 x i8>
  %2 = alloca [497 x i8] ; Push the frame--accounting for the emergency spill
                         ; slot--just over 512 bytes

  %3 = load volatile <16 x i8>, ptr %1
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 512
  ; CHECK: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <16 x i8> %3, ptr %1
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 512
  ; CHECK: st.b [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v16i8_just_over_simm10
}

define void @loadstore_v16i8_just_under_simm16() nounwind {
  ; CHECK: loadstore_v16i8_just_under_simm16:

  %1 = alloca <16 x i8>
  %2 = alloca [32752 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--right up to 32768 bytes

  %3 = load volatile <16 x i8>, ptr %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <16 x i8> %3, ptr %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: st.b [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v16i8_just_under_simm16
}

define void @loadstore_v16i8_just_over_simm16() nounwind {
  ; CHECK: loadstore_v16i8_just_over_simm16:

  %1 = alloca <16 x i8>
  %2 = alloca [32753 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--just over 32768 bytes

  %3 = load volatile <16 x i8>, ptr %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <16 x i8> %3, ptr %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: st.b [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v16i8_just_over_simm16
}

define void @loadstore_v8i16_near() nounwind {
  ; CHECK: loadstore_v8i16_near:

  %1 = alloca <8 x i16>
  %2 = load volatile <8 x i16>, ptr %1
  ; CHECK: ld.h [[R1:\$w[0-9]+]], 0($sp)
  store volatile <8 x i16> %2, ptr %1
  ; CHECK: st.h [[R1]], 0($sp)

  ret void
  ; CHECK: .size loadstore_v8i16_near
}

define void @loadstore_v8i16_unaligned() nounwind {
  ; CHECK: loadstore_v8i16_unaligned:

  %1 = alloca [2 x <8 x i16>]
  %2 = getelementptr i8, ptr %1, i32 1

  %3 = load volatile <8 x i16>, ptr %2
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
  ; CHECK: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <8 x i16> %3, ptr %2
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
  ; CHECK: st.h [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v8i16_unaligned
}

define void @loadstore_v8i16_just_under_simm10() nounwind {
  ; CHECK: loadstore_v8i16_just_under_simm10:

  %1 = alloca <8 x i16>
  %2 = alloca [1004 x i8] ; Push the frame--accounting for the emergency spill
                          ; slot--right up to 1024 bytes

  %3 = load volatile <8 x i16>, ptr %1
  ; CHECK: ld.h [[R1:\$w[0-9]+]], 1008($sp)
  store volatile <8 x i16> %3, ptr %1
  ; CHECK: st.h [[R1]], 1008($sp)

  ret void
  ; CHECK: .size loadstore_v8i16_just_under_simm10
}

define void @loadstore_v8i16_just_over_simm10() nounwind {
  ; CHECK: loadstore_v8i16_just_over_simm10:

  %1 = alloca <8 x i16>
  %2 = alloca [1009 x i8] ; Push the frame--accounting for the emergency spill
                          ; slot--just over 1024 bytes

  %3 = load volatile <8 x i16>, ptr %1
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1024
  ; CHECK: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <8 x i16> %3, ptr %1
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1024
  ; CHECK: st.h [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v8i16_just_over_simm10
}

define void @loadstore_v8i16_just_under_simm16() nounwind {
  ; CHECK: loadstore_v8i16_just_under_simm16:

  %1 = alloca <8 x i16>
  %2 = alloca [32752 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--right up to 32768 bytes

  %3 = load volatile <8 x i16>, ptr %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <8 x i16> %3, ptr %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: st.h [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v8i16_just_under_simm16
}

define void @loadstore_v8i16_just_over_simm16() nounwind {
  ; CHECK: loadstore_v8i16_just_over_simm16:

  %1 = alloca <8 x i16>
  %2 = alloca [32753 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--just over 32768 bytes

  %3 = load volatile <8 x i16>, ptr %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <8 x i16> %3, ptr %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: st.h [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v8i16_just_over_simm16
}

define void @loadstore_v4i32_near() nounwind {
  ; CHECK: loadstore_v4i32_near:

  %1 = alloca <4 x i32>
  %2 = load volatile <4 x i32>, ptr %1
  ; CHECK: ld.w [[R1:\$w[0-9]+]], 0($sp)
  store volatile <4 x i32> %2, ptr %1
  ; CHECK: st.w [[R1]], 0($sp)

  ret void
  ; CHECK: .size loadstore_v4i32_near
}

define void @loadstore_v4i32_unaligned() nounwind {
  ; CHECK: loadstore_v4i32_unaligned:

  %1 = alloca [2 x <4 x i32>]
  %2 = getelementptr i8, ptr %1, i32 1

  %3 = load volatile <4 x i32>, ptr %2
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
  ; CHECK: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <4 x i32> %3, ptr %2
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
  ; CHECK: st.w [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v4i32_unaligned
}

define void @loadstore_v4i32_just_under_simm10() nounwind {
  ; CHECK: loadstore_v4i32_just_under_simm10:

  %1 = alloca <4 x i32>
  %2 = alloca [2028 x i8] ; Push the frame--accounting for the emergency spill
                          ; slot--right up to 2048 bytes

  %3 = load volatile <4 x i32>, ptr %1
  ; CHECK: ld.w [[R1:\$w[0-9]+]], 2032($sp)
  store volatile <4 x i32> %3, ptr %1
  ; CHECK: st.w [[R1]], 2032($sp)

  ret void
  ; CHECK: .size loadstore_v4i32_just_under_simm10
}

define void @loadstore_v4i32_just_over_simm10() nounwind {
  ; CHECK: loadstore_v4i32_just_over_simm10:

  %1 = alloca <4 x i32>
  %2 = alloca [2033 x i8] ; Push the frame--accounting for the emergency spill
                          ; slot--just over 2048 bytes

  %3 = load volatile <4 x i32>, ptr %1
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 2048
  ; CHECK: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <4 x i32> %3, ptr %1
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 2048
  ; CHECK: st.w [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v4i32_just_over_simm10
}

define void @loadstore_v4i32_just_under_simm16() nounwind {
  ; CHECK: loadstore_v4i32_just_under_simm16:

  %1 = alloca <4 x i32>
  %2 = alloca [32752 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--right up to 32768 bytes

  %3 = load volatile <4 x i32>, ptr %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <4 x i32> %3, ptr %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: st.w [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v4i32_just_under_simm16
}

define void @loadstore_v4i32_just_over_simm16() nounwind {
  ; CHECK: loadstore_v4i32_just_over_simm16:

  %1 = alloca <4 x i32>
  %2 = alloca [32753 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--just over 32768 bytes

  %3 = load volatile <4 x i32>, ptr %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <4 x i32> %3, ptr %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: st.w [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v4i32_just_over_simm16
}

define void @loadstore_v2i64_near() nounwind {
  ; CHECK: loadstore_v2i64_near:

  %1 = alloca <2 x i64>
  %2 = load volatile <2 x i64>, ptr %1
  ; CHECK: ld.d [[R1:\$w[0-9]+]], 0($sp)
  store volatile <2 x i64> %2, ptr %1
  ; CHECK: st.d [[R1]], 0($sp)

  ret void
  ; CHECK: .size loadstore_v2i64_near
}

define void @loadstore_v2i64_unaligned() nounwind {
  ; CHECK: loadstore_v2i64_unaligned:

  %1 = alloca [2 x <2 x i64>]
  %2 = getelementptr i8, ptr %1, i32 1

  %3 = load volatile <2 x i64>, ptr %2
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
  ; CHECK: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <2 x i64> %3, ptr %2
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
  ; CHECK: st.d [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v2i64_unaligned
}

define void @loadstore_v2i64_just_under_simm10() nounwind {
  ; CHECK: loadstore_v2i64_just_under_simm10:

  %1 = alloca <2 x i64>
  %2 = alloca [4076 x i8] ; Push the frame--accounting for the emergency spill
                          ; slot--right up to 4096 bytes
  %3 = load volatile <2 x i64>, ptr %1
  ; CHECK: ld.d [[R1:\$w[0-9]+]], 4080($sp)
  store volatile <2 x i64> %3, ptr %1
  ; CHECK: st.d [[R1]], 4080($sp)

  ret void
  ; CHECK: .size loadstore_v2i64_just_under_simm10
}

define void @loadstore_v2i64_just_over_simm10() nounwind {
  ; CHECK: loadstore_v2i64_just_over_simm10:

  %1 = alloca <2 x i64>
  %2 = alloca [4081 x i8] ; Push the frame--accounting for the emergency spill
                          ; slot--just over 4096 bytes

  %3 = load volatile <2 x i64>, ptr %1
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 4096
  ; CHECK: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <2 x i64> %3, ptr %1
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 4096
  ; CHECK: st.d [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v2i64_just_over_simm10
}

define void @loadstore_v2i64_just_under_simm16() nounwind {
  ; CHECK: loadstore_v2i64_just_under_simm16:

  %1 = alloca <2 x i64>
  %2 = alloca [32752 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--right up to 32768 bytes

  %3 = load volatile <2 x i64>, ptr %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <2 x i64> %3, ptr %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: st.d [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v2i64_just_under_simm16
}

define void @loadstore_v2i64_just_over_simm16() nounwind {
  ; CHECK: loadstore_v2i64_just_over_simm16:

  %1 = alloca <2 x i64>
  %2 = alloca [32753 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--just over 32768 bytes

  %3 = load volatile <2 x i64>, ptr %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <2 x i64> %3, ptr %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: st.d [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v2i64_just_over_simm16
}