xref: /llvm-project/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll (revision 6e1ea7e5a7b6e581bf9a030b98a7f63ee2833278)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
2; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-streaming-hazard-size=0 | FileCheck %s --check-prefixes=CHECK
3; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-streaming-hazard-size=0 -pass-remarks-analysis=stack-frame-layout 2>&1 >/dev/null | FileCheck %s --check-prefixes=CHECK-FRAMELAYOUT
4
5; CHECK-FRAMELAYOUT-LABEL: Function: csr_d8_allocnxv4i32i32f64
6; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
7; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
8; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-16 x vscale], Type: Variable, Align: 16, Size: vscale x 16
9; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-20-16 x vscale], Type: Variable, Align: 4, Size: 4
10; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-16 x vscale], Type: Variable, Align: 8, Size: 8
11
; The inline asm clobbers d8, forcing a single FPR callee-save spill (paired
; with x29). Three locals follow: a scalable vector, an i32 and a double.
; CHECK lines below are autogenerated from llc output — do not edit by hand.
12define i32 @csr_d8_allocnxv4i32i32f64(double %d) "aarch64_pstate_sm_compatible" {
13; CHECK-LABEL: csr_d8_allocnxv4i32i32f64:
14; CHECK:       // %bb.0: // %entry
15; CHECK-NEXT:    str d8, [sp, #-16]! // 8-byte Folded Spill
16; CHECK-NEXT:    str x29, [sp, #8] // 8-byte Folded Spill
17; CHECK-NEXT:    sub sp, sp, #16
18; CHECK-NEXT:    addvl sp, sp, #-1
19; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG
20; CHECK-NEXT:    .cfi_offset w29, -8
21; CHECK-NEXT:    .cfi_offset b8, -16
22; CHECK-NEXT:    mov z1.s, #0 // =0x0
23; CHECK-NEXT:    ptrue p0.s
24; CHECK-NEXT:    add x8, sp, #16
25; CHECK-NEXT:    mov w0, wzr
26; CHECK-NEXT:    //APP
27; CHECK-NEXT:    //NO_APP
28; CHECK-NEXT:    str wzr, [sp, #12]
29; CHECK-NEXT:    str d0, [sp]
30; CHECK-NEXT:    st1w { z1.s }, p0, [x8]
31; CHECK-NEXT:    addvl sp, sp, #1
32; CHECK-NEXT:    add sp, sp, #16
33; CHECK-NEXT:    ldr x29, [sp, #8] // 8-byte Folded Reload
34; CHECK-NEXT:    ldr d8, [sp], #16 // 8-byte Folded Reload
35; CHECK-NEXT:    ret
36entry:
  ; Scalable-vector alloca (SVE area) plus two fixed-size locals.
37  %a = alloca <vscale x 4 x i32>
38  %b = alloca i32
39  %c = alloca double
  ; Empty asm whose only effect is clobbering d8 so it must be spilled.
40  tail call void asm sideeffect "", "~{d8}"() #1
41  store <vscale x 4 x i32> zeroinitializer, ptr %a
42  store i32 zeroinitializer, ptr %b
43  store double %d, ptr %c
44  ret i32 0
45}
46
47; CHECK-FRAMELAYOUT-LABEL: Function: csr_d8_allocnxv4i32i32f64_fp
48; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
49; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
50; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-20], Type: Variable, Align: 4, Size: 4
51; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 16, Size: 8
52; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-16 x vscale], Type: Variable, Align: 16, Size: vscale x 16
53; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-40-16 x vscale], Type: Variable, Align: 8, Size: 8
54
; Same locals as csr_d8_allocnxv4i32i32f64, but with "frame-pointer"="all" so
; a frame record (x29/x30) is set up and addressing goes via x29 where shown.
; CHECK lines below are autogenerated from llc output — do not edit by hand.
55define i32 @csr_d8_allocnxv4i32i32f64_fp(double %d) "aarch64_pstate_sm_compatible" "frame-pointer"="all" {
56; CHECK-LABEL: csr_d8_allocnxv4i32i32f64_fp:
57; CHECK:       // %bb.0: // %entry
58; CHECK-NEXT:    str d8, [sp, #-32]! // 8-byte Folded Spill
59; CHECK-NEXT:    stp x29, x30, [sp, #16] // 16-byte Folded Spill
60; CHECK-NEXT:    add x29, sp, #16
61; CHECK-NEXT:    sub sp, sp, #16
62; CHECK-NEXT:    addvl sp, sp, #-1
63; CHECK-NEXT:    .cfi_def_cfa w29, 16
64; CHECK-NEXT:    .cfi_offset w30, -8
65; CHECK-NEXT:    .cfi_offset w29, -16
66; CHECK-NEXT:    .cfi_offset b8, -32
67; CHECK-NEXT:    mov z1.s, #0 // =0x0
68; CHECK-NEXT:    ptrue p0.s
69; CHECK-NEXT:    addvl x8, sp, #1
70; CHECK-NEXT:    //APP
71; CHECK-NEXT:    //NO_APP
72; CHECK-NEXT:    str wzr, [x8, #28]
73; CHECK-NEXT:    sub x8, x29, #16
74; CHECK-NEXT:    mov w0, wzr
75; CHECK-NEXT:    str d0, [sp, #8]
76; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #-1, mul vl]
77; CHECK-NEXT:    addvl sp, sp, #1
78; CHECK-NEXT:    add sp, sp, #16
79; CHECK-NEXT:    ldp x29, x30, [sp, #16] // 16-byte Folded Reload
80; CHECK-NEXT:    ldr d8, [sp], #32 // 8-byte Folded Reload
81; CHECK-NEXT:    ret
82entry:
  ; Identical IR body to the non-FP variant; only the function attributes differ.
83  %a = alloca <vscale x 4 x i32>
84  %b = alloca i32
85  %c = alloca double
  ; Clobber d8 to force the callee-save spill.
86  tail call void asm sideeffect "", "~{d8}"() #1
87  store <vscale x 4 x i32> zeroinitializer, ptr %a
88  store i32 zeroinitializer, ptr %b
89  store double %d, ptr %c
90  ret i32 0
91}
92
93; In the presence of dynamic stack-realignment we emit correct offsets for
94; objects which are not realigned. For realigned objects, e.g. the i32 alloca
95; in this test, we emit the correct offset ignoring the re-alignment (i.e. the
96; offset if the alignment requirement is already satisfied).
97
98; CHECK-FRAMELAYOUT-LABEL: Function: csr_d8_allocnxv4i32i32f64_dynamicrealign
99; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
100; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
101; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-24], Type: Variable, Align: 8, Size: 8
102; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 16, Size: 8
103; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-16 x vscale], Type: Variable, Align: 16, Size: vscale x 16
104; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-128-16 x vscale], Type: Variable, Align: 128, Size: 4
105
; The i32 alloca requests align 128, triggering dynamic stack realignment
; (the "and sp, x9, #0xffffffffffffff80" below). See the file comment above
; for how realigned-object offsets are reported.
; CHECK lines below are autogenerated from llc output — do not edit by hand.
106define i32 @csr_d8_allocnxv4i32i32f64_dynamicrealign(double %d) "aarch64_pstate_sm_compatible" {
107; CHECK-LABEL: csr_d8_allocnxv4i32i32f64_dynamicrealign:
108; CHECK:       // %bb.0: // %entry
109; CHECK-NEXT:    str d8, [sp, #-32]! // 8-byte Folded Spill
110; CHECK-NEXT:    sub x9, sp, #96
111; CHECK-NEXT:    stp x29, x30, [sp, #16] // 16-byte Folded Spill
112; CHECK-NEXT:    add x29, sp, #16
113; CHECK-NEXT:    addvl x9, x9, #-1
114; CHECK-NEXT:    and sp, x9, #0xffffffffffffff80
115; CHECK-NEXT:    .cfi_def_cfa w29, 16
116; CHECK-NEXT:    .cfi_offset w30, -8
117; CHECK-NEXT:    .cfi_offset w29, -16
118; CHECK-NEXT:    .cfi_offset b8, -32
119; CHECK-NEXT:    mov z1.s, #0 // =0x0
120; CHECK-NEXT:    ptrue p0.s
121; CHECK-NEXT:    sub x8, x29, #16
122; CHECK-NEXT:    mov w0, wzr
123; CHECK-NEXT:    //APP
124; CHECK-NEXT:    //NO_APP
125; CHECK-NEXT:    str wzr, [sp]
126; CHECK-NEXT:    stur d0, [x29, #-8]
127; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #-1, mul vl]
128; CHECK-NEXT:    sub sp, x29, #16
129; CHECK-NEXT:    ldp x29, x30, [sp, #16] // 16-byte Folded Reload
130; CHECK-NEXT:    ldr d8, [sp], #32 // 8-byte Folded Reload
131; CHECK-NEXT:    ret
132entry:
133  %a = alloca <vscale x 4 x i32>
  ; Over-aligned local: 128 exceeds the default stack alignment, forcing realignment.
134  %b = alloca i32, align 128
135  %c = alloca double
  ; Clobber d8 to force the callee-save spill.
136  tail call void asm sideeffect "", "~{d8}"() #1
137  store <vscale x 4 x i32> zeroinitializer, ptr %a
138  store i32 zeroinitializer, ptr %b
139  store double %d, ptr %c
140  ret i32 0
141}
142
143; In the presence of VLA-area objects, we emit correct offsets for all objects
144; except for these VLA objects.
145
146; CHECK-FRAMELAYOUT-LABEL: Function: csr_d8_allocnxv4i32i32f64_vla
147; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
148; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
149; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-24], Type: Spill, Align: 8, Size: 8
150; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 8, Size: 8
151; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-16 x vscale], Type: Variable, Align: 16, Size: vscale x 16
152; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-40-16 x vscale], Type: Variable, Align: 8, Size: 8
153; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-16 x vscale], Type: VariableSized, Align: 1, Size: 0
154; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-16 x vscale], Type: VariableSized, Align: 1, Size: 0
155
; Two runtime-sized allocas (%vla0, %vla1) make this a VLA function: x19 is
; saved and used as a base pointer, and the VLA objects are reported as
; VariableSized with Size 0 in the frame layout above.
; CHECK lines below are autogenerated from llc output — do not edit by hand.
156define i32 @csr_d8_allocnxv4i32i32f64_vla(double %d, i32 %i) "aarch64_pstate_sm_compatible" {
157; CHECK-LABEL: csr_d8_allocnxv4i32i32f64_vla:
158; CHECK:       // %bb.0: // %entry
159; CHECK-NEXT:    str d8, [sp, #-32]! // 8-byte Folded Spill
160; CHECK-NEXT:    stp x29, x30, [sp, #8] // 16-byte Folded Spill
161; CHECK-NEXT:    add x29, sp, #8
162; CHECK-NEXT:    str x19, [sp, #24] // 8-byte Folded Spill
163; CHECK-NEXT:    sub sp, sp, #16
164; CHECK-NEXT:    addvl sp, sp, #-1
165; CHECK-NEXT:    mov x19, sp
166; CHECK-NEXT:    .cfi_def_cfa w29, 24
167; CHECK-NEXT:    .cfi_offset w19, -8
168; CHECK-NEXT:    .cfi_offset w30, -16
169; CHECK-NEXT:    .cfi_offset w29, -24
170; CHECK-NEXT:    .cfi_offset b8, -32
171; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
172; CHECK-NEXT:    ubfiz x8, x0, #2, #32
173; CHECK-NEXT:    mov x9, sp
174; CHECK-NEXT:    add x8, x8, #15
175; CHECK-NEXT:    and x8, x8, #0x7fffffff0
176; CHECK-NEXT:    sub x9, x9, x8
177; CHECK-NEXT:    mov sp, x9
178; CHECK-NEXT:    mov x10, sp
179; CHECK-NEXT:    sub x8, x10, x8
180; CHECK-NEXT:    mov sp, x8
181; CHECK-NEXT:    mov z1.s, #0 // =0x0
182; CHECK-NEXT:    ptrue p0.s
183; CHECK-NEXT:    //APP
184; CHECK-NEXT:    //NO_APP
185; CHECK-NEXT:    str wzr, [x8]
186; CHECK-NEXT:    sub x8, x29, #8
187; CHECK-NEXT:    mov w0, wzr
188; CHECK-NEXT:    str wzr, [x9]
189; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #-1, mul vl]
190; CHECK-NEXT:    str d0, [x19, #8]
191; CHECK-NEXT:    sub sp, x29, #8
192; CHECK-NEXT:    ldp x29, x30, [sp, #8] // 16-byte Folded Reload
193; CHECK-NEXT:    ldr x19, [sp, #24] // 8-byte Folded Reload
194; CHECK-NEXT:    ldr d8, [sp], #32 // 8-byte Folded Reload
195; CHECK-NEXT:    ret
196entry:
197  %a = alloca <vscale x 4 x i32>
  ; Runtime element count: %i zero-extended gives the VLA length.
198  %0 = zext i32 %i to i64
199  %vla0 = alloca i32, i64 %0
200  %vla1 = alloca i32, i64 %0
201  %c = alloca double
  ; Clobber d8 to force the callee-save spill.
202  tail call void asm sideeffect "", "~{d8}"() #1
203  store <vscale x 4 x i32> zeroinitializer, ptr %a
204  store i32 zeroinitializer, ptr %vla0
205  store i32 zeroinitializer, ptr %vla1
206  store double %d, ptr %c
207  ret i32 0
208}
209
210; CHECK-FRAMELAYOUT-LABEL: Function: csr_d8_allocnxv4i32i32f64_stackargsi32f64
211; CHECK-FRAMELAYOUT-NEXT: Offset: [SP+8], Type: Fixed, Align: 8, Size: 4
212; CHECK-FRAMELAYOUT-NEXT: Offset: [SP+0], Type: Fixed, Align: 16, Size: 8
213; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
214; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
215; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-16 x vscale], Type: Variable, Align: 16, Size: vscale x 16
216; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-20-16 x vscale], Type: Variable, Align: 4, Size: 4
217; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-16 x vscale], Type: Variable, Align: 8, Size: 8
218
; Nine double and nine i32 arguments overflow the d0-d7 / w0-w7 argument
; registers, so %d8 and %i8 arrive on the stack — these appear as the
; positive-offset Fixed entries ([SP+0], [SP+8]) in the frame layout above.
; CHECK lines below are autogenerated from llc output — do not edit by hand.
219define i32 @csr_d8_allocnxv4i32i32f64_stackargsi32f64(double %d0, double %d1, double %d2, double %d3, double %d4, double %d5, double %d6, double %d7, double %d8, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8) "aarch64_pstate_sm_compatible" {
220; CHECK-LABEL: csr_d8_allocnxv4i32i32f64_stackargsi32f64:
221; CHECK:       // %bb.0: // %entry
222; CHECK-NEXT:    str d8, [sp, #-16]! // 8-byte Folded Spill
223; CHECK-NEXT:    str x29, [sp, #8] // 8-byte Folded Spill
224; CHECK-NEXT:    sub sp, sp, #16
225; CHECK-NEXT:    addvl sp, sp, #-1
226; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG
227; CHECK-NEXT:    .cfi_offset w29, -8
228; CHECK-NEXT:    .cfi_offset b8, -16
229; CHECK-NEXT:    mov z1.s, #0 // =0x0
230; CHECK-NEXT:    ptrue p0.s
231; CHECK-NEXT:    add x8, sp, #16
232; CHECK-NEXT:    mov w0, wzr
233; CHECK-NEXT:    //APP
234; CHECK-NEXT:    //NO_APP
235; CHECK-NEXT:    str wzr, [sp, #12]
236; CHECK-NEXT:    str d0, [sp]
237; CHECK-NEXT:    st1w { z1.s }, p0, [x8]
238; CHECK-NEXT:    addvl sp, sp, #1
239; CHECK-NEXT:    add sp, sp, #16
240; CHECK-NEXT:    ldr x29, [sp, #8] // 8-byte Folded Reload
241; CHECK-NEXT:    ldr d8, [sp], #16 // 8-byte Folded Reload
242; CHECK-NEXT:    ret
243entry:
244  %a = alloca <vscale x 4 x i32>
245  %b = alloca i32
246  %c = alloca double
  ; Clobber d8 to force the callee-save spill.
247  tail call void asm sideeffect "", "~{d8}"() #1
248  store <vscale x 4 x i32> zeroinitializer, ptr %a
249  store i32 zeroinitializer, ptr %b
  ; Only the first double argument is actually used.
250  store double %d0, ptr %c
251  ret i32 0
252}
253
254; CHECK-FRAMELAYOUT-LABEL: Function: svecc_z8_allocnxv4i32i32f64_fp
255; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
256; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
257; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-16 x vscale], Type: Spill, Align: 16, Size: vscale x 16
258; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-32 x vscale], Type: Variable, Align: 16, Size: vscale x 16
259; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-20-32 x vscale], Type: Variable, Align: 4, Size: 4
260; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-32 x vscale], Type: Variable, Align: 8, Size: 8
261
; Like the _fp test above, but the inline asm clobbers d8 while an SVE
; argument is live, so the full z8 register is spilled in the SVE callee-save
; area (the "str z8" / "ldr z8" pair and the extra 16 x vscale Spill entry).
; CHECK lines below are autogenerated from llc output — do not edit by hand.
262define i32 @svecc_z8_allocnxv4i32i32f64_fp(double %d, <vscale x 4 x i32> %v) "aarch64_pstate_sm_compatible" "frame-pointer"="all" {
263; CHECK-LABEL: svecc_z8_allocnxv4i32i32f64_fp:
264; CHECK:       // %bb.0: // %entry
265; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
266; CHECK-NEXT:    mov x29, sp
267; CHECK-NEXT:    addvl sp, sp, #-1
268; CHECK-NEXT:    str z8, [sp] // 16-byte Folded Spill
269; CHECK-NEXT:    sub sp, sp, #16
270; CHECK-NEXT:    addvl sp, sp, #-1
271; CHECK-NEXT:    .cfi_def_cfa w29, 16
272; CHECK-NEXT:    .cfi_offset w30, -8
273; CHECK-NEXT:    .cfi_offset w29, -16
274; CHECK-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
275; CHECK-NEXT:    ptrue p0.s
276; CHECK-NEXT:    mov w0, wzr
277; CHECK-NEXT:    //APP
278; CHECK-NEXT:    //NO_APP
279; CHECK-NEXT:    str wzr, [sp, #12]
280; CHECK-NEXT:    st1w { z1.s }, p0, [x29, #-2, mul vl]
281; CHECK-NEXT:    str d0, [sp], #16
282; CHECK-NEXT:    addvl sp, sp, #1
283; CHECK-NEXT:    ldr z8, [sp] // 16-byte Folded Reload
284; CHECK-NEXT:    addvl sp, sp, #1
285; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
286; CHECK-NEXT:    ret
287entry:
288  %a = alloca <vscale x 4 x i32>
289  %b = alloca i32
290  %c = alloca double
  ; Clobber d8; with SVE enabled this means spilling/restoring all of z8.
291  tail call void asm sideeffect "", "~{d8}"() #1
  ; Unlike earlier tests, the stored vector is the incoming argument %v.
292  store <vscale x 4 x i32> %v, ptr %a
293  store i32 zeroinitializer, ptr %b
294  store double %d, ptr %c
295  ret i32 0
296}
297
298; CHECK-FRAMELAYOUT-LABEL: Function: svecc_z8_allocnxv4i32i32f64_stackargsi32_fp
299; CHECK-FRAMELAYOUT-NEXT: Offset: [SP+0], Type: Fixed, Align: 16, Size: 4
300; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
301; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
302; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-16 x vscale], Type: Spill, Align: 16, Size: vscale x 16
303; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-32 x vscale], Type: Variable, Align: 16, Size: vscale x 16
304; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-20-32 x vscale], Type: Variable, Align: 4, Size: 4
305; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-32 x vscale], Type: Variable, Align: 8, Size: 8
306
; Variant of svecc_z8_allocnxv4i32i32f64_fp with nine i32 arguments so %i8
; arrives on the stack — the [SP+0] Fixed entry in the frame layout above —
; in addition to the z8 callee-save spill.
; CHECK lines below are autogenerated from llc output — do not edit by hand.
307define i32 @svecc_z8_allocnxv4i32i32f64_stackargsi32_fp(double %d, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, <vscale x 4 x i32> %v) "aarch64_pstate_sm_compatible" "frame-pointer"="all"{
308; CHECK-LABEL: svecc_z8_allocnxv4i32i32f64_stackargsi32_fp:
309; CHECK:       // %bb.0: // %entry
310; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
311; CHECK-NEXT:    mov x29, sp
312; CHECK-NEXT:    addvl sp, sp, #-1
313; CHECK-NEXT:    str z8, [sp] // 16-byte Folded Spill
314; CHECK-NEXT:    sub sp, sp, #16
315; CHECK-NEXT:    addvl sp, sp, #-1
316; CHECK-NEXT:    .cfi_def_cfa w29, 16
317; CHECK-NEXT:    .cfi_offset w30, -8
318; CHECK-NEXT:    .cfi_offset w29, -16
319; CHECK-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
320; CHECK-NEXT:    ptrue p0.s
321; CHECK-NEXT:    mov w0, wzr
322; CHECK-NEXT:    //APP
323; CHECK-NEXT:    //NO_APP
324; CHECK-NEXT:    str wzr, [sp, #12]
325; CHECK-NEXT:    st1w { z1.s }, p0, [x29, #-2, mul vl]
326; CHECK-NEXT:    str d0, [sp], #16
327; CHECK-NEXT:    addvl sp, sp, #1
328; CHECK-NEXT:    ldr z8, [sp] // 16-byte Folded Reload
329; CHECK-NEXT:    addvl sp, sp, #1
330; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
331; CHECK-NEXT:    ret
332entry:
333  %a = alloca <vscale x 4 x i32>
334  %b = alloca i32
335  %c = alloca double
  ; Clobber d8; with SVE enabled this means spilling/restoring all of z8.
336  tail call void asm sideeffect "", "~{d8}"() #1
337  store <vscale x 4 x i32> %v, ptr %a
338  store i32 zeroinitializer, ptr %b
339  store double %d, ptr %c
340  ret i32 0
341}
342
343; CHECK-FRAMELAYOUT-LABEL: Function: svecc_call
344; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
345; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
346; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-24], Type: Spill, Align: 8, Size: 8
347; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 8, Size: 8
348; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-40], Type: Spill, Align: 8, Size: 8
349; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48], Type: Spill, Align: 8, Size: 8
350; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-16 x vscale], Type: Spill, Align: 16, Size: vscale x 16
351; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-32 x vscale], Type: Spill, Align: 16, Size: vscale x 16
352; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-48 x vscale], Type: Spill, Align: 16, Size: vscale x 16
353; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-64 x vscale], Type: Spill, Align: 16, Size: vscale x 16
354; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-80 x vscale], Type: Spill, Align: 16, Size: vscale x 16
355; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-96 x vscale], Type: Spill, Align: 16, Size: vscale x 16
356; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-112 x vscale], Type: Spill, Align: 16, Size: vscale x 16
357; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-128 x vscale], Type: Spill, Align: 16, Size: vscale x 16
358; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-144 x vscale], Type: Spill, Align: 16, Size: vscale x 16
359; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-160 x vscale], Type: Spill, Align: 16, Size: vscale x 16
360; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-176 x vscale], Type: Spill, Align: 16, Size: vscale x 16
361; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-192 x vscale], Type: Spill, Align: 16, Size: vscale x 16
362; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-208 x vscale], Type: Spill, Align: 16, Size: vscale x 16
363; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-224 x vscale], Type: Spill, Align: 16, Size: vscale x 16
364; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-240 x vscale], Type: Spill, Align: 16, Size: vscale x 16
365; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-256 x vscale], Type: Spill, Align: 16, Size: vscale x 16
366; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-258 x vscale], Type: Spill, Align: 2, Size: vscale x 2
367; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-260 x vscale], Type: Spill, Align: 2, Size: vscale x 2
368; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-262 x vscale], Type: Spill, Align: 2, Size: vscale x 2
369; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-264 x vscale], Type: Spill, Align: 2, Size: vscale x 2
370; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-266 x vscale], Type: Spill, Align: 2, Size: vscale x 2
371; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-268 x vscale], Type: Spill, Align: 2, Size: vscale x 2
372; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-270 x vscale], Type: Spill, Align: 2, Size: vscale x 2
373; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-272 x vscale], Type: Spill, Align: 2, Size: vscale x 2
374; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-274 x vscale], Type: Spill, Align: 2, Size: vscale x 2
375; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-276 x vscale], Type: Spill, Align: 2, Size: vscale x 2
376; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-278 x vscale], Type: Spill, Align: 2, Size: vscale x 2
377; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-280 x vscale], Type: Spill, Align: 2, Size: vscale x 2
378
; Streaming-compatible function making a real call (memset): the generated
; code queries __arm_sme_state and conditionally smstop/smstart around the
; call, and spills the full SVE callee-save set (z8-z23, p4-p15) plus the
; saved VG (cntd x9). This produces the long Spill list in the frame layout
; above. CHECK lines are autogenerated from llc output — do not edit by hand.
379define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) "aarch64_pstate_sm_compatible" {
380; CHECK-LABEL: svecc_call:
381; CHECK:       // %bb.0: // %entry
382; CHECK-NEXT:    stp x29, x30, [sp, #-48]! // 16-byte Folded Spill
383; CHECK-NEXT:    .cfi_def_cfa_offset 48
384; CHECK-NEXT:    cntd x9
385; CHECK-NEXT:    stp x9, x28, [sp, #16] // 16-byte Folded Spill
386; CHECK-NEXT:    stp x27, x19, [sp, #32] // 16-byte Folded Spill
387; CHECK-NEXT:    .cfi_offset w19, -8
388; CHECK-NEXT:    .cfi_offset w27, -16
389; CHECK-NEXT:    .cfi_offset w28, -24
390; CHECK-NEXT:    .cfi_offset w30, -40
391; CHECK-NEXT:    .cfi_offset w29, -48
392; CHECK-NEXT:    addvl sp, sp, #-18
393; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 144 * VG
394; CHECK-NEXT:    str p15, [sp, #4, mul vl] // 2-byte Folded Spill
395; CHECK-NEXT:    str p14, [sp, #5, mul vl] // 2-byte Folded Spill
396; CHECK-NEXT:    str p13, [sp, #6, mul vl] // 2-byte Folded Spill
397; CHECK-NEXT:    str p12, [sp, #7, mul vl] // 2-byte Folded Spill
398; CHECK-NEXT:    str p11, [sp, #8, mul vl] // 2-byte Folded Spill
399; CHECK-NEXT:    str p10, [sp, #9, mul vl] // 2-byte Folded Spill
400; CHECK-NEXT:    str p9, [sp, #10, mul vl] // 2-byte Folded Spill
401; CHECK-NEXT:    str p8, [sp, #11, mul vl] // 2-byte Folded Spill
402; CHECK-NEXT:    str p7, [sp, #12, mul vl] // 2-byte Folded Spill
403; CHECK-NEXT:    str p6, [sp, #13, mul vl] // 2-byte Folded Spill
404; CHECK-NEXT:    str p5, [sp, #14, mul vl] // 2-byte Folded Spill
405; CHECK-NEXT:    str p4, [sp, #15, mul vl] // 2-byte Folded Spill
406; CHECK-NEXT:    str z23, [sp, #2, mul vl] // 16-byte Folded Spill
407; CHECK-NEXT:    str z22, [sp, #3, mul vl] // 16-byte Folded Spill
408; CHECK-NEXT:    str z21, [sp, #4, mul vl] // 16-byte Folded Spill
409; CHECK-NEXT:    str z20, [sp, #5, mul vl] // 16-byte Folded Spill
410; CHECK-NEXT:    str z19, [sp, #6, mul vl] // 16-byte Folded Spill
411; CHECK-NEXT:    str z18, [sp, #7, mul vl] // 16-byte Folded Spill
412; CHECK-NEXT:    str z17, [sp, #8, mul vl] // 16-byte Folded Spill
413; CHECK-NEXT:    str z16, [sp, #9, mul vl] // 16-byte Folded Spill
414; CHECK-NEXT:    str z15, [sp, #10, mul vl] // 16-byte Folded Spill
415; CHECK-NEXT:    str z14, [sp, #11, mul vl] // 16-byte Folded Spill
416; CHECK-NEXT:    str z13, [sp, #12, mul vl] // 16-byte Folded Spill
417; CHECK-NEXT:    str z12, [sp, #13, mul vl] // 16-byte Folded Spill
418; CHECK-NEXT:    str z11, [sp, #14, mul vl] // 16-byte Folded Spill
419; CHECK-NEXT:    str z10, [sp, #15, mul vl] // 16-byte Folded Spill
420; CHECK-NEXT:    str z9, [sp, #16, mul vl] // 16-byte Folded Spill
421; CHECK-NEXT:    str z8, [sp, #17, mul vl] // 16-byte Folded Spill
422; CHECK-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 48 - 8 * VG
423; CHECK-NEXT:    .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 48 - 16 * VG
424; CHECK-NEXT:    .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 48 - 24 * VG
425; CHECK-NEXT:    .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 48 - 32 * VG
426; CHECK-NEXT:    .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 48 - 40 * VG
427; CHECK-NEXT:    .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 48 - 48 * VG
428; CHECK-NEXT:    .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 48 - 56 * VG
429; CHECK-NEXT:    .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 48 - 64 * VG
430; CHECK-NEXT:    mov x8, x0
431; CHECK-NEXT:    //APP
432; CHECK-NEXT:    //NO_APP
433; CHECK-NEXT:    bl __arm_sme_state
434; CHECK-NEXT:    and x19, x0, #0x1
435; CHECK-NEXT:    .cfi_offset vg, -32
436; CHECK-NEXT:    tbz w19, #0, .LBB7_2
437; CHECK-NEXT:  // %bb.1: // %entry
438; CHECK-NEXT:    smstop sm
439; CHECK-NEXT:  .LBB7_2: // %entry
440; CHECK-NEXT:    mov x0, x8
441; CHECK-NEXT:    mov w1, #45 // =0x2d
442; CHECK-NEXT:    mov w2, #37 // =0x25
443; CHECK-NEXT:    bl memset
444; CHECK-NEXT:    tbz w19, #0, .LBB7_4
445; CHECK-NEXT:  // %bb.3: // %entry
446; CHECK-NEXT:    smstart sm
447; CHECK-NEXT:  .LBB7_4: // %entry
448; CHECK-NEXT:    mov w0, #22647 // =0x5877
449; CHECK-NEXT:    movk w0, #59491, lsl #16
450; CHECK-NEXT:    .cfi_restore vg
451; CHECK-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
452; CHECK-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
453; CHECK-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
454; CHECK-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
455; CHECK-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
456; CHECK-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
457; CHECK-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
458; CHECK-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
459; CHECK-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
460; CHECK-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
461; CHECK-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
462; CHECK-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
463; CHECK-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
464; CHECK-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
465; CHECK-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
466; CHECK-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
467; CHECK-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
468; CHECK-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
469; CHECK-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
470; CHECK-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
471; CHECK-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
472; CHECK-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
473; CHECK-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
474; CHECK-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
475; CHECK-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
476; CHECK-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
477; CHECK-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
478; CHECK-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
479; CHECK-NEXT:    addvl sp, sp, #18
480; CHECK-NEXT:    .cfi_def_cfa wsp, 48
481; CHECK-NEXT:    .cfi_restore z8
482; CHECK-NEXT:    .cfi_restore z9
483; CHECK-NEXT:    .cfi_restore z10
484; CHECK-NEXT:    .cfi_restore z11
485; CHECK-NEXT:    .cfi_restore z12
486; CHECK-NEXT:    .cfi_restore z13
487; CHECK-NEXT:    .cfi_restore z14
488; CHECK-NEXT:    .cfi_restore z15
489; CHECK-NEXT:    ldp x27, x19, [sp, #32] // 16-byte Folded Reload
490; CHECK-NEXT:    ldr x28, [sp, #24] // 8-byte Folded Reload
491; CHECK-NEXT:    ldp x29, x30, [sp], #48 // 16-byte Folded Reload
492; CHECK-NEXT:    .cfi_def_cfa_offset 0
493; CHECK-NEXT:    .cfi_restore w19
494; CHECK-NEXT:    .cfi_restore w27
495; CHECK-NEXT:    .cfi_restore w28
496; CHECK-NEXT:    .cfi_restore w30
497; CHECK-NEXT:    .cfi_restore w29
498; CHECK-NEXT:    ret
499entry:
  ; Clobber x0/x28/x27/x3 so those GPRs need saving around the call.
500  tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
501  %call = call ptr @memset(ptr noundef nonnull %P1, i32 noundef 45, i32 noundef 37)
  ; Constant return value (0xE863_5877 as signed i32); matches the mov/movk pair above.
502  ret i32 -396142473
503}
504declare ptr @memset(ptr, i32, i32)
505
506; The VA register currently ends up in VLA space - in the presence of VLA-area
507; objects, we emit correct offsets for all objects except for these VLA objects.
508
509; CHECK-FRAMELAYOUT-LABEL: Function: vastate
510; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
511; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
512; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 16, Size: 8
513; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-40], Type: Spill, Align: 8, Size: 8
514; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48], Type: Spill, Align: 8, Size: 8
515; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-56], Type: Spill, Align: 8, Size: 8
516; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64], Type: Spill, Align: 8, Size: 8
517; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-72], Type: Spill, Align: 8, Size: 8
518; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-80], Type: Spill, Align: 8, Size: 8
519; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-88], Type: Spill, Align: 8, Size: 8
520; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-96], Type: Spill, Align: 8, Size: 8
521; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-104], Type: Spill, Align: 8, Size: 8
522; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-112], Type: Spill, Align: 8, Size: 8
523; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-128], Type: Variable, Align: 16, Size: 16
524; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-128], Type: VariableSized, Align: 16, Size: 0
525
; Streaming function with ZA state calling @other: the generated code sets up
; a TPIDR2 lazy-save buffer (rdsvl/msub, msr TPIDR2_EL0), drops out of
; streaming mode around the call, and conditionally calls
; __arm_tpidr2_restore afterwards. See the comment above about the
; VariableSized entry. CHECK lines are autogenerated — do not edit by hand.
526define i32 @vastate(i32 %x) "aarch64_inout_za" "aarch64_pstate_sm_enabled" "target-features"="+sme" {
527; CHECK-LABEL: vastate:
528; CHECK:       // %bb.0: // %entry
529; CHECK-NEXT:    stp d15, d14, [sp, #-112]! // 16-byte Folded Spill
530; CHECK-NEXT:    .cfi_def_cfa_offset 112
531; CHECK-NEXT:    cntd x9
532; CHECK-NEXT:    stp d13, d12, [sp, #16] // 16-byte Folded Spill
533; CHECK-NEXT:    stp d11, d10, [sp, #32] // 16-byte Folded Spill
534; CHECK-NEXT:    stp d9, d8, [sp, #48] // 16-byte Folded Spill
535; CHECK-NEXT:    stp x29, x30, [sp, #64] // 16-byte Folded Spill
536; CHECK-NEXT:    str x9, [sp, #80] // 8-byte Folded Spill
537; CHECK-NEXT:    stp x20, x19, [sp, #96] // 16-byte Folded Spill
538; CHECK-NEXT:    add x29, sp, #64
539; CHECK-NEXT:    .cfi_def_cfa w29, 48
540; CHECK-NEXT:    .cfi_offset w19, -8
541; CHECK-NEXT:    .cfi_offset w20, -16
542; CHECK-NEXT:    .cfi_offset w30, -40
543; CHECK-NEXT:    .cfi_offset w29, -48
544; CHECK-NEXT:    .cfi_offset b8, -56
545; CHECK-NEXT:    .cfi_offset b9, -64
546; CHECK-NEXT:    .cfi_offset b10, -72
547; CHECK-NEXT:    .cfi_offset b11, -80
548; CHECK-NEXT:    .cfi_offset b12, -88
549; CHECK-NEXT:    .cfi_offset b13, -96
550; CHECK-NEXT:    .cfi_offset b14, -104
551; CHECK-NEXT:    .cfi_offset b15, -112
552; CHECK-NEXT:    sub sp, sp, #16
553; CHECK-NEXT:    rdsvl x8, #1
554; CHECK-NEXT:    mov x9, sp
555; CHECK-NEXT:    mov w20, w0
556; CHECK-NEXT:    msub x9, x8, x8, x9
557; CHECK-NEXT:    mov sp, x9
558; CHECK-NEXT:    stur x9, [x29, #-80]
559; CHECK-NEXT:    sub x9, x29, #80
560; CHECK-NEXT:    sturh wzr, [x29, #-70]
561; CHECK-NEXT:    stur wzr, [x29, #-68]
562; CHECK-NEXT:    sturh w8, [x29, #-72]
563; CHECK-NEXT:    msr TPIDR2_EL0, x9
564; CHECK-NEXT:    .cfi_offset vg, -32
565; CHECK-NEXT:    smstop sm
566; CHECK-NEXT:    bl other
567; CHECK-NEXT:    smstart sm
568; CHECK-NEXT:    .cfi_restore vg
569; CHECK-NEXT:    smstart za
570; CHECK-NEXT:    mrs x8, TPIDR2_EL0
571; CHECK-NEXT:    sub x0, x29, #80
572; CHECK-NEXT:    cbnz x8, .LBB8_2
573; CHECK-NEXT:  // %bb.1: // %entry
574; CHECK-NEXT:    bl __arm_tpidr2_restore
575; CHECK-NEXT:  .LBB8_2: // %entry
576; CHECK-NEXT:    mov w0, w20
577; CHECK-NEXT:    msr TPIDR2_EL0, xzr
578; CHECK-NEXT:    sub sp, x29, #64
579; CHECK-NEXT:    .cfi_def_cfa wsp, 112
580; CHECK-NEXT:    ldp x20, x19, [sp, #96] // 16-byte Folded Reload
581; CHECK-NEXT:    ldp x29, x30, [sp, #64] // 16-byte Folded Reload
582; CHECK-NEXT:    ldp d9, d8, [sp, #48] // 16-byte Folded Reload
583; CHECK-NEXT:    ldp d11, d10, [sp, #32] // 16-byte Folded Reload
584; CHECK-NEXT:    ldp d13, d12, [sp, #16] // 16-byte Folded Reload
585; CHECK-NEXT:    ldp d15, d14, [sp], #112 // 16-byte Folded Reload
586; CHECK-NEXT:    .cfi_def_cfa_offset 0
587; CHECK-NEXT:    .cfi_restore w19
588; CHECK-NEXT:    .cfi_restore w20
589; CHECK-NEXT:    .cfi_restore w30
590; CHECK-NEXT:    .cfi_restore w29
591; CHECK-NEXT:    .cfi_restore b8
592; CHECK-NEXT:    .cfi_restore b9
593; CHECK-NEXT:    .cfi_restore b10
594; CHECK-NEXT:    .cfi_restore b11
595; CHECK-NEXT:    .cfi_restore b12
596; CHECK-NEXT:    .cfi_restore b13
597; CHECK-NEXT:    .cfi_restore b14
598; CHECK-NEXT:    .cfi_restore b15
599; CHECK-NEXT:    ret
600entry:
  ; A plain call from a ZA-sharing streaming function triggers the lazy-save
  ; machinery checked above.
601  tail call void @other()
602  ret i32 %x
603}
604declare void @other()
605