xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll (revision 1cb599835ccf7ee8b2d1d5a7f3107e19a26fc6f5)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

; Splat an f16 scalar (fa0) into every element via vfmv.v.f. The passthru
; operand is undef, so llc should emit a plain vsetvli + vfmv.v.f for each
; LMUL from mf4 (nxv1f16) up to m8 (nxv32f16).

declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
  <vscale x 1 x half>,
  half,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16(half %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
    <vscale x 1 x half> undef,
    half %0,
    iXLen %1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
  <vscale x 2 x half>,
  half,
  iXLen);

define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16(half %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
    <vscale x 2 x half> undef,
    half %0,
    iXLen %1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
  <vscale x 4 x half>,
  half,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16(half %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
    <vscale x 4 x half> undef,
    half %0,
    iXLen %1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
  <vscale x 8 x half>,
  half,
  iXLen);

define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16(half %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
    <vscale x 8 x half> undef,
    half %0,
    iXLen %1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
  <vscale x 16 x half>,
  half,
  iXLen);

define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16(half %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
    <vscale x 16 x half> undef,
    half %0,
    iXLen %1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
  <vscale x 32 x half>,
  half,
  iXLen);

define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16(half %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
    <vscale x 32 x half> undef,
    half %0,
    iXLen %1)

  ret <vscale x 32 x half> %a
}
126
; Splat an f32 scalar (fa0) into every element via vfmv.v.f, for LMUL mf2
; (nxv1f32) through m8 (nxv16f32). Passthru is undef in every call.

declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
  <vscale x 1 x float>,
  float,
  iXLen);

define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32(float %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
    <vscale x 1 x float> undef,
    float %0,
    iXLen %1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
  <vscale x 2 x float>,
  float,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfmv.v.f_f_nxv2f32(float %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
    <vscale x 2 x float> undef,
    float %0,
    iXLen %1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
  <vscale x 4 x float>,
  float,
  iXLen);

define <vscale x 4 x float> @intrinsic_vfmv.v.f_f_nxv4f32(float %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
    <vscale x 4 x float> undef,
    float %0,
    iXLen %1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
  <vscale x 8 x float>,
  float,
  iXLen);

define <vscale x 8 x float> @intrinsic_vfmv.v.f_f_nxv8f32(float %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
    <vscale x 8 x float> undef,
    float %0,
    iXLen %1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
  <vscale x 16 x float>,
  float,
  iXLen);

define <vscale x 16 x float> @intrinsic_vfmv.v.f_f_nxv16f32(float %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
    <vscale x 16 x float> undef,
    float %0,
    iXLen %1)

  ret <vscale x 16 x float> %a
}
226
; Splat an f64 scalar (fa0) into every element via vfmv.v.f, for LMUL m1
; (nxv1f64) through m8 (nxv8f64). Passthru is undef in every call.

declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
  <vscale x 1 x double>,
  double,
  iXLen);

define <vscale x 1 x double> @intrinsic_vfmv.v.f_f_nxv1f64(double %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
    <vscale x 1 x double> undef,
    double %0,
    iXLen %1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
  <vscale x 2 x double>,
  double,
  iXLen);

define <vscale x 2 x double> @intrinsic_vfmv.v.f_f_nxv2f64(double %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
    <vscale x 2 x double> undef,
    double %0,
    iXLen %1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
  <vscale x 4 x double>,
  double,
  iXLen);

define <vscale x 4 x double> @intrinsic_vfmv.v.f_f_nxv4f64(double %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
    <vscale x 4 x double> undef,
    double %0,
    iXLen %1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
  <vscale x 8 x double>,
  double,
  iXLen);

define <vscale x 8 x double> @intrinsic_vfmv.v.f_f_nxv8f64(double %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
    <vscale x 8 x double> undef,
    double %0,
    iXLen %1)

  ret <vscale x 8 x double> %a
}
306
; Splatting the FP constant +0.0 should be folded to an integer immediate
; splat (vmv.v.i v8, 0) instead of materializing 0.0 in an FP register and
; using vfmv.v.f. Covered for every element type and LMUL above; the declares
; from the earlier sections are reused here.

define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
    <vscale x 1 x half> undef,
    half 0.0,
    iXLen %0)

  ret <vscale x 1 x half> %a
}

define <vscale x 2 x half> @intrinsic_vmv.v.i_zero_nxv2f16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
    <vscale x 2 x half> undef,
    half 0.0,
    iXLen %0)

  ret <vscale x 2 x half> %a
}

define <vscale x 4 x half> @intrinsic_vmv.v.i_zero_nxv4f16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
    <vscale x 4 x half> undef,
    half 0.0,
    iXLen %0)

  ret <vscale x 4 x half> %a
}

define <vscale x 8 x half> @intrinsic_vmv.v.i_zero_nxv8f16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
    <vscale x 8 x half> undef,
    half 0.0,
    iXLen %0)

  ret <vscale x 8 x half> %a
}

define <vscale x 16 x half> @intrinsic_vmv.v.i_zero_nxv16f16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
    <vscale x 16 x half> undef,
    half 0.0,
    iXLen %0)

  ret <vscale x 16 x half> %a
}

define <vscale x 32 x half> @intrinsic_vmv.v.i_zero_nxv32f16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
    <vscale x 32 x half> undef,
    half 0.0,
    iXLen %0)

  ret <vscale x 32 x half> %a
}

define <vscale x 1 x float> @intrinsic_vmv.v.i_zero_nxv1f32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
    <vscale x 1 x float> undef,
    float 0.0,
    iXLen %0)

  ret <vscale x 1 x float> %a
}

define <vscale x 2 x float> @intrinsic_vmv.v.i_zero_nxv2f32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
    <vscale x 2 x float> undef,
    float 0.0,
    iXLen %0)

  ret <vscale x 2 x float> %a
}

define <vscale x 4 x float> @intrinsic_vmv.v.i_zero_nxv4f32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
    <vscale x 4 x float> undef,
    float 0.0,
    iXLen %0)

  ret <vscale x 4 x float> %a
}

define <vscale x 8 x float> @intrinsic_vmv.v.i_zero_nxv8f32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
    <vscale x 8 x float> undef,
    float 0.0,
    iXLen %0)

  ret <vscale x 8 x float> %a
}

define <vscale x 16 x float> @intrinsic_vmv.v.i_zero_nxv16f32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
    <vscale x 16 x float> undef,
    float 0.0,
    iXLen %0)

  ret <vscale x 16 x float> %a
}

define <vscale x 1 x double> @intrinsic_vmv.v.i_zero_nxv1f64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
    <vscale x 1 x double> undef,
    double 0.0,
    iXLen %0)

  ret <vscale x 1 x double> %a
}

define <vscale x 2 x double> @intrinsic_vmv.v.i_zero_nxv2f64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
    <vscale x 2 x double> undef,
    double 0.0,
    iXLen %0)

  ret <vscale x 2 x double> %a
}

define <vscale x 4 x double> @intrinsic_vmv.v.i_zero_nxv4f64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
    <vscale x 4 x double> undef,
    double 0.0,
    iXLen %0)

  ret <vscale x 4 x double> %a
}

define <vscale x 8 x double> @intrinsic_vmv.v.i_zero_nxv8f64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
    <vscale x 8 x double> undef,
    double 0.0,
    iXLen %0)

  ret <vscale x 8 x double> %a
}
531