; xref: /llvm-project/llvm/test/CodeGen/RISCV/llvm.exp10.ll (revision dae9cf3816bbb2b4589d258a82e6ac90fad71485)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+d \
; RUN:   -verify-machineinstrs -target-abi=ilp32d < %s \
; RUN:   | FileCheck -check-prefixes=CHECK,RV32IFD %s
; RUN: llc -mtriple=riscv64 -mattr=+d \
; RUN:   -verify-machineinstrs -target-abi=lp64d < %s \
; RUN:   | FileCheck -check-prefixes=CHECK,RV64IFD %s

; Declarations of the llvm.exp10 vector intrinsics exercised below.
declare <1 x half> @llvm.exp10.v1f16(<1 x half>)
declare <2 x half> @llvm.exp10.v2f16(<2 x half>)
declare <3 x half> @llvm.exp10.v3f16(<3 x half>)
declare <4 x half> @llvm.exp10.v4f16(<4 x half>)
declare <1 x float> @llvm.exp10.v1f32(<1 x float>)
declare <2 x float> @llvm.exp10.v2f32(<2 x float>)
declare <3 x float> @llvm.exp10.v3f32(<3 x float>)
declare <4 x float> @llvm.exp10.v4f32(<4 x float>)
declare <1 x double> @llvm.exp10.v1f64(<1 x double>)
declare <2 x double> @llvm.exp10.v2f64(<2 x double>)
declare <3 x double> @llvm.exp10.v3f64(<3 x double>)
declare <4 x double> @llvm.exp10.v4f64(<4 x double>)

; <1 x half>: scalarized to __extendhfsf2 -> exp10f -> __truncsfhf2 libcalls.
define <1 x half> @exp10_v1f16(<1 x half> %x) {
; RV32IFD-LABEL: exp10_v1f16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    fmv.w.x fa0, a0
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w a0, fa0
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    .cfi_restore ra
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v1f16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    fmv.w.x fa0, a0
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    .cfi_restore ra
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
; RV64IFD-NEXT:    ret
  %r = call <1 x half> @llvm.exp10.v1f16(<1 x half> %x)
  ret <1 x half> %r
}

; <2 x half>: two scalar libcall chains; lanes returned in a0/a1.
define <2 x half> @exp10_v2f16(<2 x half> %x) {
; RV32IFD-LABEL: exp10_v2f16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    fmv.w.x fs0, a1
; RV32IFD-NEXT:    fmv.w.x fa0, a0
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w s0, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs0
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w a1, fa0
; RV32IFD-NEXT:    mv a0, s0
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    .cfi_restore ra
; RV32IFD-NEXT:    .cfi_restore s0
; RV32IFD-NEXT:    .cfi_restore fs0
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v2f16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -32
; RV64IFD-NEXT:    .cfi_def_cfa_offset 32
; RV64IFD-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset s1, -24
; RV64IFD-NEXT:    mv s0, a1
; RV64IFD-NEXT:    fmv.w.x fa0, a0
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w s1, fa0
; RV64IFD-NEXT:    fmv.w.x fa0, s0
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w a1, fa0
; RV64IFD-NEXT:    mv a0, s1
; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    .cfi_restore ra
; RV64IFD-NEXT:    .cfi_restore s0
; RV64IFD-NEXT:    .cfi_restore s1
; RV64IFD-NEXT:    addi sp, sp, 32
; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
; RV64IFD-NEXT:    ret
  %r = call <2 x half> @llvm.exp10.v2f16(<2 x half> %x)
  ret <2 x half> %r
}

; <3 x half>: returned indirectly via sret pointer (a0); lanes packed with slli/srli/or.
define <3 x half> @exp10_v3f16(<3 x half> %x) {
; RV32IFD-LABEL: exp10_v3f16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -48
; RV32IFD-NEXT:    .cfi_def_cfa_offset 48
; RV32IFD-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s1, 36(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 24(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs2, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset s1, -12
; RV32IFD-NEXT:    .cfi_offset fs0, -24
; RV32IFD-NEXT:    .cfi_offset fs1, -32
; RV32IFD-NEXT:    .cfi_offset fs2, -40
; RV32IFD-NEXT:    mv s0, a0
; RV32IFD-NEXT:    lhu a0, 8(a1)
; RV32IFD-NEXT:    lhu a2, 0(a1)
; RV32IFD-NEXT:    lhu a1, 4(a1)
; RV32IFD-NEXT:    fmv.w.x fs0, a0
; RV32IFD-NEXT:    fmv.w.x fs1, a2
; RV32IFD-NEXT:    fmv.w.x fa0, a1
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.s fs2, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs1
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.x.w a0, fs2
; RV32IFD-NEXT:    slli s1, a0, 16
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w a0, fa0
; RV32IFD-NEXT:    slli a0, a0, 16
; RV32IFD-NEXT:    srli a0, a0, 16
; RV32IFD-NEXT:    or s1, a0, s1
; RV32IFD-NEXT:    fmv.s fa0, fs0
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w a0, fa0
; RV32IFD-NEXT:    sw s1, 0(s0)
; RV32IFD-NEXT:    sh a0, 4(s0)
; RV32IFD-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s1, 36(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    .cfi_restore ra
; RV32IFD-NEXT:    .cfi_restore s0
; RV32IFD-NEXT:    .cfi_restore s1
; RV32IFD-NEXT:    .cfi_restore fs0
; RV32IFD-NEXT:    .cfi_restore fs1
; RV32IFD-NEXT:    .cfi_restore fs2
; RV32IFD-NEXT:    addi sp, sp, 48
; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v3f16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 48
; RV64IFD-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset s1, -24
; RV64IFD-NEXT:    .cfi_offset s2, -32
; RV64IFD-NEXT:    .cfi_offset fs0, -40
; RV64IFD-NEXT:    lhu s1, 0(a1)
; RV64IFD-NEXT:    lhu a2, 8(a1)
; RV64IFD-NEXT:    lhu s2, 16(a1)
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    fmv.w.x fa0, a2
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.s fs0, fa0
; RV64IFD-NEXT:    fmv.w.x fa0, s1
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.x.w a0, fs0
; RV64IFD-NEXT:    slli s1, a0, 16
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    slli a0, a0, 48
; RV64IFD-NEXT:    srli a0, a0, 48
; RV64IFD-NEXT:    or s1, a0, s1
; RV64IFD-NEXT:    fmv.w.x fa0, s2
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    sw s1, 0(s0)
; RV64IFD-NEXT:    sh a0, 4(s0)
; RV64IFD-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    .cfi_restore ra
; RV64IFD-NEXT:    .cfi_restore s0
; RV64IFD-NEXT:    .cfi_restore s1
; RV64IFD-NEXT:    .cfi_restore s2
; RV64IFD-NEXT:    .cfi_restore fs0
; RV64IFD-NEXT:    addi sp, sp, 48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
; RV64IFD-NEXT:    ret
  %r = call <3 x half> @llvm.exp10.v3f16(<3 x half> %x)
  ret <3 x half> %r
}

; <4 x half>: four scalar libcall chains; result stored lane-by-lane via sret pointer.
define <4 x half> @exp10_v4f16(<4 x half> %x) {
; RV32IFD-LABEL: exp10_v4f16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -64
; RV32IFD-NEXT:    .cfi_def_cfa_offset 64
; RV32IFD-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s1, 52(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s2, 48(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s3, 44(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 32(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 24(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs2, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs3, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset s1, -12
; RV32IFD-NEXT:    .cfi_offset s2, -16
; RV32IFD-NEXT:    .cfi_offset s3, -20
; RV32IFD-NEXT:    .cfi_offset fs0, -32
; RV32IFD-NEXT:    .cfi_offset fs1, -40
; RV32IFD-NEXT:    .cfi_offset fs2, -48
; RV32IFD-NEXT:    .cfi_offset fs3, -56
; RV32IFD-NEXT:    mv s0, a0
; RV32IFD-NEXT:    lhu a0, 12(a1)
; RV32IFD-NEXT:    lhu a2, 0(a1)
; RV32IFD-NEXT:    lhu a3, 4(a1)
; RV32IFD-NEXT:    lhu a1, 8(a1)
; RV32IFD-NEXT:    fmv.w.x fs0, a0
; RV32IFD-NEXT:    fmv.w.x fs1, a2
; RV32IFD-NEXT:    fmv.w.x fs2, a3
; RV32IFD-NEXT:    fmv.w.x fa0, a1
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.s fs3, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs2
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.s fs2, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs1
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.s fs1, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs0
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.x.w s1, fs1
; RV32IFD-NEXT:    fmv.x.w s2, fs2
; RV32IFD-NEXT:    fmv.x.w s3, fs3
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w a0, fa0
; RV32IFD-NEXT:    sh s1, 0(s0)
; RV32IFD-NEXT:    sh s2, 2(s0)
; RV32IFD-NEXT:    sh s3, 4(s0)
; RV32IFD-NEXT:    sh a0, 6(s0)
; RV32IFD-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s1, 52(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s2, 48(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s3, 44(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 32(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 24(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs2, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs3, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    .cfi_restore ra
; RV32IFD-NEXT:    .cfi_restore s0
; RV32IFD-NEXT:    .cfi_restore s1
; RV32IFD-NEXT:    .cfi_restore s2
; RV32IFD-NEXT:    .cfi_restore s3
; RV32IFD-NEXT:    .cfi_restore fs0
; RV32IFD-NEXT:    .cfi_restore fs1
; RV32IFD-NEXT:    .cfi_restore fs2
; RV32IFD-NEXT:    .cfi_restore fs3
; RV32IFD-NEXT:    addi sp, sp, 64
; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v4f16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -64
; RV64IFD-NEXT:    .cfi_def_cfa_offset 64
; RV64IFD-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s2, 32(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s3, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs2, 0(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset s1, -24
; RV64IFD-NEXT:    .cfi_offset s2, -32
; RV64IFD-NEXT:    .cfi_offset s3, -40
; RV64IFD-NEXT:    .cfi_offset fs0, -48
; RV64IFD-NEXT:    .cfi_offset fs1, -56
; RV64IFD-NEXT:    .cfi_offset fs2, -64
; RV64IFD-NEXT:    lhu s1, 0(a1)
; RV64IFD-NEXT:    lhu s2, 8(a1)
; RV64IFD-NEXT:    lhu a2, 16(a1)
; RV64IFD-NEXT:    lhu s3, 24(a1)
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    fmv.w.x fa0, a2
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.s fs0, fa0
; RV64IFD-NEXT:    fmv.w.x fa0, s2
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.s fs1, fa0
; RV64IFD-NEXT:    fmv.w.x fa0, s1
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.s fs2, fa0
; RV64IFD-NEXT:    fmv.w.x fa0, s3
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.x.w s1, fs2
; RV64IFD-NEXT:    fmv.x.w s2, fs1
; RV64IFD-NEXT:    fmv.x.w s3, fs0
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    sh s1, 0(s0)
; RV64IFD-NEXT:    sh s2, 2(s0)
; RV64IFD-NEXT:    sh s3, 4(s0)
; RV64IFD-NEXT:    sh a0, 6(s0)
; RV64IFD-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s2, 32(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s3, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    .cfi_restore ra
; RV64IFD-NEXT:    .cfi_restore s0
; RV64IFD-NEXT:    .cfi_restore s1
; RV64IFD-NEXT:    .cfi_restore s2
; RV64IFD-NEXT:    .cfi_restore s3
; RV64IFD-NEXT:    .cfi_restore fs0
; RV64IFD-NEXT:    .cfi_restore fs1
; RV64IFD-NEXT:    .cfi_restore fs2
; RV64IFD-NEXT:    addi sp, sp, 64
; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
; RV64IFD-NEXT:    ret
  %r = call <4 x half> @llvm.exp10.v4f16(<4 x half> %x)
  ret <4 x half> %r
}

; <1 x float>: direct tail-position libcall to exp10f (argument already in fa0).
define <1 x float> @exp10_v1f32(<1 x float> %x) {
; RV32IFD-LABEL: exp10_v1f32:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    .cfi_restore ra
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v1f32:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    .cfi_restore ra
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
; RV64IFD-NEXT:    ret
  %r = call <1 x float> @llvm.exp10.v1f32(<1 x float> %x)
  ret <1 x float> %r
}

; <2 x float>: two exp10f calls; lanes passed/returned in fa0/fa1.
define <2 x float> @exp10_v2f32(<2 x float> %x) {
; RV32IFD-LABEL: exp10_v2f32:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 32
; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    .cfi_offset fs1, -24
; RV32IFD-NEXT:    fmv.s fs0, fa1
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fs1, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs0
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fa1, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs1
; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    .cfi_restore ra
; RV32IFD-NEXT:    .cfi_restore fs0
; RV32IFD-NEXT:    .cfi_restore fs1
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v2f32:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -32
; RV64IFD-NEXT:    .cfi_def_cfa_offset 32
; RV64IFD-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset fs0, -16
; RV64IFD-NEXT:    .cfi_offset fs1, -24
; RV64IFD-NEXT:    fmv.s fs0, fa1
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.s fs1, fa0
; RV64IFD-NEXT:    fmv.s fa0, fs0
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.s fa1, fa0
; RV64IFD-NEXT:    fmv.s fa0, fs1
; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    .cfi_restore ra
; RV64IFD-NEXT:    .cfi_restore fs0
; RV64IFD-NEXT:    .cfi_restore fs1
; RV64IFD-NEXT:    addi sp, sp, 32
; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
; RV64IFD-NEXT:    ret
  %r = call <2 x float> @llvm.exp10.v2f32(<2 x float> %x)
  ret <2 x float> %r
}

; <3 x float>: returned indirectly via sret pointer (a0); RV64 packs lanes 0-1 into one sd.
define <3 x float> @exp10_v3f32(<3 x float> %x) {
; RV32IFD-LABEL: exp10_v3f32:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 32
; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs2, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    .cfi_offset fs1, -24
; RV32IFD-NEXT:    .cfi_offset fs2, -32
; RV32IFD-NEXT:    fmv.s fs0, fa2
; RV32IFD-NEXT:    fmv.s fs1, fa1
; RV32IFD-NEXT:    mv s0, a0
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fs2, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs1
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fs1, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs0
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fsw fs2, 0(s0)
; RV32IFD-NEXT:    fsw fs1, 4(s0)
; RV32IFD-NEXT:    fsw fa0, 8(s0)
; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    .cfi_restore ra
; RV32IFD-NEXT:    .cfi_restore s0
; RV32IFD-NEXT:    .cfi_restore fs0
; RV32IFD-NEXT:    .cfi_restore fs1
; RV32IFD-NEXT:    .cfi_restore fs2
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v3f32:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 48
; RV64IFD-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset s1, -24
; RV64IFD-NEXT:    .cfi_offset fs0, -32
; RV64IFD-NEXT:    .cfi_offset fs1, -40
; RV64IFD-NEXT:    fmv.s fs0, fa2
; RV64IFD-NEXT:    fmv.s fs1, fa0
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    fmv.s fa0, fa1
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    slli s1, a0, 32
; RV64IFD-NEXT:    fmv.s fa0, fs1
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    slli a0, a0, 32
; RV64IFD-NEXT:    srli a0, a0, 32
; RV64IFD-NEXT:    or s1, a0, s1
; RV64IFD-NEXT:    fmv.s fa0, fs0
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    sd s1, 0(s0)
; RV64IFD-NEXT:    fsw fa0, 8(s0)
; RV64IFD-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    .cfi_restore ra
; RV64IFD-NEXT:    .cfi_restore s0
; RV64IFD-NEXT:    .cfi_restore s1
; RV64IFD-NEXT:    .cfi_restore fs0
; RV64IFD-NEXT:    .cfi_restore fs1
; RV64IFD-NEXT:    addi sp, sp, 48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
; RV64IFD-NEXT:    ret
  %r = call <3 x float> @llvm.exp10.v3f32(<3 x float> %x)
  ret <3 x float> %r
}

; <4 x float>: four exp10f calls; results stored via sret pointer (a0).
define <4 x float> @exp10_v4f32(<4 x float> %x) {
; RV32IFD-LABEL: exp10_v4f32:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -48
; RV32IFD-NEXT:    .cfi_def_cfa_offset 48
; RV32IFD-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 32(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 24(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs2, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs3, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    .cfi_offset fs1, -24
; RV32IFD-NEXT:    .cfi_offset fs2, -32
; RV32IFD-NEXT:    .cfi_offset fs3, -40
; RV32IFD-NEXT:    fmv.s fs0, fa3
; RV32IFD-NEXT:    fmv.s fs1, fa2
; RV32IFD-NEXT:    fmv.s fs2, fa1
; RV32IFD-NEXT:    mv s0, a0
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fs3, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fs2, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs1
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fs1, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs0
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fsw fs3, 0(s0)
; RV32IFD-NEXT:    fsw fs2, 4(s0)
; RV32IFD-NEXT:    fsw fs1, 8(s0)
; RV32IFD-NEXT:    fsw fa0, 12(s0)
; RV32IFD-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 32(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 24(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs2, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs3, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    .cfi_restore ra
; RV32IFD-NEXT:    .cfi_restore s0
; RV32IFD-NEXT:    .cfi_restore fs0
; RV32IFD-NEXT:    .cfi_restore fs1
; RV32IFD-NEXT:    .cfi_restore fs2
; RV32IFD-NEXT:    .cfi_restore fs3
; RV32IFD-NEXT:    addi sp, sp, 48
; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v4f32:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 48
; RV64IFD-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs2, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs3, 0(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset fs0, -24
; RV64IFD-NEXT:    .cfi_offset fs1, -32
; RV64IFD-NEXT:    .cfi_offset fs2, -40
; RV64IFD-NEXT:    .cfi_offset fs3, -48
; RV64IFD-NEXT:    fmv.s fs0, fa3
; RV64IFD-NEXT:    fmv.s fs1, fa2
; RV64IFD-NEXT:    fmv.s fs2, fa1
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.s fs3, fa0
; RV64IFD-NEXT:    fmv.s fa0, fs2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.s fs2, fa0
; RV64IFD-NEXT:    fmv.s fa0, fs1
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.s fs1, fa0
; RV64IFD-NEXT:    fmv.s fa0, fs0
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fsw fs3, 0(s0)
; RV64IFD-NEXT:    fsw fs2, 4(s0)
; RV64IFD-NEXT:    fsw fs1, 8(s0)
; RV64IFD-NEXT:    fsw fa0, 12(s0)
; RV64IFD-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs3, 0(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    .cfi_restore ra
; RV64IFD-NEXT:    .cfi_restore s0
; RV64IFD-NEXT:    .cfi_restore fs0
; RV64IFD-NEXT:    .cfi_restore fs1
; RV64IFD-NEXT:    .cfi_restore fs2
; RV64IFD-NEXT:    .cfi_restore fs3
; RV64IFD-NEXT:    addi sp, sp, 48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
; RV64IFD-NEXT:    ret
  %r = call <4 x float> @llvm.exp10.v4f32(<4 x float> %x)
  ret <4 x float> %r
}

; FIXME: Broken
; define <1 x double> @exp10_v1f64(<1 x double> %x) {
;   %r = call <1 x double> @llvm.exp10.v1f64(<1 x double> %x)
;   ret <1 x double> %r
; }

; <2 x double>: two exp10 calls; lanes passed/returned in fa0/fa1.
define <2 x double> @exp10_v2f64(<2 x double> %x) {
; RV32IFD-LABEL: exp10_v2f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 32
; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    .cfi_offset fs1, -24
; RV32IFD-NEXT:    fmv.d fs0, fa1
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fs1, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs0
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fa1, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs1
; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    .cfi_restore ra
; RV32IFD-NEXT:    .cfi_restore fs0
; RV32IFD-NEXT:    .cfi_restore fs1
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v2f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -32
; RV64IFD-NEXT:    .cfi_def_cfa_offset 32
; RV64IFD-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset fs0, -16
; RV64IFD-NEXT:    .cfi_offset fs1, -24
; RV64IFD-NEXT:    fmv.d fs0, fa1
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fs1, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs0
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fa1, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs1
; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    .cfi_restore ra
; RV64IFD-NEXT:    .cfi_restore fs0
; RV64IFD-NEXT:    .cfi_restore fs1
; RV64IFD-NEXT:    addi sp, sp, 32
; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
; RV64IFD-NEXT:    ret
  %r = call <2 x double> @llvm.exp10.v2f64(<2 x double> %x)
  ret <2 x double> %r
}

;; <3 x double> exp10: there is no vector libcall, so the call is scalarized
;; into three calls to the libm "exp10" routine.  A <3 x double> return does
;; not fit in fa0/fa1, so it is returned indirectly: the caller passes a
;; result pointer in a0 (saved in s0), and the three results are stored to
;; 0/8/16(s0).  Inputs fa1/fa2 are kept alive across the calls in the
;; callee-saved fs0/fs1, and intermediate results in fs1/fs2.  RV32IFD and
;; RV64IFD differ only in GPR spill width (sw/lw vs sd/ld) and frame size.
define <3 x double> @exp10_v3f64(<3 x double> %x) {
; RV32IFD-LABEL: exp10_v3f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 32
; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs2, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    .cfi_offset fs1, -24
; RV32IFD-NEXT:    .cfi_offset fs2, -32
; RV32IFD-NEXT:    fmv.d fs0, fa2
; RV32IFD-NEXT:    fmv.d fs1, fa1
; RV32IFD-NEXT:    mv s0, a0
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fs2, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs1
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fs1, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs0
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fsd fs2, 0(s0)
; RV32IFD-NEXT:    fsd fs1, 8(s0)
; RV32IFD-NEXT:    fsd fa0, 16(s0)
; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    .cfi_restore ra
; RV32IFD-NEXT:    .cfi_restore s0
; RV32IFD-NEXT:    .cfi_restore fs0
; RV32IFD-NEXT:    .cfi_restore fs1
; RV32IFD-NEXT:    .cfi_restore fs2
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v3f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 48
; RV64IFD-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs2, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset fs0, -24
; RV64IFD-NEXT:    .cfi_offset fs1, -32
; RV64IFD-NEXT:    .cfi_offset fs2, -40
; RV64IFD-NEXT:    fmv.d fs0, fa2
; RV64IFD-NEXT:    fmv.d fs1, fa1
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fs2, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs1
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fs1, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs0
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fsd fs2, 0(s0)
; RV64IFD-NEXT:    fsd fs1, 8(s0)
; RV64IFD-NEXT:    fsd fa0, 16(s0)
; RV64IFD-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    .cfi_restore ra
; RV64IFD-NEXT:    .cfi_restore s0
; RV64IFD-NEXT:    .cfi_restore fs0
; RV64IFD-NEXT:    .cfi_restore fs1
; RV64IFD-NEXT:    .cfi_restore fs2
; RV64IFD-NEXT:    addi sp, sp, 48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
; RV64IFD-NEXT:    ret
  %r = call <3 x double> @llvm.exp10.v3f64(<3 x double> %x)
  ret <3 x double> %r
}
832
;; <4 x double> exp10: scalarized into four libm "exp10" calls, with the
;; result returned indirectly through the pointer in a0 (held in s0) and
;; the four values stored to 0/8/16/24(s0).  Inputs fa1-fa3 and the first
;; three results are staged in callee-saved fs0-fs3 so they survive the
;; calls; note fs2/fs1 are reused to hold results once their input copy is
;; consumed.  RV32IFD and RV64IFD differ only in GPR spill width and the
;; resulting frame layout.
define <4 x double> @exp10_v4f64(<4 x double> %x) {
; RV32IFD-LABEL: exp10_v4f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -48
; RV32IFD-NEXT:    .cfi_def_cfa_offset 48
; RV32IFD-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 32(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 24(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs2, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs3, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    .cfi_offset fs1, -24
; RV32IFD-NEXT:    .cfi_offset fs2, -32
; RV32IFD-NEXT:    .cfi_offset fs3, -40
; RV32IFD-NEXT:    fmv.d fs0, fa3
; RV32IFD-NEXT:    fmv.d fs1, fa2
; RV32IFD-NEXT:    fmv.d fs2, fa1
; RV32IFD-NEXT:    mv s0, a0
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fs3, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs2
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fs2, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs1
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fs1, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs0
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fsd fs3, 0(s0)
; RV32IFD-NEXT:    fsd fs2, 8(s0)
; RV32IFD-NEXT:    fsd fs1, 16(s0)
; RV32IFD-NEXT:    fsd fa0, 24(s0)
; RV32IFD-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 32(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 24(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs2, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs3, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    .cfi_restore ra
; RV32IFD-NEXT:    .cfi_restore s0
; RV32IFD-NEXT:    .cfi_restore fs0
; RV32IFD-NEXT:    .cfi_restore fs1
; RV32IFD-NEXT:    .cfi_restore fs2
; RV32IFD-NEXT:    .cfi_restore fs3
; RV32IFD-NEXT:    addi sp, sp, 48
; RV32IFD-NEXT:    .cfi_def_cfa_offset 0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v4f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 48
; RV64IFD-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs2, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs3, 0(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset fs0, -24
; RV64IFD-NEXT:    .cfi_offset fs1, -32
; RV64IFD-NEXT:    .cfi_offset fs2, -40
; RV64IFD-NEXT:    .cfi_offset fs3, -48
; RV64IFD-NEXT:    fmv.d fs0, fa3
; RV64IFD-NEXT:    fmv.d fs1, fa2
; RV64IFD-NEXT:    fmv.d fs2, fa1
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fs3, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs2
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fs2, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs1
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fs1, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs0
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fsd fs3, 0(s0)
; RV64IFD-NEXT:    fsd fs2, 8(s0)
; RV64IFD-NEXT:    fsd fs1, 16(s0)
; RV64IFD-NEXT:    fsd fa0, 24(s0)
; RV64IFD-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs3, 0(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    .cfi_restore ra
; RV64IFD-NEXT:    .cfi_restore s0
; RV64IFD-NEXT:    .cfi_restore fs0
; RV64IFD-NEXT:    .cfi_restore fs1
; RV64IFD-NEXT:    .cfi_restore fs2
; RV64IFD-NEXT:    .cfi_restore fs3
; RV64IFD-NEXT:    addi sp, sp, 48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 0
; RV64IFD-NEXT:    ret
  %r = call <4 x double> @llvm.exp10.v4f64(<4 x double> %x)
  ret <4 x double> %r
}
936;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
937; CHECK: {{.*}}
938