; xref: /llvm-project/llvm/test/CodeGen/RISCV/double-arith-strict.ll (revision 576d81baa5cf1801bae0fd05892be34acde33c6a)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32d \
; RUN:   | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64d \
; RUN:   | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32 \
; RUN:   | FileCheck -check-prefix=RV32IZFINXZDINX %s
; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64 \
; RUN:   | FileCheck -check-prefix=RV64IZFINXZDINX %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s

; Constrained fadd: D/Zdinx targets select fadd.d; soft-float calls __adddf3.
define double @fadd_d(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fadd_d:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fadd.d fa0, fa0, fa1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fadd_d:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fadd.d a0, a0, a2
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fadd_d:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fadd_d:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __adddf3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fadd_d:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __adddf3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)

; Constrained fsub: D/Zdinx targets select fsub.d; soft-float calls __subdf3.
define double @fsub_d(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fsub_d:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fsub.d fa0, fa0, fa1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fsub_d:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fsub.d a0, a0, a2
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fsub_d:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fsub.d a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fsub_d:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __subdf3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fsub_d:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __subdf3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata)

; Constrained fmul: D/Zdinx targets select fmul.d; soft-float calls __muldf3.
define double @fmul_d(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fmul_d:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fmul.d fa0, fa0, fa1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fmul_d:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fmul.d a0, a0, a2
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fmul_d:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fmul.d a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fmul_d:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __muldf3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmul_d:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __muldf3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)

; Constrained fdiv: D/Zdinx targets select fdiv.d; soft-float calls __divdf3.
define double @fdiv_d(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fdiv_d:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fdiv.d fa0, fa0, fa1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fdiv_d:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fdiv.d a0, a0, a2
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fdiv_d:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fdiv.d a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fdiv_d:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __divdf3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fdiv_d:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __divdf3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)

; Constrained sqrt: D/Zdinx targets select fsqrt.d; soft-float calls libm sqrt.
define double @fsqrt_d(double %a) nounwind strictfp {
; CHECKIFD-LABEL: fsqrt_d:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fsqrt.d fa0, fa0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fsqrt_d:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fsqrt.d a0, a0
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fsqrt_d:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fsqrt.d a0, a0
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fsqrt_d:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call sqrt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fsqrt_d:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call sqrt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)

; Constrained minnum is not selected to fmin.d here; every config calls libm fmin.
define double @fmin_d(double %a, double %b) nounwind strictfp {
; RV32IFD-LABEL: fmin_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call fmin
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fmin_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call fmin
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fmin_d:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call fmin
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fmin_d:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call fmin
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fmin_d:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call fmin
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmin_d:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call fmin
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.minnum.f64(double %a, double %b, metadata !"fpexcept.strict") strictfp
  ret double %1
}
declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata) strictfp

; Constrained maxnum is not selected to fmax.d here; every config calls libm fmax.
define double @fmax_d(double %a, double %b) nounwind strictfp {
; RV32IFD-LABEL: fmax_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call fmax
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fmax_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call fmax
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fmax_d:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call fmax
; RV32IZFINXZDINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fmax_d:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT:    call fmax
; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fmax_d:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call fmax
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmax_d:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call fmax
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.maxnum.f64(double %a, double %b, metadata !"fpexcept.strict") strictfp
  ret double %1
}
declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata) strictfp

; Constrained fma: D/Zdinx targets select fmadd.d; soft-float calls libm fma.
define double @fmadd_d(double %a, double %b, double %c) nounwind strictfp {
; CHECKIFD-LABEL: fmadd_d:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fmadd_d:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fmadd.d a0, a0, a2, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fmadd_d:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fmadd.d a0, a0, a1, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fmadd_d:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call fma
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmadd_d:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call fma
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.fma.f64(double %a, double %b, double %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata) strictfp

; fma with negated addend: c goes through fadd 0.0 so the fneg folds into fmsub.d.
define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp {
; RV32IFD-LABEL: fmsub_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.d.w fa5, zero
; RV32IFD-NEXT:    fadd.d fa5, fa2, fa5
; RV32IFD-NEXT:    fmsub.d fa0, fa0, fa1, fa5
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fmsub_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x fa5, zero
; RV64IFD-NEXT:    fadd.d fa5, fa2, fa5
; RV64IFD-NEXT:    fmsub.d fa0, fa0, fa1, fa5
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fmsub_d:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT:    fmsub.d a0, a0, a2, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fmsub_d:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fadd.d a2, a2, zero
; RV64IZFINXZDINX-NEXT:    fmsub.d a0, a0, a1, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fmsub_d:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a3
; RV32I-NEXT:    mv s1, a2
; RV32I-NEXT:    mv s2, a1
; RV32I-NEXT:    mv s3, a0
; RV32I-NEXT:    mv a0, a4
; RV32I-NEXT:    mv a1, a5
; RV32I-NEXT:    li a2, 0
; RV32I-NEXT:    li a3, 0
; RV32I-NEXT:    call __adddf3
; RV32I-NEXT:    mv a4, a0
; RV32I-NEXT:    lui a5, 524288
; RV32I-NEXT:    xor a5, a1, a5
; RV32I-NEXT:    mv a0, s3
; RV32I-NEXT:    mv a1, s2
; RV32I-NEXT:    mv a2, s1
; RV32I-NEXT:    mv a3, s0
; RV32I-NEXT:    call fma
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmsub_d:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    mv a0, a2
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __adddf3
; RV64I-NEXT:    li a1, -1
; RV64I-NEXT:    slli a1, a1, 63
; RV64I-NEXT:    xor a2, a0, a1
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call fma
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %c_ = fadd double 0.0, %c ; avoid negation using xor
  %negc = fneg double %c_
  %1 = call double @llvm.experimental.constrained.fma.f64(double %a, double %b, double %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

; fma with both a and c negated (via fadd 0.0) so the fnegs fold into fnmadd.d.
define double @fnmadd_d(double %a, double %b, double %c) nounwind strictfp {
; RV32IFD-LABEL: fnmadd_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.d.w fa5, zero
; RV32IFD-NEXT:    fadd.d fa4, fa0, fa5
; RV32IFD-NEXT:    fadd.d fa5, fa2, fa5
; RV32IFD-NEXT:    fnmadd.d fa0, fa4, fa1, fa5
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fnmadd_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x fa5, zero
; RV64IFD-NEXT:    fadd.d fa4, fa0, fa5
; RV64IFD-NEXT:    fadd.d fa5, fa2, fa5
; RV64IFD-NEXT:    fnmadd.d fa0, fa4, fa1, fa5
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fnmadd_d:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT:    fadd.d a0, a0, a6
; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT:    fnmadd.d a0, a0, a2, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fnmadd_d:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, zero
; RV64IZFINXZDINX-NEXT:    fadd.d a2, a2, zero
; RV64IZFINXZDINX-NEXT:    fnmadd.d a0, a0, a1, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fnmadd_d:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a5
; RV32I-NEXT:    mv s1, a4
; RV32I-NEXT:    mv s2, a3
; RV32I-NEXT:    mv s3, a2
; RV32I-NEXT:    li a2, 0
; RV32I-NEXT:    li a3, 0
; RV32I-NEXT:    call __adddf3
; RV32I-NEXT:    mv s4, a0
; RV32I-NEXT:    mv s5, a1
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    li a2, 0
; RV32I-NEXT:    li a3, 0
; RV32I-NEXT:    call __adddf3
; RV32I-NEXT:    mv a4, a0
; RV32I-NEXT:    lui a5, 524288
; RV32I-NEXT:    xor a2, s5, a5
; RV32I-NEXT:    xor a5, a1, a5
; RV32I-NEXT:    mv a0, s4
; RV32I-NEXT:    mv a1, a2
; RV32I-NEXT:    mv a2, s3
; RV32I-NEXT:    mv a3, s2
; RV32I-NEXT:    call fma
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fnmadd_d:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a2
; RV64I-NEXT:    mv s1, a1
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __adddf3
; RV64I-NEXT:    mv s2, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __adddf3
; RV64I-NEXT:    li a1, -1
; RV64I-NEXT:    slli a2, a1, 63
; RV64I-NEXT:    xor a1, s2, a2
; RV64I-NEXT:    xor a2, a0, a2
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    mv a1, s1
; RV64I-NEXT:    call fma
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %a_ = fadd double 0.0, %a
  %c_ = fadd double 0.0, %c
  %nega = fneg double %a_
  %negc = fneg double %c_
  %1 = call double @llvm.experimental.constrained.fma.f64(double %nega, double %b, double %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

; Same as fnmadd_d but with b and c negated, checking the commuted fnmadd.d pattern.
define double @fnmadd_d_2(double %a, double %b, double %c) nounwind strictfp {
; RV32IFD-LABEL: fnmadd_d_2:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.d.w fa5, zero
; RV32IFD-NEXT:    fadd.d fa4, fa1, fa5
; RV32IFD-NEXT:    fadd.d fa5, fa2, fa5
; RV32IFD-NEXT:    fnmadd.d fa0, fa4, fa0, fa5
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fnmadd_d_2:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x fa5, zero
; RV64IFD-NEXT:    fadd.d fa4, fa1, fa5
; RV64IFD-NEXT:    fadd.d fa5, fa2, fa5
; RV64IFD-NEXT:    fnmadd.d fa0, fa4, fa0, fa5
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fnmadd_d_2:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT:    fadd.d a2, a2, a6
; RV32IZFINXZDINX-NEXT:    fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT:    fnmadd.d a0, a2, a0, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fnmadd_d_2:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fadd.d a1, a1, zero
; RV64IZFINXZDINX-NEXT:    fadd.d a2, a2, zero
; RV64IZFINXZDINX-NEXT:    fnmadd.d a0, a1, a0, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fnmadd_d_2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a5
; RV32I-NEXT:    mv s1, a4
; RV32I-NEXT:    mv s2, a1
; RV32I-NEXT:    mv s3, a0
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:    mv a1, a3
; RV32I-NEXT:    li a2, 0
; RV32I-NEXT:    li a3, 0
; RV32I-NEXT:    call __adddf3
; RV32I-NEXT:    mv s4, a0
; RV32I-NEXT:    mv s5, a1
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    li a2, 0
; RV32I-NEXT:    li a3, 0
; RV32I-NEXT:    call __adddf3
; RV32I-NEXT:    mv a4, a0
; RV32I-NEXT:    lui a5, 524288
; RV32I-NEXT:    xor a3, s5, a5
; RV32I-NEXT:    xor a5, a1, a5
; RV32I-NEXT:    mv a0, s3
; RV32I-NEXT:    mv a1, s2
; RV32I-NEXT:    mv a2, s4
; RV32I-NEXT:    call fma
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fnmadd_d_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a2
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __adddf3
; RV64I-NEXT:    mv s2, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __adddf3
; RV64I-NEXT:    li a1, -1
; RV64I-NEXT:    slli a2, a1, 63
; RV64I-NEXT:    xor a1, s2, a2
; RV64I-NEXT:    xor a2, a0, a2
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call fma
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %b_ = fadd double 0.0, %b
  %c_ = fadd double 0.0, %c
  %negb = fneg double %b_
  %negc = fneg double %c_
  %1 = call double @llvm.experimental.constrained.fma.f64(double %a, double %negb, double %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

; fma with negated multiplicand a (via fadd 0.0) so the fneg folds into fnmsub.d.
define double @fnmsub_d(double %a, double %b, double %c) nounwind strictfp {
; RV32IFD-LABEL: fnmsub_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.d.w fa5, zero
; RV32IFD-NEXT:    fadd.d fa5, fa0, fa5
; RV32IFD-NEXT:    fnmsub.d fa0, fa5, fa1, fa2
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fnmsub_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x fa5, zero
; RV64IFD-NEXT:    fadd.d fa5, fa0, fa5
; RV64IFD-NEXT:    fnmsub.d fa0, fa5, fa1, fa2
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fnmsub_d:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT:    fadd.d a0, a0, a6
; RV32IZFINXZDINX-NEXT:    fnmsub.d a0, a0, a2, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fnmsub_d:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, zero
; RV64IZFINXZDINX-NEXT:    fnmsub.d a0, a0, a1, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fnmsub_d:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a5
; RV32I-NEXT:    mv s1, a4
; RV32I-NEXT:    mv s2, a3
; RV32I-NEXT:    mv s3, a2
; RV32I-NEXT:    li a2, 0
; RV32I-NEXT:    li a3, 0
; RV32I-NEXT:    call __adddf3
; RV32I-NEXT:    lui a2, 524288
; RV32I-NEXT:    xor a1, a1, a2
; RV32I-NEXT:    mv a2, s3
; RV32I-NEXT:    mv a3, s2
; RV32I-NEXT:    mv a4, s1
; RV32I-NEXT:    mv a5, s0
; RV32I-NEXT:    call fma
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fnmsub_d:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a2
; RV64I-NEXT:    mv s1, a1
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __adddf3
; RV64I-NEXT:    li a1, -1
; RV64I-NEXT:    slli a1, a1, 63
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    mv a1, s1
; RV64I-NEXT:    mv a2, s0
; RV64I-NEXT:    call fma
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %a_ = fadd double 0.0, %a
  %nega = fneg double %a_
  %1 = call double @llvm.experimental.constrained.fma.f64(double %nega, double %b, double %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

; Same as fnmsub_d but with b negated, checking the commuted fnmsub.d pattern.
define double @fnmsub_d_2(double %a, double %b, double %c) nounwind strictfp {
; RV32IFD-LABEL: fnmsub_d_2:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.d.w fa5, zero
; RV32IFD-NEXT:    fadd.d fa5, fa1, fa5
; RV32IFD-NEXT:    fnmsub.d fa0, fa5, fa0, fa2
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fnmsub_d_2:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x fa5, zero
; RV64IFD-NEXT:    fadd.d fa5, fa1, fa5
; RV64IFD-NEXT:    fnmsub.d fa0, fa5, fa0, fa2
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fnmsub_d_2:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT:    fadd.d a2, a2, a6
; RV32IZFINXZDINX-NEXT:    fnmsub.d a0, a2, a0, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fnmsub_d_2:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fadd.d a1, a1, zero
; RV64IZFINXZDINX-NEXT:    fnmsub.d a0, a1, a0, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fnmsub_d_2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a5
; RV32I-NEXT:    mv s1, a4
; RV32I-NEXT:    mv s2, a1
; RV32I-NEXT:    mv s3, a0
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:    mv a1, a3
; RV32I-NEXT:    li a2, 0
; RV32I-NEXT:    li a3, 0
; RV32I-NEXT:    call __adddf3
; RV32I-NEXT:    mv a2, a0
; RV32I-NEXT:    lui a3, 524288
; RV32I-NEXT:    xor a3, a1, a3
; RV32I-NEXT:    mv a0, s3
; RV32I-NEXT:    mv a1, s2
; RV32I-NEXT:    mv a4, s1
; RV32I-NEXT:    mv a5, s0
; RV32I-NEXT:    call fma
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fnmsub_d_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a2
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __adddf3
; RV64I-NEXT:    li a1, -1
; RV64I-NEXT:    slli a1, a1, 63
; RV64I-NEXT:    xor a1, a0, a1
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    mv a2, s0
; RV64I-NEXT:    call fma
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %b_ = fadd double 0.0, %b
  %negb = fneg double %b_
  %1 = call double @llvm.experimental.constrained.fma.f64(double %a, double %negb, double %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
