; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32F %s
; RUN: llc -mtriple=riscv32 -mattr=+zfinx -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32ZFINX %s
; RUN: llc -mtriple=riscv32 -mattr=+f,+d -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32FD %s
; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi=lp64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64F %s
; RUN: llc -mtriple=riscv64 -mattr=+zfinx -target-abi=lp64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64ZFINX %s
; RUN: llc -mtriple=riscv64 -mattr=+f,+d -target-abi=lp64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64FD %s

; Each function below performs extra work to ensure that `%a3` starts in a
; floating-point register (on machines that have them) and that the result
; of the bitwise operation is subsequently needed in a floating-point
; register. This means the optimisations should fire even when using the
; soft-float ABI on a machine with hardware floating-point support.

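; Each test applies an integer sign-bit manipulation to the bits of a
; floating-point value, which the DAG combiner is expected to rewrite as
; the corresponding floating-point operation (see the CHECK lines):
;   and with the sign-bit-clear mask -> fabs
;   xor with the sign bit            -> fneg
;   or  with the sign bit            -> fneg(fabs)
; As an illustrative sketch (not itself a test input), the f32 fabs form
; of the pattern is:
;   %b = bitcast float %x to i32
;   %m = and i32 %b, 2147483647    ; 0x7fffffff clears bit 31, the sign bit
;   %r = bitcast i32 %m to float   ; equivalent to fabs(%x)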
define float @bitcast_and(float %a1, float %a2) nounwind {
; RV32F-LABEL: bitcast_and:
; RV32F:       # %bb.0:
; RV32F-NEXT:    fmv.w.x fa5, a1
; RV32F-NEXT:    fmv.w.x fa4, a0
; RV32F-NEXT:    fadd.s fa5, fa4, fa5
; RV32F-NEXT:    fabs.s fa5, fa5
; RV32F-NEXT:    fadd.s fa5, fa4, fa5
; RV32F-NEXT:    fmv.x.w a0, fa5
; RV32F-NEXT:    ret
;
; RV32ZFINX-LABEL: bitcast_and:
; RV32ZFINX:       # %bb.0:
; RV32ZFINX-NEXT:    fadd.s a1, a0, a1
; RV32ZFINX-NEXT:    fabs.s a1, a1
; RV32ZFINX-NEXT:    fadd.s a0, a0, a1
; RV32ZFINX-NEXT:    ret
;
; RV32FD-LABEL: bitcast_and:
; RV32FD:       # %bb.0:
; RV32FD-NEXT:    fmv.w.x fa5, a1
; RV32FD-NEXT:    fmv.w.x fa4, a0
; RV32FD-NEXT:    fadd.s fa5, fa4, fa5
; RV32FD-NEXT:    fabs.s fa5, fa5
; RV32FD-NEXT:    fadd.s fa5, fa4, fa5
; RV32FD-NEXT:    fmv.x.w a0, fa5
; RV32FD-NEXT:    ret
;
; RV64F-LABEL: bitcast_and:
; RV64F:       # %bb.0:
; RV64F-NEXT:    fmv.w.x fa5, a1
; RV64F-NEXT:    fmv.w.x fa4, a0
; RV64F-NEXT:    fadd.s fa5, fa4, fa5
; RV64F-NEXT:    fabs.s fa5, fa5
; RV64F-NEXT:    fadd.s fa5, fa4, fa5
; RV64F-NEXT:    fmv.x.w a0, fa5
; RV64F-NEXT:    ret
;
; RV64ZFINX-LABEL: bitcast_and:
; RV64ZFINX:       # %bb.0:
; RV64ZFINX-NEXT:    fadd.s a1, a0, a1
; RV64ZFINX-NEXT:    fabs.s a1, a1
; RV64ZFINX-NEXT:    fadd.s a0, a0, a1
; RV64ZFINX-NEXT:    ret
;
; RV64FD-LABEL: bitcast_and:
; RV64FD:       # %bb.0:
; RV64FD-NEXT:    fmv.w.x fa5, a1
; RV64FD-NEXT:    fmv.w.x fa4, a0
; RV64FD-NEXT:    fadd.s fa5, fa4, fa5
; RV64FD-NEXT:    fabs.s fa5, fa5
; RV64FD-NEXT:    fadd.s fa5, fa4, fa5
; RV64FD-NEXT:    fmv.x.w a0, fa5
; RV64FD-NEXT:    ret
  %a3 = fadd float %a1, %a2
  %bc1 = bitcast float %a3 to i32
  %and = and i32 %bc1, 2147483647
  %bc2 = bitcast i32 %and to float
  %a4 = fadd float %a1, %bc2
  ret float %a4
}

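; The f64 variant: the mask 9223372036854775807 (0x7fffffffffffffff)
; clears bit 63. Only the configurations with hardware double support
; (RV32FD and RV64FD) fold this to fabs.d; the others expand the double
; adds to __adddf3 libcalls and clear the sign bit in integer registers
; with a shift-left/shift-right-by-1 pair on the word holding the sign.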
define double @bitcast_double_and(double %a1, double %a2) nounwind {
; RV32F-LABEL: bitcast_double_and:
; RV32F:       # %bb.0:
; RV32F-NEXT:    addi sp, sp, -16
; RV32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32F-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32F-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32F-NEXT:    mv s0, a1
; RV32F-NEXT:    mv s1, a0
; RV32F-NEXT:    call __adddf3
; RV32F-NEXT:    mv a2, a0
; RV32F-NEXT:    slli a1, a1, 1
; RV32F-NEXT:    srli a3, a1, 1
; RV32F-NEXT:    mv a0, s1
; RV32F-NEXT:    mv a1, s0
; RV32F-NEXT:    call __adddf3
; RV32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32F-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32F-NEXT:    addi sp, sp, 16
; RV32F-NEXT:    ret
;
; RV32ZFINX-LABEL: bitcast_double_and:
; RV32ZFINX:       # %bb.0:
; RV32ZFINX-NEXT:    addi sp, sp, -16
; RV32ZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT:    mv s0, a1
; RV32ZFINX-NEXT:    mv s1, a0
; RV32ZFINX-NEXT:    call __adddf3
; RV32ZFINX-NEXT:    mv a2, a0
; RV32ZFINX-NEXT:    slli a1, a1, 1
; RV32ZFINX-NEXT:    srli a3, a1, 1
; RV32ZFINX-NEXT:    mv a0, s1
; RV32ZFINX-NEXT:    mv a1, s0
; RV32ZFINX-NEXT:    call __adddf3
; RV32ZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT:    addi sp, sp, 16
; RV32ZFINX-NEXT:    ret
;
; RV32FD-LABEL: bitcast_double_and:
; RV32FD:       # %bb.0:
; RV32FD-NEXT:    addi sp, sp, -16
; RV32FD-NEXT:    sw a2, 8(sp)
; RV32FD-NEXT:    sw a3, 12(sp)
; RV32FD-NEXT:    fld fa5, 8(sp)
; RV32FD-NEXT:    sw a0, 8(sp)
; RV32FD-NEXT:    sw a1, 12(sp)
; RV32FD-NEXT:    fld fa4, 8(sp)
; RV32FD-NEXT:    fadd.d fa5, fa4, fa5
; RV32FD-NEXT:    fabs.d fa5, fa5
; RV32FD-NEXT:    fadd.d fa5, fa4, fa5
; RV32FD-NEXT:    fsd fa5, 8(sp)
; RV32FD-NEXT:    lw a0, 8(sp)
; RV32FD-NEXT:    lw a1, 12(sp)
; RV32FD-NEXT:    addi sp, sp, 16
; RV32FD-NEXT:    ret
;
; RV64F-LABEL: bitcast_double_and:
; RV64F:       # %bb.0:
; RV64F-NEXT:    addi sp, sp, -16
; RV64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64F-NEXT:    mv s0, a0
; RV64F-NEXT:    call __adddf3
; RV64F-NEXT:    slli a0, a0, 1
; RV64F-NEXT:    srli a1, a0, 1
; RV64F-NEXT:    mv a0, s0
; RV64F-NEXT:    call __adddf3
; RV64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64F-NEXT:    addi sp, sp, 16
; RV64F-NEXT:    ret
;
; RV64ZFINX-LABEL: bitcast_double_and:
; RV64ZFINX:       # %bb.0:
; RV64ZFINX-NEXT:    addi sp, sp, -16
; RV64ZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64ZFINX-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64ZFINX-NEXT:    mv s0, a0
; RV64ZFINX-NEXT:    call __adddf3
; RV64ZFINX-NEXT:    slli a0, a0, 1
; RV64ZFINX-NEXT:    srli a1, a0, 1
; RV64ZFINX-NEXT:    mv a0, s0
; RV64ZFINX-NEXT:    call __adddf3
; RV64ZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64ZFINX-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64ZFINX-NEXT:    addi sp, sp, 16
; RV64ZFINX-NEXT:    ret
;
; RV64FD-LABEL: bitcast_double_and:
; RV64FD:       # %bb.0:
; RV64FD-NEXT:    fmv.d.x fa5, a1
; RV64FD-NEXT:    fmv.d.x fa4, a0
; RV64FD-NEXT:    fadd.d fa5, fa4, fa5
; RV64FD-NEXT:    fabs.d fa5, fa5
; RV64FD-NEXT:    fadd.d fa5, fa4, fa5
; RV64FD-NEXT:    fmv.x.d a0, fa5
; RV64FD-NEXT:    ret
  %a3 = fadd double %a1, %a2
  %bc1 = bitcast double %a3 to i64
  %and = and i64 %bc1, 9223372036854775807
  %bc2 = bitcast i64 %and to double
  %a4 = fadd double %a1, %bc2
  ret double %a4
}

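; Flipping the sign bit with xor 2147483648 (0x80000000) should instead
; fold to fneg.s in every configuration.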
define float @bitcast_xor(float %a1, float %a2) nounwind {
; RV32F-LABEL: bitcast_xor:
; RV32F:       # %bb.0:
; RV32F-NEXT:    fmv.w.x fa5, a1
; RV32F-NEXT:    fmv.w.x fa4, a0
; RV32F-NEXT:    fmul.s fa5, fa4, fa5
; RV32F-NEXT:    fneg.s fa5, fa5
; RV32F-NEXT:    fmul.s fa5, fa4, fa5
; RV32F-NEXT:    fmv.x.w a0, fa5
; RV32F-NEXT:    ret
;
; RV32ZFINX-LABEL: bitcast_xor:
; RV32ZFINX:       # %bb.0:
; RV32ZFINX-NEXT:    fmul.s a1, a0, a1
; RV32ZFINX-NEXT:    fneg.s a1, a1
; RV32ZFINX-NEXT:    fmul.s a0, a0, a1
; RV32ZFINX-NEXT:    ret
;
; RV32FD-LABEL: bitcast_xor:
; RV32FD:       # %bb.0:
; RV32FD-NEXT:    fmv.w.x fa5, a1
; RV32FD-NEXT:    fmv.w.x fa4, a0
; RV32FD-NEXT:    fmul.s fa5, fa4, fa5
; RV32FD-NEXT:    fneg.s fa5, fa5
; RV32FD-NEXT:    fmul.s fa5, fa4, fa5
; RV32FD-NEXT:    fmv.x.w a0, fa5
; RV32FD-NEXT:    ret
;
; RV64F-LABEL: bitcast_xor:
; RV64F:       # %bb.0:
; RV64F-NEXT:    fmv.w.x fa5, a1
; RV64F-NEXT:    fmv.w.x fa4, a0
; RV64F-NEXT:    fmul.s fa5, fa4, fa5
; RV64F-NEXT:    fneg.s fa5, fa5
; RV64F-NEXT:    fmul.s fa5, fa4, fa5
; RV64F-NEXT:    fmv.x.w a0, fa5
; RV64F-NEXT:    ret
;
; RV64ZFINX-LABEL: bitcast_xor:
; RV64ZFINX:       # %bb.0:
; RV64ZFINX-NEXT:    fmul.s a1, a0, a1
; RV64ZFINX-NEXT:    fneg.s a1, a1
; RV64ZFINX-NEXT:    fmul.s a0, a0, a1
; RV64ZFINX-NEXT:    ret
;
; RV64FD-LABEL: bitcast_xor:
; RV64FD:       # %bb.0:
; RV64FD-NEXT:    fmv.w.x fa5, a1
; RV64FD-NEXT:    fmv.w.x fa4, a0
; RV64FD-NEXT:    fmul.s fa5, fa4, fa5
; RV64FD-NEXT:    fneg.s fa5, fa5
; RV64FD-NEXT:    fmul.s fa5, fa4, fa5
; RV64FD-NEXT:    fmv.x.w a0, fa5
; RV64FD-NEXT:    ret
  %a3 = fmul float %a1, %a2
  %bc1 = bitcast float %a3 to i32
  %xor = xor i32 %bc1, 2147483648
  %bc2 = bitcast i32 %xor to float
  %a4 = fmul float %a1, %bc2
  ret float %a4
}

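; The f64 sign-bit flip: with hardware double support this folds to
; fneg.d; otherwise 0x8000000000000000 is materialised in integer
; registers (lui 524288 for the high word on RV32; li -1 then slli 63
; on RV64) and xor'd between the __muldf3 libcalls.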
define double @bitcast_double_xor(double %a1, double %a2) nounwind {
; RV32F-LABEL: bitcast_double_xor:
; RV32F:       # %bb.0:
; RV32F-NEXT:    addi sp, sp, -16
; RV32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32F-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32F-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32F-NEXT:    mv s0, a1
; RV32F-NEXT:    mv s1, a0
; RV32F-NEXT:    call __muldf3
; RV32F-NEXT:    mv a2, a0
; RV32F-NEXT:    lui a3, 524288
; RV32F-NEXT:    xor a3, a1, a3
; RV32F-NEXT:    mv a0, s1
; RV32F-NEXT:    mv a1, s0
; RV32F-NEXT:    call __muldf3
; RV32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32F-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32F-NEXT:    addi sp, sp, 16
; RV32F-NEXT:    ret
;
; RV32ZFINX-LABEL: bitcast_double_xor:
; RV32ZFINX:       # %bb.0:
; RV32ZFINX-NEXT:    addi sp, sp, -16
; RV32ZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT:    mv s0, a1
; RV32ZFINX-NEXT:    mv s1, a0
; RV32ZFINX-NEXT:    call __muldf3
; RV32ZFINX-NEXT:    mv a2, a0
; RV32ZFINX-NEXT:    lui a3, 524288
; RV32ZFINX-NEXT:    xor a3, a1, a3
; RV32ZFINX-NEXT:    mv a0, s1
; RV32ZFINX-NEXT:    mv a1, s0
; RV32ZFINX-NEXT:    call __muldf3
; RV32ZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT:    addi sp, sp, 16
; RV32ZFINX-NEXT:    ret
;
; RV32FD-LABEL: bitcast_double_xor:
; RV32FD:       # %bb.0:
; RV32FD-NEXT:    addi sp, sp, -16
; RV32FD-NEXT:    sw a2, 8(sp)
; RV32FD-NEXT:    sw a3, 12(sp)
; RV32FD-NEXT:    fld fa5, 8(sp)
; RV32FD-NEXT:    sw a0, 8(sp)
; RV32FD-NEXT:    sw a1, 12(sp)
; RV32FD-NEXT:    fld fa4, 8(sp)
; RV32FD-NEXT:    fmul.d fa5, fa4, fa5
; RV32FD-NEXT:    fneg.d fa5, fa5
; RV32FD-NEXT:    fmul.d fa5, fa4, fa5
; RV32FD-NEXT:    fsd fa5, 8(sp)
; RV32FD-NEXT:    lw a0, 8(sp)
; RV32FD-NEXT:    lw a1, 12(sp)
; RV32FD-NEXT:    addi sp, sp, 16
; RV32FD-NEXT:    ret
;
; RV64F-LABEL: bitcast_double_xor:
; RV64F:       # %bb.0:
; RV64F-NEXT:    addi sp, sp, -16
; RV64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64F-NEXT:    mv s0, a0
; RV64F-NEXT:    call __muldf3
; RV64F-NEXT:    li a1, -1
; RV64F-NEXT:    slli a1, a1, 63
; RV64F-NEXT:    xor a1, a0, a1
; RV64F-NEXT:    mv a0, s0
; RV64F-NEXT:    call __muldf3
; RV64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64F-NEXT:    addi sp, sp, 16
; RV64F-NEXT:    ret
;
; RV64ZFINX-LABEL: bitcast_double_xor:
; RV64ZFINX:       # %bb.0:
; RV64ZFINX-NEXT:    addi sp, sp, -16
; RV64ZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64ZFINX-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64ZFINX-NEXT:    mv s0, a0
; RV64ZFINX-NEXT:    call __muldf3
; RV64ZFINX-NEXT:    li a1, -1
; RV64ZFINX-NEXT:    slli a1, a1, 63
; RV64ZFINX-NEXT:    xor a1, a0, a1
; RV64ZFINX-NEXT:    mv a0, s0
; RV64ZFINX-NEXT:    call __muldf3
; RV64ZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64ZFINX-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64ZFINX-NEXT:    addi sp, sp, 16
; RV64ZFINX-NEXT:    ret
;
; RV64FD-LABEL: bitcast_double_xor:
; RV64FD:       # %bb.0:
; RV64FD-NEXT:    fmv.d.x fa5, a1
; RV64FD-NEXT:    fmv.d.x fa4, a0
; RV64FD-NEXT:    fmul.d fa5, fa4, fa5
; RV64FD-NEXT:    fneg.d fa5, fa5
; RV64FD-NEXT:    fmul.d fa5, fa4, fa5
; RV64FD-NEXT:    fmv.x.d a0, fa5
; RV64FD-NEXT:    ret
  %a3 = fmul double %a1, %a2
  %bc1 = bitcast double %a3 to i64
  %xor = xor i64 %bc1, 9223372036854775808
  %bc2 = bitcast i64 %xor to double
  %a4 = fmul double %a1, %bc2
  ret double %a4
}

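; Setting the sign bit with or 2147483648 (0x80000000) forces the result
; negative, so it should fold to fabs.s followed by fneg.s.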
define float @bitcast_or(float %a1, float %a2) nounwind {
; RV32F-LABEL: bitcast_or:
; RV32F:       # %bb.0:
; RV32F-NEXT:    fmv.w.x fa5, a1
; RV32F-NEXT:    fmv.w.x fa4, a0
; RV32F-NEXT:    fmul.s fa5, fa4, fa5
; RV32F-NEXT:    fabs.s fa5, fa5
; RV32F-NEXT:    fneg.s fa5, fa5
; RV32F-NEXT:    fmul.s fa5, fa4, fa5
; RV32F-NEXT:    fmv.x.w a0, fa5
; RV32F-NEXT:    ret
;
; RV32ZFINX-LABEL: bitcast_or:
; RV32ZFINX:       # %bb.0:
; RV32ZFINX-NEXT:    fmul.s a1, a0, a1
; RV32ZFINX-NEXT:    fabs.s a1, a1
; RV32ZFINX-NEXT:    fneg.s a1, a1
; RV32ZFINX-NEXT:    fmul.s a0, a0, a1
; RV32ZFINX-NEXT:    ret
;
; RV32FD-LABEL: bitcast_or:
; RV32FD:       # %bb.0:
; RV32FD-NEXT:    fmv.w.x fa5, a1
; RV32FD-NEXT:    fmv.w.x fa4, a0
; RV32FD-NEXT:    fmul.s fa5, fa4, fa5
; RV32FD-NEXT:    fabs.s fa5, fa5
; RV32FD-NEXT:    fneg.s fa5, fa5
; RV32FD-NEXT:    fmul.s fa5, fa4, fa5
; RV32FD-NEXT:    fmv.x.w a0, fa5
; RV32FD-NEXT:    ret
;
; RV64F-LABEL: bitcast_or:
; RV64F:       # %bb.0:
; RV64F-NEXT:    fmv.w.x fa5, a1
; RV64F-NEXT:    fmv.w.x fa4, a0
; RV64F-NEXT:    fmul.s fa5, fa4, fa5
; RV64F-NEXT:    fabs.s fa5, fa5
; RV64F-NEXT:    fneg.s fa5, fa5
; RV64F-NEXT:    fmul.s fa5, fa4, fa5
; RV64F-NEXT:    fmv.x.w a0, fa5
; RV64F-NEXT:    ret
;
; RV64ZFINX-LABEL: bitcast_or:
; RV64ZFINX:       # %bb.0:
; RV64ZFINX-NEXT:    fmul.s a1, a0, a1
; RV64ZFINX-NEXT:    fabs.s a1, a1
; RV64ZFINX-NEXT:    fneg.s a1, a1
; RV64ZFINX-NEXT:    fmul.s a0, a0, a1
; RV64ZFINX-NEXT:    ret
;
; RV64FD-LABEL: bitcast_or:
; RV64FD:       # %bb.0:
; RV64FD-NEXT:    fmv.w.x fa5, a1
; RV64FD-NEXT:    fmv.w.x fa4, a0
; RV64FD-NEXT:    fmul.s fa5, fa4, fa5
; RV64FD-NEXT:    fabs.s fa5, fa5
; RV64FD-NEXT:    fneg.s fa5, fa5
; RV64FD-NEXT:    fmul.s fa5, fa4, fa5
; RV64FD-NEXT:    fmv.x.w a0, fa5
; RV64FD-NEXT:    ret
  %a3 = fmul float %a1, %a2
  %bc1 = bitcast float %a3 to i32
  %or = or i32 %bc1, 2147483648
  %bc2 = bitcast i32 %or to float
  %a4 = fmul float %a1, %bc2
  ret float %a4
}

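; The f64 variant of the sign-bit set: fabs.d followed by fneg.d with
; hardware double support, otherwise an integer or against the
; materialised sign-bit constant between the __muldf3 libcalls.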
define double @bitcast_double_or(double %a1, double %a2) nounwind {
; RV32F-LABEL: bitcast_double_or:
; RV32F:       # %bb.0:
; RV32F-NEXT:    addi sp, sp, -16
; RV32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32F-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32F-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32F-NEXT:    mv s0, a1
; RV32F-NEXT:    mv s1, a0
; RV32F-NEXT:    call __muldf3
; RV32F-NEXT:    mv a2, a0
; RV32F-NEXT:    lui a3, 524288
; RV32F-NEXT:    or a3, a1, a3
; RV32F-NEXT:    mv a0, s1
; RV32F-NEXT:    mv a1, s0
; RV32F-NEXT:    call __muldf3
; RV32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32F-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32F-NEXT:    addi sp, sp, 16
; RV32F-NEXT:    ret
;
; RV32ZFINX-LABEL: bitcast_double_or:
; RV32ZFINX:       # %bb.0:
; RV32ZFINX-NEXT:    addi sp, sp, -16
; RV32ZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT:    mv s0, a1
; RV32ZFINX-NEXT:    mv s1, a0
; RV32ZFINX-NEXT:    call __muldf3
; RV32ZFINX-NEXT:    mv a2, a0
; RV32ZFINX-NEXT:    lui a3, 524288
; RV32ZFINX-NEXT:    or a3, a1, a3
; RV32ZFINX-NEXT:    mv a0, s1
; RV32ZFINX-NEXT:    mv a1, s0
; RV32ZFINX-NEXT:    call __muldf3
; RV32ZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT:    addi sp, sp, 16
; RV32ZFINX-NEXT:    ret
;
; RV32FD-LABEL: bitcast_double_or:
; RV32FD:       # %bb.0:
; RV32FD-NEXT:    addi sp, sp, -16
; RV32FD-NEXT:    sw a2, 8(sp)
; RV32FD-NEXT:    sw a3, 12(sp)
; RV32FD-NEXT:    fld fa5, 8(sp)
; RV32FD-NEXT:    sw a0, 8(sp)
; RV32FD-NEXT:    sw a1, 12(sp)
; RV32FD-NEXT:    fld fa4, 8(sp)
; RV32FD-NEXT:    fmul.d fa5, fa4, fa5
; RV32FD-NEXT:    fabs.d fa5, fa5
; RV32FD-NEXT:    fneg.d fa5, fa5
; RV32FD-NEXT:    fmul.d fa5, fa4, fa5
; RV32FD-NEXT:    fsd fa5, 8(sp)
; RV32FD-NEXT:    lw a0, 8(sp)
; RV32FD-NEXT:    lw a1, 12(sp)
; RV32FD-NEXT:    addi sp, sp, 16
; RV32FD-NEXT:    ret
;
; RV64F-LABEL: bitcast_double_or:
; RV64F:       # %bb.0:
; RV64F-NEXT:    addi sp, sp, -16
; RV64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64F-NEXT:    mv s0, a0
; RV64F-NEXT:    call __muldf3
; RV64F-NEXT:    li a1, -1
; RV64F-NEXT:    slli a1, a1, 63
; RV64F-NEXT:    or a1, a0, a1
; RV64F-NEXT:    mv a0, s0
; RV64F-NEXT:    call __muldf3
; RV64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64F-NEXT:    addi sp, sp, 16
; RV64F-NEXT:    ret
;
; RV64ZFINX-LABEL: bitcast_double_or:
; RV64ZFINX:       # %bb.0:
; RV64ZFINX-NEXT:    addi sp, sp, -16
; RV64ZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64ZFINX-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64ZFINX-NEXT:    mv s0, a0
; RV64ZFINX-NEXT:    call __muldf3
; RV64ZFINX-NEXT:    li a1, -1
; RV64ZFINX-NEXT:    slli a1, a1, 63
; RV64ZFINX-NEXT:    or a1, a0, a1
; RV64ZFINX-NEXT:    mv a0, s0
; RV64ZFINX-NEXT:    call __muldf3
; RV64ZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64ZFINX-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64ZFINX-NEXT:    addi sp, sp, 16
; RV64ZFINX-NEXT:    ret
;
; RV64FD-LABEL: bitcast_double_or:
; RV64FD:       # %bb.0:
; RV64FD-NEXT:    fmv.d.x fa5, a1
; RV64FD-NEXT:    fmv.d.x fa4, a0
; RV64FD-NEXT:    fmul.d fa5, fa4, fa5
; RV64FD-NEXT:    fabs.d fa5, fa5
; RV64FD-NEXT:    fneg.d fa5, fa5
; RV64FD-NEXT:    fmul.d fa5, fa4, fa5
; RV64FD-NEXT:    fmv.x.d a0, fa5
; RV64FD-NEXT:    ret
  %a3 = fmul double %a1, %a2
  %bc1 = bitcast double %a3 to i64
  %or = or i64 %bc1, 9223372036854775808
  %bc2 = bitcast i64 %or to double
  %a4 = fmul double %a1, %bc2
  ret double %a4
}