; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s

declare <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
declare <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
declare <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x i1> %m, i32 %vl)

; (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
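; Per the RVV spec, vfmsub.vv computes vd = +(vd * vs1) - vs2, so the masked
; vfmsub.vv checked below yields x*y - z in one instruction, with y in v9 (vd),
; x in v8 (vs1), and z in v10 (vs2).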
define <vscale x 1 x double> @test1(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: test1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmsub.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
  %2 = call fast <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %1, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x double> %2
}

; (fsub z, (fmul x, y)) -> (fma (fneg y), x, z)
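; vfnmsub.vv computes vd = -(vd * vs1) + vs2, so with y in v9, x in v8, and
; z in v10 the checked instruction yields -(y*x) + z = z - x*y.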
define <vscale x 1 x double> @test2(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: test2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfnmsub.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
  %2 = call fast <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %z, <vscale x 1 x double> %1, <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x double> %2
}

; (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
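; vfnmadd.vv computes vd = -(vd * vs1) - vs2, so with y in v9, x in v8, and
; z in v10 the checked instruction yields -(y*x) - z = -(x*y) - z.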
define <vscale x 1 x double> @test3(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: test3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfnmadd.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
  %2 = call fast <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %1, <vscale x 1 x i1> %m, i32 %vl)
  %3 = call fast <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %2, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x double> %3
}