; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN

declare <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half>, <vscale x 1 x i1>, i32)

define <vscale x 1 x half> @vfneg_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv1f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv1f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x half> %v
}

define <vscale x 1 x half> @vfneg_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv1f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv1f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x half> %v
}

declare <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)

define <vscale x 2 x half> @vfneg_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x half> %v
}

define <vscale x 2 x half> @vfneg_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x half> %v
}
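; The remaining f16 cases below follow the same pattern at larger LMUL: ZVFH
; selects vfneg.v directly, while ZVFHMIN (which has no vector f16 arithmetic)
; flips the sign bit with vxor.vx against 0x8000, materialized by lui a1, 8.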
declare <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half>, <vscale x 4 x i1>, i32)

define <vscale x 4 x half> @vfneg_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x half> %v
}

define <vscale x 4 x half> @vfneg_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv4f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv4f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x half> %v
}

declare <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, i32)

define <vscale x 8 x half> @vfneg_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x half> %v
}

define <vscale x 8 x half> @vfneg_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv8f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv8f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x half> %v
}

declare <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half>, <vscale x 16 x i1>, i32)

define <vscale x 16 x half> @vfneg_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x half> %v
}

define <vscale x 16 x half> @vfneg_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv16f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv16f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x half> %v
}

declare <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half>, <vscale x 32 x i1>, i32)

define <vscale x 32 x half> @vfneg_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv32f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv32f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x half> %v
}

define <vscale x 32 x half> @vfneg_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv32f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv32f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x half> %v
}
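; Both configurations provide native vfneg.v for f32 and f64 element types, so
; the cases below share the common CHECK prefix.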
declare <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float>, <vscale x 1 x i1>, i32)

define <vscale x 1 x float> @vfneg_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x float> %v
}

define <vscale x 1 x float> @vfneg_vv_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv1f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x float> %v
}

declare <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x float> @vfneg_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x float> %v
}

define <vscale x 2 x float> @vfneg_vv_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x float> %v
}

declare <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, i32)

define <vscale x 4 x float> @vfneg_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x float> %v
}

define <vscale x 4 x float> @vfneg_vv_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv4f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x float> %v
}

declare <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float>, <vscale x 8 x i1>, i32)

define <vscale x 8 x float> @vfneg_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x float> %v
}

define <vscale x 8 x float> @vfneg_vv_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv8f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x float> %v
}

declare <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float>, <vscale x 16 x i1>, i32)

define <vscale x 16 x float> @vfneg_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x float> %v
}

define <vscale x 16 x float> @vfneg_vv_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv16f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x float> %v
}

declare <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)

define <vscale x 1 x double> @vfneg_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x double> %v
}

define <vscale x 1 x double> @vfneg_vv_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv1f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x double> %v
}

declare <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x double> @vfneg_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x double> %v
}

define <vscale x 2 x double> @vfneg_vv_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x double> %v
}

declare <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)

define <vscale x 4 x double> @vfneg_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x double> %v
}

define <vscale x 4 x double> @vfneg_vv_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv4f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x double> %v
}
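; nxv7f64 is not a power of two, but it still fits in a single m8 register
; group, so no splitting is needed.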
declare <vscale x 7 x double> @llvm.vp.fneg.nxv7f64(<vscale x 7 x double>, <vscale x 7 x i1>, i32)

define <vscale x 7 x double> @vfneg_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv7f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 7 x double> @llvm.vp.fneg.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
  ret <vscale x 7 x double> %v
}

define <vscale x 7 x double> @vfneg_vv_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv7f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 7 x double> @llvm.vp.fneg.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 7 x double> %v
}

declare <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)

define <vscale x 8 x double> @vfneg_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x double> %v
}

define <vscale x 8 x double> @vfneg_vv_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv8f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x double> %v
}

; Test splitting.
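; nxv16f64 occupies two m8 register groups (v8 and v16), so the operation is
; split: the high half runs with EVL = max(evl - vlenb, 0), computed by the
; sltu/addi/and sequence (vlenb is also the number of f64 elements in one m8
; group), the low half with EVL = min(evl, vlenb), and in the masked case the
; mask is slid down to cover the high half.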
declare <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double>, <vscale x 16 x i1>, i32)

define <vscale x 16 x double> @vfneg_vv_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 3
; CHECK-NEXT:    sub a3, a0, a1
; CHECK-NEXT:    vslidedown.vx v0, v0, a2
; CHECK-NEXT:    sltu a2, a0, a3
; CHECK-NEXT:    addi a2, a2, -1
; CHECK-NEXT:    and a2, a2, a3
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v16, v16, v0.t
; CHECK-NEXT:    bltu a0, a1, .LBB32_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB32_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x double> %v
}

define <vscale x 16 x double> @vfneg_vv_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv16f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v16, v16
; CHECK-NEXT:    bltu a0, a1, .LBB33_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB33_2:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x double> %v
}