; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zvfbfmin,+v \
; RUN:   -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
; RUN:   --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zvfbfmin,+v \
; RUN:   -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
; RUN:   --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \
; RUN:   -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
; RUN:   --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \
; RUN:   -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
; RUN:   --check-prefixes=CHECK,ZVFHMIN

; Codegen for scalable-vector fneg. bf16 (and f16 without zvfh) lowers to a
; sign-bit flip via vxor.vx with 0x8000 (lui a0, 8); f16 with zvfh, f32, and
; f64 lower to vfneg.v.

define <vscale x 1 x bfloat> @nxv1bf16(<vscale x 1 x bfloat> %va) {
; CHECK-LABEL: nxv1bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 8
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 1 x bfloat> %va
  ret <vscale x 1 x bfloat> %vb
}

define <vscale x 2 x bfloat> @nxv2bf16(<vscale x 2 x bfloat> %va) {
; CHECK-LABEL: nxv2bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 8
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 2 x bfloat> %va
  ret <vscale x 2 x bfloat> %vb
}

define <vscale x 4 x bfloat> @nxv4bf16(<vscale x 4 x bfloat> %va) {
; CHECK-LABEL: nxv4bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 8
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 4 x bfloat> %va
  ret <vscale x 4 x bfloat> %vb
}

define <vscale x 8 x bfloat> @nxv8bf16(<vscale x 8 x bfloat> %va) {
; CHECK-LABEL: nxv8bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 8
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 8 x bfloat> %va
  ret <vscale x 8 x bfloat> %vb
}

define <vscale x 16 x bfloat> @nxv16bf16(<vscale x 16 x bfloat> %va) {
; CHECK-LABEL: nxv16bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 8
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 16 x bfloat> %va
  ret <vscale x 16 x bfloat> %vb
}

define <vscale x 32 x bfloat> @nxv32bf16(<vscale x 32 x bfloat> %va) {
; CHECK-LABEL: nxv32bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 8
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 32 x bfloat> %va
  ret <vscale x 32 x bfloat> %vb
}

define <vscale x 1 x half> @vfneg_vv_nxv1f16(<vscale x 1 x half> %va) {
; ZVFH-LABEL: vfneg_vv_nxv1f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv1f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a0
; ZVFHMIN-NEXT:    ret
  %vb = fneg <vscale x 1 x half> %va
  ret <vscale x 1 x half> %vb
}

define <vscale x 2 x half> @vfneg_vv_nxv2f16(<vscale x 2 x half> %va) {
; ZVFH-LABEL: vfneg_vv_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a0
; ZVFHMIN-NEXT:    ret
  %vb = fneg <vscale x 2 x half> %va
  ret <vscale x 2 x half> %vb
}

define <vscale x 4 x half> @vfneg_vv_nxv4f16(<vscale x 4 x half> %va) {
; ZVFH-LABEL: vfneg_vv_nxv4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a0
; ZVFHMIN-NEXT:    ret
  %vb = fneg <vscale x 4 x half> %va
  ret <vscale x 4 x half> %vb
}

define <vscale x 8 x half> @vfneg_vv_nxv8f16(<vscale x 8 x half> %va) {
; ZVFH-LABEL: vfneg_vv_nxv8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a0
; ZVFHMIN-NEXT:    ret
  %vb = fneg <vscale x 8 x half> %va
  ret <vscale x 8 x half> %vb
}

define <vscale x 16 x half> @vfneg_vv_nxv16f16(<vscale x 16 x half> %va) {
; ZVFH-LABEL: vfneg_vv_nxv16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a0
; ZVFHMIN-NEXT:    ret
  %vb = fneg <vscale x 16 x half> %va
  ret <vscale x 16 x half> %vb
}

define <vscale x 32 x half> @vfneg_vv_nxv32f16(<vscale x 32 x half> %va) {
; ZVFH-LABEL: vfneg_vv_nxv32f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv32f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a0
; ZVFHMIN-NEXT:    ret
  %vb = fneg <vscale x 32 x half> %va
  ret <vscale x 32 x half> %vb
}

define <vscale x 1 x float> @vfneg_vv_nxv1f32(<vscale x 1 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 1 x float> %va
  ret <vscale x 1 x float> %vb
}

define <vscale x 2 x float> @vfneg_vv_nxv2f32(<vscale x 2 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 2 x float> %va
  ret <vscale x 2 x float> %vb
}

define <vscale x 4 x float> @vfneg_vv_nxv4f32(<vscale x 4 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 4 x float> %va
  ret <vscale x 4 x float> %vb
}

define <vscale x 8 x float> @vfneg_vv_nxv8f32(<vscale x 8 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 8 x float> %va
  ret <vscale x 8 x float> %vb
}

define <vscale x 16 x float> @vfneg_vv_nxv16f32(<vscale x 16 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 16 x float> %va
  ret <vscale x 16 x float> %vb
}

define <vscale x 1 x double> @vfneg_vv_nxv1f64(<vscale x 1 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 1 x double> %va
  ret <vscale x 1 x double> %vb
}

define <vscale x 2 x double> @vfneg_vv_nxv2f64(<vscale x 2 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 2 x double> %va
  ret <vscale x 2 x double> %vb
}

define <vscale x 4 x double> @vfneg_vv_nxv4f64(<vscale x 4 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 4 x double> %va
  ret <vscale x 4 x double> %vb
}

define <vscale x 8 x double> @vfneg_vv_nxv8f64(<vscale x 8 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 8 x double> %va
  ret <vscale x 8 x double> %vb
}