; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
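; llvm.vp.fabs tests covering every legal scalable FP element type and LMUL.
; Under +zvfh the intrinsic selects directly to a (possibly masked) vfabs.v.
; Under +zvfhmin there is no vector f16 arithmetic, so fabs is instead done
; as an integer AND: lui a1, 8 materializes 0x8000, addi a1, a1, -1 turns it
; into 0x7fff, and vand.vx clears bit 15, the f16 sign bit.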

declare <vscale x 1 x half> @llvm.vp.fabs.nxv1f16(<vscale x 1 x half>, <vscale x 1 x i1>, i32)

define <vscale x 1 x half> @vfabs_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv1f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv1f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 1 x half> @llvm.vp.fabs.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x half> %v
}

define <vscale x 1 x half> @vfabs_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv1f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv1f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 1 x half> @llvm.vp.fabs.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x half> %v
}

declare <vscale x 2 x half> @llvm.vp.fabs.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)

define <vscale x 2 x half> @vfabs_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x half> @llvm.vp.fabs.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x half> %v
}

define <vscale x 2 x half> @vfabs_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x half> @llvm.vp.fabs.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x half> %v
}

declare <vscale x 4 x half> @llvm.vp.fabs.nxv4f16(<vscale x 4 x half>, <vscale x 4 x i1>, i32)

define <vscale x 4 x half> @vfabs_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 4 x half> @llvm.vp.fabs.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x half> %v
}

define <vscale x 4 x half> @vfabs_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv4f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv4f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 4 x half> @llvm.vp.fabs.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x half> %v
}

declare <vscale x 8 x half> @llvm.vp.fabs.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, i32)

define <vscale x 8 x half> @vfabs_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 8 x half> @llvm.vp.fabs.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x half> %v
}

define <vscale x 8 x half> @vfabs_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv8f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv8f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 8 x half> @llvm.vp.fabs.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x half> %v
}

declare <vscale x 16 x half> @llvm.vp.fabs.nxv16f16(<vscale x 16 x half>, <vscale x 16 x i1>, i32)

define <vscale x 16 x half> @vfabs_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 16 x half> @llvm.vp.fabs.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x half> %v
}

define <vscale x 16 x half> @vfabs_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv16f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv16f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 16 x half> @llvm.vp.fabs.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x half> %v
}

declare <vscale x 32 x half> @llvm.vp.fabs.nxv32f16(<vscale x 32 x half>, <vscale x 32 x i1>, i32)

define <vscale x 32 x half> @vfabs_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv32f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv32f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.vp.fabs.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x half> %v
}

define <vscale x 32 x half> @vfabs_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv32f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv32f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.vp.fabs.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x half> %v
}

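; For f32 both configurations have native vector single-precision support,
; so ZVFH and ZVFHMIN generate identical code and the checks merge into the
; shared CHECK prefix.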
declare <vscale x 1 x float> @llvm.vp.fabs.nxv1f32(<vscale x 1 x float>, <vscale x 1 x i1>, i32)

define <vscale x 1 x float> @vfabs_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x float> @llvm.vp.fabs.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x float> %v
}

define <vscale x 1 x float> @vfabs_vv_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv1f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x float> @llvm.vp.fabs.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x float> %v
}

declare <vscale x 2 x float> @llvm.vp.fabs.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x float> @vfabs_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x float> @llvm.vp.fabs.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x float> %v
}

define <vscale x 2 x float> @vfabs_vv_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x float> @llvm.vp.fabs.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x float> %v
}

declare <vscale x 4 x float> @llvm.vp.fabs.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, i32)

define <vscale x 4 x float> @vfabs_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x float> @llvm.vp.fabs.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x float> %v
}

define <vscale x 4 x float> @vfabs_vv_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv4f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x float> @llvm.vp.fabs.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x float> %v
}

declare <vscale x 8 x float> @llvm.vp.fabs.nxv8f32(<vscale x 8 x float>, <vscale x 8 x i1>, i32)

define <vscale x 8 x float> @vfabs_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x float> @llvm.vp.fabs.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x float> %v
}

define <vscale x 8 x float> @vfabs_vv_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv8f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x float> @llvm.vp.fabs.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x float> %v
}

declare <vscale x 16 x float> @llvm.vp.fabs.nxv16f32(<vscale x 16 x float>, <vscale x 16 x i1>, i32)

define <vscale x 16 x float> @vfabs_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x float> @llvm.vp.fabs.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x float> %v
}

define <vscale x 16 x float> @vfabs_vv_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv16f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x float> @llvm.vp.fabs.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x float> %v
}

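; The f64 cases repeat the same pattern at e64, stepping LMUL from m1 to m8.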
declare <vscale x 1 x double> @llvm.vp.fabs.nxv1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)

define <vscale x 1 x double> @vfabs_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x double> @llvm.vp.fabs.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x double> %v
}

define <vscale x 1 x double> @vfabs_vv_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv1f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x double> @llvm.vp.fabs.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x double> %v
}

declare <vscale x 2 x double> @llvm.vp.fabs.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x double> @vfabs_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x double> @llvm.vp.fabs.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x double> %v
}

define <vscale x 2 x double> @vfabs_vv_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x double> @llvm.vp.fabs.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x double> %v
}

declare <vscale x 4 x double> @llvm.vp.fabs.nxv4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)

define <vscale x 4 x double> @vfabs_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x double> @llvm.vp.fabs.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x double> %v
}

define <vscale x 4 x double> @vfabs_vv_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv4f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x double> @llvm.vp.fabs.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x double> %v
}

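; nxv7f64 is a non-power-of-two element count that still fits in a single
; m8 register group, so it needs no splitting.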
declare <vscale x 7 x double> @llvm.vp.fabs.nxv7f64(<vscale x 7 x double>, <vscale x 7 x i1>, i32)

define <vscale x 7 x double> @vfabs_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv7f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 7 x double> @llvm.vp.fabs.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
  ret <vscale x 7 x double> %v
}

define <vscale x 7 x double> @vfabs_vv_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv7f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 7 x double> @llvm.vp.fabs.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 7 x double> %v
}

declare <vscale x 8 x double> @llvm.vp.fabs.nxv8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)

define <vscale x 8 x double> @vfabs_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x double> @llvm.vp.fabs.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x double> %v
}

define <vscale x 8 x double> @vfabs_vv_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv8f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x double> @llvm.vp.fabs.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x double> %v
}

; Test splitting.
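; nxv16f64 needs two m8 register groups. With a1 = vlenb (the VLMAX for
; e64/m8), the high half runs with EVL = usubsat(%evl, vlenb), computed by
; the sub/sltu/addi/and sequence, and the low half with min(%evl, vlenb).
; The high half's mask is extracted by sliding v0 down vlenb/8 bytes, one
; mask bit per element of the low half.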
declare <vscale x 16 x double> @llvm.vp.fabs.nxv16f64(<vscale x 16 x double>, <vscale x 16 x i1>, i32)

define <vscale x 16 x double> @vfabs_vv_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 3
; CHECK-NEXT:    sub a3, a0, a1
; CHECK-NEXT:    vslidedown.vx v0, v0, a2
; CHECK-NEXT:    sltu a2, a0, a3
; CHECK-NEXT:    addi a2, a2, -1
; CHECK-NEXT:    and a2, a2, a3
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v16, v16, v0.t
; CHECK-NEXT:    bltu a0, a1, .LBB32_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB32_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x double> @llvm.vp.fabs.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x double> %v
}

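; Same split for the unmasked form; with no mask to save and restore, the
; vmv1r.v copies of v0 disappear and only the EVL arithmetic remains.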
define <vscale x 16 x double> @vfabs_vv_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv16f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v16, v16
; CHECK-NEXT:    bltu a0, a1, .LBB33_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB33_2:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x double> @llvm.vp.fabs.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x double> %v
}