; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

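; This file tests the llvm.riscv.vfmul intrinsics (vector-vector and
; vector-scalar, masked and unmasked) across the f16/f32/f64 LMULs. The sed
; pipelines above rewrite the iXLen placeholder to i32/i64 so one source
; drives both the riscv32 and riscv64 runs.
;
; The unmasked operand order is (passthru, op1, op2, frm, vl). Every call
; below passes frm = 0 (round-to-nearest-even); because that is a static
; rounding mode rather than the dynamic sentinel, codegen brackets each
; vfmul with fsrmi (write frm) and fsrm (restore the previous value).
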
declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x half> %a
}

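; The masked variants take (passthru, op1, op2, mask, frm, vl, policy).
; Every masked call here passes policy = 1 (tail agnostic, mask
; undisturbed), which is why the generated vsetvli uses "ta, mu" and the
; result is merged into the passthru register under v0.t.
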
declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x half> %a
}

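; At LMUL=8 the masked vector-vector form runs out of vector argument
; registers: the passthru occupies v8m8 and op1 occupies v16m8, so op2 is
; passed indirectly and reloaded with vl8re16.v, and vl arrives in a1
; instead of a0. The nxv16f32 and nxv8f64 masked cases below follow the
; same pattern.
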
declare <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x half> %2,
    <vscale x 32 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x float> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x double> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}

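; The remaining tests cover the vector-scalar (.vf) form. The scalar
; multiplicand arrives in fa0 and is used directly by vfmul.vf, so no
; vector register is needed for the second source operand.
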
declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x half> %a
}

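; Unlike the vector-vector case, even the LMUL=8 masked .vf form needs no
; indirect operand: only the passthru and op1 are m8 vector values, so they
; fit in v8m8 and v16m8 while the scalar stays in fa0.
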
declare <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

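; The f64 tests rely on the ilp32d/lp64d ABIs requested in the RUN lines:
; with a hard-float double ABI the scalar double lands in fa0 on both
; riscv32 and riscv64, keeping the generated code identical across the two
; runs.
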
declare <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}