; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfhmin,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfhmin,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
; RUN: sed 's/iXLen/i32/g' %s | not --crash llc -mtriple=riscv32 -mattr=+v,+zvfhmin \
; RUN:   -target-abi=ilp32d 2>&1 | FileCheck %s --check-prefixes=ZVFMIN
; RUN: sed 's/iXLen/i64/g' %s | not --crash llc -mtriple=riscv64 -mattr=+v,+zvfhmin \
; RUN:   -target-abi=lp64d 2>&1 | FileCheck %s --check-prefixes=ZVFMIN

; ZVFMIN: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vfadd

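; The unmasked intrinsics take (passthru, op1, op2, frm, avl); the masked
; variants take (passthru, op1, op2, mask, frm, avl, policy). The frm operand
; of 0 (rne) is why every test brackets the vfadd with an fsrmi/fsrm pair, and
; the policy operand of 1 (tail agnostic, mask undisturbed) matches the
; "ta, mu" in the masked vsetvli sequences.
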
declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

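; At LMUL=8 the masked tests run out of vector argument registers: the
; passthru, first source, and mask occupy v8, v16, and v0, so the second
; vector source is passed indirectly and reloaded with vl8re16.v here (and
; with vl8re32.v/vl8re64.v in the f32/f64 versions below), which also shifts
; the avl from a0 to a1.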
define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x half> %2,
    <vscale x 32 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x float> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x double> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}

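; The tests below exercise the vector-scalar form: the scalar addend arrives
; in fa0 and is splatted by vfadd.vf, so only the vector operand register
; moves with LMUL and no memory reload is needed even at m8.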
declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}