; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
; RUN: sed 's/iXLen/i32/g' %s | not --crash llc -mtriple=riscv32 \
; RUN:   -mattr=+zve64d 2>&1 | FileCheck %s --check-prefixes=ZVE64D
; RUN: sed 's/iXLen/i64/g' %s | not --crash llc -mtriple=riscv64 \
; RUN:   -mattr=+zve64d 2>&1 | FileCheck %s --check-prefixes=ZVE64D

; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vmulhsu

declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen, iXLen);

define <vscale x 64 x i8> @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i16> @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i32> @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i64> @intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i64> @intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i64> @intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i64> @intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vmulhsu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vmulhsu.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vmulhsu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vmulhsu.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vmulhsu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  iXLen, iXLen);

define <vscale x 64 x i8> @intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vmulhsu.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vmulhsu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vmulhsu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vmulhsu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vmulhsu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vmulhsu.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vmulhsu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vmulhsu.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vmulhsu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i16> @intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vmulhsu.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vmulhsu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vmulhsu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vmulhsu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vmulhsu.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vmulhsu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
i32> @intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 1795; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32: 1796; CHECK: # %bb.0: # %entry 1797; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu 1798; CHECK-NEXT: vmulhsu.vx v8, v12, a0, v0.t 1799; CHECK-NEXT: ret 1800entry: 1801 %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32( 1802 <vscale x 8 x i32> %0, 1803 <vscale x 8 x i32> %1, 1804 i32 %2, 1805 <vscale x 8 x i1> %3, 1806 iXLen %4, iXLen 1) 1807 1808 ret <vscale x 8 x i32> %a 1809} 1810 1811declare <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32( 1812 <vscale x 16 x i32>, 1813 <vscale x 16 x i32>, 1814 i32, 1815 iXLen); 1816 1817define <vscale x 16 x i32> @intrinsic_vmulhsu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind { 1818; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv16i32_nxv16i32_i32: 1819; CHECK: # %bb.0: # %entry 1820; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma 1821; CHECK-NEXT: vmulhsu.vx v8, v8, a0 1822; CHECK-NEXT: ret 1823entry: 1824 %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32( 1825 <vscale x 16 x i32> undef, 1826 <vscale x 16 x i32> %0, 1827 i32 %1, 1828 iXLen %2) 1829 1830 ret <vscale x 16 x i32> %a 1831} 1832 1833declare <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32( 1834 <vscale x 16 x i32>, 1835 <vscale x 16 x i32>, 1836 i32, 1837 <vscale x 16 x i1>, 1838 iXLen, iXLen); 1839 1840define <vscale x 16 x i32> @intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { 1841; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32: 1842; CHECK: # %bb.0: # %entry 1843; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu 1844; CHECK-NEXT: vmulhsu.vx v8, v16, a0, v0.t 1845; CHECK-NEXT: ret 1846entry: 1847 %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32( 1848 <vscale x 16 x i32> %0, 1849 <vscale x 16 x i32> %1, 1850 i32 %2, 1851 <vscale x 16 x i1> %3, 1852 iXLen %4, iXLen 1) 1853 1854 ret <vscale x 16 x i32> %a 1855} 1856 1857declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64( 1858 <vscale x 1 x i64>, 1859 <vscale x 1 x i64>, 1860 i64, 1861 iXLen); 1862 1863define <vscale x 1 x i64> @intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind { 1864; RV32-LABEL: intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64: 1865; RV32: # %bb.0: # %entry 1866; RV32-NEXT: addi sp, sp, -16 1867; RV32-NEXT: sw a0, 8(sp) 1868; RV32-NEXT: sw a1, 12(sp) 1869; RV32-NEXT: addi a0, sp, 8 1870; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma 1871; RV32-NEXT: vlse64.v v9, (a0), zero 1872; RV32-NEXT: vmulhsu.vv v8, v8, v9 1873; RV32-NEXT: addi sp, sp, 16 1874; RV32-NEXT: ret 1875; 1876; RV64-LABEL: intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64: 1877; RV64: # %bb.0: # %entry 1878; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma 1879; RV64-NEXT: vmulhsu.vx v8, v8, a0 1880; RV64-NEXT: ret 1881entry: 1882 %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64( 1883 <vscale x 1 x i64> undef, 1884 <vscale x 1 x i64> %0, 1885 i64 %1, 1886 iXLen %2) 1887 1888 ret <vscale x 1 x i64> %a 1889} 1890 1891declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64( 1892 <vscale x 1 x i64>, 1893 <vscale x 1 x i64>, 1894 i64, 1895 <vscale x 1 x i1>, 1896 iXLen, iXLen); 1897 1898define <vscale x 1 x i64> @intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, 
<vscale x 1 x i1> %3, iXLen %4) nounwind { 1899; RV32-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64: 1900; RV32: # %bb.0: # %entry 1901; RV32-NEXT: addi sp, sp, -16 1902; RV32-NEXT: sw a0, 8(sp) 1903; RV32-NEXT: sw a1, 12(sp) 1904; RV32-NEXT: addi a0, sp, 8 1905; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu 1906; RV32-NEXT: vlse64.v v10, (a0), zero 1907; RV32-NEXT: vmulhsu.vv v8, v9, v10, v0.t 1908; RV32-NEXT: addi sp, sp, 16 1909; RV32-NEXT: ret 1910; 1911; RV64-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64: 1912; RV64: # %bb.0: # %entry 1913; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu 1914; RV64-NEXT: vmulhsu.vx v8, v9, a0, v0.t 1915; RV64-NEXT: ret 1916entry: 1917 %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64( 1918 <vscale x 1 x i64> %0, 1919 <vscale x 1 x i64> %1, 1920 i64 %2, 1921 <vscale x 1 x i1> %3, 1922 iXLen %4, iXLen 1) 1923 1924 ret <vscale x 1 x i64> %a 1925} 1926 1927declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64( 1928 <vscale x 2 x i64>, 1929 <vscale x 2 x i64>, 1930 i64, 1931 iXLen); 1932 1933define <vscale x 2 x i64> @intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind { 1934; RV32-LABEL: intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64: 1935; RV32: # %bb.0: # %entry 1936; RV32-NEXT: addi sp, sp, -16 1937; RV32-NEXT: sw a0, 8(sp) 1938; RV32-NEXT: sw a1, 12(sp) 1939; RV32-NEXT: addi a0, sp, 8 1940; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma 1941; RV32-NEXT: vlse64.v v10, (a0), zero 1942; RV32-NEXT: vmulhsu.vv v8, v8, v10 1943; RV32-NEXT: addi sp, sp, 16 1944; RV32-NEXT: ret 1945; 1946; RV64-LABEL: intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64: 1947; RV64: # %bb.0: # %entry 1948; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma 1949; RV64-NEXT: vmulhsu.vx v8, v8, a0 1950; RV64-NEXT: ret 1951entry: 1952 %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64( 1953 <vscale x 2 x i64> undef, 1954 <vscale x 2 x i64> %0, 1955 i64 %1, 1956 iXLen %2) 1957 1958 ret <vscale x 2 x i64> %a 1959} 1960 1961declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64( 1962 <vscale x 2 x i64>, 1963 <vscale x 2 x i64>, 1964 i64, 1965 <vscale x 2 x i1>, 1966 iXLen, iXLen); 1967 1968define <vscale x 2 x i64> @intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 1969; RV32-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64: 1970; RV32: # %bb.0: # %entry 1971; RV32-NEXT: addi sp, sp, -16 1972; RV32-NEXT: sw a0, 8(sp) 1973; RV32-NEXT: sw a1, 12(sp) 1974; RV32-NEXT: addi a0, sp, 8 1975; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu 1976; RV32-NEXT: vlse64.v v12, (a0), zero 1977; RV32-NEXT: vmulhsu.vv v8, v10, v12, v0.t 1978; RV32-NEXT: addi sp, sp, 16 1979; RV32-NEXT: ret 1980; 1981; RV64-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64: 1982; RV64: # %bb.0: # %entry 1983; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu 1984; RV64-NEXT: vmulhsu.vx v8, v10, a0, v0.t 1985; RV64-NEXT: ret 1986entry: 1987 %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64( 1988 <vscale x 2 x i64> %0, 1989 <vscale x 2 x i64> %1, 1990 i64 %2, 1991 <vscale x 2 x i1> %3, 1992 iXLen %4, iXLen 1) 1993 1994 ret <vscale x 2 x i64> %a 1995} 1996 1997declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64( 1998 <vscale x 4 x i64>, 1999 <vscale x 4 x i64>, 2000 i64, 2001 iXLen); 2002 2003define <vscale x 4 x i64> @intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind { 2004; RV32-LABEL: 
intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64: 2005; RV32: # %bb.0: # %entry 2006; RV32-NEXT: addi sp, sp, -16 2007; RV32-NEXT: sw a0, 8(sp) 2008; RV32-NEXT: sw a1, 12(sp) 2009; RV32-NEXT: addi a0, sp, 8 2010; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma 2011; RV32-NEXT: vlse64.v v12, (a0), zero 2012; RV32-NEXT: vmulhsu.vv v8, v8, v12 2013; RV32-NEXT: addi sp, sp, 16 2014; RV32-NEXT: ret 2015; 2016; RV64-LABEL: intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64: 2017; RV64: # %bb.0: # %entry 2018; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma 2019; RV64-NEXT: vmulhsu.vx v8, v8, a0 2020; RV64-NEXT: ret 2021entry: 2022 %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64( 2023 <vscale x 4 x i64> undef, 2024 <vscale x 4 x i64> %0, 2025 i64 %1, 2026 iXLen %2) 2027 2028 ret <vscale x 4 x i64> %a 2029} 2030 2031declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64( 2032 <vscale x 4 x i64>, 2033 <vscale x 4 x i64>, 2034 i64, 2035 <vscale x 4 x i1>, 2036 iXLen, iXLen); 2037 2038define <vscale x 4 x i64> @intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 2039; RV32-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64: 2040; RV32: # %bb.0: # %entry 2041; RV32-NEXT: addi sp, sp, -16 2042; RV32-NEXT: sw a0, 8(sp) 2043; RV32-NEXT: sw a1, 12(sp) 2044; RV32-NEXT: addi a0, sp, 8 2045; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu 2046; RV32-NEXT: vlse64.v v16, (a0), zero 2047; RV32-NEXT: vmulhsu.vv v8, v12, v16, v0.t 2048; RV32-NEXT: addi sp, sp, 16 2049; RV32-NEXT: ret 2050; 2051; RV64-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64: 2052; RV64: # %bb.0: # %entry 2053; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu 2054; RV64-NEXT: vmulhsu.vx v8, v12, a0, v0.t 2055; RV64-NEXT: ret 2056entry: 2057 %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64( 2058 <vscale x 4 x i64> %0, 2059 <vscale x 4 x i64> %1, 2060 i64 %2, 2061 <vscale x 4 x i1> %3, 2062 iXLen %4, iXLen 1) 2063 2064 ret <vscale x 4 x i64> %a 2065} 2066 2067declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64( 2068 <vscale x 8 x i64>, 2069 <vscale x 8 x i64>, 2070 i64, 2071 iXLen); 2072 2073define <vscale x 8 x i64> @intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind { 2074; RV32-LABEL: intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64: 2075; RV32: # %bb.0: # %entry 2076; RV32-NEXT: addi sp, sp, -16 2077; RV32-NEXT: sw a0, 8(sp) 2078; RV32-NEXT: sw a1, 12(sp) 2079; RV32-NEXT: addi a0, sp, 8 2080; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma 2081; RV32-NEXT: vlse64.v v16, (a0), zero 2082; RV32-NEXT: vmulhsu.vv v8, v8, v16 2083; RV32-NEXT: addi sp, sp, 16 2084; RV32-NEXT: ret 2085; 2086; RV64-LABEL: intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64: 2087; RV64: # %bb.0: # %entry 2088; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma 2089; RV64-NEXT: vmulhsu.vx v8, v8, a0 2090; RV64-NEXT: ret 2091entry: 2092 %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64( 2093 <vscale x 8 x i64> undef, 2094 <vscale x 8 x i64> %0, 2095 i64 %1, 2096 iXLen %2) 2097 2098 ret <vscale x 8 x i64> %a 2099} 2100 2101declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64( 2102 <vscale x 8 x i64>, 2103 <vscale x 8 x i64>, 2104 i64, 2105 <vscale x 8 x i1>, 2106 iXLen, iXLen); 2107 2108define <vscale x 8 x i64> @intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 2109; RV32-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64: 2110; 
RV32: # %bb.0: # %entry 2111; RV32-NEXT: addi sp, sp, -16 2112; RV32-NEXT: sw a0, 8(sp) 2113; RV32-NEXT: sw a1, 12(sp) 2114; RV32-NEXT: addi a0, sp, 8 2115; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu 2116; RV32-NEXT: vlse64.v v24, (a0), zero 2117; RV32-NEXT: vmulhsu.vv v8, v16, v24, v0.t 2118; RV32-NEXT: addi sp, sp, 16 2119; RV32-NEXT: ret 2120; 2121; RV64-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64: 2122; RV64: # %bb.0: # %entry 2123; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu 2124; RV64-NEXT: vmulhsu.vx v8, v16, a0, v0.t 2125; RV64-NEXT: ret 2126entry: 2127 %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64( 2128 <vscale x 8 x i64> %0, 2129 <vscale x 8 x i64> %1, 2130 i64 %2, 2131 <vscale x 8 x i1> %3, 2132 iXLen %4, iXLen 1) 2133 2134 ret <vscale x 8 x i64> %a 2135} 2136