; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK

declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vzext_vf8_nxv1i64(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf8_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vzext.vf8 v9, v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i8> %0,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vzext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vzext.vf8 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}
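; Each conversion in this file is tested as an unmasked/masked pair like the
; one above. The unmasked form passes an undef passthru, so the checks expect
; a tail-agnostic, mask-agnostic (ta, ma) vsetvli, and the extend is emitted
; into a temporary register before being copied into v8, presumably because a
; destination with a wider EEW may not fully overlap its narrower source. The
; masked form keeps the passthru in v8, takes the mask in v0, and passes a
; trailing policy operand of iXLen 1, which matches the tail-agnostic,
; mask-undisturbed (ta, mu) configuration in the checks.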

declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
  <vscale x 2 x i64>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vzext_vf8_nxv2i64(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf8_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vzext.vf8 v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i8> %0,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8(
  <vscale x 2 x i64>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vzext_mask_vf8_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vzext.vf8 v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8(
    <vscale x 2 x i64> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
  <vscale x 4 x i64>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vzext_vf8_nxv4i64(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf8_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vzext.vf8 v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i8> %0,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8(
  <vscale x 4 x i64>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vzext_mask_vf8_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vzext.vf8 v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8(
    <vscale x 4 x i64> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
  <vscale x 8 x i64>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vzext_vf8_nxv8i64(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf8_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vzext.vf8 v16, v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i8> %0,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8(
  <vscale x 8 x i64>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vzext_mask_vf8_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vzext.vf8 v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8(
    <vscale x 8 x i64> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}
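; vzext.vf4, i16 -> i64; same structure as the vf8 tests above.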

declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
  <vscale x 1 x i64>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vzext_vf4_nxv1i64(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vzext.vf4 v9, v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i16> %0,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
  <vscale x 1 x i64>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vzext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
  <vscale x 2 x i64>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vzext_vf4_nxv2i64(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vzext.vf4 v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i16> %0,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16(
  <vscale x 2 x i64>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vzext_mask_vf4_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vzext.vf4 v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16(
    <vscale x 2 x i64> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
  <vscale x 4 x i64>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vzext_vf4_nxv4i64(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vzext.vf4 v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i16> %0,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16(
  <vscale x 4 x i64>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vzext_mask_vf4_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vzext.vf4 v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16(
    <vscale x 4 x i64> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
  <vscale x 8 x i64>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vzext_vf4_nxv8i64(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vzext.vf4 v16, v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i16> %0,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16(
  <vscale x 8 x i64>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vzext_mask_vf4_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vzext.vf4 v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16(
    <vscale x 8 x i64> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}
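; vzext.vf4, i8 -> i32. The nxv1i32 case runs at fractional LMUL (mf2), where
; the checks expect a whole-register vmv1r.v copy rather than vmv.v.v.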

declare <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
  <vscale x 1 x i32>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vzext_vf4_nxv1i32(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vzext.vf4 v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i8> %0,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8(
  <vscale x 1 x i32>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vzext_mask_vf4_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8(
    <vscale x 1 x i32> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
  <vscale x 2 x i32>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vzext_vf4_nxv2i32(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vzext.vf4 v9, v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i8> %0,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8(
  <vscale x 2 x i32>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vzext_mask_vf4_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8(
    <vscale x 2 x i32> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
  <vscale x 4 x i32>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vzext_vf4_nxv4i32(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vzext.vf4 v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i8> %0,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8(
  <vscale x 4 x i32>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vzext_mask_vf4_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vzext.vf4 v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8(
    <vscale x 4 x i32> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
  <vscale x 8 x i32>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vzext_vf4_nxv8i32(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vzext.vf4 v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i8> %0,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8(
  <vscale x 8 x i32>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vzext_mask_vf4_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vzext.vf4 v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8(
    <vscale x 8 x i32> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
  <vscale x 16 x i32>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vzext_vf4_nxv16i32(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vzext.vf4 v16, v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i8> %0,
    iXLen %1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8(
  <vscale x 16 x i32>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vzext_mask_vf4_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vzext.vf4 v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8(
    <vscale x 16 x i32> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}
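; vzext.vf2, i32 -> i64.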

declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vzext_vf2_nxv1i64(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i32> %0,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vzext_mask_vf2_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vzext_vf2_nxv2i64(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i32> %0,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vzext_mask_vf2_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vzext_vf2_nxv4i64(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vzext.vf2 v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i32> %0,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vzext_mask_vf2_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vzext_vf2_nxv8i64(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vzext.vf2 v16, v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i32> %0,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vzext_mask_vf2_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}
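; vzext.vf2, i16 -> i32.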

declare <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vzext_vf2_nxv1i32(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i16> %0,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vzext_mask_vf2_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vzext_vf2_nxv2i32(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i16> %0,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vzext_mask_vf2_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vzext_vf2_nxv4i32(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i16> %0,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vzext_mask_vf2_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vzext_vf2_nxv8i32(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vzext.vf2 v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i16> %0,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vzext_mask_vf2_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vzext_vf2_nxv16i32(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vzext.vf2 v16, v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i16> %0,
    iXLen %1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vzext_mask_vf2_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}
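; vzext.vf2, i8 -> i16.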

declare <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vzext_vf2_nxv1i16(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i8> %0,
    iXLen %1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vzext_mask_vf2_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vzext_vf2_nxv2i16(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i8> %0,
    iXLen %1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vzext_mask_vf2_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vzext_vf2_nxv4i16(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i8> %0,
    iXLen %1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vzext_mask_vf2_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vzext_vf2_nxv8i16(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i8> %0,
    iXLen %1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vzext_mask_vf2_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vzext_vf2_nxv16i16(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vzext.vf2 v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i8> %0,
    iXLen %1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vzext_mask_vf2_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vzext_vf2_nxv32i16(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vzext.vf2 v16, v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i8> %0,
    iXLen %1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vzext_mask_vf2_nxv32i16(<vscale x 32 x i1> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}