; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
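
; Note (reading of the CHECK lines below): the V extension has no vmsgtu.vv
; encoding, so the vector-vector form is expected to select vmsltu.vv with the
; operands swapped (a >u b is the same as b <u a). In the masked forms the
; incoming v0 value is first copied aside so v0 can carry the compare mask.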

declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
  <vscale x 1 x i1>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i8> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
  <vscale x 2 x i1>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i8> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8(
  <vscale x 4 x i1>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i8> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8(
  <vscale x 8 x i1>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i8> %3,
    <vscale x 8 x i1> %mask,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8(
  <vscale x 16 x i1>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmsltu.vv v0, v10, v8
; CHECK-NEXT:    vmsltu.vv v14, v12, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    iXLen %4)
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i8> %3,
    <vscale x 16 x i1> %mask,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v12, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8(
  <vscale x 32 x i1>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmsltu.vv v0, v12, v8
; CHECK-NEXT:    vmsltu.vv v20, v16, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    iXLen %4)
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i8> %3,
    <vscale x 32 x i1> %mask,
    iXLen %4)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
  <vscale x 1 x i1>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i16> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
  <vscale x 2 x i1>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i16> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
  <vscale x 4 x i1>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i16> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
  <vscale x 8 x i1>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmsltu.vv v0, v10, v8
; CHECK-NEXT:    vmsltu.vv v14, v12, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    iXLen %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i16> %3,
    <vscale x 8 x i1> %mask,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v12, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
  <vscale x 16 x i1>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmsltu.vv v0, v12, v8
; CHECK-NEXT:    vmsltu.vv v20, v16, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    iXLen %4)
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i16> %3,
    <vscale x 16 x i1> %mask,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
  <vscale x 1 x i1>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i32> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
  <vscale x 2 x i1>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i32> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
  <vscale x 4 x i1>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmsltu.vv v0, v10, v8
; CHECK-NEXT:    vmsltu.vv v14, v12, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i32> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v12, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
  <vscale x 8 x i1>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmsltu.vv v0, v12, v8
; CHECK-NEXT:    vmsltu.vv v20, v16, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    iXLen %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i32> %3,
    <vscale x 8 x i1> %mask,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
  <vscale x 1 x i1>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i64> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
  <vscale x 2 x i1>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmsltu.vv v0, v10, v8
; CHECK-NEXT:    vmsltu.vv v14, v12, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i64> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v12, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
  <vscale x 4 x i1>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmsltu.vv v0, v12, v8
; CHECK-NEXT:    vmsltu.vv v20, v16, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i64> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}
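
; The vector-scalar (vx) forms below are expected to use the native vmsgtu.vx
; encoding directly, so no operand swap is needed; the masked variants still
; move the incoming v0 aside and load the mask operand into v0 first.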

declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
  <vscale x 1 x i1>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
  <vscale x 2 x i1>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
  <vscale x 4 x i8>,
  i8,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
  <vscale x 4 x i1>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
  <vscale x 8 x i8>,
  i8,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
  <vscale x 8 x i1>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
  <vscale x 16 x i8>,
  i8,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
  <vscale x 16 x i1>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsgtu.vx v11, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
  <vscale x 32 x i8>,
  i8,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsgtu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
  <vscale x 32 x i1>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsgtu.vx v13, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
  <vscale x 1 x i1>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
  <vscale x 2 x i1>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
  <vscale x 4 x i1>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
  <vscale x 8 x i1>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsgtu.vx v11, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
  <vscale x 16 x i1>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsgtu.vx v13, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
  <vscale x 1 x i1>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
  <vscale x 2 x i1>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
  <vscale x 4 x i1>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsgtu.vx v11, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
  <vscale x 8 x i1>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsgtu.vx v13, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}
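
; For i64 scalars on RV32 there is no single GPR holding the operand, so the
; RV32 CHECK lines below spill the two halves to the stack, broadcast them
; with vlse64.v, and compare with vmsltu.vv (operands swapped), while RV64
; still selects vmsgtu.vx directly.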

declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmsgtu_vx_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vmsltu.vv v0, v9, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsgtu_vx_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmsgtu.vx v0, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
  <vscale x 1 x i1>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v11, (a0), zero
; RV32-NEXT:    vmv1r.v v10, v0
; RV32-NEXT:    vmv1r.v v0, v9
; RV32-NEXT:    vmsltu.vv v10, v11, v8, v0.t
; RV32-NEXT:    vmv.v.v v0, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT:    vmv1r.v v10, v0
; RV64-NEXT:    vmv1r.v v0, v9
; RV64-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
; RV64-NEXT:    vmv.v.v v0, v10
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmsgtu_vx_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmsltu.vv v0, v10, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsgtu_vx_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vmsgtu.vx v0, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
  <vscale x 2 x i1>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmv1r.v v11, v0
; RV32-NEXT:    vmv1r.v v0, v10
; RV32-NEXT:    vmsltu.vv v11, v12, v8, v0.t
; RV32-NEXT:    vmv1r.v v0, v11
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT:    vmv1r.v v11, v0
; RV64-NEXT:    vmv1r.v v0, v10
; RV64-NEXT:    vmsgtu.vx v11, v8, a0, v0.t
; RV64-NEXT:    vmv1r.v v0, v11
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
  <vscale x 4 x i64>,
  i64,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmsgtu_vx_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmsltu.vv v0, v12, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsgtu_vx_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vmsgtu.vx v0, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
  <vscale x 4 x i1>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmv1r.v v13, v0
; RV32-NEXT:    vmv1r.v v0, v12
; RV32-NEXT:    vmsltu.vv v13, v16, v8, v0.t
; RV32-NEXT:    vmv1r.v v0, v13
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT:    vmv1r.v v13, v0
; RV64-NEXT:    vmv1r.v v0, v12
; RV64-NEXT:    vmsgtu.vx v13, v8, a0, v0.t
; RV64-NEXT:    vmv1r.v v0, v13
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}
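
; The vector-immediate (vi) forms below are expected to fold the constant 9
; into vmsgtu.vi, reusing the .i8 intrinsic declarations from the vx tests.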
define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i8> %1,
    i8 9,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i8> %1,
    i8 9,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i8> %1,
    i8 9,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i8> %1,
    i8 9,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 16 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i8> %1,
    i8 9,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i1> %a
}

define <vscale x 32 x i1> @intrinsic_vmsgtu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 32 x i1> %a
}

define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i8> %1,
    i8 9,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x i1> %a
}
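
; In the masked .vi tests the mask operand must live in v0, so the incoming
; v0 (the maskedoff operand) is first saved to a spare register, the masked
; compare merges into that register, and the result is moved back into v0
; for the return. When the selected vtype is LMUL=1 the move back appears as
; vmv.v.v rather than vmv1r.v, presumably because a vl-bounded copy suffices
; for a single-register mask there.
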
define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i16> %1,
    i16 9,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i16> %1,
    i16 9,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i16> %1,
    i16 9,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i16> %1,
    i16 9,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 16 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i16> %1,
    i16 9,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i1> %a
}
define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i32> %1,
    i32 9,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %1,
    i32 9,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %1,
    i32 9,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}
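
; For the i64 element tests below, the .vi form again needs no RV32/RV64
; split: the immediate is encoded directly in the instruction, so the
; 64-bit-scalar spill sequence from the vx tests never arises.
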
define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %1,
    i64 9,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %1,
    i64 9,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmsgtu.vi v0, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v13, v0
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %1,
    i64 9,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}
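
; A hand-written sketch (not autogenerated, no CHECK lines; the function name
; is illustrative only): the same vmsgtu.vi selection would be expected to
; fall out of a generic unsigned-greater-than icmp against a splat constant,
; with llc choosing a VLMAX vsetvli since no explicit vl is given.
define <vscale x 1 x i1> @sketch_icmp_ugt_splat_nxv1i8(<vscale x 1 x i8> %v) nounwind {
entry:
  ; Build a splat of the constant 9 with the usual insertelement/shufflevector idiom.
  %head = insertelement <vscale x 1 x i8> poison, i8 9, i32 0
  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  ; Unsigned greater-than compare; expected to select vmsgtu.vi v0, v8, 9.
  %m = icmp ugt <vscale x 1 x i8> %v, %splat
  ret <vscale x 1 x i1> %m
}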