1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py 2; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ 3; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 4; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ 5; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 6 7declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8( 8 <vscale x 1 x i8>, 9 <vscale x 1 x i8>, 10 <vscale x 1 x i8>, 11 iXLen); 12 13define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind { 14; CHECK-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8: 15; CHECK: # %bb.0: # %entry 16; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma 17; CHECK-NEXT: vmax.vv v8, v8, v9 18; CHECK-NEXT: ret 19entry: 20 %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8( 21 <vscale x 1 x i8> undef, 22 <vscale x 1 x i8> %0, 23 <vscale x 1 x i8> %1, 24 iXLen %2) 25 26 ret <vscale x 1 x i8> %a 27} 28 29declare <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8( 30 <vscale x 1 x i8>, 31 <vscale x 1 x i8>, 32 <vscale x 1 x i8>, 33 <vscale x 1 x i1>, 34 iXLen, 35 iXLen); 36 37define <vscale x 1 x i8> @intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { 38; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i8_nxv1i8_nxv1i8: 39; CHECK: # %bb.0: # %entry 40; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu 41; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t 42; CHECK-NEXT: ret 43entry: 44 %a = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8( 45 <vscale x 1 x i8> %0, 46 <vscale x 1 x i8> %1, 47 <vscale x 1 x i8> %2, 48 <vscale x 1 x i1> %3, 49 iXLen %4, iXLen 1) 50 51 ret <vscale x 1 x i8> %a 52} 53 54declare <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8( 55 <vscale x 2 x i8>, 56 <vscale x 2 x i8>, 57 <vscale x 2 x i8>, 58 iXLen); 59 60define <vscale x 2 x i8> @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind { 61; CHECK-LABEL: intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8: 62; CHECK: # %bb.0: # %entry 63; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma 64; CHECK-NEXT: vmax.vv v8, v8, v9 65; CHECK-NEXT: ret 66entry: 67 %a = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8( 68 <vscale x 2 x i8> undef, 69 <vscale x 2 x i8> %0, 70 <vscale x 2 x i8> %1, 71 iXLen %2) 72 73 ret <vscale x 2 x i8> %a 74} 75 76declare <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8( 77 <vscale x 2 x i8>, 78 <vscale x 2 x i8>, 79 <vscale x 2 x i8>, 80 <vscale x 2 x i1>, 81 iXLen, 82 iXLen); 83 84define <vscale x 2 x i8> @intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 85; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i8_nxv2i8_nxv2i8: 86; CHECK: # %bb.0: # %entry 87; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu 88; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t 89; CHECK-NEXT: ret 90entry: 91 %a = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8( 92 <vscale x 2 x i8> %0, 93 <vscale x 2 x i8> %1, 94 <vscale x 2 x i8> %2, 95 <vscale x 2 x i1> %3, 96 iXLen %4, iXLen 1) 97 98 ret <vscale x 2 x i8> %a 99} 100 101declare <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8( 102 <vscale x 4 x i8>, 103 <vscale x 4 x i8>, 104 <vscale x 4 x i8>, 105 iXLen); 106 107define <vscale x 4 x i8> @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) 
nounwind { 108; CHECK-LABEL: intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8: 109; CHECK: # %bb.0: # %entry 110; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma 111; CHECK-NEXT: vmax.vv v8, v8, v9 112; CHECK-NEXT: ret 113entry: 114 %a = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8( 115 <vscale x 4 x i8> undef, 116 <vscale x 4 x i8> %0, 117 <vscale x 4 x i8> %1, 118 iXLen %2) 119 120 ret <vscale x 4 x i8> %a 121} 122 123declare <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8( 124 <vscale x 4 x i8>, 125 <vscale x 4 x i8>, 126 <vscale x 4 x i8>, 127 <vscale x 4 x i1>, 128 iXLen, 129 iXLen); 130 131define <vscale x 4 x i8> @intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 132; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i8_nxv4i8_nxv4i8: 133; CHECK: # %bb.0: # %entry 134; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu 135; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t 136; CHECK-NEXT: ret 137entry: 138 %a = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8( 139 <vscale x 4 x i8> %0, 140 <vscale x 4 x i8> %1, 141 <vscale x 4 x i8> %2, 142 <vscale x 4 x i1> %3, 143 iXLen %4, iXLen 1) 144 145 ret <vscale x 4 x i8> %a 146} 147 148declare <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8( 149 <vscale x 8 x i8>, 150 <vscale x 8 x i8>, 151 <vscale x 8 x i8>, 152 iXLen); 153 154define <vscale x 8 x i8> @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind { 155; CHECK-LABEL: intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8: 156; CHECK: # %bb.0: # %entry 157; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma 158; CHECK-NEXT: vmax.vv v8, v8, v9 159; CHECK-NEXT: ret 160entry: 161 %a = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8( 162 <vscale x 8 x i8> undef, 163 <vscale x 8 x i8> %0, 164 <vscale x 8 x i8> %1, 165 iXLen %2) 166 167 ret <vscale x 8 x i8> %a 168} 169 170declare <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8( 171 <vscale x 8 x i8>, 172 <vscale x 8 x i8>, 173 <vscale x 8 x i8>, 174 <vscale x 8 x i1>, 175 iXLen, 176 iXLen); 177 178define <vscale x 8 x i8> @intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 179; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i8_nxv8i8_nxv8i8: 180; CHECK: # %bb.0: # %entry 181; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu 182; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t 183; CHECK-NEXT: ret 184entry: 185 %a = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8( 186 <vscale x 8 x i8> %0, 187 <vscale x 8 x i8> %1, 188 <vscale x 8 x i8> %2, 189 <vscale x 8 x i1> %3, 190 iXLen %4, iXLen 1) 191 192 ret <vscale x 8 x i8> %a 193} 194 195declare <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8( 196 <vscale x 16 x i8>, 197 <vscale x 16 x i8>, 198 <vscale x 16 x i8>, 199 iXLen); 200 201define <vscale x 16 x i8> @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind { 202; CHECK-LABEL: intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8: 203; CHECK: # %bb.0: # %entry 204; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma 205; CHECK-NEXT: vmax.vv v8, v8, v10 206; CHECK-NEXT: ret 207entry: 208 %a = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8( 209 <vscale x 16 x i8> undef, 210 <vscale x 16 x i8> %0, 211 <vscale x 16 x i8> %1, 212 iXLen %2) 213 214 ret <vscale x 16 x i8> %a 215} 216 217declare <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8( 218 <vscale x 16 x i8>, 219 
<vscale x 16 x i8>, 220 <vscale x 16 x i8>, 221 <vscale x 16 x i1>, 222 iXLen, 223 iXLen); 224 225define <vscale x 16 x i8> @intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { 226; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i8_nxv16i8_nxv16i8: 227; CHECK: # %bb.0: # %entry 228; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu 229; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t 230; CHECK-NEXT: ret 231entry: 232 %a = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8( 233 <vscale x 16 x i8> %0, 234 <vscale x 16 x i8> %1, 235 <vscale x 16 x i8> %2, 236 <vscale x 16 x i1> %3, 237 iXLen %4, iXLen 1) 238 239 ret <vscale x 16 x i8> %a 240} 241 242declare <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8( 243 <vscale x 32 x i8>, 244 <vscale x 32 x i8>, 245 <vscale x 32 x i8>, 246 iXLen); 247 248define <vscale x 32 x i8> @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind { 249; CHECK-LABEL: intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8: 250; CHECK: # %bb.0: # %entry 251; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma 252; CHECK-NEXT: vmax.vv v8, v8, v12 253; CHECK-NEXT: ret 254entry: 255 %a = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8( 256 <vscale x 32 x i8> undef, 257 <vscale x 32 x i8> %0, 258 <vscale x 32 x i8> %1, 259 iXLen %2) 260 261 ret <vscale x 32 x i8> %a 262} 263 264declare <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8( 265 <vscale x 32 x i8>, 266 <vscale x 32 x i8>, 267 <vscale x 32 x i8>, 268 <vscale x 32 x i1>, 269 iXLen, 270 iXLen); 271 272define <vscale x 32 x i8> @intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { 273; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i8_nxv32i8_nxv32i8: 274; CHECK: # %bb.0: # %entry 275; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu 276; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t 277; CHECK-NEXT: ret 278entry: 279 %a = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8( 280 <vscale x 32 x i8> %0, 281 <vscale x 32 x i8> %1, 282 <vscale x 32 x i8> %2, 283 <vscale x 32 x i1> %3, 284 iXLen %4, iXLen 1) 285 286 ret <vscale x 32 x i8> %a 287} 288 289declare <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8( 290 <vscale x 64 x i8>, 291 <vscale x 64 x i8>, 292 <vscale x 64 x i8>, 293 iXLen); 294 295define <vscale x 64 x i8> @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind { 296; CHECK-LABEL: intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8: 297; CHECK: # %bb.0: # %entry 298; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma 299; CHECK-NEXT: vmax.vv v8, v8, v16 300; CHECK-NEXT: ret 301entry: 302 %a = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8( 303 <vscale x 64 x i8> undef, 304 <vscale x 64 x i8> %0, 305 <vscale x 64 x i8> %1, 306 iXLen %2) 307 308 ret <vscale x 64 x i8> %a 309} 310 311declare <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8( 312 <vscale x 64 x i8>, 313 <vscale x 64 x i8>, 314 <vscale x 64 x i8>, 315 <vscale x 64 x i1>, 316 iXLen, 317 iXLen); 318 319define <vscale x 64 x i8> @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind { 320; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8: 321; CHECK: # %bb.0: # %entry 322; CHECK-NEXT: vl8r.v v24, (a0) 323; CHECK-NEXT: vsetvli 
zero, a1, e8, m8, ta, mu 324; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t 325; CHECK-NEXT: ret 326entry: 327 %a = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8( 328 <vscale x 64 x i8> %0, 329 <vscale x 64 x i8> %1, 330 <vscale x 64 x i8> %2, 331 <vscale x 64 x i1> %3, 332 iXLen %4, iXLen 1) 333 334 ret <vscale x 64 x i8> %a 335} 336 337declare <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16( 338 <vscale x 1 x i16>, 339 <vscale x 1 x i16>, 340 <vscale x 1 x i16>, 341 iXLen); 342 343define <vscale x 1 x i16> @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind { 344; CHECK-LABEL: intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16: 345; CHECK: # %bb.0: # %entry 346; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma 347; CHECK-NEXT: vmax.vv v8, v8, v9 348; CHECK-NEXT: ret 349entry: 350 %a = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16( 351 <vscale x 1 x i16> undef, 352 <vscale x 1 x i16> %0, 353 <vscale x 1 x i16> %1, 354 iXLen %2) 355 356 ret <vscale x 1 x i16> %a 357} 358 359declare <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16( 360 <vscale x 1 x i16>, 361 <vscale x 1 x i16>, 362 <vscale x 1 x i16>, 363 <vscale x 1 x i1>, 364 iXLen, 365 iXLen); 366 367define <vscale x 1 x i16> @intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { 368; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i16_nxv1i16_nxv1i16: 369; CHECK: # %bb.0: # %entry 370; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu 371; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t 372; CHECK-NEXT: ret 373entry: 374 %a = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16( 375 <vscale x 1 x i16> %0, 376 <vscale x 1 x i16> %1, 377 <vscale x 1 x i16> %2, 378 <vscale x 1 x i1> %3, 379 iXLen %4, iXLen 1) 380 381 ret <vscale x 1 x i16> %a 382} 383 384declare <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16( 385 <vscale x 2 x i16>, 386 <vscale x 2 x i16>, 387 <vscale x 2 x i16>, 388 iXLen); 389 390define <vscale x 2 x i16> @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind { 391; CHECK-LABEL: intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16: 392; CHECK: # %bb.0: # %entry 393; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma 394; CHECK-NEXT: vmax.vv v8, v8, v9 395; CHECK-NEXT: ret 396entry: 397 %a = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16( 398 <vscale x 2 x i16> undef, 399 <vscale x 2 x i16> %0, 400 <vscale x 2 x i16> %1, 401 iXLen %2) 402 403 ret <vscale x 2 x i16> %a 404} 405 406declare <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16( 407 <vscale x 2 x i16>, 408 <vscale x 2 x i16>, 409 <vscale x 2 x i16>, 410 <vscale x 2 x i1>, 411 iXLen, 412 iXLen); 413 414define <vscale x 2 x i16> @intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 415; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i16_nxv2i16_nxv2i16: 416; CHECK: # %bb.0: # %entry 417; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu 418; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t 419; CHECK-NEXT: ret 420entry: 421 %a = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16( 422 <vscale x 2 x i16> %0, 423 <vscale x 2 x i16> %1, 424 <vscale x 2 x i16> %2, 425 <vscale x 2 x i1> %3, 426 iXLen %4, iXLen 1) 427 428 ret <vscale x 2 x i16> %a 429} 430 431declare <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16( 432 <vscale x 4 x i16>, 433 
<vscale x 4 x i16>, 434 <vscale x 4 x i16>, 435 iXLen); 436 437define <vscale x 4 x i16> @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind { 438; CHECK-LABEL: intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16: 439; CHECK: # %bb.0: # %entry 440; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma 441; CHECK-NEXT: vmax.vv v8, v8, v9 442; CHECK-NEXT: ret 443entry: 444 %a = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16( 445 <vscale x 4 x i16> undef, 446 <vscale x 4 x i16> %0, 447 <vscale x 4 x i16> %1, 448 iXLen %2) 449 450 ret <vscale x 4 x i16> %a 451} 452 453declare <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16( 454 <vscale x 4 x i16>, 455 <vscale x 4 x i16>, 456 <vscale x 4 x i16>, 457 <vscale x 4 x i1>, 458 iXLen, 459 iXLen); 460 461define <vscale x 4 x i16> @intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 462; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i16_nxv4i16_nxv4i16: 463; CHECK: # %bb.0: # %entry 464; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu 465; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t 466; CHECK-NEXT: ret 467entry: 468 %a = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16( 469 <vscale x 4 x i16> %0, 470 <vscale x 4 x i16> %1, 471 <vscale x 4 x i16> %2, 472 <vscale x 4 x i1> %3, 473 iXLen %4, iXLen 1) 474 475 ret <vscale x 4 x i16> %a 476} 477 478declare <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16( 479 <vscale x 8 x i16>, 480 <vscale x 8 x i16>, 481 <vscale x 8 x i16>, 482 iXLen); 483 484define <vscale x 8 x i16> @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind { 485; CHECK-LABEL: intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16: 486; CHECK: # %bb.0: # %entry 487; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma 488; CHECK-NEXT: vmax.vv v8, v8, v10 489; CHECK-NEXT: ret 490entry: 491 %a = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16( 492 <vscale x 8 x i16> undef, 493 <vscale x 8 x i16> %0, 494 <vscale x 8 x i16> %1, 495 iXLen %2) 496 497 ret <vscale x 8 x i16> %a 498} 499 500declare <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16( 501 <vscale x 8 x i16>, 502 <vscale x 8 x i16>, 503 <vscale x 8 x i16>, 504 <vscale x 8 x i1>, 505 iXLen, 506 iXLen); 507 508define <vscale x 8 x i16> @intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 509; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i16_nxv8i16_nxv8i16: 510; CHECK: # %bb.0: # %entry 511; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu 512; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t 513; CHECK-NEXT: ret 514entry: 515 %a = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16( 516 <vscale x 8 x i16> %0, 517 <vscale x 8 x i16> %1, 518 <vscale x 8 x i16> %2, 519 <vscale x 8 x i1> %3, 520 iXLen %4, iXLen 1) 521 522 ret <vscale x 8 x i16> %a 523} 524 525declare <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16( 526 <vscale x 16 x i16>, 527 <vscale x 16 x i16>, 528 <vscale x 16 x i16>, 529 iXLen); 530 531define <vscale x 16 x i16> @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind { 532; CHECK-LABEL: intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16: 533; CHECK: # %bb.0: # %entry 534; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma 535; CHECK-NEXT: vmax.vv v8, v8, v12 536; CHECK-NEXT: ret 537entry: 538 %a = call 
<vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16( 539 <vscale x 16 x i16> undef, 540 <vscale x 16 x i16> %0, 541 <vscale x 16 x i16> %1, 542 iXLen %2) 543 544 ret <vscale x 16 x i16> %a 545} 546 547declare <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16( 548 <vscale x 16 x i16>, 549 <vscale x 16 x i16>, 550 <vscale x 16 x i16>, 551 <vscale x 16 x i1>, 552 iXLen, 553 iXLen); 554 555define <vscale x 16 x i16> @intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { 556; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i16_nxv16i16_nxv16i16: 557; CHECK: # %bb.0: # %entry 558; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu 559; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t 560; CHECK-NEXT: ret 561entry: 562 %a = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16( 563 <vscale x 16 x i16> %0, 564 <vscale x 16 x i16> %1, 565 <vscale x 16 x i16> %2, 566 <vscale x 16 x i1> %3, 567 iXLen %4, iXLen 1) 568 569 ret <vscale x 16 x i16> %a 570} 571 572declare <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16( 573 <vscale x 32 x i16>, 574 <vscale x 32 x i16>, 575 <vscale x 32 x i16>, 576 iXLen); 577 578define <vscale x 32 x i16> @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind { 579; CHECK-LABEL: intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16: 580; CHECK: # %bb.0: # %entry 581; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma 582; CHECK-NEXT: vmax.vv v8, v8, v16 583; CHECK-NEXT: ret 584entry: 585 %a = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16( 586 <vscale x 32 x i16> undef, 587 <vscale x 32 x i16> %0, 588 <vscale x 32 x i16> %1, 589 iXLen %2) 590 591 ret <vscale x 32 x i16> %a 592} 593 594declare <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16( 595 <vscale x 32 x i16>, 596 <vscale x 32 x i16>, 597 <vscale x 32 x i16>, 598 <vscale x 32 x i1>, 599 iXLen, 600 iXLen); 601 602define <vscale x 32 x i16> @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { 603; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16: 604; CHECK: # %bb.0: # %entry 605; CHECK-NEXT: vl8re16.v v24, (a0) 606; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu 607; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t 608; CHECK-NEXT: ret 609entry: 610 %a = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16( 611 <vscale x 32 x i16> %0, 612 <vscale x 32 x i16> %1, 613 <vscale x 32 x i16> %2, 614 <vscale x 32 x i1> %3, 615 iXLen %4, iXLen 1) 616 617 ret <vscale x 32 x i16> %a 618} 619 620declare <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32( 621 <vscale x 1 x i32>, 622 <vscale x 1 x i32>, 623 <vscale x 1 x i32>, 624 iXLen); 625 626define <vscale x 1 x i32> @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { 627; CHECK-LABEL: intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32: 628; CHECK: # %bb.0: # %entry 629; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma 630; CHECK-NEXT: vmax.vv v8, v8, v9 631; CHECK-NEXT: ret 632entry: 633 %a = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32( 634 <vscale x 1 x i32> undef, 635 <vscale x 1 x i32> %0, 636 <vscale x 1 x i32> %1, 637 iXLen %2) 638 639 ret <vscale x 1 x i32> %a 640} 641 642declare <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32( 643 <vscale x 1 x i32>, 644 <vscale x 1 x i32>, 645 <vscale x 
1 x i32>, 646 <vscale x 1 x i1>, 647 iXLen, 648 iXLen); 649 650define <vscale x 1 x i32> @intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { 651; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i32_nxv1i32_nxv1i32: 652; CHECK: # %bb.0: # %entry 653; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu 654; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t 655; CHECK-NEXT: ret 656entry: 657 %a = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32( 658 <vscale x 1 x i32> %0, 659 <vscale x 1 x i32> %1, 660 <vscale x 1 x i32> %2, 661 <vscale x 1 x i1> %3, 662 iXLen %4, iXLen 1) 663 664 ret <vscale x 1 x i32> %a 665} 666 667declare <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32( 668 <vscale x 2 x i32>, 669 <vscale x 2 x i32>, 670 <vscale x 2 x i32>, 671 iXLen); 672 673define <vscale x 2 x i32> @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind { 674; CHECK-LABEL: intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32: 675; CHECK: # %bb.0: # %entry 676; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma 677; CHECK-NEXT: vmax.vv v8, v8, v9 678; CHECK-NEXT: ret 679entry: 680 %a = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32( 681 <vscale x 2 x i32> undef, 682 <vscale x 2 x i32> %0, 683 <vscale x 2 x i32> %1, 684 iXLen %2) 685 686 ret <vscale x 2 x i32> %a 687} 688 689declare <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32( 690 <vscale x 2 x i32>, 691 <vscale x 2 x i32>, 692 <vscale x 2 x i32>, 693 <vscale x 2 x i1>, 694 iXLen, 695 iXLen); 696 697define <vscale x 2 x i32> @intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 698; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i32_nxv2i32_nxv2i32: 699; CHECK: # %bb.0: # %entry 700; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu 701; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t 702; CHECK-NEXT: ret 703entry: 704 %a = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32( 705 <vscale x 2 x i32> %0, 706 <vscale x 2 x i32> %1, 707 <vscale x 2 x i32> %2, 708 <vscale x 2 x i1> %3, 709 iXLen %4, iXLen 1) 710 711 ret <vscale x 2 x i32> %a 712} 713 714declare <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32( 715 <vscale x 4 x i32>, 716 <vscale x 4 x i32>, 717 <vscale x 4 x i32>, 718 iXLen); 719 720define <vscale x 4 x i32> @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind { 721; CHECK-LABEL: intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32: 722; CHECK: # %bb.0: # %entry 723; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma 724; CHECK-NEXT: vmax.vv v8, v8, v10 725; CHECK-NEXT: ret 726entry: 727 %a = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32( 728 <vscale x 4 x i32> undef, 729 <vscale x 4 x i32> %0, 730 <vscale x 4 x i32> %1, 731 iXLen %2) 732 733 ret <vscale x 4 x i32> %a 734} 735 736declare <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32( 737 <vscale x 4 x i32>, 738 <vscale x 4 x i32>, 739 <vscale x 4 x i32>, 740 <vscale x 4 x i1>, 741 iXLen, 742 iXLen); 743 744define <vscale x 4 x i32> @intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 745; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i32_nxv4i32_nxv4i32: 746; CHECK: # %bb.0: # %entry 747; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu 748; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t 749; 
CHECK-NEXT: ret 750entry: 751 %a = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32( 752 <vscale x 4 x i32> %0, 753 <vscale x 4 x i32> %1, 754 <vscale x 4 x i32> %2, 755 <vscale x 4 x i1> %3, 756 iXLen %4, iXLen 1) 757 758 ret <vscale x 4 x i32> %a 759} 760 761declare <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32( 762 <vscale x 8 x i32>, 763 <vscale x 8 x i32>, 764 <vscale x 8 x i32>, 765 iXLen); 766 767define <vscale x 8 x i32> @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind { 768; CHECK-LABEL: intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32: 769; CHECK: # %bb.0: # %entry 770; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma 771; CHECK-NEXT: vmax.vv v8, v8, v12 772; CHECK-NEXT: ret 773entry: 774 %a = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32( 775 <vscale x 8 x i32> undef, 776 <vscale x 8 x i32> %0, 777 <vscale x 8 x i32> %1, 778 iXLen %2) 779 780 ret <vscale x 8 x i32> %a 781} 782 783declare <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32( 784 <vscale x 8 x i32>, 785 <vscale x 8 x i32>, 786 <vscale x 8 x i32>, 787 <vscale x 8 x i1>, 788 iXLen, 789 iXLen); 790 791define <vscale x 8 x i32> @intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 792; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i32_nxv8i32_nxv8i32: 793; CHECK: # %bb.0: # %entry 794; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu 795; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t 796; CHECK-NEXT: ret 797entry: 798 %a = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32( 799 <vscale x 8 x i32> %0, 800 <vscale x 8 x i32> %1, 801 <vscale x 8 x i32> %2, 802 <vscale x 8 x i1> %3, 803 iXLen %4, iXLen 1) 804 805 ret <vscale x 8 x i32> %a 806} 807 808declare <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32( 809 <vscale x 16 x i32>, 810 <vscale x 16 x i32>, 811 <vscale x 16 x i32>, 812 iXLen); 813 814define <vscale x 16 x i32> @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind { 815; CHECK-LABEL: intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32: 816; CHECK: # %bb.0: # %entry 817; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma 818; CHECK-NEXT: vmax.vv v8, v8, v16 819; CHECK-NEXT: ret 820entry: 821 %a = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32( 822 <vscale x 16 x i32> undef, 823 <vscale x 16 x i32> %0, 824 <vscale x 16 x i32> %1, 825 iXLen %2) 826 827 ret <vscale x 16 x i32> %a 828} 829 830declare <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32( 831 <vscale x 16 x i32>, 832 <vscale x 16 x i32>, 833 <vscale x 16 x i32>, 834 <vscale x 16 x i1>, 835 iXLen, 836 iXLen); 837 838define <vscale x 16 x i32> @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { 839; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32: 840; CHECK: # %bb.0: # %entry 841; CHECK-NEXT: vl8re32.v v24, (a0) 842; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu 843; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t 844; CHECK-NEXT: ret 845entry: 846 %a = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32( 847 <vscale x 16 x i32> %0, 848 <vscale x 16 x i32> %1, 849 <vscale x 16 x i32> %2, 850 <vscale x 16 x i1> %3, 851 iXLen %4, iXLen 1) 852 853 ret <vscale x 16 x i32> %a 854} 855 856declare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64( 857 <vscale x 1 x 
i64>, 858 <vscale x 1 x i64>, 859 <vscale x 1 x i64>, 860 iXLen); 861 862define <vscale x 1 x i64> @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { 863; CHECK-LABEL: intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64: 864; CHECK: # %bb.0: # %entry 865; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma 866; CHECK-NEXT: vmax.vv v8, v8, v9 867; CHECK-NEXT: ret 868entry: 869 %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64( 870 <vscale x 1 x i64> undef, 871 <vscale x 1 x i64> %0, 872 <vscale x 1 x i64> %1, 873 iXLen %2) 874 875 ret <vscale x 1 x i64> %a 876} 877 878declare <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64( 879 <vscale x 1 x i64>, 880 <vscale x 1 x i64>, 881 <vscale x 1 x i64>, 882 <vscale x 1 x i1>, 883 iXLen, 884 iXLen); 885 886define <vscale x 1 x i64> @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { 887; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64: 888; CHECK: # %bb.0: # %entry 889; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu 890; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t 891; CHECK-NEXT: ret 892entry: 893 %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64( 894 <vscale x 1 x i64> %0, 895 <vscale x 1 x i64> %1, 896 <vscale x 1 x i64> %2, 897 <vscale x 1 x i1> %3, 898 iXLen %4, iXLen 1) 899 900 ret <vscale x 1 x i64> %a 901} 902 903declare <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64( 904 <vscale x 2 x i64>, 905 <vscale x 2 x i64>, 906 <vscale x 2 x i64>, 907 iXLen); 908 909define <vscale x 2 x i64> @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind { 910; CHECK-LABEL: intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64: 911; CHECK: # %bb.0: # %entry 912; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma 913; CHECK-NEXT: vmax.vv v8, v8, v10 914; CHECK-NEXT: ret 915entry: 916 %a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64( 917 <vscale x 2 x i64> undef, 918 <vscale x 2 x i64> %0, 919 <vscale x 2 x i64> %1, 920 iXLen %2) 921 922 ret <vscale x 2 x i64> %a 923} 924 925declare <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64( 926 <vscale x 2 x i64>, 927 <vscale x 2 x i64>, 928 <vscale x 2 x i64>, 929 <vscale x 2 x i1>, 930 iXLen, 931 iXLen); 932 933define <vscale x 2 x i64> @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 934; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64: 935; CHECK: # %bb.0: # %entry 936; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu 937; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t 938; CHECK-NEXT: ret 939entry: 940 %a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64( 941 <vscale x 2 x i64> %0, 942 <vscale x 2 x i64> %1, 943 <vscale x 2 x i64> %2, 944 <vscale x 2 x i1> %3, 945 iXLen %4, iXLen 1) 946 947 ret <vscale x 2 x i64> %a 948} 949 950declare <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64( 951 <vscale x 4 x i64>, 952 <vscale x 4 x i64>, 953 <vscale x 4 x i64>, 954 iXLen); 955 956define <vscale x 4 x i64> @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind { 957; CHECK-LABEL: intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64: 958; CHECK: # %bb.0: # %entry 959; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma 960; CHECK-NEXT: vmax.vv v8, v8, v12 961; CHECK-NEXT: ret 962entry: 963 %a = call <vscale x 4 
x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64( 964 <vscale x 4 x i64> undef, 965 <vscale x 4 x i64> %0, 966 <vscale x 4 x i64> %1, 967 iXLen %2) 968 969 ret <vscale x 4 x i64> %a 970} 971 972declare <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64( 973 <vscale x 4 x i64>, 974 <vscale x 4 x i64>, 975 <vscale x 4 x i64>, 976 <vscale x 4 x i1>, 977 iXLen, 978 iXLen); 979 980define <vscale x 4 x i64> @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 981; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64: 982; CHECK: # %bb.0: # %entry 983; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu 984; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t 985; CHECK-NEXT: ret 986entry: 987 %a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64( 988 <vscale x 4 x i64> %0, 989 <vscale x 4 x i64> %1, 990 <vscale x 4 x i64> %2, 991 <vscale x 4 x i1> %3, 992 iXLen %4, iXLen 1) 993 994 ret <vscale x 4 x i64> %a 995} 996 997declare <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64( 998 <vscale x 8 x i64>, 999 <vscale x 8 x i64>, 1000 <vscale x 8 x i64>, 1001 iXLen); 1002 1003define <vscale x 8 x i64> @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind { 1004; CHECK-LABEL: intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64: 1005; CHECK: # %bb.0: # %entry 1006; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma 1007; CHECK-NEXT: vmax.vv v8, v8, v16 1008; CHECK-NEXT: ret 1009entry: 1010 %a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64( 1011 <vscale x 8 x i64> undef, 1012 <vscale x 8 x i64> %0, 1013 <vscale x 8 x i64> %1, 1014 iXLen %2) 1015 1016 ret <vscale x 8 x i64> %a 1017} 1018 1019declare <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64( 1020 <vscale x 8 x i64>, 1021 <vscale x 8 x i64>, 1022 <vscale x 8 x i64>, 1023 <vscale x 8 x i1>, 1024 iXLen, 1025 iXLen); 1026 1027define <vscale x 8 x i64> @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 1028; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64: 1029; CHECK: # %bb.0: # %entry 1030; CHECK-NEXT: vl8re64.v v24, (a0) 1031; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu 1032; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t 1033; CHECK-NEXT: ret 1034entry: 1035 %a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64( 1036 <vscale x 8 x i64> %0, 1037 <vscale x 8 x i64> %1, 1038 <vscale x 8 x i64> %2, 1039 <vscale x 8 x i1> %3, 1040 iXLen %4, iXLen 1) 1041 1042 ret <vscale x 8 x i64> %a 1043} 1044 1045declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8( 1046 <vscale x 1 x i8>, 1047 <vscale x 1 x i8>, 1048 i8, 1049 iXLen); 1050 1051define <vscale x 1 x i8> @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind { 1052; CHECK-LABEL: intrinsic_vmax_vx_nxv1i8_nxv1i8_i8: 1053; CHECK: # %bb.0: # %entry 1054; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1055; CHECK-NEXT: vmax.vx v8, v8, a0 1056; CHECK-NEXT: ret 1057entry: 1058 %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8( 1059 <vscale x 1 x i8> undef, 1060 <vscale x 1 x i8> %0, 1061 i8 %1, 1062 iXLen %2) 1063 1064 ret <vscale x 1 x i8> %a 1065} 1066 1067declare <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8( 1068 <vscale x 1 x i8>, 1069 <vscale x 1 x i8>, 1070 i8, 1071 <vscale x 1 x i1>, 1072 iXLen, 1073 iXLen); 1074 1075define <vscale x 1 x i8> 
@intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { 1076; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i8_nxv1i8_i8: 1077; CHECK: # %bb.0: # %entry 1078; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu 1079; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t 1080; CHECK-NEXT: ret 1081entry: 1082 %a = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8( 1083 <vscale x 1 x i8> %0, 1084 <vscale x 1 x i8> %1, 1085 i8 %2, 1086 <vscale x 1 x i1> %3, 1087 iXLen %4, iXLen 1) 1088 1089 ret <vscale x 1 x i8> %a 1090} 1091 1092declare <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8( 1093 <vscale x 2 x i8>, 1094 <vscale x 2 x i8>, 1095 i8, 1096 iXLen); 1097 1098define <vscale x 2 x i8> @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind { 1099; CHECK-LABEL: intrinsic_vmax_vx_nxv2i8_nxv2i8_i8: 1100; CHECK: # %bb.0: # %entry 1101; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1102; CHECK-NEXT: vmax.vx v8, v8, a0 1103; CHECK-NEXT: ret 1104entry: 1105 %a = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8( 1106 <vscale x 2 x i8> undef, 1107 <vscale x 2 x i8> %0, 1108 i8 %1, 1109 iXLen %2) 1110 1111 ret <vscale x 2 x i8> %a 1112} 1113 1114declare <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8( 1115 <vscale x 2 x i8>, 1116 <vscale x 2 x i8>, 1117 i8, 1118 <vscale x 2 x i1>, 1119 iXLen, 1120 iXLen); 1121 1122define <vscale x 2 x i8> @intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 1123; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i8_nxv2i8_i8: 1124; CHECK: # %bb.0: # %entry 1125; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu 1126; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t 1127; CHECK-NEXT: ret 1128entry: 1129 %a = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8( 1130 <vscale x 2 x i8> %0, 1131 <vscale x 2 x i8> %1, 1132 i8 %2, 1133 <vscale x 2 x i1> %3, 1134 iXLen %4, iXLen 1) 1135 1136 ret <vscale x 2 x i8> %a 1137} 1138 1139declare <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8( 1140 <vscale x 4 x i8>, 1141 <vscale x 4 x i8>, 1142 i8, 1143 iXLen); 1144 1145define <vscale x 4 x i8> @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind { 1146; CHECK-LABEL: intrinsic_vmax_vx_nxv4i8_nxv4i8_i8: 1147; CHECK: # %bb.0: # %entry 1148; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 1149; CHECK-NEXT: vmax.vx v8, v8, a0 1150; CHECK-NEXT: ret 1151entry: 1152 %a = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8( 1153 <vscale x 4 x i8> undef, 1154 <vscale x 4 x i8> %0, 1155 i8 %1, 1156 iXLen %2) 1157 1158 ret <vscale x 4 x i8> %a 1159} 1160 1161declare <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8( 1162 <vscale x 4 x i8>, 1163 <vscale x 4 x i8>, 1164 i8, 1165 <vscale x 4 x i1>, 1166 iXLen, 1167 iXLen); 1168 1169define <vscale x 4 x i8> @intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 1170; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i8_nxv4i8_i8: 1171; CHECK: # %bb.0: # %entry 1172; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu 1173; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t 1174; CHECK-NEXT: ret 1175entry: 1176 %a = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8( 1177 <vscale x 4 x i8> %0, 1178 <vscale x 4 x i8> %1, 1179 i8 %2, 1180 <vscale x 4 x i1> %3, 1181 iXLen %4, iXLen 1) 1182 1183 ret <vscale x 4 x i8> %a 1184} 1185 1186declare <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8( 1187 <vscale x 8 x i8>, 1188 <vscale x 8 x i8>, 
1189 i8, 1190 iXLen); 1191 1192define <vscale x 8 x i8> @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind { 1193; CHECK-LABEL: intrinsic_vmax_vx_nxv8i8_nxv8i8_i8: 1194; CHECK: # %bb.0: # %entry 1195; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 1196; CHECK-NEXT: vmax.vx v8, v8, a0 1197; CHECK-NEXT: ret 1198entry: 1199 %a = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8( 1200 <vscale x 8 x i8> undef, 1201 <vscale x 8 x i8> %0, 1202 i8 %1, 1203 iXLen %2) 1204 1205 ret <vscale x 8 x i8> %a 1206} 1207 1208declare <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8( 1209 <vscale x 8 x i8>, 1210 <vscale x 8 x i8>, 1211 i8, 1212 <vscale x 8 x i1>, 1213 iXLen, 1214 iXLen); 1215 1216define <vscale x 8 x i8> @intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 1217; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i8_nxv8i8_i8: 1218; CHECK: # %bb.0: # %entry 1219; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu 1220; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t 1221; CHECK-NEXT: ret 1222entry: 1223 %a = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8( 1224 <vscale x 8 x i8> %0, 1225 <vscale x 8 x i8> %1, 1226 i8 %2, 1227 <vscale x 8 x i1> %3, 1228 iXLen %4, iXLen 1) 1229 1230 ret <vscale x 8 x i8> %a 1231} 1232 1233declare <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8( 1234 <vscale x 16 x i8>, 1235 <vscale x 16 x i8>, 1236 i8, 1237 iXLen); 1238 1239define <vscale x 16 x i8> @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind { 1240; CHECK-LABEL: intrinsic_vmax_vx_nxv16i8_nxv16i8_i8: 1241; CHECK: # %bb.0: # %entry 1242; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma 1243; CHECK-NEXT: vmax.vx v8, v8, a0 1244; CHECK-NEXT: ret 1245entry: 1246 %a = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8( 1247 <vscale x 16 x i8> undef, 1248 <vscale x 16 x i8> %0, 1249 i8 %1, 1250 iXLen %2) 1251 1252 ret <vscale x 16 x i8> %a 1253} 1254 1255declare <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8( 1256 <vscale x 16 x i8>, 1257 <vscale x 16 x i8>, 1258 i8, 1259 <vscale x 16 x i1>, 1260 iXLen, 1261 iXLen); 1262 1263define <vscale x 16 x i8> @intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { 1264; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i8_nxv16i8_i8: 1265; CHECK: # %bb.0: # %entry 1266; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu 1267; CHECK-NEXT: vmax.vx v8, v10, a0, v0.t 1268; CHECK-NEXT: ret 1269entry: 1270 %a = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8( 1271 <vscale x 16 x i8> %0, 1272 <vscale x 16 x i8> %1, 1273 i8 %2, 1274 <vscale x 16 x i1> %3, 1275 iXLen %4, iXLen 1) 1276 1277 ret <vscale x 16 x i8> %a 1278} 1279 1280declare <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8( 1281 <vscale x 32 x i8>, 1282 <vscale x 32 x i8>, 1283 i8, 1284 iXLen); 1285 1286define <vscale x 32 x i8> @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind { 1287; CHECK-LABEL: intrinsic_vmax_vx_nxv32i8_nxv32i8_i8: 1288; CHECK: # %bb.0: # %entry 1289; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma 1290; CHECK-NEXT: vmax.vx v8, v8, a0 1291; CHECK-NEXT: ret 1292entry: 1293 %a = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8( 1294 <vscale x 32 x i8> undef, 1295 <vscale x 32 x i8> %0, 1296 i8 %1, 1297 iXLen %2) 1298 1299 ret <vscale x 32 x i8> %a 1300} 1301 1302declare <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8( 1303 <vscale x 32 x i8>, 1304 <vscale 
x 32 x i8>, 1305 i8, 1306 <vscale x 32 x i1>, 1307 iXLen, 1308 iXLen); 1309 1310define <vscale x 32 x i8> @intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { 1311; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i8_nxv32i8_i8: 1312; CHECK: # %bb.0: # %entry 1313; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu 1314; CHECK-NEXT: vmax.vx v8, v12, a0, v0.t 1315; CHECK-NEXT: ret 1316entry: 1317 %a = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8( 1318 <vscale x 32 x i8> %0, 1319 <vscale x 32 x i8> %1, 1320 i8 %2, 1321 <vscale x 32 x i1> %3, 1322 iXLen %4, iXLen 1) 1323 1324 ret <vscale x 32 x i8> %a 1325} 1326 1327declare <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8( 1328 <vscale x 64 x i8>, 1329 <vscale x 64 x i8>, 1330 i8, 1331 iXLen); 1332 1333define <vscale x 64 x i8> @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind { 1334; CHECK-LABEL: intrinsic_vmax_vx_nxv64i8_nxv64i8_i8: 1335; CHECK: # %bb.0: # %entry 1336; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma 1337; CHECK-NEXT: vmax.vx v8, v8, a0 1338; CHECK-NEXT: ret 1339entry: 1340 %a = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8( 1341 <vscale x 64 x i8> undef, 1342 <vscale x 64 x i8> %0, 1343 i8 %1, 1344 iXLen %2) 1345 1346 ret <vscale x 64 x i8> %a 1347} 1348 1349declare <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8( 1350 <vscale x 64 x i8>, 1351 <vscale x 64 x i8>, 1352 i8, 1353 <vscale x 64 x i1>, 1354 iXLen, 1355 iXLen); 1356 1357define <vscale x 64 x i8> @intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind { 1358; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv64i8_nxv64i8_i8: 1359; CHECK: # %bb.0: # %entry 1360; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu 1361; CHECK-NEXT: vmax.vx v8, v16, a0, v0.t 1362; CHECK-NEXT: ret 1363entry: 1364 %a = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8( 1365 <vscale x 64 x i8> %0, 1366 <vscale x 64 x i8> %1, 1367 i8 %2, 1368 <vscale x 64 x i1> %3, 1369 iXLen %4, iXLen 1) 1370 1371 ret <vscale x 64 x i8> %a 1372} 1373 1374declare <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16( 1375 <vscale x 1 x i16>, 1376 <vscale x 1 x i16>, 1377 i16, 1378 iXLen); 1379 1380define <vscale x 1 x i16> @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind { 1381; CHECK-LABEL: intrinsic_vmax_vx_nxv1i16_nxv1i16_i16: 1382; CHECK: # %bb.0: # %entry 1383; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 1384; CHECK-NEXT: vmax.vx v8, v8, a0 1385; CHECK-NEXT: ret 1386entry: 1387 %a = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16( 1388 <vscale x 1 x i16> undef, 1389 <vscale x 1 x i16> %0, 1390 i16 %1, 1391 iXLen %2) 1392 1393 ret <vscale x 1 x i16> %a 1394} 1395 1396declare <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16( 1397 <vscale x 1 x i16>, 1398 <vscale x 1 x i16>, 1399 i16, 1400 <vscale x 1 x i1>, 1401 iXLen, 1402 iXLen); 1403 1404define <vscale x 1 x i16> @intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { 1405; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i16_nxv1i16_i16: 1406; CHECK: # %bb.0: # %entry 1407; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu 1408; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t 1409; CHECK-NEXT: ret 1410entry: 1411 %a = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16( 1412 <vscale x 1 x i16> %0, 1413 <vscale x 1 x i16> %1, 1414 i16 %2, 1415 
<vscale x 1 x i1> %3, 1416 iXLen %4, iXLen 1) 1417 1418 ret <vscale x 1 x i16> %a 1419} 1420 1421declare <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16( 1422 <vscale x 2 x i16>, 1423 <vscale x 2 x i16>, 1424 i16, 1425 iXLen); 1426 1427define <vscale x 2 x i16> @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind { 1428; CHECK-LABEL: intrinsic_vmax_vx_nxv2i16_nxv2i16_i16: 1429; CHECK: # %bb.0: # %entry 1430; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 1431; CHECK-NEXT: vmax.vx v8, v8, a0 1432; CHECK-NEXT: ret 1433entry: 1434 %a = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16( 1435 <vscale x 2 x i16> undef, 1436 <vscale x 2 x i16> %0, 1437 i16 %1, 1438 iXLen %2) 1439 1440 ret <vscale x 2 x i16> %a 1441} 1442 1443declare <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16( 1444 <vscale x 2 x i16>, 1445 <vscale x 2 x i16>, 1446 i16, 1447 <vscale x 2 x i1>, 1448 iXLen, 1449 iXLen); 1450 1451define <vscale x 2 x i16> @intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 1452; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i16_nxv2i16_i16: 1453; CHECK: # %bb.0: # %entry 1454; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu 1455; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t 1456; CHECK-NEXT: ret 1457entry: 1458 %a = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16( 1459 <vscale x 2 x i16> %0, 1460 <vscale x 2 x i16> %1, 1461 i16 %2, 1462 <vscale x 2 x i1> %3, 1463 iXLen %4, iXLen 1) 1464 1465 ret <vscale x 2 x i16> %a 1466} 1467 1468declare <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16( 1469 <vscale x 4 x i16>, 1470 <vscale x 4 x i16>, 1471 i16, 1472 iXLen); 1473 1474define <vscale x 4 x i16> @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind { 1475; CHECK-LABEL: intrinsic_vmax_vx_nxv4i16_nxv4i16_i16: 1476; CHECK: # %bb.0: # %entry 1477; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 1478; CHECK-NEXT: vmax.vx v8, v8, a0 1479; CHECK-NEXT: ret 1480entry: 1481 %a = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16( 1482 <vscale x 4 x i16> undef, 1483 <vscale x 4 x i16> %0, 1484 i16 %1, 1485 iXLen %2) 1486 1487 ret <vscale x 4 x i16> %a 1488} 1489 1490declare <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16( 1491 <vscale x 4 x i16>, 1492 <vscale x 4 x i16>, 1493 i16, 1494 <vscale x 4 x i1>, 1495 iXLen, 1496 iXLen); 1497 1498define <vscale x 4 x i16> @intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 1499; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i16_nxv4i16_i16: 1500; CHECK: # %bb.0: # %entry 1501; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu 1502; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t 1503; CHECK-NEXT: ret 1504entry: 1505 %a = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16( 1506 <vscale x 4 x i16> %0, 1507 <vscale x 4 x i16> %1, 1508 i16 %2, 1509 <vscale x 4 x i1> %3, 1510 iXLen %4, iXLen 1) 1511 1512 ret <vscale x 4 x i16> %a 1513} 1514 1515declare <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16( 1516 <vscale x 8 x i16>, 1517 <vscale x 8 x i16>, 1518 i16, 1519 iXLen); 1520 1521define <vscale x 8 x i16> @intrinsic_vmax_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind { 1522; CHECK-LABEL: intrinsic_vmax_vx_nxv8i16_nxv8i16_i16: 1523; CHECK: # %bb.0: # %entry 1524; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 1525; CHECK-NEXT: vmax.vx v8, v8, a0 1526; CHECK-NEXT: ret 1527entry: 1528 %a = call <vscale x 8 x 
i16> @llvm.riscv.vmax.nxv8i16.i16( 1529 <vscale x 8 x i16> undef, 1530 <vscale x 8 x i16> %0, 1531 i16 %1, 1532 iXLen %2) 1533 1534 ret <vscale x 8 x i16> %a 1535} 1536 1537declare <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16( 1538 <vscale x 8 x i16>, 1539 <vscale x 8 x i16>, 1540 i16, 1541 <vscale x 8 x i1>, 1542 iXLen, 1543 iXLen); 1544 1545define <vscale x 8 x i16> @intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 1546; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i16_nxv8i16_i16: 1547; CHECK: # %bb.0: # %entry 1548; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu 1549; CHECK-NEXT: vmax.vx v8, v10, a0, v0.t 1550; CHECK-NEXT: ret 1551entry: 1552 %a = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16( 1553 <vscale x 8 x i16> %0, 1554 <vscale x 8 x i16> %1, 1555 i16 %2, 1556 <vscale x 8 x i1> %3, 1557 iXLen %4, iXLen 1) 1558 1559 ret <vscale x 8 x i16> %a 1560} 1561 1562declare <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16( 1563 <vscale x 16 x i16>, 1564 <vscale x 16 x i16>, 1565 i16, 1566 iXLen); 1567 1568define <vscale x 16 x i16> @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind { 1569; CHECK-LABEL: intrinsic_vmax_vx_nxv16i16_nxv16i16_i16: 1570; CHECK: # %bb.0: # %entry 1571; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma 1572; CHECK-NEXT: vmax.vx v8, v8, a0 1573; CHECK-NEXT: ret 1574entry: 1575 %a = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16( 1576 <vscale x 16 x i16> undef, 1577 <vscale x 16 x i16> %0, 1578 i16 %1, 1579 iXLen %2) 1580 1581 ret <vscale x 16 x i16> %a 1582} 1583 1584declare <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16( 1585 <vscale x 16 x i16>, 1586 <vscale x 16 x i16>, 1587 i16, 1588 <vscale x 16 x i1>, 1589 iXLen, 1590 iXLen); 1591 1592define <vscale x 16 x i16> @intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { 1593; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i16_nxv16i16_i16: 1594; CHECK: # %bb.0: # %entry 1595; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu 1596; CHECK-NEXT: vmax.vx v8, v12, a0, v0.t 1597; CHECK-NEXT: ret 1598entry: 1599 %a = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16( 1600 <vscale x 16 x i16> %0, 1601 <vscale x 16 x i16> %1, 1602 i16 %2, 1603 <vscale x 16 x i1> %3, 1604 iXLen %4, iXLen 1) 1605 1606 ret <vscale x 16 x i16> %a 1607} 1608 1609declare <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16( 1610 <vscale x 32 x i16>, 1611 <vscale x 32 x i16>, 1612 i16, 1613 iXLen); 1614 1615define <vscale x 32 x i16> @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind { 1616; CHECK-LABEL: intrinsic_vmax_vx_nxv32i16_nxv32i16_i16: 1617; CHECK: # %bb.0: # %entry 1618; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma 1619; CHECK-NEXT: vmax.vx v8, v8, a0 1620; CHECK-NEXT: ret 1621entry: 1622 %a = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16( 1623 <vscale x 32 x i16> undef, 1624 <vscale x 32 x i16> %0, 1625 i16 %1, 1626 iXLen %2) 1627 1628 ret <vscale x 32 x i16> %a 1629} 1630 1631declare <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16( 1632 <vscale x 32 x i16>, 1633 <vscale x 32 x i16>, 1634 i16, 1635 <vscale x 32 x i1>, 1636 iXLen, 1637 iXLen); 1638 1639define <vscale x 32 x i16> @intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { 1640; 
CHECK-LABEL: intrinsic_vmax_mask_vx_nxv32i16_nxv32i16_i16: 1641; CHECK: # %bb.0: # %entry 1642; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu 1643; CHECK-NEXT: vmax.vx v8, v16, a0, v0.t 1644; CHECK-NEXT: ret 1645entry: 1646 %a = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16( 1647 <vscale x 32 x i16> %0, 1648 <vscale x 32 x i16> %1, 1649 i16 %2, 1650 <vscale x 32 x i1> %3, 1651 iXLen %4, iXLen 1) 1652 1653 ret <vscale x 32 x i16> %a 1654} 1655 1656declare <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32( 1657 <vscale x 1 x i32>, 1658 <vscale x 1 x i32>, 1659 i32, 1660 iXLen); 1661 1662define <vscale x 1 x i32> @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind { 1663; CHECK-LABEL: intrinsic_vmax_vx_nxv1i32_nxv1i32_i32: 1664; CHECK: # %bb.0: # %entry 1665; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 1666; CHECK-NEXT: vmax.vx v8, v8, a0 1667; CHECK-NEXT: ret 1668entry: 1669 %a = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32( 1670 <vscale x 1 x i32> undef, 1671 <vscale x 1 x i32> %0, 1672 i32 %1, 1673 iXLen %2) 1674 1675 ret <vscale x 1 x i32> %a 1676} 1677 1678declare <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32( 1679 <vscale x 1 x i32>, 1680 <vscale x 1 x i32>, 1681 i32, 1682 <vscale x 1 x i1>, 1683 iXLen, 1684 iXLen); 1685 1686define <vscale x 1 x i32> @intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { 1687; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i32_nxv1i32_i32: 1688; CHECK: # %bb.0: # %entry 1689; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu 1690; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t 1691; CHECK-NEXT: ret 1692entry: 1693 %a = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32( 1694 <vscale x 1 x i32> %0, 1695 <vscale x 1 x i32> %1, 1696 i32 %2, 1697 <vscale x 1 x i1> %3, 1698 iXLen %4, iXLen 1) 1699 1700 ret <vscale x 1 x i32> %a 1701} 1702 1703declare <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32( 1704 <vscale x 2 x i32>, 1705 <vscale x 2 x i32>, 1706 i32, 1707 iXLen); 1708 1709define <vscale x 2 x i32> @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind { 1710; CHECK-LABEL: intrinsic_vmax_vx_nxv2i32_nxv2i32_i32: 1711; CHECK: # %bb.0: # %entry 1712; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 1713; CHECK-NEXT: vmax.vx v8, v8, a0 1714; CHECK-NEXT: ret 1715entry: 1716 %a = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32( 1717 <vscale x 2 x i32> undef, 1718 <vscale x 2 x i32> %0, 1719 i32 %1, 1720 iXLen %2) 1721 1722 ret <vscale x 2 x i32> %a 1723} 1724 1725declare <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32( 1726 <vscale x 2 x i32>, 1727 <vscale x 2 x i32>, 1728 i32, 1729 <vscale x 2 x i1>, 1730 iXLen, 1731 iXLen); 1732 1733define <vscale x 2 x i32> @intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 1734; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i32_nxv2i32_i32: 1735; CHECK: # %bb.0: # %entry 1736; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu 1737; CHECK-NEXT: vmax.vx v8, v9, a0, v0.t 1738; CHECK-NEXT: ret 1739entry: 1740 %a = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32( 1741 <vscale x 2 x i32> %0, 1742 <vscale x 2 x i32> %1, 1743 i32 %2, 1744 <vscale x 2 x i1> %3, 1745 iXLen %4, iXLen 1) 1746 1747 ret <vscale x 2 x i32> %a 1748} 1749 1750declare <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32( 1751 <vscale x 4 x i32>, 1752 <vscale x 4 x i32>, 1753 i32, 1754 iXLen); 
1755 1756define <vscale x 4 x i32> @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind { 1757; CHECK-LABEL: intrinsic_vmax_vx_nxv4i32_nxv4i32_i32: 1758; CHECK: # %bb.0: # %entry 1759; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 1760; CHECK-NEXT: vmax.vx v8, v8, a0 1761; CHECK-NEXT: ret 1762entry: 1763 %a = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32( 1764 <vscale x 4 x i32> undef, 1765 <vscale x 4 x i32> %0, 1766 i32 %1, 1767 iXLen %2) 1768 1769 ret <vscale x 4 x i32> %a 1770} 1771 1772declare <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32( 1773 <vscale x 4 x i32>, 1774 <vscale x 4 x i32>, 1775 i32, 1776 <vscale x 4 x i1>, 1777 iXLen, 1778 iXLen); 1779 1780define <vscale x 4 x i32> @intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 1781; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i32_nxv4i32_i32: 1782; CHECK: # %bb.0: # %entry 1783; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu 1784; CHECK-NEXT: vmax.vx v8, v10, a0, v0.t 1785; CHECK-NEXT: ret 1786entry: 1787 %a = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32( 1788 <vscale x 4 x i32> %0, 1789 <vscale x 4 x i32> %1, 1790 i32 %2, 1791 <vscale x 4 x i1> %3, 1792 iXLen %4, iXLen 1) 1793 1794 ret <vscale x 4 x i32> %a 1795} 1796 1797declare <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32( 1798 <vscale x 8 x i32>, 1799 <vscale x 8 x i32>, 1800 i32, 1801 iXLen); 1802 1803define <vscale x 8 x i32> @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind { 1804; CHECK-LABEL: intrinsic_vmax_vx_nxv8i32_nxv8i32_i32: 1805; CHECK: # %bb.0: # %entry 1806; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 1807; CHECK-NEXT: vmax.vx v8, v8, a0 1808; CHECK-NEXT: ret 1809entry: 1810 %a = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32( 1811 <vscale x 8 x i32> undef, 1812 <vscale x 8 x i32> %0, 1813 i32 %1, 1814 iXLen %2) 1815 1816 ret <vscale x 8 x i32> %a 1817} 1818 1819declare <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32( 1820 <vscale x 8 x i32>, 1821 <vscale x 8 x i32>, 1822 i32, 1823 <vscale x 8 x i1>, 1824 iXLen, 1825 iXLen); 1826 1827define <vscale x 8 x i32> @intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 1828; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i32_nxv8i32_i32: 1829; CHECK: # %bb.0: # %entry 1830; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu 1831; CHECK-NEXT: vmax.vx v8, v12, a0, v0.t 1832; CHECK-NEXT: ret 1833entry: 1834 %a = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32( 1835 <vscale x 8 x i32> %0, 1836 <vscale x 8 x i32> %1, 1837 i32 %2, 1838 <vscale x 8 x i1> %3, 1839 iXLen %4, iXLen 1) 1840 1841 ret <vscale x 8 x i32> %a 1842} 1843 1844declare <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32( 1845 <vscale x 16 x i32>, 1846 <vscale x 16 x i32>, 1847 i32, 1848 iXLen); 1849 1850define <vscale x 16 x i32> @intrinsic_vmax_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind { 1851; CHECK-LABEL: intrinsic_vmax_vx_nxv16i32_nxv16i32_i32: 1852; CHECK: # %bb.0: # %entry 1853; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma 1854; CHECK-NEXT: vmax.vx v8, v8, a0 1855; CHECK-NEXT: ret 1856entry: 1857 %a = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32( 1858 <vscale x 16 x i32> undef, 1859 <vscale x 16 x i32> %0, 1860 i32 %1, 1861 iXLen %2) 1862 1863 ret <vscale x 16 x i32> %a 1864} 1865 1866declare <vscale x 16 x i32> 
declare <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}
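; The i64 scalar tests below diverge per target: on RV32 the 64-bit scalar
; arrives split across a0/a1, so the expected sequence spills the pair to the
; stack, splats it with a zero-stride vlse64.v, and falls back to vmax.vv;
; on RV64 the scalar fits in a0 and vmax.vx is used directly.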
declare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmax_vx_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vmax.vv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmax_vx_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmax.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmax.vv v8, v9, v10, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT:    vmax.vx v8, v9, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmax_vx_nxv2i64_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmax.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmax_vx_nxv2i64_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vmax.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmax.vv v8, v10, v12, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT:    vmax.vx v8, v10, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmax_vx_nxv4i64_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmax.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmax_vx_nxv4i64_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vmax.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmax.vv v8, v12, v16, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT:    vmax.vx v8, v12, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmax_vx_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmax.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmax_vx_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vmax.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT:    vlse64.v v24, (a0), zero
; RV32-NEXT:    vmax.vv v8, v16, v24, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; RV64-NEXT:    vmax.vx v8, v16, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}