; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen, iXLen);

define <vscale x 64 x i8> @intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 64 x i8> @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen, iXLen);

define <vscale x 32 x i16> @intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x i16> @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen, iXLen);

define <vscale x 16 x i32> @intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i32> @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen, iXLen);

define <vscale x 1 x i64> @intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i64> @intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen, iXLen);

define <vscale x 2 x i64> @intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i64> @intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen, iXLen);

define <vscale x 4 x i64> @intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i64> @intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen, iXLen);

define <vscale x 8 x i64> @intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vasub.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i64> @intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vasub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vasub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vasub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vasub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vasub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vasub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vasub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vasub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vasub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vasub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vasub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vasub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  iXLen, iXLen);

define <vscale x 64 x i8> @intrinsic_vasub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vasub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 64 x i8> @intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 %2,
    <vscale x 64 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vasub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vasub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vasub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vasub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vasub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vasub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vasub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vasub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vasub_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vasub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  iXLen, iXLen);

define <vscale x 32 x i16> @intrinsic_vasub_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vasub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x i16> @intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 %2,
    <vscale x 32 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vasub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vasub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vasub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vasub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vasub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vasub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vasub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vasub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    iXLen 1, iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  iXLen, iXLen);

define <vscale x 16 x i32> @intrinsic_vasub_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vasub_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
%entry 1892; CHECK-NEXT: csrwi vxrm, 0 1893; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma 1894; CHECK-NEXT: vasub.vx v8, v8, a0 1895; CHECK-NEXT: ret 1896entry: 1897 %a = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32( 1898 <vscale x 16 x i32> undef, 1899 <vscale x 16 x i32> %0, 1900 i32 %1, 1901 iXLen 0, iXLen %2) 1902 1903 ret <vscale x 16 x i32> %a 1904} 1905 1906declare <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32( 1907 <vscale x 16 x i32>, 1908 <vscale x 16 x i32>, 1909 i32, 1910 <vscale x 16 x i1>, 1911 iXLen, iXLen, iXLen); 1912 1913define <vscale x 16 x i32> @intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { 1914; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32: 1915; CHECK: # %bb.0: # %entry 1916; CHECK-NEXT: csrwi vxrm, 1 1917; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu 1918; CHECK-NEXT: vasub.vx v8, v16, a0, v0.t 1919; CHECK-NEXT: ret 1920entry: 1921 %a = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32( 1922 <vscale x 16 x i32> %0, 1923 <vscale x 16 x i32> %1, 1924 i32 %2, 1925 <vscale x 16 x i1> %3, 1926 iXLen 1, iXLen %4, iXLen 1) 1927 1928 ret <vscale x 16 x i32> %a 1929} 1930 1931declare <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64( 1932 <vscale x 1 x i64>, 1933 <vscale x 1 x i64>, 1934 i64, 1935 iXLen, iXLen); 1936 1937define <vscale x 1 x i64> @intrinsic_vasub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind { 1938; RV32-LABEL: intrinsic_vasub_vx_nxv1i64_nxv1i64_i64: 1939; RV32: # %bb.0: # %entry 1940; RV32-NEXT: addi sp, sp, -16 1941; RV32-NEXT: sw a0, 8(sp) 1942; RV32-NEXT: sw a1, 12(sp) 1943; RV32-NEXT: addi a0, sp, 8 1944; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma 1945; RV32-NEXT: vlse64.v v9, (a0), zero 1946; RV32-NEXT: csrwi vxrm, 0 1947; RV32-NEXT: vasub.vv v8, v8, v9 1948; RV32-NEXT: addi sp, sp, 16 1949; RV32-NEXT: ret 1950; 1951; RV64-LABEL: intrinsic_vasub_vx_nxv1i64_nxv1i64_i64: 1952; RV64: # %bb.0: # %entry 1953; RV64-NEXT: csrwi vxrm, 0 1954; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma 1955; RV64-NEXT: vasub.vx v8, v8, a0 1956; RV64-NEXT: ret 1957entry: 1958 %a = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64( 1959 <vscale x 1 x i64> undef, 1960 <vscale x 1 x i64> %0, 1961 i64 %1, 1962 iXLen 0, iXLen %2) 1963 1964 ret <vscale x 1 x i64> %a 1965} 1966 1967declare <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64( 1968 <vscale x 1 x i64>, 1969 <vscale x 1 x i64>, 1970 i64, 1971 <vscale x 1 x i1>, 1972 iXLen, iXLen, iXLen); 1973 1974define <vscale x 1 x i64> @intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { 1975; RV32-LABEL: intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64: 1976; RV32: # %bb.0: # %entry 1977; RV32-NEXT: addi sp, sp, -16 1978; RV32-NEXT: sw a0, 8(sp) 1979; RV32-NEXT: sw a1, 12(sp) 1980; RV32-NEXT: addi a0, sp, 8 1981; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu 1982; RV32-NEXT: vlse64.v v10, (a0), zero 1983; RV32-NEXT: csrwi vxrm, 1 1984; RV32-NEXT: vasub.vv v8, v9, v10, v0.t 1985; RV32-NEXT: addi sp, sp, 16 1986; RV32-NEXT: ret 1987; 1988; RV64-LABEL: intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64: 1989; RV64: # %bb.0: # %entry 1990; RV64-NEXT: csrwi vxrm, 1 1991; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu 1992; RV64-NEXT: vasub.vx v8, v9, a0, v0.t 1993; RV64-NEXT: ret 1994entry: 1995 %a = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64( 1996 <vscale x 1 x i64> %0, 
1997 <vscale x 1 x i64> %1, 1998 i64 %2, 1999 <vscale x 1 x i1> %3, 2000 iXLen 1, iXLen %4, iXLen 1) 2001 2002 ret <vscale x 1 x i64> %a 2003} 2004 2005declare <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64( 2006 <vscale x 2 x i64>, 2007 <vscale x 2 x i64>, 2008 i64, 2009 iXLen, iXLen); 2010 2011define <vscale x 2 x i64> @intrinsic_vasub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind { 2012; RV32-LABEL: intrinsic_vasub_vx_nxv2i64_nxv2i64_i64: 2013; RV32: # %bb.0: # %entry 2014; RV32-NEXT: addi sp, sp, -16 2015; RV32-NEXT: sw a0, 8(sp) 2016; RV32-NEXT: sw a1, 12(sp) 2017; RV32-NEXT: addi a0, sp, 8 2018; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma 2019; RV32-NEXT: vlse64.v v10, (a0), zero 2020; RV32-NEXT: csrwi vxrm, 0 2021; RV32-NEXT: vasub.vv v8, v8, v10 2022; RV32-NEXT: addi sp, sp, 16 2023; RV32-NEXT: ret 2024; 2025; RV64-LABEL: intrinsic_vasub_vx_nxv2i64_nxv2i64_i64: 2026; RV64: # %bb.0: # %entry 2027; RV64-NEXT: csrwi vxrm, 0 2028; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma 2029; RV64-NEXT: vasub.vx v8, v8, a0 2030; RV64-NEXT: ret 2031entry: 2032 %a = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64( 2033 <vscale x 2 x i64> undef, 2034 <vscale x 2 x i64> %0, 2035 i64 %1, 2036 iXLen 0, iXLen %2) 2037 2038 ret <vscale x 2 x i64> %a 2039} 2040 2041declare <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.i64( 2042 <vscale x 2 x i64>, 2043 <vscale x 2 x i64>, 2044 i64, 2045 <vscale x 2 x i1>, 2046 iXLen, iXLen, iXLen); 2047 2048define <vscale x 2 x i64> @intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 2049; RV32-LABEL: intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64: 2050; RV32: # %bb.0: # %entry 2051; RV32-NEXT: addi sp, sp, -16 2052; RV32-NEXT: sw a0, 8(sp) 2053; RV32-NEXT: sw a1, 12(sp) 2054; RV32-NEXT: addi a0, sp, 8 2055; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu 2056; RV32-NEXT: vlse64.v v12, (a0), zero 2057; RV32-NEXT: csrwi vxrm, 1 2058; RV32-NEXT: vasub.vv v8, v10, v12, v0.t 2059; RV32-NEXT: addi sp, sp, 16 2060; RV32-NEXT: ret 2061; 2062; RV64-LABEL: intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64: 2063; RV64: # %bb.0: # %entry 2064; RV64-NEXT: csrwi vxrm, 1 2065; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu 2066; RV64-NEXT: vasub.vx v8, v10, a0, v0.t 2067; RV64-NEXT: ret 2068entry: 2069 %a = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.i64( 2070 <vscale x 2 x i64> %0, 2071 <vscale x 2 x i64> %1, 2072 i64 %2, 2073 <vscale x 2 x i1> %3, 2074 iXLen 1, iXLen %4, iXLen 1) 2075 2076 ret <vscale x 2 x i64> %a 2077} 2078 2079declare <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64( 2080 <vscale x 4 x i64>, 2081 <vscale x 4 x i64>, 2082 i64, 2083 iXLen, iXLen); 2084 2085define <vscale x 4 x i64> @intrinsic_vasub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind { 2086; RV32-LABEL: intrinsic_vasub_vx_nxv4i64_nxv4i64_i64: 2087; RV32: # %bb.0: # %entry 2088; RV32-NEXT: addi sp, sp, -16 2089; RV32-NEXT: sw a0, 8(sp) 2090; RV32-NEXT: sw a1, 12(sp) 2091; RV32-NEXT: addi a0, sp, 8 2092; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma 2093; RV32-NEXT: vlse64.v v12, (a0), zero 2094; RV32-NEXT: csrwi vxrm, 0 2095; RV32-NEXT: vasub.vv v8, v8, v12 2096; RV32-NEXT: addi sp, sp, 16 2097; RV32-NEXT: ret 2098; 2099; RV64-LABEL: intrinsic_vasub_vx_nxv4i64_nxv4i64_i64: 2100; RV64: # %bb.0: # %entry 2101; RV64-NEXT: csrwi vxrm, 0 2102; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma 2103; RV64-NEXT: vasub.vx v8, v8, a0 2104; RV64-NEXT: ret 2105entry: 2106 %a 
= call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64( 2107 <vscale x 4 x i64> undef, 2108 <vscale x 4 x i64> %0, 2109 i64 %1, 2110 iXLen 0, iXLen %2) 2111 2112 ret <vscale x 4 x i64> %a 2113} 2114 2115declare <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.i64( 2116 <vscale x 4 x i64>, 2117 <vscale x 4 x i64>, 2118 i64, 2119 <vscale x 4 x i1>, 2120 iXLen, iXLen, iXLen); 2121 2122define <vscale x 4 x i64> @intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 2123; RV32-LABEL: intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64: 2124; RV32: # %bb.0: # %entry 2125; RV32-NEXT: addi sp, sp, -16 2126; RV32-NEXT: sw a0, 8(sp) 2127; RV32-NEXT: sw a1, 12(sp) 2128; RV32-NEXT: addi a0, sp, 8 2129; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu 2130; RV32-NEXT: vlse64.v v16, (a0), zero 2131; RV32-NEXT: csrwi vxrm, 1 2132; RV32-NEXT: vasub.vv v8, v12, v16, v0.t 2133; RV32-NEXT: addi sp, sp, 16 2134; RV32-NEXT: ret 2135; 2136; RV64-LABEL: intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64: 2137; RV64: # %bb.0: # %entry 2138; RV64-NEXT: csrwi vxrm, 1 2139; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu 2140; RV64-NEXT: vasub.vx v8, v12, a0, v0.t 2141; RV64-NEXT: ret 2142entry: 2143 %a = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.i64( 2144 <vscale x 4 x i64> %0, 2145 <vscale x 4 x i64> %1, 2146 i64 %2, 2147 <vscale x 4 x i1> %3, 2148 iXLen 1, iXLen %4, iXLen 1) 2149 2150 ret <vscale x 4 x i64> %a 2151} 2152 2153declare <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64( 2154 <vscale x 8 x i64>, 2155 <vscale x 8 x i64>, 2156 i64, 2157 iXLen, iXLen); 2158 2159define <vscale x 8 x i64> @intrinsic_vasub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind { 2160; RV32-LABEL: intrinsic_vasub_vx_nxv8i64_nxv8i64_i64: 2161; RV32: # %bb.0: # %entry 2162; RV32-NEXT: addi sp, sp, -16 2163; RV32-NEXT: sw a0, 8(sp) 2164; RV32-NEXT: sw a1, 12(sp) 2165; RV32-NEXT: addi a0, sp, 8 2166; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma 2167; RV32-NEXT: vlse64.v v16, (a0), zero 2168; RV32-NEXT: csrwi vxrm, 0 2169; RV32-NEXT: vasub.vv v8, v8, v16 2170; RV32-NEXT: addi sp, sp, 16 2171; RV32-NEXT: ret 2172; 2173; RV64-LABEL: intrinsic_vasub_vx_nxv8i64_nxv8i64_i64: 2174; RV64: # %bb.0: # %entry 2175; RV64-NEXT: csrwi vxrm, 0 2176; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma 2177; RV64-NEXT: vasub.vx v8, v8, a0 2178; RV64-NEXT: ret 2179entry: 2180 %a = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64( 2181 <vscale x 8 x i64> undef, 2182 <vscale x 8 x i64> %0, 2183 i64 %1, 2184 iXLen 0, iXLen %2) 2185 2186 ret <vscale x 8 x i64> %a 2187} 2188 2189declare <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.i64( 2190 <vscale x 8 x i64>, 2191 <vscale x 8 x i64>, 2192 i64, 2193 <vscale x 8 x i1>, 2194 iXLen, iXLen, iXLen); 2195 2196define <vscale x 8 x i64> @intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 2197; RV32-LABEL: intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64: 2198; RV32: # %bb.0: # %entry 2199; RV32-NEXT: addi sp, sp, -16 2200; RV32-NEXT: sw a0, 8(sp) 2201; RV32-NEXT: sw a1, 12(sp) 2202; RV32-NEXT: addi a0, sp, 8 2203; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu 2204; RV32-NEXT: vlse64.v v24, (a0), zero 2205; RV32-NEXT: csrwi vxrm, 1 2206; RV32-NEXT: vasub.vv v8, v16, v24, v0.t 2207; RV32-NEXT: addi sp, sp, 16 2208; RV32-NEXT: ret 2209; 2210; RV64-LABEL: intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64: 2211; RV64: # %bb.0: # %entry 
2212; RV64-NEXT: csrwi vxrm, 1 2213; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu 2214; RV64-NEXT: vasub.vx v8, v16, a0, v0.t 2215; RV64-NEXT: ret 2216entry: 2217 %a = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.i64( 2218 <vscale x 8 x i64> %0, 2219 <vscale x 8 x i64> %1, 2220 i64 %2, 2221 <vscale x 8 x i1> %3, 2222 iXLen 1, iXLen %4, iXLen 1) 2223 2224 ret <vscale x 8 x i64> %a 2225} 2226