1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py 2; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ 3; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 4; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ 5; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 6 7declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8( 8 <vscale x 1 x i8>, 9 <vscale x 1 x i8>, 10 <vscale x 1 x i8>, 11 iXLen); 12 13define <vscale x 1 x i8> @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind { 14; CHECK-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8: 15; CHECK: # %bb.0: # %entry 16; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma 17; CHECK-NEXT: vsub.vv v8, v8, v9 18; CHECK-NEXT: ret 19entry: 20 %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8( 21 <vscale x 1 x i8> undef, 22 <vscale x 1 x i8> %0, 23 <vscale x 1 x i8> %1, 24 iXLen %2) 25 26 ret <vscale x 1 x i8> %a 27} 28 29declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8( 30 <vscale x 1 x i8>, 31 <vscale x 1 x i8>, 32 <vscale x 1 x i8>, 33 <vscale x 1 x i1>, 34 iXLen, iXLen); 35 36define <vscale x 1 x i8> @intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { 37; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8: 38; CHECK: # %bb.0: # %entry 39; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu 40; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t 41; CHECK-NEXT: ret 42entry: 43 %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8( 44 <vscale x 1 x i8> %0, 45 <vscale x 1 x i8> %1, 46 <vscale x 1 x i8> %2, 47 <vscale x 1 x i1> %3, 48 iXLen %4, iXLen 1) 49 50 ret <vscale x 1 x i8> %a 51} 52 53declare <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8( 54 <vscale x 2 x i8>, 55 <vscale x 2 x i8>, 56 <vscale x 2 x i8>, 57 iXLen); 58 59define <vscale x 2 x i8> @intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind { 60; CHECK-LABEL: intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8: 61; CHECK: # %bb.0: # %entry 62; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma 63; CHECK-NEXT: vsub.vv v8, v8, v9 64; CHECK-NEXT: ret 65entry: 66 %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8( 67 <vscale x 2 x i8> undef, 68 <vscale x 2 x i8> %0, 69 <vscale x 2 x i8> %1, 70 iXLen %2) 71 72 ret <vscale x 2 x i8> %a 73} 74 75declare <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8( 76 <vscale x 2 x i8>, 77 <vscale x 2 x i8>, 78 <vscale x 2 x i8>, 79 <vscale x 2 x i1>, 80 iXLen, iXLen); 81 82define <vscale x 2 x i8> @intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 83; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8: 84; CHECK: # %bb.0: # %entry 85; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu 86; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t 87; CHECK-NEXT: ret 88entry: 89 %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8( 90 <vscale x 2 x i8> %0, 91 <vscale x 2 x i8> %1, 92 <vscale x 2 x i8> %2, 93 <vscale x 2 x i1> %3, 94 iXLen %4, iXLen 1) 95 96 ret <vscale x 2 x i8> %a 97} 98 99declare <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8( 100 <vscale x 4 x i8>, 101 <vscale x 4 x i8>, 102 <vscale x 4 x i8>, 103 iXLen); 104 105define <vscale x 4 x i8> @intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind { 
106; CHECK-LABEL: intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8: 107; CHECK: # %bb.0: # %entry 108; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma 109; CHECK-NEXT: vsub.vv v8, v8, v9 110; CHECK-NEXT: ret 111entry: 112 %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8( 113 <vscale x 4 x i8> undef, 114 <vscale x 4 x i8> %0, 115 <vscale x 4 x i8> %1, 116 iXLen %2) 117 118 ret <vscale x 4 x i8> %a 119} 120 121declare <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8( 122 <vscale x 4 x i8>, 123 <vscale x 4 x i8>, 124 <vscale x 4 x i8>, 125 <vscale x 4 x i1>, 126 iXLen, iXLen); 127 128define <vscale x 4 x i8> @intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 129; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8: 130; CHECK: # %bb.0: # %entry 131; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu 132; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t 133; CHECK-NEXT: ret 134entry: 135 %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8( 136 <vscale x 4 x i8> %0, 137 <vscale x 4 x i8> %1, 138 <vscale x 4 x i8> %2, 139 <vscale x 4 x i1> %3, 140 iXLen %4, iXLen 1) 141 142 ret <vscale x 4 x i8> %a 143} 144 145declare <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8( 146 <vscale x 8 x i8>, 147 <vscale x 8 x i8>, 148 <vscale x 8 x i8>, 149 iXLen); 150 151define <vscale x 8 x i8> @intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind { 152; CHECK-LABEL: intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8: 153; CHECK: # %bb.0: # %entry 154; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma 155; CHECK-NEXT: vsub.vv v8, v8, v9 156; CHECK-NEXT: ret 157entry: 158 %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8( 159 <vscale x 8 x i8> undef, 160 <vscale x 8 x i8> %0, 161 <vscale x 8 x i8> %1, 162 iXLen %2) 163 164 ret <vscale x 8 x i8> %a 165} 166 167declare <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8( 168 <vscale x 8 x i8>, 169 <vscale x 8 x i8>, 170 <vscale x 8 x i8>, 171 <vscale x 8 x i1>, 172 iXLen, iXLen); 173 174define <vscale x 8 x i8> @intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 175; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8: 176; CHECK: # %bb.0: # %entry 177; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu 178; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t 179; CHECK-NEXT: ret 180entry: 181 %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8( 182 <vscale x 8 x i8> %0, 183 <vscale x 8 x i8> %1, 184 <vscale x 8 x i8> %2, 185 <vscale x 8 x i1> %3, 186 iXLen %4, iXLen 1) 187 188 ret <vscale x 8 x i8> %a 189} 190 191declare <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8( 192 <vscale x 16 x i8>, 193 <vscale x 16 x i8>, 194 <vscale x 16 x i8>, 195 iXLen); 196 197define <vscale x 16 x i8> @intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind { 198; CHECK-LABEL: intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8: 199; CHECK: # %bb.0: # %entry 200; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma 201; CHECK-NEXT: vsub.vv v8, v8, v10 202; CHECK-NEXT: ret 203entry: 204 %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8( 205 <vscale x 16 x i8> undef, 206 <vscale x 16 x i8> %0, 207 <vscale x 16 x i8> %1, 208 iXLen %2) 209 210 ret <vscale x 16 x i8> %a 211} 212 213declare <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8( 214 <vscale x 16 x i8>, 215 <vscale x 16 x i8>, 
216 <vscale x 16 x i8>, 217 <vscale x 16 x i1>, 218 iXLen, iXLen); 219 220define <vscale x 16 x i8> @intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { 221; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8: 222; CHECK: # %bb.0: # %entry 223; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu 224; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t 225; CHECK-NEXT: ret 226entry: 227 %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8( 228 <vscale x 16 x i8> %0, 229 <vscale x 16 x i8> %1, 230 <vscale x 16 x i8> %2, 231 <vscale x 16 x i1> %3, 232 iXLen %4, iXLen 1) 233 234 ret <vscale x 16 x i8> %a 235} 236 237declare <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8( 238 <vscale x 32 x i8>, 239 <vscale x 32 x i8>, 240 <vscale x 32 x i8>, 241 iXLen); 242 243define <vscale x 32 x i8> @intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind { 244; CHECK-LABEL: intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8: 245; CHECK: # %bb.0: # %entry 246; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma 247; CHECK-NEXT: vsub.vv v8, v8, v12 248; CHECK-NEXT: ret 249entry: 250 %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8( 251 <vscale x 32 x i8> undef, 252 <vscale x 32 x i8> %0, 253 <vscale x 32 x i8> %1, 254 iXLen %2) 255 256 ret <vscale x 32 x i8> %a 257} 258 259declare <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8( 260 <vscale x 32 x i8>, 261 <vscale x 32 x i8>, 262 <vscale x 32 x i8>, 263 <vscale x 32 x i1>, 264 iXLen, iXLen); 265 266define <vscale x 32 x i8> @intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { 267; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8: 268; CHECK: # %bb.0: # %entry 269; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu 270; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t 271; CHECK-NEXT: ret 272entry: 273 %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8( 274 <vscale x 32 x i8> %0, 275 <vscale x 32 x i8> %1, 276 <vscale x 32 x i8> %2, 277 <vscale x 32 x i1> %3, 278 iXLen %4, iXLen 1) 279 280 ret <vscale x 32 x i8> %a 281} 282 283declare <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8( 284 <vscale x 64 x i8>, 285 <vscale x 64 x i8>, 286 <vscale x 64 x i8>, 287 iXLen); 288 289define <vscale x 64 x i8> @intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind { 290; CHECK-LABEL: intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8: 291; CHECK: # %bb.0: # %entry 292; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma 293; CHECK-NEXT: vsub.vv v8, v8, v16 294; CHECK-NEXT: ret 295entry: 296 %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8( 297 <vscale x 64 x i8> undef, 298 <vscale x 64 x i8> %0, 299 <vscale x 64 x i8> %1, 300 iXLen %2) 301 302 ret <vscale x 64 x i8> %a 303} 304 305declare <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8( 306 <vscale x 64 x i8>, 307 <vscale x 64 x i8>, 308 <vscale x 64 x i8>, 309 <vscale x 64 x i1>, 310 iXLen, iXLen); 311 312define <vscale x 64 x i8> @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind { 313; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8: 314; CHECK: # %bb.0: # %entry 315; CHECK-NEXT: vl8r.v v24, (a0) 316; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu 317; 
CHECK-NEXT: vsub.vv v8, v16, v24, v0.t 318; CHECK-NEXT: ret 319entry: 320 %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8( 321 <vscale x 64 x i8> %0, 322 <vscale x 64 x i8> %1, 323 <vscale x 64 x i8> %2, 324 <vscale x 64 x i1> %3, 325 iXLen %4, iXLen 1) 326 327 ret <vscale x 64 x i8> %a 328} 329 330declare <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16( 331 <vscale x 1 x i16>, 332 <vscale x 1 x i16>, 333 <vscale x 1 x i16>, 334 iXLen); 335 336define <vscale x 1 x i16> @intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind { 337; CHECK-LABEL: intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16: 338; CHECK: # %bb.0: # %entry 339; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma 340; CHECK-NEXT: vsub.vv v8, v8, v9 341; CHECK-NEXT: ret 342entry: 343 %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16( 344 <vscale x 1 x i16> undef, 345 <vscale x 1 x i16> %0, 346 <vscale x 1 x i16> %1, 347 iXLen %2) 348 349 ret <vscale x 1 x i16> %a 350} 351 352declare <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16( 353 <vscale x 1 x i16>, 354 <vscale x 1 x i16>, 355 <vscale x 1 x i16>, 356 <vscale x 1 x i1>, 357 iXLen, iXLen); 358 359define <vscale x 1 x i16> @intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { 360; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16: 361; CHECK: # %bb.0: # %entry 362; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu 363; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t 364; CHECK-NEXT: ret 365entry: 366 %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16( 367 <vscale x 1 x i16> %0, 368 <vscale x 1 x i16> %1, 369 <vscale x 1 x i16> %2, 370 <vscale x 1 x i1> %3, 371 iXLen %4, iXLen 1) 372 373 ret <vscale x 1 x i16> %a 374} 375 376declare <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16( 377 <vscale x 2 x i16>, 378 <vscale x 2 x i16>, 379 <vscale x 2 x i16>, 380 iXLen); 381 382define <vscale x 2 x i16> @intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind { 383; CHECK-LABEL: intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16: 384; CHECK: # %bb.0: # %entry 385; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma 386; CHECK-NEXT: vsub.vv v8, v8, v9 387; CHECK-NEXT: ret 388entry: 389 %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16( 390 <vscale x 2 x i16> undef, 391 <vscale x 2 x i16> %0, 392 <vscale x 2 x i16> %1, 393 iXLen %2) 394 395 ret <vscale x 2 x i16> %a 396} 397 398declare <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16( 399 <vscale x 2 x i16>, 400 <vscale x 2 x i16>, 401 <vscale x 2 x i16>, 402 <vscale x 2 x i1>, 403 iXLen, iXLen); 404 405define <vscale x 2 x i16> @intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 406; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16: 407; CHECK: # %bb.0: # %entry 408; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu 409; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t 410; CHECK-NEXT: ret 411entry: 412 %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16( 413 <vscale x 2 x i16> %0, 414 <vscale x 2 x i16> %1, 415 <vscale x 2 x i16> %2, 416 <vscale x 2 x i1> %3, 417 iXLen %4, iXLen 1) 418 419 ret <vscale x 2 x i16> %a 420} 421 422declare <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16( 423 <vscale x 4 x i16>, 424 <vscale x 4 x i16>, 425 <vscale x 4 x 
i16>, 426 iXLen); 427 428define <vscale x 4 x i16> @intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind { 429; CHECK-LABEL: intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16: 430; CHECK: # %bb.0: # %entry 431; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma 432; CHECK-NEXT: vsub.vv v8, v8, v9 433; CHECK-NEXT: ret 434entry: 435 %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16( 436 <vscale x 4 x i16> undef, 437 <vscale x 4 x i16> %0, 438 <vscale x 4 x i16> %1, 439 iXLen %2) 440 441 ret <vscale x 4 x i16> %a 442} 443 444declare <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16( 445 <vscale x 4 x i16>, 446 <vscale x 4 x i16>, 447 <vscale x 4 x i16>, 448 <vscale x 4 x i1>, 449 iXLen, iXLen); 450 451define <vscale x 4 x i16> @intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 452; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16: 453; CHECK: # %bb.0: # %entry 454; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu 455; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t 456; CHECK-NEXT: ret 457entry: 458 %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16( 459 <vscale x 4 x i16> %0, 460 <vscale x 4 x i16> %1, 461 <vscale x 4 x i16> %2, 462 <vscale x 4 x i1> %3, 463 iXLen %4, iXLen 1) 464 465 ret <vscale x 4 x i16> %a 466} 467 468declare <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16( 469 <vscale x 8 x i16>, 470 <vscale x 8 x i16>, 471 <vscale x 8 x i16>, 472 iXLen); 473 474define <vscale x 8 x i16> @intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind { 475; CHECK-LABEL: intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16: 476; CHECK: # %bb.0: # %entry 477; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma 478; CHECK-NEXT: vsub.vv v8, v8, v10 479; CHECK-NEXT: ret 480entry: 481 %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16( 482 <vscale x 8 x i16> undef, 483 <vscale x 8 x i16> %0, 484 <vscale x 8 x i16> %1, 485 iXLen %2) 486 487 ret <vscale x 8 x i16> %a 488} 489 490declare <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16( 491 <vscale x 8 x i16>, 492 <vscale x 8 x i16>, 493 <vscale x 8 x i16>, 494 <vscale x 8 x i1>, 495 iXLen, iXLen); 496 497define <vscale x 8 x i16> @intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 498; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16: 499; CHECK: # %bb.0: # %entry 500; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu 501; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t 502; CHECK-NEXT: ret 503entry: 504 %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16( 505 <vscale x 8 x i16> %0, 506 <vscale x 8 x i16> %1, 507 <vscale x 8 x i16> %2, 508 <vscale x 8 x i1> %3, 509 iXLen %4, iXLen 1) 510 511 ret <vscale x 8 x i16> %a 512} 513 514declare <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16( 515 <vscale x 16 x i16>, 516 <vscale x 16 x i16>, 517 <vscale x 16 x i16>, 518 iXLen); 519 520define <vscale x 16 x i16> @intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind { 521; CHECK-LABEL: intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16: 522; CHECK: # %bb.0: # %entry 523; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma 524; CHECK-NEXT: vsub.vv v8, v8, v12 525; CHECK-NEXT: ret 526entry: 527 %a = call <vscale x 16 x i16> 
@llvm.riscv.vsub.nxv16i16.nxv16i16( 528 <vscale x 16 x i16> undef, 529 <vscale x 16 x i16> %0, 530 <vscale x 16 x i16> %1, 531 iXLen %2) 532 533 ret <vscale x 16 x i16> %a 534} 535 536declare <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16( 537 <vscale x 16 x i16>, 538 <vscale x 16 x i16>, 539 <vscale x 16 x i16>, 540 <vscale x 16 x i1>, 541 iXLen, iXLen); 542 543define <vscale x 16 x i16> @intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { 544; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16: 545; CHECK: # %bb.0: # %entry 546; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu 547; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t 548; CHECK-NEXT: ret 549entry: 550 %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16( 551 <vscale x 16 x i16> %0, 552 <vscale x 16 x i16> %1, 553 <vscale x 16 x i16> %2, 554 <vscale x 16 x i1> %3, 555 iXLen %4, iXLen 1) 556 557 ret <vscale x 16 x i16> %a 558} 559 560declare <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16( 561 <vscale x 32 x i16>, 562 <vscale x 32 x i16>, 563 <vscale x 32 x i16>, 564 iXLen); 565 566define <vscale x 32 x i16> @intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind { 567; CHECK-LABEL: intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16: 568; CHECK: # %bb.0: # %entry 569; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma 570; CHECK-NEXT: vsub.vv v8, v8, v16 571; CHECK-NEXT: ret 572entry: 573 %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16( 574 <vscale x 32 x i16> undef, 575 <vscale x 32 x i16> %0, 576 <vscale x 32 x i16> %1, 577 iXLen %2) 578 579 ret <vscale x 32 x i16> %a 580} 581 582declare <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16( 583 <vscale x 32 x i16>, 584 <vscale x 32 x i16>, 585 <vscale x 32 x i16>, 586 <vscale x 32 x i1>, 587 iXLen, iXLen); 588 589define <vscale x 32 x i16> @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { 590; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16: 591; CHECK: # %bb.0: # %entry 592; CHECK-NEXT: vl8re16.v v24, (a0) 593; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu 594; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t 595; CHECK-NEXT: ret 596entry: 597 %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16( 598 <vscale x 32 x i16> %0, 599 <vscale x 32 x i16> %1, 600 <vscale x 32 x i16> %2, 601 <vscale x 32 x i1> %3, 602 iXLen %4, iXLen 1) 603 604 ret <vscale x 32 x i16> %a 605} 606 607declare <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32( 608 <vscale x 1 x i32>, 609 <vscale x 1 x i32>, 610 <vscale x 1 x i32>, 611 iXLen); 612 613define <vscale x 1 x i32> @intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { 614; CHECK-LABEL: intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32: 615; CHECK: # %bb.0: # %entry 616; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma 617; CHECK-NEXT: vsub.vv v8, v8, v9 618; CHECK-NEXT: ret 619entry: 620 %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32( 621 <vscale x 1 x i32> undef, 622 <vscale x 1 x i32> %0, 623 <vscale x 1 x i32> %1, 624 iXLen %2) 625 626 ret <vscale x 1 x i32> %a 627} 628 629declare <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32( 630 <vscale x 1 x i32>, 631 <vscale x 1 x i32>, 632 <vscale x 1 x i32>, 633 <vscale x 1 x 
i1>, 634 iXLen, iXLen); 635 636define <vscale x 1 x i32> @intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { 637; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32: 638; CHECK: # %bb.0: # %entry 639; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu 640; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t 641; CHECK-NEXT: ret 642entry: 643 %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32( 644 <vscale x 1 x i32> %0, 645 <vscale x 1 x i32> %1, 646 <vscale x 1 x i32> %2, 647 <vscale x 1 x i1> %3, 648 iXLen %4, iXLen 1) 649 650 ret <vscale x 1 x i32> %a 651} 652 653declare <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32( 654 <vscale x 2 x i32>, 655 <vscale x 2 x i32>, 656 <vscale x 2 x i32>, 657 iXLen); 658 659define <vscale x 2 x i32> @intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind { 660; CHECK-LABEL: intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32: 661; CHECK: # %bb.0: # %entry 662; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma 663; CHECK-NEXT: vsub.vv v8, v8, v9 664; CHECK-NEXT: ret 665entry: 666 %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32( 667 <vscale x 2 x i32> undef, 668 <vscale x 2 x i32> %0, 669 <vscale x 2 x i32> %1, 670 iXLen %2) 671 672 ret <vscale x 2 x i32> %a 673} 674 675declare <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32( 676 <vscale x 2 x i32>, 677 <vscale x 2 x i32>, 678 <vscale x 2 x i32>, 679 <vscale x 2 x i1>, 680 iXLen, iXLen); 681 682define <vscale x 2 x i32> @intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 683; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32: 684; CHECK: # %bb.0: # %entry 685; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu 686; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t 687; CHECK-NEXT: ret 688entry: 689 %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32( 690 <vscale x 2 x i32> %0, 691 <vscale x 2 x i32> %1, 692 <vscale x 2 x i32> %2, 693 <vscale x 2 x i1> %3, 694 iXLen %4, iXLen 1) 695 696 ret <vscale x 2 x i32> %a 697} 698 699declare <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32( 700 <vscale x 4 x i32>, 701 <vscale x 4 x i32>, 702 <vscale x 4 x i32>, 703 iXLen); 704 705define <vscale x 4 x i32> @intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind { 706; CHECK-LABEL: intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32: 707; CHECK: # %bb.0: # %entry 708; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma 709; CHECK-NEXT: vsub.vv v8, v8, v10 710; CHECK-NEXT: ret 711entry: 712 %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32( 713 <vscale x 4 x i32> undef, 714 <vscale x 4 x i32> %0, 715 <vscale x 4 x i32> %1, 716 iXLen %2) 717 718 ret <vscale x 4 x i32> %a 719} 720 721declare <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32( 722 <vscale x 4 x i32>, 723 <vscale x 4 x i32>, 724 <vscale x 4 x i32>, 725 <vscale x 4 x i1>, 726 iXLen, iXLen); 727 728define <vscale x 4 x i32> @intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 729; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32: 730; CHECK: # %bb.0: # %entry 731; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu 732; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t 733; CHECK-NEXT: ret 734entry: 735 %a = call 
<vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32( 736 <vscale x 4 x i32> %0, 737 <vscale x 4 x i32> %1, 738 <vscale x 4 x i32> %2, 739 <vscale x 4 x i1> %3, 740 iXLen %4, iXLen 1) 741 742 ret <vscale x 4 x i32> %a 743} 744 745declare <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32( 746 <vscale x 8 x i32>, 747 <vscale x 8 x i32>, 748 <vscale x 8 x i32>, 749 iXLen); 750 751define <vscale x 8 x i32> @intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind { 752; CHECK-LABEL: intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32: 753; CHECK: # %bb.0: # %entry 754; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma 755; CHECK-NEXT: vsub.vv v8, v8, v12 756; CHECK-NEXT: ret 757entry: 758 %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32( 759 <vscale x 8 x i32> undef, 760 <vscale x 8 x i32> %0, 761 <vscale x 8 x i32> %1, 762 iXLen %2) 763 764 ret <vscale x 8 x i32> %a 765} 766 767declare <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32( 768 <vscale x 8 x i32>, 769 <vscale x 8 x i32>, 770 <vscale x 8 x i32>, 771 <vscale x 8 x i1>, 772 iXLen, iXLen); 773 774define <vscale x 8 x i32> @intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 775; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32: 776; CHECK: # %bb.0: # %entry 777; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu 778; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t 779; CHECK-NEXT: ret 780entry: 781 %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32( 782 <vscale x 8 x i32> %0, 783 <vscale x 8 x i32> %1, 784 <vscale x 8 x i32> %2, 785 <vscale x 8 x i1> %3, 786 iXLen %4, iXLen 1) 787 788 ret <vscale x 8 x i32> %a 789} 790 791declare <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32( 792 <vscale x 16 x i32>, 793 <vscale x 16 x i32>, 794 <vscale x 16 x i32>, 795 iXLen); 796 797define <vscale x 16 x i32> @intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind { 798; CHECK-LABEL: intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32: 799; CHECK: # %bb.0: # %entry 800; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma 801; CHECK-NEXT: vsub.vv v8, v8, v16 802; CHECK-NEXT: ret 803entry: 804 %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32( 805 <vscale x 16 x i32> undef, 806 <vscale x 16 x i32> %0, 807 <vscale x 16 x i32> %1, 808 iXLen %2) 809 810 ret <vscale x 16 x i32> %a 811} 812 813declare <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32( 814 <vscale x 16 x i32>, 815 <vscale x 16 x i32>, 816 <vscale x 16 x i32>, 817 <vscale x 16 x i1>, 818 iXLen, iXLen); 819 820define <vscale x 16 x i32> @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { 821; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32: 822; CHECK: # %bb.0: # %entry 823; CHECK-NEXT: vl8re32.v v24, (a0) 824; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu 825; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t 826; CHECK-NEXT: ret 827entry: 828 %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32( 829 <vscale x 16 x i32> %0, 830 <vscale x 16 x i32> %1, 831 <vscale x 16 x i32> %2, 832 <vscale x 16 x i1> %3, 833 iXLen %4, iXLen 1) 834 835 ret <vscale x 16 x i32> %a 836} 837 838declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64( 839 <vscale x 1 x i64>, 840 <vscale x 1 x i64>, 841 <vscale x 1 x 
i64>, 842 iXLen); 843 844define <vscale x 1 x i64> @intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind { 845; CHECK-LABEL: intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64: 846; CHECK: # %bb.0: # %entry 847; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma 848; CHECK-NEXT: vsub.vv v8, v8, v9 849; CHECK-NEXT: ret 850entry: 851 %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64( 852 <vscale x 1 x i64> undef, 853 <vscale x 1 x i64> %0, 854 <vscale x 1 x i64> %1, 855 iXLen %2) 856 857 ret <vscale x 1 x i64> %a 858} 859 860declare <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64( 861 <vscale x 1 x i64>, 862 <vscale x 1 x i64>, 863 <vscale x 1 x i64>, 864 <vscale x 1 x i1>, 865 iXLen, iXLen); 866 867define <vscale x 1 x i64> @intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { 868; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64: 869; CHECK: # %bb.0: # %entry 870; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu 871; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t 872; CHECK-NEXT: ret 873entry: 874 %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64( 875 <vscale x 1 x i64> %0, 876 <vscale x 1 x i64> %1, 877 <vscale x 1 x i64> %2, 878 <vscale x 1 x i1> %3, 879 iXLen %4, iXLen 1) 880 881 ret <vscale x 1 x i64> %a 882} 883 884declare <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64( 885 <vscale x 2 x i64>, 886 <vscale x 2 x i64>, 887 <vscale x 2 x i64>, 888 iXLen); 889 890define <vscale x 2 x i64> @intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind { 891; CHECK-LABEL: intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64: 892; CHECK: # %bb.0: # %entry 893; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma 894; CHECK-NEXT: vsub.vv v8, v8, v10 895; CHECK-NEXT: ret 896entry: 897 %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64( 898 <vscale x 2 x i64> undef, 899 <vscale x 2 x i64> %0, 900 <vscale x 2 x i64> %1, 901 iXLen %2) 902 903 ret <vscale x 2 x i64> %a 904} 905 906declare <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64( 907 <vscale x 2 x i64>, 908 <vscale x 2 x i64>, 909 <vscale x 2 x i64>, 910 <vscale x 2 x i1>, 911 iXLen, iXLen); 912 913define <vscale x 2 x i64> @intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 914; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64: 915; CHECK: # %bb.0: # %entry 916; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu 917; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t 918; CHECK-NEXT: ret 919entry: 920 %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64( 921 <vscale x 2 x i64> %0, 922 <vscale x 2 x i64> %1, 923 <vscale x 2 x i64> %2, 924 <vscale x 2 x i1> %3, 925 iXLen %4, iXLen 1) 926 927 ret <vscale x 2 x i64> %a 928} 929 930declare <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64( 931 <vscale x 4 x i64>, 932 <vscale x 4 x i64>, 933 <vscale x 4 x i64>, 934 iXLen); 935 936define <vscale x 4 x i64> @intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind { 937; CHECK-LABEL: intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64: 938; CHECK: # %bb.0: # %entry 939; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma 940; CHECK-NEXT: vsub.vv v8, v8, v12 941; CHECK-NEXT: ret 942entry: 943 %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64( 944 <vscale x 
4 x i64> undef, 945 <vscale x 4 x i64> %0, 946 <vscale x 4 x i64> %1, 947 iXLen %2) 948 949 ret <vscale x 4 x i64> %a 950} 951 952declare <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64( 953 <vscale x 4 x i64>, 954 <vscale x 4 x i64>, 955 <vscale x 4 x i64>, 956 <vscale x 4 x i1>, 957 iXLen, iXLen); 958 959define <vscale x 4 x i64> @intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 960; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64: 961; CHECK: # %bb.0: # %entry 962; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu 963; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t 964; CHECK-NEXT: ret 965entry: 966 %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64( 967 <vscale x 4 x i64> %0, 968 <vscale x 4 x i64> %1, 969 <vscale x 4 x i64> %2, 970 <vscale x 4 x i1> %3, 971 iXLen %4, iXLen 1) 972 973 ret <vscale x 4 x i64> %a 974} 975 976declare <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64( 977 <vscale x 8 x i64>, 978 <vscale x 8 x i64>, 979 <vscale x 8 x i64>, 980 iXLen); 981 982define <vscale x 8 x i64> @intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind { 983; CHECK-LABEL: intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64: 984; CHECK: # %bb.0: # %entry 985; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma 986; CHECK-NEXT: vsub.vv v8, v8, v16 987; CHECK-NEXT: ret 988entry: 989 %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64( 990 <vscale x 8 x i64> undef, 991 <vscale x 8 x i64> %0, 992 <vscale x 8 x i64> %1, 993 iXLen %2) 994 995 ret <vscale x 8 x i64> %a 996} 997 998declare <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64( 999 <vscale x 8 x i64>, 1000 <vscale x 8 x i64>, 1001 <vscale x 8 x i64>, 1002 <vscale x 8 x i1>, 1003 iXLen, iXLen); 1004 1005define <vscale x 8 x i64> @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 1006; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64: 1007; CHECK: # %bb.0: # %entry 1008; CHECK-NEXT: vl8re64.v v24, (a0) 1009; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu 1010; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t 1011; CHECK-NEXT: ret 1012entry: 1013 %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64( 1014 <vscale x 8 x i64> %0, 1015 <vscale x 8 x i64> %1, 1016 <vscale x 8 x i64> %2, 1017 <vscale x 8 x i1> %3, 1018 iXLen %4, iXLen 1) 1019 1020 ret <vscale x 8 x i64> %a 1021} 1022 1023declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8( 1024 <vscale x 1 x i8>, 1025 <vscale x 1 x i8>, 1026 i8, 1027 iXLen); 1028 1029define <vscale x 1 x i8> @intrinsic_vsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind { 1030; CHECK-LABEL: intrinsic_vsub_vx_nxv1i8_nxv1i8_i8: 1031; CHECK: # %bb.0: # %entry 1032; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 1033; CHECK-NEXT: vsub.vx v8, v8, a0 1034; CHECK-NEXT: ret 1035entry: 1036 %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8( 1037 <vscale x 1 x i8> undef, 1038 <vscale x 1 x i8> %0, 1039 i8 %1, 1040 iXLen %2) 1041 1042 ret <vscale x 1 x i8> %a 1043} 1044 1045declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8( 1046 <vscale x 1 x i8>, 1047 <vscale x 1 x i8>, 1048 i8, 1049 <vscale x 1 x i1>, 1050 iXLen, iXLen); 1051 1052define <vscale x 1 x i8> @intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) 
nounwind { 1053; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8: 1054; CHECK: # %bb.0: # %entry 1055; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu 1056; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t 1057; CHECK-NEXT: ret 1058entry: 1059 %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8( 1060 <vscale x 1 x i8> %0, 1061 <vscale x 1 x i8> %1, 1062 i8 %2, 1063 <vscale x 1 x i1> %3, 1064 iXLen %4, iXLen 1) 1065 1066 ret <vscale x 1 x i8> %a 1067} 1068 1069declare <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8( 1070 <vscale x 2 x i8>, 1071 <vscale x 2 x i8>, 1072 i8, 1073 iXLen); 1074 1075define <vscale x 2 x i8> @intrinsic_vsub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind { 1076; CHECK-LABEL: intrinsic_vsub_vx_nxv2i8_nxv2i8_i8: 1077; CHECK: # %bb.0: # %entry 1078; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 1079; CHECK-NEXT: vsub.vx v8, v8, a0 1080; CHECK-NEXT: ret 1081entry: 1082 %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8( 1083 <vscale x 2 x i8> undef, 1084 <vscale x 2 x i8> %0, 1085 i8 %1, 1086 iXLen %2) 1087 1088 ret <vscale x 2 x i8> %a 1089} 1090 1091declare <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8( 1092 <vscale x 2 x i8>, 1093 <vscale x 2 x i8>, 1094 i8, 1095 <vscale x 2 x i1>, 1096 iXLen, iXLen); 1097 1098define <vscale x 2 x i8> @intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 1099; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8: 1100; CHECK: # %bb.0: # %entry 1101; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu 1102; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t 1103; CHECK-NEXT: ret 1104entry: 1105 %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8( 1106 <vscale x 2 x i8> %0, 1107 <vscale x 2 x i8> %1, 1108 i8 %2, 1109 <vscale x 2 x i1> %3, 1110 iXLen %4, iXLen 1) 1111 1112 ret <vscale x 2 x i8> %a 1113} 1114 1115declare <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8( 1116 <vscale x 4 x i8>, 1117 <vscale x 4 x i8>, 1118 i8, 1119 iXLen); 1120 1121define <vscale x 4 x i8> @intrinsic_vsub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind { 1122; CHECK-LABEL: intrinsic_vsub_vx_nxv4i8_nxv4i8_i8: 1123; CHECK: # %bb.0: # %entry 1124; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 1125; CHECK-NEXT: vsub.vx v8, v8, a0 1126; CHECK-NEXT: ret 1127entry: 1128 %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8( 1129 <vscale x 4 x i8> undef, 1130 <vscale x 4 x i8> %0, 1131 i8 %1, 1132 iXLen %2) 1133 1134 ret <vscale x 4 x i8> %a 1135} 1136 1137declare <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8( 1138 <vscale x 4 x i8>, 1139 <vscale x 4 x i8>, 1140 i8, 1141 <vscale x 4 x i1>, 1142 iXLen, iXLen); 1143 1144define <vscale x 4 x i8> @intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 1145; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8: 1146; CHECK: # %bb.0: # %entry 1147; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu 1148; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t 1149; CHECK-NEXT: ret 1150entry: 1151 %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8( 1152 <vscale x 4 x i8> %0, 1153 <vscale x 4 x i8> %1, 1154 i8 %2, 1155 <vscale x 4 x i1> %3, 1156 iXLen %4, iXLen 1) 1157 1158 ret <vscale x 4 x i8> %a 1159} 1160 1161declare <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8( 1162 <vscale x 8 x i8>, 1163 <vscale x 8 x i8>, 1164 i8, 1165 iXLen); 1166 1167define <vscale x 8 x i8> @intrinsic_vsub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) 
nounwind { 1168; CHECK-LABEL: intrinsic_vsub_vx_nxv8i8_nxv8i8_i8: 1169; CHECK: # %bb.0: # %entry 1170; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 1171; CHECK-NEXT: vsub.vx v8, v8, a0 1172; CHECK-NEXT: ret 1173entry: 1174 %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8( 1175 <vscale x 8 x i8> undef, 1176 <vscale x 8 x i8> %0, 1177 i8 %1, 1178 iXLen %2) 1179 1180 ret <vscale x 8 x i8> %a 1181} 1182 1183declare <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8( 1184 <vscale x 8 x i8>, 1185 <vscale x 8 x i8>, 1186 i8, 1187 <vscale x 8 x i1>, 1188 iXLen, iXLen); 1189 1190define <vscale x 8 x i8> @intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 1191; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8: 1192; CHECK: # %bb.0: # %entry 1193; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu 1194; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t 1195; CHECK-NEXT: ret 1196entry: 1197 %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8( 1198 <vscale x 8 x i8> %0, 1199 <vscale x 8 x i8> %1, 1200 i8 %2, 1201 <vscale x 8 x i1> %3, 1202 iXLen %4, iXLen 1) 1203 1204 ret <vscale x 8 x i8> %a 1205} 1206 1207declare <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8( 1208 <vscale x 16 x i8>, 1209 <vscale x 16 x i8>, 1210 i8, 1211 iXLen); 1212 1213define <vscale x 16 x i8> @intrinsic_vsub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind { 1214; CHECK-LABEL: intrinsic_vsub_vx_nxv16i8_nxv16i8_i8: 1215; CHECK: # %bb.0: # %entry 1216; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma 1217; CHECK-NEXT: vsub.vx v8, v8, a0 1218; CHECK-NEXT: ret 1219entry: 1220 %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8( 1221 <vscale x 16 x i8> undef, 1222 <vscale x 16 x i8> %0, 1223 i8 %1, 1224 iXLen %2) 1225 1226 ret <vscale x 16 x i8> %a 1227} 1228 1229declare <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8( 1230 <vscale x 16 x i8>, 1231 <vscale x 16 x i8>, 1232 i8, 1233 <vscale x 16 x i1>, 1234 iXLen, iXLen); 1235 1236define <vscale x 16 x i8> @intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { 1237; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8: 1238; CHECK: # %bb.0: # %entry 1239; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu 1240; CHECK-NEXT: vsub.vx v8, v10, a0, v0.t 1241; CHECK-NEXT: ret 1242entry: 1243 %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8( 1244 <vscale x 16 x i8> %0, 1245 <vscale x 16 x i8> %1, 1246 i8 %2, 1247 <vscale x 16 x i1> %3, 1248 iXLen %4, iXLen 1) 1249 1250 ret <vscale x 16 x i8> %a 1251} 1252 1253declare <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8( 1254 <vscale x 32 x i8>, 1255 <vscale x 32 x i8>, 1256 i8, 1257 iXLen); 1258 1259define <vscale x 32 x i8> @intrinsic_vsub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind { 1260; CHECK-LABEL: intrinsic_vsub_vx_nxv32i8_nxv32i8_i8: 1261; CHECK: # %bb.0: # %entry 1262; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma 1263; CHECK-NEXT: vsub.vx v8, v8, a0 1264; CHECK-NEXT: ret 1265entry: 1266 %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8( 1267 <vscale x 32 x i8> undef, 1268 <vscale x 32 x i8> %0, 1269 i8 %1, 1270 iXLen %2) 1271 1272 ret <vscale x 32 x i8> %a 1273} 1274 1275declare <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8( 1276 <vscale x 32 x i8>, 1277 <vscale x 32 x i8>, 1278 i8, 1279 <vscale x 32 x i1>, 1280 iXLen, iXLen); 1281 1282define <vscale x 32 x i8> 
@intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { 1283; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8: 1284; CHECK: # %bb.0: # %entry 1285; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu 1286; CHECK-NEXT: vsub.vx v8, v12, a0, v0.t 1287; CHECK-NEXT: ret 1288entry: 1289 %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8( 1290 <vscale x 32 x i8> %0, 1291 <vscale x 32 x i8> %1, 1292 i8 %2, 1293 <vscale x 32 x i1> %3, 1294 iXLen %4, iXLen 1) 1295 1296 ret <vscale x 32 x i8> %a 1297} 1298 1299declare <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8( 1300 <vscale x 64 x i8>, 1301 <vscale x 64 x i8>, 1302 i8, 1303 iXLen); 1304 1305define <vscale x 64 x i8> @intrinsic_vsub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind { 1306; CHECK-LABEL: intrinsic_vsub_vx_nxv64i8_nxv64i8_i8: 1307; CHECK: # %bb.0: # %entry 1308; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma 1309; CHECK-NEXT: vsub.vx v8, v8, a0 1310; CHECK-NEXT: ret 1311entry: 1312 %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8( 1313 <vscale x 64 x i8> undef, 1314 <vscale x 64 x i8> %0, 1315 i8 %1, 1316 iXLen %2) 1317 1318 ret <vscale x 64 x i8> %a 1319} 1320 1321declare <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8( 1322 <vscale x 64 x i8>, 1323 <vscale x 64 x i8>, 1324 i8, 1325 <vscale x 64 x i1>, 1326 iXLen, iXLen); 1327 1328define <vscale x 64 x i8> @intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind { 1329; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8: 1330; CHECK: # %bb.0: # %entry 1331; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu 1332; CHECK-NEXT: vsub.vx v8, v16, a0, v0.t 1333; CHECK-NEXT: ret 1334entry: 1335 %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8( 1336 <vscale x 64 x i8> %0, 1337 <vscale x 64 x i8> %1, 1338 i8 %2, 1339 <vscale x 64 x i1> %3, 1340 iXLen %4, iXLen 1) 1341 1342 ret <vscale x 64 x i8> %a 1343} 1344 1345declare <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16( 1346 <vscale x 1 x i16>, 1347 <vscale x 1 x i16>, 1348 i16, 1349 iXLen); 1350 1351define <vscale x 1 x i16> @intrinsic_vsub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind { 1352; CHECK-LABEL: intrinsic_vsub_vx_nxv1i16_nxv1i16_i16: 1353; CHECK: # %bb.0: # %entry 1354; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 1355; CHECK-NEXT: vsub.vx v8, v8, a0 1356; CHECK-NEXT: ret 1357entry: 1358 %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16( 1359 <vscale x 1 x i16> undef, 1360 <vscale x 1 x i16> %0, 1361 i16 %1, 1362 iXLen %2) 1363 1364 ret <vscale x 1 x i16> %a 1365} 1366 1367declare <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16( 1368 <vscale x 1 x i16>, 1369 <vscale x 1 x i16>, 1370 i16, 1371 <vscale x 1 x i1>, 1372 iXLen, iXLen); 1373 1374define <vscale x 1 x i16> @intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { 1375; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16: 1376; CHECK: # %bb.0: # %entry 1377; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu 1378; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t 1379; CHECK-NEXT: ret 1380entry: 1381 %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16( 1382 <vscale x 1 x i16> %0, 1383 <vscale x 1 x i16> %1, 1384 i16 %2, 1385 <vscale x 1 x i1> %3, 1386 iXLen %4, iXLen 1) 1387 1388 ret <vscale x 1 x i16> %a 1389} 1390 1391declare <vscale x 
2 x i16> @llvm.riscv.vsub.nxv2i16.i16( 1392 <vscale x 2 x i16>, 1393 <vscale x 2 x i16>, 1394 i16, 1395 iXLen); 1396 1397define <vscale x 2 x i16> @intrinsic_vsub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind { 1398; CHECK-LABEL: intrinsic_vsub_vx_nxv2i16_nxv2i16_i16: 1399; CHECK: # %bb.0: # %entry 1400; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 1401; CHECK-NEXT: vsub.vx v8, v8, a0 1402; CHECK-NEXT: ret 1403entry: 1404 %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16( 1405 <vscale x 2 x i16> undef, 1406 <vscale x 2 x i16> %0, 1407 i16 %1, 1408 iXLen %2) 1409 1410 ret <vscale x 2 x i16> %a 1411} 1412 1413declare <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16( 1414 <vscale x 2 x i16>, 1415 <vscale x 2 x i16>, 1416 i16, 1417 <vscale x 2 x i1>, 1418 iXLen, iXLen); 1419 1420define <vscale x 2 x i16> @intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 1421; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16: 1422; CHECK: # %bb.0: # %entry 1423; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu 1424; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t 1425; CHECK-NEXT: ret 1426entry: 1427 %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16( 1428 <vscale x 2 x i16> %0, 1429 <vscale x 2 x i16> %1, 1430 i16 %2, 1431 <vscale x 2 x i1> %3, 1432 iXLen %4, iXLen 1) 1433 1434 ret <vscale x 2 x i16> %a 1435} 1436 1437declare <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16( 1438 <vscale x 4 x i16>, 1439 <vscale x 4 x i16>, 1440 i16, 1441 iXLen); 1442 1443define <vscale x 4 x i16> @intrinsic_vsub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind { 1444; CHECK-LABEL: intrinsic_vsub_vx_nxv4i16_nxv4i16_i16: 1445; CHECK: # %bb.0: # %entry 1446; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 1447; CHECK-NEXT: vsub.vx v8, v8, a0 1448; CHECK-NEXT: ret 1449entry: 1450 %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16( 1451 <vscale x 4 x i16> undef, 1452 <vscale x 4 x i16> %0, 1453 i16 %1, 1454 iXLen %2) 1455 1456 ret <vscale x 4 x i16> %a 1457} 1458 1459declare <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16( 1460 <vscale x 4 x i16>, 1461 <vscale x 4 x i16>, 1462 i16, 1463 <vscale x 4 x i1>, 1464 iXLen, iXLen); 1465 1466define <vscale x 4 x i16> @intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 1467; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16: 1468; CHECK: # %bb.0: # %entry 1469; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu 1470; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t 1471; CHECK-NEXT: ret 1472entry: 1473 %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16( 1474 <vscale x 4 x i16> %0, 1475 <vscale x 4 x i16> %1, 1476 i16 %2, 1477 <vscale x 4 x i1> %3, 1478 iXLen %4, iXLen 1) 1479 1480 ret <vscale x 4 x i16> %a 1481} 1482 1483declare <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16( 1484 <vscale x 8 x i16>, 1485 <vscale x 8 x i16>, 1486 i16, 1487 iXLen); 1488 1489define <vscale x 8 x i16> @intrinsic_vsub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind { 1490; CHECK-LABEL: intrinsic_vsub_vx_nxv8i16_nxv8i16_i16: 1491; CHECK: # %bb.0: # %entry 1492; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 1493; CHECK-NEXT: vsub.vx v8, v8, a0 1494; CHECK-NEXT: ret 1495entry: 1496 %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16( 1497 <vscale x 8 x i16> undef, 1498 <vscale x 8 x i16> %0, 1499 i16 %1, 1500 iXLen %2) 1501 
1502 ret <vscale x 8 x i16> %a 1503} 1504 1505declare <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16( 1506 <vscale x 8 x i16>, 1507 <vscale x 8 x i16>, 1508 i16, 1509 <vscale x 8 x i1>, 1510 iXLen, iXLen); 1511 1512define <vscale x 8 x i16> @intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 1513; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16: 1514; CHECK: # %bb.0: # %entry 1515; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu 1516; CHECK-NEXT: vsub.vx v8, v10, a0, v0.t 1517; CHECK-NEXT: ret 1518entry: 1519 %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16( 1520 <vscale x 8 x i16> %0, 1521 <vscale x 8 x i16> %1, 1522 i16 %2, 1523 <vscale x 8 x i1> %3, 1524 iXLen %4, iXLen 1) 1525 1526 ret <vscale x 8 x i16> %a 1527} 1528 1529declare <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16( 1530 <vscale x 16 x i16>, 1531 <vscale x 16 x i16>, 1532 i16, 1533 iXLen); 1534 1535define <vscale x 16 x i16> @intrinsic_vsub_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind { 1536; CHECK-LABEL: intrinsic_vsub_vx_nxv16i16_nxv16i16_i16: 1537; CHECK: # %bb.0: # %entry 1538; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma 1539; CHECK-NEXT: vsub.vx v8, v8, a0 1540; CHECK-NEXT: ret 1541entry: 1542 %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16( 1543 <vscale x 16 x i16> undef, 1544 <vscale x 16 x i16> %0, 1545 i16 %1, 1546 iXLen %2) 1547 1548 ret <vscale x 16 x i16> %a 1549} 1550 1551declare <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16( 1552 <vscale x 16 x i16>, 1553 <vscale x 16 x i16>, 1554 i16, 1555 <vscale x 16 x i1>, 1556 iXLen, iXLen); 1557 1558define <vscale x 16 x i16> @intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { 1559; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16: 1560; CHECK: # %bb.0: # %entry 1561; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu 1562; CHECK-NEXT: vsub.vx v8, v12, a0, v0.t 1563; CHECK-NEXT: ret 1564entry: 1565 %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16( 1566 <vscale x 16 x i16> %0, 1567 <vscale x 16 x i16> %1, 1568 i16 %2, 1569 <vscale x 16 x i1> %3, 1570 iXLen %4, iXLen 1) 1571 1572 ret <vscale x 16 x i16> %a 1573} 1574 1575declare <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16( 1576 <vscale x 32 x i16>, 1577 <vscale x 32 x i16>, 1578 i16, 1579 iXLen); 1580 1581define <vscale x 32 x i16> @intrinsic_vsub_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind { 1582; CHECK-LABEL: intrinsic_vsub_vx_nxv32i16_nxv32i16_i16: 1583; CHECK: # %bb.0: # %entry 1584; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma 1585; CHECK-NEXT: vsub.vx v8, v8, a0 1586; CHECK-NEXT: ret 1587entry: 1588 %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16( 1589 <vscale x 32 x i16> undef, 1590 <vscale x 32 x i16> %0, 1591 i16 %1, 1592 iXLen %2) 1593 1594 ret <vscale x 32 x i16> %a 1595} 1596 1597declare <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16( 1598 <vscale x 32 x i16>, 1599 <vscale x 32 x i16>, 1600 i16, 1601 <vscale x 32 x i1>, 1602 iXLen, iXLen); 1603 1604define <vscale x 32 x i16> @intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { 1605; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16: 1606; CHECK: # %bb.0: # %entry 1607; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu 1608; 
CHECK-NEXT: vsub.vx v8, v16, a0, v0.t 1609; CHECK-NEXT: ret 1610entry: 1611 %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16( 1612 <vscale x 32 x i16> %0, 1613 <vscale x 32 x i16> %1, 1614 i16 %2, 1615 <vscale x 32 x i1> %3, 1616 iXLen %4, iXLen 1) 1617 1618 ret <vscale x 32 x i16> %a 1619} 1620 1621declare <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32( 1622 <vscale x 1 x i32>, 1623 <vscale x 1 x i32>, 1624 i32, 1625 iXLen); 1626 1627define <vscale x 1 x i32> @intrinsic_vsub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind { 1628; CHECK-LABEL: intrinsic_vsub_vx_nxv1i32_nxv1i32_i32: 1629; CHECK: # %bb.0: # %entry 1630; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 1631; CHECK-NEXT: vsub.vx v8, v8, a0 1632; CHECK-NEXT: ret 1633entry: 1634 %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32( 1635 <vscale x 1 x i32> undef, 1636 <vscale x 1 x i32> %0, 1637 i32 %1, 1638 iXLen %2) 1639 1640 ret <vscale x 1 x i32> %a 1641} 1642 1643declare <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32( 1644 <vscale x 1 x i32>, 1645 <vscale x 1 x i32>, 1646 i32, 1647 <vscale x 1 x i1>, 1648 iXLen, iXLen); 1649 1650define <vscale x 1 x i32> @intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { 1651; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32: 1652; CHECK: # %bb.0: # %entry 1653; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu 1654; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t 1655; CHECK-NEXT: ret 1656entry: 1657 %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32( 1658 <vscale x 1 x i32> %0, 1659 <vscale x 1 x i32> %1, 1660 i32 %2, 1661 <vscale x 1 x i1> %3, 1662 iXLen %4, iXLen 1) 1663 1664 ret <vscale x 1 x i32> %a 1665} 1666 1667declare <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32( 1668 <vscale x 2 x i32>, 1669 <vscale x 2 x i32>, 1670 i32, 1671 iXLen); 1672 1673define <vscale x 2 x i32> @intrinsic_vsub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind { 1674; CHECK-LABEL: intrinsic_vsub_vx_nxv2i32_nxv2i32_i32: 1675; CHECK: # %bb.0: # %entry 1676; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 1677; CHECK-NEXT: vsub.vx v8, v8, a0 1678; CHECK-NEXT: ret 1679entry: 1680 %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32( 1681 <vscale x 2 x i32> undef, 1682 <vscale x 2 x i32> %0, 1683 i32 %1, 1684 iXLen %2) 1685 1686 ret <vscale x 2 x i32> %a 1687} 1688 1689declare <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32( 1690 <vscale x 2 x i32>, 1691 <vscale x 2 x i32>, 1692 i32, 1693 <vscale x 2 x i1>, 1694 iXLen, iXLen); 1695 1696define <vscale x 2 x i32> @intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { 1697; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32: 1698; CHECK: # %bb.0: # %entry 1699; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu 1700; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t 1701; CHECK-NEXT: ret 1702entry: 1703 %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32( 1704 <vscale x 2 x i32> %0, 1705 <vscale x 2 x i32> %1, 1706 i32 %2, 1707 <vscale x 2 x i1> %3, 1708 iXLen %4, iXLen 1) 1709 1710 ret <vscale x 2 x i32> %a 1711} 1712 1713declare <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32( 1714 <vscale x 4 x i32>, 1715 <vscale x 4 x i32>, 1716 i32, 1717 iXLen); 1718 1719define <vscale x 4 x i32> @intrinsic_vsub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind { 1720; CHECK-LABEL: 
intrinsic_vsub_vx_nxv4i32_nxv4i32_i32: 1721; CHECK: # %bb.0: # %entry 1722; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 1723; CHECK-NEXT: vsub.vx v8, v8, a0 1724; CHECK-NEXT: ret 1725entry: 1726 %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32( 1727 <vscale x 4 x i32> undef, 1728 <vscale x 4 x i32> %0, 1729 i32 %1, 1730 iXLen %2) 1731 1732 ret <vscale x 4 x i32> %a 1733} 1734 1735declare <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32( 1736 <vscale x 4 x i32>, 1737 <vscale x 4 x i32>, 1738 i32, 1739 <vscale x 4 x i1>, 1740 iXLen, iXLen); 1741 1742define <vscale x 4 x i32> @intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 1743; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32: 1744; CHECK: # %bb.0: # %entry 1745; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu 1746; CHECK-NEXT: vsub.vx v8, v10, a0, v0.t 1747; CHECK-NEXT: ret 1748entry: 1749 %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32( 1750 <vscale x 4 x i32> %0, 1751 <vscale x 4 x i32> %1, 1752 i32 %2, 1753 <vscale x 4 x i1> %3, 1754 iXLen %4, iXLen 1) 1755 1756 ret <vscale x 4 x i32> %a 1757} 1758 1759declare <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32( 1760 <vscale x 8 x i32>, 1761 <vscale x 8 x i32>, 1762 i32, 1763 iXLen); 1764 1765define <vscale x 8 x i32> @intrinsic_vsub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind { 1766; CHECK-LABEL: intrinsic_vsub_vx_nxv8i32_nxv8i32_i32: 1767; CHECK: # %bb.0: # %entry 1768; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 1769; CHECK-NEXT: vsub.vx v8, v8, a0 1770; CHECK-NEXT: ret 1771entry: 1772 %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32( 1773 <vscale x 8 x i32> undef, 1774 <vscale x 8 x i32> %0, 1775 i32 %1, 1776 iXLen %2) 1777 1778 ret <vscale x 8 x i32> %a 1779} 1780 1781declare <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32( 1782 <vscale x 8 x i32>, 1783 <vscale x 8 x i32>, 1784 i32, 1785 <vscale x 8 x i1>, 1786 iXLen, iXLen); 1787 1788define <vscale x 8 x i32> @intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { 1789; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32: 1790; CHECK: # %bb.0: # %entry 1791; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu 1792; CHECK-NEXT: vsub.vx v8, v12, a0, v0.t 1793; CHECK-NEXT: ret 1794entry: 1795 %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32( 1796 <vscale x 8 x i32> %0, 1797 <vscale x 8 x i32> %1, 1798 i32 %2, 1799 <vscale x 8 x i1> %3, 1800 iXLen %4, iXLen 1) 1801 1802 ret <vscale x 8 x i32> %a 1803} 1804 1805declare <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32( 1806 <vscale x 16 x i32>, 1807 <vscale x 16 x i32>, 1808 i32, 1809 iXLen); 1810 1811define <vscale x 16 x i32> @intrinsic_vsub_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind { 1812; CHECK-LABEL: intrinsic_vsub_vx_nxv16i32_nxv16i32_i32: 1813; CHECK: # %bb.0: # %entry 1814; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma 1815; CHECK-NEXT: vsub.vx v8, v8, a0 1816; CHECK-NEXT: ret 1817entry: 1818 %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32( 1819 <vscale x 16 x i32> undef, 1820 <vscale x 16 x i32> %0, 1821 i32 %1, 1822 iXLen %2) 1823 1824 ret <vscale x 16 x i32> %a 1825} 1826 1827declare <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32( 1828 <vscale x 16 x i32>, 1829 <vscale x 16 x i32>, 1830 i32, 1831 <vscale x 16 x i1>, 1832 iXLen, iXLen); 1833 1834define <vscale x 
define <vscale x 16 x i32> @intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vsub.vx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vsub_vx_nxv1i64_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsub.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vsub_vx_nxv1i64_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vsub.vx v8, v8, a0
; RV64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i64> @intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsub.vv v8, v9, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vsub.vx v8, v9, a0, v0.t
; RV64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vsub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vsub_vx_nxv2i64_nxv2i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsub.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vsub_vx_nxv2i64_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vsub.vx v8, v8, a0
; RV64-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i64> @intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsub.vv v8, v10, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vsub.vx v8, v10, a0, v0.t
; RV64-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vsub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vsub_vx_nxv4i64_nxv4i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsub.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vsub_vx_nxv4i64_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vsub.vx v8, v8, a0
; RV64-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i64> @intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsub.vv v8, v12, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vsub.vx v8, v12, a0, v0.t
; RV64-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vsub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vsub_vx_nxv8i64_nxv8i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsub.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vsub_vx_nxv8i64_nxv8i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vsub.vx v8, v8, a0
; RV64-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i64> @intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsub.vv v8, v16, v24, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; RV64-NEXT: vsub.vx v8, v16, a0, v0.t
; RV64-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

define <vscale x 1 x i8> @intrinsic_vsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vsub_vi_tu_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vadd.vi v8, v9, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 9,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 -9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vsub_vi_tu_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT: vadd.vi v8, v9, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 9,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 -9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vsub_vi_tu_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT: vadd.vi v8, v9, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 9,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 -9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vsub_vi_tu_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT: vadd.vi v8, v9, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 9,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 -9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vsub_vi_tu_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT: vadd.vi v8, v10, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 9,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 -9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vsub_vi_tu_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT: vadd.vi v8, v12, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 9,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 -9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 64 x i8> @intrinsic_vsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 -9,
    iXLen %1)

  ret <vscale x 64 x i8> %a
}

define <vscale x 64 x i8> @intrinsic_vsub_vi_tu_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
; CHECK-NEXT: vadd.vi v8, v16, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 9,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

define <vscale x 64 x i8> @intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 -9,
    <vscale x 64 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 64 x i8> %a
}

define <vscale x 1 x i16> @intrinsic_vsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vsub_vi_tu_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: vadd.vi v8, v9, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 9,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 -9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vsub_vi_tu_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT: vadd.vi v8, v9, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 9,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 -9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vsub_vi_tu_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT: vadd.vi v8, v9, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 9,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 -9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vsub_vi_tu_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT: vadd.vi v8, v10, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 9,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 -9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vsub_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vsub_vi_tu_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT: vadd.vi v8, v12, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 9,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 -9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vsub_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vsub_vi_tu_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT: vadd.vi v8, v16, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 9,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 -9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 1 x i32> @intrinsic_vsub_vi_tu_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT: vadd.vi v8, v9, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 9,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

define <vscale x 1 x i32> @intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 -9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vsub_vi_tu_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT: vadd.vi v8, v9, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 9,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 -9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vsub_vi_tu_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT: vadd.vi v8, v10, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 9,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 -9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vsub_vi_tu_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT: vadd.vi v8, v12, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 9,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 -9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vsub_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 16 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vsub_vi_tu_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT: vadd.vi v8, v16, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 9,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 -9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}

define <vscale x 1 x i64> @intrinsic_vsub_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vsub_vi_tu_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT: vadd.vi v8, v9, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 9,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vsub_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vsub_vi_tu_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT: vadd.vi v8, v10, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 9,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vsub_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vsub_vi_tu_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT: vadd.vi v8, v12, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 9,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vsub_vi_tu_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT: vadd.vi v8, v16, -9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 9,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}