; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
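
; Tests for the llvm.riscv.vsbc intrinsic (vector subtract-with-borrow):
; vd[i] = vs2[i] - vs1[i] - v0.mask[i], with the borrow-in taken from the
; mask register v0. Every call passes an undef passthru as the first
; operand, and the iXLen VL argument is rewritten to i32/i64 by the sed
; invocations in the RUN lines above.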

declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i1> %2,
    iXLen %3)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i64> %a
}
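
; The tests below exercise the vector-scalar form (vsbc.vxm), where the
; second source operand is a scalar GPR. VL now arrives in a1 because a0
; carries the scalar.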
declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 %1,
    <vscale x 64 x i1> %2,
    iXLen %3)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i32> %a
}
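
; For i64 scalars on RV32 there is no 64-bit GPR: the (a0, a1) register
; pair is spilled to a stack slot, splatted with a zero-strided vlse64.v,
; and the operation falls back to vsbc.vvm. RV64 uses vsbc.vxm directly.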
declare <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsbc.vvm v8, v8, v9, v0
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vsbc.vxm v8, v8, a0, v0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsbc.vvm v8, v8, v10, v0
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vsbc.vxm v8, v8, a0, v0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vsbc.vvm v8, v8, v12, v0
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vsbc.vxm v8, v8, a0, v0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsbc.vvm v8, v8, v16, v0
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vsbc.vxm v8, v8, a0, v0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i64> %a
}