; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64

define <vscale x 1 x i8> @vadd_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vadd_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %vc = add <vscale x 1 x i8> %va, %splat
  ret <vscale x 1 x i8> %vc
}

define <vscale x 1 x i8> @vadd_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv1i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 1 x i8> %va, splat (i8 -1)
  ret <vscale x 1 x i8> %vc
}

define <vscale x 1 x i8> @vadd_vx_nxv1i8_1(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv1i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 1 x i8> %va, splat (i8 2)
  ret <vscale x 1 x i8> %vc
}

; Test constant adds to see if we can optimize them away for scalable vectors.
define <vscale x 1 x i8> @vadd_ii_nxv1i8_1() {
; CHECK-LABEL: vadd_ii_nxv1i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 5
; CHECK-NEXT:    ret
  %vc = add <vscale x 1 x i8> splat (i8 2), splat (i8 3)
  ret <vscale x 1 x i8> %vc
}

define <vscale x 2 x i8> @vadd_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vadd_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %vc = add <vscale x 2 x i8> %va, %splat
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vadd_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 2 x i8> %va, splat (i8 -1)
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vadd_vx_nxv2i8_1(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv2i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 2 x i8> %va, splat (i8 2)
  ret <vscale x 2 x i8> %vc
}

define <vscale x 4 x i8> @vadd_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vadd_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %vc = add <vscale x 4 x i8> %va, %splat
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vadd_vx_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 4 x i8> %va, splat (i8 -1)
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vadd_vx_nxv4i8_1(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv4i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 4 x i8> %va, splat (i8 2)
  ret <vscale x 4 x i8> %vc
}

define <vscale x 8 x i8> @vadd_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vadd_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = add <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vadd_vx_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 8 x i8> %va, splat (i8 -1)
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vadd_vx_nxv8i8_1(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv8i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 8 x i8> %va, splat (i8 2)
  ret <vscale x 8 x i8> %vc
}

define <vscale x 16 x i8> @vadd_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vadd_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %vc = add <vscale x 16 x i8> %va, %splat
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vadd_vx_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv16i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 16 x i8> %va, splat (i8 -1)
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vadd_vx_nxv16i8_1(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv16i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 16 x i8> %va, splat (i8 2)
  ret <vscale x 16 x i8> %vc
}

define <vscale x 32 x i8> @vadd_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vadd_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %vc = add <vscale x 32 x i8> %va, %splat
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vadd_vx_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv32i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 32 x i8> %va, splat (i8 -1)
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vadd_vx_nxv32i8_1(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv32i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 32 x i8> %va, splat (i8 2)
  ret <vscale x 32 x i8> %vc
}

define <vscale x 64 x i8> @vadd_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vadd_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %vc = add <vscale x 64 x i8> %va, %splat
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vadd_vx_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv64i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 64 x i8> %va, splat (i8 -1)
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vadd_vx_nxv64i8_1(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vadd_vx_nxv64i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 64 x i8> %va, splat (i8 2)
  ret <vscale x 64 x i8> %vc
}

define <vscale x 1 x i16> @vadd_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vadd_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %vc = add <vscale x 1 x i16> %va, %splat
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vadd_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv1i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 1 x i16> %va, splat (i16 -1)
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vadd_vx_nxv1i16_1(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv1i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 1 x i16> %va, splat (i16 2)
  ret <vscale x 1 x i16> %vc
}

define <vscale x 2 x i16> @vadd_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vadd_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %vc = add <vscale x 2 x i16> %va, %splat
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vadd_vx_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv2i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 2 x i16> %va, splat (i16 -1)
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vadd_vx_nxv2i16_1(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv2i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 2 x i16> %va, splat (i16 2)
  ret <vscale x 2 x i16> %vc
}

define <vscale x 4 x i16> @vadd_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vadd_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %vc = add <vscale x 4 x i16> %va, %splat
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vadd_vx_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv4i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 4 x i16> %va, splat (i16 -1)
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vadd_vx_nxv4i16_1(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv4i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 4 x i16> %va, splat (i16 2)
  ret <vscale x 4 x i16> %vc
}

define <vscale x 8 x i16> @vadd_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vadd_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %vc = add <vscale x 8 x i16> %va, %splat
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vadd_vx_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv8i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 8 x i16> %va, splat (i16 -1)
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vadd_vx_nxv8i16_1(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv8i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 8 x i16> %va, splat (i16 2)
  ret <vscale x 8 x i16> %vc
}

define <vscale x 16 x i16> @vadd_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vadd_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %vc = add <vscale x 16 x i16> %va, %splat
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vadd_vx_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv16i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 16 x i16> %va, splat (i16 -1)
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vadd_vx_nxv16i16_1(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv16i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 16 x i16> %va, splat (i16 2)
  ret <vscale x 16 x i16> %vc
}

define <vscale x 32 x i16> @vadd_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vadd_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %vc = add <vscale x 32 x i16> %va, %splat
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vadd_vx_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv32i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 32 x i16> %va, splat (i16 -1)
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vadd_vx_nxv32i16_1(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vadd_vx_nxv32i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 32 x i16> %va, splat (i16 2)
  ret <vscale x 32 x i16> %vc
}

define <vscale x 1 x i32> @vadd_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vadd_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %vc = add <vscale x 1 x i32> %va, %splat
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vadd_vx_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 1 x i32> %va, splat (i32 -1)
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vadd_vx_nxv1i32_1(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv1i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 1 x i32> %va, splat (i32 2)
  ret <vscale x 1 x i32> %vc
}

define <vscale x 2 x i32> @vadd_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vadd_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %vc = add <vscale x 2 x i32> %va, %splat
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vadd_vx_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 2 x i32> %va, splat (i32 -1)
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vadd_vx_nxv2i32_1(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv2i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 2 x i32> %va, splat (i32 2)
  ret <vscale x 2 x i32> %vc
}

define <vscale x 4 x i32> @vadd_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vadd_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %vc = add <vscale x 4 x i32> %va, %splat
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vadd_vx_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 4 x i32> %va, splat (i32 -1)
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vadd_vx_nxv4i32_1(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv4i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 4 x i32> %va, splat (i32 2)
  ret <vscale x 4 x i32> %vc
}

define <vscale x 8 x i32> @vadd_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vadd_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %vc = add <vscale x 8 x i32> %va, %splat
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vadd_vx_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 8 x i32> %va, splat (i32 -1)
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vadd_vx_nxv8i32_1(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv8i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 8 x i32> %va, splat (i32 2)
  ret <vscale x 8 x i32> %vc
}

define <vscale x 16 x i32> @vadd_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vadd_vx_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vadd.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %vc = add <vscale x 16 x i32> %va, %splat
  ret <vscale x 16 x i32> %vc
}

define <vscale x 16 x i32> @vadd_vx_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv16i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 16 x i32> %va, splat (i32 -1)
  ret <vscale x 16 x i32> %vc
}

define <vscale x 16 x i32> @vadd_vx_nxv16i32_1(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vadd_vx_nxv16i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 16 x i32> %va, splat (i32 2)
  ret <vscale x 16 x i32> %vc
}

define <vscale x 1 x i64> @vadd_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; RV32-LABEL: vadd_vx_nxv1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vadd.vv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vadd_vx_nxv1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV64-NEXT:    vadd.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %vc = add <vscale x 1 x i64> %va, %splat
  ret <vscale x 1 x i64> %vc
}

define <vscale x 1 x i64> @vadd_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv1i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 1 x i64> %va, splat (i64 -1)
  ret <vscale x 1 x i64> %vc
}

define <vscale x 1 x i64> @vadd_vx_nxv1i64_1(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv1i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 1 x i64> %va, splat (i64 2)
  ret <vscale x 1 x i64> %vc
}

define <vscale x 2 x i64> @vadd_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; RV32-LABEL: vadd_vx_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vadd.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vadd_vx_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; RV64-NEXT:    vadd.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %vc = add <vscale x 2 x i64> %va, %splat
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vadd_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv2i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 2 x i64> %va, splat (i64 -1)
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vadd_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv2i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 2 x i64> %va, splat (i64 2)
  ret <vscale x 2 x i64> %vc
}

define <vscale x 4 x i64> @vadd_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; RV32-LABEL: vadd_vx_nxv4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vadd.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vadd_vx_nxv4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV64-NEXT:    vadd.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %vc = add <vscale x 4 x i64> %va, %splat
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vadd_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv4i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 4 x i64> %va, splat (i64 -1)
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vadd_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv4i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 4 x i64> %va, splat (i64 2)
  ret <vscale x 4 x i64> %vc
}

define <vscale x 8 x i64> @vadd_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-LABEL: vadd_vx_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vadd.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vadd_vx_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT:    vadd.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %vc = add <vscale x 8 x i64> %va, %splat
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vadd_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv8i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, -1
; CHECK-NEXT:    ret
  %vc = add <vscale x 8 x i64> %va, splat (i64 -1)
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vadd_vx_nxv8i64_1(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vadd_vx_nxv8i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %vc = add <vscale x 8 x i64> %va, splat (i64 2)
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vadd_xx_nxv8i64(i64 %a, i64 %b) nounwind {
; RV32-LABEL: vadd_xx_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    add a2, a0, a2
; RV32-NEXT:    add a1, a1, a3
; RV32-NEXT:    sltu a0, a2, a0
; RV32-NEXT:    add a0, a1, a0
; RV32-NEXT:    sw a2, 8(sp)
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v8, (a0), zero
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vadd_xx_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT:    vmv.v.x v8, a0
; RV64-NEXT:    ret
  %head1 = insertelement <vscale x 8 x i64> poison, i64 %a, i32 0
  %splat1 = shufflevector <vscale x 8 x i64> %head1, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %head2 = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %splat2 = shufflevector <vscale x 8 x i64> %head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = add <vscale x 8 x i64> %splat1, %splat2
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i32> @vadd_vv_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vadd_vv_mask_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vadd.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> zeroinitializer
  %vc = add <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vadd_vx_mask_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vadd_vx_mask_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT:    vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> zeroinitializer
  %vc = add <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vadd_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vadd_vi_mask_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vadd.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 7), <vscale x 8 x i32> zeroinitializer
  %vc = add <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vadd_vv_mask_negative0_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vadd_vv_mask_negative0_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v16, 1
; CHECK-NEXT:    vmerge.vvm v12, v16, v12, v0
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> splat (i32 1)
  %vc = add <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vadd_vv_mask_negative1_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vadd_vv_mask_negative1_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vmerge.vvm v12, v16, v12, v0
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> zeroinitializer
  %vc = add <vscale x 8 x i32> %va, %vs
  %vd = add <vscale x 8 x i32> %vc, %vs
  ret <vscale x 8 x i32> %vd
}