; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

declare <8 x i7> @llvm.vp.and.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)

define <8 x i7> @vand_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v8i7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i7> @llvm.vp.and.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i7> %v
}

declare <2 x i8> @llvm.vp.and.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)

define <2 x i8> @vand_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vand_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vand_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
  %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vand_vx_v2i8_commute(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v2i8_commute:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
  %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %vb, <2 x i8> %va, <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vand_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
  %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vand_vx_v2i8_unmasked_commute(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v2i8_unmasked_commute:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
  %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %vb, <2 x i8> %va, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vand_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %va, <2 x i8> splat (i8 4), <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vand_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %va, <2 x i8> splat (i8 4), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i8> %v
}

declare <4 x i8> @llvm.vp.and.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32)

define <4 x i8> @vand_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.and.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vand_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.and.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vand_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
  %v = call <4 x i8> @llvm.vp.and.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vand_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
  %v = call <4 x i8> @llvm.vp.and.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vand_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.and.v4i8(<4 x i8> %va, <4 x i8> splat (i8 4), <4 x i1> %m, i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vand_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.and.v4i8(<4 x i8> %va, <4 x i8> splat (i8 4), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i8> %v
}

declare <8 x i8> @llvm.vp.and.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)

define <8 x i8> @vand_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.and.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vand_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.and.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vand_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
  %v = call <8 x i8> @llvm.vp.and.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vand_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
  %v = call <8 x i8> @llvm.vp.and.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vand_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.and.v8i8(<8 x i8> %va, <8 x i8> splat (i8 4), <8 x i1> %m, i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vand_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.and.v8i8(<8 x i8> %va, <8 x i8> splat (i8 4), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i8> %v
}

declare <16 x i8> @llvm.vp.and.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32)

define <16 x i8> @vand_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.and.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vand_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.and.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vand_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
  %v = call <16 x i8> @llvm.vp.and.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vand_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
  %v = call <16 x i8> @llvm.vp.and.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vand_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.and.v16i8(<16 x i8> %va, <16 x i8> splat (i8 4), <16 x i1> %m, i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vand_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.and.v16i8(<16 x i8> %va, <16 x i8> splat (i8 4), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i8> %v
}

declare <2 x i16> @llvm.vp.and.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32)

define <2 x i16> @vand_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.and.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vand_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.and.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vand_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
  %v = call <2 x i16> @llvm.vp.and.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vand_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
  %v = call <2 x i16> @llvm.vp.and.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vand_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.and.v2i16(<2 x i16> %va, <2 x i16> splat (i16 4), <2 x i1> %m, i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vand_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.and.v2i16(<2 x i16> %va, <2 x i16> splat (i16 4), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i16> %v
}

declare <4 x i16> @llvm.vp.and.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32)

define <4 x i16> @vand_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.and.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vand_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.and.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vand_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
  %v = call <4 x i16> @llvm.vp.and.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vand_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
  %v = call <4 x i16> @llvm.vp.and.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vand_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.and.v4i16(<4 x i16> %va, <4 x i16> splat (i16 4), <4 x i1> %m, i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vand_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.and.v4i16(<4 x i16> %va, <4 x i16> splat (i16 4), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i16> %v
}

declare <8 x i16> @llvm.vp.and.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32)

define <8 x i16> @vand_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.and.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vand_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.and.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vand_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
  %v = call <8 x i16> @llvm.vp.and.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vand_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
  %v = call <8 x i16> @llvm.vp.and.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vand_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.and.v8i16(<8 x i16> %va, <8 x i16> splat (i16 4), <8 x i1> %m, i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vand_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.and.v8i16(<8 x i16> %va, <8 x i16> splat (i16 4), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i16> %v
}

declare <16 x i16> @llvm.vp.and.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32)

define <16 x i16> @vand_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.and.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vand_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.and.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vand_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
  %v = call <16 x i16> @llvm.vp.and.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vand_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
  %v = call <16 x i16> @llvm.vp.and.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vand_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.and.v16i16(<16 x i16> %va, <16 x i16> splat (i16 4), <16 x i1> %m, i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vand_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.and.v16i16(<16 x i16> %va, <16 x i16> splat (i16 4), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i16> %v
}

declare <2 x i32> @llvm.vp.and.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32)

define <2 x i32> @vand_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.and.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vand_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.and.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vand_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
  %v = call <2 x i32> @llvm.vp.and.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vand_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
  %v = call <2 x i32> @llvm.vp.and.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vand_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.and.v2i32(<2 x i32> %va, <2 x i32> splat (i32 4), <2 x i1> %m, i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vand_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.and.v2i32(<2 x i32> %va, <2 x i32> splat (i32 4), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i32> %v
}

declare <4 x i32> @llvm.vp.and.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define <4 x i32> @vand_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vand_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vand_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
  %v = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vand_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
  %v = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vand_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> splat (i32 4), <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vand_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> splat (i32 4), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i32> %v
}

declare <8 x i32> @llvm.vp.and.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)

define <8 x i32> @vand_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.and.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vand_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.and.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vand_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
  %v = call <8 x i32> @llvm.vp.and.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vand_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
  %v = call <8 x i32> @llvm.vp.and.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vand_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.and.v8i32(<8 x i32> %va, <8 x i32> splat (i32 4), <8 x i1> %m, i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vand_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.and.v8i32(<8 x i32> %va, <8 x i32> splat (i32 4), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i32> %v
}

declare <16 x i32> @llvm.vp.and.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32)

define <16 x i32> @vand_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.and.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vand_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.and.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vand_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
  %v = call <16 x i32> @llvm.vp.and.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vand_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
  %v = call <16 x i32> @llvm.vp.and.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vand_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.and.v16i32(<16 x i32> %va, <16 x i32> splat (i32 4), <16 x i1> %m, i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vand_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.and.v16i32(<16 x i32> %va, <16 x i32> splat (i32 4), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i32> %v
}

declare <2 x i64> @llvm.vp.and.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32)

define <2 x i64> @vand_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.and.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i64> %v
}

define <2 x i64> @vand_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.and.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i64> %v
}

define <2 x i64> @vand_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v9, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vand_vx_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
  %v = call <2 x i64> @llvm.vp.and.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x i64> %v
}

define <2 x i64> @vand_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_v2i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vand_vx_v2i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
  %v = call <2 x i64> @llvm.vp.and.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i64> %v
}

define <2 x i64> @vand_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.and.v2i64(<2 x i64> %va, <2 x i64> splat (i64 4), <2 x i1> %m, i32 %evl)
  ret <2 x i64> %v
}

define <2 x i64> @vand_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.and.v2i64(<2 x i64> %va, <2 x i64> splat (i64 4), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i64> %v
}

declare <4 x i64> @llvm.vp.and.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32)

define <4 x i64> @vand_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.and.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vand_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.and.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vand_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v10, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vand_vx_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
  %v = call <4 x i64> @llvm.vp.and.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vand_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_v4i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vand_vx_v4i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
  %v = call <4 x i64> @llvm.vp.and.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vand_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.and.v4i64(<4 x i64> %va, <4 x i64> splat (i64 4), <4 x i1> %m, i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vand_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.and.v4i64(<4 x i64> %va, <4 x i64> splat (i64 4), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i64> %v
}

declare <8 x i64> @llvm.vp.and.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32)

define <8 x i64> @vand_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.and.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vand_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.and.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vand_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_v8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v12, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vand_vx_v8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
  %v = call <8 x i64> @llvm.vp.and.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vand_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_v8i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vand_vx_v8i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
  %v = call <8 x i64> @llvm.vp.and.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vand_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.and.v8i64(<8 x i64> %va, <8 x i64> splat (i64 4), <8 x i1> %m, i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vand_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.and.v8i64(<8 x i64> %va, <8 x i64> splat (i64 4), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i64> %v
}

declare <11 x i64> @llvm.vp.and.v11i64(<11 x i64>, <11 x i64>, <11 x i1>, i32)

define <11 x i64> @vand_vv_v11i64(<11 x i64> %va, <11 x i64> %b, <11 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v11i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %b, <11 x i1> %m, i32 %evl)
  ret <11 x i64> %v
}

define <11 x i64> @vand_vv_v11i64_unmasked(<11 x i64> %va, <11 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v11i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %b, <11 x i1> splat (i1 true), i32 %evl)
  ret <11 x i64> %v
}

define <11 x i64> @vand_vx_v11i64(<11 x i64> %va, i64 %b, <11 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_v11i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vand_vx_v11i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <11 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <11 x i64> %elt.head, <11 x i64> poison, <11 x i32> zeroinitializer
  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %vb, <11 x i1> %m, i32 %evl)
  ret <11 x i64> %v
}

define <11 x i64> @vand_vx_v11i64_unmasked(<11 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_v11i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vand_vx_v11i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <11 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <11 x i64> %elt.head, <11 x i64> poison, <11 x i32> zeroinitializer
  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %vb, <11 x i1> splat (i1 true), i32 %evl)
  ret <11 x i64> %v
}

define <11 x i64> @vand_vi_v11i64(<11 x i64> %va, <11 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v11i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> splat (i64 4), <11 x i1> %m, i32 %evl)
  ret <11 x i64> %v
}

define <11 x i64> @vand_vi_v11i64_unmasked(<11 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v11i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> splat (i64 4), <11 x i1> splat (i1 true), i32 %evl)
  ret <11 x i64> %v
}

declare <16 x i64> @llvm.vp.and.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32)

define <16 x i64> @vand_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.and.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vand_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v16i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.and.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vand_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_v16i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vand_vx_v16i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
  %v = call <16 x i64> @llvm.vp.and.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vand_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_v16i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vand_vx_v16i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
  %v = call <16 x i64> @llvm.vp.and.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vand_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.and.v16i64(<16 x i64> %va, <16 x i64> splat (i64 4), <16 x i1> %m, i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vand_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v16i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.and.v16i64(<16 x i64> %va, <16 x i64> splat (i64 4), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i64> %v
}