1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py 2; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \ 3; RUN: | FileCheck %s --check-prefixes=CHECK,RV32 4; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ 5; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 6 7declare <8 x i7> @llvm.vp.xor.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32) 8 9define <8 x i7> @vxor_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { 10; CHECK-LABEL: vxor_vv_v8i7: 11; CHECK: # %bb.0: 12; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma 13; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t 14; CHECK-NEXT: ret 15 %v = call <8 x i7> @llvm.vp.xor.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl) 16 ret <8 x i7> %v 17} 18 19declare <2 x i8> @llvm.vp.xor.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32) 20 21define <2 x i8> @vxor_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) { 22; CHECK-LABEL: vxor_vv_v2i8: 23; CHECK: # %bb.0: 24; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma 25; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t 26; CHECK-NEXT: ret 27 %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl) 28 ret <2 x i8> %v 29} 30 31define <2 x i8> @vxor_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) { 32; CHECK-LABEL: vxor_vv_v2i8_unmasked: 33; CHECK: # %bb.0: 34; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma 35; CHECK-NEXT: vxor.vv v8, v8, v9 36; CHECK-NEXT: ret 37 %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl) 38 ret <2 x i8> %v 39} 40 41define <2 x i8> @vxor_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) { 42; CHECK-LABEL: vxor_vx_v2i8: 43; CHECK: # %bb.0: 44; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 45; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t 46; CHECK-NEXT: ret 47 %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 48 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer 49 %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl) 50 ret <2 x i8> %v 51} 52 53define <2 x i8> @vxor_vx_v2i8_commute(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) { 54; CHECK-LABEL: vxor_vx_v2i8_commute: 55; CHECK: # %bb.0: 56; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 57; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t 58; CHECK-NEXT: ret 59 %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 60 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer 61 %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %vb, <2 x i8> %va, <2 x i1> %m, i32 %evl) 62 ret <2 x i8> %v 63} 64 65define <2 x i8> @vxor_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { 66; CHECK-LABEL: vxor_vx_v2i8_unmasked: 67; CHECK: # %bb.0: 68; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 69; CHECK-NEXT: vxor.vx v8, v8, a0 70; CHECK-NEXT: ret 71 %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 72 %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer 73 %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl) 74 ret <2 x i8> %v 75} 76 77define <2 x i8> @vxor_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { 78; CHECK-LABEL: vxor_vi_v2i8: 79; CHECK: # %bb.0: 80; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma 81; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t 82; CHECK-NEXT: ret 83 %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> splat (i8 7), <2 x i1> %m, i32 %evl) 84 ret <2 x i8> %v 85} 86 87define <2 x i8> @vxor_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { 88; CHECK-LABEL: vxor_vi_v2i8_unmasked: 89; CHECK: # %bb.0: 90; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma 91; CHECK-NEXT: vxor.vi v8, v8, 7 92; CHECK-NEXT: ret 93 %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> splat (i8 7), <2 x i1> splat (i1 true), i32 %evl) 94 ret <2 x i8> %v 95} 96 97define <2 x i8> @vxor_vi_v2i8_1(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { 98; CHECK-LABEL: vxor_vi_v2i8_1: 99; CHECK: # %bb.0: 100; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma 101; CHECK-NEXT: vnot.v v8, v8, v0.t 102; CHECK-NEXT: ret 103 %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> %m, i32 %evl) 104 ret <2 x i8> %v 105} 106 107define <2 x i8> @vxor_vi_v2i8_unmasked_1(<2 x i8> %va, i32 zeroext %evl) { 108; CHECK-LABEL: vxor_vi_v2i8_unmasked_1: 109; CHECK: # %bb.0: 110; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma 111; CHECK-NEXT: vnot.v v8, v8 112; CHECK-NEXT: ret 113 %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> splat (i1 true), i32 %evl) 114 ret <2 x i8> %v 115} 116 117declare <4 x i8> @llvm.vp.xor.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) 118 119define <4 x i8> @vxor_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { 120; CHECK-LABEL: vxor_vv_v4i8: 121; CHECK: # %bb.0: 122; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma 123; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t 124; CHECK-NEXT: ret 125 %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl) 126 ret <4 x i8> %v 127} 128 129define <4 x i8> @vxor_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) { 130; CHECK-LABEL: vxor_vv_v4i8_unmasked: 131; CHECK: # %bb.0: 132; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma 133; CHECK-NEXT: vxor.vv v8, v8, v9 134; CHECK-NEXT: ret 135 %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl) 136 ret <4 x i8> %v 137} 138 139define <4 x i8> @vxor_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) { 140; CHECK-LABEL: vxor_vx_v4i8: 141; CHECK: # %bb.0: 142; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 143; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t 144; CHECK-NEXT: ret 145 %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 146 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer 147 %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl) 148 ret <4 x i8> %v 149} 150 151define <4 x i8> @vxor_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { 152; CHECK-LABEL: vxor_vx_v4i8_unmasked: 153; CHECK: # %bb.0: 154; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma 155; CHECK-NEXT: vxor.vx v8, v8, a0 156; CHECK-NEXT: ret 157 %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 158 %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer 159 %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl) 160 ret <4 x i8> %v 161} 162 163define <4 x i8> @vxor_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { 164; CHECK-LABEL: vxor_vi_v4i8: 165; CHECK: # %bb.0: 166; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma 167; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t 168; CHECK-NEXT: ret 169 %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> splat (i8 7), <4 x i1> %m, i32 %evl) 170 ret <4 x i8> %v 171} 172 173define <4 x i8> @vxor_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { 174; CHECK-LABEL: vxor_vi_v4i8_unmasked: 175; CHECK: # %bb.0: 176; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma 177; CHECK-NEXT: vxor.vi v8, v8, 7 178; CHECK-NEXT: ret 179 %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> splat (i8 7), <4 x i1> splat (i1 true), i32 %evl) 180 ret <4 x i8> %v 181} 182 183define <4 x i8> @vxor_vi_v4i8_1(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { 184; CHECK-LABEL: vxor_vi_v4i8_1: 185; CHECK: # %bb.0: 186; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma 187; CHECK-NEXT: vnot.v v8, v8, v0.t 188; CHECK-NEXT: ret 189 %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> %m, i32 %evl) 190 ret <4 x i8> %v 191} 192 193define <4 x i8> @vxor_vi_v4i8_unmasked_1(<4 x i8> %va, i32 zeroext %evl) { 194; CHECK-LABEL: vxor_vi_v4i8_unmasked_1: 195; CHECK: # %bb.0: 196; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma 197; CHECK-NEXT: vnot.v v8, v8 198; CHECK-NEXT: ret 199 %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> splat (i1 true), i32 %evl) 200 ret <4 x i8> %v 201} 202 203declare <8 x i8> @llvm.vp.xor.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) 204 205define <8 x i8> @vxor_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { 206; CHECK-LABEL: vxor_vv_v8i8: 207; CHECK: # %bb.0: 208; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma 209; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t 210; CHECK-NEXT: ret 211 %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl) 212 ret <8 x i8> %v 213} 214 215define <8 x i8> @vxor_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) { 216; CHECK-LABEL: vxor_vv_v8i8_unmasked: 217; CHECK: # %bb.0: 218; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma 219; CHECK-NEXT: vxor.vv v8, v8, v9 220; CHECK-NEXT: ret 221 %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl) 222 ret <8 x i8> %v 223} 224 225define <8 x i8> @vxor_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { 226; CHECK-LABEL: vxor_vx_v8i8: 227; CHECK: # %bb.0: 228; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 229; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t 230; CHECK-NEXT: ret 231 %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 232 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer 233 %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl) 234 ret <8 x i8> %v 235} 236 237define <8 x i8> @vxor_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { 238; CHECK-LABEL: vxor_vx_v8i8_unmasked: 239; CHECK: # %bb.0: 240; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma 241; CHECK-NEXT: vxor.vx v8, v8, a0 242; CHECK-NEXT: ret 243 %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 244 %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer 245 %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl) 246 ret <8 x i8> %v 247} 248 249define <8 x i8> @vxor_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { 250; CHECK-LABEL: vxor_vi_v8i8: 251; CHECK: # %bb.0: 252; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma 253; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t 254; CHECK-NEXT: ret 255 %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> splat (i8 7), <8 x i1> %m, i32 %evl) 256 ret <8 x i8> %v 257} 258 259define <8 x i8> @vxor_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { 260; CHECK-LABEL: vxor_vi_v8i8_unmasked: 261; CHECK: # %bb.0: 262; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma 263; CHECK-NEXT: vxor.vi v8, v8, 7 264; CHECK-NEXT: ret 265 %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> splat (i8 7), <8 x i1> splat (i1 true), i32 %evl) 266 ret <8 x i8> %v 267} 268 269define <8 x i8> @vxor_vi_v8i8_1(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { 270; CHECK-LABEL: vxor_vi_v8i8_1: 271; CHECK: # %bb.0: 272; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma 273; CHECK-NEXT: vnot.v v8, v8, v0.t 274; CHECK-NEXT: ret 275 %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> %m, i32 %evl) 276 ret <8 x i8> %v 277} 278 279define <8 x i8> @vxor_vi_v8i8_unmasked_1(<8 x i8> %va, i32 zeroext %evl) { 280; CHECK-LABEL: vxor_vi_v8i8_unmasked_1: 281; CHECK: # %bb.0: 282; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma 283; CHECK-NEXT: vnot.v v8, v8 284; CHECK-NEXT: ret 285 %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> splat (i1 true), i32 %evl) 286 ret <8 x i8> %v 287} 288 289declare <9 x i8> @llvm.vp.xor.v9i8(<9 x i8>, <9 x i8>, <9 x i1>, i32) 290 291define <9 x i8> @vxor_vv_v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> %m, i32 zeroext %evl) { 292; CHECK-LABEL: vxor_vv_v9i8: 293; CHECK: # %bb.0: 294; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma 295; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t 296; CHECK-NEXT: ret 297 %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> %m, i32 %evl) 298 ret <9 x i8> %v 299} 300 301define <9 x i8> @vxor_vv_v9i8_unmasked(<9 x i8> %va, <9 x i8> %b, i32 zeroext %evl) { 302; CHECK-LABEL: vxor_vv_v9i8_unmasked: 303; CHECK: # %bb.0: 304; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma 305; CHECK-NEXT: vxor.vv v8, v8, v9 306; CHECK-NEXT: ret 307 %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> splat (i1 true), i32 %evl) 308 ret <9 x i8> %v 309} 310 311define <9 x i8> @vxor_vx_v9i8(<9 x i8> %va, i8 %b, <9 x i1> %m, i32 zeroext %evl) { 312; CHECK-LABEL: vxor_vx_v9i8: 313; CHECK: # %bb.0: 314; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 315; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t 316; CHECK-NEXT: ret 317 %elt.head = insertelement <9 x i8> poison, i8 %b, i32 0 318 %vb = shufflevector <9 x i8> %elt.head, <9 x i8> poison, <9 x i32> zeroinitializer 319 %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl) 320 ret <9 x i8> %v 321} 322 323define <9 x i8> @vxor_vx_v9i8_unmasked(<9 x i8> %va, i8 %b, i32 zeroext %evl) { 324; CHECK-LABEL: vxor_vx_v9i8_unmasked: 325; CHECK: # %bb.0: 326; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 327; CHECK-NEXT: vxor.vx v8, v8, a0 328; CHECK-NEXT: ret 329 %elt.head = insertelement <9 x i8> poison, i8 %b, i32 0 330 %vb = shufflevector <9 x i8> %elt.head, <9 x i8> poison, <9 x i32> zeroinitializer 331 %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> splat (i1 true), i32 %evl) 332 ret <9 x i8> %v 333} 334 335define <9 x i8> @vxor_vi_v9i8(<9 x i8> %va, <9 x i1> %m, i32 zeroext %evl) { 336; CHECK-LABEL: vxor_vi_v9i8: 337; CHECK: # %bb.0: 338; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma 339; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t 340; CHECK-NEXT: ret 341 %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> splat (i8 7), <9 x i1> %m, i32 %evl) 342 ret <9 x i8> %v 343} 344 345define <9 x i8> @vxor_vi_v9i8_unmasked(<9 x i8> %va, i32 zeroext %evl) { 346; CHECK-LABEL: vxor_vi_v9i8_unmasked: 347; CHECK: # %bb.0: 348; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma 349; CHECK-NEXT: vxor.vi v8, v8, 7 350; CHECK-NEXT: ret 351 %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> splat (i8 7), <9 x i1> splat (i1 true), i32 %evl) 352 ret <9 x i8> %v 353} 354 355define <9 x i8> @vxor_vi_v9i8_1(<9 x i8> %va, <9 x i1> %m, i32 zeroext %evl) { 356; CHECK-LABEL: vxor_vi_v9i8_1: 357; CHECK: # %bb.0: 358; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma 359; CHECK-NEXT: vnot.v v8, v8, v0.t 360; CHECK-NEXT: ret 361 %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> splat (i8 -1), <9 x i1> %m, i32 %evl) 362 ret <9 x i8> %v 363} 364 365define <9 x i8> @vxor_vi_v9i8_unmasked_1(<9 x i8> %va, i32 zeroext %evl) { 366; CHECK-LABEL: vxor_vi_v9i8_unmasked_1: 367; CHECK: # %bb.0: 368; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma 369; CHECK-NEXT: vnot.v v8, v8 370; CHECK-NEXT: ret 371 %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> splat (i8 -1), <9 x i1> splat (i1 true), i32 %evl) 372 ret <9 x i8> %v 373} 374 375declare <16 x i8> @llvm.vp.xor.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32) 376 377define <16 x i8> @vxor_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) { 378; CHECK-LABEL: vxor_vv_v16i8: 379; CHECK: # %bb.0: 380; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma 381; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t 382; CHECK-NEXT: ret 383 %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl) 384 ret <16 x i8> %v 385} 386 387define <16 x i8> @vxor_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) { 388; CHECK-LABEL: vxor_vv_v16i8_unmasked: 389; CHECK: # %bb.0: 390; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma 391; CHECK-NEXT: vxor.vv v8, v8, v9 392; CHECK-NEXT: ret 393 %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl) 394 ret <16 x i8> %v 395} 396 397define <16 x i8> @vxor_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) { 398; CHECK-LABEL: vxor_vx_v16i8: 399; CHECK: # %bb.0: 400; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 401; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t 402; CHECK-NEXT: ret 403 %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 404 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer 405 %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl) 406 ret <16 x i8> %v 407} 408 409define <16 x i8> @vxor_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) { 410; CHECK-LABEL: vxor_vx_v16i8_unmasked: 411; CHECK: # %bb.0: 412; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma 413; CHECK-NEXT: vxor.vx v8, v8, a0 414; CHECK-NEXT: ret 415 %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 416 %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer 417 %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl) 418 ret <16 x i8> %v 419} 420 421define <16 x i8> @vxor_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { 422; CHECK-LABEL: vxor_vi_v16i8: 423; CHECK: # %bb.0: 424; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma 425; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t 426; CHECK-NEXT: ret 427 %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> splat (i8 7), <16 x i1> %m, i32 %evl) 428 ret <16 x i8> %v 429} 430 431define <16 x i8> @vxor_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { 432; CHECK-LABEL: vxor_vi_v16i8_unmasked: 433; CHECK: # %bb.0: 434; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma 435; CHECK-NEXT: vxor.vi v8, v8, 7 436; CHECK-NEXT: ret 437 %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> splat (i8 7), <16 x i1> splat (i1 true), i32 %evl) 438 ret <16 x i8> %v 439} 440 441define <16 x i8> @vxor_vi_v16i8_1(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { 442; CHECK-LABEL: vxor_vi_v16i8_1: 443; CHECK: # %bb.0: 444; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma 445; CHECK-NEXT: vnot.v v8, v8, v0.t 446; CHECK-NEXT: ret 447 %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> %m, i32 %evl) 448 ret <16 x i8> %v 449} 450 451define <16 x i8> @vxor_vi_v16i8_unmasked_1(<16 x i8> %va, i32 zeroext %evl) { 452; CHECK-LABEL: vxor_vi_v16i8_unmasked_1: 453; CHECK: # %bb.0: 454; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma 455; CHECK-NEXT: vnot.v v8, v8 456; CHECK-NEXT: ret 457 %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> splat (i1 true), i32 %evl) 458 ret <16 x i8> %v 459} 460 461declare <2 x i16> @llvm.vp.xor.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32) 462 463define <2 x i16> @vxor_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) { 464; CHECK-LABEL: vxor_vv_v2i16: 465; CHECK: # %bb.0: 466; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma 467; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t 468; CHECK-NEXT: ret 469 %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl) 470 ret <2 x i16> %v 471} 472 473define <2 x i16> @vxor_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) { 474; CHECK-LABEL: vxor_vv_v2i16_unmasked: 475; CHECK: # %bb.0: 476; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma 477; CHECK-NEXT: vxor.vv v8, v8, v9 478; CHECK-NEXT: ret 479 %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl) 480 ret <2 x i16> %v 481} 482 483define <2 x i16> @vxor_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) { 484; CHECK-LABEL: vxor_vx_v2i16: 485; CHECK: # %bb.0: 486; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 487; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t 488; CHECK-NEXT: ret 489 %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 490 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer 491 %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl) 492 ret <2 x i16> %v 493} 494 495define <2 x i16> @vxor_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) { 496; CHECK-LABEL: vxor_vx_v2i16_unmasked: 497; CHECK: # %bb.0: 498; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma 499; CHECK-NEXT: vxor.vx v8, v8, a0 500; CHECK-NEXT: ret 501 %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 502 %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer 503 %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl) 504 ret <2 x i16> %v 505} 506 507define <2 x i16> @vxor_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { 508; CHECK-LABEL: vxor_vi_v2i16: 509; CHECK: # %bb.0: 510; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma 511; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t 512; CHECK-NEXT: ret 513 %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> splat (i16 7), <2 x i1> %m, i32 %evl) 514 ret <2 x i16> %v 515} 516 517define <2 x i16> @vxor_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { 518; CHECK-LABEL: vxor_vi_v2i16_unmasked: 519; CHECK: # %bb.0: 520; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma 521; CHECK-NEXT: vxor.vi v8, v8, 7 522; CHECK-NEXT: ret 523 %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> splat (i16 7), <2 x i1> splat (i1 true), i32 %evl) 524 ret <2 x i16> %v 525} 526 527define <2 x i16> @vxor_vi_v2i16_1(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { 528; CHECK-LABEL: vxor_vi_v2i16_1: 529; CHECK: # %bb.0: 530; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma 531; CHECK-NEXT: vnot.v v8, v8, v0.t 532; CHECK-NEXT: ret 533 %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> %m, i32 %evl) 534 ret <2 x i16> %v 535} 536 537define <2 x i16> @vxor_vi_v2i16_unmasked_1(<2 x i16> %va, i32 zeroext %evl) { 538; CHECK-LABEL: vxor_vi_v2i16_unmasked_1: 539; CHECK: # %bb.0: 540; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma 541; CHECK-NEXT: vnot.v v8, v8 542; CHECK-NEXT: ret 543 %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> splat (i1 true), i32 %evl) 544 ret <2 x i16> %v 545} 546 547declare <4 x i16> @llvm.vp.xor.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32) 548 549define <4 x i16> @vxor_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) { 550; CHECK-LABEL: vxor_vv_v4i16: 551; CHECK: # %bb.0: 552; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma 553; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t 554; CHECK-NEXT: ret 555 %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl) 556 ret <4 x i16> %v 557} 558 559define <4 x i16> @vxor_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) { 560; CHECK-LABEL: vxor_vv_v4i16_unmasked: 561; CHECK: # %bb.0: 562; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma 563; CHECK-NEXT: vxor.vv v8, v8, v9 564; CHECK-NEXT: ret 565 %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl) 566 ret <4 x i16> %v 567} 568 569define <4 x i16> @vxor_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) { 570; CHECK-LABEL: vxor_vx_v4i16: 571; CHECK: # %bb.0: 572; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 573; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t 574; CHECK-NEXT: ret 575 %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 576 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer 577 %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl) 578 ret <4 x i16> %v 579} 580 581define <4 x i16> @vxor_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) { 582; CHECK-LABEL: vxor_vx_v4i16_unmasked: 583; CHECK: # %bb.0: 584; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma 585; CHECK-NEXT: vxor.vx v8, v8, a0 586; CHECK-NEXT: ret 587 %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 588 %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer 589 %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl) 590 ret <4 x i16> %v 591} 592 593define <4 x i16> @vxor_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { 594; CHECK-LABEL: vxor_vi_v4i16: 595; CHECK: # %bb.0: 596; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma 597; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t 598; CHECK-NEXT: ret 599 %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> splat (i16 7), <4 x i1> %m, i32 %evl) 600 ret <4 x i16> %v 601} 602 603define <4 x i16> @vxor_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { 604; CHECK-LABEL: vxor_vi_v4i16_unmasked: 605; CHECK: # %bb.0: 606; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma 607; CHECK-NEXT: vxor.vi v8, v8, 7 608; CHECK-NEXT: ret 609 %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> splat (i16 7), <4 x i1> splat (i1 true), i32 %evl) 610 ret <4 x i16> %v 611} 612 613define <4 x i16> @vxor_vi_v4i16_1(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { 614; CHECK-LABEL: vxor_vi_v4i16_1: 615; CHECK: # %bb.0: 616; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma 617; CHECK-NEXT: vnot.v v8, v8, v0.t 618; CHECK-NEXT: ret 619 %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> %m, i32 %evl) 620 ret <4 x i16> %v 621} 622 623define <4 x i16> @vxor_vi_v4i16_unmasked_1(<4 x i16> %va, i32 zeroext %evl) { 624; CHECK-LABEL: vxor_vi_v4i16_unmasked_1: 625; CHECK: # %bb.0: 626; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma 627; CHECK-NEXT: vnot.v v8, v8 628; CHECK-NEXT: ret 629 %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> splat (i1 true), i32 %evl) 630 ret <4 x i16> %v 631} 632 633declare <8 x i16> @llvm.vp.xor.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32) 634 635define <8 x i16> @vxor_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) { 636; CHECK-LABEL: vxor_vv_v8i16: 637; CHECK: # %bb.0: 638; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma 639; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t 640; CHECK-NEXT: ret 641 %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl) 642 ret <8 x i16> %v 643} 644 645define <8 x i16> @vxor_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) { 646; CHECK-LABEL: vxor_vv_v8i16_unmasked: 647; CHECK: # %bb.0: 648; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma 649; CHECK-NEXT: vxor.vv v8, v8, v9 650; CHECK-NEXT: ret 651 %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl) 652 ret <8 x i16> %v 653} 654 655define <8 x i16> @vxor_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) { 656; CHECK-LABEL: vxor_vx_v8i16: 657; CHECK: # %bb.0: 658; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 659; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t 660; CHECK-NEXT: ret 661 %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 662 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer 663 %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl) 664 ret <8 x i16> %v 665} 666 667define <8 x i16> @vxor_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) { 668; CHECK-LABEL: vxor_vx_v8i16_unmasked: 669; CHECK: # %bb.0: 670; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 671; CHECK-NEXT: vxor.vx v8, v8, a0 672; CHECK-NEXT: ret 673 %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 674 %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer 675 %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl) 676 ret <8 x i16> %v 677} 678 679define <8 x i16> @vxor_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { 680; CHECK-LABEL: vxor_vi_v8i16: 681; CHECK: # %bb.0: 682; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma 683; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t 684; CHECK-NEXT: ret 685 %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> splat (i16 7), <8 x i1> %m, i32 %evl) 686 ret <8 x i16> %v 687} 688 689define <8 x i16> @vxor_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { 690; CHECK-LABEL: vxor_vi_v8i16_unmasked: 691; CHECK: # %bb.0: 692; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma 693; CHECK-NEXT: vxor.vi v8, v8, 7 694; CHECK-NEXT: ret 695 %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> splat (i16 7), <8 x i1> splat (i1 true), i32 %evl) 696 ret <8 x i16> %v 697} 698 699define <8 x i16> @vxor_vi_v8i16_1(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { 700; CHECK-LABEL: vxor_vi_v8i16_1: 701; CHECK: # %bb.0: 702; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma 703; CHECK-NEXT: vnot.v v8, v8, v0.t 704; CHECK-NEXT: ret 705 %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> %m, i32 %evl) 706 ret <8 x i16> %v 707} 708 709define <8 x i16> @vxor_vi_v8i16_unmasked_1(<8 x i16> %va, i32 zeroext %evl) { 710; CHECK-LABEL: vxor_vi_v8i16_unmasked_1: 711; CHECK: # %bb.0: 712; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma 713; CHECK-NEXT: vnot.v v8, v8 714; CHECK-NEXT: ret 715 %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> splat (i1 true), i32 %evl) 716 ret <8 x i16> %v 717} 718 719declare <16 x i16> @llvm.vp.xor.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32) 720 721define <16 x i16> @vxor_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) { 722; CHECK-LABEL: vxor_vv_v16i16: 723; CHECK: # %bb.0: 724; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma 725; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t 726; CHECK-NEXT: ret 727 %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl) 728 ret <16 x i16> %v 729} 730 731define <16 x i16> @vxor_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) { 732; CHECK-LABEL: vxor_vv_v16i16_unmasked: 733; CHECK: # %bb.0: 734; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma 735; CHECK-NEXT: vxor.vv v8, v8, v10 736; CHECK-NEXT: ret 737 %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl) 738 ret <16 x i16> %v 739} 740 741define <16 x i16> @vxor_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) { 742; CHECK-LABEL: vxor_vx_v16i16: 743; CHECK: # %bb.0: 744; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 745; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t 746; CHECK-NEXT: ret 747 %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 748 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer 749 %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl) 750 ret <16 x i16> %v 751} 752 753define <16 x i16> @vxor_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) { 754; CHECK-LABEL: vxor_vx_v16i16_unmasked: 755; CHECK: # %bb.0: 756; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma 757; CHECK-NEXT: vxor.vx v8, v8, a0 758; CHECK-NEXT: ret 759 %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 760 %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer 761 %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl) 762 ret <16 x i16> %v 763} 764 765define <16 x i16> @vxor_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { 766; CHECK-LABEL: vxor_vi_v16i16: 767; CHECK: # %bb.0: 768; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma 769; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t 770; CHECK-NEXT: ret 771 %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> splat (i16 7), <16 x i1> %m, i32 %evl) 772 ret <16 x i16> %v 773} 774 775define <16 x i16> @vxor_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { 776; CHECK-LABEL: vxor_vi_v16i16_unmasked: 777; CHECK: # %bb.0: 778; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma 779; CHECK-NEXT: vxor.vi v8, v8, 7 780; CHECK-NEXT: ret 781 %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> splat (i16 7), <16 x i1> splat (i1 true), i32 %evl) 782 ret <16 x i16> %v 783} 784 785define <16 x i16> @vxor_vi_v16i16_1(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { 786; CHECK-LABEL: vxor_vi_v16i16_1: 787; CHECK: # %bb.0: 788; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma 789; CHECK-NEXT: vnot.v v8, v8, v0.t 790; CHECK-NEXT: ret 791 %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> %m, i32 %evl) 792 ret <16 x i16> %v 793} 794 795define <16 x i16> @vxor_vi_v16i16_unmasked_1(<16 x i16> %va, i32 zeroext %evl) { 796; CHECK-LABEL: vxor_vi_v16i16_unmasked_1: 797; CHECK: # %bb.0: 798; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma 799; CHECK-NEXT: vnot.v v8, v8 800; CHECK-NEXT: ret 801 %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> splat (i1 true), i32 %evl) 802 ret <16 x i16> %v 803} 804 805declare <2 x i32> @llvm.vp.xor.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32) 806 807define <2 x i32> @vxor_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) { 808; CHECK-LABEL: vxor_vv_v2i32: 809; CHECK: # %bb.0: 810; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma 811; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t 812; CHECK-NEXT: ret 813 %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl) 814 ret <2 x i32> %v 815} 816 817define <2 x i32> @vxor_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) { 818; CHECK-LABEL: vxor_vv_v2i32_unmasked: 819; CHECK: # %bb.0: 820; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma 821; CHECK-NEXT: vxor.vv v8, v8, v9 822; CHECK-NEXT: ret 823 %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl) 824 ret <2 x i32> %v 825} 826 827define <2 x i32> @vxor_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) { 828; CHECK-LABEL: vxor_vx_v2i32: 829; CHECK: # %bb.0: 830; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 831; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t 832; CHECK-NEXT: ret 833 %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 834 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer 835 %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl) 836 ret <2 x i32> %v 837} 838 839define <2 x i32> @vxor_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) { 840; CHECK-LABEL: vxor_vx_v2i32_unmasked: 841; CHECK: # %bb.0: 842; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma 843; CHECK-NEXT: vxor.vx v8, v8, a0 844; CHECK-NEXT: ret 845 %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 846 %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer 847 %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl) 848 ret <2 x i32> %v 849} 850 851define <2 x i32> @vxor_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { 852; CHECK-LABEL: vxor_vi_v2i32: 853; CHECK: # %bb.0: 854; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma 855; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t 856; CHECK-NEXT: ret 857 %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> splat (i32 7), <2 x i1> %m, i32 %evl) 858 ret <2 x i32> %v 859} 860 861define <2 x i32> @vxor_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { 862; CHECK-LABEL: vxor_vi_v2i32_unmasked: 863; CHECK: # %bb.0: 864; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma 865; CHECK-NEXT: vxor.vi v8, v8, 7 866; CHECK-NEXT: ret 867 %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> splat (i32 7), <2 x i1> splat (i1 true), i32 %evl) 868 ret <2 x i32> %v 869} 870 871define <2 x i32> @vxor_vi_v2i32_1(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { 872; CHECK-LABEL: vxor_vi_v2i32_1: 873; CHECK: # %bb.0: 874; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma 875; CHECK-NEXT: vnot.v v8, v8, v0.t 876; CHECK-NEXT: ret 877 %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> %m, i32 %evl) 878 ret <2 x i32> %v 879} 880 881define <2 x i32> @vxor_vi_v2i32_unmasked_1(<2 x i32> %va, i32 zeroext %evl) { 882; CHECK-LABEL: vxor_vi_v2i32_unmasked_1: 883; CHECK: # %bb.0: 884; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma 885; CHECK-NEXT: vnot.v v8, v8 886; CHECK-NEXT: ret 887 %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> splat (i1 true), i32 %evl) 888 ret <2 x i32> %v 889} 890 891declare <4 x i32> @llvm.vp.xor.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) 892 893define <4 x i32> @vxor_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) { 894; CHECK-LABEL: vxor_vv_v4i32: 895; CHECK: # %bb.0: 896; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma 897; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t 898; CHECK-NEXT: ret 899 %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl) 900 ret <4 x i32> %v 901} 902 903define <4 x i32> @vxor_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) { 904; CHECK-LABEL: vxor_vv_v4i32_unmasked: 905; CHECK: # %bb.0: 906; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma 907; CHECK-NEXT: vxor.vv v8, v8, v9 908; CHECK-NEXT: ret 909 %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl) 910 ret <4 x i32> %v 911} 912 913define <4 x i32> @vxor_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) { 914; CHECK-LABEL: vxor_vx_v4i32: 915; CHECK: # %bb.0: 916; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 917; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t 918; CHECK-NEXT: ret 919 %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 920 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer 921 %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl) 922 ret <4 x i32> %v 923} 924 925define <4 x i32> @vxor_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) { 926; CHECK-LABEL: vxor_vx_v4i32_unmasked: 927; CHECK: # %bb.0: 928; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 929; CHECK-NEXT: vxor.vx v8, v8, a0 930; CHECK-NEXT: ret 931 %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 932 %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer 933 %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl) 934 ret <4 x i32> %v 935} 936 937define <4 x i32> @vxor_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { 938; CHECK-LABEL: vxor_vi_v4i32: 939; CHECK: # %bb.0: 940; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma 941; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t 942; CHECK-NEXT: ret 943 %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> splat (i32 7), <4 x i1> %m, i32 %evl) 944 ret <4 x i32> %v 945} 946 947define <4 x i32> @vxor_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { 948; CHECK-LABEL: vxor_vi_v4i32_unmasked: 949; CHECK: # %bb.0: 950; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma 951; CHECK-NEXT: vxor.vi v8, v8, 7 952; CHECK-NEXT: ret 953 %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> splat (i32 7), <4 x i1> splat (i1 true), i32 %evl) 954 ret <4 x i32> %v 955} 956 957define <4 x i32> @vxor_vi_v4i32_1(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { 958; CHECK-LABEL: vxor_vi_v4i32_1: 959; CHECK: # %bb.0: 960; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma 961; CHECK-NEXT: vnot.v v8, v8, v0.t 962; CHECK-NEXT: ret 963 %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> %m, i32 %evl) 964 ret <4 x i32> %v 965} 966 967define <4 x i32> @vxor_vi_v4i32_unmasked_1(<4 x i32> %va, i32 zeroext %evl) { 968; CHECK-LABEL: vxor_vi_v4i32_unmasked_1: 969; CHECK: # %bb.0: 970; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma 971; CHECK-NEXT: vnot.v v8, v8 972; CHECK-NEXT: ret 973 %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> splat (i1 true), i32 %evl) 974 ret <4 x i32> %v 975} 976 977declare <8 x i32> @llvm.vp.xor.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) 978 979define <8 x i32> @vxor_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) { 980; CHECK-LABEL: vxor_vv_v8i32: 981; CHECK: # %bb.0: 982; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma 983; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t 984; CHECK-NEXT: ret 985 %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl) 986 ret <8 x i32> %v 987} 988 989define <8 x i32> @vxor_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) { 990; CHECK-LABEL: vxor_vv_v8i32_unmasked: 991; CHECK: # %bb.0: 992; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma 993; CHECK-NEXT: vxor.vv v8, v8, v10 994; CHECK-NEXT: ret 995 %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl) 996 ret <8 x i32> %v 997} 998 999define <8 x i32> @vxor_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { 1000; CHECK-LABEL: vxor_vx_v8i32: 1001; CHECK: # %bb.0: 1002; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 1003; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t 1004; CHECK-NEXT: ret 1005 %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 1006 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer 1007 %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl) 1008 ret <8 x i32> %v 1009} 1010 1011define <8 x i32> @vxor_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) { 1012; CHECK-LABEL: vxor_vx_v8i32_unmasked: 1013; CHECK: # %bb.0: 1014; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma 1015; CHECK-NEXT: vxor.vx v8, v8, a0 1016; CHECK-NEXT: ret 1017 %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 1018 %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer 1019 %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl) 1020 ret <8 x i32> %v 1021} 1022 1023define <8 x i32> @vxor_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { 1024; CHECK-LABEL: vxor_vi_v8i32: 1025; CHECK: # %bb.0: 1026; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma 1027; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t 1028; CHECK-NEXT: ret 1029 %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> splat (i32 7), <8 x i1> %m, i32 %evl) 1030 ret <8 x i32> %v 1031} 1032 1033define <8 x i32> @vxor_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { 1034; CHECK-LABEL: vxor_vi_v8i32_unmasked: 1035; CHECK: # %bb.0: 1036; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma 1037; CHECK-NEXT: vxor.vi v8, v8, 7 1038; CHECK-NEXT: ret 1039 %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> splat (i32 7), <8 x i1> splat (i1 true), i32 %evl) 1040 ret <8 x i32> %v 1041} 1042 1043define <8 x i32> @vxor_vi_v8i32_1(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { 1044; CHECK-LABEL: vxor_vi_v8i32_1: 1045; CHECK: # %bb.0: 1046; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma 1047; CHECK-NEXT: vnot.v v8, v8, v0.t 1048; CHECK-NEXT: ret 1049 %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> %m, i32 %evl) 1050 ret <8 x i32> %v 1051} 1052 1053define <8 x i32> @vxor_vi_v8i32_unmasked_1(<8 x i32> %va, i32 zeroext %evl) { 1054; CHECK-LABEL: vxor_vi_v8i32_unmasked_1: 1055; CHECK: # %bb.0: 1056; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma 1057; CHECK-NEXT: vnot.v v8, v8 1058; CHECK-NEXT: ret 1059 %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> splat (i1 true), i32 %evl) 1060 ret <8 x i32> %v 1061} 1062 1063declare <16 x i32> @llvm.vp.xor.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32) 1064 1065define <16 x i32> @vxor_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) { 1066; CHECK-LABEL: vxor_vv_v16i32: 1067; CHECK: # %bb.0: 1068; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma 1069; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t 1070; CHECK-NEXT: ret 1071 %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl) 1072 ret <16 x i32> %v 1073} 1074 1075define <16 x i32> @vxor_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) { 1076; CHECK-LABEL: vxor_vv_v16i32_unmasked: 1077; CHECK: # %bb.0: 1078; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma 1079; CHECK-NEXT: vxor.vv v8, v8, v12 1080; CHECK-NEXT: ret 1081 %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl) 1082 ret <16 x i32> %v 1083} 1084 1085define <16 x i32> @vxor_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) { 1086; CHECK-LABEL: vxor_vx_v16i32: 1087; CHECK: # %bb.0: 1088; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 1089; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t 1090; CHECK-NEXT: ret 1091 %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 1092 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer 1093 %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl) 1094 ret <16 x i32> %v 1095} 1096 1097define <16 x i32> @vxor_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) { 1098; CHECK-LABEL: vxor_vx_v16i32_unmasked: 1099; CHECK: # %bb.0: 1100; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma 1101; CHECK-NEXT: vxor.vx v8, v8, a0 1102; CHECK-NEXT: ret 1103 %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 1104 %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer 1105 %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl) 1106 ret <16 x i32> %v 1107} 1108 1109define <16 x i32> @vxor_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { 1110; CHECK-LABEL: vxor_vi_v16i32: 1111; CHECK: # %bb.0: 1112; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma 1113; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t 1114; CHECK-NEXT: ret 1115 %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> splat (i32 7), <16 x i1> %m, i32 %evl) 1116 ret <16 x i32> %v 1117} 1118 1119define <16 x i32> @vxor_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { 1120; CHECK-LABEL: vxor_vi_v16i32_unmasked: 1121; CHECK: # %bb.0: 1122; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma 1123; CHECK-NEXT: vxor.vi v8, v8, 7 1124; CHECK-NEXT: ret 1125 %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> splat (i32 7), <16 x i1> splat (i1 true), i32 %evl) 1126 ret <16 x i32> %v 1127} 1128 1129define <16 x i32> @vxor_vi_v16i32_1(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { 1130; CHECK-LABEL: vxor_vi_v16i32_1: 1131; CHECK: # %bb.0: 1132; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma 1133; CHECK-NEXT: vnot.v v8, v8, v0.t 1134; CHECK-NEXT: ret 1135 %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> %m, i32 %evl) 1136 ret <16 x i32> %v 1137} 1138 1139define <16 x i32> @vxor_vi_v16i32_unmasked_1(<16 x i32> %va, i32 zeroext %evl) { 1140; CHECK-LABEL: vxor_vi_v16i32_unmasked_1: 1141; CHECK: # %bb.0: 1142; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma 1143; CHECK-NEXT: vnot.v v8, v8 1144; CHECK-NEXT: ret 1145 %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> splat (i1 true), i32 %evl) 1146 ret <16 x i32> %v 1147} 1148 1149declare <2 x i64> @llvm.vp.xor.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32) 1150 1151define <2 x i64> @vxor_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) { 1152; CHECK-LABEL: vxor_vv_v2i64: 1153; CHECK: # %bb.0: 1154; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma 1155; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t 1156; CHECK-NEXT: ret 1157 %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl) 1158 ret <2 x i64> %v 1159} 1160 1161define <2 x i64> @vxor_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) { 1162; CHECK-LABEL: vxor_vv_v2i64_unmasked: 1163; CHECK: # %bb.0: 1164; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma 1165; CHECK-NEXT: vxor.vv v8, v8, v9 1166; CHECK-NEXT: ret 1167 %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl) 1168 ret <2 x i64> %v 1169} 1170 1171define <2 x i64> @vxor_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) { 1172; RV32-LABEL: vxor_vx_v2i64: 1173; RV32: # %bb.0: 1174; RV32-NEXT: addi sp, sp, -16 1175; RV32-NEXT: .cfi_def_cfa_offset 16 1176; RV32-NEXT: sw a0, 8(sp) 1177; RV32-NEXT: sw a1, 12(sp) 1178; RV32-NEXT: addi a0, sp, 8 1179; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma 1180; RV32-NEXT: vlse64.v v9, (a0), zero 1181; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma 1182; RV32-NEXT: vxor.vv v8, v8, v9, v0.t 1183; RV32-NEXT: addi sp, sp, 16 1184; RV32-NEXT: .cfi_def_cfa_offset 0 1185; RV32-NEXT: ret 1186; 1187; RV64-LABEL: vxor_vx_v2i64: 1188; RV64: # %bb.0: 1189; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma 1190; RV64-NEXT: vxor.vx v8, v8, a0, v0.t 1191; RV64-NEXT: ret 1192 %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 1193 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer 1194 %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl) 1195 ret <2 x i64> %v 1196} 1197 1198define <2 x i64> @vxor_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl) { 1199; RV32-LABEL: vxor_vx_v2i64_unmasked: 1200; RV32: # %bb.0: 1201; RV32-NEXT: addi sp, sp, -16 1202; RV32-NEXT: .cfi_def_cfa_offset 16 1203; RV32-NEXT: sw a0, 8(sp) 1204; RV32-NEXT: sw a1, 12(sp) 1205; RV32-NEXT: addi a0, sp, 8 1206; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma 1207; RV32-NEXT: vlse64.v v9, (a0), zero 1208; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma 1209; RV32-NEXT: vxor.vv v8, v8, v9 1210; RV32-NEXT: addi sp, sp, 16 1211; RV32-NEXT: .cfi_def_cfa_offset 0 1212; RV32-NEXT: ret 1213; 1214; RV64-LABEL: vxor_vx_v2i64_unmasked: 1215; RV64: # %bb.0: 1216; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma 1217; RV64-NEXT: vxor.vx v8, v8, a0 1218; RV64-NEXT: ret 1219 %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 1220 %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer 1221 %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl) 1222 ret <2 x i64> %v 1223} 1224 1225define <2 x i64> @vxor_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { 1226; CHECK-LABEL: vxor_vi_v2i64: 1227; CHECK: # %bb.0: 1228; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma 1229; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t 1230; CHECK-NEXT: ret 1231 %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> splat (i64 7), <2 x i1> %m, i32 %evl) 1232 ret <2 x i64> %v 1233} 1234 1235define <2 x i64> @vxor_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { 1236; CHECK-LABEL: vxor_vi_v2i64_unmasked: 1237; CHECK: # %bb.0: 1238; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma 1239; CHECK-NEXT: vxor.vi v8, v8, 7 1240; CHECK-NEXT: ret 1241 %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> splat (i64 7), <2 x i1> splat (i1 true), i32 %evl) 1242 ret <2 x i64> %v 1243} 1244 1245define <2 x i64> @vxor_vi_v2i64_1(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { 1246; CHECK-LABEL: vxor_vi_v2i64_1: 1247; CHECK: # %bb.0: 1248; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma 1249; CHECK-NEXT: vnot.v v8, v8, v0.t 1250; CHECK-NEXT: ret 1251 %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> %m, i32 %evl) 1252 ret <2 x i64> %v 1253} 1254 1255define <2 x i64> @vxor_vi_v2i64_unmasked_1(<2 x i64> %va, i32 zeroext %evl) { 1256; CHECK-LABEL: vxor_vi_v2i64_unmasked_1: 1257; CHECK: # %bb.0: 1258; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma 1259; CHECK-NEXT: vnot.v v8, v8 1260; CHECK-NEXT: ret 1261 %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> splat (i1 true), i32 %evl) 1262 ret <2 x i64> %v 1263} 1264 1265declare <4 x i64> @llvm.vp.xor.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32) 1266 1267define <4 x i64> @vxor_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) { 1268; CHECK-LABEL: vxor_vv_v4i64: 1269; CHECK: # %bb.0: 1270; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma 1271; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t 1272; CHECK-NEXT: ret 1273 %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl) 1274 ret <4 x i64> %v 1275} 1276 1277define <4 x i64> @vxor_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) { 1278; CHECK-LABEL: vxor_vv_v4i64_unmasked: 1279; CHECK: # %bb.0: 1280; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma 1281; CHECK-NEXT: vxor.vv v8, v8, v10 1282; CHECK-NEXT: ret 1283 %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl) 1284 ret <4 x i64> %v 1285} 1286 1287define <4 x i64> @vxor_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) { 1288; RV32-LABEL: vxor_vx_v4i64: 1289; RV32: # %bb.0: 1290; RV32-NEXT: addi sp, sp, -16 1291; RV32-NEXT: .cfi_def_cfa_offset 16 1292; RV32-NEXT: sw a0, 8(sp) 1293; RV32-NEXT: sw a1, 12(sp) 1294; RV32-NEXT: addi a0, sp, 8 1295; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma 1296; RV32-NEXT: vlse64.v v10, (a0), zero 1297; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma 1298; RV32-NEXT: vxor.vv v8, v8, v10, v0.t 1299; RV32-NEXT: addi sp, sp, 16 1300; RV32-NEXT: .cfi_def_cfa_offset 0 1301; RV32-NEXT: ret 1302; 1303; RV64-LABEL: vxor_vx_v4i64: 1304; RV64: # %bb.0: 1305; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma 1306; RV64-NEXT: vxor.vx v8, v8, a0, v0.t 1307; RV64-NEXT: ret 1308 %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 1309 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer 1310 %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl) 1311 ret <4 x i64> %v 1312} 1313 1314define <4 x i64> @vxor_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl) { 1315; RV32-LABEL: vxor_vx_v4i64_unmasked: 1316; RV32: # %bb.0: 1317; RV32-NEXT: addi sp, sp, -16 1318; RV32-NEXT: .cfi_def_cfa_offset 16 1319; RV32-NEXT: sw a0, 8(sp) 1320; RV32-NEXT: sw a1, 12(sp) 1321; RV32-NEXT: addi a0, sp, 8 1322; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma 1323; RV32-NEXT: vlse64.v v10, (a0), zero 1324; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma 1325; RV32-NEXT: vxor.vv v8, v8, v10 1326; RV32-NEXT: addi sp, sp, 16 1327; RV32-NEXT: .cfi_def_cfa_offset 0 1328; RV32-NEXT: ret 1329; 1330; RV64-LABEL: vxor_vx_v4i64_unmasked: 1331; RV64: # %bb.0: 1332; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma 1333; RV64-NEXT: vxor.vx v8, v8, a0 1334; RV64-NEXT: ret 1335 %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 1336 %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer 1337 %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl) 1338 ret <4 x i64> %v 1339} 1340 1341define <4 x i64> @vxor_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { 1342; CHECK-LABEL: vxor_vi_v4i64: 1343; CHECK: # %bb.0: 1344; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma 1345; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t 1346; CHECK-NEXT: ret 1347 %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> splat (i64 7), <4 x i1> %m, i32 %evl) 1348 ret <4 x i64> %v 1349} 1350 1351define <4 x i64> @vxor_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { 1352; CHECK-LABEL: vxor_vi_v4i64_unmasked: 1353; CHECK: # %bb.0: 1354; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma 1355; CHECK-NEXT: vxor.vi v8, v8, 7 1356; CHECK-NEXT: ret 1357 %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> splat (i64 7), <4 x i1> splat (i1 true), i32 %evl) 1358 ret <4 x i64> %v 1359} 1360 1361define <4 x i64> @vxor_vi_v4i64_1(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { 1362; CHECK-LABEL: vxor_vi_v4i64_1: 1363; CHECK: # %bb.0: 1364; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma 1365; CHECK-NEXT: vnot.v v8, v8, v0.t 1366; CHECK-NEXT: ret 1367 %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> %m, i32 %evl) 1368 ret <4 x i64> %v 1369} 1370 1371define <4 x i64> @vxor_vi_v4i64_unmasked_1(<4 x i64> %va, i32 zeroext %evl) { 1372; CHECK-LABEL: vxor_vi_v4i64_unmasked_1: 1373; CHECK: # %bb.0: 1374; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma 1375; CHECK-NEXT: vnot.v v8, v8 1376; CHECK-NEXT: ret 1377 %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> splat (i1 true), i32 %evl) 1378 ret <4 x i64> %v 1379} 1380 1381declare <8 x i64> @llvm.vp.xor.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32) 1382 1383define <8 x i64> @vxor_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) { 1384; CHECK-LABEL: vxor_vv_v8i64: 1385; CHECK: # %bb.0: 1386; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma 1387; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t 1388; CHECK-NEXT: ret 1389 %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl) 1390 ret <8 x i64> %v 1391} 1392 1393define <8 x i64> @vxor_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) { 1394; CHECK-LABEL: vxor_vv_v8i64_unmasked: 1395; CHECK: # %bb.0: 1396; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma 1397; CHECK-NEXT: vxor.vv v8, v8, v12 1398; CHECK-NEXT: ret 1399 %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl) 1400 ret <8 x i64> %v 1401} 1402 1403define <8 x i64> @vxor_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) { 1404; RV32-LABEL: vxor_vx_v8i64: 1405; RV32: # %bb.0: 1406; RV32-NEXT: addi sp, sp, -16 1407; RV32-NEXT: .cfi_def_cfa_offset 16 1408; RV32-NEXT: sw a0, 8(sp) 1409; RV32-NEXT: sw a1, 12(sp) 1410; RV32-NEXT: addi a0, sp, 8 1411; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma 1412; RV32-NEXT: vlse64.v v12, (a0), zero 1413; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma 1414; RV32-NEXT: vxor.vv v8, v8, v12, v0.t 1415; RV32-NEXT: addi sp, sp, 16 1416; RV32-NEXT: .cfi_def_cfa_offset 0 1417; RV32-NEXT: ret 1418; 1419; RV64-LABEL: vxor_vx_v8i64: 1420; RV64: # %bb.0: 1421; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma 1422; RV64-NEXT: vxor.vx v8, v8, a0, v0.t 1423; RV64-NEXT: ret 1424 %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 1425 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer 1426 %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl) 1427 ret <8 x i64> %v 1428} 1429 1430define <8 x i64> @vxor_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl) { 1431; RV32-LABEL: vxor_vx_v8i64_unmasked: 1432; RV32: # %bb.0: 1433; RV32-NEXT: addi sp, sp, -16 1434; RV32-NEXT: .cfi_def_cfa_offset 16 1435; RV32-NEXT: sw a0, 8(sp) 1436; RV32-NEXT: sw a1, 12(sp) 1437; RV32-NEXT: addi a0, sp, 8 1438; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma 1439; RV32-NEXT: vlse64.v v12, (a0), zero 1440; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma 1441; RV32-NEXT: vxor.vv v8, v8, v12 1442; RV32-NEXT: addi sp, sp, 16 1443; RV32-NEXT: .cfi_def_cfa_offset 0 1444; RV32-NEXT: ret 1445; 1446; RV64-LABEL: vxor_vx_v8i64_unmasked: 1447; RV64: # %bb.0: 1448; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma 1449; RV64-NEXT: vxor.vx v8, v8, a0 1450; RV64-NEXT: ret 1451 %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 1452 %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer 1453 %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl) 1454 ret <8 x i64> %v 1455} 1456 1457define <8 x i64> @vxor_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { 1458; CHECK-LABEL: vxor_vi_v8i64: 1459; CHECK: # %bb.0: 1460; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma 1461; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t 1462; CHECK-NEXT: ret 1463 %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> splat (i64 7), <8 x i1> %m, i32 %evl) 1464 ret <8 x i64> %v 1465} 1466 1467define <8 x i64> @vxor_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { 1468; CHECK-LABEL: vxor_vi_v8i64_unmasked: 1469; CHECK: # %bb.0: 1470; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma 1471; CHECK-NEXT: vxor.vi v8, v8, 7 1472; CHECK-NEXT: ret 1473 %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> splat (i64 7), <8 x i1> splat (i1 true), i32 %evl) 1474 ret <8 x i64> %v 1475} 1476 1477define <8 x i64> @vxor_vi_v8i64_1(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { 1478; CHECK-LABEL: vxor_vi_v8i64_1: 1479; CHECK: # %bb.0: 1480; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma 1481; CHECK-NEXT: vnot.v v8, v8, v0.t 1482; CHECK-NEXT: ret 1483 %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> %m, i32 %evl) 1484 ret <8 x i64> %v 1485} 1486 1487define <8 x i64> @vxor_vi_v8i64_unmasked_1(<8 x i64> %va, i32 zeroext %evl) { 1488; CHECK-LABEL: vxor_vi_v8i64_unmasked_1: 1489; CHECK: # %bb.0: 1490; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma 1491; CHECK-NEXT: vnot.v v8, v8 1492; CHECK-NEXT: ret 1493 %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> splat (i1 true), i32 %evl) 1494 ret <8 x i64> %v 1495} 1496 1497declare <16 x i64> @llvm.vp.xor.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) 1498 1499define <16 x i64> @vxor_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { 1500; CHECK-LABEL: vxor_vv_v16i64: 1501; CHECK: # %bb.0: 1502; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma 1503; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t 1504; CHECK-NEXT: ret 1505 %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl) 1506 ret <16 x i64> %v 1507} 1508 1509define <16 x i64> @vxor_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) { 1510; CHECK-LABEL: vxor_vv_v16i64_unmasked: 1511; CHECK: # %bb.0: 1512; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma 1513; CHECK-NEXT: vxor.vv v8, v8, v16 1514; CHECK-NEXT: ret 1515 %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl) 1516 ret <16 x i64> %v 1517} 1518 1519define <16 x i64> @vxor_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) { 1520; RV32-LABEL: vxor_vx_v16i64: 1521; RV32: # %bb.0: 1522; RV32-NEXT: addi sp, sp, -16 1523; RV32-NEXT: .cfi_def_cfa_offset 16 1524; RV32-NEXT: sw a0, 8(sp) 1525; RV32-NEXT: sw a1, 12(sp) 1526; RV32-NEXT: addi a0, sp, 8 1527; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma 1528; RV32-NEXT: vlse64.v v16, (a0), zero 1529; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma 1530; RV32-NEXT: vxor.vv v8, v8, v16, v0.t 1531; RV32-NEXT: addi sp, sp, 16 1532; RV32-NEXT: .cfi_def_cfa_offset 0 1533; RV32-NEXT: ret 1534; 1535; RV64-LABEL: vxor_vx_v16i64: 1536; RV64: # %bb.0: 1537; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma 1538; RV64-NEXT: vxor.vx v8, v8, a0, v0.t 1539; RV64-NEXT: ret 1540 %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 1541 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer 1542 %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl) 1543 ret <16 x i64> %v 1544} 1545 1546define <16 x i64> @vxor_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %evl) { 1547; RV32-LABEL: vxor_vx_v16i64_unmasked: 1548; RV32: # %bb.0: 1549; RV32-NEXT: addi sp, sp, -16 1550; RV32-NEXT: .cfi_def_cfa_offset 16 1551; RV32-NEXT: sw a0, 8(sp) 1552; RV32-NEXT: sw a1, 12(sp) 1553; RV32-NEXT: addi a0, sp, 8 1554; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma 1555; RV32-NEXT: vlse64.v v16, (a0), zero 1556; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma 1557; RV32-NEXT: vxor.vv v8, v8, v16 1558; RV32-NEXT: addi sp, sp, 16 1559; RV32-NEXT: .cfi_def_cfa_offset 0 1560; RV32-NEXT: ret 1561; 1562; RV64-LABEL: vxor_vx_v16i64_unmasked: 1563; RV64: # %bb.0: 1564; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma 1565; RV64-NEXT: vxor.vx v8, v8, a0 1566; RV64-NEXT: ret 1567 %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 1568 %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer 1569 %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl) 1570 ret <16 x i64> %v 1571} 1572 1573define <16 x i64> @vxor_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { 1574; CHECK-LABEL: vxor_vi_v16i64: 1575; CHECK: # %bb.0: 1576; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma 1577; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t 1578; CHECK-NEXT: ret 1579 %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> splat (i64 7), <16 x i1> %m, i32 %evl) 1580 ret <16 x i64> %v 1581} 1582 1583define <16 x i64> @vxor_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { 1584; CHECK-LABEL: vxor_vi_v16i64_unmasked: 1585; CHECK: # %bb.0: 1586; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma 1587; CHECK-NEXT: vxor.vi v8, v8, 7 1588; CHECK-NEXT: ret 1589 %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> splat (i64 7), <16 x i1> splat (i1 true), i32 %evl) 1590 ret <16 x i64> %v 1591} 1592 1593define <16 x i64> @vxor_vi_v16i64_1(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { 1594; CHECK-LABEL: vxor_vi_v16i64_1: 1595; CHECK: # %bb.0: 1596; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma 1597; CHECK-NEXT: vnot.v v8, v8, v0.t 1598; CHECK-NEXT: ret 1599 %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> %m, i32 %evl) 1600 ret <16 x i64> %v 1601} 1602 1603define <16 x i64> @vxor_vi_v16i64_unmasked_1(<16 x i64> %va, i32 zeroext %evl) { 1604; CHECK-LABEL: vxor_vi_v16i64_unmasked_1: 1605; CHECK: # %bb.0: 1606; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma 1607; CHECK-NEXT: vnot.v v8, v8 1608; CHECK-NEXT: ret 1609 %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> splat (i1 true), i32 %evl) 1610 ret <16 x i64> %v 1611} 1612