; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

declare <vscale x 8 x i7> @llvm.vp.xor.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i7> @vxor_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv8i7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i7> @llvm.vp.xor.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i7> %v
}

declare <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @vxor_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vxor_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vxor_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vxor_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vxor_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 7), <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vxor_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 7), <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

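; The _1 variants below check that a VP xor with an all-ones splat selects
; vnot.v rather than vxor.vi.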
define <vscale x 1 x i8> @vxor_vi_nxv1i8_1(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vxor_vi_nxv1i8_unmasked_1(<vscale x 1 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i8_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

declare <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vxor_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vxor_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vxor_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vxor_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vxor_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 7), <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vxor_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 7), <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vxor_vi_nxv2i8_1(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vxor_vi_nxv2i8_unmasked_1(<vscale x 2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i8_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

declare <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i8> @vxor_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vxor_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vxor_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vxor_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vxor_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 7), <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vxor_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 7), <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vxor_vi_nxv4i8_1(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vxor_vi_nxv4i8_unmasked_1(<vscale x 4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i8_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

declare <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i8> @vxor_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vxor_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vxor_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vxor_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vxor_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 7), <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vxor_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 7), <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vxor_vi_nxv8i8_1(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vxor_vi_nxv8i8_unmasked_1(<vscale x 8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i8_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

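; nxv15i8 has a non-power-of-two element count; it should be legalized and
; lowered the same way as nxv16i8, at LMUL=2.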
declare <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8>, <vscale x 15 x i8>, <vscale x 15 x i1>, i32)

define <vscale x 15 x i8> @vxor_vv_nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %b, <vscale x 15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv15i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %b, <vscale x 15 x i1> %m, i32 %evl)
  ret <vscale x 15 x i8> %v
}

define <vscale x 15 x i8> @vxor_vv_nxv15i8_unmasked(<vscale x 15 x i8> %va, <vscale x 15 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv15i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %b, <vscale x 15 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 15 x i8> %v
}

define <vscale x 15 x i8> @vxor_vx_nxv15i8(<vscale x 15 x i8> %va, i8 %b, <vscale x 15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv15i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 15 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 15 x i8> %elt.head, <vscale x 15 x i8> poison, <vscale x 15 x i32> zeroinitializer
  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %vb, <vscale x 15 x i1> %m, i32 %evl)
  ret <vscale x 15 x i8> %v
}

define <vscale x 15 x i8> @vxor_vx_nxv15i8_unmasked(<vscale x 15 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv15i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 15 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 15 x i8> %elt.head, <vscale x 15 x i8> poison, <vscale x 15 x i32> zeroinitializer
  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %vb, <vscale x 15 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 15 x i8> %v
}

define <vscale x 15 x i8> @vxor_vi_nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv15i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> splat (i8 7), <vscale x 15 x i1> %m, i32 %evl)
  ret <vscale x 15 x i8> %v
}

define <vscale x 15 x i8> @vxor_vi_nxv15i8_unmasked(<vscale x 15 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv15i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> splat (i8 7), <vscale x 15 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 15 x i8> %v
}

define <vscale x 15 x i8> @vxor_vi_nxv15i8_1(<vscale x 15 x i8> %va, <vscale x 15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv15i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> splat (i8 -1), <vscale x 15 x i1> %m, i32 %evl)
  ret <vscale x 15 x i8> %v
}

define <vscale x 15 x i8> @vxor_vi_nxv15i8_unmasked_1(<vscale x 15 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv15i8_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> splat (i8 -1), <vscale x 15 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 15 x i8> %v
}

declare <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i8> @vxor_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vxor_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vxor_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vxor_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vxor_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 7), <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vxor_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 7), <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vxor_vi_nxv16i8_1(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv16i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vxor_vi_nxv16i8_unmasked_1(<vscale x 16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv16i8_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

declare <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i8> @vxor_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vxor_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vxor_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vxor_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vxor_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 7), <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vxor_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 7), <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vxor_vi_nxv32i8_1(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv32i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vxor_vi_nxv32i8_unmasked_1(<vscale x 32 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv32i8_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

declare <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)

define <vscale x 64 x i8> @vxor_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vxor_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vxor_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vxor_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vxor_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 7), <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vxor_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 7), <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vxor_vi_nxv64i8_1(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv64i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vxor_vi_nxv64i8_unmasked_1(<vscale x 64 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv64i8_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

declare <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @vxor_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vxor_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vxor_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

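; The splat operand may appear on either side of the xor; the commuted form
; below should still select vxor.vx.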
define <vscale x 1 x i16> @vxor_vx_nxv1i16_commute(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv1i16_commute:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %vb, <vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vxor_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vxor_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 7), <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vxor_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 7), <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vxor_vi_nxv1i16_1(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vxor_vi_nxv1i16_unmasked_1(<vscale x 1 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i16_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

declare <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vxor_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vxor_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vxor_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vxor_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vxor_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 7), <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vxor_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 7), <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vxor_vi_nxv2i16_1(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vxor_vi_nxv2i16_unmasked_1(<vscale x 2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i16_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

declare <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i16> @vxor_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vxor_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vxor_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vxor_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vxor_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 7), <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vxor_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 7), <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vxor_vi_nxv4i16_1(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vxor_vi_nxv4i16_unmasked_1(<vscale x 4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i16_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

declare <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i16> @vxor_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vxor_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vxor_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vxor_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vxor_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 7), <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vxor_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 7), <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vxor_vi_nxv8i16_1(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vxor_vi_nxv8i16_unmasked_1(<vscale x 8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i16_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

declare <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i16> @vxor_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vxor_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vxor_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vxor_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vxor_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 7), <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vxor_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 7), <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vxor_vi_nxv16i16_1(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv16i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vxor_vi_nxv16i16_unmasked_1(<vscale x 16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv16i16_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

declare <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i16> @vxor_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vxor_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vxor_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vxor_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vxor_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 7), <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vxor_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 7), <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vxor_vi_nxv32i16_1(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv32i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vxor_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 7), <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vxor_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 7), <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vxor_vi_nxv32i16_1(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv32i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vxor_vi_nxv32i16_unmasked_1(<vscale x 32 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv32i16_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}

declare <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i32> @vxor_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vxor_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vxor_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vxor_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vxor_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 7), <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vxor_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 7), <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vxor_vi_nxv1i32_1(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vxor_vi_nxv1i32_unmasked_1(<vscale x 1 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i32_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}

declare <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @vxor_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vxor_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vxor_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vxor_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vxor_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 7), <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vxor_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 7), <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vxor_vi_nxv2i32_1(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vxor_vi_nxv2i32_unmasked_1(<vscale x 2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i32_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}

declare <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i32> @vxor_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vxor_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vxor_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vxor_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vxor_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 7), <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vxor_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 7), <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vxor_vi_nxv4i32_1(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vxor_vi_nxv4i32_unmasked_1(<vscale x 4 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i32_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}

declare <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i32> @vxor_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vxor_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vxor_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vxor_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vxor_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 7), <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vxor_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 7), <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vxor_vi_nxv8i32_1(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vxor_vi_nxv8i32_unmasked_1(<vscale x 8 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i32_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}

declare <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i32> @vxor_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vxor_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vxor_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vxor_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vxor_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 7), <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vxor_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 7), <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vxor_vi_nxv16i32_1(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv16i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vxor_vi_nxv16i32_unmasked_1(<vscale x 16 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv16i32_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}

declare <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i64> @vxor_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vxor_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv1i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}

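; NOTE: In the i64 vector-scalar tests below, RV32 holds a 64-bit scalar in
; a GPR pair (a0/a1), so it cannot feed vxor.vx directly; the splat is built
; by storing both halves to a stack slot and broadcasting them with a
; zero-strided vlse64.v, after which a vxor.vv is used. RV64 passes the
; scalar in a single GPR and keeps vxor.vx.
; (Hand-written note; the CHECK lines themselves are autogenerated.)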
define <vscale x 1 x i64> @vxor_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vxor_vx_nxv1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vxor.vv v8, v8, v9, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vxor_vx_nxv1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vxor.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vxor_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vxor_vx_nxv1i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vxor.vv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vxor_vx_nxv1i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vxor.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vxor_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 7), <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vxor_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 7), <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vxor_vi_nxv1i64_1(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vxor_vi_nxv1i64_unmasked_1(<vscale x 1 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i64_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}

declare <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i64> @vxor_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vxor_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vxor_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vxor_vx_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vxor.vv v8, v8, v10, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vxor_vx_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vxor.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vxor_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vxor_vx_nxv2i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vxor.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vxor_vx_nxv2i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vxor.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vxor_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 7), <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vxor_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 7), <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vxor_vi_nxv2i64_1(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vxor_vi_nxv2i64_unmasked_1(<vscale x 2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i64_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}

declare <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i64> @vxor_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vxor_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vxor_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vxor_vx_nxv4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vxor.vv v8, v8, v12, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vxor_vx_nxv4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vxor.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vxor_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vxor_vx_nxv4i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vxor.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vxor_vx_nxv4i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vxor.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vxor_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 7), <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vxor_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 7), <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vxor_vi_nxv4i64_1(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vxor_vi_nxv4i64_unmasked_1(<vscale x 4 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i64_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}

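; NOTE: At LMUL=8 each vector operand occupies a full group of eight
; registers, so under the RVV calling convention %va arrives in v8-v15 and
; %b in v16-v23; that is why the m8 tests below operate on v8 and v16.
; (Hand-written note; the CHECK lines themselves are autogenerated.)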
declare <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i64> @vxor_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vxor_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vxor_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vxor_vx_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vxor.vv v8, v8, v16, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vxor_vx_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vxor.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vxor_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vxor_vx_nxv8i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vxor.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vxor_vx_nxv8i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vxor.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vxor_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 7), <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vxor_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 7), <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vxor_vi_nxv8i64_1(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vxor_vi_nxv8i64_unmasked_1(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i64_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}