; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s

define void @xor_v16i8(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: xor_v16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  ; xor of two loaded <16 x i8> vectors should select the LSX vxor.v instruction.
  %v0 = load <16 x i8>, ptr %a0
  %v1 = load <16 x i8>, ptr %a1
  %v2 = xor <16 x i8> %v0, %v1
  store <16 x i8> %v2, ptr %res
  ret void
}

define void @xor_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: xor_v8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  ; xor of two loaded <8 x i16> vectors should select the LSX vxor.v instruction.
  %v0 = load <8 x i16>, ptr %a0
  %v1 = load <8 x i16>, ptr %a1
  %v2 = xor <8 x i16> %v0, %v1
  store <8 x i16> %v2, ptr %res
  ret void
}

define void @xor_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: xor_v4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  ; xor of two loaded <4 x i32> vectors should select the LSX vxor.v instruction.
  %v0 = load <4 x i32>, ptr %a0
  %v1 = load <4 x i32>, ptr %a1
  %v2 = xor <4 x i32> %v0, %v1
  store <4 x i32> %v2, ptr %res
  ret void
}

define void @xor_v2i64(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: xor_v2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  ; xor of two loaded <2 x i64> vectors should select the LSX vxor.v instruction.
  %v0 = load <2 x i64>, ptr %a0
  %v1 = load <2 x i64>, ptr %a1
  %v2 = xor <2 x i64> %v0, %v1
  store <2 x i64> %v2, ptr %res
  ret void
}

define void @xor_u_v16i8(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: xor_u_v16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vxori.b $vr0, $vr0, 31
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  ; xor with a splat-of-31 i8 constant should fold the immediate into vxori.b.
  %v0 = load <16 x i8>, ptr %a0
  %v1 = xor <16 x i8> %v0, <i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31>
  store <16 x i8> %v1, ptr %res
  ret void
}

define void @xor_u_v8i16(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: xor_u_v8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vrepli.h $vr1, 31
; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  ; xor with a splat-of-31 i16 constant: no i16 xor-immediate exists, so the
  ; splat is materialized with vrepli.h and combined with vxor.v.
  %v0 = load <8 x i16>, ptr %a0
  %v1 = xor <8 x i16> %v0, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
  store <8 x i16> %v1, ptr %res
  ret void
}

define void @xor_u_v4i32(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: xor_u_v4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vrepli.w $vr1, 31
; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  ; xor with a splat-of-31 i32 constant: splat is materialized with vrepli.w
  ; and combined with vxor.v.
  %v0 = load <4 x i32>, ptr %a0
  %v1 = xor <4 x i32> %v0, <i32 31, i32 31, i32 31, i32 31>
  store <4 x i32> %v1, ptr %res
  ret void
}

define void @xor_u_v2i64(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: xor_u_v2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vrepli.d $vr1, 31
; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  ; xor with a splat-of-31 i64 constant: splat is materialized with vrepli.d
  ; and combined with vxor.v.
  %v0 = load <2 x i64>, ptr %a0
  %v1 = xor <2 x i64> %v0, <i64 31, i64 31>
  store <2 x i64> %v1, ptr %res
  ret void
}
