; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64 -mattr=+sve < %s -o - | FileCheck --check-prefix=SVE %s
; RUN: llc -mtriple=aarch64 -mattr=+sve2 < %s -o - | FileCheck --check-prefix=SVE2 %s
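; Verify that (xor (and (xor x, -1), y), z) is selected to a single SVE2 BCAX
; instruction, and to BIC followed by EOR when only SVE is available. In the
; _1 variants the destructive BCAX operand is already in z0; in the _2
; variants it is in z2, so an extra MOV is needed to return the result in z0.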

define <vscale x 2 x i64> @bcax_nxv2i64_1(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2) {
; SVE-LABEL: bcax_nxv2i64_1:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z1.d, z2.d, z1.d
; SVE-NEXT:    eor z0.d, z1.d, z0.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv2i64_1:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z0.d, z0.d, z2.d, z1.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 2 x i64> %1, splat (i64 -1)
  %5 = and <vscale x 2 x i64> %4, %2
  %6 = xor <vscale x 2 x i64> %5, %0
  ret <vscale x 2 x i64> %6
}

define <vscale x 2 x i64> @bcax_nxv2i64_2(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2) {
; SVE-LABEL: bcax_nxv2i64_2:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z0.d, z0.d, z1.d
; SVE-NEXT:    eor z0.d, z0.d, z2.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv2i64_2:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z2.d, z2.d, z0.d, z1.d
; SVE2-NEXT:    mov z0.d, z2.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 2 x i64> %1, splat (i64 -1)
  %5 = and <vscale x 2 x i64> %4, %0
  %6 = xor <vscale x 2 x i64> %5, %2
  ret <vscale x 2 x i64> %6
}

define <vscale x 4 x i32> @bcax_nxv4i32_1(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2) {
; SVE-LABEL: bcax_nxv4i32_1:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z1.d, z2.d, z1.d
; SVE-NEXT:    eor z0.d, z1.d, z0.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv4i32_1:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z0.d, z0.d, z2.d, z1.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 4 x i32> %1, splat (i32 -1)
  %5 = and <vscale x 4 x i32> %4, %2
  %6 = xor <vscale x 4 x i32> %5, %0
  ret <vscale x 4 x i32> %6
}

define <vscale x 4 x i32> @bcax_nxv4i32_2(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2) {
; SVE-LABEL: bcax_nxv4i32_2:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z0.d, z0.d, z1.d
; SVE-NEXT:    eor z0.d, z0.d, z2.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv4i32_2:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z2.d, z2.d, z0.d, z1.d
; SVE2-NEXT:    mov z0.d, z2.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 4 x i32> %1, splat (i32 -1)
  %5 = and <vscale x 4 x i32> %4, %0
  %6 = xor <vscale x 4 x i32> %5, %2
  ret <vscale x 4 x i32> %6
}

define <vscale x 8 x i16> @bcax_nxv8i16_1(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2) {
; SVE-LABEL: bcax_nxv8i16_1:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z1.d, z2.d, z1.d
; SVE-NEXT:    eor z0.d, z1.d, z0.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv8i16_1:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z0.d, z0.d, z2.d, z1.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 8 x i16> %1, splat (i16 -1)
  %5 = and <vscale x 8 x i16> %4, %2
  %6 = xor <vscale x 8 x i16> %5, %0
  ret <vscale x 8 x i16> %6
}

define <vscale x 8 x i16> @bcax_nxv8i16_2(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2) {
; SVE-LABEL: bcax_nxv8i16_2:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z0.d, z0.d, z1.d
; SVE-NEXT:    eor z0.d, z0.d, z2.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv8i16_2:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z2.d, z2.d, z0.d, z1.d
; SVE2-NEXT:    mov z0.d, z2.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 8 x i16> %1, splat (i16 -1)
  %5 = and <vscale x 8 x i16> %4, %0
  %6 = xor <vscale x 8 x i16> %5, %2
  ret <vscale x 8 x i16> %6
}

define <vscale x 16 x i8> @bcax_nxv16i8_1(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2) {
; SVE-LABEL: bcax_nxv16i8_1:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z1.d, z2.d, z1.d
; SVE-NEXT:    eor z0.d, z1.d, z0.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv16i8_1:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z0.d, z0.d, z2.d, z1.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 16 x i8> %1, splat (i8 -1)
  %5 = and <vscale x 16 x i8> %4, %2
  %6 = xor <vscale x 16 x i8> %5, %0
  ret <vscale x 16 x i8> %6
}

define <vscale x 16 x i8> @bcax_nxv16i8_2(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2) {
; SVE-LABEL: bcax_nxv16i8_2:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z0.d, z0.d, z1.d
; SVE-NEXT:    eor z0.d, z0.d, z2.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv16i8_2:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z2.d, z2.d, z0.d, z1.d
; SVE2-NEXT:    mov z0.d, z2.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 16 x i8> %1, splat (i8 -1)
  %5 = and <vscale x 16 x i8> %4, %0
  %6 = xor <vscale x 16 x i8> %5, %2
  ret <vscale x 16 x i8> %6
}