; Source: llvm-project llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/and.ll
; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}

; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: %[[Char:.*]] = OpTypeInt 8 0
; CHECK-DAG: %[[CharVec2:.*]] = OpTypeVector %[[Char]] 2
; CHECK-DAG: %[[CharVec3:.*]] = OpTypeVector %[[Char]] 3

; CHECK-DAG: %[[Short:.*]] = OpTypeInt 16 0
; CHECK-DAG: %[[ShortVec2:.*]] = OpTypeVector %[[Short]] 2
; CHECK-DAG: %[[ShortVec3:.*]] = OpTypeVector %[[Short]] 3

; CHECK-DAG: %[[Int:.*]] = OpTypeInt 32 0
; CHECK-DAG: %[[IntVec2:.*]] = OpTypeVector %[[Int]] 2
; CHECK-DAG: %[[IntVec3:.*]] = OpTypeVector %[[Int]] 3

; CHECK-DAG: %[[Long:.*]] = OpTypeInt 64 0
; CHECK-DAG: %[[LongVec2:.*]] = OpTypeVector %[[Long]] 2
; CHECK-DAG: %[[LongVec3:.*]] = OpTypeVector %[[Long]] 3
; CHECK: OpFunction
; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[CharVec2]] %[[#]] %[[#]] 1 -1
; CHECK: %[[Added1:.*]] = OpBitwiseAnd %[[CharVec2]] %[[#]] %[[#]]
; CHECK: %[[Vec2CharR:.*]] = OpCompositeExtract %[[Char]] %[[Added1]] 0
; CHECK: OpReturnValue %[[Vec2CharR]]
; CHECK: OpFunctionEnd

; CHECK: OpFunction
; CHECK: %[[ParamVec3Char:.*]] = OpFunctionParameter %[[CharVec3]]
; CHECK: %[[Vec3CharItem0:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 0
; CHECK: %[[Vec3CharItem1:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 1
; CHECK: %[[Vec3CharItem2:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 2
; CHECK: %[[Vec3CharR1:.*]] = OpBitwiseAnd %[[Char]] %[[Vec3CharItem0]] %[[Vec3CharItem1]]
; CHECK: %[[Vec3CharR2:.*]] = OpBitwiseAnd %[[Char]] %[[Vec3CharR1]] %[[Vec3CharItem2]]
; CHECK: OpReturnValue %[[Vec3CharR2]]
; CHECK: OpFunctionEnd

; CHECK: OpFunction
; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[ShortVec2]] %[[#]] %[[#]] 1 -1
; CHECK: %[[Added1:.*]] = OpBitwiseAnd %[[ShortVec2]] %[[#]] %[[#]]
; CHECK: %[[Vec2ShortR:.*]] = OpCompositeExtract %[[Short]] %[[Added1]] 0
; CHECK: OpReturnValue %[[Vec2ShortR]]
; CHECK: OpFunctionEnd

; CHECK: OpFunction
; CHECK: %[[ParamVec3Short:.*]] = OpFunctionParameter %[[ShortVec3]]
; CHECK: %[[Vec3ShortItem0:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 0
; CHECK: %[[Vec3ShortItem1:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 1
; CHECK: %[[Vec3ShortItem2:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 2
; CHECK: %[[Vec3ShortR1:.*]] = OpBitwiseAnd %[[Short]] %[[Vec3ShortItem0]] %[[Vec3ShortItem1]]
; CHECK: %[[Vec3ShortR2:.*]] = OpBitwiseAnd %[[Short]] %[[Vec3ShortR1]] %[[Vec3ShortItem2]]
; CHECK: OpReturnValue %[[Vec3ShortR2]]
; CHECK: OpFunctionEnd

; CHECK: OpFunction
; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[IntVec2]] %[[#]] %[[#]] 1 -1
; CHECK: %[[Added1:.*]] = OpBitwiseAnd %[[IntVec2]] %[[#]] %[[#]]
; CHECK: %[[Vec2IntR:.*]] = OpCompositeExtract %[[Int]] %[[Added1]] 0
; CHECK: OpReturnValue %[[Vec2IntR]]
; CHECK: OpFunctionEnd

; CHECK: OpFunction
; CHECK: %[[ParamVec3Int:.*]] = OpFunctionParameter %[[IntVec3]]
; CHECK: %[[Vec3IntItem0:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 0
; CHECK: %[[Vec3IntItem1:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 1
; CHECK: %[[Vec3IntItem2:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 2
; CHECK: %[[Vec3IntR1:.*]] = OpBitwiseAnd %[[Int]] %[[Vec3IntItem0]] %[[Vec3IntItem1]]
; CHECK: %[[Vec3IntR2:.*]] = OpBitwiseAnd %[[Int]] %[[Vec3IntR1]] %[[Vec3IntItem2]]
; CHECK: OpReturnValue %[[Vec3IntR2]]
; CHECK: OpFunctionEnd

; CHECK: OpFunction
; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[LongVec2]] %[[#]] %[[#]] 1 -1
; CHECK: %[[Added1:.*]] = OpBitwiseAnd %[[LongVec2]] %[[#]] %[[#]]
; CHECK: %[[Vec2LongR:.*]] = OpCompositeExtract %[[Long]] %[[Added1]] 0
; CHECK: OpReturnValue %[[Vec2LongR]]
; CHECK: OpFunctionEnd

; CHECK: OpFunction
; CHECK: %[[ParamVec3Long:.*]] = OpFunctionParameter %[[LongVec3]]
; CHECK: %[[Vec3LongItem0:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 0
; CHECK: %[[Vec3LongItem1:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 1
; CHECK: %[[Vec3LongItem2:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 2
; CHECK: %[[Vec3LongR1:.*]] = OpBitwiseAnd %[[Long]] %[[Vec3LongItem0]] %[[Vec3LongItem1]]
; CHECK: %[[Vec3LongR2:.*]] = OpBitwiseAnd %[[Long]] %[[Vec3LongR1]] %[[Vec3LongItem2]]
; CHECK: OpReturnValue %[[Vec3LongR2]]
; CHECK: OpFunctionEnd

; AND-reduce a <2 x i8> to a scalar i8.
define spir_func i8 @test_vector_reduce_and_v2i8(<2 x i8> %v) {
entry:
  %res = call i8 @llvm.vector.reduce.and.v2i8(<2 x i8> %v)
  ret i8 %res
}

; AND-reduce a <3 x i8> to a scalar i8.
define spir_func i8 @test_vector_reduce_and_v3i8(<3 x i8> %v) {
entry:
  %res = call i8 @llvm.vector.reduce.and.v3i8(<3 x i8> %v)
  ret i8 %res
}

; AND-reduce a <4 x i8> to a scalar i8.
define spir_func i8 @test_vector_reduce_and_v4i8(<4 x i8> %v) {
entry:
  %res = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> %v)
  ret i8 %res
}

; AND-reduce a <8 x i8> to a scalar i8.
define spir_func i8 @test_vector_reduce_and_v8i8(<8 x i8> %v) {
entry:
  %res = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> %v)
  ret i8 %res
}

; AND-reduce a <16 x i8> to a scalar i8.
define spir_func i8 @test_vector_reduce_and_v16i8(<16 x i8> %v) {
entry:
  %res = call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %v)
  ret i8 %res
}

; AND-reduce a <2 x i16> to a scalar i16.
define spir_func i16 @test_vector_reduce_and_v2i16(<2 x i16> %v) {
entry:
  %res = call i16 @llvm.vector.reduce.and.v2i16(<2 x i16> %v)
  ret i16 %res
}

; AND-reduce a <3 x i16> to a scalar i16.
define spir_func i16 @test_vector_reduce_and_v3i16(<3 x i16> %v) {
entry:
  %res = call i16 @llvm.vector.reduce.and.v3i16(<3 x i16> %v)
  ret i16 %res
}

; AND-reduce a <4 x i16> to a scalar i16.
define spir_func i16 @test_vector_reduce_and_v4i16(<4 x i16> %v) {
entry:
  %res = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> %v)
  ret i16 %res
}

; AND-reduce a <8 x i16> to a scalar i16.
define spir_func i16 @test_vector_reduce_and_v8i16(<8 x i16> %v) {
entry:
  %res = call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %v)
  ret i16 %res
}

; AND-reduce a <16 x i16> to a scalar i16.
define spir_func i16 @test_vector_reduce_and_v16i16(<16 x i16> %v) {
entry:
  %res = call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %v)
  ret i16 %res
}

; AND-reduce a <2 x i32> to a scalar i32.
define spir_func i32 @test_vector_reduce_and_v2i32(<2 x i32> %v) {
entry:
  %res = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> %v)
  ret i32 %res
}

; AND-reduce a <3 x i32> to a scalar i32.
define spir_func i32 @test_vector_reduce_and_v3i32(<3 x i32> %v) {
entry:
  %res = call i32 @llvm.vector.reduce.and.v3i32(<3 x i32> %v)
  ret i32 %res
}

; AND-reduce a <4 x i32> to a scalar i32.
define spir_func i32 @test_vector_reduce_and_v4i32(<4 x i32> %v) {
entry:
  %res = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %v)
  ret i32 %res
}

; AND-reduce a <8 x i32> to a scalar i32.
define spir_func i32 @test_vector_reduce_and_v8i32(<8 x i32> %v) {
entry:
  %res = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %v)
  ret i32 %res
}

; AND-reduce a <16 x i32> to a scalar i32.
define spir_func i32 @test_vector_reduce_and_v16i32(<16 x i32> %v) {
entry:
  %res = call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> %v)
  ret i32 %res
}

; AND-reduce a <2 x i64> to a scalar i64.
define spir_func i64 @test_vector_reduce_and_v2i64(<2 x i64> %v) {
entry:
  %res = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> %v)
  ret i64 %res
}

; AND-reduce a <3 x i64> to a scalar i64.
define spir_func i64 @test_vector_reduce_and_v3i64(<3 x i64> %v) {
entry:
  %res = call i64 @llvm.vector.reduce.and.v3i64(<3 x i64> %v)
  ret i64 %res
}

; AND-reduce a <4 x i64> to a scalar i64.
define spir_func i64 @test_vector_reduce_and_v4i64(<4 x i64> %v) {
entry:
  %res = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> %v)
  ret i64 %res
}

; AND-reduce a <8 x i64> to a scalar i64.
define spir_func i64 @test_vector_reduce_and_v8i64(<8 x i64> %v) {
entry:
  %res = call i64 @llvm.vector.reduce.and.v8i64(<8 x i64> %v)
  ret i64 %res
}

; AND-reduce a <16 x i64> to a scalar i64.
define spir_func i64 @test_vector_reduce_and_v16i64(<16 x i64> %v) {
entry:
  %res = call i64 @llvm.vector.reduce.and.v16i64(<16 x i64> %v)
  ret i64 %res
}

; Declarations of the llvm.vector.reduce.and overloads exercised by this test.
declare i8 @llvm.vector.reduce.and.v2i8(<2 x i8>)
declare i8 @llvm.vector.reduce.and.v3i8(<3 x i8>)
declare i8 @llvm.vector.reduce.and.v4i8(<4 x i8>)
declare i8 @llvm.vector.reduce.and.v8i8(<8 x i8>)
declare i8 @llvm.vector.reduce.and.v16i8(<16 x i8>)

declare i16 @llvm.vector.reduce.and.v2i16(<2 x i16>)
declare i16 @llvm.vector.reduce.and.v3i16(<3 x i16>)
declare i16 @llvm.vector.reduce.and.v4i16(<4 x i16>)
declare i16 @llvm.vector.reduce.and.v8i16(<8 x i16>)
declare i16 @llvm.vector.reduce.and.v16i16(<16 x i16>)

declare i32 @llvm.vector.reduce.and.v2i32(<2 x i32>)
declare i32 @llvm.vector.reduce.and.v3i32(<3 x i32>)
declare i32 @llvm.vector.reduce.and.v4i32(<4 x i32>)
declare i32 @llvm.vector.reduce.and.v8i32(<8 x i32>)
declare i32 @llvm.vector.reduce.and.v16i32(<16 x i32>)

declare i64 @llvm.vector.reduce.and.v2i64(<2 x i64>)
declare i64 @llvm.vector.reduce.and.v3i64(<3 x i64>)
declare i64 @llvm.vector.reduce.and.v4i64(<4 x i64>)
declare i64 @llvm.vector.reduce.and.v8i64(<8 x i64>)
declare i64 @llvm.vector.reduce.and.v16i64(<16 x i64>)