; Source: llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/or.ll
1; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
2; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
3
4target triple = "spir64-unknown-unknown"
5
6; CHECK-DAG: %[[Char:.*]] = OpTypeInt 8 0
7; CHECK-DAG: %[[CharVec2:.*]] = OpTypeVector %[[Char]] 2
8; CHECK-DAG: %[[CharVec3:.*]] = OpTypeVector %[[Char]] 3
9
10; CHECK-DAG: %[[Short:.*]] = OpTypeInt 16 0
11; CHECK-DAG: %[[ShortVec2:.*]] = OpTypeVector %[[Short]] 2
12; CHECK-DAG: %[[ShortVec3:.*]] = OpTypeVector %[[Short]] 3
13
14; CHECK-DAG: %[[Int:.*]] = OpTypeInt 32 0
15; CHECK-DAG: %[[IntVec2:.*]] = OpTypeVector %[[Int]] 2
16; CHECK-DAG: %[[IntVec3:.*]] = OpTypeVector %[[Int]] 3
17
18; CHECK-DAG: %[[Long:.*]] = OpTypeInt 64 0
19; CHECK-DAG: %[[LongVec2:.*]] = OpTypeVector %[[Long]] 2
20; CHECK-DAG: %[[LongVec3:.*]] = OpTypeVector %[[Long]] 3
21
22; CHECK: OpFunction
23; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[CharVec2]] %[[#]] %[[#]] 1 -1
; CHECK: %[[Or1:.*]] = OpBitwiseOr %[[CharVec2]] %[[#]] %[[#]]
; CHECK: %[[Vec2CharR:.*]] = OpCompositeExtract %[[Char]] %[[Or1]] 0
26; CHECK: OpReturnValue %[[Vec2CharR]]
27; CHECK: OpFunctionEnd
28
29; CHECK: OpFunction
30; CHECK: %[[ParamVec3Char:.*]] = OpFunctionParameter %[[CharVec3]]
31; CHECK: %[[Vec3CharItem0:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 0
32; CHECK: %[[Vec3CharItem1:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 1
33; CHECK: %[[Vec3CharItem2:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 2
34; CHECK: %[[Vec3CharR1:.*]] = OpBitwiseOr %[[Char]] %[[Vec3CharItem0]] %[[Vec3CharItem1]]
35; CHECK: %[[Vec3CharR2:.*]] = OpBitwiseOr %[[Char]] %[[Vec3CharR1]] %[[Vec3CharItem2]]
36; CHECK: OpReturnValue %[[Vec3CharR2]]
37; CHECK: OpFunctionEnd
38
39; CHECK: OpFunction
40; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[ShortVec2]] %[[#]] %[[#]] 1 -1
; CHECK: %[[Or1:.*]] = OpBitwiseOr %[[ShortVec2]] %[[#]] %[[#]]
; CHECK: %[[Vec2ShortR:.*]] = OpCompositeExtract %[[Short]] %[[Or1]] 0
43; CHECK: OpReturnValue %[[Vec2ShortR]]
44; CHECK: OpFunctionEnd
45
46; CHECK: OpFunction
47; CHECK: %[[ParamVec3Short:.*]] = OpFunctionParameter %[[ShortVec3]]
48; CHECK: %[[Vec3ShortItem0:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 0
49; CHECK: %[[Vec3ShortItem1:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 1
50; CHECK: %[[Vec3ShortItem2:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 2
51; CHECK: %[[Vec3ShortR1:.*]] = OpBitwiseOr %[[Short]] %[[Vec3ShortItem0]] %[[Vec3ShortItem1]]
52; CHECK: %[[Vec3ShortR2:.*]] = OpBitwiseOr %[[Short]] %[[Vec3ShortR1]] %[[Vec3ShortItem2]]
53; CHECK: OpReturnValue %[[Vec3ShortR2]]
54; CHECK: OpFunctionEnd
55
56; CHECK: OpFunction
57; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[IntVec2]] %[[#]] %[[#]] 1 -1
; CHECK: %[[Or1:.*]] = OpBitwiseOr %[[IntVec2]] %[[#]] %[[#]]
; CHECK: %[[Vec2IntR:.*]] = OpCompositeExtract %[[Int]] %[[Or1]] 0
60; CHECK: OpReturnValue %[[Vec2IntR]]
61; CHECK: OpFunctionEnd
62
63; CHECK: OpFunction
64; CHECK: %[[ParamVec3Int:.*]] = OpFunctionParameter %[[IntVec3]]
65; CHECK: %[[Vec3IntItem0:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 0
66; CHECK: %[[Vec3IntItem1:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 1
67; CHECK: %[[Vec3IntItem2:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 2
68; CHECK: %[[Vec3IntR1:.*]] = OpBitwiseOr %[[Int]] %[[Vec3IntItem0]] %[[Vec3IntItem1]]
69; CHECK: %[[Vec3IntR2:.*]] = OpBitwiseOr %[[Int]] %[[Vec3IntR1]] %[[Vec3IntItem2]]
70; CHECK: OpReturnValue %[[Vec3IntR2]]
71; CHECK: OpFunctionEnd
72
73; CHECK: OpFunction
74; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[LongVec2]] %[[#]] %[[#]] 1 -1
; CHECK: %[[Or1:.*]] = OpBitwiseOr %[[LongVec2]] %[[#]] %[[#]]
; CHECK: %[[Vec2LongR:.*]] = OpCompositeExtract %[[Long]] %[[Or1]] 0
77; CHECK: OpReturnValue %[[Vec2LongR]]
78; CHECK: OpFunctionEnd
79
80; CHECK: OpFunction
81; CHECK: %[[ParamVec3Long:.*]] = OpFunctionParameter %[[LongVec3]]
82; CHECK: %[[Vec3LongItem0:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 0
83; CHECK: %[[Vec3LongItem1:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 1
84; CHECK: %[[Vec3LongItem2:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 2
85; CHECK: %[[Vec3LongR1:.*]] = OpBitwiseOr %[[Long]] %[[Vec3LongItem0]] %[[Vec3LongItem1]]
86; CHECK: %[[Vec3LongR2:.*]] = OpBitwiseOr %[[Long]] %[[Vec3LongR1]] %[[Vec3LongItem2]]
87; CHECK: OpReturnValue %[[Vec3LongR2]]
88; CHECK: OpFunctionEnd
89
; OR-reduce a <2 x i8> to a scalar i8 (expected lowering: shuffle + OpBitwiseOr + extract).
define spir_func i8 @test_vector_reduce_or_v2i8(<2 x i8> %v) {
entry:
  %r = call i8 @llvm.vector.reduce.or.v2i8(<2 x i8> %v)
  ret i8 %r
}
95
; OR-reduce a <3 x i8> to a scalar i8 (expected lowering: per-element extracts + chained OpBitwiseOr).
define spir_func i8 @test_vector_reduce_or_v3i8(<3 x i8> %v) {
entry:
  %r = call i8 @llvm.vector.reduce.or.v3i8(<3 x i8> %v)
  ret i8 %r
}
101
; OR-reduce a <4 x i8> to a scalar i8.
define spir_func i8 @test_vector_reduce_or_v4i8(<4 x i8> %v) {
entry:
  %r = call i8 @llvm.vector.reduce.or.v4i8(<4 x i8> %v)
  ret i8 %r
}
107
; OR-reduce an <8 x i8> to a scalar i8.
define spir_func i8 @test_vector_reduce_or_v8i8(<8 x i8> %v) {
entry:
  %r = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> %v)
  ret i8 %r
}
113
; OR-reduce a <16 x i8> to a scalar i8.
define spir_func i8 @test_vector_reduce_or_v16i8(<16 x i8> %v) {
entry:
  %r = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %v)
  ret i8 %r
}
119
; OR-reduce a <2 x i16> to a scalar i16 (expected lowering: shuffle + OpBitwiseOr + extract).
define spir_func i16 @test_vector_reduce_or_v2i16(<2 x i16> %v) {
entry:
  %r = call i16 @llvm.vector.reduce.or.v2i16(<2 x i16> %v)
  ret i16 %r
}
125
; OR-reduce a <3 x i16> to a scalar i16 (expected lowering: per-element extracts + chained OpBitwiseOr).
define spir_func i16 @test_vector_reduce_or_v3i16(<3 x i16> %v) {
entry:
  %r = call i16 @llvm.vector.reduce.or.v3i16(<3 x i16> %v)
  ret i16 %r
}
131
; OR-reduce a <4 x i16> to a scalar i16.
define spir_func i16 @test_vector_reduce_or_v4i16(<4 x i16> %v) {
entry:
  %r = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> %v)
  ret i16 %r
}
137
; OR-reduce an <8 x i16> to a scalar i16.
define spir_func i16 @test_vector_reduce_or_v8i16(<8 x i16> %v) {
entry:
  %r = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %v)
  ret i16 %r
}
143
; OR-reduce a <16 x i16> to a scalar i16.
define spir_func i16 @test_vector_reduce_or_v16i16(<16 x i16> %v) {
entry:
  %r = call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %v)
  ret i16 %r
}
149
150
; OR-reduce a <2 x i32> to a scalar i32 (expected lowering: shuffle + OpBitwiseOr + extract).
define spir_func i32 @test_vector_reduce_or_v2i32(<2 x i32> %v) {
entry:
  %r = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> %v)
  ret i32 %r
}
156
; OR-reduce a <3 x i32> to a scalar i32 (expected lowering: per-element extracts + chained OpBitwiseOr).
define spir_func i32 @test_vector_reduce_or_v3i32(<3 x i32> %v) {
entry:
  %r = call i32 @llvm.vector.reduce.or.v3i32(<3 x i32> %v)
  ret i32 %r
}
162
; OR-reduce a <4 x i32> to a scalar i32.
define spir_func i32 @test_vector_reduce_or_v4i32(<4 x i32> %v) {
entry:
  %r = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %v)
  ret i32 %r
}
168
; OR-reduce an <8 x i32> to a scalar i32.
define spir_func i32 @test_vector_reduce_or_v8i32(<8 x i32> %v) {
entry:
  %r = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %v)
  ret i32 %r
}
174
; OR-reduce a <16 x i32> to a scalar i32.
define spir_func i32 @test_vector_reduce_or_v16i32(<16 x i32> %v) {
entry:
  %r = call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> %v)
  ret i32 %r
}
180
; OR-reduce a <2 x i64> to a scalar i64 (expected lowering: shuffle + OpBitwiseOr + extract).
define spir_func i64 @test_vector_reduce_or_v2i64(<2 x i64> %v) {
entry:
  %r = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> %v)
  ret i64 %r
}
186
; OR-reduce a <3 x i64> to a scalar i64 (expected lowering: per-element extracts + chained OpBitwiseOr).
define spir_func i64 @test_vector_reduce_or_v3i64(<3 x i64> %v) {
entry:
  %r = call i64 @llvm.vector.reduce.or.v3i64(<3 x i64> %v)
  ret i64 %r
}
192
; OR-reduce a <4 x i64> to a scalar i64.
define spir_func i64 @test_vector_reduce_or_v4i64(<4 x i64> %v) {
entry:
  %r = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> %v)
  ret i64 %r
}
198
; OR-reduce an <8 x i64> to a scalar i64.
define spir_func i64 @test_vector_reduce_or_v8i64(<8 x i64> %v) {
entry:
  %r = call i64 @llvm.vector.reduce.or.v8i64(<8 x i64> %v)
  ret i64 %r
}
204
; OR-reduce a <16 x i64> to a scalar i64.
define spir_func i64 @test_vector_reduce_or_v16i64(<16 x i64> %v) {
entry:
  %r = call i64 @llvm.vector.reduce.or.v16i64(<16 x i64> %v)
  ret i64 %r
}
210
211declare i8 @llvm.vector.reduce.or.v2i8(<2 x i8>)
212declare i8 @llvm.vector.reduce.or.v3i8(<3 x i8>)
213declare i8 @llvm.vector.reduce.or.v4i8(<4 x i8>)
214declare i8 @llvm.vector.reduce.or.v8i8(<8 x i8>)
215declare i8 @llvm.vector.reduce.or.v16i8(<16 x i8>)
216
217declare i16 @llvm.vector.reduce.or.v2i16(<2 x i16>)
218declare i16 @llvm.vector.reduce.or.v3i16(<3 x i16>)
219declare i16 @llvm.vector.reduce.or.v4i16(<4 x i16>)
220declare i16 @llvm.vector.reduce.or.v8i16(<8 x i16>)
221declare i16 @llvm.vector.reduce.or.v16i16(<16 x i16>)
222
223declare i32 @llvm.vector.reduce.or.v2i32(<2 x i32>)
224declare i32 @llvm.vector.reduce.or.v3i32(<3 x i32>)
225declare i32 @llvm.vector.reduce.or.v4i32(<4 x i32>)
226declare i32 @llvm.vector.reduce.or.v8i32(<8 x i32>)
227declare i32 @llvm.vector.reduce.or.v16i32(<16 x i32>)
228
229declare i64 @llvm.vector.reduce.or.v2i64(<2 x i64>)
230declare i64 @llvm.vector.reduce.or.v3i64(<3 x i64>)
231declare i64 @llvm.vector.reduce.or.v4i64(<4 x i64>)
232declare i64 @llvm.vector.reduce.or.v8i64(<8 x i64>)
233declare i64 @llvm.vector.reduce.or.v16i64(<16 x i64>)
234