; xref: /llvm-project/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llvm-vector-reduce/mul.ll (revision 67d3ef74b31e1517d4f679e754cc2b3041c95901)
; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}

; Module triple is spir64; the RUN lines above override it with explicit
; -mtriple=spirv32/spirv64 when invoking llc.
target triple = "spir64-unknown-unknown"

; CHECK-DAG: %[[Char:.*]] = OpTypeInt 8 0
; CHECK-DAG: %[[CharVec2:.*]] = OpTypeVector %[[Char]] 2
; CHECK-DAG: %[[CharVec3:.*]] = OpTypeVector %[[Char]] 3

; CHECK-DAG: %[[Short:.*]] = OpTypeInt 16 0
; CHECK-DAG: %[[ShortVec2:.*]] = OpTypeVector %[[Short]] 2
; CHECK-DAG: %[[ShortVec3:.*]] = OpTypeVector %[[Short]] 3

; CHECK-DAG: %[[Int:.*]] = OpTypeInt 32 0
; CHECK-DAG: %[[IntVec2:.*]] = OpTypeVector %[[Int]] 2
; CHECK-DAG: %[[IntVec3:.*]] = OpTypeVector %[[Int]] 3

; CHECK-DAG: %[[Long:.*]] = OpTypeInt 64 0
; CHECK-DAG: %[[LongVec2:.*]] = OpTypeVector %[[Long]] 2
; CHECK-DAG: %[[LongVec3:.*]] = OpTypeVector %[[Long]] 3

; CHECK: OpFunction
; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[CharVec2]] %[[#]] %[[#]] 1 -1
; CHECK: %[[Added1:.*]] = OpIMul %[[CharVec2]] %[[#]] %[[#]]
; CHECK: %[[Vec2CharR:.*]] = OpCompositeExtract %[[Char]] %[[Added1]] 0
; CHECK: OpReturnValue %[[Vec2CharR]]
; CHECK: OpFunctionEnd

; CHECK: OpFunction
; CHECK: %[[ParamVec3Char:.*]] = OpFunctionParameter %[[CharVec3]]
; CHECK: %[[Vec3CharItem0:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 0
; CHECK: %[[Vec3CharItem1:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 1
; CHECK: %[[Vec3CharItem2:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 2
; CHECK: %[[Vec3CharR1:.*]] = OpIMul %[[Char]] %[[Vec3CharItem0]] %[[Vec3CharItem1]]
; CHECK: %[[Vec3CharR2:.*]] = OpIMul %[[Char]] %[[Vec3CharR1]] %[[Vec3CharItem2]]
; CHECK: OpReturnValue %[[Vec3CharR2]]
; CHECK: OpFunctionEnd

; CHECK: OpFunction
; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[ShortVec2]] %[[#]] %[[#]] 1 -1
; CHECK: %[[Added1:.*]] = OpIMul %[[ShortVec2]] %[[#]] %[[#]]
; CHECK: %[[Vec2ShortR:.*]] = OpCompositeExtract %[[Short]] %[[Added1]] 0
; CHECK: OpReturnValue %[[Vec2ShortR]]
; CHECK: OpFunctionEnd

; CHECK: OpFunction
; CHECK: %[[ParamVec3Short:.*]] = OpFunctionParameter %[[ShortVec3]]
; CHECK: %[[Vec3ShortItem0:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 0
; CHECK: %[[Vec3ShortItem1:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 1
; CHECK: %[[Vec3ShortItem2:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 2
; CHECK: %[[Vec3ShortR1:.*]] = OpIMul %[[Short]] %[[Vec3ShortItem0]] %[[Vec3ShortItem1]]
; CHECK: %[[Vec3ShortR2:.*]] = OpIMul %[[Short]] %[[Vec3ShortR1]] %[[Vec3ShortItem2]]
; CHECK: OpReturnValue %[[Vec3ShortR2]]
; CHECK: OpFunctionEnd

; CHECK: OpFunction
; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[IntVec2]] %[[#]] %[[#]] 1 -1
; CHECK: %[[Added1:.*]] = OpIMul %[[IntVec2]] %[[#]] %[[#]]
; CHECK: %[[Vec2IntR:.*]] = OpCompositeExtract %[[Int]] %[[Added1]] 0
; CHECK: OpReturnValue %[[Vec2IntR]]
; CHECK: OpFunctionEnd

; CHECK: OpFunction
; CHECK: %[[ParamVec3Int:.*]] = OpFunctionParameter %[[IntVec3]]
; CHECK: %[[Vec3IntItem0:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 0
; CHECK: %[[Vec3IntItem1:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 1
; CHECK: %[[Vec3IntItem2:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 2
; CHECK: %[[Vec3IntR1:.*]] = OpIMul %[[Int]] %[[Vec3IntItem0]] %[[Vec3IntItem1]]
; CHECK: %[[Vec3IntR2:.*]] = OpIMul %[[Int]] %[[Vec3IntR1]] %[[Vec3IntItem2]]
; CHECK: OpReturnValue %[[Vec3IntR2]]
; CHECK: OpFunctionEnd

; CHECK: OpFunction
; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[LongVec2]] %[[#]] %[[#]] 1 -1
; CHECK: %[[Added1:.*]] = OpIMul %[[LongVec2]] %[[#]] %[[#]]
; CHECK: %[[Vec2LongR:.*]] = OpCompositeExtract %[[Long]] %[[Added1]] 0
; CHECK: OpReturnValue %[[Vec2LongR]]
; CHECK: OpFunctionEnd

; CHECK: OpFunction
; CHECK: %[[ParamVec3Long:.*]] = OpFunctionParameter %[[LongVec3]]
; CHECK: %[[Vec3LongItem0:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 0
; CHECK: %[[Vec3LongItem1:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 1
; CHECK: %[[Vec3LongItem2:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 2
; CHECK: %[[Vec3LongR1:.*]] = OpIMul %[[Long]] %[[Vec3LongItem0]] %[[Vec3LongItem1]]
; CHECK: %[[Vec3LongR2:.*]] = OpIMul %[[Long]] %[[Vec3LongR1]] %[[Vec3LongItem2]]
; CHECK: OpReturnValue %[[Vec3LongR2]]
; CHECK: OpFunctionEnd

; Multiply-reduce a <2 x i8> vector to a scalar i8.
define spir_func i8 @test_vector_reduce_mul_v2i8(<2 x i8> %v) {
entry:
  %res = call i8 @llvm.vector.reduce.mul.v2i8(<2 x i8> %v)
  ret i8 %res
}

; Multiply-reduce a <3 x i8> vector to a scalar i8.
define spir_func i8 @test_vector_reduce_mul_v3i8(<3 x i8> %v) {
entry:
  %res = call i8 @llvm.vector.reduce.mul.v3i8(<3 x i8> %v)
  ret i8 %res
}

; Multiply-reduce a <4 x i8> vector to a scalar i8.
define spir_func i8 @test_vector_reduce_mul_v4i8(<4 x i8> %v) {
entry:
  %res = call i8 @llvm.vector.reduce.mul.v4i8(<4 x i8> %v)
  ret i8 %res
}

; Multiply-reduce an <8 x i8> vector to a scalar i8.
define spir_func i8 @test_vector_reduce_mul_v8i8(<8 x i8> %v) {
entry:
  %res = call i8 @llvm.vector.reduce.mul.v8i8(<8 x i8> %v)
  ret i8 %res
}

; Multiply-reduce a <16 x i8> vector to a scalar i8.
define spir_func i8 @test_vector_reduce_mul_v16i8(<16 x i8> %v) {
entry:
  %res = call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> %v)
  ret i8 %res
}

; Multiply-reduce a <2 x i16> vector to a scalar i16.
define spir_func i16 @test_vector_reduce_mul_v2i16(<2 x i16> %v) {
entry:
  %res = call i16 @llvm.vector.reduce.mul.v2i16(<2 x i16> %v)
  ret i16 %res
}

; Multiply-reduce a <3 x i16> vector to a scalar i16.
define spir_func i16 @test_vector_reduce_mul_v3i16(<3 x i16> %v) {
entry:
  %res = call i16 @llvm.vector.reduce.mul.v3i16(<3 x i16> %v)
  ret i16 %res
}

; Multiply-reduce a <4 x i16> vector to a scalar i16.
define spir_func i16 @test_vector_reduce_mul_v4i16(<4 x i16> %v) {
entry:
  %res = call i16 @llvm.vector.reduce.mul.v4i16(<4 x i16> %v)
  ret i16 %res
}

; Multiply-reduce an <8 x i16> vector to a scalar i16.
define spir_func i16 @test_vector_reduce_mul_v8i16(<8 x i16> %v) {
entry:
  %res = call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %v)
  ret i16 %res
}

; Multiply-reduce a <16 x i16> vector to a scalar i16.
define spir_func i16 @test_vector_reduce_mul_v16i16(<16 x i16> %v) {
entry:
  %res = call i16 @llvm.vector.reduce.mul.v16i16(<16 x i16> %v)
  ret i16 %res
}

; Multiply-reduce a <2 x i32> vector to a scalar i32.
define spir_func i32 @test_vector_reduce_mul_v2i32(<2 x i32> %v) {
entry:
  %res = call i32 @llvm.vector.reduce.mul.v2i32(<2 x i32> %v)
  ret i32 %res
}

; Multiply-reduce a <3 x i32> vector to a scalar i32.
define spir_func i32 @test_vector_reduce_mul_v3i32(<3 x i32> %v) {
entry:
  %res = call i32 @llvm.vector.reduce.mul.v3i32(<3 x i32> %v)
  ret i32 %res
}

; Multiply-reduce a <4 x i32> vector to a scalar i32.
define spir_func i32 @test_vector_reduce_mul_v4i32(<4 x i32> %v) {
entry:
  %res = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %v)
  ret i32 %res
}

; Multiply-reduce an <8 x i32> vector to a scalar i32.
define spir_func i32 @test_vector_reduce_mul_v8i32(<8 x i32> %v) {
entry:
  %res = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> %v)
  ret i32 %res
}

; Multiply-reduce a <16 x i32> vector to a scalar i32.
define spir_func i32 @test_vector_reduce_mul_v16i32(<16 x i32> %v) {
entry:
  %res = call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> %v)
  ret i32 %res
}

; Multiply-reduce a <2 x i64> vector to a scalar i64.
define spir_func i64 @test_vector_reduce_mul_v2i64(<2 x i64> %v) {
entry:
  %res = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> %v)
  ret i64 %res
}

; Multiply-reduce a <3 x i64> vector to a scalar i64.
define spir_func i64 @test_vector_reduce_mul_v3i64(<3 x i64> %v) {
entry:
  %res = call i64 @llvm.vector.reduce.mul.v3i64(<3 x i64> %v)
  ret i64 %res
}

; Multiply-reduce a <4 x i64> vector to a scalar i64.
define spir_func i64 @test_vector_reduce_mul_v4i64(<4 x i64> %v) {
entry:
  %res = call i64 @llvm.vector.reduce.mul.v4i64(<4 x i64> %v)
  ret i64 %res
}

; Multiply-reduce an <8 x i64> vector to a scalar i64.
define spir_func i64 @test_vector_reduce_mul_v8i64(<8 x i64> %v) {
entry:
  %res = call i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> %v)
  ret i64 %res
}

; Multiply-reduce a <16 x i64> vector to a scalar i64.
define spir_func i64 @test_vector_reduce_mul_v16i64(<16 x i64> %v) {
entry:
  %res = call i64 @llvm.vector.reduce.mul.v16i64(<16 x i64> %v)
  ret i64 %res
}

; Declarations of the llvm.vector.reduce.mul intrinsic for every
; element type / vector width exercised above.
declare i8 @llvm.vector.reduce.mul.v2i8(<2 x i8>)
declare i8 @llvm.vector.reduce.mul.v3i8(<3 x i8>)
declare i8 @llvm.vector.reduce.mul.v4i8(<4 x i8>)
declare i8 @llvm.vector.reduce.mul.v8i8(<8 x i8>)
declare i8 @llvm.vector.reduce.mul.v16i8(<16 x i8>)

declare i16 @llvm.vector.reduce.mul.v2i16(<2 x i16>)
declare i16 @llvm.vector.reduce.mul.v3i16(<3 x i16>)
declare i16 @llvm.vector.reduce.mul.v4i16(<4 x i16>)
declare i16 @llvm.vector.reduce.mul.v8i16(<8 x i16>)
declare i16 @llvm.vector.reduce.mul.v16i16(<16 x i16>)

declare i32 @llvm.vector.reduce.mul.v2i32(<2 x i32>)
declare i32 @llvm.vector.reduce.mul.v3i32(<3 x i32>)
declare i32 @llvm.vector.reduce.mul.v4i32(<4 x i32>)
declare i32 @llvm.vector.reduce.mul.v8i32(<8 x i32>)
declare i32 @llvm.vector.reduce.mul.v16i32(<16 x i32>)

declare i64 @llvm.vector.reduce.mul.v2i64(<2 x i64>)
declare i64 @llvm.vector.reduce.mul.v3i64(<3 x i64>)
declare i64 @llvm.vector.reduce.mul.v4i64(<4 x i64>)
declare i64 @llvm.vector.reduce.mul.v8i64(<8 x i64>)
declare i64 @llvm.vector.reduce.mul.v16i64(<16 x i64>)
