; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -S --passes=vector-combine -mtriple=x86_64-- -mcpu=x86-64    | FileCheck %s --check-prefixes=CHECK,X64
; RUN: opt < %s -S --passes=vector-combine -mtriple=x86_64-- -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,X64
; RUN: opt < %s -S --passes=vector-combine -mtriple=x86_64-- -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,X64
; RUN: opt < %s -S --passes=vector-combine -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,AVX512

;
; Fold reduce(trunc(X)) -> trunc(reduce(X)) if more cost efficient
;
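; e.g. for a reduce.add (illustrative values, mirroring the first test below):
;   %tr  = trunc <8 x i64> %x to <8 x i32>
;   %red = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %tr)
; becomes
;   %r   = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %x)
;   %red = trunc i64 %r to i32
;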

; Cheap AVX512 v8i64 -> v8i32 truncation
define i32 @reduce_add_trunc_v8i64_i32(<8 x i64> %a0)  {
; X64-LABEL: @reduce_add_trunc_v8i64_i32(
; X64-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[A0:%.*]])
; X64-NEXT:    [[RED:%.*]] = trunc i64 [[TMP1]] to i32
; X64-NEXT:    ret i32 [[RED]]
;
; AVX512-LABEL: @reduce_add_trunc_v8i64_i32(
; AVX512-NEXT:    [[TR:%.*]] = trunc <8 x i64> [[A0:%.*]] to <8 x i32>
; AVX512-NEXT:    [[RED:%.*]] = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TR]])
; AVX512-NEXT:    ret i32 [[RED]]
;
  %tr = trunc <8 x i64> %a0 to <8 x i32>
  %red = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %tr)
  ret i32 %red
}
declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)

; No legal vXi8 multiplication so vXi16 is always cheaper
define i8 @reduce_mul_trunc_v16i16_i8(<16 x i16> %a0)  {
; CHECK-LABEL: @reduce_mul_trunc_v16i16_i8(
; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.vector.reduce.mul.v16i16(<16 x i16> [[A0:%.*]])
; CHECK-NEXT:    [[RED:%.*]] = trunc i16 [[TMP1]] to i8
; CHECK-NEXT:    ret i8 [[RED]]
;
  %tr = trunc <16 x i16> %a0 to <16 x i8>
  %red = tail call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> %tr)
  ret i8 %red
}
declare i8 @llvm.vector.reduce.mul.v16i8(<16 x i8>)

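; Bitwise or reduction - fold to reduce at the original v8i32 type and truncate the scalar result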
define i8 @reduce_or_trunc_v8i32_i8(<8 x i32> %a0)  {
; CHECK-LABEL: @reduce_or_trunc_v8i32_i8(
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> [[A0:%.*]])
; CHECK-NEXT:    [[RED:%.*]] = trunc i32 [[TMP1]] to i8
; CHECK-NEXT:    ret i8 [[RED]]
;
  %tr = trunc <8 x i32> %a0 to <8 x i8>
  %red = tail call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> %tr)
  ret i8 %red
}
declare i8 @llvm.vector.reduce.or.v8i8(<8 x i8>)

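; Likewise for bitwise xor - reduce at v16i64 then truncate the scalar result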
define i8 @reduce_xor_trunc_v16i64_i8(<16 x i64> %a0)  {
; CHECK-LABEL: @reduce_xor_trunc_v16i64_i8(
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vector.reduce.xor.v16i64(<16 x i64> [[A0:%.*]])
; CHECK-NEXT:    [[RED:%.*]] = trunc i64 [[TMP1]] to i8
; CHECK-NEXT:    ret i8 [[RED]]
;
  %tr = trunc <16 x i64> %a0 to <16 x i8>
  %red = tail call i8 @llvm.vector.reduce.xor.v16i8(<16 x i8> %tr)
  ret i8 %red
}
declare i8 @llvm.vector.reduce.xor.v16i8(<16 x i8>)

; Truncation source has other uses - OK to truncate reduction
define i16 @reduce_and_trunc_v16i64_i16(<16 x i64> %a0)  {
; CHECK-LABEL: @reduce_and_trunc_v16i64_i16(
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vector.reduce.and.v16i64(<16 x i64> [[A0:%.*]])
; CHECK-NEXT:    [[RED:%.*]] = trunc i64 [[TMP1]] to i16
; CHECK-NEXT:    call void @use_v16i64(<16 x i64> [[A0]])
; CHECK-NEXT:    ret i16 [[RED]]
;
  %tr = trunc <16 x i64> %a0 to <16 x i16>
  %red = tail call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %tr)
  call void @use_v16i64(<16 x i64> %a0)
  ret i16 %red
}
declare i16 @llvm.vector.reduce.and.v16i16(<16 x i16>)

; Negative Test: vXi16 multiply is much cheaper than vXi64
define i16 @reduce_mul_trunc_v8i64_i16(<8 x i64> %a0)  {
; CHECK-LABEL: @reduce_mul_trunc_v8i64_i16(
; CHECK-NEXT:    [[TR:%.*]] = trunc <8 x i64> [[A0:%.*]] to <8 x i16>
; CHECK-NEXT:    [[RED:%.*]] = tail call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> [[TR]])
; CHECK-NEXT:    ret i16 [[RED]]
;
  %tr = trunc <8 x i64> %a0 to <8 x i16>
  %red = tail call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %tr)
  ret i16 %red
}
declare i16 @llvm.vector.reduce.mul.v8i16(<8 x i16>)

; Negative Test: min/max reductions can't use pre-truncated types.
define i8 @reduce_smin_trunc_v16i16_i8(<16 x i16> %a0)  {
; CHECK-LABEL: @reduce_smin_trunc_v16i16_i8(
; CHECK-NEXT:    [[TR:%.*]] = trunc <16 x i16> [[A0:%.*]] to <16 x i8>
; CHECK-NEXT:    [[RED:%.*]] = tail call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> [[TR]])
; CHECK-NEXT:    ret i8 [[RED]]
;
  %tr = trunc <16 x i16> %a0 to <16 x i8>
  %red = tail call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %tr)
  ret i8 %red
}
declare i8 @llvm.vector.reduce.smin.v16i8(<16 x i8>)

; Negative Test: Truncation has other uses.
define i16 @reduce_and_trunc_v16i64_i16_multiuse(<16 x i64> %a0)  {
; CHECK-LABEL: @reduce_and_trunc_v16i64_i16_multiuse(
; CHECK-NEXT:    [[TR:%.*]] = trunc <16 x i64> [[A0:%.*]] to <16 x i16>
; CHECK-NEXT:    [[RED:%.*]] = tail call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> [[TR]])
; CHECK-NEXT:    call void @use_v16i16(<16 x i16> [[TR]])
; CHECK-NEXT:    ret i16 [[RED]]
;
  %tr = trunc <16 x i64> %a0 to <16 x i16>
  %red = tail call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %tr)
  call void @use_v16i16(<16 x i16> %tr)
  ret i16 %red
}

declare void @use_v16i64(<16 x i64>)
declare void @use_v16i16(<16 x i16>)