xref: /llvm-project/llvm/test/Transforms/SLPVectorizer/X86/minimum-sizes.ll (revision 462cb3cd6cecd0511ecaf0e3ebcaba455ece587d)
1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt < %s -slp-threshold=-6 -passes=slp-vectorizer,instcombine -mattr=+sse2     -S | FileCheck %s --check-prefix=SSE
3; RUN: opt < %s -slp-threshold=-6 -passes=slp-vectorizer,instcombine -mattr=+avx      -S | FileCheck %s --check-prefix=AVX
4; RUN: opt < %s -slp-threshold=-6 -passes=slp-vectorizer,instcombine -mattr=+avx2     -S | FileCheck %s --check-prefix=AVX
5; RUN: opt < %s -slp-threshold=-6 -passes=slp-vectorizer,instcombine -mattr=+avx512f  -S | FileCheck %s --check-prefix=AVX
6; RUN: opt < %s -slp-threshold=-6 -passes=slp-vectorizer,instcombine -mattr=+avx512vl -S | FileCheck %s --check-prefix=AVX
7
8target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
9target triple = "x86_64-unknown-linux-gnu"
10
11; These tests ensure that we do not regress due to PR31243. Note that we set
12; the SLP threshold to force vectorization even when not profitable.
13
14; When computing minimum sizes, if we can prove the sign bit is zero, we can
15; zero-extend the roots back to their original sizes.
16;
17define i8 @PR31243_zext(i8 %v0, i8 %v1, i8 %v2, i8 %v3, ptr %ptr) {
18; SSE-LABEL: @PR31243_zext(
19; SSE-NEXT:  entry:
20; SSE-NEXT:    [[TMP0:%.*]] = insertelement <2 x i8> poison, i8 [[V0:%.*]], i64 0
21; SSE-NEXT:    [[TMP1:%.*]] = insertelement <2 x i8> [[TMP0]], i8 [[V1:%.*]], i64 1
22; SSE-NEXT:    [[TMP2:%.*]] = or <2 x i8> [[TMP1]], splat (i8 1)
23; SSE-NEXT:    [[TMP3:%.*]] = extractelement <2 x i8> [[TMP2]], i64 0
24; SSE-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i64
25; SSE-NEXT:    [[T4:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR:%.*]], i64 [[TMP4]]
26; SSE-NEXT:    [[TMP5:%.*]] = extractelement <2 x i8> [[TMP2]], i64 1
27; SSE-NEXT:    [[TMP6:%.*]] = zext i8 [[TMP5]] to i64
28; SSE-NEXT:    [[T5:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR]], i64 [[TMP6]]
29; SSE-NEXT:    [[T6:%.*]] = load i8, ptr [[T4]], align 1
30; SSE-NEXT:    [[T7:%.*]] = load i8, ptr [[T5]], align 1
31; SSE-NEXT:    [[T8:%.*]] = add i8 [[T6]], [[T7]]
32; SSE-NEXT:    ret i8 [[T8]]
33;
34; AVX-LABEL: @PR31243_zext(
35; AVX-NEXT:  entry:
36; AVX-NEXT:    [[TMP0:%.*]] = insertelement <2 x i8> poison, i8 [[V0:%.*]], i64 0
37; AVX-NEXT:    [[TMP1:%.*]] = insertelement <2 x i8> [[TMP0]], i8 [[V1:%.*]], i64 1
38; AVX-NEXT:    [[TMP2:%.*]] = or <2 x i8> [[TMP1]], splat (i8 1)
39; AVX-NEXT:    [[TMP3:%.*]] = extractelement <2 x i8> [[TMP2]], i64 0
40; AVX-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i64
41; AVX-NEXT:    [[T4:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR:%.*]], i64 [[TMP4]]
42; AVX-NEXT:    [[TMP5:%.*]] = extractelement <2 x i8> [[TMP2]], i64 1
43; AVX-NEXT:    [[TMP6:%.*]] = zext i8 [[TMP5]] to i64
44; AVX-NEXT:    [[T5:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR]], i64 [[TMP6]]
45; AVX-NEXT:    [[T6:%.*]] = load i8, ptr [[T4]], align 1
46; AVX-NEXT:    [[T7:%.*]] = load i8, ptr [[T5]], align 1
47; AVX-NEXT:    [[T8:%.*]] = add i8 [[T6]], [[T7]]
48; AVX-NEXT:    ret i8 [[T8]]
49;
; Input IR below: %t0/%t1 come from 'zext i8 ... to i32', so the sign bit of
; the 32-bit values is provably zero. The autogenerated check lines above
; confirm SLP shrinks the vectorized 'or' to <2 x i8> and the extracted lanes
; are widened back with zext (not sext) when forming the GEP indices.
50entry:
51  %t0 = zext i8 %v0 to i32
52  %t1 = zext i8 %v1 to i32
53  %t2 = or i32 %t0, 1
54  %t3 = or i32 %t1, 1
55  %t4 = getelementptr inbounds i8, ptr %ptr, i32 %t2
56  %t5 = getelementptr inbounds i8, ptr %ptr, i32 %t3
57  %t6 = load i8, ptr %t4
58  %t7 = load i8, ptr %t5
59  %t8 = add i8 %t6, %t7
60  ret i8 %t8
61}
62
63; When computing minimum sizes, if we cannot prove the sign bit is zero, we
64; have to include one extra bit for signedness since we will sign-extend the
65; roots.
66;
67; FIXME: This test is suboptimal since the computation can be performed in i8.
68;        In general, we need to add an extra bit to the maximum bit width only
69;        if we can't prove that the upper bit of the original type is equal to
70;        the upper bit of the proposed smaller type. If these two bits are the
71;        same (either zero or one) we know that sign-extending from the smaller
72;        type will result in the same value. Since we don't yet perform this
73;        optimization, we make the proposed smaller type (i8) larger (i16) to
74;        ensure correctness.
75;
76define i8 @PR31243_sext(i8 %v0, i8 %v1, i8 %v2, i8 %v3, ptr %ptr) {
77; SSE-LABEL: @PR31243_sext(
78; SSE-NEXT:  entry:
79; SSE-NEXT:    [[TMP0:%.*]] = insertelement <2 x i8> poison, i8 [[V0:%.*]], i64 0
80; SSE-NEXT:    [[TMP1:%.*]] = insertelement <2 x i8> [[TMP0]], i8 [[V1:%.*]], i64 1
81; SSE-NEXT:    [[TMP2:%.*]] = or <2 x i8> [[TMP1]], splat (i8 1)
82; SSE-NEXT:    [[TMP3:%.*]] = extractelement <2 x i8> [[TMP2]], i64 0
83; SSE-NEXT:    [[TMP4:%.*]] = sext i8 [[TMP3]] to i64
84; SSE-NEXT:    [[T4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP4]]
85; SSE-NEXT:    [[TMP5:%.*]] = extractelement <2 x i8> [[TMP2]], i64 1
86; SSE-NEXT:    [[TMP6:%.*]] = sext i8 [[TMP5]] to i64
87; SSE-NEXT:    [[T5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP6]]
88; SSE-NEXT:    [[T6:%.*]] = load i8, ptr [[T4]], align 1
89; SSE-NEXT:    [[T7:%.*]] = load i8, ptr [[T5]], align 1
90; SSE-NEXT:    [[T8:%.*]] = add i8 [[T6]], [[T7]]
91; SSE-NEXT:    ret i8 [[T8]]
92;
93; AVX-LABEL: @PR31243_sext(
94; AVX-NEXT:  entry:
95; AVX-NEXT:    [[TMP0:%.*]] = insertelement <2 x i8> poison, i8 [[V0:%.*]], i64 0
96; AVX-NEXT:    [[TMP1:%.*]] = insertelement <2 x i8> [[TMP0]], i8 [[V1:%.*]], i64 1
97; AVX-NEXT:    [[TMP2:%.*]] = or <2 x i8> [[TMP1]], splat (i8 1)
98; AVX-NEXT:    [[TMP3:%.*]] = extractelement <2 x i8> [[TMP2]], i64 0
99; AVX-NEXT:    [[TMP4:%.*]] = sext i8 [[TMP3]] to i64
100; AVX-NEXT:    [[T4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP4]]
101; AVX-NEXT:    [[TMP5:%.*]] = extractelement <2 x i8> [[TMP2]], i64 1
102; AVX-NEXT:    [[TMP6:%.*]] = sext i8 [[TMP5]] to i64
103; AVX-NEXT:    [[T5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP6]]
104; AVX-NEXT:    [[T6:%.*]] = load i8, ptr [[T4]], align 1
105; AVX-NEXT:    [[T7:%.*]] = load i8, ptr [[T5]], align 1
106; AVX-NEXT:    [[T8:%.*]] = add i8 [[T6]], [[T7]]
107; AVX-NEXT:    ret i8 [[T8]]
108;
; Input IR below: %t0/%t1 come from 'sext i8 ... to i32', so the sign bit of
; the 32-bit values is NOT known zero. The autogenerated check lines above
; show the extracted lanes being widened with sext for the GEP indices (and
; the GEPs carry no 'nuw', unlike the zext case), matching the FIXME note
; preceding this function about the extra sign bit.
109entry:
110  %t0 = sext i8 %v0 to i32
111  %t1 = sext i8 %v1 to i32
112  %t2 = or i32 %t0, 1
113  %t3 = or i32 %t1, 1
114  %t4 = getelementptr inbounds i8, ptr %ptr, i32 %t2
115  %t5 = getelementptr inbounds i8, ptr %ptr, i32 %t3
116  %t6 = load i8, ptr %t4
117  %t7 = load i8, ptr %t5
118  %t8 = add i8 %t6, %t7
119  ret i8 %t8
120}
121