; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=aarch64--linux-gnu -passes=slp-vectorizer,dce,instcombine -slp-threshold=-5 -pass-remarks-output=%t < %s | FileCheck %s
; RUN: cat %t | FileCheck -check-prefix=YAML %s
; RUN: opt -S -mtriple=aarch64--linux-gnu -passes='slp-vectorizer,dce,instcombine' -slp-threshold=-5 -pass-remarks-output=%t < %s | FileCheck %s
; RUN: cat %t | FileCheck -check-prefix=YAML %s


@global = internal global { ptr } zeroinitializer, align 8

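; The YAML checks below verify the optimization remark emitted via
; -pass-remarks-output: SLP reports a profitable (negative) cost of -20 and a
; vectorization tree of 5 nodes for test_i16_extend.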
; YAML-LABEL: --- !Passed
; YAML-NEXT:  Pass:            slp-vectorizer
; YAML-NEXT:  Name:            VectorizedList
; YAML-NEXT:  Function:        test_i16_extend
; YAML-NEXT:  Args:
; YAML-NEXT:    - String:          'SLP vectorized with cost '
; YAML-NEXT:    - Cost:            '-20'
; YAML-NEXT:    - String:          ' and with tree size '
; YAML-NEXT:    - TreeSize:        '5'
; YAML-NEXT:  ...

; Make sure we vectorize to maximize the load width when loading i16 values
; and extending them for compute operations.
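; Each scalar lane loads an i16 from %p.1 and from %p.2 at the same index,
; zero-extends both values, subtracts them, and uses the difference to index a
; load from %p.0. The CHECK lines expect the eight lanes to be combined into
; two <8 x i16> loads, two zexts to <8 x i32> and a single <8 x i32> sub.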
define void @test_i16_extend(ptr %p.1, ptr %p.2, i32 %idx.i32) {
; CHECK-LABEL: @test_i16_extend(
; CHECK-NEXT:    [[P_0:%.*]] = load ptr, ptr @global, align 8
; CHECK-NEXT:    [[IDX_0:%.*]] = zext i32 [[IDX_I32:%.*]] to i64
; CHECK-NEXT:    [[T53:%.*]] = getelementptr inbounds nuw i16, ptr [[P_1:%.*]], i64 [[IDX_0]]
; CHECK-NEXT:    [[T56:%.*]] = getelementptr inbounds nuw i16, ptr [[P_2:%.*]], i64 [[IDX_0]]
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr [[T53]], align 2
; CHECK-NEXT:    [[TMP2:%.*]] = zext <8 x i16> [[TMP1]] to <8 x i32>
; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i16>, ptr [[T56]], align 2
; CHECK-NEXT:    [[TMP4:%.*]] = zext <8 x i16> [[TMP3]] to <8 x i32>
; CHECK-NEXT:    [[TMP5:%.*]] = sub nsw <8 x i32> [[TMP2]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <8 x i32> [[TMP5]], i64 0
; CHECK-NEXT:    [[TMP7:%.*]] = sext i32 [[TMP6]] to i64
; CHECK-NEXT:    [[T60:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP7]]
; CHECK-NEXT:    [[L_1:%.*]] = load i32, ptr [[T60]], align 4
; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <8 x i32> [[TMP5]], i64 1
; CHECK-NEXT:    [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
; CHECK-NEXT:    [[T71:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP9]]
; CHECK-NEXT:    [[L_2:%.*]] = load i32, ptr [[T71]], align 4
; CHECK-NEXT:    [[TMP10:%.*]] = extractelement <8 x i32> [[TMP5]], i64 2
; CHECK-NEXT:    [[TMP11:%.*]] = sext i32 [[TMP10]] to i64
; CHECK-NEXT:    [[T82:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP11]]
; CHECK-NEXT:    [[L_3:%.*]] = load i32, ptr [[T82]], align 4
; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <8 x i32> [[TMP5]], i64 3
; CHECK-NEXT:    [[TMP13:%.*]] = sext i32 [[TMP12]] to i64
; CHECK-NEXT:    [[T93:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP13]]
; CHECK-NEXT:    [[L_4:%.*]] = load i32, ptr [[T93]], align 4
; CHECK-NEXT:    [[TMP14:%.*]] = extractelement <8 x i32> [[TMP5]], i64 4
; CHECK-NEXT:    [[TMP15:%.*]] = sext i32 [[TMP14]] to i64
; CHECK-NEXT:    [[T104:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP15]]
; CHECK-NEXT:    [[L_5:%.*]] = load i32, ptr [[T104]], align 4
; CHECK-NEXT:    [[TMP16:%.*]] = extractelement <8 x i32> [[TMP5]], i64 5
; CHECK-NEXT:    [[TMP17:%.*]] = sext i32 [[TMP16]] to i64
; CHECK-NEXT:    [[T115:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP17]]
; CHECK-NEXT:    [[L_6:%.*]] = load i32, ptr [[T115]], align 4
; CHECK-NEXT:    [[TMP18:%.*]] = extractelement <8 x i32> [[TMP5]], i64 6
; CHECK-NEXT:    [[TMP19:%.*]] = sext i32 [[TMP18]] to i64
; CHECK-NEXT:    [[T126:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP19]]
; CHECK-NEXT:    [[L_7:%.*]] = load i32, ptr [[T126]], align 4
; CHECK-NEXT:    [[TMP20:%.*]] = extractelement <8 x i32> [[TMP5]], i64 7
; CHECK-NEXT:    [[TMP21:%.*]] = sext i32 [[TMP20]] to i64
; CHECK-NEXT:    [[T137:%.*]] = getelementptr inbounds i32, ptr [[P_0]], i64 [[TMP21]]
; CHECK-NEXT:    [[L_8:%.*]] = load i32, ptr [[T137]], align 4
; CHECK-NEXT:    call void @use(i32 [[L_1]], i32 [[L_2]], i32 [[L_3]], i32 [[L_4]], i32 [[L_5]], i32 [[L_6]], i32 [[L_7]], i32 [[L_8]])
; CHECK-NEXT:    ret void
;
  %p.0 = load ptr, ptr @global, align 8

  %idx.0 = zext i32 %idx.i32 to i64
  %idx.1 = add nsw i64 %idx.0, 1
  %idx.2 = add nsw i64 %idx.0, 2
  %idx.3 = add nsw i64 %idx.0, 3
  %idx.4 = add nsw i64 %idx.0, 4
  %idx.5 = add nsw i64 %idx.0, 5
  %idx.6 = add nsw i64 %idx.0, 6
  %idx.7 = add nsw i64 %idx.0, 7

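  ; The eight lanes use consecutive indices, so the i16 loads below cover two
  ; contiguous 8 x i16 blocks, one based at %p.1 and one at %p.2, which the
  ; SLP vectorizer can merge into single <8 x i16> loads.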
  %t53 = getelementptr inbounds i16, ptr %p.1, i64 %idx.0
  %op1.l = load i16, ptr %t53, align 2
  %op1.ext = zext i16 %op1.l to i64
  %t56 = getelementptr inbounds i16, ptr %p.2, i64 %idx.0
  %op2.l = load i16, ptr %t56, align 2
  %op2.ext = zext i16 %op2.l to i64
  %sub.1 = sub nsw i64 %op1.ext, %op2.ext

  %t60 = getelementptr inbounds i32, ptr %p.0, i64 %sub.1
  %l.1 = load i32, ptr %t60, align 4
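  ; The i32 loads have data-dependent addresses (the per-lane differences), so
  ; they are expected to stay scalar; in the vectorized code they are fed by
  ; extractelement and sext of the <8 x i32> subtract result.
  ; Lanes 2 through 8 below repeat this pattern for %idx.1 through %idx.7.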

  %t64 = getelementptr inbounds i16, ptr %p.1, i64 %idx.1
  %t65 = load i16, ptr %t64, align 2
  %t66 = zext i16 %t65 to i64
  %t67 = getelementptr inbounds i16, ptr %p.2, i64 %idx.1
  %t68 = load i16, ptr %t67, align 2
  %t69 = zext i16 %t68 to i64
  %sub.2 = sub nsw i64 %t66, %t69

  %t71 = getelementptr inbounds i32, ptr %p.0, i64 %sub.2
  %l.2 = load i32, ptr %t71, align 4

  %t75 = getelementptr inbounds i16, ptr %p.1, i64 %idx.2
  %t76 = load i16, ptr %t75, align 2
  %t77 = zext i16 %t76 to i64
  %t78 = getelementptr inbounds i16, ptr %p.2, i64 %idx.2
  %t79 = load i16, ptr %t78, align 2
  %t80 = zext i16 %t79 to i64
  %sub.3 = sub nsw i64 %t77, %t80

  %t82 = getelementptr inbounds i32, ptr %p.0, i64 %sub.3
  %l.3 = load i32, ptr %t82, align 4

  %t86 = getelementptr inbounds i16, ptr %p.1, i64 %idx.3
  %t87 = load i16, ptr %t86, align 2
  %t88 = zext i16 %t87 to i64
  %t89 = getelementptr inbounds i16, ptr %p.2, i64 %idx.3
  %t90 = load i16, ptr %t89, align 2
  %t91 = zext i16 %t90 to i64
  %sub.4 = sub nsw i64 %t88, %t91

  %t93 = getelementptr inbounds i32, ptr %p.0, i64 %sub.4
  %l.4 = load i32, ptr %t93, align 4

  %t97 = getelementptr inbounds i16, ptr %p.1, i64 %idx.4
  %t98 = load i16, ptr %t97, align 2
  %t99 = zext i16 %t98 to i64
  %t100 = getelementptr inbounds i16, ptr %p.2, i64 %idx.4
  %t101 = load i16, ptr %t100, align 2
  %t102 = zext i16 %t101 to i64
  %sub.5 = sub nsw i64 %t99, %t102

  %t104 = getelementptr inbounds i32, ptr %p.0, i64 %sub.5
  %l.5 = load i32, ptr %t104, align 4

  %t108 = getelementptr inbounds i16, ptr %p.1, i64 %idx.5
  %t109 = load i16, ptr %t108, align 2
  %t110 = zext i16 %t109 to i64
  %t111 = getelementptr inbounds i16, ptr %p.2, i64 %idx.5
  %t112 = load i16, ptr %t111, align 2
  %t113 = zext i16 %t112 to i64
  %sub.6 = sub nsw i64 %t110, %t113

  %t115 = getelementptr inbounds i32, ptr %p.0, i64 %sub.6
  %l.6 = load i32, ptr %t115, align 4

  %t119 = getelementptr inbounds i16, ptr %p.1, i64 %idx.6
  %t120 = load i16, ptr %t119, align 2
  %t121 = zext i16 %t120 to i64
  %t122 = getelementptr inbounds i16, ptr %p.2, i64 %idx.6
  %t123 = load i16, ptr %t122, align 2
  %t124 = zext i16 %t123 to i64
  %sub.7 = sub nsw i64 %t121, %t124

  %t126 = getelementptr inbounds i32, ptr %p.0, i64 %sub.7
  %l.7 = load i32, ptr %t126, align 4

  %t130 = getelementptr inbounds i16, ptr %p.1, i64 %idx.7
  %t131 = load i16, ptr %t130, align 2
  %t132 = zext i16 %t131 to i64
  %t133 = getelementptr inbounds i16, ptr %p.2, i64 %idx.7
  %t134 = load i16, ptr %t133, align 2
  %t135 = zext i16 %t134 to i64
  %sub.8 = sub nsw i64 %t132, %t135

  %t137 = getelementptr inbounds i32, ptr %p.0, i64 %sub.8
  %l.8 = load i32, ptr %t137, align 4

  call void @use(i32 %l.1, i32 %l.2, i32 %l.3, i32 %l.4, i32 %l.5, i32 %l.6, i32 %l.7, i32 %l.8)
  ret void
}

declare void @use(i32, i32, i32, i32, i32, i32, i32, i32)