; xref: /llvm-project/llvm/test/Transforms/SLPVectorizer/X86/ctpop.ll (revision 580210a0c938531ef9fd79f9ffedb93eeb2e66c2)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-unknown -mattr=+sse2 -passes=slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=SSE --check-prefix=SSE2
; RUN: opt < %s -mtriple=x86_64-unknown -mattr=+sse4.2,+popcnt -passes=slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=SSE --check-prefix=SSE42
; RUN: opt < %s -mtriple=x86_64-unknown -mattr=+avx,+popcnt -passes=slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
; RUN: opt < %s -mtriple=x86_64-unknown -mattr=+avx2,+popcnt -passes=slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

@src64 = common global [4 x i64] zeroinitializer, align 32
@dst64 = common global [4 x i64] zeroinitializer, align 32
@src32 = common global [8 x i32] zeroinitializer, align 32
@dst32 = common global [8 x i32] zeroinitializer, align 32
@src16 = common global [16 x i16] zeroinitializer, align 32
@dst16 = common global [16 x i16] zeroinitializer, align 32
@src8  = common global [32 x i8] zeroinitializer, align 32
@dst8  = common global [32 x i8] zeroinitializer, align 32

declare i64 @llvm.ctpop.i64(i64)
declare i32 @llvm.ctpop.i32(i32)
declare i16 @llvm.ctpop.i16(i16)
declare  i8 @llvm.ctpop.i8(i8)
; Two scalar i64 ctpops over @src64[0..1]. Per the CHECK lines: SSE2 and AVX2
; vectorize to a single <2 x i64> ctpop; SSE42 and AVX1 (both built with
; +popcnt, presumably making the scalar form cheap — see the RUN lines) keep
; the scalar calls.
define void @ctpop_2i64() #0 {
; SSE2-LABEL: @ctpop_2i64(
; SSE2-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @src64, align 8
; SSE2-NEXT:    [[TMP2:%.*]] = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> [[TMP1]])
; SSE2-NEXT:    store <2 x i64> [[TMP2]], ptr @dst64, align 8
; SSE2-NEXT:    ret void
;
; SSE42-LABEL: @ctpop_2i64(
; SSE42-NEXT:    [[LD0:%.*]] = load i64, ptr @src64, align 8
; SSE42-NEXT:    [[LD1:%.*]] = load i64, ptr getelementptr inbounds ([4 x i64], ptr @src64, i32 0, i64 1), align 8
; SSE42-NEXT:    [[CTPOP0:%.*]] = call i64 @llvm.ctpop.i64(i64 [[LD0]])
; SSE42-NEXT:    [[CTPOP1:%.*]] = call i64 @llvm.ctpop.i64(i64 [[LD1]])
; SSE42-NEXT:    store i64 [[CTPOP0]], ptr @dst64, align 8
; SSE42-NEXT:    store i64 [[CTPOP1]], ptr getelementptr inbounds ([4 x i64], ptr @dst64, i32 0, i64 1), align 8
; SSE42-NEXT:    ret void
;
; AVX1-LABEL: @ctpop_2i64(
; AVX1-NEXT:    [[LD0:%.*]] = load i64, ptr @src64, align 8
; AVX1-NEXT:    [[LD1:%.*]] = load i64, ptr getelementptr inbounds ([4 x i64], ptr @src64, i32 0, i64 1), align 8
; AVX1-NEXT:    [[CTPOP0:%.*]] = call i64 @llvm.ctpop.i64(i64 [[LD0]])
; AVX1-NEXT:    [[CTPOP1:%.*]] = call i64 @llvm.ctpop.i64(i64 [[LD1]])
; AVX1-NEXT:    store i64 [[CTPOP0]], ptr @dst64, align 8
; AVX1-NEXT:    store i64 [[CTPOP1]], ptr getelementptr inbounds ([4 x i64], ptr @dst64, i32 0, i64 1), align 8
; AVX1-NEXT:    ret void
;
; AVX2-LABEL: @ctpop_2i64(
; AVX2-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @src64, align 8
; AVX2-NEXT:    [[TMP2:%.*]] = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> [[TMP1]])
; AVX2-NEXT:    store <2 x i64> [[TMP2]], ptr @dst64, align 8
; AVX2-NEXT:    ret void
;
  %ld0 = load i64, ptr @src64, align 8
  %ld1 = load i64, ptr getelementptr inbounds ([4 x i64], ptr @src64, i32 0, i64 1), align 8
  %ctpop0 = call i64 @llvm.ctpop.i64(i64 %ld0)
  %ctpop1 = call i64 @llvm.ctpop.i64(i64 %ld1)
  store i64 %ctpop0, ptr @dst64, align 8
  store i64 %ctpop1, ptr getelementptr inbounds ([4 x i64], ptr @dst64, i32 0, i64 1), align 8
  ret void
}
62
; Four scalar i64 ctpops over the whole of @src64. Per the CHECK lines: SSE2
; splits into two <2 x i64> ctpops, AVX2 uses a single <4 x i64>, and
; SSE42/AVX1 keep all four scalar calls.
define void @ctpop_4i64() #0 {
; SSE2-LABEL: @ctpop_4i64(
; SSE2-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @src64, align 4
; SSE2-NEXT:    [[TMP2:%.*]] = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> [[TMP1]])
; SSE2-NEXT:    store <2 x i64> [[TMP2]], ptr @dst64, align 4
; SSE2-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr inbounds ([4 x i64], ptr @src64, i64 0, i64 2), align 4
; SSE2-NEXT:    [[TMP4:%.*]] = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> [[TMP3]])
; SSE2-NEXT:    store <2 x i64> [[TMP4]], ptr getelementptr inbounds ([4 x i64], ptr @dst64, i64 0, i64 2), align 4
; SSE2-NEXT:    ret void
;
; SSE42-LABEL: @ctpop_4i64(
; SSE42-NEXT:    [[LD0:%.*]] = load i64, ptr @src64, align 4
; SSE42-NEXT:    [[LD1:%.*]] = load i64, ptr getelementptr inbounds ([4 x i64], ptr @src64, i64 0, i64 1), align 4
; SSE42-NEXT:    [[LD2:%.*]] = load i64, ptr getelementptr inbounds ([4 x i64], ptr @src64, i64 0, i64 2), align 4
; SSE42-NEXT:    [[LD3:%.*]] = load i64, ptr getelementptr inbounds ([4 x i64], ptr @src64, i64 0, i64 3), align 4
; SSE42-NEXT:    [[CTPOP0:%.*]] = call i64 @llvm.ctpop.i64(i64 [[LD0]])
; SSE42-NEXT:    [[CTPOP1:%.*]] = call i64 @llvm.ctpop.i64(i64 [[LD1]])
; SSE42-NEXT:    [[CTPOP2:%.*]] = call i64 @llvm.ctpop.i64(i64 [[LD2]])
; SSE42-NEXT:    [[CTPOP3:%.*]] = call i64 @llvm.ctpop.i64(i64 [[LD3]])
; SSE42-NEXT:    store i64 [[CTPOP0]], ptr @dst64, align 4
; SSE42-NEXT:    store i64 [[CTPOP1]], ptr getelementptr inbounds ([4 x i64], ptr @dst64, i64 0, i64 1), align 4
; SSE42-NEXT:    store i64 [[CTPOP2]], ptr getelementptr inbounds ([4 x i64], ptr @dst64, i64 0, i64 2), align 4
; SSE42-NEXT:    store i64 [[CTPOP3]], ptr getelementptr inbounds ([4 x i64], ptr @dst64, i64 0, i64 3), align 4
; SSE42-NEXT:    ret void
;
; AVX1-LABEL: @ctpop_4i64(
; AVX1-NEXT:    [[LD0:%.*]] = load i64, ptr @src64, align 4
; AVX1-NEXT:    [[LD1:%.*]] = load i64, ptr getelementptr inbounds ([4 x i64], ptr @src64, i64 0, i64 1), align 4
; AVX1-NEXT:    [[LD2:%.*]] = load i64, ptr getelementptr inbounds ([4 x i64], ptr @src64, i64 0, i64 2), align 4
; AVX1-NEXT:    [[LD3:%.*]] = load i64, ptr getelementptr inbounds ([4 x i64], ptr @src64, i64 0, i64 3), align 4
; AVX1-NEXT:    [[CTPOP0:%.*]] = call i64 @llvm.ctpop.i64(i64 [[LD0]])
; AVX1-NEXT:    [[CTPOP1:%.*]] = call i64 @llvm.ctpop.i64(i64 [[LD1]])
; AVX1-NEXT:    [[CTPOP2:%.*]] = call i64 @llvm.ctpop.i64(i64 [[LD2]])
; AVX1-NEXT:    [[CTPOP3:%.*]] = call i64 @llvm.ctpop.i64(i64 [[LD3]])
; AVX1-NEXT:    store i64 [[CTPOP0]], ptr @dst64, align 4
; AVX1-NEXT:    store i64 [[CTPOP1]], ptr getelementptr inbounds ([4 x i64], ptr @dst64, i64 0, i64 1), align 4
; AVX1-NEXT:    store i64 [[CTPOP2]], ptr getelementptr inbounds ([4 x i64], ptr @dst64, i64 0, i64 2), align 4
; AVX1-NEXT:    store i64 [[CTPOP3]], ptr getelementptr inbounds ([4 x i64], ptr @dst64, i64 0, i64 3), align 4
; AVX1-NEXT:    ret void
;
; AVX2-LABEL: @ctpop_4i64(
; AVX2-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @src64, align 4
; AVX2-NEXT:    [[TMP2:%.*]] = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> [[TMP1]])
; AVX2-NEXT:    store <4 x i64> [[TMP2]], ptr @dst64, align 4
; AVX2-NEXT:    ret void
;
  %ld0 = load i64, ptr @src64, align 4
  %ld1 = load i64, ptr getelementptr inbounds ([4 x i64], ptr @src64, i64 0, i64 1), align 4
  %ld2 = load i64, ptr getelementptr inbounds ([4 x i64], ptr @src64, i64 0, i64 2), align 4
  %ld3 = load i64, ptr getelementptr inbounds ([4 x i64], ptr @src64, i64 0, i64 3), align 4
  %ctpop0 = call i64 @llvm.ctpop.i64(i64 %ld0)
  %ctpop1 = call i64 @llvm.ctpop.i64(i64 %ld1)
  %ctpop2 = call i64 @llvm.ctpop.i64(i64 %ld2)
  %ctpop3 = call i64 @llvm.ctpop.i64(i64 %ld3)
  store i64 %ctpop0, ptr @dst64, align 4
  store i64 %ctpop1, ptr getelementptr inbounds ([4 x i64], ptr @dst64, i64 0, i64 1), align 4
  store i64 %ctpop2, ptr getelementptr inbounds ([4 x i64], ptr @dst64, i64 0, i64 2), align 4
  store i64 %ctpop3, ptr getelementptr inbounds ([4 x i64], ptr @dst64, i64 0, i64 3), align 4
  ret void
}
123
; Four scalar i32 ctpops over @src32[0..3]. Per the CHECK lines: SSE2 and both
; AVX configurations vectorize to a single <4 x i32> ctpop; SSE42 keeps the
; scalar calls.
define void @ctpop_4i32() #0 {
; SSE2-LABEL: @ctpop_4i32(
; SSE2-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @src32, align 4
; SSE2-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> [[TMP1]])
; SSE2-NEXT:    store <4 x i32> [[TMP2]], ptr @dst32, align 4
; SSE2-NEXT:    ret void
;
; SSE42-LABEL: @ctpop_4i32(
; SSE42-NEXT:    [[LD0:%.*]] = load i32, ptr @src32, align 4
; SSE42-NEXT:    [[LD1:%.*]] = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 1), align 4
; SSE42-NEXT:    [[LD2:%.*]] = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 2), align 4
; SSE42-NEXT:    [[LD3:%.*]] = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 3), align 4
; SSE42-NEXT:    [[CTPOP0:%.*]] = call i32 @llvm.ctpop.i32(i32 [[LD0]])
; SSE42-NEXT:    [[CTPOP1:%.*]] = call i32 @llvm.ctpop.i32(i32 [[LD1]])
; SSE42-NEXT:    [[CTPOP2:%.*]] = call i32 @llvm.ctpop.i32(i32 [[LD2]])
; SSE42-NEXT:    [[CTPOP3:%.*]] = call i32 @llvm.ctpop.i32(i32 [[LD3]])
; SSE42-NEXT:    store i32 [[CTPOP0]], ptr @dst32, align 4
; SSE42-NEXT:    store i32 [[CTPOP1]], ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 1), align 4
; SSE42-NEXT:    store i32 [[CTPOP2]], ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 2), align 4
; SSE42-NEXT:    store i32 [[CTPOP3]], ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 3), align 4
; SSE42-NEXT:    ret void
;
; AVX-LABEL: @ctpop_4i32(
; AVX-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @src32, align 4
; AVX-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> [[TMP1]])
; AVX-NEXT:    store <4 x i32> [[TMP2]], ptr @dst32, align 4
; AVX-NEXT:    ret void
;
  %ld0 = load i32, ptr @src32, align 4
  %ld1 = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 1), align 4
  %ld2 = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 2), align 4
  %ld3 = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 3), align 4
  %ctpop0 = call i32 @llvm.ctpop.i32(i32 %ld0)
  %ctpop1 = call i32 @llvm.ctpop.i32(i32 %ld1)
  %ctpop2 = call i32 @llvm.ctpop.i32(i32 %ld2)
  %ctpop3 = call i32 @llvm.ctpop.i32(i32 %ld3)
  store i32 %ctpop0, ptr @dst32, align 4
  store i32 %ctpop1, ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 1), align 4
  store i32 %ctpop2, ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 2), align 4
  store i32 %ctpop3, ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 3), align 4
  ret void
}
166
; Eight scalar i32 ctpops over the whole of @src32. Per the CHECK lines: SSE2
; splits into two <4 x i32> ctpops, AVX uses a single <8 x i32>, and SSE42
; keeps all eight scalar calls.
define void @ctpop_8i32() #0 {
; SSE2-LABEL: @ctpop_8i32(
; SSE2-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @src32, align 2
; SSE2-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> [[TMP1]])
; SSE2-NEXT:    store <4 x i32> [[TMP2]], ptr @dst32, align 2
; SSE2-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 4), align 2
; SSE2-NEXT:    [[TMP4:%.*]] = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> [[TMP3]])
; SSE2-NEXT:    store <4 x i32> [[TMP4]], ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 4), align 2
; SSE2-NEXT:    ret void
;
; SSE42-LABEL: @ctpop_8i32(
; SSE42-NEXT:    [[LD0:%.*]] = load i32, ptr @src32, align 2
; SSE42-NEXT:    [[LD1:%.*]] = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 1), align 2
; SSE42-NEXT:    [[LD2:%.*]] = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 2), align 2
; SSE42-NEXT:    [[LD3:%.*]] = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 3), align 2
; SSE42-NEXT:    [[LD4:%.*]] = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 4), align 2
; SSE42-NEXT:    [[LD5:%.*]] = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 5), align 2
; SSE42-NEXT:    [[LD6:%.*]] = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 6), align 2
; SSE42-NEXT:    [[LD7:%.*]] = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 7), align 2
; SSE42-NEXT:    [[CTPOP0:%.*]] = call i32 @llvm.ctpop.i32(i32 [[LD0]])
; SSE42-NEXT:    [[CTPOP1:%.*]] = call i32 @llvm.ctpop.i32(i32 [[LD1]])
; SSE42-NEXT:    [[CTPOP2:%.*]] = call i32 @llvm.ctpop.i32(i32 [[LD2]])
; SSE42-NEXT:    [[CTPOP3:%.*]] = call i32 @llvm.ctpop.i32(i32 [[LD3]])
; SSE42-NEXT:    [[CTPOP4:%.*]] = call i32 @llvm.ctpop.i32(i32 [[LD4]])
; SSE42-NEXT:    [[CTPOP5:%.*]] = call i32 @llvm.ctpop.i32(i32 [[LD5]])
; SSE42-NEXT:    [[CTPOP6:%.*]] = call i32 @llvm.ctpop.i32(i32 [[LD6]])
; SSE42-NEXT:    [[CTPOP7:%.*]] = call i32 @llvm.ctpop.i32(i32 [[LD7]])
; SSE42-NEXT:    store i32 [[CTPOP0]], ptr @dst32, align 2
; SSE42-NEXT:    store i32 [[CTPOP1]], ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 1), align 2
; SSE42-NEXT:    store i32 [[CTPOP2]], ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 2), align 2
; SSE42-NEXT:    store i32 [[CTPOP3]], ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 3), align 2
; SSE42-NEXT:    store i32 [[CTPOP4]], ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 4), align 2
; SSE42-NEXT:    store i32 [[CTPOP5]], ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 5), align 2
; SSE42-NEXT:    store i32 [[CTPOP6]], ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 6), align 2
; SSE42-NEXT:    store i32 [[CTPOP7]], ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 7), align 2
; SSE42-NEXT:    ret void
;
; AVX-LABEL: @ctpop_8i32(
; AVX-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @src32, align 2
; AVX-NEXT:    [[TMP2:%.*]] = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> [[TMP1]])
; AVX-NEXT:    store <8 x i32> [[TMP2]], ptr @dst32, align 2
; AVX-NEXT:    ret void
;
  %ld0 = load i32, ptr @src32, align 2
  %ld1 = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 1), align 2
  %ld2 = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 2), align 2
  %ld3 = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 3), align 2
  %ld4 = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 4), align 2
  %ld5 = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 5), align 2
  %ld6 = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 6), align 2
  %ld7 = load i32, ptr getelementptr inbounds ([8 x i32], ptr @src32, i32 0, i64 7), align 2
  %ctpop0 = call i32 @llvm.ctpop.i32(i32 %ld0)
  %ctpop1 = call i32 @llvm.ctpop.i32(i32 %ld1)
  %ctpop2 = call i32 @llvm.ctpop.i32(i32 %ld2)
  %ctpop3 = call i32 @llvm.ctpop.i32(i32 %ld3)
  %ctpop4 = call i32 @llvm.ctpop.i32(i32 %ld4)
  %ctpop5 = call i32 @llvm.ctpop.i32(i32 %ld5)
  %ctpop6 = call i32 @llvm.ctpop.i32(i32 %ld6)
  %ctpop7 = call i32 @llvm.ctpop.i32(i32 %ld7)
  store i32 %ctpop0, ptr @dst32, align 2
  store i32 %ctpop1, ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 1), align 2
  store i32 %ctpop2, ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 2), align 2
  store i32 %ctpop3, ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 3), align 2
  store i32 %ctpop4, ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 4), align 2
  store i32 %ctpop5, ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 5), align 2
  store i32 %ctpop6, ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 6), align 2
  store i32 %ctpop7, ptr getelementptr inbounds ([8 x i32], ptr @dst32, i32 0, i64 7), align 2
  ret void
}
236
; Eight scalar i16 ctpops over @src16[0..7]. Per the shared CHECK lines, every
; configuration vectorizes to a single <8 x i16> ctpop (there is no scalar
; i16 popcount instruction to prefer).
define void @ctpop_8i16() #0 {
; CHECK-LABEL: @ctpop_8i16(
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @src16, align 2
; CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> [[TMP1]])
; CHECK-NEXT:    store <8 x i16> [[TMP2]], ptr @dst16, align 2
; CHECK-NEXT:    ret void
;
  %ld0 = load i16, ptr @src16, align 2
  %ld1 = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64 1), align 2
  %ld2 = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64 2), align 2
  %ld3 = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64 3), align 2
  %ld4 = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64 4), align 2
  %ld5 = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64 5), align 2
  %ld6 = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64 6), align 2
  %ld7 = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64 7), align 2
  %ctpop0 = call i16 @llvm.ctpop.i16(i16 %ld0)
  %ctpop1 = call i16 @llvm.ctpop.i16(i16 %ld1)
  %ctpop2 = call i16 @llvm.ctpop.i16(i16 %ld2)
  %ctpop3 = call i16 @llvm.ctpop.i16(i16 %ld3)
  %ctpop4 = call i16 @llvm.ctpop.i16(i16 %ld4)
  %ctpop5 = call i16 @llvm.ctpop.i16(i16 %ld5)
  %ctpop6 = call i16 @llvm.ctpop.i16(i16 %ld6)
  %ctpop7 = call i16 @llvm.ctpop.i16(i16 %ld7)
  store i16 %ctpop0, ptr @dst16, align 2
  store i16 %ctpop1, ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64 1), align 2
  store i16 %ctpop2, ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64 2), align 2
  store i16 %ctpop3, ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64 3), align 2
  store i16 %ctpop4, ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64 4), align 2
  store i16 %ctpop5, ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64 5), align 2
  store i16 %ctpop6, ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64 6), align 2
  store i16 %ctpop7, ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64 7), align 2
  ret void
}
270
; Sixteen scalar i16 ctpops over the whole of @src16. Per the CHECK lines: the
; SSE configurations split into two <8 x i16> ctpops; the AVX configurations
; use a single <16 x i16>.
define void @ctpop_16i16() #0 {
; SSE-LABEL: @ctpop_16i16(
; SSE-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @src16, align 2
; SSE-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> [[TMP1]])
; SSE-NEXT:    store <8 x i16> [[TMP2]], ptr @dst16, align 2
; SSE-NEXT:    [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64 8), align 2
; SSE-NEXT:    [[TMP4:%.*]] = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> [[TMP3]])
; SSE-NEXT:    store <8 x i16> [[TMP4]], ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64 8), align 2
; SSE-NEXT:    ret void
;
; AVX-LABEL: @ctpop_16i16(
; AVX-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @src16, align 2
; AVX-NEXT:    [[TMP2:%.*]] = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> [[TMP1]])
; AVX-NEXT:    store <16 x i16> [[TMP2]], ptr @dst16, align 2
; AVX-NEXT:    ret void
;
  %ld0  = load i16, ptr @src16, align 2
  %ld1  = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64  1), align 2
  %ld2  = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64  2), align 2
  %ld3  = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64  3), align 2
  %ld4  = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64  4), align 2
  %ld5  = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64  5), align 2
  %ld6  = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64  6), align 2
  %ld7  = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64  7), align 2
  %ld8  = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64  8), align 2
  %ld9  = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64  9), align 2
  %ld10 = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64 10), align 2
  %ld11 = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64 11), align 2
  %ld12 = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64 12), align 2
  %ld13 = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64 13), align 2
  %ld14 = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64 14), align 2
  %ld15 = load i16, ptr getelementptr inbounds ([16 x i16], ptr @src16, i16 0, i64 15), align 2
  %ctpop0  = call i16 @llvm.ctpop.i16(i16 %ld0)
  %ctpop1  = call i16 @llvm.ctpop.i16(i16 %ld1)
  %ctpop2  = call i16 @llvm.ctpop.i16(i16 %ld2)
  %ctpop3  = call i16 @llvm.ctpop.i16(i16 %ld3)
  %ctpop4  = call i16 @llvm.ctpop.i16(i16 %ld4)
  %ctpop5  = call i16 @llvm.ctpop.i16(i16 %ld5)
  %ctpop6  = call i16 @llvm.ctpop.i16(i16 %ld6)
  %ctpop7  = call i16 @llvm.ctpop.i16(i16 %ld7)
  %ctpop8  = call i16 @llvm.ctpop.i16(i16 %ld8)
  %ctpop9  = call i16 @llvm.ctpop.i16(i16 %ld9)
  %ctpop10 = call i16 @llvm.ctpop.i16(i16 %ld10)
  %ctpop11 = call i16 @llvm.ctpop.i16(i16 %ld11)
  %ctpop12 = call i16 @llvm.ctpop.i16(i16 %ld12)
  %ctpop13 = call i16 @llvm.ctpop.i16(i16 %ld13)
  %ctpop14 = call i16 @llvm.ctpop.i16(i16 %ld14)
  %ctpop15 = call i16 @llvm.ctpop.i16(i16 %ld15)
  store i16 %ctpop0 , ptr @dst16, align 2
  store i16 %ctpop1 , ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64  1), align 2
  store i16 %ctpop2 , ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64  2), align 2
  store i16 %ctpop3 , ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64  3), align 2
  store i16 %ctpop4 , ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64  4), align 2
  store i16 %ctpop5 , ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64  5), align 2
  store i16 %ctpop6 , ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64  6), align 2
  store i16 %ctpop7 , ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64  7), align 2
  store i16 %ctpop8 , ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64  8), align 2
  store i16 %ctpop9 , ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64  9), align 2
  store i16 %ctpop10, ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64 10), align 2
  store i16 %ctpop11, ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64 11), align 2
  store i16 %ctpop12, ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64 12), align 2
  store i16 %ctpop13, ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64 13), align 2
  store i16 %ctpop14, ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64 14), align 2
  store i16 %ctpop15, ptr getelementptr inbounds ([16 x i16], ptr @dst16, i16 0, i64 15), align 2
  ret void
}
337
; Sixteen scalar i8 ctpops over @src8[0..15]. Per the shared CHECK lines,
; every configuration vectorizes to a single <16 x i8> ctpop.
define void @ctpop_16i8() #0 {
; CHECK-LABEL: @ctpop_16i8(
; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @src8, align 1
; CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> [[TMP1]])
; CHECK-NEXT:    store <16 x i8> [[TMP2]], ptr @dst8, align 1
; CHECK-NEXT:    ret void
;
  %ld0  = load i8, ptr @src8, align 1
  %ld1  = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64  1), align 1
  %ld2  = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64  2), align 1
  %ld3  = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64  3), align 1
  %ld4  = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64  4), align 1
  %ld5  = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64  5), align 1
  %ld6  = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64  6), align 1
  %ld7  = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64  7), align 1
  %ld8  = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64  8), align 1
  %ld9  = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64  9), align 1
  %ld10 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 10), align 1
  %ld11 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 11), align 1
  %ld12 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 12), align 1
  %ld13 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 13), align 1
  %ld14 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 14), align 1
  %ld15 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 15), align 1
  %ctpop0  = call i8 @llvm.ctpop.i8(i8 %ld0)
  %ctpop1  = call i8 @llvm.ctpop.i8(i8 %ld1)
  %ctpop2  = call i8 @llvm.ctpop.i8(i8 %ld2)
  %ctpop3  = call i8 @llvm.ctpop.i8(i8 %ld3)
  %ctpop4  = call i8 @llvm.ctpop.i8(i8 %ld4)
  %ctpop5  = call i8 @llvm.ctpop.i8(i8 %ld5)
  %ctpop6  = call i8 @llvm.ctpop.i8(i8 %ld6)
  %ctpop7  = call i8 @llvm.ctpop.i8(i8 %ld7)
  %ctpop8  = call i8 @llvm.ctpop.i8(i8 %ld8)
  %ctpop9  = call i8 @llvm.ctpop.i8(i8 %ld9)
  %ctpop10 = call i8 @llvm.ctpop.i8(i8 %ld10)
  %ctpop11 = call i8 @llvm.ctpop.i8(i8 %ld11)
  %ctpop12 = call i8 @llvm.ctpop.i8(i8 %ld12)
  %ctpop13 = call i8 @llvm.ctpop.i8(i8 %ld13)
  %ctpop14 = call i8 @llvm.ctpop.i8(i8 %ld14)
  %ctpop15 = call i8 @llvm.ctpop.i8(i8 %ld15)
  store i8 %ctpop0 , ptr @dst8, align 1
  store i8 %ctpop1 , ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64  1), align 1
  store i8 %ctpop2 , ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64  2), align 1
  store i8 %ctpop3 , ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64  3), align 1
  store i8 %ctpop4 , ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64  4), align 1
  store i8 %ctpop5 , ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64  5), align 1
  store i8 %ctpop6 , ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64  6), align 1
  store i8 %ctpop7 , ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64  7), align 1
  store i8 %ctpop8 , ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64  8), align 1
  store i8 %ctpop9 , ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64  9), align 1
  store i8 %ctpop10, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 10), align 1
  store i8 %ctpop11, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 11), align 1
  store i8 %ctpop12, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 12), align 1
  store i8 %ctpop13, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 13), align 1
  store i8 %ctpop14, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 14), align 1
  store i8 %ctpop15, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 15), align 1
  ret void
}
395
; Thirty-two scalar i8 ctpops over the whole of @src8. Per the CHECK lines:
; the SSE configurations split into two <16 x i8> ctpops; the AVX
; configurations use a single <32 x i8>.
define void @ctpop_32i8() #0 {
; SSE-LABEL: @ctpop_32i8(
; SSE-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @src8, align 1
; SSE-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> [[TMP1]])
; SSE-NEXT:    store <16 x i8> [[TMP2]], ptr @dst8, align 1
; SSE-NEXT:    [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 16), align 1
; SSE-NEXT:    [[TMP4:%.*]] = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> [[TMP3]])
; SSE-NEXT:    store <16 x i8> [[TMP4]], ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 16), align 1
; SSE-NEXT:    ret void
;
; AVX-LABEL: @ctpop_32i8(
; AVX-NEXT:    [[TMP1:%.*]] = load <32 x i8>, ptr @src8, align 1
; AVX-NEXT:    [[TMP2:%.*]] = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> [[TMP1]])
; AVX-NEXT:    store <32 x i8> [[TMP2]], ptr @dst8, align 1
; AVX-NEXT:    ret void
;
  %ld0  = load i8, ptr @src8, align 1
  %ld1  = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64  1), align 1
  %ld2  = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64  2), align 1
  %ld3  = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64  3), align 1
  %ld4  = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64  4), align 1
  %ld5  = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64  5), align 1
  %ld6  = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64  6), align 1
  %ld7  = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64  7), align 1
  %ld8  = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64  8), align 1
  %ld9  = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64  9), align 1
  %ld10 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 10), align 1
  %ld11 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 11), align 1
  %ld12 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 12), align 1
  %ld13 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 13), align 1
  %ld14 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 14), align 1
  %ld15 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 15), align 1
  %ld16 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 16), align 1
  %ld17 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 17), align 1
  %ld18 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 18), align 1
  %ld19 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 19), align 1
  %ld20 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 20), align 1
  %ld21 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 21), align 1
  %ld22 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 22), align 1
  %ld23 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 23), align 1
  %ld24 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 24), align 1
  %ld25 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 25), align 1
  %ld26 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 26), align 1
  %ld27 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 27), align 1
  %ld28 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 28), align 1
  %ld29 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 29), align 1
  %ld30 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 30), align 1
  %ld31 = load i8, ptr getelementptr inbounds ([32 x i8], ptr @src8, i8 0, i64 31), align 1
  %ctpop0  = call i8 @llvm.ctpop.i8(i8 %ld0)
  %ctpop1  = call i8 @llvm.ctpop.i8(i8 %ld1)
  %ctpop2  = call i8 @llvm.ctpop.i8(i8 %ld2)
  %ctpop3  = call i8 @llvm.ctpop.i8(i8 %ld3)
  %ctpop4  = call i8 @llvm.ctpop.i8(i8 %ld4)
  %ctpop5  = call i8 @llvm.ctpop.i8(i8 %ld5)
  %ctpop6  = call i8 @llvm.ctpop.i8(i8 %ld6)
  %ctpop7  = call i8 @llvm.ctpop.i8(i8 %ld7)
  %ctpop8  = call i8 @llvm.ctpop.i8(i8 %ld8)
  %ctpop9  = call i8 @llvm.ctpop.i8(i8 %ld9)
  %ctpop10 = call i8 @llvm.ctpop.i8(i8 %ld10)
  %ctpop11 = call i8 @llvm.ctpop.i8(i8 %ld11)
  %ctpop12 = call i8 @llvm.ctpop.i8(i8 %ld12)
  %ctpop13 = call i8 @llvm.ctpop.i8(i8 %ld13)
  %ctpop14 = call i8 @llvm.ctpop.i8(i8 %ld14)
  %ctpop15 = call i8 @llvm.ctpop.i8(i8 %ld15)
  %ctpop16 = call i8 @llvm.ctpop.i8(i8 %ld16)
  %ctpop17 = call i8 @llvm.ctpop.i8(i8 %ld17)
  %ctpop18 = call i8 @llvm.ctpop.i8(i8 %ld18)
  %ctpop19 = call i8 @llvm.ctpop.i8(i8 %ld19)
  %ctpop20 = call i8 @llvm.ctpop.i8(i8 %ld20)
  %ctpop21 = call i8 @llvm.ctpop.i8(i8 %ld21)
  %ctpop22 = call i8 @llvm.ctpop.i8(i8 %ld22)
  %ctpop23 = call i8 @llvm.ctpop.i8(i8 %ld23)
  %ctpop24 = call i8 @llvm.ctpop.i8(i8 %ld24)
  %ctpop25 = call i8 @llvm.ctpop.i8(i8 %ld25)
  %ctpop26 = call i8 @llvm.ctpop.i8(i8 %ld26)
  %ctpop27 = call i8 @llvm.ctpop.i8(i8 %ld27)
  %ctpop28 = call i8 @llvm.ctpop.i8(i8 %ld28)
  %ctpop29 = call i8 @llvm.ctpop.i8(i8 %ld29)
  %ctpop30 = call i8 @llvm.ctpop.i8(i8 %ld30)
  %ctpop31 = call i8 @llvm.ctpop.i8(i8 %ld31)
  store i8 %ctpop0 , ptr @dst8, align 1
  store i8 %ctpop1 , ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64  1), align 1
  store i8 %ctpop2 , ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64  2), align 1
  store i8 %ctpop3 , ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64  3), align 1
  store i8 %ctpop4 , ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64  4), align 1
  store i8 %ctpop5 , ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64  5), align 1
  store i8 %ctpop6 , ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64  6), align 1
  store i8 %ctpop7 , ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64  7), align 1
  store i8 %ctpop8 , ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64  8), align 1
  store i8 %ctpop9 , ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64  9), align 1
  store i8 %ctpop10, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 10), align 1
  store i8 %ctpop11, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 11), align 1
  store i8 %ctpop12, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 12), align 1
  store i8 %ctpop13, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 13), align 1
  store i8 %ctpop14, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 14), align 1
  store i8 %ctpop15, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 15), align 1
  store i8 %ctpop16, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 16), align 1
  store i8 %ctpop17, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 17), align 1
  store i8 %ctpop18, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 18), align 1
  store i8 %ctpop19, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 19), align 1
  store i8 %ctpop20, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 20), align 1
  store i8 %ctpop21, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 21), align 1
  store i8 %ctpop22, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 22), align 1
  store i8 %ctpop23, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 23), align 1
  store i8 %ctpop24, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 24), align 1
  store i8 %ctpop25, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 25), align 1
  store i8 %ctpop26, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 26), align 1
  store i8 %ctpop27, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 27), align 1
  store i8 %ctpop28, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 28), align 1
  store i8 %ctpop29, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 29), align 1
  store i8 %ctpop30, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 30), align 1
  store i8 %ctpop31, ptr getelementptr inbounds ([32 x i8], ptr @dst8, i8 0, i64 31), align 1
  ret void
}
510
attributes #0 = { nounwind }
