; xref: /llvm-project/llvm/test/Transforms/SLPVectorizer/RISCV/long-mask-split.ll (revision 2a50dac9fb034a39ace861f7feb60c43ba23e53c)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S -passes=slp-vectorizer -mtriple=riscv64-unknown-linux-gnu -mattr=+v < %s | FileCheck %s

; Reduced reproducer: a long scalar chain of (load -> lshr -> trunc -> and)
; accumulations feeding a single i8 store. Exercises SLP's handling of very
; long reduction chains with interspersed constant operands (long mask split);
; per the autogenerated CHECK lines the chain is expected to stay scalar
; (no profitable vectorization) and the pass must not crash on it.
define i32 @test() {
; CHECK-LABEL: define i32 @test(
; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  [[ENTRY:.*:]]
; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr null, i64 16), align 8
; CHECK-NEXT:    [[SHR_1_I:%.*]] = lshr i64 0, 0
; CHECK-NEXT:    [[SHR_1_I_13:%.*]] = lshr i64 0, [[TMP0]]
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[SHR_1_I_13]] to i8
; CHECK-NEXT:    [[STOREDV_1_I_13:%.*]] = and i8 0, [[TMP1]]
; CHECK-NEXT:    [[SHR_1_I_14:%.*]] = lshr i64 0, [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[SHR_1_I_14]] to i8
; CHECK-NEXT:    [[STOREDV_1_I_14:%.*]] = and i8 [[STOREDV_1_I_13]], [[TMP2]]
; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr null, i64 32), align 8
; CHECK-NEXT:    [[SHR_2_I:%.*]] = lshr i64 0, [[TMP3]]
; CHECK-NEXT:    [[TMP4:%.*]] = trunc i64 [[SHR_2_I]] to i8
; CHECK-NEXT:    [[STOREDV_2_I:%.*]] = and i8 [[STOREDV_1_I_14]], [[TMP4]]
; CHECK-NEXT:    [[SHR_2_I_1:%.*]] = lshr i64 0, [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = trunc i64 [[SHR_2_I_1]] to i8
; CHECK-NEXT:    [[STOREDV_2_I_1:%.*]] = and i8 [[STOREDV_2_I]], [[TMP5]]
; CHECK-NEXT:    [[SHR_2_I_2:%.*]] = lshr i64 0, [[TMP3]]
; CHECK-NEXT:    [[TMP6:%.*]] = trunc i64 [[SHR_2_I_2]] to i8
; CHECK-NEXT:    [[STOREDV_2_I_2:%.*]] = and i8 [[STOREDV_2_I_1]], [[TMP6]]
; CHECK-NEXT:    [[SHR_2_I_3:%.*]] = lshr i64 0, [[TMP3]]
; CHECK-NEXT:    [[TMP7:%.*]] = trunc i64 [[SHR_2_I_3]] to i8
; CHECK-NEXT:    [[STOREDV_2_I_3:%.*]] = and i8 [[STOREDV_2_I_2]], [[TMP7]]
; CHECK-NEXT:    [[SHR_2_I_4:%.*]] = lshr i64 0, [[TMP3]]
; CHECK-NEXT:    [[TMP8:%.*]] = trunc i64 [[SHR_2_I_4]] to i8
; CHECK-NEXT:    [[STOREDV_2_I_4:%.*]] = and i8 [[STOREDV_2_I_3]], [[TMP8]]
; CHECK-NEXT:    [[SHR_2_I_5:%.*]] = lshr i64 0, [[TMP3]]
; CHECK-NEXT:    [[TMP9:%.*]] = trunc i64 [[SHR_2_I_5]] to i8
; CHECK-NEXT:    [[STOREDV_2_I_5:%.*]] = and i8 [[STOREDV_2_I_4]], [[TMP9]]
; CHECK-NEXT:    [[SHR_2_I_6:%.*]] = lshr i64 0, [[TMP3]]
; CHECK-NEXT:    [[TMP10:%.*]] = trunc i64 [[SHR_2_I_6]] to i8
; CHECK-NEXT:    [[STOREDV_2_I_6:%.*]] = and i8 [[STOREDV_2_I_5]], [[TMP10]]
; CHECK-NEXT:    [[SHR_2_I_7:%.*]] = lshr i64 0, [[TMP3]]
; CHECK-NEXT:    [[TMP11:%.*]] = trunc i64 [[SHR_2_I_7]] to i8
; CHECK-NEXT:    [[STOREDV_2_I_7:%.*]] = and i8 [[STOREDV_2_I_6]], [[TMP11]]
; CHECK-NEXT:    [[STOREDV_2_I_8:%.*]] = and i8 [[STOREDV_2_I_7]], 0
; CHECK-NEXT:    [[SHR_2_I_9:%.*]] = lshr i64 0, [[TMP3]]
; CHECK-NEXT:    [[TMP12:%.*]] = trunc i64 [[SHR_2_I_9]] to i8
; CHECK-NEXT:    [[STOREDV_2_I_9:%.*]] = and i8 [[STOREDV_2_I_8]], [[TMP12]]
; CHECK-NEXT:    [[SHR_2_I_10:%.*]] = lshr i64 0, [[TMP3]]
; CHECK-NEXT:    [[TMP13:%.*]] = trunc i64 [[SHR_2_I_10]] to i8
; CHECK-NEXT:    [[STOREDV_2_I_10:%.*]] = and i8 [[STOREDV_2_I_9]], [[TMP13]]
; CHECK-NEXT:    [[SHR_2_I_11:%.*]] = lshr i64 0, [[TMP3]]
; CHECK-NEXT:    [[TMP14:%.*]] = trunc i64 [[SHR_2_I_11]] to i8
; CHECK-NEXT:    [[STOREDV_2_I_11:%.*]] = and i8 [[STOREDV_2_I_10]], [[TMP14]]
; CHECK-NEXT:    [[SHR_2_I_12:%.*]] = lshr i64 0, [[TMP3]]
; CHECK-NEXT:    [[TMP15:%.*]] = trunc i64 [[SHR_2_I_12]] to i8
; CHECK-NEXT:    [[STOREDV_2_I_12:%.*]] = and i8 [[STOREDV_2_I_11]], [[TMP15]]
; CHECK-NEXT:    [[SHR_2_I_13:%.*]] = lshr i64 0, [[TMP3]]
; CHECK-NEXT:    [[TMP16:%.*]] = trunc i64 [[SHR_2_I_13]] to i8
; CHECK-NEXT:    [[STOREDV_2_I_13:%.*]] = and i8 [[STOREDV_2_I_12]], [[TMP16]]
; CHECK-NEXT:    [[STOREDV_2_I_14:%.*]] = and i8 [[STOREDV_2_I_13]], 0
; CHECK-NEXT:    [[TMP17:%.*]] = load i64, ptr getelementptr (i8, ptr null, i64 48), align 8
; CHECK-NEXT:    [[SHR_3_I:%.*]] = lshr i64 0, [[TMP17]]
; CHECK-NEXT:    [[TMP18:%.*]] = trunc i64 [[SHR_3_I]] to i8
; CHECK-NEXT:    [[STOREDV_3_I:%.*]] = and i8 [[STOREDV_2_I_14]], [[TMP18]]
; CHECK-NEXT:    [[SHR_3_I_1:%.*]] = lshr i64 0, [[TMP17]]
; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[SHR_3_I_1]] to i8
; CHECK-NEXT:    [[STOREDV_3_I_1:%.*]] = and i8 [[STOREDV_3_I]], [[TMP19]]
; CHECK-NEXT:    [[STOREDV_3_I_2:%.*]] = and i8 [[STOREDV_3_I_1]], 0
; CHECK-NEXT:    [[SHR_3_I_3:%.*]] = lshr i64 0, [[TMP17]]
; CHECK-NEXT:    [[TMP20:%.*]] = trunc i64 [[SHR_3_I_3]] to i8
; CHECK-NEXT:    [[STOREDV_3_I_3:%.*]] = and i8 [[STOREDV_3_I_2]], [[TMP20]]
; CHECK-NEXT:    [[SHR_3_I_4:%.*]] = lshr i64 0, [[TMP17]]
; CHECK-NEXT:    [[TMP21:%.*]] = trunc i64 [[SHR_3_I_4]] to i8
; CHECK-NEXT:    [[STOREDV_3_I_4:%.*]] = and i8 [[STOREDV_3_I_3]], [[TMP21]]
; CHECK-NEXT:    [[SHR_3_I_5:%.*]] = lshr i64 0, [[TMP17]]
; CHECK-NEXT:    [[TMP22:%.*]] = trunc i64 [[SHR_3_I_5]] to i8
; CHECK-NEXT:    [[STOREDV_3_I_5:%.*]] = and i8 [[STOREDV_3_I_4]], [[TMP22]]
; CHECK-NEXT:    [[STOREDV_3_I_6:%.*]] = and i8 [[STOREDV_3_I_5]], 0
; CHECK-NEXT:    [[SHR_3_I_7:%.*]] = lshr i64 0, [[TMP17]]
; CHECK-NEXT:    [[TMP23:%.*]] = trunc i64 [[SHR_3_I_7]] to i8
; CHECK-NEXT:    [[STOREDV_3_I_7:%.*]] = and i8 [[STOREDV_3_I_6]], [[TMP23]]
; CHECK-NEXT:    [[SHR_3_I_8:%.*]] = lshr i64 0, [[TMP17]]
; CHECK-NEXT:    [[TMP24:%.*]] = trunc i64 [[SHR_3_I_8]] to i8
; CHECK-NEXT:    [[STOREDV_3_I_8:%.*]] = and i8 [[STOREDV_3_I_7]], [[TMP24]]
; CHECK-NEXT:    [[SHR_3_I_9:%.*]] = lshr i64 0, [[TMP17]]
; CHECK-NEXT:    [[TMP25:%.*]] = trunc i64 [[SHR_3_I_9]] to i8
; CHECK-NEXT:    [[STOREDV_3_I_9:%.*]] = and i8 [[STOREDV_3_I_8]], [[TMP25]]
; CHECK-NEXT:    [[SHR_3_I_10:%.*]] = lshr i64 0, [[TMP17]]
; CHECK-NEXT:    [[TMP26:%.*]] = trunc i64 [[SHR_3_I_10]] to i8
; CHECK-NEXT:    [[STOREDV_3_I_10:%.*]] = and i8 [[STOREDV_3_I_9]], [[TMP26]]
; CHECK-NEXT:    [[SHR_3_I_11:%.*]] = lshr i64 0, [[TMP17]]
; CHECK-NEXT:    [[TMP27:%.*]] = trunc i64 [[SHR_3_I_11]] to i8
; CHECK-NEXT:    [[STOREDV_3_I_11:%.*]] = and i8 [[STOREDV_3_I_10]], [[TMP27]]
; CHECK-NEXT:    [[STOREDV_3_I_12:%.*]] = and i8 [[STOREDV_3_I_11]], 0
; CHECK-NEXT:    [[SHR_3_I_13:%.*]] = lshr i64 0, [[TMP17]]
; CHECK-NEXT:    [[TMP28:%.*]] = trunc i64 [[SHR_3_I_13]] to i8
; CHECK-NEXT:    [[STOREDV_3_I_13:%.*]] = and i8 [[STOREDV_3_I_12]], [[TMP28]]
; CHECK-NEXT:    [[SHR_3_I_14:%.*]] = lshr i64 0, [[TMP17]]
; CHECK-NEXT:    [[TMP29:%.*]] = trunc i64 [[SHR_3_I_14]] to i8
; CHECK-NEXT:    [[STOREDV_3_I_14:%.*]] = and i8 [[STOREDV_3_I_13]], [[TMP29]]
; CHECK-NEXT:    [[TMP30:%.*]] = load i64, ptr null, align 8
; CHECK-NEXT:    [[STOREDV_4_I:%.*]] = and i8 [[STOREDV_3_I_14]], 0
; CHECK-NEXT:    [[SHR_4_I_1:%.*]] = lshr i64 0, [[TMP30]]
; CHECK-NEXT:    [[TMP31:%.*]] = trunc i64 [[SHR_4_I_1]] to i8
; CHECK-NEXT:    [[STOREDV_4_I_1:%.*]] = and i8 [[STOREDV_4_I]], [[TMP31]]
; CHECK-NEXT:    [[SHR_4_I_2:%.*]] = lshr i64 0, [[TMP30]]
; CHECK-NEXT:    [[TMP32:%.*]] = trunc i64 [[SHR_4_I_2]] to i8
; CHECK-NEXT:    [[STOREDV_4_I_2:%.*]] = and i8 [[STOREDV_4_I_1]], [[TMP32]]
; CHECK-NEXT:    [[SHR_4_I_3:%.*]] = lshr i64 0, [[TMP30]]
; CHECK-NEXT:    [[TMP33:%.*]] = trunc i64 [[SHR_4_I_3]] to i8
; CHECK-NEXT:    [[STOREDV_4_I_3:%.*]] = and i8 [[STOREDV_4_I_2]], [[TMP33]]
; CHECK-NEXT:    [[SHR_4_I_4:%.*]] = lshr i64 0, [[TMP30]]
; CHECK-NEXT:    [[TMP34:%.*]] = trunc i64 [[SHR_4_I_4]] to i8
; CHECK-NEXT:    [[STOREDV_4_I_4:%.*]] = and i8 [[STOREDV_4_I_3]], [[TMP34]]
; CHECK-NEXT:    [[STOREDV_4_I_5:%.*]] = and i8 [[STOREDV_4_I_4]], 0
; CHECK-NEXT:    [[SHR_4_I_6:%.*]] = lshr i64 0, [[TMP30]]
; CHECK-NEXT:    [[TMP35:%.*]] = trunc i64 [[SHR_4_I_6]] to i8
; CHECK-NEXT:    [[STOREDV_4_I_6:%.*]] = and i8 [[STOREDV_4_I_5]], [[TMP35]]
; CHECK-NEXT:    [[SHR_4_I_7:%.*]] = lshr i64 0, [[TMP30]]
; CHECK-NEXT:    [[TMP36:%.*]] = trunc i64 [[SHR_4_I_7]] to i8
; CHECK-NEXT:    [[STOREDV_4_I_7:%.*]] = and i8 [[STOREDV_4_I_6]], [[TMP36]]
; CHECK-NEXT:    [[SHR_4_I_8:%.*]] = lshr i64 0, [[TMP30]]
; CHECK-NEXT:    [[TMP37:%.*]] = trunc i64 [[SHR_4_I_8]] to i8
; CHECK-NEXT:    [[STOREDV_4_I_8:%.*]] = and i8 [[STOREDV_4_I_7]], [[TMP37]]
; CHECK-NEXT:    [[SHR_4_I_9:%.*]] = lshr i64 0, [[TMP30]]
; CHECK-NEXT:    [[TMP38:%.*]] = trunc i64 [[SHR_4_I_9]] to i8
; CHECK-NEXT:    [[STOREDV_4_I_9:%.*]] = and i8 [[STOREDV_4_I_8]], [[TMP38]]
; CHECK-NEXT:    [[SHR_4_I_10:%.*]] = lshr i64 0, [[TMP30]]
; CHECK-NEXT:    [[TMP39:%.*]] = trunc i64 [[SHR_4_I_10]] to i8
; CHECK-NEXT:    [[STOREDV_4_I_10:%.*]] = and i8 [[STOREDV_4_I_9]], [[TMP39]]
; CHECK-NEXT:    [[STOREDV_4_I_11:%.*]] = and i8 [[STOREDV_4_I_10]], 0
; CHECK-NEXT:    [[SHR_4_I_12:%.*]] = lshr i64 0, [[TMP30]]
; CHECK-NEXT:    [[TMP40:%.*]] = trunc i64 [[SHR_4_I_12]] to i8
; CHECK-NEXT:    [[STOREDV_4_I_12:%.*]] = and i8 [[STOREDV_4_I_11]], [[TMP40]]
; CHECK-NEXT:    [[SHR_4_I_13:%.*]] = lshr i64 0, [[TMP30]]
; CHECK-NEXT:    [[TMP41:%.*]] = trunc i64 [[SHR_4_I_13]] to i8
; CHECK-NEXT:    [[STOREDV_4_I_13:%.*]] = and i8 [[STOREDV_4_I_12]], [[TMP41]]
; CHECK-NEXT:    [[SHR_4_I_14:%.*]] = lshr i64 0, [[TMP30]]
; CHECK-NEXT:    [[TMP42:%.*]] = trunc i64 [[SHR_4_I_14]] to i8
; CHECK-NEXT:    [[STOREDV_4_I_14:%.*]] = and i8 [[STOREDV_4_I_13]], [[TMP42]]
; CHECK-NEXT:    [[TMP43:%.*]] = load i64, ptr getelementptr (i8, ptr null, i64 80), align 8
; CHECK-NEXT:    [[SHR_5_I:%.*]] = lshr i64 0, [[TMP43]]
; CHECK-NEXT:    [[TMP44:%.*]] = trunc i64 [[SHR_5_I]] to i8
; CHECK-NEXT:    [[STOREDV_5_I:%.*]] = and i8 [[STOREDV_4_I_14]], [[TMP44]]
; CHECK-NEXT:    [[SHR_5_I_1:%.*]] = lshr i64 0, [[TMP43]]
; CHECK-NEXT:    [[TMP45:%.*]] = trunc i64 [[SHR_5_I_1]] to i8
; CHECK-NEXT:    [[STOREDV_5_I_1:%.*]] = and i8 [[STOREDV_5_I]], [[TMP45]]
; CHECK-NEXT:    [[SHR_5_I_2:%.*]] = lshr i64 0, [[TMP43]]
; CHECK-NEXT:    [[TMP46:%.*]] = trunc i64 [[SHR_5_I_2]] to i8
; CHECK-NEXT:    [[STOREDV_5_I_2:%.*]] = and i8 [[STOREDV_5_I_1]], [[TMP46]]
; CHECK-NEXT:    [[STOREDV_5_I_3:%.*]] = and i8 [[STOREDV_5_I_2]], 0
; CHECK-NEXT:    [[SHR_5_I_4:%.*]] = lshr i64 0, [[TMP43]]
; CHECK-NEXT:    [[TMP47:%.*]] = trunc i64 [[SHR_5_I_4]] to i8
; CHECK-NEXT:    [[STOREDV_5_I_4:%.*]] = and i8 [[STOREDV_5_I_3]], [[TMP47]]
; CHECK-NEXT:    [[SHR_5_I_5:%.*]] = lshr i64 0, [[TMP43]]
; CHECK-NEXT:    [[TMP48:%.*]] = trunc i64 [[SHR_5_I_5]] to i8
; CHECK-NEXT:    [[STOREDV_5_I_5:%.*]] = and i8 [[STOREDV_5_I_4]], [[TMP48]]
; CHECK-NEXT:    [[SHR_5_I_6:%.*]] = lshr i64 0, [[TMP43]]
; CHECK-NEXT:    [[TMP49:%.*]] = trunc i64 [[SHR_5_I_6]] to i8
; CHECK-NEXT:    [[STOREDV_5_I_6:%.*]] = and i8 [[STOREDV_5_I_5]], [[TMP49]]
; CHECK-NEXT:    [[SHR_5_I_7:%.*]] = lshr i64 0, [[TMP43]]
; CHECK-NEXT:    [[TMP50:%.*]] = trunc i64 [[SHR_5_I_7]] to i8
; CHECK-NEXT:    [[STOREDV_5_I_7:%.*]] = and i8 [[STOREDV_5_I_6]], [[TMP50]]
; CHECK-NEXT:    [[SHR_5_I_8:%.*]] = lshr i64 0, [[TMP43]]
; CHECK-NEXT:    [[TMP51:%.*]] = trunc i64 [[SHR_5_I_8]] to i8
; CHECK-NEXT:    [[STOREDV_5_I_8:%.*]] = and i8 [[STOREDV_5_I_7]], [[TMP51]]
; CHECK-NEXT:    [[STOREDV_5_I_9:%.*]] = and i8 [[STOREDV_5_I_8]], 0
; CHECK-NEXT:    [[SHR_5_I_10:%.*]] = lshr i64 0, [[TMP43]]
; CHECK-NEXT:    [[TMP52:%.*]] = trunc i64 [[SHR_5_I_10]] to i8
; CHECK-NEXT:    [[STOREDV_5_I_10:%.*]] = and i8 [[STOREDV_5_I_9]], [[TMP52]]
; CHECK-NEXT:    [[SHR_5_I_11:%.*]] = lshr i64 0, [[TMP43]]
; CHECK-NEXT:    [[TMP53:%.*]] = trunc i64 [[SHR_5_I_11]] to i8
; CHECK-NEXT:    [[STOREDV_5_I_11:%.*]] = and i8 [[STOREDV_5_I_10]], [[TMP53]]
; CHECK-NEXT:    [[SHR_5_I_12:%.*]] = lshr i64 0, [[TMP43]]
; CHECK-NEXT:    [[TMP54:%.*]] = trunc i64 [[SHR_5_I_12]] to i8
; CHECK-NEXT:    [[STOREDV_5_I_12:%.*]] = and i8 [[STOREDV_5_I_11]], [[TMP54]]
; CHECK-NEXT:    [[SHR_5_I_13:%.*]] = lshr i64 0, [[TMP43]]
; CHECK-NEXT:    [[TMP55:%.*]] = trunc i64 [[SHR_5_I_13]] to i8
; CHECK-NEXT:    [[STOREDV_5_I_13:%.*]] = and i8 [[STOREDV_5_I_12]], [[TMP55]]
; CHECK-NEXT:    [[SHR_5_I_14:%.*]] = lshr i64 0, [[TMP43]]
; CHECK-NEXT:    [[TMP56:%.*]] = trunc i64 [[SHR_5_I_14]] to i8
; CHECK-NEXT:    [[STOREDV_5_I_14:%.*]] = and i8 [[STOREDV_5_I_13]], [[TMP56]]
; CHECK-NEXT:    [[TMP57:%.*]] = load i64, ptr null, align 8
; CHECK-NEXT:    [[SHR_6_I:%.*]] = lshr i64 0, [[TMP57]]
; CHECK-NEXT:    [[TMP58:%.*]] = trunc i64 [[SHR_6_I]] to i8
; CHECK-NEXT:    [[STOREDV_6_I:%.*]] = and i8 [[STOREDV_5_I_14]], [[TMP58]]
; CHECK-NEXT:    [[SHR_6_I_1:%.*]] = lshr i64 0, [[TMP57]]
; CHECK-NEXT:    [[TMP59:%.*]] = trunc i64 [[SHR_6_I_1]] to i8
; CHECK-NEXT:    [[STOREDV_6_I_1:%.*]] = and i8 [[STOREDV_6_I]], [[TMP59]]
; CHECK-NEXT:    [[SHR_6_I_2:%.*]] = lshr i64 0, [[TMP57]]
; CHECK-NEXT:    [[TMP60:%.*]] = trunc i64 [[SHR_6_I_2]] to i8
; CHECK-NEXT:    [[STOREDV_6_I_2:%.*]] = and i8 [[STOREDV_6_I_1]], [[TMP60]]
; CHECK-NEXT:    [[STOREDV_6_I_3:%.*]] = and i8 [[STOREDV_6_I_2]], 0
; CHECK-NEXT:    [[SHR_6_I_4:%.*]] = lshr i64 0, [[TMP57]]
; CHECK-NEXT:    [[TMP61:%.*]] = trunc i64 [[SHR_6_I_4]] to i8
; CHECK-NEXT:    [[STOREDV_6_I_4:%.*]] = and i8 [[STOREDV_6_I_3]], [[TMP61]]
; CHECK-NEXT:    [[SHR_6_I_5:%.*]] = lshr i64 0, [[TMP57]]
; CHECK-NEXT:    [[TMP62:%.*]] = trunc i64 [[SHR_6_I_5]] to i8
; CHECK-NEXT:    [[STOREDV_6_I_5:%.*]] = and i8 [[STOREDV_6_I_4]], [[TMP62]]
; CHECK-NEXT:    [[SHR_6_I_6:%.*]] = lshr i64 0, [[TMP57]]
; CHECK-NEXT:    [[TMP63:%.*]] = trunc i64 [[SHR_6_I_6]] to i8
; CHECK-NEXT:    [[STOREDV_6_I_6:%.*]] = and i8 [[STOREDV_6_I_5]], [[TMP63]]
; CHECK-NEXT:    [[SHR_6_I_7:%.*]] = lshr i64 0, [[TMP57]]
; CHECK-NEXT:    [[TMP64:%.*]] = trunc i64 [[SHR_6_I_7]] to i8
; CHECK-NEXT:    [[STOREDV_6_I_7:%.*]] = and i8 [[STOREDV_6_I_6]], [[TMP64]]
; CHECK-NEXT:    [[STOREDV_6_I_8:%.*]] = and i8 [[STOREDV_6_I_7]], 0
; CHECK-NEXT:    [[SHR_6_I_9:%.*]] = lshr i64 0, [[TMP57]]
; CHECK-NEXT:    [[TMP65:%.*]] = trunc i64 [[SHR_6_I_9]] to i8
; CHECK-NEXT:    [[STOREDV_6_I_9:%.*]] = and i8 [[STOREDV_6_I_8]], [[TMP65]]
; CHECK-NEXT:    [[SHR_6_I_10:%.*]] = lshr i64 0, [[TMP57]]
; CHECK-NEXT:    [[TMP66:%.*]] = trunc i64 [[SHR_6_I_10]] to i8
; CHECK-NEXT:    [[STOREDV_6_I_10:%.*]] = and i8 [[STOREDV_6_I_9]], [[TMP66]]
; CHECK-NEXT:    [[SHR_6_I_11:%.*]] = lshr i64 0, [[TMP57]]
; CHECK-NEXT:    [[TMP67:%.*]] = trunc i64 [[SHR_6_I_11]] to i8
; CHECK-NEXT:    [[STOREDV_6_I_11:%.*]] = and i8 [[STOREDV_6_I_10]], [[TMP67]]
; CHECK-NEXT:    [[SHR_6_I_12:%.*]] = lshr i64 0, [[TMP57]]
; CHECK-NEXT:    [[TMP68:%.*]] = trunc i64 [[SHR_6_I_12]] to i8
; CHECK-NEXT:    [[STOREDV_6_I_12:%.*]] = and i8 [[STOREDV_6_I_11]], [[TMP68]]
; CHECK-NEXT:    [[STOREDV_6_I_13:%.*]] = and i8 [[STOREDV_6_I_12]], 0
; CHECK-NEXT:    [[SHR_6_I_14:%.*]] = lshr i64 0, 0
; CHECK-NEXT:    [[TMP69:%.*]] = trunc i64 [[SHR_6_I_14]] to i8
; CHECK-NEXT:    [[STOREDV_6_I_14:%.*]] = and i8 [[STOREDV_6_I_13]], [[TMP69]]
; CHECK-NEXT:    store i8 [[STOREDV_6_I_14]], ptr null, align 1
; CHECK-NEXT:    ret i32 0
;
entry:
  %0 = load i64, ptr getelementptr (i8, ptr null, i64 16), align 8
  %shr.1.i = lshr i64 0, 0
  %shr.1.i.13 = lshr i64 0, %0
  %1 = trunc i64 %shr.1.i.13 to i8
  %storedv.1.i.13 = and i8 0, %1
  %shr.1.i.14 = lshr i64 0, %0
  %2 = trunc i64 %shr.1.i.14 to i8
  %storedv.1.i.14 = and i8 %storedv.1.i.13, %2
  %3 = load i64, ptr getelementptr (i8, ptr null, i64 32), align 8
  %shr.2.i = lshr i64 0, %3
  %4 = trunc i64 %shr.2.i to i8
  %storedv.2.i = and i8 %storedv.1.i.14, %4
  %shr.2.i.1 = lshr i64 0, %3
  %5 = trunc i64 %shr.2.i.1 to i8
  %storedv.2.i.1 = and i8 %storedv.2.i, %5
  %shr.2.i.2 = lshr i64 0, %3
  %6 = trunc i64 %shr.2.i.2 to i8
  %storedv.2.i.2 = and i8 %storedv.2.i.1, %6
  %shr.2.i.3 = lshr i64 0, %3
  %7 = trunc i64 %shr.2.i.3 to i8
  %storedv.2.i.3 = and i8 %storedv.2.i.2, %7
  %shr.2.i.4 = lshr i64 0, %3
  %8 = trunc i64 %shr.2.i.4 to i8
  %storedv.2.i.4 = and i8 %storedv.2.i.3, %8
  %shr.2.i.5 = lshr i64 0, %3
  %9 = trunc i64 %shr.2.i.5 to i8
  %storedv.2.i.5 = and i8 %storedv.2.i.4, %9
  %shr.2.i.6 = lshr i64 0, %3
  %10 = trunc i64 %shr.2.i.6 to i8
  %storedv.2.i.6 = and i8 %storedv.2.i.5, %10
  %shr.2.i.7 = lshr i64 0, %3
  %11 = trunc i64 %shr.2.i.7 to i8
  %storedv.2.i.7 = and i8 %storedv.2.i.6, %11
  %storedv.2.i.8 = and i8 %storedv.2.i.7, 0
  %shr.2.i.9 = lshr i64 0, %3
  %12 = trunc i64 %shr.2.i.9 to i8
  %storedv.2.i.9 = and i8 %storedv.2.i.8, %12
  %shr.2.i.10 = lshr i64 0, %3
  %13 = trunc i64 %shr.2.i.10 to i8
  %storedv.2.i.10 = and i8 %storedv.2.i.9, %13
  %shr.2.i.11 = lshr i64 0, %3
  %14 = trunc i64 %shr.2.i.11 to i8
  %storedv.2.i.11 = and i8 %storedv.2.i.10, %14
  %shr.2.i.12 = lshr i64 0, %3
  %15 = trunc i64 %shr.2.i.12 to i8
  %storedv.2.i.12 = and i8 %storedv.2.i.11, %15
  %shr.2.i.13 = lshr i64 0, %3
  %16 = trunc i64 %shr.2.i.13 to i8
  %storedv.2.i.13 = and i8 %storedv.2.i.12, %16
  %storedv.2.i.14 = and i8 %storedv.2.i.13, 0
  %17 = load i64, ptr getelementptr (i8, ptr null, i64 48), align 8
  %shr.3.i = lshr i64 0, %17
  %18 = trunc i64 %shr.3.i to i8
  %storedv.3.i = and i8 %storedv.2.i.14, %18
  %shr.3.i.1 = lshr i64 0, %17
  %19 = trunc i64 %shr.3.i.1 to i8
  %storedv.3.i.1 = and i8 %storedv.3.i, %19
  %storedv.3.i.2 = and i8 %storedv.3.i.1, 0
  %shr.3.i.3 = lshr i64 0, %17
  %20 = trunc i64 %shr.3.i.3 to i8
  %storedv.3.i.3 = and i8 %storedv.3.i.2, %20
  %shr.3.i.4 = lshr i64 0, %17
  %21 = trunc i64 %shr.3.i.4 to i8
  %storedv.3.i.4 = and i8 %storedv.3.i.3, %21
  %shr.3.i.5 = lshr i64 0, %17
  %22 = trunc i64 %shr.3.i.5 to i8
  %storedv.3.i.5 = and i8 %storedv.3.i.4, %22
  %storedv.3.i.6 = and i8 %storedv.3.i.5, 0
  %shr.3.i.7 = lshr i64 0, %17
  %23 = trunc i64 %shr.3.i.7 to i8
  %storedv.3.i.7 = and i8 %storedv.3.i.6, %23
  %shr.3.i.8 = lshr i64 0, %17
  %24 = trunc i64 %shr.3.i.8 to i8
  %storedv.3.i.8 = and i8 %storedv.3.i.7, %24
  %shr.3.i.9 = lshr i64 0, %17
  %25 = trunc i64 %shr.3.i.9 to i8
  %storedv.3.i.9 = and i8 %storedv.3.i.8, %25
  %shr.3.i.10 = lshr i64 0, %17
  %26 = trunc i64 %shr.3.i.10 to i8
  %storedv.3.i.10 = and i8 %storedv.3.i.9, %26
  %shr.3.i.11 = lshr i64 0, %17
  %27 = trunc i64 %shr.3.i.11 to i8
  %storedv.3.i.11 = and i8 %storedv.3.i.10, %27
  %storedv.3.i.12 = and i8 %storedv.3.i.11, 0
  %shr.3.i.13 = lshr i64 0, %17
  %28 = trunc i64 %shr.3.i.13 to i8
  %storedv.3.i.13 = and i8 %storedv.3.i.12, %28
  %shr.3.i.14 = lshr i64 0, %17
  %29 = trunc i64 %shr.3.i.14 to i8
  %storedv.3.i.14 = and i8 %storedv.3.i.13, %29
  %30 = load i64, ptr null, align 8
  %storedv.4.i = and i8 %storedv.3.i.14, 0
  %shr.4.i.1 = lshr i64 0, %30
  %31 = trunc i64 %shr.4.i.1 to i8
  %storedv.4.i.1 = and i8 %storedv.4.i, %31
  %shr.4.i.2 = lshr i64 0, %30
  %32 = trunc i64 %shr.4.i.2 to i8
  %storedv.4.i.2 = and i8 %storedv.4.i.1, %32
  %shr.4.i.3 = lshr i64 0, %30
  %33 = trunc i64 %shr.4.i.3 to i8
  %storedv.4.i.3 = and i8 %storedv.4.i.2, %33
  %shr.4.i.4 = lshr i64 0, %30
  %34 = trunc i64 %shr.4.i.4 to i8
  %storedv.4.i.4 = and i8 %storedv.4.i.3, %34
  %storedv.4.i.5 = and i8 %storedv.4.i.4, 0
  %shr.4.i.6 = lshr i64 0, %30
  %35 = trunc i64 %shr.4.i.6 to i8
  %storedv.4.i.6 = and i8 %storedv.4.i.5, %35
  %shr.4.i.7 = lshr i64 0, %30
  %36 = trunc i64 %shr.4.i.7 to i8
  %storedv.4.i.7 = and i8 %storedv.4.i.6, %36
  %shr.4.i.8 = lshr i64 0, %30
  %37 = trunc i64 %shr.4.i.8 to i8
  %storedv.4.i.8 = and i8 %storedv.4.i.7, %37
  %shr.4.i.9 = lshr i64 0, %30
  %38 = trunc i64 %shr.4.i.9 to i8
  %storedv.4.i.9 = and i8 %storedv.4.i.8, %38
  %shr.4.i.10 = lshr i64 0, %30
  %39 = trunc i64 %shr.4.i.10 to i8
  %storedv.4.i.10 = and i8 %storedv.4.i.9, %39
  %storedv.4.i.11 = and i8 %storedv.4.i.10, 0
  %shr.4.i.12 = lshr i64 0, %30
  %40 = trunc i64 %shr.4.i.12 to i8
  %storedv.4.i.12 = and i8 %storedv.4.i.11, %40
  %shr.4.i.13 = lshr i64 0, %30
  %41 = trunc i64 %shr.4.i.13 to i8
  %storedv.4.i.13 = and i8 %storedv.4.i.12, %41
  %shr.4.i.14 = lshr i64 0, %30
  %42 = trunc i64 %shr.4.i.14 to i8
  %storedv.4.i.14 = and i8 %storedv.4.i.13, %42
  %43 = load i64, ptr getelementptr (i8, ptr null, i64 80), align 8
  %shr.5.i = lshr i64 0, %43
  %44 = trunc i64 %shr.5.i to i8
  %storedv.5.i = and i8 %storedv.4.i.14, %44
  %shr.5.i.1 = lshr i64 0, %43
  %45 = trunc i64 %shr.5.i.1 to i8
  %storedv.5.i.1 = and i8 %storedv.5.i, %45
  %shr.5.i.2 = lshr i64 0, %43
  %46 = trunc i64 %shr.5.i.2 to i8
  %storedv.5.i.2 = and i8 %storedv.5.i.1, %46
  %storedv.5.i.3 = and i8 %storedv.5.i.2, 0
  %shr.5.i.4 = lshr i64 0, %43
  %47 = trunc i64 %shr.5.i.4 to i8
  %storedv.5.i.4 = and i8 %storedv.5.i.3, %47
  %shr.5.i.5 = lshr i64 0, %43
  %48 = trunc i64 %shr.5.i.5 to i8
  %storedv.5.i.5 = and i8 %storedv.5.i.4, %48
  %shr.5.i.6 = lshr i64 0, %43
  %49 = trunc i64 %shr.5.i.6 to i8
  %storedv.5.i.6 = and i8 %storedv.5.i.5, %49
  %shr.5.i.7 = lshr i64 0, %43
  %50 = trunc i64 %shr.5.i.7 to i8
  %storedv.5.i.7 = and i8 %storedv.5.i.6, %50
  %shr.5.i.8 = lshr i64 0, %43
  %51 = trunc i64 %shr.5.i.8 to i8
  %storedv.5.i.8 = and i8 %storedv.5.i.7, %51
  %storedv.5.i.9 = and i8 %storedv.5.i.8, 0
  %shr.5.i.10 = lshr i64 0, %43
  %52 = trunc i64 %shr.5.i.10 to i8
  %storedv.5.i.10 = and i8 %storedv.5.i.9, %52
  %shr.5.i.11 = lshr i64 0, %43
  %53 = trunc i64 %shr.5.i.11 to i8
  %storedv.5.i.11 = and i8 %storedv.5.i.10, %53
  %shr.5.i.12 = lshr i64 0, %43
  %54 = trunc i64 %shr.5.i.12 to i8
  %storedv.5.i.12 = and i8 %storedv.5.i.11, %54
  %shr.5.i.13 = lshr i64 0, %43
  %55 = trunc i64 %shr.5.i.13 to i8
  %storedv.5.i.13 = and i8 %storedv.5.i.12, %55
  %shr.5.i.14 = lshr i64 0, %43
  %56 = trunc i64 %shr.5.i.14 to i8
  %storedv.5.i.14 = and i8 %storedv.5.i.13, %56
  %57 = load i64, ptr null, align 8
  %shr.6.i = lshr i64 0, %57
  %58 = trunc i64 %shr.6.i to i8
  %storedv.6.i = and i8 %storedv.5.i.14, %58
  %shr.6.i.1 = lshr i64 0, %57
  %59 = trunc i64 %shr.6.i.1 to i8
  %storedv.6.i.1 = and i8 %storedv.6.i, %59
  %shr.6.i.2 = lshr i64 0, %57
  %60 = trunc i64 %shr.6.i.2 to i8
  %storedv.6.i.2 = and i8 %storedv.6.i.1, %60
  %storedv.6.i.3 = and i8 %storedv.6.i.2, 0
  %shr.6.i.4 = lshr i64 0, %57
  %61 = trunc i64 %shr.6.i.4 to i8
  %storedv.6.i.4 = and i8 %storedv.6.i.3, %61
  %shr.6.i.5 = lshr i64 0, %57
  %62 = trunc i64 %shr.6.i.5 to i8
  %storedv.6.i.5 = and i8 %storedv.6.i.4, %62
  %shr.6.i.6 = lshr i64 0, %57
  %63 = trunc i64 %shr.6.i.6 to i8
  %storedv.6.i.6 = and i8 %storedv.6.i.5, %63
  %shr.6.i.7 = lshr i64 0, %57
  %64 = trunc i64 %shr.6.i.7 to i8
  %storedv.6.i.7 = and i8 %storedv.6.i.6, %64
  %storedv.6.i.8 = and i8 %storedv.6.i.7, 0
  %shr.6.i.9 = lshr i64 0, %57
  %65 = trunc i64 %shr.6.i.9 to i8
  %storedv.6.i.9 = and i8 %storedv.6.i.8, %65
  %shr.6.i.10 = lshr i64 0, %57
  %66 = trunc i64 %shr.6.i.10 to i8
  %storedv.6.i.10 = and i8 %storedv.6.i.9, %66
  %shr.6.i.11 = lshr i64 0, %57
  %67 = trunc i64 %shr.6.i.11 to i8
  %storedv.6.i.11 = and i8 %storedv.6.i.10, %67
  %shr.6.i.12 = lshr i64 0, %57
  %68 = trunc i64 %shr.6.i.12 to i8
  %storedv.6.i.12 = and i8 %storedv.6.i.11, %68
  %storedv.6.i.13 = and i8 %storedv.6.i.12, 0
  %shr.6.i.14 = lshr i64 0, 0
  %69 = trunc i64 %shr.6.i.14 to i8
  %storedv.6.i.14 = and i8 %storedv.6.i.13, %69
  store i8 %storedv.6.i.14, ptr null, align 1
  ret i32 0
}
439