; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -p sroa -S %s | FileCheck %s


target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"

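; Check that splitting the i64 load/store of { float, float } propagates the
; per-field !tbaa tags from !tbaa.struct !0 onto the two new i32 stores.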
define void @load_store_transfer_split_struct_tbaa_2_float(ptr dereferenceable(24) %res, float %a, float %b) {
; CHECK-LABEL: define void @load_store_transfer_split_struct_tbaa_2_float(
; CHECK-SAME: ptr dereferenceable(24) [[RES:%.*]], float [[A:%.*]], float [[B:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float [[A]] to i32
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float [[B]] to i32
; CHECK-NEXT:    store i32 [[TMP0]], ptr [[RES]], align 4, !tbaa [[TBAA0:![0-9]+]]
; CHECK-NEXT:    [[RES_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[RES]], i64 4
; CHECK-NEXT:    store i32 [[TMP1]], ptr [[RES_SROA_IDX]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT:    [[P:%.*]] = load ptr, ptr [[RES]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca { float, float }, align 4
  store float %a, ptr %tmp, align 4
  %tmp.4 = getelementptr inbounds i8, ptr %tmp, i64 4
  store float %b, ptr %tmp.4, align 4
  %l1 = load i64, ptr %tmp, !tbaa.struct !0
  store i64 %l1, ptr %res, !tbaa.struct !0
  %p = load ptr, ptr %res, align 8
  ret void
}

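; Check that rewriting the memcpy into per-field float stores transfers the
; field tags from !tbaa.struct !0.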
define void @memcpy_transfer(ptr dereferenceable(24) %res, float %a, float %b) {
; CHECK-LABEL: define void @memcpy_transfer(
; CHECK-SAME: ptr dereferenceable(24) [[RES:%.*]], float [[A:%.*]], float [[B:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[L_PTR:%.*]] = load ptr, ptr [[RES]], align 8
; CHECK-NEXT:    store float [[A]], ptr [[L_PTR]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT:    [[TMP_SROA_2_0_L_PTR_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[L_PTR]], i64 4
; CHECK-NEXT:    store float [[B]], ptr [[TMP_SROA_2_0_L_PTR_SROA_IDX]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca { float, float }, align 4
  store float %a, ptr %tmp, align 4
  %__im_.i.i = getelementptr inbounds i8, ptr %tmp, i64 4
  store float %b, ptr %__im_.i.i, align 4
  %l.ptr = load ptr, ptr %res, align 8
  call void @llvm.memcpy.p0.p0.i64(ptr %l.ptr, ptr %tmp, i64 8, i1 false), !tbaa.struct !0
  ret void
}

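; The 6-byte memcpy does not cover the full second field described by
; !tbaa.struct !0, so the rewritten i16 store for the trailing part carries no
; !tbaa tag.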
define void @memcpy_transfer_tbaa_field_and_size_do_not_align(ptr dereferenceable(24) %res, float %a, float %b) {
; CHECK-LABEL: define void @memcpy_transfer_tbaa_field_and_size_do_not_align(
; CHECK-SAME: ptr dereferenceable(24) [[RES:%.*]], float [[A:%.*]], float [[B:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[L_PTR:%.*]] = load ptr, ptr [[RES]], align 8
; CHECK-NEXT:    store float [[A]], ptr [[L_PTR]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT:    [[TMP_SROA_2_0_L_PTR_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[L_PTR]], i64 4
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float [[B]] to i32
; CHECK-NEXT:    [[TMP_SROA_2_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0]] to i16
; CHECK-NEXT:    store i16 [[TMP_SROA_2_0_EXTRACT_TRUNC]], ptr [[TMP_SROA_2_0_L_PTR_SROA_IDX]], align 1
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca { float, float }, align 4
  store float %a, ptr %tmp, align 4
  %__im_.i.i = getelementptr inbounds i8, ptr %tmp, i64 4
  store float %b, ptr %__im_.i.i, align 4
  %l.ptr = load ptr, ptr %res, align 8
  call void @llvm.memcpy.p0.p0.i64(ptr %l.ptr, ptr %tmp, i64 6, i1 false), !tbaa.struct !0
  ret void
}

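; With i31 fields the i62 accesses are not split; the original !tbaa.struct
; metadata is kept on the rewritten load and store.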
define void @load_store_transfer_split_struct_tbaa_2_i31(ptr dereferenceable(24) %res, i31 %a, i31 %b) {
; CHECK-LABEL: define void @load_store_transfer_split_struct_tbaa_2_i31(
; CHECK-SAME: ptr dereferenceable(24) [[RES:%.*]], i31 [[A:%.*]], i31 [[B:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca { i31, i31 }, align 4
; CHECK-NEXT:    store i31 [[A]], ptr [[TMP]], align 4
; CHECK-NEXT:    [[TMP_4_TMP_4_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 4
; CHECK-NEXT:    store i31 [[B]], ptr [[TMP_4_TMP_4_SROA_IDX]], align 4
; CHECK-NEXT:    [[TMP_0_L1:%.*]] = load i62, ptr [[TMP]], align 4, !tbaa.struct [[TBAA_STRUCT4:![0-9]+]]
; CHECK-NEXT:    store i62 [[TMP_0_L1]], ptr [[RES]], align 4, !tbaa.struct [[TBAA_STRUCT4]]
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca { i31 , i31 }, align 4
  store i31 %a, ptr %tmp, align 4
  %tmp.4  = getelementptr inbounds i8, ptr %tmp, i64 4
  store i31 %b, ptr %tmp.4, align 4
  %l1 = load i62, ptr %tmp, !tbaa.struct !0
  store i62 %l1, ptr %res, !tbaa.struct !0
  ret void
}


declare <2 x float> @foo(ptr)

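; !tbaa.struct !7 describes a <2 x float> field followed by a float field; the
; split stores should get the v2f32 and float tags respectively.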
define void @store_vector_part_first(ptr %y2, float %f) {
; CHECK-LABEL: define void @store_vector_part_first(
; CHECK-SAME: ptr [[Y2:%.*]], float [[F:%.*]]) {
; CHECK-NEXT:    [[V_1:%.*]] = call <2 x float> @foo(ptr [[Y2]])
; CHECK-NEXT:    store <2 x float> [[V_1]], ptr [[Y2]], align 8, !tbaa [[TBAA5:![0-9]+]]
; CHECK-NEXT:    [[X7_SROA_2_0_Y2_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[Y2]], i64 8
; CHECK-NEXT:    store float [[F]], ptr [[X7_SROA_2_0_Y2_SROA_IDX]], align 8, !tbaa [[TBAA0]]
; CHECK-NEXT:    ret void
;
  %x7 = alloca { float, float, float, float }
  %v.1 = call <2 x float> @foo(ptr %y2)
  store <2 x float> %v.1, ptr %x7
  %gep = getelementptr i8, ptr %x7, i64 8
  store float %f, ptr %gep
  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %y2, ptr align 8 %x7, i64 12, i1 false), !tbaa.struct !7
  ret void
}

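; Same as @store_vector_part_first, but with the scalar field first
; (!tbaa.struct !8).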
define void @store_vector_part_second(ptr %y2, float %f) {
; CHECK-LABEL: define void @store_vector_part_second(
; CHECK-SAME: ptr [[Y2:%.*]], float [[F:%.*]]) {
; CHECK-NEXT:    [[V_1:%.*]] = call <2 x float> @foo(ptr [[Y2]])
; CHECK-NEXT:    store float [[F]], ptr [[Y2]], align 8, !tbaa [[TBAA0]]
; CHECK-NEXT:    [[X7_SROA_2_0_Y2_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[Y2]], i64 4
; CHECK-NEXT:    store <2 x float> [[V_1]], ptr [[X7_SROA_2_0_Y2_SROA_IDX]], align 4, !tbaa [[TBAA5]]
; CHECK-NEXT:    ret void
;
  %x7 = alloca { float, float, float, float }
  %v.1 = call <2 x float> @foo(ptr %y2)
  store float %f, ptr %x7
  %gep = getelementptr i8, ptr %x7, i64 4
  store <2 x float> %v.1, ptr %gep
  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %y2, ptr align 8 %x7, i64 12, i1 false), !tbaa.struct !8
  ret void
}

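; The whole 8-byte copy matches the first field of !tbaa.struct !9, so the
; single rewritten vector store gets the v2f32 tag.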
define void @store_vector_single(ptr %y2, float %f) {
; CHECK-LABEL: define void @store_vector_single(
; CHECK-SAME: ptr [[Y2:%.*]], float [[F:%.*]]) {
; CHECK-NEXT:    [[V_1:%.*]] = call <2 x float> @foo(ptr [[Y2]])
; CHECK-NEXT:    store <2 x float> [[V_1]], ptr [[Y2]], align 4, !tbaa [[TBAA5]]
; CHECK-NEXT:    ret void
;
  %x7 = alloca { float, float }
  %v.1 = call <2 x float> @foo(ptr %y2)
  store <2 x float> %v.1, ptr %x7
  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %y2, ptr align 4 %x7, i64 8, i1 false), !tbaa.struct !9
  ret void
}

declare void @llvm.memset.p0.i8(ptr nocapture, i8, i32, i1) nounwind

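; Check how !tbaa.struct !12 on the memset is distributed when SROA splits the
; memset across multiple alloca slices.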
define void @memset(ptr %dst, ptr align 8 %src) {
; CHECK-LABEL: define void @memset(
; CHECK-SAME: ptr [[DST:%.*]], ptr align 8 [[SRC:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_SROA_0:%.*]] = alloca [7 x i8], align 1
; CHECK-NEXT:    [[A_SROA_3:%.*]] = alloca i16, align 2
; CHECK-NEXT:    [[A_SROA_4:%.*]] = alloca [10 x i8], align 1
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_0]], ptr align 8 [[SRC]], i32 7, i1 false)
; CHECK-NEXT:    [[A_SROA_3_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 7
; CHECK-NEXT:    [[A_SROA_3_0_COPYLOAD:%.*]] = load i16, ptr [[A_SROA_3_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT:    store i16 [[A_SROA_3_0_COPYLOAD]], ptr [[A_SROA_3]], align 2
; CHECK-NEXT:    [[A_SROA_4_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 9
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_4]], ptr align 1 [[A_SROA_4_0_SRC_SROA_IDX]], i32 10, i1 false)
; CHECK-NEXT:    store i16 1, ptr [[A_SROA_3]], align 2
; CHECK-NEXT:    [[A_SROA_0_1_A_1_SROA_IDX2:%.*]] = getelementptr inbounds i8, ptr [[A_SROA_0]], i64 1
; CHECK-NEXT:    call void @llvm.memset.p0.i32(ptr align 1 [[A_SROA_0_1_A_1_SROA_IDX2]], i8 42, i32 6, i1 false)
; CHECK-NEXT:    store i16 10794, ptr [[A_SROA_3]], align 2, !tbaa [[TBAA0]]
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[DST]], ptr align 1 [[A_SROA_0]], i32 7, i1 true)
; CHECK-NEXT:    [[A_SROA_3_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 7
; CHECK-NEXT:    [[A_SROA_3_0_A_SROA_3_0_COPYLOAD1:%.*]] = load volatile i16, ptr [[A_SROA_3]], align 2
; CHECK-NEXT:    store volatile i16 [[A_SROA_3_0_A_SROA_3_0_COPYLOAD1]], ptr [[A_SROA_3_0_DST_SROA_IDX]], align 1
; CHECK-NEXT:    [[A_SROA_4_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 9
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_4_0_DST_SROA_IDX]], ptr align 1 [[A_SROA_4]], i32 10, i1 true)
; CHECK-NEXT:    ret void
;
entry:
  %a = alloca [19 x i8]
  call void @llvm.memcpy.p0.p0.i32(ptr %a, ptr align 8 %src, i32 19, i1 false)

  %a.1 = getelementptr i8, ptr %a, i64 1
  %a.7 = getelementptr i8, ptr %a, i64 7
  store i16 1, ptr %a.7
  call void @llvm.memset.p0.i32(ptr %a.1, i8 42, i32 8, i1 false), !tbaa.struct !12

  call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %a, i32 19, i1 true)
  ret void
}

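; Like @memset, but with a larger alloca and !tbaa.struct !15; here both
; rewritten pieces keep the v2f32 tag.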
define void @memset2(ptr %dst, ptr align 8 %src) {
; CHECK-LABEL: define void @memset2(
; CHECK-SAME: ptr [[DST:%.*]], ptr align 8 [[SRC:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_SROA_0:%.*]] = alloca [209 x i8], align 1
; CHECK-NEXT:    [[A_SROA_3:%.*]] = alloca i8, align 1
; CHECK-NEXT:    [[A_SROA_4:%.*]] = alloca [90 x i8], align 1
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_0]], ptr align 8 [[SRC]], i32 209, i1 false)
; CHECK-NEXT:    [[A_SROA_3_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 209
; CHECK-NEXT:    [[A_SROA_3_0_COPYLOAD:%.*]] = load i8, ptr [[A_SROA_3_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT:    store i8 [[A_SROA_3_0_COPYLOAD]], ptr [[A_SROA_3]], align 1
; CHECK-NEXT:    [[A_SROA_4_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 210
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_4]], ptr align 2 [[A_SROA_4_0_SRC_SROA_IDX]], i32 90, i1 false)
; CHECK-NEXT:    store i8 1, ptr [[A_SROA_3]], align 1
; CHECK-NEXT:    [[A_SROA_0_202_A_202_SROA_IDX2:%.*]] = getelementptr inbounds i8, ptr [[A_SROA_0]], i64 202
; CHECK-NEXT:    call void @llvm.memset.p0.i32(ptr align 1 [[A_SROA_0_202_A_202_SROA_IDX2]], i8 42, i32 7, i1 false), !tbaa [[TBAA5]]
; CHECK-NEXT:    store i8 42, ptr [[A_SROA_3]], align 1, !tbaa [[TBAA5]]
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[DST]], ptr align 1 [[A_SROA_0]], i32 209, i1 true)
; CHECK-NEXT:    [[A_SROA_3_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 209
; CHECK-NEXT:    [[A_SROA_3_0_A_SROA_3_0_COPYLOAD1:%.*]] = load volatile i8, ptr [[A_SROA_3]], align 1
; CHECK-NEXT:    store volatile i8 [[A_SROA_3_0_A_SROA_3_0_COPYLOAD1]], ptr [[A_SROA_3_0_DST_SROA_IDX]], align 1
; CHECK-NEXT:    [[A_SROA_4_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 210
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_4_0_DST_SROA_IDX]], ptr align 1 [[A_SROA_4]], i32 90, i1 true)
; CHECK-NEXT:    ret void
;
entry:
  %a = alloca [300 x i8]

  call void @llvm.memcpy.p0.p0.i32(ptr %a, ptr align 8 %src, i32 300, i1 false)

  %a.202 = getelementptr [300 x i8], ptr %a, i64 0, i64 202

  %a.209 = getelementptr [300 x i8], ptr %a, i64 0, i64 209


  store i8 1, ptr  %a.209

  call void @llvm.memset.p0.i32(ptr %a.202, i8 42, i32 8, i1 false), !tbaa.struct !15

  call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %a, i32 300, i1 true)

  ret void
}



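; The i32 store tagged with !tbaa.struct !10 covers more bytes than the
; promoted <2 x i8> slice; check the metadata on the narrowed store.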
define void @slice_store_v2i8_1(ptr %dst, ptr %dst.2, ptr %src) {
; CHECK-LABEL: define void @slice_store_v2i8_1(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_SROA_0:%.*]] = alloca [6 x i8], align 1
; CHECK-NEXT:    [[A_SROA_2_SROA_0:%.*]] = alloca <2 x i8>, align 4
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_0]], ptr align 8 [[SRC]], i32 6, i1 false)
; CHECK-NEXT:    [[A_SROA_2_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 6
; CHECK-NEXT:    [[A_SROA_2_SROA_0_0_COPYLOAD:%.*]] = load <2 x i8>, ptr [[A_SROA_2_0_SRC_SROA_IDX]], align 2
; CHECK-NEXT:    store <2 x i8> [[A_SROA_2_SROA_0_0_COPYLOAD]], ptr [[A_SROA_2_SROA_0]], align 4
; CHECK-NEXT:    store <2 x i8> bitcast (<1 x i16> splat (i16 123) to <2 x i8>), ptr [[A_SROA_2_SROA_0]], align 4
; CHECK-NEXT:    [[A_SROA_2_SROA_0_0_A_SROA_2_SROA_0_0_A_SROA_2_6_V_4:%.*]] = load <2 x i8>, ptr [[A_SROA_2_SROA_0]], align 4
; CHECK-NEXT:    store <2 x i8> [[A_SROA_2_SROA_0_0_A_SROA_2_SROA_0_0_A_SROA_2_6_V_4]], ptr [[DST_2]], align 2
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[DST]], ptr align 1 [[A_SROA_0]], i32 6, i1 true)
; CHECK-NEXT:    [[A_SROA_2_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 6
; CHECK-NEXT:    [[A_SROA_2_SROA_0_0_A_SROA_2_SROA_0_0_COPYLOAD1:%.*]] = load volatile <2 x i8>, ptr [[A_SROA_2_SROA_0]], align 4
; CHECK-NEXT:    store volatile <2 x i8> [[A_SROA_2_SROA_0_0_A_SROA_2_SROA_0_0_COPYLOAD1]], ptr [[A_SROA_2_0_DST_SROA_IDX]], align 1
; CHECK-NEXT:    ret void
;
entry:
  %a = alloca [20 x i8]

  call void @llvm.memcpy.p0.p0.i32(ptr %a, ptr align 8 %src, i32 8, i1 false)
  %a.6 = getelementptr inbounds i8, ptr %a, i64 6

  store i32 123, ptr %a.6, !tbaa.struct !10

  %v.4 = load <2 x i8>, ptr %a.6

  store <2 x i8> %v.4, ptr %dst.2

  call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr align 8 %a, i32 8, i1 true)
  ret void
}

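; Variant where the store tagged with !tbaa.struct !11 straddles several
; rewritten slices.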
define void @slice_store_v2i8_2(ptr %dst, ptr %dst.2, ptr %src) {
; CHECK-LABEL: define void @slice_store_v2i8_2(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_SROA_0_SROA_1:%.*]] = alloca <2 x i8>, align 2
; CHECK-NEXT:    [[A_SROA_0_SROA_4:%.*]] = alloca i8, align 1
; CHECK-NEXT:    [[A_SROA_4:%.*]] = alloca [5 x i8], align 1
; CHECK-NEXT:    [[A_SROA_0_SROA_1_1_COPYLOAD:%.*]] = load <2 x i8>, ptr [[SRC]], align 8
; CHECK-NEXT:    store <2 x i8> [[A_SROA_0_SROA_1_1_COPYLOAD]], ptr [[A_SROA_0_SROA_1]], align 2
; CHECK-NEXT:    [[A_SROA_0_SROA_4_1_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 2
; CHECK-NEXT:    [[A_SROA_0_SROA_4_1_COPYLOAD:%.*]] = load i8, ptr [[A_SROA_0_SROA_4_1_SRC_SROA_IDX]], align 2
; CHECK-NEXT:    store i8 [[A_SROA_0_SROA_4_1_COPYLOAD]], ptr [[A_SROA_0_SROA_4]], align 1
; CHECK-NEXT:    [[A_SROA_4_1_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 3
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_4]], ptr align 1 [[A_SROA_4_1_SRC_SROA_IDX]], i32 5, i1 false)
; CHECK-NEXT:    store <2 x i8> zeroinitializer, ptr [[A_SROA_0_SROA_1]], align 2
; CHECK-NEXT:    store i8 0, ptr [[A_SROA_0_SROA_4]], align 1
; CHECK-NEXT:    [[A_SROA_0_SROA_1_0_A_SROA_0_SROA_1_1_A_SROA_0_1_V_4:%.*]] = load <2 x i8>, ptr [[A_SROA_0_SROA_1]], align 2
; CHECK-NEXT:    store <2 x i8> [[A_SROA_0_SROA_1_0_A_SROA_0_SROA_1_1_A_SROA_0_1_V_4]], ptr [[DST_2]], align 2
; CHECK-NEXT:    [[A_SROA_0_SROA_1_0_A_SROA_0_SROA_1_1_COPYLOAD3:%.*]] = load volatile <2 x i8>, ptr [[A_SROA_0_SROA_1]], align 2
; CHECK-NEXT:    store volatile <2 x i8> [[A_SROA_0_SROA_1_0_A_SROA_0_SROA_1_1_COPYLOAD3]], ptr [[DST]], align 1
; CHECK-NEXT:    [[A_SROA_0_SROA_4_1_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 2
; CHECK-NEXT:    [[A_SROA_0_SROA_4_0_A_SROA_0_SROA_4_1_COPYLOAD4:%.*]] = load volatile i8, ptr [[A_SROA_0_SROA_4]], align 1
; CHECK-NEXT:    store volatile i8 [[A_SROA_0_SROA_4_0_A_SROA_0_SROA_4_1_COPYLOAD4]], ptr [[A_SROA_0_SROA_4_1_DST_SROA_IDX]], align 1
; CHECK-NEXT:    [[A_SROA_4_1_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 3
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_4_1_DST_SROA_IDX]], ptr align 1 [[A_SROA_4]], i32 5, i1 true)
; CHECK-NEXT:    ret void
;
entry:
  %a = alloca [20 x i8]

  %a.1 = getelementptr inbounds i8, ptr %a, i64 1
  call void @llvm.memcpy.p0.p0.i32(ptr %a.1, ptr align 8 %src, i32 8, i1 false)

  store i32 123, ptr %a, !tbaa.struct !11

  %v.4 = load <2 x i8>, ptr %a.1
  store <2 x i8> %v.4, ptr %dst.2

  call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr align 8 %a.1, i32 8, i1 true)
  ret void
}

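; !tbaa.struct !13 has a single field covering the loaded double, so the
; rewritten load keeps that field's access tag.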
define double @tbaa_struct_load(ptr %src, ptr %dst) {
; CHECK-LABEL: define double @tbaa_struct_load(
; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DST:%.*]]) {
; CHECK-NEXT:    [[TMP_SROA_0:%.*]] = alloca double, align 8
; CHECK-NEXT:    [[TMP_SROA_3:%.*]] = alloca i64, align 8
; CHECK-NEXT:    [[TMP_SROA_0_0_COPYLOAD:%.*]] = load double, ptr [[SRC]], align 8
; CHECK-NEXT:    store double [[TMP_SROA_0_0_COPYLOAD]], ptr [[TMP_SROA_0]], align 8
; CHECK-NEXT:    [[TMP_SROA_3_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 8
; CHECK-NEXT:    [[TMP_SROA_3_0_COPYLOAD:%.*]] = load i64, ptr [[TMP_SROA_3_0_SRC_SROA_IDX]], align 8
; CHECK-NEXT:    store i64 [[TMP_SROA_3_0_COPYLOAD]], ptr [[TMP_SROA_3]], align 8
; CHECK-NEXT:    [[TMP_SROA_0_0_TMP_SROA_0_0_LG:%.*]] = load double, ptr [[TMP_SROA_0]], align 8, !tbaa [[TBAA5]]
; CHECK-NEXT:    [[TMP_SROA_0_0_TMP_SROA_0_0_COPYLOAD1:%.*]] = load volatile double, ptr [[TMP_SROA_0]], align 8
; CHECK-NEXT:    store volatile double [[TMP_SROA_0_0_TMP_SROA_0_0_COPYLOAD1]], ptr [[DST]], align 8
; CHECK-NEXT:    [[TMP_SROA_3_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 8
; CHECK-NEXT:    [[TMP_SROA_3_0_TMP_SROA_3_0_COPYLOAD2:%.*]] = load volatile i64, ptr [[TMP_SROA_3]], align 8
; CHECK-NEXT:    store volatile i64 [[TMP_SROA_3_0_TMP_SROA_3_0_COPYLOAD2]], ptr [[TMP_SROA_3_0_DST_SROA_IDX]], align 8
; CHECK-NEXT:    ret double [[TMP_SROA_0_0_TMP_SROA_0_0_LG]]
;
  %tmp = alloca [16 x i8], align 8
  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp, ptr align 8 %src, i64 16, i1 false)
  %lg = load double, ptr %tmp, align 8, !tbaa.struct !13
  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %tmp, i64 16, i1 true)
  ret double %lg
}

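; The i64 store is shortened to an i32 store that covers exactly one field of
; !tbaa.struct !0 and therefore receives that field's float tag.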
define i32 @shorten_integer_store_single_field(ptr %dst, ptr %dst.2, ptr %src) {
; CHECK-LABEL: define i32 @shorten_integer_store_single_field(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_SROA_0:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store i32 123, ptr [[A_SROA_0]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT:    [[A_SROA_0_0_A_SROA_0_0_L:%.*]] = load i32, ptr [[A_SROA_0]], align 4
; CHECK-NEXT:    [[A_SROA_0_0_A_SROA_0_0_COPYLOAD:%.*]] = load volatile i32, ptr [[A_SROA_0]], align 4
; CHECK-NEXT:    store volatile i32 [[A_SROA_0_0_A_SROA_0_0_COPYLOAD]], ptr [[DST]], align 1
; CHECK-NEXT:    ret i32 [[A_SROA_0_0_A_SROA_0_0_L]]
;
entry:
  %a = alloca [8 x i8], align 2
  store i64 123, ptr %a, align 2, !tbaa.struct !0
  %l = load i32, ptr %a
  call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dst, ptr align 1 %a, i32 4, i1 true)
  ret i32 %l
}

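; As above, but with !tbaa.struct !14; the shortened i32 store gets the v2f32
; field tag.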
define i32 @shorten_integer_store_multiple_fields(ptr %dst, ptr %dst.2, ptr %src) {
; CHECK-LABEL: define i32 @shorten_integer_store_multiple_fields(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_SROA_0:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store i32 123, ptr [[A_SROA_0]], align 4, !tbaa [[TBAA5]]
; CHECK-NEXT:    [[A_SROA_0_0_A_SROA_0_0_L:%.*]] = load i32, ptr [[A_SROA_0]], align 4
; CHECK-NEXT:    [[A_SROA_0_0_A_SROA_0_0_COPYLOAD:%.*]] = load volatile i32, ptr [[A_SROA_0]], align 4
; CHECK-NEXT:    store volatile i32 [[A_SROA_0_0_A_SROA_0_0_COPYLOAD]], ptr [[DST]], align 1
; CHECK-NEXT:    ret i32 [[A_SROA_0_0_A_SROA_0_0_L]]
;
entry:
  %a = alloca [8 x i8], align 2
  store i64 123, ptr %a, align 2, !tbaa.struct !14
  %l = load i32, ptr %a
  call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dst, ptr align 1 %a, i32 4, i1 true)
  ret i32 %l
}

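; Vector counterparts of the integer store-shortening tests above, using
; !tbaa.struct !0 and !14.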
define <2 x i16> @shorten_vector_store_multiple_fields(ptr %dst, ptr %dst.2, ptr %src) {
; CHECK-LABEL: define <2 x i16> @shorten_vector_store_multiple_fields(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_SROA_0:%.*]] = alloca <2 x i32>, align 8
; CHECK-NEXT:    store <2 x i32> <i32 1, i32 2>, ptr [[A_SROA_0]], align 8
; CHECK-NEXT:    [[A_SROA_0_0_A_SROA_0_0_L:%.*]] = load <2 x i16>, ptr [[A_SROA_0]], align 8
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[DST]], ptr align 8 [[A_SROA_0]], i32 4, i1 true)
; CHECK-NEXT:    ret <2 x i16> [[A_SROA_0_0_A_SROA_0_0_L]]
;
entry:
  %a = alloca [8 x i8], align 2
  store <2 x i32> <i32 1, i32 2>, ptr %a, align 2, !tbaa.struct !0
  %l = load <2 x i16>, ptr %a
  call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dst, ptr align 1 %a, i32 4, i1 true)
  ret <2 x i16> %l
}

define <2 x i16> @shorten_vector_store_single_fields(ptr %dst, ptr %dst.2, ptr %src) {
; CHECK-LABEL: define <2 x i16> @shorten_vector_store_single_fields(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_SROA_0:%.*]] = alloca <2 x i32>, align 8
; CHECK-NEXT:    store <2 x i32> <i32 1, i32 2>, ptr [[A_SROA_0]], align 8
; CHECK-NEXT:    [[A_SROA_0_0_A_SROA_0_0_L:%.*]] = load <2 x i16>, ptr [[A_SROA_0]], align 8
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[DST]], ptr align 8 [[A_SROA_0]], i32 4, i1 true)
; CHECK-NEXT:    ret <2 x i16> [[A_SROA_0_0_A_SROA_0_0_L]]
;
entry:
  %a = alloca [8 x i8], align 8
  store <2 x i32> <i32 1, i32 2>, ptr %a, align 8, !tbaa.struct !14
  %l = load <2 x i16>, ptr %a
  call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dst, ptr align 1 %a, i32 4, i1 true)
  ret <2 x i16> %l
}

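; Check that !tbaa.struct !16 on a first-class aggregate load is split so each
; per-field load carries the corresponding field's !tbaa tag.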
define i32 @split_load_with_tbaa_struct(i32 %x, ptr %src, ptr %dst) {
; CHECK-LABEL: define i32 @split_load_with_tbaa_struct(
; CHECK-SAME: i32 [[X:%.*]], ptr [[SRC:%.*]], ptr [[DST:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A3_SROA_0:%.*]] = alloca i16, align 8
; CHECK-NEXT:    [[A3_SROA_3:%.*]] = alloca i16, align 2
; CHECK-NEXT:    [[A3_SROA_33:%.*]] = alloca float, align 4
; CHECK-NEXT:    [[A3_SROA_4:%.*]] = alloca i8, align 8
; CHECK-NEXT:    [[A3_SROA_5:%.*]] = alloca i8, align 1
; CHECK-NEXT:    [[A3_SROA_0_0_COPYLOAD:%.*]] = load i16, ptr [[SRC]], align 1
; CHECK-NEXT:    store i16 [[A3_SROA_0_0_COPYLOAD]], ptr [[A3_SROA_0]], align 8
; CHECK-NEXT:    [[A3_SROA_3_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 2
; CHECK-NEXT:    [[A3_SROA_3_0_COPYLOAD:%.*]] = load i16, ptr [[A3_SROA_3_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT:    store i16 [[A3_SROA_3_0_COPYLOAD]], ptr [[A3_SROA_3]], align 2
; CHECK-NEXT:    [[A3_SROA_33_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 4
; CHECK-NEXT:    [[A3_SROA_33_0_COPYLOAD:%.*]] = load float, ptr [[A3_SROA_33_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT:    store float [[A3_SROA_33_0_COPYLOAD]], ptr [[A3_SROA_33]], align 4
; CHECK-NEXT:    [[A3_SROA_4_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 8
; CHECK-NEXT:    [[A3_SROA_4_0_COPYLOAD:%.*]] = load i8, ptr [[A3_SROA_4_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT:    store i8 [[A3_SROA_4_0_COPYLOAD]], ptr [[A3_SROA_4]], align 8
; CHECK-NEXT:    [[A3_SROA_5_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 9
; CHECK-NEXT:    [[A3_SROA_5_0_COPYLOAD:%.*]] = load i8, ptr [[A3_SROA_5_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT:    store i8 [[A3_SROA_5_0_COPYLOAD]], ptr [[A3_SROA_5]], align 1
; CHECK-NEXT:    [[A3_SROA_0_0_A3_SROA_0_0_LOAD4_FCA_0_LOAD:%.*]] = load i16, ptr [[A3_SROA_0]], align 8, !tbaa [[TBAA5]]
; CHECK-NEXT:    [[LOAD4_FCA_0_INSERT:%.*]] = insertvalue { i16, float, i8 } poison, i16 [[A3_SROA_0_0_A3_SROA_0_0_LOAD4_FCA_0_LOAD]], 0
; CHECK-NEXT:    [[A3_SROA_33_0_A3_SROA_33_4_LOAD4_FCA_1_LOAD:%.*]] = load float, ptr [[A3_SROA_33]], align 4, !tbaa [[TBAA5]]
; CHECK-NEXT:    [[LOAD4_FCA_1_INSERT:%.*]] = insertvalue { i16, float, i8 } [[LOAD4_FCA_0_INSERT]], float [[A3_SROA_33_0_A3_SROA_33_4_LOAD4_FCA_1_LOAD]], 1
; CHECK-NEXT:    [[A3_SROA_4_0_A3_SROA_4_8_LOAD4_FCA_2_LOAD:%.*]] = load i8, ptr [[A3_SROA_4]], align 8, !tbaa [[TBAA5]]
; CHECK-NEXT:    [[LOAD4_FCA_2_INSERT:%.*]] = insertvalue { i16, float, i8 } [[LOAD4_FCA_1_INSERT]], i8 [[A3_SROA_4_0_A3_SROA_4_8_LOAD4_FCA_2_LOAD]], 2
; CHECK-NEXT:    [[UNWRAP2:%.*]] = extractvalue { i16, float, i8 } [[LOAD4_FCA_2_INSERT]], 1
; CHECK-NEXT:    [[VALCAST2:%.*]] = bitcast float [[UNWRAP2]] to i32
; CHECK-NEXT:    [[A3_SROA_0_0_A3_SROA_0_0_COPYLOAD1:%.*]] = load volatile i16, ptr [[A3_SROA_0]], align 8
; CHECK-NEXT:    store volatile i16 [[A3_SROA_0_0_A3_SROA_0_0_COPYLOAD1]], ptr [[DST]], align 1
; CHECK-NEXT:    [[A3_SROA_3_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 2
; CHECK-NEXT:    [[A3_SROA_3_0_A3_SROA_3_0_COPYLOAD2:%.*]] = load volatile i16, ptr [[A3_SROA_3]], align 2
; CHECK-NEXT:    store volatile i16 [[A3_SROA_3_0_A3_SROA_3_0_COPYLOAD2]], ptr [[A3_SROA_3_0_DST_SROA_IDX]], align 1
; CHECK-NEXT:    [[A3_SROA_33_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 4
; CHECK-NEXT:    [[A3_SROA_33_0_A3_SROA_33_0_COPYLOAD4:%.*]] = load volatile float, ptr [[A3_SROA_33]], align 4
; CHECK-NEXT:    store volatile float [[A3_SROA_33_0_A3_SROA_33_0_COPYLOAD4]], ptr [[A3_SROA_33_0_DST_SROA_IDX]], align 1
; CHECK-NEXT:    [[A3_SROA_4_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 8
; CHECK-NEXT:    [[A3_SROA_4_0_A3_SROA_4_0_COPYLOAD5:%.*]] = load volatile i8, ptr [[A3_SROA_4]], align 8
; CHECK-NEXT:    store volatile i8 [[A3_SROA_4_0_A3_SROA_4_0_COPYLOAD5]], ptr [[A3_SROA_4_0_DST_SROA_IDX]], align 1
; CHECK-NEXT:    [[A3_SROA_5_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 9
; CHECK-NEXT:    [[A3_SROA_5_0_A3_SROA_5_0_COPYLOAD6:%.*]] = load volatile i8, ptr [[A3_SROA_5]], align 1
; CHECK-NEXT:    store volatile i8 [[A3_SROA_5_0_A3_SROA_5_0_COPYLOAD6]], ptr [[A3_SROA_5_0_DST_SROA_IDX]], align 1
; CHECK-NEXT:    ret i32 [[VALCAST2]]
;
entry:
  %a3 = alloca { float, float , float }

  call void @llvm.memcpy.p0.p0.i64(ptr %a3, ptr %src, i64 10, i1 false)
  %load4 = load { i16, float , i8}, ptr %a3, !tbaa.struct !16
  %unwrap2 = extractvalue { i16, float, i8 } %load4 , 1
  %valcast2 = bitcast float %unwrap2 to i32
  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %a3, i64 10, i1 true)

  ret i32 %valcast2
}

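; Same for storing a first-class aggregate with !tbaa.struct !16; each
; extracted field store gets the corresponding field's !tbaa tag.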
define i32 @split_store_with_tbaa_struct(i32 %x, ptr %src, ptr %dst) {
; CHECK-LABEL: define i32 @split_store_with_tbaa_struct(
; CHECK-SAME: i32 [[X:%.*]], ptr [[SRC:%.*]], ptr [[DST:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A3_SROA_0:%.*]] = alloca i16, align 8
; CHECK-NEXT:    [[A3_SROA_3:%.*]] = alloca i16, align 2
; CHECK-NEXT:    [[A3_SROA_33:%.*]] = alloca float, align 4
; CHECK-NEXT:    [[A3_SROA_4:%.*]] = alloca i8, align 8
; CHECK-NEXT:    [[A3_SROA_5:%.*]] = alloca i8, align 1
; CHECK-NEXT:    [[A3_SROA_0_0_COPYLOAD:%.*]] = load i16, ptr [[SRC]], align 1
; CHECK-NEXT:    store i16 [[A3_SROA_0_0_COPYLOAD]], ptr [[A3_SROA_0]], align 8
; CHECK-NEXT:    [[A3_SROA_3_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 2
; CHECK-NEXT:    [[A3_SROA_3_0_COPYLOAD:%.*]] = load i16, ptr [[A3_SROA_3_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT:    store i16 [[A3_SROA_3_0_COPYLOAD]], ptr [[A3_SROA_3]], align 2
; CHECK-NEXT:    [[A3_SROA_33_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 4
; CHECK-NEXT:    [[A3_SROA_33_0_COPYLOAD:%.*]] = load float, ptr [[A3_SROA_33_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT:    store float [[A3_SROA_33_0_COPYLOAD]], ptr [[A3_SROA_33]], align 4
; CHECK-NEXT:    [[A3_SROA_4_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 8
; CHECK-NEXT:    [[A3_SROA_4_0_COPYLOAD:%.*]] = load i8, ptr [[A3_SROA_4_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT:    store i8 [[A3_SROA_4_0_COPYLOAD]], ptr [[A3_SROA_4]], align 8
; CHECK-NEXT:    [[A3_SROA_5_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 9
; CHECK-NEXT:    [[A3_SROA_5_0_COPYLOAD:%.*]] = load i8, ptr [[A3_SROA_5_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT:    store i8 [[A3_SROA_5_0_COPYLOAD]], ptr [[A3_SROA_5]], align 1
; CHECK-NEXT:    [[I_1:%.*]] = insertvalue { i16, float, i8 } poison, i16 10, 0
; CHECK-NEXT:    [[I_2:%.*]] = insertvalue { i16, float, i8 } [[I_1]], float 3.000000e+00, 1
; CHECK-NEXT:    [[I_3:%.*]] = insertvalue { i16, float, i8 } [[I_2]], i8 99, 2
; CHECK-NEXT:    [[I_3_FCA_0_EXTRACT:%.*]] = extractvalue { i16, float, i8 } [[I_3]], 0
; CHECK-NEXT:    store i16 [[I_3_FCA_0_EXTRACT]], ptr [[A3_SROA_0]], align 8, !tbaa [[TBAA5]]
; CHECK-NEXT:    [[I_3_FCA_1_EXTRACT:%.*]] = extractvalue { i16, float, i8 } [[I_3]], 1
; CHECK-NEXT:    store float [[I_3_FCA_1_EXTRACT]], ptr [[A3_SROA_33]], align 4, !tbaa [[TBAA5]]
; CHECK-NEXT:    [[I_3_FCA_2_EXTRACT:%.*]] = extractvalue { i16, float, i8 } [[I_3]], 2
; CHECK-NEXT:    store i8 [[I_3_FCA_2_EXTRACT]], ptr [[A3_SROA_4]], align 8, !tbaa [[TBAA5]]
; CHECK-NEXT:    [[A3_SROA_0_0_A3_SROA_0_0_COPYLOAD1:%.*]] = load volatile i16, ptr [[A3_SROA_0]], align 8
; CHECK-NEXT:    store volatile i16 [[A3_SROA_0_0_A3_SROA_0_0_COPYLOAD1]], ptr [[DST]], align 1
; CHECK-NEXT:    [[A3_SROA_3_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 2
; CHECK-NEXT:    [[A3_SROA_3_0_A3_SROA_3_0_COPYLOAD2:%.*]] = load volatile i16, ptr [[A3_SROA_3]], align 2
; CHECK-NEXT:    store volatile i16 [[A3_SROA_3_0_A3_SROA_3_0_COPYLOAD2]], ptr [[A3_SROA_3_0_DST_SROA_IDX]], align 1
; CHECK-NEXT:    [[A3_SROA_33_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 4
; CHECK-NEXT:    [[A3_SROA_33_0_A3_SROA_33_0_COPYLOAD4:%.*]] = load volatile float, ptr [[A3_SROA_33]], align 4
; CHECK-NEXT:    store volatile float [[A3_SROA_33_0_A3_SROA_33_0_COPYLOAD4]], ptr [[A3_SROA_33_0_DST_SROA_IDX]], align 1
; CHECK-NEXT:    [[A3_SROA_4_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 8
; CHECK-NEXT:    [[A3_SROA_4_0_A3_SROA_4_0_COPYLOAD5:%.*]] = load volatile i8, ptr [[A3_SROA_4]], align 8
; CHECK-NEXT:    store volatile i8 [[A3_SROA_4_0_A3_SROA_4_0_COPYLOAD5]], ptr [[A3_SROA_4_0_DST_SROA_IDX]], align 1
; CHECK-NEXT:    [[A3_SROA_5_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 9
; CHECK-NEXT:    [[A3_SROA_5_0_A3_SROA_5_0_COPYLOAD6:%.*]] = load volatile i8, ptr [[A3_SROA_5]], align 1
; CHECK-NEXT:    store volatile i8 [[A3_SROA_5_0_A3_SROA_5_0_COPYLOAD6]], ptr [[A3_SROA_5_0_DST_SROA_IDX]], align 1
; CHECK-NEXT:    ret i32 0
;
entry:
  %a3 = alloca { float, float , float }

  call void @llvm.memcpy.p0.p0.i64(ptr %a3, ptr %src, i64 10, i1 false)
  %i.1 = insertvalue { i16, float, i8 } poison, i16 10, 0
  %i.2 = insertvalue { i16, float, i8 } %i.1, float 3.0, 1
  %i.3 = insertvalue { i16, float, i8 } %i.2, i8 99, 2
  store { i16, float , i8} %i.3, ptr %a3, !tbaa.struct !16
  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %a3, i64 10, i1 true)

  ret i32 0
}


; Function Attrs: mustprogress nocallback nofree nounwind willreturn memory(argmem: readwrite)
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #2

!0 = !{i64 0, i64 4, !1, i64 4, i64 4, !1}
!1 = !{!2, !2, i64 0}
!2 = !{!"float", !3, i64 0}
!3 = !{!"omnipotent char", !4, i64 0}
!4 = !{!"Simple C++ TBAA"}
!5 = !{!"v2f32", !3, i64 0}
!6 = !{!5, !5, i64 0}
!7 = !{i64 0, i64 8, !6, i64 8, i64 4, !1}
!8 = !{i64 0, i64 4, !1, i64 4, i64 8, !6}
!9 = !{i64 0, i64 8, !6, i64 4, i64 8, !1}
!10 = !{i64 0, i64 2, !1, i64 2, i64 2, !1}
!11 = !{i64 0, i64 1, !1, i64 1, i64 3, !1}
!12 = !{i64 0, i64 2, !1, i64 2, i64 6, !1}
!13 = !{i64 0, i64 8, !6}
!14 = !{i64 0, i64 4, !6}
!15 = !{i64 0, i64 7, !6, i64 7, i64 1, !6}
!16 = !{i64 0, i64 2, !6, i64 4, i64 4, !6, i64 8, i64 1, !6}
;.
; CHECK: [[TBAA0]] = !{[[META1:![0-9]+]], [[META1]], i64 0}
; CHECK: [[META1]] = !{!"float", [[META2:![0-9]+]], i64 0}
; CHECK: [[META2]] = !{!"omnipotent char", [[META3:![0-9]+]], i64 0}
; CHECK: [[META3]] = !{!"Simple C++ TBAA"}
; CHECK: [[TBAA_STRUCT4]] = !{i64 0, i64 4, [[TBAA0]], i64 4, i64 4, [[TBAA0]]}
; CHECK: [[TBAA5]] = !{[[META6:![0-9]+]], [[META6]], i64 0}
; CHECK: [[META6]] = !{!"v2f32", [[META2]], i64 0}
;.