; xref: /llvm-project/llvm/test/Transforms/InstCombine/scalable-vector-struct.ll (revision bec4c7f5f7fb044dbc7b134a00f4cf29b5cb2b48)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: opt -passes=instcombine -S < %s 2>&1 | FileCheck %s

; Named struct of two scalable vectors, used by @load and @store below.
%struct.test = type { <vscale x 1 x i32>, <vscale x 1 x i32> }

; Loading the whole struct and extracting element 1 folds to a single
; element load at a vscale-scaled byte offset (vscale * 4).
define <vscale x 1 x i32> @load(ptr %x) {
; CHECK-LABEL: define <vscale x 1 x i32> @load
; CHECK-SAME: (ptr [[X:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[TMP1]], 2
; CHECK-NEXT:    [[A_ELT1:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 [[TMP2]]
; CHECK-NEXT:    [[A_UNPACK2:%.*]] = load <vscale x 1 x i32>, ptr [[A_ELT1]], align 4
; CHECK-NEXT:    ret <vscale x 1 x i32> [[A_UNPACK2]]
;
  %a = load %struct.test, ptr %x
  %b = extractvalue %struct.test %a, 1
  ret <vscale x 1 x i32> %b
}

; Storing a struct built with insertvalue splits into per-element stores;
; the second store's address is computed from llvm.vscale.
define void @store(ptr %x, <vscale x 1 x i32> %y, <vscale x 1 x i32> %z) {
; CHECK-LABEL: define void @store
; CHECK-SAME: (ptr [[X:%.*]], <vscale x 1 x i32> [[Y:%.*]], <vscale x 1 x i32> [[Z:%.*]]) {
; CHECK-NEXT:    store <vscale x 1 x i32> [[Y]], ptr [[X]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[TMP1]], 2
; CHECK-NEXT:    [[X_REPACK1:%.*]] = getelementptr inbounds i8, ptr [[X]], i64 [[TMP2]]
; CHECK-NEXT:    store <vscale x 1 x i32> [[Z]], ptr [[X_REPACK1]], align 4
; CHECK-NEXT:    ret void
;
  %a = insertvalue %struct.test undef, <vscale x 1 x i32> %y, 0
  %b = insertvalue %struct.test %a, <vscale x 1 x i32> %z, 1
  store %struct.test %b, ptr %x
  ret void
}

; A load of an anonymous two-element scalable-vector struct is split into
; per-element loads and reassembled with insertvalue.
define {<vscale x 16 x i8>, <vscale x 16 x i8>} @split_load(ptr %p) nounwind {
; CHECK-LABEL: define { <vscale x 16 x i8>, <vscale x 16 x i8> } @split_load
; CHECK-SAME: (ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[R_UNPACK:%.*]] = load <vscale x 16 x i8>, ptr [[P]], align 16
; CHECK-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[R_UNPACK]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[TMP1]], 4
; CHECK-NEXT:    [[R_ELT1:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP2]]
; CHECK-NEXT:    [[R_UNPACK2:%.*]] = load <vscale x 16 x i8>, ptr [[R_ELT1]], align 16
; CHECK-NEXT:    [[R3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[R_UNPACK2]], 1
; CHECK-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8> } [[R3]]
;
entry:
  %r = load {<vscale x 16 x i8>, <vscale x 16 x i8>}, ptr %p
  ret {<vscale x 16 x i8>, <vscale x 16 x i8>} %r
}

; Single-element struct load: collapses to one plain vector load plus an
; insertvalue — no vscale offset arithmetic is needed.
define {<vscale x 16 x i8>} @split_load_one(ptr %p) nounwind {
; CHECK-LABEL: define { <vscale x 16 x i8> } @split_load_one
; CHECK-SAME: (ptr [[P:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[R_UNPACK:%.*]] = load <vscale x 16 x i8>, ptr [[P]], align 16
; CHECK-NEXT:    [[R1:%.*]] = insertvalue { <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[R_UNPACK]], 0
; CHECK-NEXT:    ret { <vscale x 16 x i8> } [[R1]]
;
entry:
  %r = load {<vscale x 16 x i8>}, ptr %p
  ret {<vscale x 16 x i8>} %r
}

; A store of an anonymous two-element scalable-vector struct is split into
; per-element extractvalue + store pairs.
define void @split_store({<vscale x 4 x i32>, <vscale x 4 x i32>} %x, ptr %p) nounwind {
; CHECK-LABEL: define void @split_store
; CHECK-SAME: ({ <vscale x 4 x i32>, <vscale x 4 x i32> } [[X:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[X_ELT:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], 0
; CHECK-NEXT:    store <vscale x 4 x i32> [[X_ELT]], ptr [[P]], align 16
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 4
; CHECK-NEXT:    [[P_REPACK1:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP1]]
; CHECK-NEXT:    [[X_ELT2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], 1
; CHECK-NEXT:    store <vscale x 4 x i32> [[X_ELT2]], ptr [[P_REPACK1]], align 16
; CHECK-NEXT:    ret void
;
entry:
  store {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, ptr %p
  ret void
}

; Single-element struct store: lowered to one extractvalue and one plain
; vector store.
define void @split_store_one({<vscale x 4 x i32>} %x, ptr %p) nounwind {
; CHECK-LABEL: define void @split_store_one
; CHECK-SAME: ({ <vscale x 4 x i32> } [[X:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = extractvalue { <vscale x 4 x i32> } [[X]], 0
; CHECK-NEXT:    store <vscale x 4 x i32> [[TMP0]], ptr [[P]], align 16
; CHECK-NEXT:    ret void
;
entry:
  store {<vscale x 4 x i32>} %x, ptr %p
  ret void
}

; Fixed-width baseline: the reload after the split store is forwarded from
; the stored elements via bitcast, so no load instructions remain.
define {<16 x i8>, <16 x i8>} @check_v16i8_v4i32({<4 x i32>, <4 x i32>} %x, ptr %p) nounwind {
; CHECK-LABEL: define { <16 x i8>, <16 x i8> } @check_v16i8_v4i32
; CHECK-SAME: ({ <4 x i32>, <4 x i32> } [[X:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[X_ELT:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[X]], 0
; CHECK-NEXT:    store <4 x i32> [[X_ELT]], ptr [[P]], align 16
; CHECK-NEXT:    [[P_REPACK1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 16
; CHECK-NEXT:    [[X_ELT2:%.*]] = extractvalue { <4 x i32>, <4 x i32> } [[X]], 1
; CHECK-NEXT:    store <4 x i32> [[X_ELT2]], ptr [[P_REPACK1]], align 16
; CHECK-NEXT:    [[R_UNPACK_CAST:%.*]] = bitcast <4 x i32> [[X_ELT]] to <16 x i8>
; CHECK-NEXT:    [[TMP0:%.*]] = insertvalue { <16 x i8>, <16 x i8> } poison, <16 x i8> [[R_UNPACK_CAST]], 0
; CHECK-NEXT:    [[R_UNPACK4_CAST:%.*]] = bitcast <4 x i32> [[X_ELT2]] to <16 x i8>
; CHECK-NEXT:    [[R5:%.*]] = insertvalue { <16 x i8>, <16 x i8> } [[TMP0]], <16 x i8> [[R_UNPACK4_CAST]], 1
; CHECK-NEXT:    ret { <16 x i8>, <16 x i8> } [[R5]]
;
entry:
  store {<4 x i32>, <4 x i32>} %x, ptr %p
  %r = load {<16 x i8>, <16 x i8>}, ptr %p
  ret {<16 x i8>, <16 x i8>} %r
}

; Scalable counterpart of @check_v16i8_v4i32: the store is split, but the
; reload with a different element type is NOT forwarded — the per-element
; loads remain in the output.
define {<vscale x 16 x i8>, <vscale x 16 x i8>} @check_nxv16i8_nxv4i32({<vscale x 4 x i32>, <vscale x 4 x i32>} %x, ptr %p) nounwind {
; CHECK-LABEL: define { <vscale x 16 x i8>, <vscale x 16 x i8> } @check_nxv16i8_nxv4i32
; CHECK-SAME: ({ <vscale x 4 x i32>, <vscale x 4 x i32> } [[X:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[X_ELT:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], 0
; CHECK-NEXT:    store <vscale x 4 x i32> [[X_ELT]], ptr [[P]], align 16
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 4
; CHECK-NEXT:    [[P_REPACK1:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP1]]
; CHECK-NEXT:    [[X_ELT2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], 1
; CHECK-NEXT:    store <vscale x 4 x i32> [[X_ELT2]], ptr [[P_REPACK1]], align 16
; CHECK-NEXT:    [[R_UNPACK:%.*]] = load <vscale x 16 x i8>, ptr [[P]], align 16
; CHECK-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[R_UNPACK]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP4:%.*]] = shl i64 [[TMP3]], 4
; CHECK-NEXT:    [[R_ELT3:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP4]]
; CHECK-NEXT:    [[R_UNPACK4:%.*]] = load <vscale x 16 x i8>, ptr [[R_ELT3]], align 16
; CHECK-NEXT:    [[R5:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[R_UNPACK4]], 1
; CHECK-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8> } [[R5]]
;
entry:
  store {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, ptr %p
  %r = load {<vscale x 16 x i8>, <vscale x 16 x i8>}, ptr %p
  ret {<vscale x 16 x i8>, <vscale x 16 x i8>} %r
}

; Same round-trip as @check_nxv16i8_nxv4i32 but through a local alloca; the
; alloca and both the split store and split load survive instcombine.
define {<vscale x 16 x i8>, <vscale x 16 x i8>} @alloca_nxv16i8_nxv4i32({<vscale x 4 x i32>, <vscale x 4 x i32>} %x) nounwind {
; CHECK-LABEL: define { <vscale x 16 x i8>, <vscale x 16 x i8> } @alloca_nxv16i8_nxv4i32
; CHECK-SAME: ({ <vscale x 4 x i32>, <vscale x 4 x i32> } [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16
; CHECK-NEXT:    [[X_ELT:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], 0
; CHECK-NEXT:    store <vscale x 4 x i32> [[X_ELT]], ptr [[P]], align 16
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 4
; CHECK-NEXT:    [[P_REPACK1:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP1]]
; CHECK-NEXT:    [[X_ELT2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[X]], 1
; CHECK-NEXT:    store <vscale x 4 x i32> [[X_ELT2]], ptr [[P_REPACK1]], align 16
; CHECK-NEXT:    [[R_UNPACK:%.*]] = load <vscale x 16 x i8>, ptr [[P]], align 16
; CHECK-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[R_UNPACK]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP4:%.*]] = shl i64 [[TMP3]], 4
; CHECK-NEXT:    [[R_ELT3:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP4]]
; CHECK-NEXT:    [[R_UNPACK4:%.*]] = load <vscale x 16 x i8>, ptr [[R_ELT3]], align 16
; CHECK-NEXT:    [[R5:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[R_UNPACK4]], 1
; CHECK-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8> } [[R5]]
;
entry:
  %p = alloca {<vscale x 4 x i32>, <vscale x 4 x i32>}
  store {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, ptr %p
  %r = load {<vscale x 16 x i8>, <vscale x 16 x i8>}, ptr %p
  ret {<vscale x 16 x i8>, <vscale x 16 x i8>} %r
}

; Struct elements of different sizes between store and load: per the CHECK
; lines both the aggregate store and the aggregate load are kept whole
; (neither is split), with the lifetime markers preserved.
define { <16 x i8>, <32 x i8> } @differenttypes({ <4 x i32>, <8 x i32> } %a, ptr %p) {
; CHECK-LABEL: define { <16 x i8>, <32 x i8> } @differenttypes
; CHECK-SAME: ({ <4 x i32>, <8 x i32> } [[A:%.*]], ptr [[P:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull [[P]])
; CHECK-NEXT:    store { <4 x i32>, <8 x i32> } [[A]], ptr [[P]], align 16
; CHECK-NEXT:    [[TMP0:%.*]] = load { <16 x i8>, <32 x i8> }, ptr [[P]], align 16
; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull [[P]])
; CHECK-NEXT:    ret { <16 x i8>, <32 x i8> } [[TMP0]]
;
entry:
  call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %p) #5
  store { <4 x i32>, <8 x i32> } %a, ptr %p, align 16
  %2 = load { <16 x i8>, <32 x i8> }, ptr %p, align 16
  call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %p) #5
  ret { <16 x i8>, <32 x i8> } %2
}
