; xref: /llvm-project/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-dupqlane.ll (revision 48df06f1d00c6accb396438c04133fb7fdd99d2c)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=instcombine < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

; Repeating "xy" f32 pair within the 128-bit chunk: per the CHECK lines this
; folds to a splat of the 64-bit element holding {x, y} (bitcast to
; <vscale x 2 x i64>, zero-mask shufflevector, bitcast back).
define dso_local <vscale x 4 x float> @dupq_f32_ab_pattern(float %x, float %y) {
; CHECK-LABEL: @dupq_f32_ab_pattern(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x float> poison, float [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x float> [[TMP1]], float [[Y:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> poison, <4 x float> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <vscale x 4 x float> [[TMP3]] to <vscale x 2 x i64>
; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <vscale x 2 x i64> [[TMP5]] to <vscale x 4 x float>
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP6]]
;
  %1 = insertelement <4 x float> poison, float %x, i64 0
  %2 = insertelement <4 x float> %1, float %y, i64 1
  %3 = insertelement <4 x float> %2, float %x, i64 2
  %4 = insertelement <4 x float> %3, float %y, i64 3
  %5 = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> poison, <4 x float> %4, i64 0)
  %6 = tail call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %5, i64 0)
  ret <vscale x 4 x float> %6
}

; All-"a" chunk: the CHECK lines show the insert chain canonicalised to a
; fixed-vector splat shuffle, while the dupq call itself is retained.
define dso_local <vscale x 8 x half> @dupq_f16_a_pattern(half %a) {
; CHECK-LABEL: @dupq_f16_a_pattern(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <8 x half> [[TMP1]], <8 x half> poison, <8 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[TMP3]], i64 0)
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP4]]
;
  %1 = insertelement <8 x half> poison, half %a, i64 0
  %2 = insertelement <8 x half> %1, half %a, i64 1
  %3 = insertelement <8 x half> %2, half %a, i64 2
  %4 = insertelement <8 x half> %3, half %a, i64 3
  %5 = insertelement <8 x half> %4, half %a, i64 4
  %6 = insertelement <8 x half> %5, half %a, i64 5
  %7 = insertelement <8 x half> %6, half %a, i64 6
  %8 = insertelement <8 x half> %7, half %a, i64 7
  %9 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %8, i64 0)
  %10 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %9, i64 0)
  ret <vscale x 8 x half> %10
}

; Repeating "ab" f16 pair: folds to a splat of the 32-bit element holding
; {a, b} (bitcast to <vscale x 4 x i32>, zero-mask shuffle, bitcast back).
define dso_local <vscale x 8 x half> @dupq_f16_ab_pattern(half %a, half %b) {
; CHECK-LABEL: @dupq_f16_ab_pattern(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[B:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <vscale x 8 x half> [[TMP3]] to <vscale x 4 x i32>
; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <vscale x 4 x i32> [[TMP5]] to <vscale x 8 x half>
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP6]]
;
  %1 = insertelement <8 x half> poison, half %a, i64 0
  %2 = insertelement <8 x half> %1, half %b, i64 1
  %3 = insertelement <8 x half> %2, half %a, i64 2
  %4 = insertelement <8 x half> %3, half %b, i64 3
  %5 = insertelement <8 x half> %4, half %a, i64 4
  %6 = insertelement <8 x half> %5, half %b, i64 5
  %7 = insertelement <8 x half> %6, half %a, i64 6
  %8 = insertelement <8 x half> %7, half %b, i64 7
  %9 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %8, i64 0)
  %10 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %9, i64 0)
  ret <vscale x 8 x half> %10
}

; Repeating "abcd" quad: folds to a splat of the 64-bit element holding
; {a, b, c, d} via a <vscale x 2 x i64> bitcast + zero-mask shuffle.
define dso_local <vscale x 8 x half> @dupq_f16_abcd_pattern(half %a, half %b, half %c, half %d) {
; CHECK-LABEL: @dupq_f16_abcd_pattern(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[B:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half [[C:%.*]], i64 2
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half [[D:%.*]], i64 3
; CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP4]], i64 0)
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <vscale x 8 x half> [[TMP5]] to <vscale x 2 x i64>
; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <vscale x 2 x i64> [[TMP7]] to <vscale x 8 x half>
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP8]]
;
  %1 = insertelement <8 x half> poison, half %a, i64 0
  %2 = insertelement <8 x half> %1, half %b, i64 1
  %3 = insertelement <8 x half> %2, half %c, i64 2
  %4 = insertelement <8 x half> %3, half %d, i64 3
  %5 = insertelement <8 x half> %4, half %a, i64 4
  %6 = insertelement <8 x half> %5, half %b, i64 5
  %7 = insertelement <8 x half> %6, half %c, i64 6
  %8 = insertelement <8 x half> %7, half %d, i64 7
  %9 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %8, i64 0)
  %10 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %9, i64 0)
  ret <vscale x 8 x half> %10
}

; Lanes 3 and 7 are left poison; the repeating "abc?" pattern still folds to
; a 64-bit element splat per the CHECK lines.
define dso_local <vscale x 8 x half> @dupq_f16_abcnull_pattern(half %a, half %b, half %c, half %d) {
; CHECK-LABEL: @dupq_f16_abcnull_pattern(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[B:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half [[C:%.*]], i64 2
; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP3]], i64 0)
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <vscale x 8 x half> [[TMP4]] to <vscale x 2 x i64>
; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <vscale x 2 x i64> [[TMP5]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <vscale x 2 x i64> [[TMP6]] to <vscale x 8 x half>
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP7]]
;
  %1 = insertelement <8 x half> poison, half %a, i64 0
  %2 = insertelement <8 x half> %1, half %b, i64 1
  %3 = insertelement <8 x half> %2, half %c, i64 2
  %4 = insertelement <8 x half> %3, half %a, i64 4
  %5 = insertelement <8 x half> %4, half %b, i64 5
  %6 = insertelement <8 x half> %5, half %c, i64 6
  %7 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %6, i64 0)
  %8 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %7, i64 0)
  ret <vscale x 8 x half> %8
}

; Only lanes 0 and 1 are written (rest poison): folds to a 32-bit element
; splat of {a, b}, same shape as the full "ab" pattern.
define dso_local <vscale x 8 x half> @dupq_f16_abnull_pattern(half %a, half %b) {
; CHECK-LABEL: @dupq_f16_abnull_pattern(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[B:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <vscale x 8 x half> [[TMP3]] to <vscale x 4 x i32>
; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <vscale x 4 x i32> [[TMP5]] to <vscale x 8 x half>
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP6]]
;
  %1 = insertelement <8 x half> poison, half %a, i64 0
  %2 = insertelement <8 x half> %1, half %b, i64 1
  %3 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %2, i64 0)
  %4 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %3, i64 0)
  ret <vscale x 8 x half> %4
}

; Negative test: the insert chain starts from a non-poison fixed vector %v,
; so the dupq is not folded (the duplicate lane-0/1 inserts are still
; collapsed by generic insertelement combining, per the CHECK lines).
define dso_local <vscale x 8 x half> @neg_dupq_f16_non_poison_fixed(half %a, half %b, <8 x half> %v) {
; CHECK-LABEL: @neg_dupq_f16_non_poison_fixed(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> [[V:%.*]], half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[B:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[TMP3]], i64 0)
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP4]]
;
  %1 = insertelement <8 x half> %v, half %a, i64 0
  %2 = insertelement <8 x half> %1, half %b, i64 1
  %3 = insertelement <8 x half> %2, half %a, i64 0
  %4 = insertelement <8 x half> %3, half %b, i64 1
  %5 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %4, i64 0)
  %6 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %5, i64 0)
  ret <vscale x 8 x half> %6
}

; Negative test: the fixed vector is inserted into a non-poison scalable
; vector %v, so the dupq is not folded.
define dso_local <vscale x 8 x half> @neg_dupq_f16_into_non_poison_scalable(half %a, half %b, <vscale x 8 x half> %v) {
; CHECK-LABEL: @neg_dupq_f16_into_non_poison_scalable(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[B:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> [[V:%.*]], <8 x half> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[TMP3]], i64 0)
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP4]]
;
  %1 = insertelement <8 x half> poison, half %a, i64 0
  %2 = insertelement <8 x half> %1, half %b, i64 1
  %3 = insertelement <8 x half> %2, half %a, i64 0
  %4 = insertelement <8 x half> %3, half %b, i64 1
  %5 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> %v, <8 x half> %4, i64 0)
  %6 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %5, i64 0)
  ret <vscale x 8 x half> %6
}

; Insert %c to override the last element in the insertelement chain, which will fail to combine

define dso_local <vscale x 8 x half> @neg_dupq_f16_abcd_pattern_double_insert(half %a, half %b, half %c, half %d) {
; CHECK-LABEL: @neg_dupq_f16_abcd_pattern_double_insert(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[B:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half [[C:%.*]], i64 2
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half [[D:%.*]], i64 3
; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half [[A]], i64 4
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half [[B]], i64 5
; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half [[C]], i64 6
; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <8 x half> [[TMP7]], half [[C]], i64 7
; CHECK-NEXT:    [[TMP9:%.*]] = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP8]], i64 0)
; CHECK-NEXT:    [[TMP10:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[TMP9]], i64 0)
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP10]]
;
  %1 = insertelement <8 x half> poison, half %a, i64 0
  %2 = insertelement <8 x half> %1, half %b, i64 1
  %3 = insertelement <8 x half> %2, half %c, i64 2
  %4 = insertelement <8 x half> %3, half %d, i64 3
  %5 = insertelement <8 x half> %4, half %a, i64 4
  %6 = insertelement <8 x half> %5, half %b, i64 5
  %7 = insertelement <8 x half> %6, half %c, i64 6
  %8 = insertelement <8 x half> %7, half %d, i64 7
  %9 = insertelement <8 x half> %8, half %c, i64 7
  %10 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %9, i64 0)
  %11 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %10, i64 0)
  ret <vscale x 8 x half> %11
}

; Same "abcd" pattern but with the insertelement chain built from lane 7 down
; to lane 0: the fold still fires, producing the 64-bit element splat.
define dso_local <vscale x 8 x half> @dupq_f16_abcd_pattern_reverted_insert(half %a, half %b, half %c, half %d) {
; CHECK-LABEL: @dupq_f16_abcd_pattern_reverted_insert(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[B:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half [[C:%.*]], i64 2
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half [[D:%.*]], i64 3
; CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP4]], i64 0)
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <vscale x 8 x half> [[TMP5]] to <vscale x 2 x i64>
; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <vscale x 2 x i64> [[TMP7]] to <vscale x 8 x half>
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP8]]
;
  %1 = insertelement <8 x half> poison, half %d, i64 7
  %2 = insertelement <8 x half> %1, half %c, i64 6
  %3 = insertelement <8 x half> %2, half %b, i64 5
  %4 = insertelement <8 x half> %3, half %a, i64 4
  %5 = insertelement <8 x half> %4, half %d, i64 3
  %6 = insertelement <8 x half> %5, half %c, i64 2
  %7 = insertelement <8 x half> %6, half %b, i64 1
  %8 = insertelement <8 x half> %7, half %a, i64 0
  %9 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %8, i64 0)
  %10 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %9, i64 0)
  ret <vscale x 8 x half> %10
}

; Lane 1 holds %a instead of %b, so the chunk does not start with a repeating
; "ab" pair: the CHECK lines show the dupq is not folded.
define dso_local <vscale x 8 x half> @dupq_f16_ab_no_front_pattern(half %a, half %b) {
; CHECK-LABEL: @dupq_f16_ab_no_front_pattern(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[A]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half [[A]], i64 2
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half [[B:%.*]], i64 3
; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half [[A]], i64 4
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half [[B]], i64 5
; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half [[A]], i64 6
; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <8 x half> [[TMP7]], half [[B]], i64 7
; CHECK-NEXT:    [[TMP9:%.*]] = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP8]], i64 0)
; CHECK-NEXT:    [[TMP10:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[TMP9]], i64 0)
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP10]]
;
  %1 = insertelement <8 x half> poison, half %a, i64 0
  %2 = insertelement <8 x half> %1, half %a, i64 1
  %3 = insertelement <8 x half> %2, half %a, i64 2
  %4 = insertelement <8 x half> %3, half %b, i64 3
  %5 = insertelement <8 x half> %4, half %a, i64 4
  %6 = insertelement <8 x half> %5, half %b, i64 5
  %7 = insertelement <8 x half> %6, half %a, i64 6
  %8 = insertelement <8 x half> %7, half %b, i64 7
  %9 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %8, i64 0)
  %10 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %9, i64 0)
  ret <vscale x 8 x half> %10
}

; Intrinsic declarations used by the tests above.
declare <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half>, <8 x half>, i64)
declare <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half>, i64)
declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float>, <4 x float>, i64)
; Overload suffix fixed to match the signature: the result type is
; <vscale x 4 x float>, so the mangling must be nxv4f32.v2f32 (was nxv2f32.v2f32).
declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v2f32(<vscale x 4 x float>, <2 x float>, i64)
declare <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float>, i64)
declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32>, <4 x i32>, i64)
declare <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32>, i64)
declare <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16>, <8 x i16>, i64)
declare <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16>, i64)

attributes #0 = { "target-features"="+sve" }