; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming -verify-machineinstrs < %s | FileCheck %s
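; The RUN line targets SME2 and forces streaming mode (-force-streaming), where
; the multi-vector UZP instructions exercised below are legal, and
; -verify-machineinstrs runs the machine verifier over the generated code.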

; == 8 to 64-bit elements ==

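; Each function below receives its four inputs in z1-z4 (z0 is consumed by the
; deliberately unused first argument) and returns four results in z0-z3. The
; four-vector operands of the multi-vector UZP instruction must occupy
; consecutive registers starting at a multiple of four, so the inputs are first
; copied into such a group (z4-z7) before a single UZP produces all four
; results.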
define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @uzp_x4_i8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4) nounwind {
; CHECK-LABEL: uzp_x4_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uzp { z0.b - z3.b }, { z4.b - z7.b }
; CHECK-NEXT:    ret
  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.uzp.x4.nxv16i8(<vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
}
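; A call like the one above would typically be emitted for the corresponding
; ACLE SME2 builtin. Assuming the usual ACLE naming scheme (an assumption, not
; verified against this revision), the i8 variant is declared roughly as:
;   svuint8x4_t svuzp_u8_x4(svuint8x4_t zn) __arm_streaming;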

define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @uzp_x4_i16(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4) nounwind {
; CHECK-LABEL: uzp_x4_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uzp { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    ret
  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.uzp.x4.nxv8i16(<vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
  ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
}

define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @uzp_x4_f16(<vscale x 8 x half> %unused, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4) nounwind {
; CHECK-LABEL: uzp_x4_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uzp { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    ret
  %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.uzp.x4.nxv8f16(<vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4)
  ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
}

define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @uzp_x4_bf16(<vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4) nounwind {
; CHECK-LABEL: uzp_x4_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uzp { z0.h - z3.h }, { z4.h - z7.h }
; CHECK-NEXT:    ret
  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.uzp.x4.nxv8bf16(<vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4)
  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
}

define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @uzp_x4_i32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) nounwind {
; CHECK-LABEL: uzp_x4_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uzp { z0.s - z3.s }, { z4.s - z7.s }
; CHECK-NEXT:    ret
  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.uzp.x4.nxv4i32(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4)
  ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
}

define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @uzp_x4_f32(<vscale x 4 x float> %unused, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zn4) nounwind {
; CHECK-LABEL: uzp_x4_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uzp { z0.s - z3.s }, { z4.s - z7.s }
; CHECK-NEXT:    ret
  %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.uzp.x4.nxv4f32(<vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zn4)
  ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
}

define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @uzp_x4_i64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) nounwind {
; CHECK-LABEL: uzp_x4_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uzp { z0.d - z3.d }, { z4.d - z7.d }
; CHECK-NEXT:    ret
  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.uzp.x4.nxv2i64(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4)
  ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
}

define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @uzp_x4_f64(<vscale x 4 x double> %unused, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zn4) nounwind {
; CHECK-LABEL: uzp_x4_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z27.d, z5.d
; CHECK-NEXT:    mov z26.d, z4.d
; CHECK-NEXT:    mov z25.d, z3.d
; CHECK-NEXT:    mov z24.d, z2.d
; CHECK-NEXT:    uzp { z0.d - z3.d }, { z24.d - z27.d }
; CHECK-NEXT:    ret
  %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.uzp.x4.nxv2f64(<vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zn4)
  ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
}
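; The f64 variant above differs from the rest: its unused leading argument is
; <vscale x 4 x double>, which occupies z0-z1, so the real inputs arrive in
; z2-z5. That group does not start at a register number that is a multiple of
; four, so the inputs are copied into z24-z27 instead before the UZP issues.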


; == 128-bit elements ==
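; The uzpq.x4 intrinsics below deinterleave at 128-bit granularity: whatever
; the element type, the instruction operates on .q (128-bit) chunks of the
; four source vectors, so every variant lowers to the same .q-suffixed UZP.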

define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @uzpq_x4_i8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4) nounwind {
; CHECK-LABEL: uzpq_x4_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uzp { z0.q - z3.q }, { z4.q - z7.q }
; CHECK-NEXT:    ret
  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.uzpq.x4.nxv16i8(<vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
}

define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @uzpq_x4_i16(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4) nounwind {
; CHECK-LABEL: uzpq_x4_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uzp { z0.q - z3.q }, { z4.q - z7.q }
; CHECK-NEXT:    ret
  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.uzpq.x4.nxv8i16(<vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
  ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
}

define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @uzpq_x4_f16(<vscale x 8 x half> %unused, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4) nounwind {
; CHECK-LABEL: uzpq_x4_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uzp { z0.q - z3.q }, { z4.q - z7.q }
; CHECK-NEXT:    ret
  %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.uzpq.x4.nxv8f16(<vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4)
  ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
}

define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @uzpq_x4_bf16(<vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4) nounwind {
; CHECK-LABEL: uzpq_x4_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uzp { z0.q - z3.q }, { z4.q - z7.q }
; CHECK-NEXT:    ret
  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.uzpq.x4.nxv8bf16(<vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4)
  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
}

define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @uzpq_x4_i32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) nounwind {
; CHECK-LABEL: uzpq_x4_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uzp { z0.q - z3.q }, { z4.q - z7.q }
; CHECK-NEXT:    ret
  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.uzpq.x4.nxv4i32(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4)
  ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
}

define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @uzpq_x4_f32(<vscale x 4 x float> %unused, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zn4) nounwind {
; CHECK-LABEL: uzpq_x4_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uzp { z0.q - z3.q }, { z4.q - z7.q }
; CHECK-NEXT:    ret
  %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.uzpq.x4.nxv4f32(<vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zn4)
  ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
}

define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @uzpq_x4_i64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) nounwind {
; CHECK-LABEL: uzpq_x4_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uzp { z0.q - z3.q }, { z4.q - z7.q }
; CHECK-NEXT:    ret
  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.uzpq.x4.nxv2i64(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4)
  ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
}

define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @uzpq_x4_f64(<vscale x 4 x double> %unused, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zn4) nounwind {
; CHECK-LABEL: uzpq_x4_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z27.d, z5.d
; CHECK-NEXT:    mov z26.d, z4.d
; CHECK-NEXT:    mov z25.d, z3.d
; CHECK-NEXT:    mov z24.d, z2.d
; CHECK-NEXT:    uzp { z0.q - z3.q }, { z24.q - z27.q }
; CHECK-NEXT:    ret
  %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.uzpq.x4.nxv2f64(<vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zn4)
  ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
}


; == 8 to 64-bit elements ==
declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.uzp.x4.nxv16i8(<vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.uzp.x4.nxv8i16(<vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.uzp.x4.nxv4i32(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4)
declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.uzp.x4.nxv2i64(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4)
declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.uzp.x4.nxv8f16(<vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4)
declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.uzp.x4.nxv8bf16(<vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4)
declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.uzp.x4.nxv4f32(<vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zn4)
declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.uzp.x4.nxv2f64(<vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zn4)

; == 128-bit elements ==
declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.uzpq.x4.nxv16i8(<vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.uzpq.x4.nxv8i16(<vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.uzpq.x4.nxv4i32(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4)
declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.uzpq.x4.nxv2i64(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4)
declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.uzpq.x4.nxv8f16(<vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4)
declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.uzpq.x4.nxv8bf16(<vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4)
declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.uzpq.x4.nxv4f32(<vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zn4)
declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.uzpq.x4.nxv2f64(<vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zn4)