1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
3
4; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]    -> 32-bit indexes
define void @llvm_aarch64_sve_prfb_gather_uxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_uxtw_index_nx4vi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfb pldl1strm, p0, [x0, z0.s, uxtw]
; CHECK-NEXT:    ret
; prfop=1 selects pldl1strm; byte-granule prefetch, so the uxtw extend carries no shift.
  call void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 1)
  ret void
}
13
define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_sxtw_index_nx4vi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfb pldl1strm, p0, [x0, z0.s, sxtw]
; CHECK-NEXT:    ret
; prfop=1 selects pldl1strm; byte-granule prefetch, so the sxtw extend carries no shift.
  call void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 1)
  ret void
}
22
23; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]    -> 32-bit unpacked indexes
24
define void @llvm_aarch64_sve_prfb_gather_uxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_uxtw_index_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfb pldl1strm, p0, [x0, z0.d, uxtw]
; CHECK-NEXT:    ret
; Unpacked 32-bit indexes in .D elements, zero-extended; prfop=1 selects pldl1strm.
  call void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 1)
  ret void
}
33
define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_sxtw_index_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfb pldl1strm, p0, [x0, z0.d, sxtw]
; CHECK-NEXT:    ret
; Unpacked 32-bit indexes in .D elements, sign-extended; prfop=1 selects pldl1strm.
  call void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 1)
  ret void
}
42; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit indexes
define void @llvm_aarch64_sve_prfb_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfb pldl1strm, p0, [x0, z0.d]
; CHECK-NEXT:    ret
; Full 64-bit indexes; byte-granule prefetch, so no lsl modifier; prfop=1 selects pldl1strm.
  call void @llvm.aarch64.sve.prfb.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes, i32 1)
  ret void
}
51
52;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
53
54; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]    -> 32-bit indexes
define void @llvm_aarch64_sve_prfh_gather_uxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_uxtw_index_nx4vi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfh pldl1strm, p0, [x0, z0.s, uxtw #1]
; CHECK-NEXT:    ret
; Halfword prefetch scales the zero-extended index by 2 (uxtw #1); prfop=1 selects pldl1strm.
  call void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 1)
  ret void
}
63
define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_sxtw_index_nx4vi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfh pldl1strm, p0, [x0, z0.s, sxtw #1]
; CHECK-NEXT:    ret
; Halfword prefetch scales the sign-extended index by 2 (sxtw #1); prfop=1 selects pldl1strm.
  call void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 1)
  ret void
}
72
73; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #1] -> 32-bit unpacked indexes
define void @llvm_aarch64_sve_prfh_gather_uxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_uxtw_index_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfh pldl1strm, p0, [x0, z0.d, uxtw #1]
; CHECK-NEXT:    ret
; Unpacked 32-bit indexes, zero-extended and scaled by 2; prfop=1 selects pldl1strm.
  call void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 1)
  ret void
}
82
define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_sxtw_index_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfh pldl1strm, p0, [x0, z0.d, sxtw #1]
; CHECK-NEXT:    ret
; Unpacked 32-bit indexes, sign-extended and scaled by 2; prfop=1 selects pldl1strm.
  call void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 1)
  ret void
}
91
92; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit indexes
define void @llvm_aarch64_sve_prfh_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfh pldl1strm, p0, [x0, z0.d, lsl #1]
; CHECK-NEXT:    ret
; Full 64-bit indexes scaled by the halfword size (lsl #1); prfop=1 selects pldl1strm.
  call void @llvm.aarch64.sve.prfh.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes, i32 1)
  ret void
}
101
102;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
103
104; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]    -> 32-bit indexes
define void @llvm_aarch64_sve_prfw_gather_uxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_uxtw_index_nx4vi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfw pldl1strm, p0, [x0, z0.s, uxtw #2]
; CHECK-NEXT:    ret
; Word prefetch scales the zero-extended index by 4 (uxtw #2); prfop=1 selects pldl1strm.
  call void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 1)
  ret void
}
113
define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_sxtw_index_nx4vi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfw pldl1strm, p0, [x0, z0.s, sxtw #2]
; CHECK-NEXT:    ret
; Word prefetch scales the sign-extended index by 4 (sxtw #2); prfop=1 selects pldl1strm.
  call void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 1)
  ret void
}
122
123; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2] -> 32-bit unpacked indexes
define void @llvm_aarch64_sve_prfw_gather_uxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_uxtw_index_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfw pldl1strm, p0, [x0, z0.d, uxtw #2]
; CHECK-NEXT:    ret
; Unpacked 32-bit indexes, zero-extended and scaled by 4; prfop=1 selects pldl1strm.
  call void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 1)
  ret void
}
132
define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_sxtw_index_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfw pldl1strm, p0, [x0, z0.d, sxtw #2]
; CHECK-NEXT:    ret
; Unpacked 32-bit indexes, sign-extended and scaled by 4; prfop=1 selects pldl1strm.
  call void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 1)
  ret void
}
141
142; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit indexes
define void @llvm_aarch64_sve_prfw_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfw pldl1strm, p0, [x0, z0.d, lsl #2]
; CHECK-NEXT:    ret
; Full 64-bit indexes scaled by the word size (lsl #2); prfop=1 selects pldl1strm.
  call void @llvm.aarch64.sve.prfw.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes, i32 1)
  ret void
}
151
152;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
153
154; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]    -> 32-bit indexes
define void @llvm_aarch64_sve_prfd_gather_uxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_uxtw_index_nx4vi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfd pldl1strm, p0, [x0, z0.s, uxtw #3]
; CHECK-NEXT:    ret
; Doubleword prefetch scales the zero-extended index by 8 (uxtw #3); prfop=1 selects pldl1strm.
  call void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 1)
  ret void
}
163
define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_sxtw_index_nx4vi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfd pldl1strm, p0, [x0, z0.s, sxtw #3]
; CHECK-NEXT:    ret
; Doubleword prefetch scales the sign-extended index by 8 (sxtw #3); prfop=1 selects pldl1strm.
  call void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 1)
  ret void
}
172
173; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #3] -> 32-bit unpacked indexes
define void @llvm_aarch64_sve_prfd_gather_uxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_uxtw_index_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfd pldl1strm, p0, [x0, z0.d, uxtw #3]
; CHECK-NEXT:    ret
; Unpacked 32-bit indexes, zero-extended and scaled by 8; prfop=1 selects pldl1strm.
  call void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 1)
  ret void
}
182
define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_sxtw_index_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfd pldl1strm, p0, [x0, z0.d, sxtw #3]
; CHECK-NEXT:    ret
; Unpacked 32-bit indexes, sign-extended and scaled by 8; prfop=1 selects pldl1strm.
  call void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 1)
  ret void
}
191
192; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit indexes
define void @llvm_aarch64_sve_prfd_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_nx2vi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    prfd pldl1strm, p0, [x0, z0.d, lsl #3]
; CHECK-NEXT:    ret
; Full 64-bit indexes scaled by the doubleword size (lsl #3); prfop=1 selects pldl1strm.
  call void @llvm.aarch64.sve.prfd.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes, i32 1)
  ret void
}
201
; PRFB (byte) gather-prefetch intrinsic declarations.
declare void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 %prfop)
declare void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 %prfop)
declare void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 %prfop)
declare void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 %prfop)
declare void @llvm.aarch64.sve.prfb.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes, i32 %prfop)

; PRFH (halfword) gather-prefetch intrinsic declarations.
declare void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 %prfop)
declare void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 %prfop)
declare void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 %prfop)
declare void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 %prfop)
declare void @llvm.aarch64.sve.prfh.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes, i32 %prfop)

; PRFW (word) gather-prefetch intrinsic declarations.
declare void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 %prfop)
declare void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 %prfop)
declare void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 %prfop)
declare void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 %prfop)
declare void @llvm.aarch64.sve.prfw.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes, i32 %prfop)

; PRFD (doubleword) gather-prefetch intrinsic declarations.
declare void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 %prfop)
declare void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 %prfop)
declare void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 %prfop)
declare void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 %prfop)
declare void @llvm.aarch64.sve.prfd.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes, i32 %prfop)
225