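; Assumed test harness for this file (the RUN line was not present in the
; extracted source; this is the standard InstCombine + FileCheck setup):
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
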
define <vscale x 4 x i32> @binop_reverse(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: @binop_reverse(
; CHECK-NEXT: [[ADD1:%.*]] = add nsw <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[ADD:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[ADD1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
;
  %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %add = add nsw <vscale x 4 x i32> %a.rev, %b.rev
  ret <vscale x 4 x i32> %add
}

define <vscale x 4 x i32> @binop_reverse_1(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: @binop_reverse_1(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A:%.*]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[A_REV]])
; CHECK-NEXT: [[ADD1:%.*]] = add <vscale x 4 x i32> [[A]], [[B:%.*]]
; CHECK-NEXT: [[ADD:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[ADD1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
;
  %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  call void @use_nxv4i32(<vscale x 4 x i32> %a.rev)
  %add = add <vscale x 4 x i32> %a.rev, %b.rev
  ret <vscale x 4 x i32> %add
}

define <vscale x 4 x i32> @binop_reverse_2(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: @binop_reverse_2(
; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[B_REV]])
; CHECK-NEXT: [[ADD1:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[B]]
; CHECK-NEXT: [[ADD:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[ADD1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
;
  %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  call void @use_nxv4i32(<vscale x 4 x i32> %b.rev)
  %add = add <vscale x 4 x i32> %a.rev, %b.rev
  ret <vscale x 4 x i32> %add
}

define <vscale x 4 x i32> @binop_reverse_3(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: @binop_reverse_3(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A:%.*]])
; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[A_REV]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[B_REV]])
; CHECK-NEXT: [[ADD:%.*]] = add <vscale x 4 x i32> [[A_REV]], [[B_REV]]
; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
;
  %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  call void @use_nxv4i32(<vscale x 4 x i32> %a.rev)
  call void @use_nxv4i32(<vscale x 4 x i32> %b.rev)
  %add = add <vscale x 4 x i32> %a.rev, %b.rev
  ret <vscale x 4 x i32> %add
}

define <vscale x 4 x i32> @binop_reverse_4(<vscale x 4 x i32> %a) {
; CHECK-LABEL: @binop_reverse_4(
; CHECK-NEXT: [[MUL1:%.*]] = mul <vscale x 4 x i32> [[A:%.*]], [[A]]
; CHECK-NEXT: [[MUL:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[MUL1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[MUL]]
;
  %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %mul = mul <vscale x 4 x i32> %a.rev, %a.rev
  ret <vscale x 4 x i32> %mul
}

define <vscale x 4 x i32> @binop_reverse_5(<vscale x 4 x i32> %a) {
; CHECK-LABEL: @binop_reverse_5(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A:%.*]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[A_REV]])
; CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 4 x i32> [[A_REV]], [[A_REV]]
; CHECK-NEXT: ret <vscale x 4 x i32> [[MUL]]
;
  %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  call void @use_nxv4i32(<vscale x 4 x i32> %a.rev)
  %mul = mul <vscale x 4 x i32> %a.rev, %a.rev
  ret <vscale x 4 x i32> %mul
}

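; Splat operands: a splat does not need its own reverse, so the binop is
; performed on the unreversed vector and the splat, and only the result is
; reversed (unless the reversed operand has additional uses).
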
define <vscale x 4 x i32> @binop_reverse_splat_RHS(<vscale x 4 x i32> %a, i32 %b) {
; CHECK-LABEL: @binop_reverse_splat_RHS(
; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[B_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[DIV1:%.*]] = udiv <vscale x 4 x i32> [[A:%.*]], [[B_SPLAT]]
; CHECK-NEXT: [[DIV:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[DIV1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[DIV]]
;
  %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %b.insert = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %b.splat = shufflevector <vscale x 4 x i32> %b.insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %div = udiv <vscale x 4 x i32> %a.rev, %b.splat
  ret <vscale x 4 x i32> %div
}

define <vscale x 4 x i32> @binop_reverse_splat_RHS_1(<vscale x 4 x i32> %a, i32 %b) {
; CHECK-LABEL: @binop_reverse_splat_RHS_1(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A:%.*]])
; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[B_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[A_REV]])
; CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 4 x i32> [[A_REV]], [[B_SPLAT]]
; CHECK-NEXT: ret <vscale x 4 x i32> [[DIV]]
;
  %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %b.insert = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %b.splat = shufflevector <vscale x 4 x i32> %b.insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  call void @use_nxv4i32(<vscale x 4 x i32> %a.rev)
  %div = udiv <vscale x 4 x i32> %a.rev, %b.splat
  ret <vscale x 4 x i32> %div
}

define <vscale x 4 x i32> @binop_reverse_splat_LHS(<vscale x 4 x i32> %a, i32 %b) {
; CHECK-LABEL: @binop_reverse_splat_LHS(
; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[B_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[DIV1:%.*]] = udiv <vscale x 4 x i32> [[B_SPLAT]], [[A:%.*]]
; CHECK-NEXT: [[DIV:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[DIV1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[DIV]]
;
  %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %b.insert = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %b.splat = shufflevector <vscale x 4 x i32> %b.insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %div = udiv <vscale x 4 x i32> %b.splat, %a.rev
  ret <vscale x 4 x i32> %div
}

define <vscale x 4 x i32> @binop_reverse_splat_LHS_1(<vscale x 4 x i32> %a, i32 %b) {
; CHECK-LABEL: @binop_reverse_splat_LHS_1(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A:%.*]])
; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[B_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[A_REV]])
; CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 4 x i32> [[B_SPLAT]], [[A_REV]]
; CHECK-NEXT: ret <vscale x 4 x i32> [[DIV]]
;
  %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %b.insert = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %b.splat = shufflevector <vscale x 4 x i32> %b.insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  call void @use_nxv4i32(<vscale x 4 x i32> %a.rev)
  %div = udiv <vscale x 4 x i32> %b.splat, %a.rev
  ret <vscale x 4 x i32> %div
}

define <vscale x 4 x float> @unop_reverse(<vscale x 4 x float> %a) {
; CHECK-LABEL: @unop_reverse(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[A:%.*]])
; CHECK-NEXT: [[NEG:%.*]] = fneg fast <vscale x 4 x float> [[A_REV]]
; CHECK-NEXT: ret <vscale x 4 x float> [[NEG]]
;
  %a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
  %neg = fneg fast <vscale x 4 x float> %a.rev
  ret <vscale x 4 x float> %neg
}

define <vscale x 4 x float> @unop_reverse_1(<vscale x 4 x float> %a) {
; CHECK-LABEL: @unop_reverse_1(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[A:%.*]])
; CHECK-NEXT: call void @use_nxv4f32(<vscale x 4 x float> [[A_REV]])
; CHECK-NEXT: [[NEG:%.*]] = fneg fast <vscale x 4 x float> [[A_REV]]
; CHECK-NEXT: ret <vscale x 4 x float> [[NEG]]
;
  %a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
  call void @use_nxv4f32(<vscale x 4 x float> %a.rev)
  %neg = fneg fast <vscale x 4 x float> %a.rev
  ret <vscale x 4 x float> %neg
}

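; The same fold applies to icmp: compare the original vectors and reverse the
; i1 result, provided the operand reverses have no other uses.
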
define <vscale x 4 x i1> @icmp_reverse(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: @icmp_reverse(
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[CMP1]])
; CHECK-NEXT: ret <vscale x 4 x i1> [[CMP]]
;
  %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %cmp = icmp eq <vscale x 4 x i32> %a.rev, %b.rev
  ret <vscale x 4 x i1> %cmp
}

define <vscale x 4 x i1> @icmp_reverse_1(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: @icmp_reverse_1(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A:%.*]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[A_REV]])
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq <vscale x 4 x i32> [[A]], [[B:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[CMP1]])
; CHECK-NEXT: ret <vscale x 4 x i1> [[CMP]]
;
  %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  call void @use_nxv4i32(<vscale x 4 x i32> %a.rev)
  %cmp = icmp eq <vscale x 4 x i32> %a.rev, %b.rev
  ret <vscale x 4 x i1> %cmp
}

define <vscale x 4 x i1> @icmp_reverse_2(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: @icmp_reverse_2(
; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[B_REV]])
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq <vscale x 4 x i32> [[A:%.*]], [[B]]
; CHECK-NEXT: [[CMP:%.*]] = call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[CMP1]])
; CHECK-NEXT: ret <vscale x 4 x i1> [[CMP]]
;
  %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  call void @use_nxv4i32(<vscale x 4 x i32> %b.rev)
  %cmp = icmp eq <vscale x 4 x i32> %a.rev, %b.rev
  ret <vscale x 4 x i1> %cmp
}

define <vscale x 4 x i1> @icmp_reverse_3(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: @icmp_reverse_3(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A:%.*]])
; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[A_REV]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[B_REV]])
; CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 4 x i32> [[A_REV]], [[B_REV]]
; CHECK-NEXT: ret <vscale x 4 x i1> [[CMP]]
;
  %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  call void @use_nxv4i32(<vscale x 4 x i32> %a.rev)
  call void @use_nxv4i32(<vscale x 4 x i32> %b.rev)
  %cmp = icmp eq <vscale x 4 x i32> %a.rev, %b.rev
  ret <vscale x 4 x i1> %cmp
}

define <vscale x 4 x i1> @icmp_reverse_splat_RHS(<vscale x 4 x i32> %a, i32 %b) {
; CHECK-LABEL: @icmp_reverse_splat_RHS(
; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[B_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt <vscale x 4 x i32> [[A:%.*]], [[B_SPLAT]]
; CHECK-NEXT: [[CMP:%.*]] = call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[CMP1]])
; CHECK-NEXT: ret <vscale x 4 x i1> [[CMP]]
;
  %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %b.insert = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %b.splat = shufflevector <vscale x 4 x i32> %b.insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %cmp = icmp sgt <vscale x 4 x i32> %a.rev, %b.splat
  ret <vscale x 4 x i1> %cmp
}

define <vscale x 4 x i1> @icmp_reverse_splat_RHS_1(<vscale x 4 x i32> %a, i32 %b) {
; CHECK-LABEL: @icmp_reverse_splat_RHS_1(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A:%.*]])
; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[B_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[A_REV]])
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <vscale x 4 x i32> [[A_REV]], [[B_SPLAT]]
; CHECK-NEXT: ret <vscale x 4 x i1> [[CMP]]
;
  %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %b.insert = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %b.splat = shufflevector <vscale x 4 x i32> %b.insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  call void @use_nxv4i32(<vscale x 4 x i32> %a.rev)
  %cmp = icmp sgt <vscale x 4 x i32> %a.rev, %b.splat
  ret <vscale x 4 x i1> %cmp
}

define <vscale x 4 x i1> @icmp_reverse_splat_LHS(<vscale x 4 x i32> %a, i32 %b) {
; CHECK-LABEL: @icmp_reverse_splat_LHS(
; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[B_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[CMP1:%.*]] = icmp ult <vscale x 4 x i32> [[B_SPLAT]], [[A:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[CMP1]])
; CHECK-NEXT: ret <vscale x 4 x i1> [[CMP]]
;
  %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %b.insert = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %b.splat = shufflevector <vscale x 4 x i32> %b.insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %cmp = icmp ult <vscale x 4 x i32> %b.splat, %a.rev
  ret <vscale x 4 x i1> %cmp
}

define <vscale x 4 x i1> @icmp_reverse_splat_LHS_1(<vscale x 4 x i32> %a, i32 %b) {
; CHECK-LABEL: @icmp_reverse_splat_LHS_1(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A:%.*]])
; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[B_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[A_REV]])
; CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 4 x i32> [[B_SPLAT]], [[A_REV]]
; CHECK-NEXT: ret <vscale x 4 x i1> [[CMP]]
;
  %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  %b.insert = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %b.splat = shufflevector <vscale x 4 x i32> %b.insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  call void @use_nxv4i32(<vscale x 4 x i32> %a.rev)
  %cmp = icmp ult <vscale x 4 x i32> %b.splat, %a.rev
  ret <vscale x 4 x i1> %cmp
}

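; select: when the condition and both selected values are reverses, the select
; is performed on the original values and a single reverse is applied to the
; result.
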
define <vscale x 4 x i32> @select_reverse(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: @select_reverse(
; CHECK-NEXT: [[SELECT1:%.*]] = select <vscale x 4 x i1> [[A:%.*]], <vscale x 4 x i32> [[B:%.*]], <vscale x 4 x i32> [[C:%.*]]
; CHECK-NEXT: [[SELECT:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[SELECT1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[SELECT]]
;
  %a.rev = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %c.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %c)
  %select = select <vscale x 4 x i1> %a.rev, <vscale x 4 x i32> %b.rev, <vscale x 4 x i32> %c.rev
  ret <vscale x 4 x i32> %select
}

define <vscale x 4 x i32> @select_reverse_1(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: @select_reverse_1(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[A:%.*]])
; CHECK-NEXT: call void @use_nxv4i1(<vscale x 4 x i1> [[A_REV]])
; CHECK-NEXT: [[SELECT1:%.*]] = select <vscale x 4 x i1> [[A]], <vscale x 4 x i32> [[B:%.*]], <vscale x 4 x i32> [[C:%.*]]
; CHECK-NEXT: [[SELECT:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[SELECT1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[SELECT]]
;
  %a.rev = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %c.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %c)
  call void @use_nxv4i1(<vscale x 4 x i1> %a.rev)
  %select = select <vscale x 4 x i1> %a.rev, <vscale x 4 x i32> %b.rev, <vscale x 4 x i32> %c.rev
  ret <vscale x 4 x i32> %select
}

define <vscale x 4 x i32> @select_reverse_2(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: @select_reverse_2(
; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[B_REV]])
; CHECK-NEXT: [[SELECT1:%.*]] = select <vscale x 4 x i1> [[A:%.*]], <vscale x 4 x i32> [[B]], <vscale x 4 x i32> [[C:%.*]]
; CHECK-NEXT: [[SELECT:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[SELECT1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[SELECT]]
;
  %a.rev = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %c.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %c)
  call void @use_nxv4i32(<vscale x 4 x i32> %b.rev)
  %select = select <vscale x 4 x i1> %a.rev, <vscale x 4 x i32> %b.rev, <vscale x 4 x i32> %c.rev
  ret <vscale x 4 x i32> %select
}

define <vscale x 4 x i32> @select_reverse_3(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: @select_reverse_3(
; CHECK-NEXT: [[C_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[C:%.*]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[C_REV]])
; CHECK-NEXT: [[SELECT1:%.*]] = select <vscale x 4 x i1> [[A:%.*]], <vscale x 4 x i32> [[B:%.*]], <vscale x 4 x i32> [[C]]
; CHECK-NEXT: [[SELECT:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[SELECT1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[SELECT]]
;
  %a.rev = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %c.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %c)
  call void @use_nxv4i32(<vscale x 4 x i32> %c.rev)
  %select = select <vscale x 4 x i1> %a.rev, <vscale x 4 x i32> %b.rev, <vscale x 4 x i32> %c.rev
  ret <vscale x 4 x i32> %select
}

define <vscale x 4 x i32> @select_reverse_4(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: @select_reverse_4(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[A:%.*]])
; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]])
; CHECK-NEXT: call void @use_nxv4i1(<vscale x 4 x i1> [[A_REV]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[B_REV]])
; CHECK-NEXT: [[SELECT1:%.*]] = select <vscale x 4 x i1> [[A]], <vscale x 4 x i32> [[B]], <vscale x 4 x i32> [[C:%.*]]
; CHECK-NEXT: [[SELECT:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[SELECT1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[SELECT]]
;
  %a.rev = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %c.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %c)
  call void @use_nxv4i1(<vscale x 4 x i1> %a.rev)
  call void @use_nxv4i32(<vscale x 4 x i32> %b.rev)
  %select = select <vscale x 4 x i1> %a.rev, <vscale x 4 x i32> %b.rev, <vscale x 4 x i32> %c.rev
  ret <vscale x 4 x i32> %select
}

define <vscale x 4 x i32> @select_reverse_5(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: @select_reverse_5(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[A:%.*]])
; CHECK-NEXT: [[C_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[C:%.*]])
; CHECK-NEXT: call void @use_nxv4i1(<vscale x 4 x i1> [[A_REV]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[C_REV]])
; CHECK-NEXT: [[SELECT1:%.*]] = select <vscale x 4 x i1> [[A]], <vscale x 4 x i32> [[B:%.*]], <vscale x 4 x i32> [[C]]
; CHECK-NEXT: [[SELECT:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[SELECT1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[SELECT]]
;
  %a.rev = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %c.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %c)
  call void @use_nxv4i1(<vscale x 4 x i1> %a.rev)
  call void @use_nxv4i32(<vscale x 4 x i32> %c.rev)
  %select = select <vscale x 4 x i1> %a.rev, <vscale x 4 x i32> %b.rev, <vscale x 4 x i32> %c.rev
  ret <vscale x 4 x i32> %select
}

define <vscale x 4 x i32> @select_reverse_6(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: @select_reverse_6(
; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]])
; CHECK-NEXT: [[C_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[C:%.*]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[B_REV]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[C_REV]])
; CHECK-NEXT: [[SELECT1:%.*]] = select <vscale x 4 x i1> [[A:%.*]], <vscale x 4 x i32> [[B]], <vscale x 4 x i32> [[C]]
; CHECK-NEXT: [[SELECT:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[SELECT1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[SELECT]]
;
  %a.rev = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %c.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %c)
  call void @use_nxv4i32(<vscale x 4 x i32> %b.rev)
  call void @use_nxv4i32(<vscale x 4 x i32> %c.rev)
  %select = select <vscale x 4 x i1> %a.rev, <vscale x 4 x i32> %b.rev, <vscale x 4 x i32> %c.rev
  ret <vscale x 4 x i32> %select
}

define <vscale x 4 x i32> @select_reverse_7(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: @select_reverse_7(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[A:%.*]])
; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]])
; CHECK-NEXT: [[C_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[C:%.*]])
; CHECK-NEXT: call void @use_nxv4i1(<vscale x 4 x i1> [[A_REV]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[B_REV]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[C_REV]])
; CHECK-NEXT: [[SELECT:%.*]] = select <vscale x 4 x i1> [[A_REV]], <vscale x 4 x i32> [[B_REV]], <vscale x 4 x i32> [[C_REV]]
; CHECK-NEXT: ret <vscale x 4 x i32> [[SELECT]]
;
  %a.rev = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %c.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %c)
  call void @use_nxv4i1(<vscale x 4 x i1> %a.rev)
  call void @use_nxv4i32(<vscale x 4 x i32> %b.rev)
  call void @use_nxv4i32(<vscale x 4 x i32> %c.rev)
  %select = select <vscale x 4 x i1> %a.rev, <vscale x 4 x i32> %b.rev, <vscale x 4 x i32> %c.rev
  ret <vscale x 4 x i32> %select
}

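; As above, but one of the selected values is a splat; the splat does not block
; the fold because it does not need its own reverse.
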
define <vscale x 4 x i32> @select_reverse_splat_false(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, i32 %c) {
; CHECK-LABEL: @select_reverse_splat_false(
; CHECK-NEXT: [[C_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[C:%.*]], i64 0
; CHECK-NEXT: [[C_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[C_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[SELECT1:%.*]] = select <vscale x 4 x i1> [[A:%.*]], <vscale x 4 x i32> [[B:%.*]], <vscale x 4 x i32> [[C_SPLAT]]
; CHECK-NEXT: [[SELECT:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[SELECT1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[SELECT]]
;
  %a.rev = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %c.insert = insertelement <vscale x 4 x i32> poison, i32 %c, i32 0
  %c.splat = shufflevector <vscale x 4 x i32> %c.insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %select = select <vscale x 4 x i1> %a.rev, <vscale x 4 x i32> %b.rev, <vscale x 4 x i32> %c.splat
  ret <vscale x 4 x i32> %select
}

define <vscale x 4 x i32> @select_reverse_splat_false_1(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, i32 %c) {
; CHECK-LABEL: @select_reverse_splat_false_1(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[A:%.*]])
; CHECK-NEXT: [[C_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[C:%.*]], i64 0
; CHECK-NEXT: [[C_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[C_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: call void @use_nxv4i1(<vscale x 4 x i1> [[A_REV]])
; CHECK-NEXT: [[SELECT1:%.*]] = select <vscale x 4 x i1> [[A]], <vscale x 4 x i32> [[B:%.*]], <vscale x 4 x i32> [[C_SPLAT]]
; CHECK-NEXT: [[SELECT:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[SELECT1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[SELECT]]
;
  %a.rev = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %c.insert = insertelement <vscale x 4 x i32> poison, i32 %c, i32 0
  %c.splat = shufflevector <vscale x 4 x i32> %c.insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  call void @use_nxv4i1(<vscale x 4 x i1> %a.rev)
  %select = select <vscale x 4 x i1> %a.rev, <vscale x 4 x i32> %b.rev, <vscale x 4 x i32> %c.splat
  ret <vscale x 4 x i32> %select
}

define <vscale x 4 x i32> @select_reverse_splat_false_2(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, i32 %c) {
; CHECK-LABEL: @select_reverse_splat_false_2(
; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]])
; CHECK-NEXT: [[C_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[C:%.*]], i64 0
; CHECK-NEXT: [[C_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[C_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[B_REV]])
; CHECK-NEXT: [[SELECT1:%.*]] = select <vscale x 4 x i1> [[A:%.*]], <vscale x 4 x i32> [[B]], <vscale x 4 x i32> [[C_SPLAT]]
; CHECK-NEXT: [[SELECT:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[SELECT1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[SELECT]]
;
  %a.rev = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %c.insert = insertelement <vscale x 4 x i32> poison, i32 %c, i32 0
  %c.splat = shufflevector <vscale x 4 x i32> %c.insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  call void @use_nxv4i32(<vscale x 4 x i32> %b.rev)
  %select = select <vscale x 4 x i1> %a.rev, <vscale x 4 x i32> %b.rev, <vscale x 4 x i32> %c.splat
  ret <vscale x 4 x i32> %select
}

define <vscale x 4 x i32> @select_reverse_splat_false_3(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, i32 %c) {
; CHECK-LABEL: @select_reverse_splat_false_3(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[A:%.*]])
; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]])
; CHECK-NEXT: [[C_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[C:%.*]], i64 0
; CHECK-NEXT: [[C_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[C_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: call void @use_nxv4i1(<vscale x 4 x i1> [[A_REV]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[B_REV]])
; CHECK-NEXT: [[SELECT:%.*]] = select <vscale x 4 x i1> [[A_REV]], <vscale x 4 x i32> [[B_REV]], <vscale x 4 x i32> [[C_SPLAT]]
; CHECK-NEXT: ret <vscale x 4 x i32> [[SELECT]]
;
  %a.rev = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %c.insert = insertelement <vscale x 4 x i32> poison, i32 %c, i32 0
  %c.splat = shufflevector <vscale x 4 x i32> %c.insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  call void @use_nxv4i1(<vscale x 4 x i1> %a.rev)
  call void @use_nxv4i32(<vscale x 4 x i32> %b.rev)
  %select = select <vscale x 4 x i1> %a.rev, <vscale x 4 x i32> %b.rev, <vscale x 4 x i32> %c.splat
  ret <vscale x 4 x i32> %select
}

define <vscale x 4 x i32> @select_reverse_splat_true(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, i32 %c) {
; CHECK-LABEL: @select_reverse_splat_true(
; CHECK-NEXT: [[C_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[C:%.*]], i64 0
; CHECK-NEXT: [[C_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[C_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[SELECT1:%.*]] = select <vscale x 4 x i1> [[A:%.*]], <vscale x 4 x i32> [[C_SPLAT]], <vscale x 4 x i32> [[B:%.*]]
; CHECK-NEXT: [[SELECT:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[SELECT1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[SELECT]]
;
  %a.rev = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %c.insert = insertelement <vscale x 4 x i32> poison, i32 %c, i32 0
  %c.splat = shufflevector <vscale x 4 x i32> %c.insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %select = select <vscale x 4 x i1> %a.rev, <vscale x 4 x i32> %c.splat, <vscale x 4 x i32> %b.rev
  ret <vscale x 4 x i32> %select
}

define <vscale x 4 x i32> @select_reverse_splat_true_1(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, i32 %c) {
; CHECK-LABEL: @select_reverse_splat_true_1(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[A:%.*]])
; CHECK-NEXT: [[C_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[C:%.*]], i64 0
; CHECK-NEXT: [[C_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[C_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: call void @use_nxv4i1(<vscale x 4 x i1> [[A_REV]])
; CHECK-NEXT: [[SELECT1:%.*]] = select <vscale x 4 x i1> [[A]], <vscale x 4 x i32> [[C_SPLAT]], <vscale x 4 x i32> [[B:%.*]]
; CHECK-NEXT: [[SELECT:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[SELECT1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[SELECT]]
;
  %a.rev = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %c.insert = insertelement <vscale x 4 x i32> poison, i32 %c, i32 0
  %c.splat = shufflevector <vscale x 4 x i32> %c.insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  call void @use_nxv4i1(<vscale x 4 x i1> %a.rev)
  %select = select <vscale x 4 x i1> %a.rev, <vscale x 4 x i32> %c.splat, <vscale x 4 x i32> %b.rev
  ret <vscale x 4 x i32> %select
}

define <vscale x 4 x i32> @select_reverse_splat_true_2(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, i32 %c) {
; CHECK-LABEL: @select_reverse_splat_true_2(
; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]])
; CHECK-NEXT: [[C_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[C:%.*]], i64 0
; CHECK-NEXT: [[C_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[C_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[B_REV]])
; CHECK-NEXT: [[SELECT1:%.*]] = select <vscale x 4 x i1> [[A:%.*]], <vscale x 4 x i32> [[C_SPLAT]], <vscale x 4 x i32> [[B]]
; CHECK-NEXT: [[SELECT:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[SELECT1]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[SELECT]]
;
  %a.rev = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %c.insert = insertelement <vscale x 4 x i32> poison, i32 %c, i32 0
  %c.splat = shufflevector <vscale x 4 x i32> %c.insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  call void @use_nxv4i32(<vscale x 4 x i32> %b.rev)
  %select = select <vscale x 4 x i1> %a.rev, <vscale x 4 x i32> %c.splat, <vscale x 4 x i32> %b.rev
  ret <vscale x 4 x i32> %select
}

define <vscale x 4 x i32> @select_reverse_splat_true_3(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, i32 %c) {
; CHECK-LABEL: @select_reverse_splat_true_3(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[A:%.*]])
; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]])
; CHECK-NEXT: [[C_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[C:%.*]], i64 0
; CHECK-NEXT: [[C_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[C_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: call void @use_nxv4i1(<vscale x 4 x i1> [[A_REV]])
; CHECK-NEXT: call void @use_nxv4i32(<vscale x 4 x i32> [[B_REV]])
; CHECK-NEXT: [[SELECT:%.*]] = select <vscale x 4 x i1> [[A_REV]], <vscale x 4 x i32> [[C_SPLAT]], <vscale x 4 x i32> [[B_REV]]
; CHECK-NEXT: ret <vscale x 4 x i32> [[SELECT]]
;
  %a.rev = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
  %c.insert = insertelement <vscale x 4 x i32> poison, i32 %c, i32 0
  %c.splat = shufflevector <vscale x 4 x i32> %c.insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  call void @use_nxv4i1(<vscale x 4 x i1> %a.rev)
  call void @use_nxv4i32(<vscale x 4 x i32> %b.rev)
  %select = select <vscale x 4 x i1> %a.rev, <vscale x 4 x i32> %c.splat, <vscale x 4 x i32> %b.rev
  ret <vscale x 4 x i32> %select
}

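; When the result of the operation is itself reversed, the reverses cancel out
; and no reverse remains in the output.
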
define <vscale x 4 x float> @reverse_binop_reverse(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: @reverse_binop_reverse(
; CHECK-NEXT: [[ADD1:%.*]] = fadd <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <vscale x 4 x float> [[ADD1]]
;
  %a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
  %b.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %b)
  %add = fadd <vscale x 4 x float> %a.rev, %b.rev
  %add.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %add)
  ret <vscale x 4 x float> %add.rev
}

define <vscale x 4 x float> @reverse_binop_reverse_splat_RHS(<vscale x 4 x float> %a, float %b) {
; CHECK-LABEL: @reverse_binop_reverse_splat_RHS(
; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[B:%.*]], i64 0
; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[B_INSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[DIV1:%.*]] = fdiv <vscale x 4 x float> [[A:%.*]], [[B_SPLAT]]
; CHECK-NEXT: ret <vscale x 4 x float> [[DIV1]]
;
  %a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
  %b.insert = insertelement <vscale x 4 x float> poison, float %b, i32 0
  %b.splat = shufflevector <vscale x 4 x float> %b.insert, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
  %div = fdiv <vscale x 4 x float> %a.rev, %b.splat
  %div.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %div)
  ret <vscale x 4 x float> %div.rev
}

define <vscale x 4 x float> @reverse_binop_reverse_splat_LHS(<vscale x 4 x float> %a, float %b) {
; CHECK-LABEL: @reverse_binop_reverse_splat_LHS(
; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[B:%.*]], i64 0
; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[B_INSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[DIV1:%.*]] = fdiv <vscale x 4 x float> [[B_SPLAT]], [[A:%.*]]
; CHECK-NEXT: ret <vscale x 4 x float> [[DIV1]]
;
  %a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
  %b.insert = insertelement <vscale x 4 x float> poison, float %b, i32 0
  %b.splat = shufflevector <vscale x 4 x float> %b.insert, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
  %div = fdiv <vscale x 4 x float> %b.splat, %a.rev
  %div.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %div)
  ret <vscale x 4 x float> %div.rev
}

define <vscale x 4 x i1> @reverse_fcmp_reverse(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: @reverse_fcmp_reverse(
; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast olt <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <vscale x 4 x i1> [[CMP1]]
;
  %a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
  %b.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %b)
  %cmp = fcmp fast olt <vscale x 4 x float> %a.rev, %b.rev
  %cmp.rev = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %cmp)
  ret <vscale x 4 x i1> %cmp.rev
}

define <vscale x 4 x float> @reverse_select_reverse(<vscale x 4 x i1> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: @reverse_select_reverse(
; CHECK-NEXT: [[SELECT1:%.*]] = select fast <vscale x 4 x i1> [[A:%.*]], <vscale x 4 x float> [[B:%.*]], <vscale x 4 x float> [[C:%.*]]
; CHECK-NEXT: ret <vscale x 4 x float> [[SELECT1]]
;
  %a.rev = tail call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  %b.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %b)
  %c.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %c)
  %select = select fast <vscale x 4 x i1> %a.rev, <vscale x 4 x float> %b.rev, <vscale x 4 x float> %c.rev
  %select.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %select)
  ret <vscale x 4 x float> %select.rev
}

define <vscale x 4 x float> @reverse_unop_reverse(<vscale x 4 x float> %a) {
; CHECK-LABEL: @reverse_unop_reverse(
; CHECK-NEXT: [[NEG1:%.*]] = fneg <vscale x 4 x float> [[A:%.*]]
; CHECK-NEXT: ret <vscale x 4 x float> [[NEG1]]
;
  %a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
  %neg = fneg <vscale x 4 x float> %a.rev
  %neg.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %neg)
  ret <vscale x 4 x float> %neg.rev
}

declare void @use_nxv4i1(<vscale x 4 x i1>)
declare void @use_nxv4i32(<vscale x 4 x i32>)
declare void @use_nxv4f32(<vscale x 4 x float>)

declare <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1>)
declare <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32>)
declare <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float>)