// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -S -disable-O0-optnone -Werror -Wall -o /dev/null %s

#include <arm_sve.h>

#if defined __ARM_FEATURE_SME
#define MODE_ATTR __arm_streaming
#else
#define MODE_ATTR
#endif

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
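
// For example (illustrative only, not a checked assertion): with
// SVE_OVERLOADED_FORMS defined, SVE_ACLE_FUNC(svasr,_s8,_z,)(pg, op1, op2)
// expands to the overloaded call svasr_z(pg, op1, op2); in the default build it
// expands to the fully suffixed form svasr_s8_z(pg, op1, op2).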

// CHECK-LABEL: @test_svasr_s8_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z15test_svasr_s8_zu10__SVBool_tu10__SVInt8_tu11__SVUint8_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
//
svint8_t test_svasr_s8_z(svbool_t pg, svint8_t op1, svuint8_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s16_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s16_zu10__SVBool_tu11__SVInt16_tu12__SVUint16_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
//
svint16_t test_svasr_s16_z(svbool_t pg, svint16_t op1, svuint16_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s32_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s32_zu10__SVBool_tu11__SVInt32_tu12__SVUint32_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svasr_s32_z(svbool_t pg, svint32_t op1, svuint32_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s64_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer
// CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s64_zu10__SVBool_tu11__SVInt64_tu12__SVUint64_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svasr_s64_z(svbool_t pg, svint64_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s64,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s8_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svasr_s8_mu10__SVBool_tu10__SVInt8_tu11__SVUint8_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_s8_m(svbool_t pg, svint8_t op1, svuint8_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s16_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s16_mu10__SVBool_tu11__SVInt16_tu12__SVUint16_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_s16_m(svbool_t pg, svint16_t op1, svuint16_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s32_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s32_mu10__SVBool_tu11__SVInt32_tu12__SVUint32_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_s32_m(svbool_t pg, svint32_t op1, svuint32_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s64_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s64_mu10__SVBool_tu11__SVInt64_tu12__SVUint64_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svasr_s64_m(svbool_t pg, svint64_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s64,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s8_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svasr_s8_xu10__SVBool_tu10__SVInt8_tu11__SVUint8_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_s8_x(svbool_t pg, svint8_t op1, svuint8_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s16_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s16_xu10__SVBool_tu11__SVInt16_tu12__SVUint16_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_s16_x(svbool_t pg, svint16_t op1, svuint16_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s32_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s32_xu10__SVBool_tu11__SVInt32_tu12__SVUint32_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_s32_x(svbool_t pg, svint32_t op1, svuint32_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s32,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s64_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s64_xu10__SVBool_tu11__SVInt64_tu12__SVUint64_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svasr_s64_x(svbool_t pg, svint64_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s64,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s64_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer
// CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s64_zu10__SVBool_tu11__SVInt64_tm(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svasr_n_s64_z(svbool_t pg, svint64_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s64,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s64_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s64_mu10__SVBool_tu11__SVInt64_tm(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svasr_n_s64_m(svbool_t pg, svint64_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s64,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s64_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s64_xu10__SVBool_tu11__SVInt64_tm(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svasr_n_s64_x(svbool_t pg, svint64_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s64,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s8_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z20test_svasr_wide_s8_zu10__SVBool_tu10__SVInt8_tu12__SVUint64_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
//
svint8_t test_svasr_wide_s8_z(svbool_t pg, svint8_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_s8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s16_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z21test_svasr_wide_s16_zu10__SVBool_tu11__SVInt16_tu12__SVUint64_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
//
svint16_t test_svasr_wide_s16_z(svbool_t pg, svint16_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_s16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s32_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z21test_svasr_wide_s32_zu10__SVBool_tu11__SVInt32_tu12__SVUint64_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svasr_wide_s32_z(svbool_t pg, svint32_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_s32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s8_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svasr_wide_s8_mu10__SVBool_tu10__SVInt8_tu12__SVUint64_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_wide_s8_m(svbool_t pg, svint8_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_s8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s16_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svasr_wide_s16_mu10__SVBool_tu11__SVInt16_tu12__SVUint64_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_wide_s16_m(svbool_t pg, svint16_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_s16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s32_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svasr_wide_s32_mu10__SVBool_tu11__SVInt32_tu12__SVUint64_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_wide_s32_m(svbool_t pg, svint32_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_s32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s8_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svasr_wide_s8_xu10__SVBool_tu10__SVInt8_tu12__SVUint64_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_wide_s8_x(svbool_t pg, svint8_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_s8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s16_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svasr_wide_s16_xu10__SVBool_tu11__SVInt16_tu12__SVUint64_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_wide_s16_x(svbool_t pg, svint16_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_s16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s32_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svasr_wide_s32_xu10__SVBool_tu11__SVInt32_tu12__SVUint64_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_wide_s32_x(svbool_t pg, svint32_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_s32,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s8_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svasr_n_s8_zu10__SVBool_tu10__SVInt8_th(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
//
svint8_t test_svasr_n_s8_z(svbool_t pg, svint8_t op1, uint8_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s16_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s16_zu10__SVBool_tu11__SVInt16_tt(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
//
svint16_t test_svasr_n_s16_z(svbool_t pg, svint16_t op1, uint16_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s32_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s32_zu10__SVBool_tu11__SVInt32_tj(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svasr_n_s32_z(svbool_t pg, svint32_t op1, uint32_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s8_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z17test_svasr_n_s8_mu10__SVBool_tu10__SVInt8_th(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_n_s8_m(svbool_t pg, svint8_t op1, uint8_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s16_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s16_mu10__SVBool_tu11__SVInt16_tt(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_n_s16_m(svbool_t pg, svint16_t op1, uint16_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s32_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s32_mu10__SVBool_tu11__SVInt32_tj(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_n_s32_m(svbool_t pg, svint32_t op1, uint32_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s8_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z17test_svasr_n_s8_xu10__SVBool_tu10__SVInt8_th(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_n_s8_x(svbool_t pg, svint8_t op1, uint8_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s16_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s16_xu10__SVBool_tu11__SVInt16_tt(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_n_s16_x(svbool_t pg, svint16_t op1, uint16_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s32_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s32_xu10__SVBool_tu11__SVInt32_tj(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_n_s32_x(svbool_t pg, svint32_t op1, uint32_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s32,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s8_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z22test_svasr_wide_n_s8_mu10__SVBool_tu10__SVInt8_tm(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_wide_n_s8_m(svbool_t pg, svint8_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s16_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svasr_wide_n_s16_mu10__SVBool_tu11__SVInt16_tm(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_wide_n_s16_m(svbool_t pg, svint16_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s32_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svasr_wide_n_s32_mu10__SVBool_tu11__SVInt32_tm(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_wide_n_s32_m(svbool_t pg, svint32_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s8_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z22test_svasr_wide_n_s8_zu10__SVBool_tu10__SVInt8_tm(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
//
svint8_t test_svasr_wide_n_s8_z(svbool_t pg, svint8_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s16_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z23test_svasr_wide_n_s16_zu10__SVBool_tu11__SVInt16_tm(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
//
svint16_t test_svasr_wide_n_s16_z(svbool_t pg, svint16_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s32_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z23test_svasr_wide_n_s32_zu10__SVBool_tu11__SVInt32_tm(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svasr_wide_n_s32_z(svbool_t pg, svint32_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s8_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z22test_svasr_wide_n_s8_xu10__SVBool_tu10__SVInt8_tm(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_wide_n_s8_x(svbool_t pg, svint8_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s16_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svasr_wide_n_s16_xu10__SVBool_tu11__SVInt16_tm(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_wide_n_s16_x(svbool_t pg, svint16_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s32_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svasr_wide_n_s32_xu10__SVBool_tu11__SVInt32_tm(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_wide_n_s32_x(svbool_t pg, svint32_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s32,_x,)(pg, op1, op2);
}