// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme -S -disable-O0-optnone -Werror -Wall -o /dev/null %s

#include <arm_sve.h>

#if defined __ARM_FEATURE_SME
#define MODE_ATTR __arm_streaming
#else
#define MODE_ATTR
#endif
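// MODE_ATTR marks each test function as __arm_streaming when __ARM_FEATURE_SME
// is defined (the +sme RUN line above); otherwise it expands to nothing.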

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
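// For example, a call such as SVE_ACLE_FUNC(svextb,_s16,_z,)(pg, op) expands
// to svextb_s16_z(pg, op) in the default build, and to the overloaded form
// svextb_z(pg, op) when SVE_OVERLOADED_FORMS is defined, so the same call
// sites exercise both spellings of each intrinsic.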

// CHECK-LABEL: @test_svextb_s16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sxtb.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svextb_s16_zu10__SVBool_tu11__SVInt16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sxtb.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svextb_s16_z(svbool_t pg, svint16_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svextb,_s16,_z,)(pg, op);
}

// CHECK-LABEL: @test_svextb_s32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sxtb.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svextb_s32_zu10__SVBool_tu11__SVInt32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sxtb.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svextb_s32_z(svbool_t pg, svint32_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svextb,_s32,_z,)(pg, op);
}

// CHECK-LABEL: @test_svextb_s64_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxtb.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svextb_s64_zu10__SVBool_tu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxtb.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svextb_s64_z(svbool_t pg, svint64_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svextb,_s64,_z,)(pg, op);
}

// CHECK-LABEL: @test_svextb_u16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.uxtb.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svextb_u16_zu10__SVBool_tu12__SVUint16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.uxtb.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svextb_u16_z(svbool_t pg, svuint16_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svextb,_u16,_z,)(pg, op);
}

// CHECK-LABEL: @test_svextb_u32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uxtb.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svextb_u32_zu10__SVBool_tu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uxtb.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svextb_u32_z(svbool_t pg, svuint32_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svextb,_u32,_z,)(pg, op);
}

// CHECK-LABEL: @test_svextb_u64_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxtb.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svextb_u64_zu10__SVBool_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxtb.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svextb_u64_z(svbool_t pg, svuint64_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svextb,_u64,_z,)(pg, op);
}

// CHECK-LABEL: @test_svextb_s16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sxtb.nxv8i16(<vscale x 8 x i16> [[INACTIVE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svextb_s16_mu11__SVInt16_tu10__SVBool_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sxtb.nxv8i16(<vscale x 8 x i16> [[INACTIVE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svextb_s16_m(svint16_t inactive, svbool_t pg, svint16_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svextb,_s16,_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svextb_s32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sxtb.nxv4i32(<vscale x 4 x i32> [[INACTIVE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svextb_s32_mu11__SVInt32_tu10__SVBool_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sxtb.nxv4i32(<vscale x 4 x i32> [[INACTIVE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svextb_s32_m(svint32_t inactive, svbool_t pg, svint32_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svextb,_s32,_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svextb_s64_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxtb.nxv2i64(<vscale x 2 x i64> [[INACTIVE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svextb_s64_mu11__SVInt64_tu10__SVBool_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxtb.nxv2i64(<vscale x 2 x i64> [[INACTIVE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svextb_s64_m(svint64_t inactive, svbool_t pg, svint64_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svextb,_s64,_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svextb_u16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.uxtb.nxv8i16(<vscale x 8 x i16> [[INACTIVE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svextb_u16_mu12__SVUint16_tu10__SVBool_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.uxtb.nxv8i16(<vscale x 8 x i16> [[INACTIVE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svextb_u16_m(svuint16_t inactive, svbool_t pg, svuint16_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svextb,_u16,_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svextb_u32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uxtb.nxv4i32(<vscale x 4 x i32> [[INACTIVE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svextb_u32_mu12__SVUint32_tu10__SVBool_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uxtb.nxv4i32(<vscale x 4 x i32> [[INACTIVE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svextb_u32_m(svuint32_t inactive, svbool_t pg, svuint32_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svextb,_u32,_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svextb_u64_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxtb.nxv2i64(<vscale x 2 x i64> [[INACTIVE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svextb_u64_mu12__SVUint64_tu10__SVBool_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxtb.nxv2i64(<vscale x 2 x i64> [[INACTIVE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svextb_u64_m(svuint64_t inactive, svbool_t pg, svuint64_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svextb,_u64,_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svextb_s16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sxtb.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svextb_s16_xu10__SVBool_tu11__SVInt16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sxtb.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svextb_s16_x(svbool_t pg, svint16_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svextb,_s16,_x,)(pg, op);
}

// CHECK-LABEL: @test_svextb_s32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sxtb.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svextb_s32_xu10__SVBool_tu11__SVInt32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sxtb.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svextb_s32_x(svbool_t pg, svint32_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svextb,_s32,_x,)(pg, op);
}

// CHECK-LABEL: @test_svextb_s64_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxtb.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svextb_s64_xu10__SVBool_tu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxtb.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svextb_s64_x(svbool_t pg, svint64_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svextb,_s64,_x,)(pg, op);
}

// CHECK-LABEL: @test_svextb_u16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.uxtb.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svextb_u16_xu10__SVBool_tu12__SVUint16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.uxtb.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svextb_u16_x(svbool_t pg, svuint16_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svextb,_u16,_x,)(pg, op);
}

// CHECK-LABEL: @test_svextb_u32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uxtb.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svextb_u32_xu10__SVBool_tu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uxtb.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svextb_u32_x(svbool_t pg, svuint32_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svextb,_u32,_x,)(pg, op);
}

// CHECK-LABEL: @test_svextb_u64_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxtb.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svextb_u64_xu10__SVBool_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uxtb.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svextb_u64_x(svbool_t pg, svuint64_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svextb,_u64,_x,)(pg, op);
}