; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme -force-streaming < %s | FileCheck %s

; Tests that the predicated SVE floating-point conversion intrinsics select
; the corresponding single SVE instruction, both in normal SVE mode and in
; SME streaming mode. Each test passes an inactive-lanes value %a, a
; predicate %pg and a source vector %b.

;
; FCVT (float-to-float precision conversions)
;

define <vscale x 8 x half> @fcvt_f16_f32(<vscale x 8 x half> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcvt_f16_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.h, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f32(<vscale x 8 x half> %a,
                                                                <vscale x 4 x i1> %pg,
                                                                <vscale x 4 x float> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 8 x half> @fcvt_f16_f64(<vscale x 8 x half> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcvt_f16_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.h, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f64(<vscale x 8 x half> %a,
                                                                <vscale x 2 x i1> %pg,
                                                                <vscale x 2 x double> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fcvt_f32_f16(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fcvt_f32_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.s, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f16(<vscale x 4 x float> %a,
                                                                 <vscale x 4 x i1> %pg,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 4 x float> @fcvt_f32_f64(<vscale x 4 x float> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcvt_f32_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.s, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f64(<vscale x 4 x float> %a,
                                                                 <vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x double> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fcvt_f64_f16(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fcvt_f64_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.d, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f16(<vscale x 2 x double> %a,
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 8 x half> %b)
  ret <vscale x 2 x double> %out
}

define <vscale x 2 x double> @fcvt_f64_f32(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcvt_f64_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.d, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f32(<vscale x 2 x double> %a,
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 2 x double> %out
}

;
; FCVTZS (float to signed integer, round toward zero)
;

define <vscale x 8 x i16> @fcvtzs_i16_f16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fcvtzs_i16_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzs.nxv8i16.nxv8f16(<vscale x 8 x i16> %a,
                                                                          <vscale x 8 x i1> %pg,
                                                                          <vscale x 8 x half> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @fcvtzs_i32_f32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcvtzs_i32_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.nxv4i32.nxv4f32(<vscale x 4 x i32> %a,
                                                                          <vscale x 4 x i1> %pg,
                                                                          <vscale x 4 x float> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @fcvtzs_i64_f64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcvtzs_i64_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.nxv2i64.nxv2f64(<vscale x 2 x i64> %a,
                                                                          <vscale x 2 x i1> %pg,
                                                                          <vscale x 2 x double> %b)
  ret <vscale x 2 x i64> %out
}

define <vscale x 4 x i32> @fcvtzs_i32_f16(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fcvtzs_i32_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs z0.s, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f16(<vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i1> %pg,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @fcvtzs_i32_f64(<vscale x 4 x i32> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcvtzs_i32_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs z0.s, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f64(<vscale x 4 x i32> %a,
                                                                 <vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x double> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @fcvtzs_i64_f16(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fcvtzs_i64_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f16(<vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i1> %pg,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @fcvtzs_i64_f32(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcvtzs_i64_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f32(<vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i1> %pg,
                                                                 <vscale x 4 x float> %b)
  ret <vscale x 2 x i64> %out
}

;
; FCVTZU (float to unsigned integer, round toward zero)
;

define <vscale x 8 x i16> @fcvtzu_i16_f16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fcvtzu_i16_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzu.nxv8i16.nxv8f16(<vscale x 8 x i16> %a,
                                                                          <vscale x 8 x i1> %pg,
                                                                          <vscale x 8 x half> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @fcvtzu_i32_f32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcvtzu_i32_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.nxv4i32.nxv4f32(<vscale x 4 x i32> %a,
                                                                          <vscale x 4 x i1> %pg,
                                                                          <vscale x 4 x float> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @fcvtzu_i64_f64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcvtzu_i64_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.nxv2i64.nxv2f64(<vscale x 2 x i64> %a,
                                                                          <vscale x 2 x i1> %pg,
                                                                          <vscale x 2 x double> %b)
  ret <vscale x 2 x i64> %out
}

define <vscale x 4 x i32> @fcvtzu_i32_f16(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fcvtzu_i32_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu z0.s, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f16(<vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i1> %pg,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @fcvtzu_i32_f64(<vscale x 4 x i32> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcvtzu_i32_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu z0.s, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f64(<vscale x 4 x i32> %a,
                                                                 <vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x double> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @fcvtzu_i64_f16(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fcvtzu_i64_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu z0.d, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f16(<vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i1> %pg,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @fcvtzu_i64_f32(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcvtzu_i64_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu z0.d, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f32(<vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i1> %pg,
                                                                 <vscale x 4 x float> %b)
  ret <vscale x 2 x i64> %out
}

;
; SCVTF (signed integer to float)
;

define <vscale x 8 x half> @scvtf_f16_i16(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
; CHECK-LABEL: scvtf_f16_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.scvtf.nxv8f16.nxv8i16(<vscale x 8 x half> %a,
                                                                          <vscale x 8 x i1> %pg,
                                                                          <vscale x 8 x i16> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @scvtf_f32_i32(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: scvtf_f32_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32(<vscale x 4 x float> %a,
                                                                           <vscale x 4 x i1> %pg,
                                                                           <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @scvtf_f64_i64(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: scvtf_f64_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64(<vscale x 2 x double> %a,
                                                                            <vscale x 2 x i1> %pg,
                                                                            <vscale x 2 x i64> %b)
  ret <vscale x 2 x double> %out
}

define <vscale x 8 x half> @scvtf_f16_i32(<vscale x 8 x half> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: scvtf_f16_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf z0.h, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i32(<vscale x 8 x half> %a,
                                                                 <vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 8 x half> @scvtf_f16_i64(<vscale x 8 x half> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: scvtf_f16_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf z0.h, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i64(<vscale x 8 x half> %a,
                                                                 <vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @scvtf_f32_i64(<vscale x 4 x float> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: scvtf_f32_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf z0.s, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.scvtf.f32i64(<vscale x 4 x float> %a,
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @scvtf_f64_i32(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: scvtf_f64_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf z0.d, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.scvtf.f64i32(<vscale x 2 x double> %a,
                                                                   <vscale x 2 x i1> %pg,
                                                                   <vscale x 4 x i32> %b)
  ret <vscale x 2 x double> %out
}

;
; UCVTF (unsigned integer to float)
;

define <vscale x 8 x half> @ucvtf_f16_i16(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
; CHECK-LABEL: ucvtf_f16_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.nxv8f16.nxv8i16(<vscale x 8 x half> %a,
                                                                          <vscale x 8 x i1> %pg,
                                                                          <vscale x 8 x i16> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @ucvtf_f32_i32(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: ucvtf_f32_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32(<vscale x 4 x float> %a,
                                                                           <vscale x 4 x i1> %pg,
                                                                           <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @ucvtf_f64_i64(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: ucvtf_f64_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64(<vscale x 2 x double> %a,
                                                                            <vscale x 2 x i1> %pg,
                                                                            <vscale x 2 x i64> %b)
  ret <vscale x 2 x double> %out
}

define <vscale x 8 x half> @ucvtf_f16_i32(<vscale x 8 x half> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: ucvtf_f16_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf z0.h, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i32(<vscale x 8 x half> %a,
                                                                 <vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 8 x half> @ucvtf_f16_i64(<vscale x 8 x half> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: ucvtf_f16_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf z0.h, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i64(<vscale x 8 x half> %a,
                                                                 <vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @ucvtf_f32_i64(<vscale x 4 x float> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: ucvtf_f32_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf z0.s, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.f32i64(<vscale x 4 x float> %a,
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @ucvtf_f64_i32(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: ucvtf_f64_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf z0.d, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.f64i32(<vscale x 2 x double> %a,
                                                                   <vscale x 2 x i1> %pg,
                                                                   <vscale x 4 x i32> %b)
  ret <vscale x 2 x double> %out
}

declare <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f32(<vscale x 8 x half>, <vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f64(<vscale x 8 x half>, <vscale x 2 x i1>, <vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f16(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f64(<vscale x 4 x float>, <vscale x 2 x i1>, <vscale x 2 x double>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f16(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 8 x half>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f32(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 4 x float>)

declare <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzs.nxv8i16.nxv8f16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.nxv4i32.nxv4f32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.nxv2i64.nxv2f64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x double>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f16(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f64(<vscale x 4 x i32>, <vscale x 2 x i1>, <vscale x 2 x double>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f16(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 8 x half>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f32(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 4 x float>)

declare <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzu.nxv8i16.nxv8f16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.nxv4i32.nxv4f32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.nxv2i64.nxv2f64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x double>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f16(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f64(<vscale x 4 x i32>, <vscale x 2 x i1>, <vscale x 2 x double>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f16(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 8 x half>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f32(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 4 x float>)

declare <vscale x 8 x half> @llvm.aarch64.sve.scvtf.nxv8f16.nxv8i16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x float> @llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x double> @llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i32(<vscale x 8 x half>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i64(<vscale x 8 x half>, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 4 x float> @llvm.aarch64.sve.scvtf.f32i64(<vscale x 4 x float>, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 2 x double> @llvm.aarch64.sve.scvtf.f64i32(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 4 x i32>)

declare <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.nxv8f16.nxv8i16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i32(<vscale x 8 x half>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i64(<vscale x 8 x half>, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.f32i64(<vscale x 4 x float>, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.f64i32(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 4 x i32>)