; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64 -mattr=+v8.2a,+fullfp16 | FileCheck %s

declare half @llvm.aarch64.sisd.fabd.f16(half, half)
declare half @llvm.aarch64.neon.fmax.f16(half, half)
declare half @llvm.aarch64.neon.fmin.f16(half, half)
declare half @llvm.aarch64.neon.frsqrts.f16(half, half)
declare half @llvm.aarch64.neon.frecps.f16(half, half)
declare half @llvm.aarch64.neon.fmulx.f16(half, half)
declare half @llvm.fabs.f16(half)
declare i32 @llvm.aarch64.neon.facge.i32.f16(half, half)
declare i32 @llvm.aarch64.neon.facgt.i32.f16(half, half)

define dso_local half @t_vabdh_f16(half %a, half %b) {
; CHECK-LABEL: t_vabdh_f16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fabd h0, h0, h1
; CHECK-NEXT:    ret
entry:
  %vabdh_f16 = tail call half @llvm.aarch64.sisd.fabd.f16(half %a, half %b)
  ret half %vabdh_f16
}

define dso_local half @t_vabdh_f16_from_fsub_fabs(half %a, half %b) {
; CHECK-LABEL: t_vabdh_f16_from_fsub_fabs:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fabd h0, h0, h1
; CHECK-NEXT:    ret
entry:
  %sub = fsub half %a, %b
  %abs = tail call half @llvm.fabs.f16(half %sub)
  ret half %abs
}

define dso_local i16 @t_vceqh_f16(half %a, half %b) {
; CHECK-LABEL: t_vceqh_f16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcmp h0, h1
; CHECK-NEXT:    csetm w0, eq
; CHECK-NEXT:    ret
entry:
  %0 = fcmp oeq half %a, %b
  %vcmpd = sext i1 %0 to i16
  ret i16 %vcmpd
}

define dso_local i16 @t_vcgeh_f16(half %a, half %b) {
; CHECK-LABEL: t_vcgeh_f16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcmp h0, h1
; CHECK-NEXT:    csetm w0, ge
; CHECK-NEXT:    ret
entry:
  %0 = fcmp oge half %a, %b
  %vcmpd = sext i1 %0 to i16
  ret i16 %vcmpd
}

define dso_local i16 @t_vcgth_f16(half %a, half %b) {
; CHECK-LABEL: t_vcgth_f16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcmp h0, h1
; CHECK-NEXT:    csetm w0, gt
; CHECK-NEXT:    ret
entry:
  %0 = fcmp ogt half %a, %b
  %vcmpd = sext i1 %0 to i16
  ret i16 %vcmpd
}

define dso_local i16 @t_vcleh_f16(half %a, half %b) {
; CHECK-LABEL: t_vcleh_f16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcmp h0, h1
; CHECK-NEXT:    csetm w0, ls
; CHECK-NEXT:    ret
entry:
  %0 = fcmp ole half %a, %b
  %vcmpd = sext i1 %0 to i16
  ret i16 %vcmpd
}

define dso_local i16 @t_vclth_f16(half %a, half %b) {
; CHECK-LABEL: t_vclth_f16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcmp h0, h1
; CHECK-NEXT:    csetm w0, mi
; CHECK-NEXT:    ret
entry:
  %0 = fcmp olt half %a, %b
  %vcmpd = sext i1 %0 to i16
  ret i16 %vcmpd
}

define dso_local half @t_vmaxh_f16(half %a, half %b) {
; CHECK-LABEL: t_vmaxh_f16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fmax h0, h0, h1
; CHECK-NEXT:    ret
entry:
  %vmax = tail call half @llvm.aarch64.neon.fmax.f16(half %a, half %b)
  ret half %vmax
}

define dso_local half @t_vminh_f16(half %a, half %b) {
; CHECK-LABEL: t_vminh_f16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fmin h0, h0, h1
; CHECK-NEXT:    ret
entry:
  %vmin = tail call half @llvm.aarch64.neon.fmin.f16(half %a, half %b)
  ret half %vmin
}

define dso_local half @t_vmulxh_f16(half %a, half %b) {
; CHECK-LABEL: t_vmulxh_f16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fmulx h0, h0, h1
; CHECK-NEXT:    ret
entry:
  %vmulxh_f16 = tail call half @llvm.aarch64.neon.fmulx.f16(half %a, half %b)
  ret half %vmulxh_f16
}

define dso_local half @t_vrecpsh_f16(half %a, half %b) {
; CHECK-LABEL: t_vrecpsh_f16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    frecps h0, h0, h1
; CHECK-NEXT:    ret
entry:
  %vrecps = tail call half @llvm.aarch64.neon.frecps.f16(half %a, half %b)
  ret half %vrecps
}

define dso_local half @t_vrsqrtsh_f16(half %a, half %b) {
; CHECK-LABEL: t_vrsqrtsh_f16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    frsqrts h0, h0, h1
; CHECK-NEXT:    ret
entry:
  %vrsqrtsh_f16 = tail call half @llvm.aarch64.neon.frsqrts.f16(half %a, half %b)
  ret half %vrsqrtsh_f16
}

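; Scalar fixed-point <-> half-precision conversion intrinsics. The i32
; fracbits operand of each call becomes the immediate on scvtf/ucvtf/fcvtzs/fcvtzu.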
declare half @llvm.aarch64.neon.vcvtfxs2fp.f16.i32(i32, i32) #1
declare half @llvm.aarch64.neon.vcvtfxs2fp.f16.i64(i64, i32) #1
declare i32 @llvm.aarch64.neon.vcvtfp2fxs.i32.f16(half, i32) #1
declare i64 @llvm.aarch64.neon.vcvtfp2fxs.i64.f16(half, i32) #1
declare half @llvm.aarch64.neon.vcvtfxu2fp.f16.i32(i32, i32) #1
declare i32 @llvm.aarch64.neon.vcvtfp2fxu.i32.f16(half, i32) #1

define dso_local half @test_vcvth_n_f16_s16_1(i16 %a) {
; CHECK-LABEL: test_vcvth_n_f16_s16_1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fmov s0, w0
; CHECK-NEXT:    scvtf h0, h0, #1
; CHECK-NEXT:    ret
entry:
  %sext = sext i16 %a to i32
  %fcvth_n = tail call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i32(i32 %sext, i32 1)
  ret half %fcvth_n
}

define dso_local half @test_vcvth_n_f16_s16_16(i16 %a) {
; CHECK-LABEL: test_vcvth_n_f16_s16_16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fmov s0, w0
; CHECK-NEXT:    scvtf h0, h0, #16
; CHECK-NEXT:    ret
entry:
  %sext = sext i16 %a to i32
  %fcvth_n = tail call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i32(i32 %sext, i32 16)
  ret half %fcvth_n
}

define dso_local half @test_vcvth_n_f16_s32_1(i32 %a) {
; CHECK-LABEL: test_vcvth_n_f16_s32_1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fmov s0, w0
; CHECK-NEXT:    scvtf h0, h0, #1
; CHECK-NEXT:    ret
entry:
  %vcvth_n_f16_s32 = tail call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i32(i32 %a, i32 1)
  ret half %vcvth_n_f16_s32
}

define dso_local half @test_vcvth_n_f16_s32_16(i32 %a) {
; CHECK-LABEL: test_vcvth_n_f16_s32_16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fmov s0, w0
; CHECK-NEXT:    scvtf h0, h0, #16
; CHECK-NEXT:    ret
entry:
  %vcvth_n_f16_s32 = tail call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i32(i32 %a, i32 16)
  ret half %vcvth_n_f16_s32
}

define dso_local i16 @test_vcvth_n_s16_f16_1(half %a) {
; CHECK-LABEL: test_vcvth_n_s16_f16_1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzs h0, h0, #1
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %fcvth_n = tail call i32 @llvm.aarch64.neon.vcvtfp2fxs.i32.f16(half %a, i32 1)
  %0 = trunc i32 %fcvth_n to i16
  ret i16 %0
}

define dso_local i16 @test_vcvth_n_s16_f16_16(half %a) {
; CHECK-LABEL: test_vcvth_n_s16_f16_16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzs h0, h0, #16
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %fcvth_n = tail call i32 @llvm.aarch64.neon.vcvtfp2fxs.i32.f16(half %a, i32 16)
  %0 = trunc i32 %fcvth_n to i16
  ret i16 %0
}

define dso_local i32 @test_vcvth_n_s32_f16_1(half %a) {
; CHECK-LABEL: test_vcvth_n_s32_f16_1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzs h0, h0, #1
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %vcvth_n_s32_f16 = tail call i32 @llvm.aarch64.neon.vcvtfp2fxs.i32.f16(half %a, i32 1)
  ret i32 %vcvth_n_s32_f16
}

define dso_local i32 @test_vcvth_n_s32_f16_16(half %a) {
; CHECK-LABEL: test_vcvth_n_s32_f16_16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzs h0, h0, #16
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %vcvth_n_s32_f16 = tail call i32 @llvm.aarch64.neon.vcvtfp2fxs.i32.f16(half %a, i32 16)
  ret i32 %vcvth_n_s32_f16
}

define dso_local i64 @test_vcvth_n_s64_f16_1(half %a) {
; CHECK-LABEL: test_vcvth_n_s64_f16_1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzs h0, h0, #1
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
entry:
  %vcvth_n_s64_f16 = tail call i64 @llvm.aarch64.neon.vcvtfp2fxs.i64.f16(half %a, i32 1)
  ret i64 %vcvth_n_s64_f16
}

define dso_local i64 @test_vcvth_n_s64_f16_32(half %a) {
; CHECK-LABEL: test_vcvth_n_s64_f16_32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzs h0, h0, #32
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
entry:
  %vcvth_n_s64_f16 = tail call i64 @llvm.aarch64.neon.vcvtfp2fxs.i64.f16(half %a, i32 32)
  ret i64 %vcvth_n_s64_f16
}

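; Unsigned variants: ucvtf for fixed-point to half, fcvtzu for half to fixed-point.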
define dso_local half @test_vcvth_n_f16_u16_1(i16 %a) {
; CHECK-LABEL: test_vcvth_n_f16_u16_1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fmov s0, w0
; CHECK-NEXT:    ucvtf h0, h0, #1
; CHECK-NEXT:    ret
entry:
  %0 = zext i16 %a to i32
  %fcvth_n = tail call half @llvm.aarch64.neon.vcvtfxu2fp.f16.i32(i32 %0, i32 1)
  ret half %fcvth_n
}

define dso_local half @test_vcvth_n_f16_u16_16(i16 %a) {
; CHECK-LABEL: test_vcvth_n_f16_u16_16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fmov s0, w0
; CHECK-NEXT:    ucvtf h0, h0, #16
; CHECK-NEXT:    ret
entry:
  %0 = zext i16 %a to i32
  %fcvth_n = tail call half @llvm.aarch64.neon.vcvtfxu2fp.f16.i32(i32 %0, i32 16)
  ret half %fcvth_n
}

define dso_local half @test_vcvth_n_f16_u32_1(i32 %a) {
; CHECK-LABEL: test_vcvth_n_f16_u32_1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fmov s0, w0
; CHECK-NEXT:    ucvtf h0, h0, #1
; CHECK-NEXT:    ret
entry:
  %vcvth_n_f16_u32 = tail call half @llvm.aarch64.neon.vcvtfxu2fp.f16.i32(i32 %a, i32 1)
  ret half %vcvth_n_f16_u32
}

define dso_local half @test_vcvth_n_f16_u32_16(i32 %a) {
; CHECK-LABEL: test_vcvth_n_f16_u32_16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fmov s0, w0
; CHECK-NEXT:    ucvtf h0, h0, #16
; CHECK-NEXT:    ret
entry:
  %vcvth_n_f16_u32 = tail call half @llvm.aarch64.neon.vcvtfxu2fp.f16.i32(i32 %a, i32 16)
  ret half %vcvth_n_f16_u32
}

define dso_local i16 @test_vcvth_n_u16_f16_1(half %a) {
; CHECK-LABEL: test_vcvth_n_u16_f16_1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzu h0, h0, #1
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %fcvth_n = tail call i32 @llvm.aarch64.neon.vcvtfp2fxu.i32.f16(half %a, i32 1)
  %0 = trunc i32 %fcvth_n to i16
  ret i16 %0
}

define dso_local i16 @test_vcvth_n_u16_f16_16(half %a) {
; CHECK-LABEL: test_vcvth_n_u16_f16_16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzu h0, h0, #16
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %fcvth_n = tail call i32 @llvm.aarch64.neon.vcvtfp2fxu.i32.f16(half %a, i32 16)
  %0 = trunc i32 %fcvth_n to i16
  ret i16 %0
}

define dso_local i32 @test_vcvth_n_u32_f16_1(half %a) {
; CHECK-LABEL: test_vcvth_n_u32_f16_1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzu h0, h0, #1
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %vcvth_n_u32_f16 = tail call i32 @llvm.aarch64.neon.vcvtfp2fxu.i32.f16(half %a, i32 1)
  ret i32 %vcvth_n_u32_f16
}

define dso_local i32 @test_vcvth_n_u32_f16_16(half %a) {
; CHECK-LABEL: test_vcvth_n_u32_f16_16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzu h0, h0, #16
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %vcvth_n_u32_f16 = tail call i32 @llvm.aarch64.neon.vcvtfp2fxu.i32.f16(half %a, i32 16)
  ret i32 %vcvth_n_u32_f16
}

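; Absolute-compare intrinsics select facge/facgt; the i32 mask result is
; truncated to i16.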
define dso_local i16 @vcageh_f16_test(half %a, half %b) {
; CHECK-LABEL: vcageh_f16_test:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    facge h0, h0, h1
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %facg = tail call i32 @llvm.aarch64.neon.facge.i32.f16(half %a, half %b)
  %0 = trunc i32 %facg to i16
  ret i16 %0
}

define dso_local i16 @vcagth_f16_test(half %a, half %b) {
; CHECK-LABEL: vcagth_f16_test:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    facgt h0, h0, h1
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %facg = tail call i32 @llvm.aarch64.neon.facgt.i32.f16(half %a, half %b)
  %0 = trunc i32 %facg to i16
  ret i16 %0
}

define dso_local half @vcvth_n_f16_s64_test(i64 %a) {
; CHECK-LABEL: vcvth_n_f16_s64_test:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fmov d0, x0
; CHECK-NEXT:    scvtf h0, h0, #16
; CHECK-NEXT:    ret
entry:
  %vcvth_n_f16_s64 = tail call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i64(i64 %a, i32 16)
  ret half %vcvth_n_f16_s64
}

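; Attribute group referenced by the conversion intrinsic declarations above.
; The original definition is not shown here; the contents below are assumed.
attributes #1 = { nounwind readnone }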