; xref: /llvm-project/llvm/test/CodeGen/AArch64/arm64-fixed-point-scalar-cvt-dagcombine.ll (revision 6ef5e242f2f7a307c4f4ab9c9310410d1acba6a7)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s

; DAGCombine to transform a conversion of an extract_vector_elt to an
; extract_vector_elt of a conversion, which saves a round trip of copies
; of the value to a GPR and back to an FPR.
; rdar://11855286
; Basic shape of the combine: the scalar fixed-point convert of an extracted
; lane is rewritten as a vector convert followed by a lane move
; (scvtf.2d + mov d0, v0[1]), so the value never bounces through a GPR.
define double @foo0(<2 x i64> %a) nounwind {
; CHECK-LABEL: foo0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf.2d v0, v0, #9
; CHECK-NEXT:    mov d0, v0[1]
; CHECK-NEXT:    ret
  %vecext = extractelement <2 x i64> %a, i32 1
  %fcvt_n = tail call double @llvm.aarch64.neon.vcvtfxs2fp.f64.i64(i64 %vecext, i32 9)
  ret double %fcvt_n
}

; Same conversion intrinsic, but the i64 input comes from a chain of NEON
; intrinsics (nearbyint -> fcvtzs -> sri) instead of a plain extractelement;
; the generated code still performs the fixed-point convert on the vector
; register (scvtf.2d ... #1) rather than through a GPR round trip.
define double @bar(ptr %iVals, ptr %fVals, ptr %dVals) {
; CHECK-LABEL: bar:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr d0, [x2, #128]
; CHECK-NEXT:    frinti d0, d0
; CHECK-NEXT:    fcvtzs x8, d0
; CHECK-NEXT:    fmov d0, x8
; CHECK-NEXT:    sri d0, d0, #1
; CHECK-NEXT:    scvtf.2d v0, v0, #1
; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT:    ret
entry:
  ; double element 16 => byte offset 128, matching the ldr above
  %arrayidx = getelementptr inbounds double, ptr %dVals, i64 16
  %0 = load <1 x double>, ptr %arrayidx, align 8
  %vrndi_v1.i = call <1 x double> @llvm.nearbyint.v1f64(<1 x double> %0)
  %vget_lane = extractelement <1 x double> %vrndi_v1.i, i64 0
  %vcvtd_s64_f64.i = call i64 @llvm.aarch64.neon.fcvtzs.i64.f64(double %vget_lane)
  %1 = insertelement <1 x i64> poison, i64 %vcvtd_s64_f64.i, i64 0
  %vsrid_n_s647 = call <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64> %1, <1 x i64> %1, i32 1)
  %2 = extractelement <1 x i64> %vsrid_n_s647, i64 0
  %vcvtd_n_f64_s64 = call double @llvm.aarch64.neon.vcvtfxs2fp.f64.i64(i64 %2, i32 1)
  ret double %vcvtd_n_f64_s64
}

; Unsigned variant: the result of the umaxv reduction is converted with the
; fixed-point ucvtf directly on the FP register (umaxv.8h then ucvtf s0, s0, #1),
; with no GPR round trip in between.
define float @do_stuff(<8 x i16> noundef %var_135) {
; CHECK-LABEL: do_stuff:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    umaxv.8h h0, v0
; CHECK-NEXT:    ucvtf s0, s0, #1
; CHECK-NEXT:    ret
entry:
  %vmaxv.i = call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> %var_135) #2
  %vcvts_n_f32_u32 = call float @llvm.aarch64.neon.vcvtfxu2fp.f32.i32(i32 %vmaxv.i, i32 1)
  ret float %vcvts_n_f32_u32
}

; Declarations of the intrinsics used by the tests above.
declare <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64>, <1 x i64>, i32)
declare double @llvm.aarch64.neon.vcvtfxs2fp.f64.i64(i64, i32)
declare <1 x double> @llvm.nearbyint.v1f64(<1 x double>)
declare i64 @llvm.aarch64.neon.fcvtzs.i64.f64(double)
declare i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16>) #1
declare float @llvm.aarch64.neon.vcvtfxu2fp.f32.i32(i32, i32) #1
