; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-unknown-linux -mcpu=skylake-avx512 -passes=slp-vectorizer -S | FileCheck %s
; RUN: opt < %s -mtriple=x86_64-unknown-linux -mcpu=skylake-avx512 -passes=inject-tli-mappings,slp-vectorizer -vector-library=SVML -S | FileCheck %s --check-prefix=VECLIB
; RUN: opt < %s -mtriple=x86_64-unknown-linux -mcpu=skylake-avx512 -passes=inject-tli-mappings,slp-vectorizer -vector-library=AMDLIBM -S | FileCheck %s --check-prefix=AMDLIBM

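; Verify that the SLP vectorizer turns the interleaved scalar sin/sqrt calls
; into <2 x double> calls: sqrt is kept as the llvm.sqrt intrinsic in every
; configuration, while sin is lowered to the SVML (@__svml_sin2) or AMD LibM
; (@amd_vrd2_sin) vector routine when the matching -vector-library is given.
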
@src = common global [8 x double] zeroinitializer, align 64
@dst = common global [8 x double] zeroinitializer, align 64

declare double @llvm.sqrt.f64(double)
declare double @llvm.sin.f64(double)

define void @test() {
; CHECK-LABEL: @test(
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x double>, ptr @src, align 8
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <8 x double> [[TMP1]], <8 x double> poison, <2 x i32> <i32 2, i32 6>
; CHECK-NEXT:    [[TMP3:%.*]] = call fast <2 x double> @llvm.sin.v2f64(<2 x double> [[TMP2]])
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <8 x double> [[TMP1]], <8 x double> poison, <2 x i32> <i32 3, i32 7>
; CHECK-NEXT:    [[TMP5:%.*]] = call fast <2 x double> @llvm.sin.v2f64(<2 x double> [[TMP4]])
; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <8 x double> [[TMP1]], <8 x double> poison, <2 x i32> <i32 0, i32 4>
; CHECK-NEXT:    [[TMP7:%.*]] = call fast <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP6]])
; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <8 x double> [[TMP1]], <8 x double> poison, <2 x i32> <i32 1, i32 5>
; CHECK-NEXT:    [[TMP9:%.*]] = call fast <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP8]])
; CHECK-NEXT:    [[TMP10:%.*]] = fadd fast <2 x double> [[TMP7]], [[TMP5]]
; CHECK-NEXT:    [[TMP11:%.*]] = fadd fast <2 x double> [[TMP3]], [[TMP9]]
; CHECK-NEXT:    [[TMP12:%.*]] = fadd fast <2 x double> [[TMP10]], [[TMP11]]
; CHECK-NEXT:    store <2 x double> [[TMP12]], ptr @dst, align 8
; CHECK-NEXT:    ret void
;
; VECLIB-LABEL: @test(
; VECLIB-NEXT:    [[TMP1:%.*]] = load <8 x double>, ptr @src, align 8
; VECLIB-NEXT:    [[TMP2:%.*]] = shufflevector <8 x double> [[TMP1]], <8 x double> poison, <2 x i32> <i32 2, i32 6>
; VECLIB-NEXT:    [[TMP3:%.*]] = call fast <2 x double> @__svml_sin2(<2 x double> [[TMP2]])
; VECLIB-NEXT:    [[TMP4:%.*]] = shufflevector <8 x double> [[TMP1]], <8 x double> poison, <2 x i32> <i32 3, i32 7>
; VECLIB-NEXT:    [[TMP5:%.*]] = call fast <2 x double> @__svml_sin2(<2 x double> [[TMP4]])
; VECLIB-NEXT:    [[TMP6:%.*]] = shufflevector <8 x double> [[TMP1]], <8 x double> poison, <2 x i32> <i32 0, i32 4>
; VECLIB-NEXT:    [[TMP7:%.*]] = call fast <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP6]])
; VECLIB-NEXT:    [[TMP8:%.*]] = shufflevector <8 x double> [[TMP1]], <8 x double> poison, <2 x i32> <i32 1, i32 5>
; VECLIB-NEXT:    [[TMP9:%.*]] = call fast <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP8]])
; VECLIB-NEXT:    [[TMP10:%.*]] = fadd fast <2 x double> [[TMP7]], [[TMP5]]
; VECLIB-NEXT:    [[TMP11:%.*]] = fadd fast <2 x double> [[TMP3]], [[TMP9]]
; VECLIB-NEXT:    [[TMP12:%.*]] = fadd fast <2 x double> [[TMP10]], [[TMP11]]
; VECLIB-NEXT:    store <2 x double> [[TMP12]], ptr @dst, align 8
; VECLIB-NEXT:    ret void
;
; AMDLIBM-LABEL: @test(
; AMDLIBM-NEXT:    [[TMP1:%.*]] = load <8 x double>, ptr @src, align 8
; AMDLIBM-NEXT:    [[TMP2:%.*]] = shufflevector <8 x double> [[TMP1]], <8 x double> poison, <2 x i32> <i32 2, i32 6>
; AMDLIBM-NEXT:    [[TMP3:%.*]] = call fast <2 x double> @amd_vrd2_sin(<2 x double> [[TMP2]])
; AMDLIBM-NEXT:    [[TMP4:%.*]] = shufflevector <8 x double> [[TMP1]], <8 x double> poison, <2 x i32> <i32 3, i32 7>
; AMDLIBM-NEXT:    [[TMP5:%.*]] = call fast <2 x double> @amd_vrd2_sin(<2 x double> [[TMP4]])
; AMDLIBM-NEXT:    [[TMP6:%.*]] = shufflevector <8 x double> [[TMP1]], <8 x double> poison, <2 x i32> <i32 0, i32 4>
; AMDLIBM-NEXT:    [[TMP7:%.*]] = call fast <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP6]])
; AMDLIBM-NEXT:    [[TMP8:%.*]] = shufflevector <8 x double> [[TMP1]], <8 x double> poison, <2 x i32> <i32 1, i32 5>
; AMDLIBM-NEXT:    [[TMP9:%.*]] = call fast <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP8]])
; AMDLIBM-NEXT:    [[TMP10:%.*]] = fadd fast <2 x double> [[TMP7]], [[TMP5]]
; AMDLIBM-NEXT:    [[TMP11:%.*]] = fadd fast <2 x double> [[TMP3]], [[TMP9]]
; AMDLIBM-NEXT:    [[TMP12:%.*]] = fadd fast <2 x double> [[TMP10]], [[TMP11]]
; AMDLIBM-NEXT:    store <2 x double> [[TMP12]], ptr @dst, align 8
; AMDLIBM-NEXT:    ret void
;
  %a0 = load double, ptr @src, align 8
  %a1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 1), align 8
  %a2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 2), align 8
  %a3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 3), align 8
  %a4 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 4), align 8
  %a5 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 5), align 8
  %a6 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 6), align 8
  %a7 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 7), align 8
  %sin0 = call fast double @llvm.sin.f64(double %a2)
  %sin1 = call fast double @llvm.sin.f64(double %a3)
  %sqrt0 = call fast double @llvm.sqrt.f64(double %a0)
  %sqrt1 = call fast double @llvm.sqrt.f64(double %a1)
  %sin2 = call fast double @llvm.sin.f64(double %a6)
  %sin3 = call fast double @llvm.sin.f64(double %a7)
  %sqrt2 = call fast double @llvm.sqrt.f64(double %a4)
  %sqrt3 = call fast double @llvm.sqrt.f64(double %a5)
  %res1 = fadd fast double %sqrt0, %sin1
  %res2 = fadd fast double %sin0, %sqrt1
  %res00 = fadd fast double %res1, %res2
  %res3 = fadd fast double %sqrt2, %sin3
  %res4 = fadd fast double %sin2, %sqrt3
  %res01 = fadd fast double %res3, %res4
  store double %res00, ptr @dst, align 8
  store double %res01, ptr getelementptr inbounds ([8 x double], ptr @dst, i32 0, i64 1), align 8
  ret void
}