; xref: /llvm-project/llvm/test/CodeGen/Generic/replace-intrinsics-with-veclib.ll (revision 36adfec155de366d722f2bac8ff9162289dcf06c)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-attributes
; RUN: opt -vector-library=SVML -replace-with-veclib -S < %s | FileCheck %s  --check-prefixes=COMMON,SVML
; RUN: opt -vector-library=AMDLIBM -replace-with-veclib -S < %s | FileCheck %s  --check-prefixes=COMMON,AMDLIBM
; RUN: opt -vector-library=LIBMVEC-X86 -replace-with-veclib -S < %s | FileCheck %s  --check-prefixes=COMMON,LIBMVEC-X86
; RUN: opt -vector-library=MASSV -replace-with-veclib -S < %s | FileCheck %s  --check-prefixes=COMMON,MASSV
; RUN: opt -vector-library=Accelerate -replace-with-veclib -S < %s | FileCheck %s  --check-prefixes=COMMON,ACCELERATE

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
10
; Each library-specific prefix checks that the 4 x double exp intrinsic is
; replaced by that library's vector call (or left alone when the library has
; no matching function, as for MASSV and Accelerate here).
define <4 x double> @exp_v4(<4 x double> %in) {
; SVML-LABEL: define {{[^@]+}}@exp_v4
; SVML-SAME: (<4 x double> [[IN:%.*]]) {
; SVML-NEXT:    [[TMP1:%.*]] = call <4 x double> @__svml_exp4(<4 x double> [[IN]])
; SVML-NEXT:    ret <4 x double> [[TMP1]]
;
; AMDLIBM-LABEL: define {{[^@]+}}@exp_v4
; AMDLIBM-SAME: (<4 x double> [[IN:%.*]]) {
; AMDLIBM-NEXT:    [[TMP1:%.*]] = call <4 x double> @amd_vrd4_exp(<4 x double> [[IN]])
; AMDLIBM-NEXT:    ret <4 x double> [[TMP1]]
;
; LIBMVEC-X86-LABEL: define {{[^@]+}}@exp_v4
; LIBMVEC-X86-SAME: (<4 x double> [[IN:%.*]]) {
; LIBMVEC-X86-NEXT:    [[TMP1:%.*]] = call <4 x double> @_ZGVdN4v_exp(<4 x double> [[IN]])
; LIBMVEC-X86-NEXT:    ret <4 x double> [[TMP1]]
;
; MASSV-LABEL: define {{[^@]+}}@exp_v4
; MASSV-SAME: (<4 x double> [[IN:%.*]]) {
; MASSV-NEXT:    [[CALL:%.*]] = call <4 x double> @llvm.exp.v4f64(<4 x double> [[IN]])
; MASSV-NEXT:    ret <4 x double> [[CALL]]
;
; ACCELERATE-LABEL: define {{[^@]+}}@exp_v4
; ACCELERATE-SAME: (<4 x double> [[IN:%.*]]) {
; ACCELERATE-NEXT:    [[CALL:%.*]] = call <4 x double> @llvm.exp.v4f64(<4 x double> [[IN]])
; ACCELERATE-NEXT:    ret <4 x double> [[CALL]]
;
  %call = call <4 x double> @llvm.exp.v4f64(<4 x double> %in)
  ret <4 x double> %call
}

declare <4 x double> @llvm.exp.v4f64(<4 x double>) #0
42
; Same as @exp_v4 but for the 4 x float exp intrinsic; every library in the
; RUN lines provides a single-precision 4-wide replacement.
define <4 x float> @exp_f32(<4 x float> %in) {
; SVML-LABEL: define {{[^@]+}}@exp_f32
; SVML-SAME: (<4 x float> [[IN:%.*]]) {
; SVML-NEXT:    [[TMP1:%.*]] = call <4 x float> @__svml_expf4(<4 x float> [[IN]])
; SVML-NEXT:    ret <4 x float> [[TMP1]]
;
; AMDLIBM-LABEL: define {{[^@]+}}@exp_f32
; AMDLIBM-SAME: (<4 x float> [[IN:%.*]]) {
; AMDLIBM-NEXT:    [[TMP1:%.*]] = call <4 x float> @amd_vrs4_expf(<4 x float> [[IN]])
; AMDLIBM-NEXT:    ret <4 x float> [[TMP1]]
;
; LIBMVEC-X86-LABEL: define {{[^@]+}}@exp_f32
; LIBMVEC-X86-SAME: (<4 x float> [[IN:%.*]]) {
; LIBMVEC-X86-NEXT:    [[TMP1:%.*]] = call <4 x float> @_ZGVbN4v_expf(<4 x float> [[IN]])
; LIBMVEC-X86-NEXT:    ret <4 x float> [[TMP1]]
;
; MASSV-LABEL: define {{[^@]+}}@exp_f32
; MASSV-SAME: (<4 x float> [[IN:%.*]]) {
; MASSV-NEXT:    [[TMP1:%.*]] = call <4 x float> @__expf4(<4 x float> [[IN]])
; MASSV-NEXT:    ret <4 x float> [[TMP1]]
;
; ACCELERATE-LABEL: define {{[^@]+}}@exp_f32
; ACCELERATE-SAME: (<4 x float> [[IN:%.*]]) {
; ACCELERATE-NEXT:    [[TMP1:%.*]] = call <4 x float> @vexpf(<4 x float> [[IN]])
; ACCELERATE-NEXT:    ret <4 x float> [[TMP1]]
;
  %call = call <4 x float> @llvm.exp.v4f32(<4 x float> %in)
  ret <4 x float> %call
}

declare <4 x float> @llvm.exp.v4f32(<4 x float>) #0
74
; No replacement should take place for non-vector intrinsic.
define double @exp_f64(double %in) {
; COMMON-LABEL: define {{[^@]+}}@exp_f64
; COMMON-SAME: (double [[IN:%.*]]) {
; COMMON-NEXT:    [[CALL:%.*]] = call double @llvm.exp.f64(double [[IN]])
; COMMON-NEXT:    ret double [[CALL]]
;
  %call = call double @llvm.exp.f64(double %in)
  ret double %call
}

declare double @llvm.exp.f64(double) #0
87
; Check that the pass works with scalar operands on
; vector intrinsics. No vector library has a substitute for powi.
define <4 x double> @powi_v4(<4 x double> %in){
; COMMON-LABEL: define {{[^@]+}}@powi_v4
; COMMON-SAME: (<4 x double> [[IN:%.*]]) {
; COMMON-NEXT:    [[CALL:%.*]] = call <4 x double> @llvm.powi.v4f64.i32(<4 x double> [[IN]], i32 3)
; COMMON-NEXT:    ret <4 x double> [[CALL]]
;
  %call = call <4 x double> @llvm.powi.v4f64.i32(<4 x double> %in, i32 3)
  ret <4 x double> %call
}

declare <4 x double> @llvm.powi.v4f64.i32(<4 x double>, i32) #0
101
; Replacement should not take place if the vector length
; does not match exactly.
define <3 x double> @exp_v3(<3 x double> %in) {
; COMMON-LABEL: define {{[^@]+}}@exp_v3
; COMMON-SAME: (<3 x double> [[IN:%.*]]) {
; COMMON-NEXT:    [[CALL:%.*]] = call <3 x double> @llvm.exp.v3f64(<3 x double> [[IN]])
; COMMON-NEXT:    ret <3 x double> [[CALL]]
;
  %call = call <3 x double> @llvm.exp.v3f64(<3 x double> %in)
  ret <3 x double> %call
}

declare <3 x double> @llvm.exp.v3f64(<3 x double>) #0
115
attributes #0 = {nounwind readnone}