; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -passes=slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX256NODQ
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=bdver1 -passes=slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX256NODQ
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -passes=slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX256NODQ
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skylake-avx512 -mattr=-prefer-256-bit -passes=slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX512
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skylake-avx512 -mattr=+prefer-256-bit -passes=slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX256DQ
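;
; The opt invocations above cover an SSE baseline and several AVX-class
; configurations. All runs share the common CHECK prefix; the SSE, AVX,
; AVX256NODQ, AVX512 and AVX256DQ prefixes record the target-specific
; vectorization decisions checked below. The assertions are autogenerated,
; so after editing the IR they are normally refreshed with
; utils/update_test_checks.py rather than updated by hand.
;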

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

@src64 = common global [8 x double] zeroinitializer, align 64
@src32 = common global [16 x float] zeroinitializer, align 64
@dst64 = common global [8 x i64] zeroinitializer, align 64
@dst32 = common global [16 x i32] zeroinitializer, align 64
@dst16 = common global [32 x i16] zeroinitializer, align 64
@dst8 = common global [64 x i8] zeroinitializer, align 64

;
; FPTOSI vXf64
;
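; Each function below loads eight doubles from @src64, converts every element
; with a scalar fptosi, and stores the results to the matching destination
; array. Whether the SLP vectorizer rewrites this into a vector load, a vector
; fptosi and a vector store (and at which vector width) depends on the
; destination integer type and on the target's vector capabilities, which is
; what the per-prefix check blocks record.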

define void @fptosi_8f64_8i64() #0 {
; SSE-LABEL: @fptosi_8f64_8i64(
; SSE-NEXT:    [[A0:%.*]] = load double, ptr @src64, align 8
; SSE-NEXT:    [[A1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
; SSE-NEXT:    [[A2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; SSE-NEXT:    [[A3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
; SSE-NEXT:    [[A4:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; SSE-NEXT:    [[A5:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
; SSE-NEXT:    [[A6:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
; SSE-NEXT:    [[A7:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
; SSE-NEXT:    [[CVT0:%.*]] = fptosi double [[A0]] to i64
; SSE-NEXT:    [[CVT1:%.*]] = fptosi double [[A1]] to i64
; SSE-NEXT:    [[CVT2:%.*]] = fptosi double [[A2]] to i64
; SSE-NEXT:    [[CVT3:%.*]] = fptosi double [[A3]] to i64
; SSE-NEXT:    [[CVT4:%.*]] = fptosi double [[A4]] to i64
; SSE-NEXT:    [[CVT5:%.*]] = fptosi double [[A5]] to i64
; SSE-NEXT:    [[CVT6:%.*]] = fptosi double [[A6]] to i64
; SSE-NEXT:    [[CVT7:%.*]] = fptosi double [[A7]] to i64
; SSE-NEXT:    store i64 [[CVT0]], ptr @dst64, align 8
; SSE-NEXT:    store i64 [[CVT1]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 1), align 8
; SSE-NEXT:    store i64 [[CVT2]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 2), align 8
; SSE-NEXT:    store i64 [[CVT3]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 3), align 8
; SSE-NEXT:    store i64 [[CVT4]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 4), align 8
; SSE-NEXT:    store i64 [[CVT5]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 5), align 8
; SSE-NEXT:    store i64 [[CVT6]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 6), align 8
; SSE-NEXT:    store i64 [[CVT7]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 7), align 8
; SSE-NEXT:    ret void
;
; AVX256NODQ-LABEL: @fptosi_8f64_8i64(
; AVX256NODQ-NEXT:    [[A0:%.*]] = load double, ptr @src64, align 8
; AVX256NODQ-NEXT:    [[A1:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
; AVX256NODQ-NEXT:    [[A2:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
; AVX256NODQ-NEXT:    [[A3:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
; AVX256NODQ-NEXT:    [[A4:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; AVX256NODQ-NEXT:    [[A5:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
; AVX256NODQ-NEXT:    [[A6:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
; AVX256NODQ-NEXT:    [[A7:%.*]] = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
; AVX256NODQ-NEXT:    [[CVT0:%.*]] = fptosi double [[A0]] to i64
; AVX256NODQ-NEXT:    [[CVT1:%.*]] = fptosi double [[A1]] to i64
; AVX256NODQ-NEXT:    [[CVT2:%.*]] = fptosi double [[A2]] to i64
; AVX256NODQ-NEXT:    [[CVT3:%.*]] = fptosi double [[A3]] to i64
; AVX256NODQ-NEXT:    [[CVT4:%.*]] = fptosi double [[A4]] to i64
; AVX256NODQ-NEXT:    [[CVT5:%.*]] = fptosi double [[A5]] to i64
; AVX256NODQ-NEXT:    [[CVT6:%.*]] = fptosi double [[A6]] to i64
; AVX256NODQ-NEXT:    [[CVT7:%.*]] = fptosi double [[A7]] to i64
; AVX256NODQ-NEXT:    store i64 [[CVT0]], ptr @dst64, align 8
; AVX256NODQ-NEXT:    store i64 [[CVT1]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 1), align 8
; AVX256NODQ-NEXT:    store i64 [[CVT2]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 2), align 8
; AVX256NODQ-NEXT:    store i64 [[CVT3]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 3), align 8
; AVX256NODQ-NEXT:    store i64 [[CVT4]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 4), align 8
; AVX256NODQ-NEXT:    store i64 [[CVT5]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 5), align 8
; AVX256NODQ-NEXT:    store i64 [[CVT6]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 6), align 8
; AVX256NODQ-NEXT:    store i64 [[CVT7]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 7), align 8
; AVX256NODQ-NEXT:    ret void
;
; AVX512-LABEL: @fptosi_8f64_8i64(
; AVX512-NEXT:    [[TMP1:%.*]] = load <8 x double>, ptr @src64, align 8
; AVX512-NEXT:    [[TMP2:%.*]] = fptosi <8 x double> [[TMP1]] to <8 x i64>
; AVX512-NEXT:    store <8 x i64> [[TMP2]], ptr @dst64, align 8
; AVX512-NEXT:    ret void
;
; AVX256DQ-LABEL: @fptosi_8f64_8i64(
; AVX256DQ-NEXT:    [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
; AVX256DQ-NEXT:    [[TMP2:%.*]] = fptosi <4 x double> [[TMP1]] to <4 x i64>
; AVX256DQ-NEXT:    store <4 x i64> [[TMP2]], ptr @dst64, align 8
; AVX256DQ-NEXT:    [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; AVX256DQ-NEXT:    [[TMP4:%.*]] = fptosi <4 x double> [[TMP3]] to <4 x i64>
; AVX256DQ-NEXT:    store <4 x i64> [[TMP4]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 4), align 8
; AVX256DQ-NEXT:    ret void
;
  %a0 = load double, ptr @src64, align 8
  %a1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
  %a2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
  %a3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
  %a4 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
  %a5 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
  %a6 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
  %a7 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
  %cvt0 = fptosi double %a0 to i64
  %cvt1 = fptosi double %a1 to i64
  %cvt2 = fptosi double %a2 to i64
  %cvt3 = fptosi double %a3 to i64
  %cvt4 = fptosi double %a4 to i64
  %cvt5 = fptosi double %a5 to i64
  %cvt6 = fptosi double %a6 to i64
  %cvt7 = fptosi double %a7 to i64
  store i64 %cvt0, ptr @dst64, align 8
  store i64 %cvt1, ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 1), align 8
  store i64 %cvt2, ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 2), align 8
  store i64 %cvt3, ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 3), align 8
  store i64 %cvt4, ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 4), align 8
  store i64 %cvt5, ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 5), align 8
  store i64 %cvt6, ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 6), align 8
  store i64 %cvt7, ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 7), align 8
  ret void
}

define void @fptosi_8f64_8i32() #0 {
; SSE-LABEL: @fptosi_8f64_8i32(
; SSE-NEXT:    [[TMP1:%.*]] = load <4 x double>, ptr @src64, align 8
; SSE-NEXT:    [[TMP2:%.*]] = fptosi <4 x double> [[TMP1]] to <4 x i32>
; SSE-NEXT:    store <4 x i32> [[TMP2]], ptr @dst32, align 4
; SSE-NEXT:    [[TMP3:%.*]] = load <4 x double>, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
; SSE-NEXT:    [[TMP4:%.*]] = fptosi <4 x double> [[TMP3]] to <4 x i32>
; SSE-NEXT:    store <4 x i32> [[TMP4]], ptr getelementptr inbounds ([16 x i32], ptr @dst32, i32 0, i64 4), align 4
; SSE-NEXT:    ret void
;
; AVX-LABEL: @fptosi_8f64_8i32(
; AVX-NEXT:    [[TMP1:%.*]] = load <8 x double>, ptr @src64, align 8
; AVX-NEXT:    [[TMP2:%.*]] = fptosi <8 x double> [[TMP1]] to <8 x i32>
; AVX-NEXT:    store <8 x i32> [[TMP2]], ptr @dst32, align 4
; AVX-NEXT:    ret void
;
  %a0 = load double, ptr @src64, align 8
  %a1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
  %a2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
  %a3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
  %a4 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
  %a5 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
  %a6 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
  %a7 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
  %cvt0 = fptosi double %a0 to i32
  %cvt1 = fptosi double %a1 to i32
  %cvt2 = fptosi double %a2 to i32
  %cvt3 = fptosi double %a3 to i32
  %cvt4 = fptosi double %a4 to i32
  %cvt5 = fptosi double %a5 to i32
  %cvt6 = fptosi double %a6 to i32
  %cvt7 = fptosi double %a7 to i32
  store i32 %cvt0, ptr @dst32, align 4
  store i32 %cvt1, ptr getelementptr inbounds ([16 x i32], ptr @dst32, i32 0, i64 1), align 4
  store i32 %cvt2, ptr getelementptr inbounds ([16 x i32], ptr @dst32, i32 0, i64 2), align 4
  store i32 %cvt3, ptr getelementptr inbounds ([16 x i32], ptr @dst32, i32 0, i64 3), align 4
  store i32 %cvt4, ptr getelementptr inbounds ([16 x i32], ptr @dst32, i32 0, i64 4), align 4
  store i32 %cvt5, ptr getelementptr inbounds ([16 x i32], ptr @dst32, i32 0, i64 5), align 4
  store i32 %cvt6, ptr getelementptr inbounds ([16 x i32], ptr @dst32, i32 0, i64 6), align 4
  store i32 %cvt7, ptr getelementptr inbounds ([16 x i32], ptr @dst32, i32 0, i64 7), align 4
  ret void
}

define void @fptosi_8f64_8i16() #0 {
; CHECK-LABEL: @fptosi_8f64_8i16(
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x double>, ptr @src64, align 8
; CHECK-NEXT:    [[TMP2:%.*]] = fptosi <8 x double> [[TMP1]] to <8 x i16>
; CHECK-NEXT:    store <8 x i16> [[TMP2]], ptr @dst16, align 2
; CHECK-NEXT:    ret void
;
  %a0 = load double, ptr @src64, align 8
  %a1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
  %a2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
  %a3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
  %a4 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
  %a5 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
  %a6 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
  %a7 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
  %cvt0 = fptosi double %a0 to i16
  %cvt1 = fptosi double %a1 to i16
  %cvt2 = fptosi double %a2 to i16
  %cvt3 = fptosi double %a3 to i16
  %cvt4 = fptosi double %a4 to i16
  %cvt5 = fptosi double %a5 to i16
  %cvt6 = fptosi double %a6 to i16
  %cvt7 = fptosi double %a7 to i16
  store i16 %cvt0, ptr @dst16, align 2
  store i16 %cvt1, ptr getelementptr inbounds ([32 x i16], ptr @dst16, i32 0, i64 1), align 2
  store i16 %cvt2, ptr getelementptr inbounds ([32 x i16], ptr @dst16, i32 0, i64 2), align 2
  store i16 %cvt3, ptr getelementptr inbounds ([32 x i16], ptr @dst16, i32 0, i64 3), align 2
  store i16 %cvt4, ptr getelementptr inbounds ([32 x i16], ptr @dst16, i32 0, i64 4), align 2
  store i16 %cvt5, ptr getelementptr inbounds ([32 x i16], ptr @dst16, i32 0, i64 5), align 2
  store i16 %cvt6, ptr getelementptr inbounds ([32 x i16], ptr @dst16, i32 0, i64 6), align 2
  store i16 %cvt7, ptr getelementptr inbounds ([32 x i16], ptr @dst16, i32 0, i64 7), align 2
  ret void
}

define void @fptosi_8f64_8i8() #0 {
; CHECK-LABEL: @fptosi_8f64_8i8(
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x double>, ptr @src64, align 8
; CHECK-NEXT:    [[TMP2:%.*]] = fptosi <8 x double> [[TMP1]] to <8 x i8>
; CHECK-NEXT:    store <8 x i8> [[TMP2]], ptr @dst8, align 1
; CHECK-NEXT:    ret void
;
  %a0 = load double, ptr @src64, align 8
  %a1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 1), align 8
  %a2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 2), align 8
  %a3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 3), align 8
  %a4 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 4), align 8
  %a5 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 5), align 8
  %a6 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 6), align 8
  %a7 = load double, ptr getelementptr inbounds ([8 x double], ptr @src64, i32 0, i64 7), align 8
  %cvt0 = fptosi double %a0 to i8
  %cvt1 = fptosi double %a1 to i8
  %cvt2 = fptosi double %a2 to i8
  %cvt3 = fptosi double %a3 to i8
  %cvt4 = fptosi double %a4 to i8
  %cvt5 = fptosi double %a5 to i8
  %cvt6 = fptosi double %a6 to i8
  %cvt7 = fptosi double %a7 to i8
  store i8 %cvt0, ptr @dst8, align 1
  store i8 %cvt1, ptr getelementptr inbounds ([64 x i8], ptr @dst8, i32 0, i64 1), align 1
  store i8 %cvt2, ptr getelementptr inbounds ([64 x i8], ptr @dst8, i32 0, i64 2), align 1
  store i8 %cvt3, ptr getelementptr inbounds ([64 x i8], ptr @dst8, i32 0, i64 3), align 1
  store i8 %cvt4, ptr getelementptr inbounds ([64 x i8], ptr @dst8, i32 0, i64 4), align 1
  store i8 %cvt5, ptr getelementptr inbounds ([64 x i8], ptr @dst8, i32 0, i64 5), align 1
  store i8 %cvt6, ptr getelementptr inbounds ([64 x i8], ptr @dst8, i32 0, i64 6), align 1
  store i8 %cvt7, ptr getelementptr inbounds ([64 x i8], ptr @dst8, i32 0, i64 7), align 1
  ret void
}

;
; FPTOSI vXf32
;
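; The f32 variants below mirror the f64 tests above, sourcing eight floats
; from @src32 instead of doubles from @src64.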

define void @fptosi_8f32_8i64() #0 {
; SSE-LABEL: @fptosi_8f32_8i64(
; SSE-NEXT:    [[A0:%.*]] = load float, ptr @src32, align 4
; SSE-NEXT:    [[A1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
; SSE-NEXT:    [[A2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
; SSE-NEXT:    [[A3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
; SSE-NEXT:    [[A4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE-NEXT:    [[A5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
; SSE-NEXT:    [[A6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
; SSE-NEXT:    [[A7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
; SSE-NEXT:    [[CVT0:%.*]] = fptosi float [[A0]] to i64
; SSE-NEXT:    [[CVT1:%.*]] = fptosi float [[A1]] to i64
; SSE-NEXT:    [[CVT2:%.*]] = fptosi float [[A2]] to i64
; SSE-NEXT:    [[CVT3:%.*]] = fptosi float [[A3]] to i64
; SSE-NEXT:    [[CVT4:%.*]] = fptosi float [[A4]] to i64
; SSE-NEXT:    [[CVT5:%.*]] = fptosi float [[A5]] to i64
; SSE-NEXT:    [[CVT6:%.*]] = fptosi float [[A6]] to i64
; SSE-NEXT:    [[CVT7:%.*]] = fptosi float [[A7]] to i64
; SSE-NEXT:    store i64 [[CVT0]], ptr @dst64, align 8
; SSE-NEXT:    store i64 [[CVT1]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 1), align 8
; SSE-NEXT:    store i64 [[CVT2]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 2), align 8
; SSE-NEXT:    store i64 [[CVT3]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 3), align 8
; SSE-NEXT:    store i64 [[CVT4]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 4), align 8
; SSE-NEXT:    store i64 [[CVT5]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 5), align 8
; SSE-NEXT:    store i64 [[CVT6]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 6), align 8
; SSE-NEXT:    store i64 [[CVT7]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 7), align 8
; SSE-NEXT:    ret void
;
; AVX256NODQ-LABEL: @fptosi_8f32_8i64(
; AVX256NODQ-NEXT:    [[A0:%.*]] = load float, ptr @src32, align 4
; AVX256NODQ-NEXT:    [[A1:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
; AVX256NODQ-NEXT:    [[A2:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
; AVX256NODQ-NEXT:    [[A3:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
; AVX256NODQ-NEXT:    [[A4:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; AVX256NODQ-NEXT:    [[A5:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
; AVX256NODQ-NEXT:    [[A6:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
; AVX256NODQ-NEXT:    [[A7:%.*]] = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
; AVX256NODQ-NEXT:    [[CVT0:%.*]] = fptosi float [[A0]] to i64
; AVX256NODQ-NEXT:    [[CVT1:%.*]] = fptosi float [[A1]] to i64
; AVX256NODQ-NEXT:    [[CVT2:%.*]] = fptosi float [[A2]] to i64
; AVX256NODQ-NEXT:    [[CVT3:%.*]] = fptosi float [[A3]] to i64
; AVX256NODQ-NEXT:    [[CVT4:%.*]] = fptosi float [[A4]] to i64
; AVX256NODQ-NEXT:    [[CVT5:%.*]] = fptosi float [[A5]] to i64
; AVX256NODQ-NEXT:    [[CVT6:%.*]] = fptosi float [[A6]] to i64
; AVX256NODQ-NEXT:    [[CVT7:%.*]] = fptosi float [[A7]] to i64
; AVX256NODQ-NEXT:    store i64 [[CVT0]], ptr @dst64, align 8
; AVX256NODQ-NEXT:    store i64 [[CVT1]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 1), align 8
; AVX256NODQ-NEXT:    store i64 [[CVT2]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 2), align 8
; AVX256NODQ-NEXT:    store i64 [[CVT3]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 3), align 8
; AVX256NODQ-NEXT:    store i64 [[CVT4]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 4), align 8
; AVX256NODQ-NEXT:    store i64 [[CVT5]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 5), align 8
; AVX256NODQ-NEXT:    store i64 [[CVT6]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 6), align 8
; AVX256NODQ-NEXT:    store i64 [[CVT7]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 7), align 8
; AVX256NODQ-NEXT:    ret void
;
; AVX512-LABEL: @fptosi_8f32_8i64(
; AVX512-NEXT:    [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; AVX512-NEXT:    [[TMP2:%.*]] = fptosi <8 x float> [[TMP1]] to <8 x i64>
; AVX512-NEXT:    store <8 x i64> [[TMP2]], ptr @dst64, align 8
; AVX512-NEXT:    ret void
;
; AVX256DQ-LABEL: @fptosi_8f32_8i64(
; AVX256DQ-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; AVX256DQ-NEXT:    [[TMP2:%.*]] = fptosi <4 x float> [[TMP1]] to <4 x i64>
; AVX256DQ-NEXT:    store <4 x i64> [[TMP2]], ptr @dst64, align 8
; AVX256DQ-NEXT:    [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; AVX256DQ-NEXT:    [[TMP4:%.*]] = fptosi <4 x float> [[TMP3]] to <4 x i64>
; AVX256DQ-NEXT:    store <4 x i64> [[TMP4]], ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 4), align 8
; AVX256DQ-NEXT:    ret void
;
  %a0 = load float, ptr @src32, align 4
  %a1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
  %a2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
  %a3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
  %a4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
  %a5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
  %a6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
  %a7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
  %cvt0 = fptosi float %a0 to i64
  %cvt1 = fptosi float %a1 to i64
  %cvt2 = fptosi float %a2 to i64
  %cvt3 = fptosi float %a3 to i64
  %cvt4 = fptosi float %a4 to i64
  %cvt5 = fptosi float %a5 to i64
  %cvt6 = fptosi float %a6 to i64
  %cvt7 = fptosi float %a7 to i64
  store i64 %cvt0, ptr @dst64, align 8
  store i64 %cvt1, ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 1), align 8
  store i64 %cvt2, ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 2), align 8
  store i64 %cvt3, ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 3), align 8
  store i64 %cvt4, ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 4), align 8
  store i64 %cvt5, ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 5), align 8
  store i64 %cvt6, ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 6), align 8
  store i64 %cvt7, ptr getelementptr inbounds ([8 x i64], ptr @dst64, i32 0, i64 7), align 8
  ret void
}

define void @fptosi_8f32_8i32() #0 {
; SSE-LABEL: @fptosi_8f32_8i32(
; SSE-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr @src32, align 4
; SSE-NEXT:    [[TMP2:%.*]] = fptosi <4 x float> [[TMP1]] to <4 x i32>
; SSE-NEXT:    store <4 x i32> [[TMP2]], ptr @dst32, align 4
; SSE-NEXT:    [[TMP3:%.*]] = load <4 x float>, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
; SSE-NEXT:    [[TMP4:%.*]] = fptosi <4 x float> [[TMP3]] to <4 x i32>
; SSE-NEXT:    store <4 x i32> [[TMP4]], ptr getelementptr inbounds ([16 x i32], ptr @dst32, i32 0, i64 4), align 4
; SSE-NEXT:    ret void
;
; AVX-LABEL: @fptosi_8f32_8i32(
; AVX-NEXT:    [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; AVX-NEXT:    [[TMP2:%.*]] = fptosi <8 x float> [[TMP1]] to <8 x i32>
; AVX-NEXT:    store <8 x i32> [[TMP2]], ptr @dst32, align 4
; AVX-NEXT:    ret void
;
  %a0 = load float, ptr @src32, align 4
  %a1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
  %a2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
  %a3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
  %a4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
  %a5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
  %a6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
  %a7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
  %cvt0 = fptosi float %a0 to i32
  %cvt1 = fptosi float %a1 to i32
  %cvt2 = fptosi float %a2 to i32
  %cvt3 = fptosi float %a3 to i32
  %cvt4 = fptosi float %a4 to i32
  %cvt5 = fptosi float %a5 to i32
  %cvt6 = fptosi float %a6 to i32
  %cvt7 = fptosi float %a7 to i32
  store i32 %cvt0, ptr @dst32, align 4
  store i32 %cvt1, ptr getelementptr inbounds ([16 x i32], ptr @dst32, i32 0, i64 1), align 4
  store i32 %cvt2, ptr getelementptr inbounds ([16 x i32], ptr @dst32, i32 0, i64 2), align 4
  store i32 %cvt3, ptr getelementptr inbounds ([16 x i32], ptr @dst32, i32 0, i64 3), align 4
  store i32 %cvt4, ptr getelementptr inbounds ([16 x i32], ptr @dst32, i32 0, i64 4), align 4
  store i32 %cvt5, ptr getelementptr inbounds ([16 x i32], ptr @dst32, i32 0, i64 5), align 4
  store i32 %cvt6, ptr getelementptr inbounds ([16 x i32], ptr @dst32, i32 0, i64 6), align 4
  store i32 %cvt7, ptr getelementptr inbounds ([16 x i32], ptr @dst32, i32 0, i64 7), align 4
  ret void
}

define void @fptosi_8f32_8i16() #0 {
; CHECK-LABEL: @fptosi_8f32_8i16(
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; CHECK-NEXT:    [[TMP2:%.*]] = fptosi <8 x float> [[TMP1]] to <8 x i16>
; CHECK-NEXT:    store <8 x i16> [[TMP2]], ptr @dst16, align 2
; CHECK-NEXT:    ret void
;
  %a0 = load float, ptr @src32, align 4
  %a1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
  %a2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
  %a3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
  %a4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
  %a5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
  %a6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
  %a7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
  %cvt0 = fptosi float %a0 to i16
  %cvt1 = fptosi float %a1 to i16
  %cvt2 = fptosi float %a2 to i16
  %cvt3 = fptosi float %a3 to i16
  %cvt4 = fptosi float %a4 to i16
  %cvt5 = fptosi float %a5 to i16
  %cvt6 = fptosi float %a6 to i16
  %cvt7 = fptosi float %a7 to i16
  store i16 %cvt0, ptr @dst16, align 2
  store i16 %cvt1, ptr getelementptr inbounds ([32 x i16], ptr @dst16, i32 0, i64 1), align 2
  store i16 %cvt2, ptr getelementptr inbounds ([32 x i16], ptr @dst16, i32 0, i64 2), align 2
  store i16 %cvt3, ptr getelementptr inbounds ([32 x i16], ptr @dst16, i32 0, i64 3), align 2
  store i16 %cvt4, ptr getelementptr inbounds ([32 x i16], ptr @dst16, i32 0, i64 4), align 2
  store i16 %cvt5, ptr getelementptr inbounds ([32 x i16], ptr @dst16, i32 0, i64 5), align 2
  store i16 %cvt6, ptr getelementptr inbounds ([32 x i16], ptr @dst16, i32 0, i64 6), align 2
  store i16 %cvt7, ptr getelementptr inbounds ([32 x i16], ptr @dst16, i32 0, i64 7), align 2
  ret void
}

define void @fptosi_8f32_8i8() #0 {
; CHECK-LABEL: @fptosi_8f32_8i8(
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x float>, ptr @src32, align 4
; CHECK-NEXT:    [[TMP2:%.*]] = fptosi <8 x float> [[TMP1]] to <8 x i8>
; CHECK-NEXT:    store <8 x i8> [[TMP2]], ptr @dst8, align 1
; CHECK-NEXT:    ret void
;
  %a0 = load float, ptr @src32, align 4
  %a1 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 1), align 4
  %a2 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 2), align 4
  %a3 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 3), align 4
  %a4 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 4), align 4
  %a5 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 5), align 4
  %a6 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 6), align 4
  %a7 = load float, ptr getelementptr inbounds ([16 x float], ptr @src32, i32 0, i64 7), align 4
  %cvt0 = fptosi float %a0 to i8
  %cvt1 = fptosi float %a1 to i8
  %cvt2 = fptosi float %a2 to i8
  %cvt3 = fptosi float %a3 to i8
  %cvt4 = fptosi float %a4 to i8
  %cvt5 = fptosi float %a5 to i8
  %cvt6 = fptosi float %a6 to i8
  %cvt7 = fptosi float %a7 to i8
  store i8 %cvt0, ptr @dst8, align 1
  store i8 %cvt1, ptr getelementptr inbounds ([64 x i8], ptr @dst8, i32 0, i64 1), align 1
  store i8 %cvt2, ptr getelementptr inbounds ([64 x i8], ptr @dst8, i32 0, i64 2), align 1
  store i8 %cvt3, ptr getelementptr inbounds ([64 x i8], ptr @dst8, i32 0, i64 3), align 1
  store i8 %cvt4, ptr getelementptr inbounds ([64 x i8], ptr @dst8, i32 0, i64 4), align 1
  store i8 %cvt5, ptr getelementptr inbounds ([64 x i8], ptr @dst8, i32 0, i64 5), align 1
  store i8 %cvt6, ptr getelementptr inbounds ([64 x i8], ptr @dst8, i32 0, i64 6), align 1
  store i8 %cvt7, ptr getelementptr inbounds ([64 x i8], ptr @dst8, i32 0, i64 7), align 1
  ret void
}

;
; FPTOSI BUILDVECTOR
;
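; The remaining tests take scalar function arguments, convert each one, and
; assemble the results with an insertelement chain whose base vector is
; poison (hence the -inseltpoison suffix in the test name). The checks expect
; the four scalar conversions to be collapsed into a single <4 x double> or
; <4 x float> fptosi feeding the result vector directly.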

define <4 x i32> @fptosi_4xf64_4i32(double %a0, double %a1, double %a2, double %a3) #0 {
; CHECK-LABEL: @fptosi_4xf64_4i32(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x double> poison, double [[A0:%.*]], i32 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x double> [[TMP1]], double [[A1:%.*]], i32 1
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x double> [[TMP2]], double [[A2:%.*]], i32 2
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x double> [[TMP3]], double [[A3:%.*]], i32 3
; CHECK-NEXT:    [[TMP5:%.*]] = fptosi <4 x double> [[TMP4]] to <4 x i32>
; CHECK-NEXT:    ret <4 x i32> [[TMP5]]
;
  %cvt0 = fptosi double %a0 to i32
  %cvt1 = fptosi double %a1 to i32
  %cvt2 = fptosi double %a2 to i32
  %cvt3 = fptosi double %a3 to i32
  %res0 = insertelement <4 x i32> poison, i32 %cvt0, i32 0
  %res1 = insertelement <4 x i32> %res0, i32 %cvt1, i32 1
  %res2 = insertelement <4 x i32> %res1, i32 %cvt2, i32 2
  %res3 = insertelement <4 x i32> %res2, i32 %cvt3, i32 3
  ret <4 x i32> %res3
}

define <4 x i32> @fptosi_4xf32_4i32(float %a0, float %a1, float %a2, float %a3) #0 {
; CHECK-LABEL: @fptosi_4xf32_4i32(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x float> poison, float [[A0:%.*]], i32 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x float> [[TMP1]], float [[A1:%.*]], i32 1
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <4 x float> [[TMP2]], float [[A2:%.*]], i32 2
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x float> [[TMP3]], float [[A3:%.*]], i32 3
; CHECK-NEXT:    [[TMP5:%.*]] = fptosi <4 x float> [[TMP4]] to <4 x i32>
; CHECK-NEXT:    ret <4 x i32> [[TMP5]]
;
  %cvt0 = fptosi float %a0 to i32
  %cvt1 = fptosi float %a1 to i32
  %cvt2 = fptosi float %a2 to i32
  %cvt3 = fptosi float %a3 to i32
  %res0 = insertelement <4 x i32> poison, i32 %cvt0, i32 0
  %res1 = insertelement <4 x i32> %res0, i32 %cvt1, i32 1
  %res2 = insertelement <4 x i32> %res1, i32 %cvt2, i32 2
  %res3 = insertelement <4 x i32> %res2, i32 %cvt3, i32 3
  ret <4 x i32> %res3
}

attributes #0 = { nounwind }