; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=yonah -regalloc=basic | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-apple-darwin -regalloc=basic | FileCheck %s --check-prefix=X64
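; -regalloc=basic pins the basic register allocator; with the default greedy
; allocator, live-range splitting sidesteps the spill this test is trying to
; provoke (see the RAGreedy note below).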

; This testcase should need to spill the -1 value on both x86-32 and x86-64,
; so it shouldn't use pcmpeqd to materialize an all-ones vector; it
; should use a constant-pool load instead.
;
; RAGreedy defeats the test by splitting live ranges.

; There should be no pcmpeqd instructions; everybody should use the constant pool.
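;
; For reference (illustrative only, not part of the checks): the all-ones
; idiom being guarded against is
;   pcmpeqd %xmm0, %xmm0   ## comparing a register with itself sets every bit
; which materializes <-1,-1,-1,-1> in a register. Once the value has to be
; spilled anyway, a constant-pool load (the LCPI references matched below) is
; preferable, since the reload can be folded directly from memory.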

	%struct.__ImageExecInfo = type <{ <4 x i32>, <4 x float>, <2 x i64>, ptr, ptr, ptr, i32, i32, i32, i32, i32 }>
	%struct._cl_image_format_t = type <{ i32, i32, i32 }>
	%struct._image2d_t = type <{ ptr, %struct._cl_image_format_t, i32, i32, i32, i32, i32, i32 }>

define void @program_1(ptr %dest, ptr %t0, <4 x float> %p0, <4 x float> %p1, <4 x float> %p4, <4 x float> %p5, <4 x float> %p6) nounwind {
; X86-LABEL: program_1:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    cmpl $0, 0
; X86-NEXT:    jle LBB0_2
; X86-NEXT:  ## %bb.1: ## %forcond
; X86-NEXT:    cmpl $0, 0
; X86-NEXT:    jg LBB0_3
; X86-NEXT:  LBB0_2: ## %ifthen
; X86-NEXT:    retl
; X86-NEXT:  LBB0_3: ## %forbody
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $88, %esp
; X86-NEXT:    movaps {{.*#+}} xmm1 = [1.28E+2,1.28E+2,1.28E+2,1.28E+2]
; X86-NEXT:    minps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-NEXT:    cvttps2dq %xmm1, %xmm0
; X86-NEXT:    cvtdq2ps %xmm0, %xmm0
; X86-NEXT:    subps %xmm0, %xmm1
; X86-NEXT:    movaps %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X86-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X86-NEXT:    addps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X86-NEXT:    mulps %xmm1, %xmm0
; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X86-NEXT:    addps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X86-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X86-NEXT:    psubd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X86-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X86-NEXT:    xorps %xmm0, %xmm0
; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X86-NEXT:    mulps %xmm0, %xmm0
; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X86-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X86-NEXT:    xorps %xmm0, %xmm0
; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X86-NEXT:    cmpunordps %xmm0, %xmm0
; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X86-NEXT:    minps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X86-NEXT:    xorps %xmm0, %xmm0
; X86-NEXT:    movaps %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT:    movl $0, (%esp)
; X86-NEXT:    xorl %esi, %esi
; X86-NEXT:    xorps %xmm3, %xmm3
; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X86-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 ## 16-byte Reload
; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 ## 16-byte Reload
; X86-NEXT:    calll *%esi
; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X86-NEXT:    minps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X86-NEXT:    pxor %xmm1, %xmm1
; X86-NEXT:    psubd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
; X86-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X86-NEXT:    psubd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X86-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X86-NEXT:    por %xmm1, %xmm0
; X86-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
; X86-NEXT:    pxor %xmm0, %xmm0
; X86-NEXT:    movdqa %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT:    movl $0, (%esp)
; X86-NEXT:    xorps %xmm3, %xmm3
; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 ## 16-byte Reload
; X86-NEXT:    movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 ## 16-byte Reload
; X86-NEXT:    calll *%esi
; X86-NEXT:    ud2
;
; X64-LABEL: program_1:
; X64:       ## %bb.0: ## %entry
; X64-NEXT:    cmpl $0, 0
; X64-NEXT:    jle LBB0_2
; X64-NEXT:  ## %bb.1: ## %forcond
; X64-NEXT:    cmpl $0, 0
; X64-NEXT:    jg LBB0_3
; X64-NEXT:  LBB0_2: ## %ifthen
; X64-NEXT:    retq
; X64-NEXT:  LBB0_3: ## %forbody
; X64-NEXT:    pushq %rbx
; X64-NEXT:    subq $64, %rsp
; X64-NEXT:    xorps %xmm0, %xmm0
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; X64-NEXT:    movaps {{.*#+}} xmm1 = [1.28E+2,1.28E+2,1.28E+2,1.28E+2]
; X64-NEXT:    minps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT:    cvttps2dq %xmm1, %xmm0
; X64-NEXT:    cvtdq2ps %xmm0, %xmm0
; X64-NEXT:    subps %xmm0, %xmm1
; X64-NEXT:    movaps %xmm1, (%rsp) ## 16-byte Spill
; X64-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; X64-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; X64-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; X64-NEXT:    addps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; X64-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; X64-NEXT:    mulps %xmm1, %xmm0
; X64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; X64-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; X64-NEXT:    addps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; X64-NEXT:    movdqa (%rsp), %xmm0 ## 16-byte Reload
; X64-NEXT:    psubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    movdqa %xmm0, (%rsp) ## 16-byte Spill
; X64-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; X64-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; X64-NEXT:    xorps %xmm0, %xmm0
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; X64-NEXT:    mulps %xmm0, %xmm0
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; X64-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; X64-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; X64-NEXT:    xorps %xmm0, %xmm0
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; X64-NEXT:    cmpunordps %xmm0, %xmm0
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; X64-NEXT:    minps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; X64-NEXT:    xorl %ebx, %ebx
; X64-NEXT:    xorps %xmm3, %xmm3
; X64-NEXT:    xorps %xmm4, %xmm4
; X64-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 ## 16-byte Reload
; X64-NEXT:    xorl %edi, %edi
; X64-NEXT:    callq *%rbx
; X64-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; X64-NEXT:    minps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; X64-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; X64-NEXT:    psubd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; X64-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; X64-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; X64-NEXT:    psubd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; X64-NEXT:    orps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; X64-NEXT:    xorps %xmm3, %xmm3
; X64-NEXT:    xorps %xmm4, %xmm4
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; X64-NEXT:    movaps (%rsp), %xmm2 ## 16-byte Reload
; X64-NEXT:    xorl %edi, %edi
; X64-NEXT:    callq *%rbx
; X64-NEXT:    ud2
entry:
	%tmp3.i = load i32, ptr null		; <i32> [#uses=1]
	%cmp = icmp slt i32 0, %tmp3.i		; <i1> [#uses=1]
	br i1 %cmp, label %forcond, label %ifthen

ifthen:		; preds = %entry
	ret void

forcond:		; preds = %entry
	%tmp3.i536 = load i32, ptr null		; <i32> [#uses=1]
	%cmp12 = icmp slt i32 0, %tmp3.i536		; <i1> [#uses=1]
	br i1 %cmp12, label %forbody, label %afterfor

forbody:		; preds = %forcond
	%bitcast204.i104 = bitcast <4 x i32> zeroinitializer to <4 x float>		; <<4 x float>> [#uses=1]
	%tmp78 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> < float 1.280000e+02, float 1.280000e+02, float 1.280000e+02, float 1.280000e+02 >, <4 x float> zeroinitializer) nounwind		; <<4 x float>> [#uses=2]
	%tmp79 = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %tmp78) nounwind		; <<4 x i32>> [#uses=1]
	%tmp80 = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %tmp79) nounwind		; <<4 x float>> [#uses=1]
	%sub140.i = fsub <4 x float> %tmp78, %tmp80		; <<4 x float>> [#uses=2]
	%mul166.i = fmul <4 x float> zeroinitializer, %sub140.i		; <<4 x float>> [#uses=1]
	%add167.i = fadd <4 x float> %mul166.i, < float 0x3FE62ACB60000000, float 0x3FE62ACB60000000, float 0x3FE62ACB60000000, float 0x3FE62ACB60000000 >		; <<4 x float>> [#uses=1]
	%mul171.i = fmul <4 x float> %add167.i, %sub140.i		; <<4 x float>> [#uses=1]
	%add172.i = fadd <4 x float> %mul171.i, < float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000 >		; <<4 x float>> [#uses=1]
	%bitcast176.i = bitcast <4 x float> %add172.i to <4 x i32>		; <<4 x i32>> [#uses=1]
	%andnps178.i = add <4 x i32> %bitcast176.i, <i32 1, i32 1, i32 1, i32 1>		; <<4 x i32>> [#uses=1]
	%bitcast179.i = bitcast <4 x i32> %andnps178.i to <4 x float>		; <<4 x float>> [#uses=1]
	%mul186.i = fmul <4 x float> %bitcast179.i, zeroinitializer		; <<4 x float>> [#uses=1]
	%bitcast190.i = bitcast <4 x float> %mul186.i to <4 x i32>		; <<4 x i32>> [#uses=1]
	%andnps192.i = add <4 x i32> %bitcast190.i, <i32 1, i32 1, i32 1, i32 1>		; <<4 x i32>> [#uses=1]
	%xorps.i = xor <4 x i32> zeroinitializer, < i32 -1, i32 -1, i32 -1, i32 -1 >		; <<4 x i32>> [#uses=1]
	%orps203.i = add <4 x i32> %andnps192.i, %xorps.i		; <<4 x i32>> [#uses=1]
	%bitcast204.i = bitcast <4 x i32> %orps203.i to <4 x float>		; <<4 x float>> [#uses=1]
	%mul310 = fmul <4 x float> %bitcast204.i104, zeroinitializer		; <<4 x float>> [#uses=2]
	%mul313 = fmul <4 x float> %bitcast204.i, zeroinitializer		; <<4 x float>> [#uses=1]
	%cmpunord.i11 = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> zeroinitializer, <4 x float> zeroinitializer, i8 3) nounwind		; <<4 x float>> [#uses=1]
	%tmp83 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %mul310, <4 x float> zeroinitializer) nounwind		; <<4 x float>> [#uses=1]
	%bitcast.i3 = bitcast <4 x float> %mul310 to <4 x i32>		; <<4 x i32>> [#uses=1]
	%andps.i5 = and <4 x i32> %bitcast.i3, zeroinitializer		; <<4 x i32>> [#uses=1]

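	; Note: the calls below go through a null function pointer, so all xmm
	; registers are clobbered across them; every vector value that lives
	; across a call must come from a spill slot or the constant pool, which
	; is what creates the spill pressure described at the top of the file.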
	call void null(<4 x float> %mul313, <4 x float> %cmpunord.i11, <4 x float> %tmp83, <4 x float> zeroinitializer, ptr null, <4 x i32> zeroinitializer) nounwind

	%tmp84 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %mul313, <4 x float> zeroinitializer) nounwind		; <<4 x float>> [#uses=1]

	%bitcast6.i13 = bitcast <4 x float> %cmpunord.i11 to <4 x i32>		; <<4 x i32>> [#uses=2]
	%andps.i14 = add <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %bitcast6.i13		; <<4 x i32>> [#uses=1]
	%not.i16 = xor <4 x i32> %bitcast6.i13, < i32 -1, i32 -1, i32 -1, i32 -1 >		; <<4 x i32>> [#uses=1]
	%andnps.i17 = add <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %not.i16		; <<4 x i32>> [#uses=1]
	%orps.i18 = or <4 x i32> %andnps.i17, %andps.i14		; <<4 x i32>> [#uses=1]
	%bitcast17.i19 = bitcast <4 x i32> %orps.i18 to <4 x float>		; <<4 x float>> [#uses=1]

	%bitcast11.i6 = bitcast <4 x float> %tmp83 to <4 x i32>		; <<4 x i32>> [#uses=1]
	%not.i7 = xor <4 x i32> zeroinitializer, < i32 -1, i32 -1, i32 -1, i32 -1 >		; <<4 x i32>> [#uses=1]
	%andnps.i8 = and <4 x i32> %bitcast11.i6, %not.i7		; <<4 x i32>> [#uses=1]
	%orps.i9 = or <4 x i32> %andnps.i8, %andps.i5		; <<4 x i32>> [#uses=1]
	%bitcast17.i10 = bitcast <4 x i32> %orps.i9 to <4 x float>		; <<4 x float>> [#uses=1]

	%bitcast6.i = bitcast <4 x float> zeroinitializer to <4 x i32>		; <<4 x i32>> [#uses=2]
	%andps.i = and <4 x i32> zeroinitializer, %bitcast6.i		; <<4 x i32>> [#uses=1]
	%bitcast11.i = bitcast <4 x float> %tmp84 to <4 x i32>		; <<4 x i32>> [#uses=1]
	%not.i = xor <4 x i32> %bitcast6.i, < i32 -1, i32 -1, i32 -1, i32 -1 >		; <<4 x i32>> [#uses=1]
	%andnps.i = and <4 x i32> %bitcast11.i, %not.i		; <<4 x i32>> [#uses=1]
	%orps.i = or <4 x i32> %andnps.i, %andps.i		; <<4 x i32>> [#uses=1]
	%bitcast17.i = bitcast <4 x i32> %orps.i to <4 x float>		; <<4 x float>> [#uses=1]
	call void null(<4 x float> %bitcast17.i19, <4 x float> %bitcast17.i10, <4 x float> %bitcast17.i, <4 x float> zeroinitializer, ptr null, <4 x i32> zeroinitializer) nounwind
	unreachable

afterfor:		; preds = %forcond
	ret void
}

declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone

declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone

declare <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float>) nounwind readnone

declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone
259