; Excerpt: SSE4.1 intrinsic codegen checks with instruction encodings
; (SSE/AVX/AVX512 prefixes; X86- and X64-specific variants where they differ).
; SSE: ## %bb.0:
; SSE-NEXT: movapd %xmm0, %xmm3 ## encoding: [0x66,0x0f,0x28,0xd8]
; SSE-NEXT: movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
; SSE-NEXT: blendvpd %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x15,0xd9]
; SSE-NEXT: movapd %xmm3, %xmm0 ## encoding: [0x66,0x0f,0x28,0xc3]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX: ## %bb.0:
; AVX-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4b,0xc1,0x20]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
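
; Hedged C sketch of the intrinsic this block is consistent with (requires
; <smmintrin.h>; function and variable names are illustrative, not from this file):
;   __m128d blendv_pd(__m128d a, __m128d b, __m128d m) {
;     return _mm_blendv_pd(a, b, m); // SSE: blendvpd, AVX: vblendvpd
;   }
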
; SSE: ## %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm3 ## encoding: [0x0f,0x28,0xd8]
; SSE-NEXT: movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
; SSE-NEXT: blendvps %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x14,0xd9]
; SSE-NEXT: movaps %xmm3, %xmm0 ## encoding: [0x0f,0x28,0xc3]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX: ## %bb.0:
; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4a,0xc1,0x20]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
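
; Hedged sketch (assumed _mm_blendv_ps, <smmintrin.h>; names illustrative):
;   __m128 blendv_ps(__m128 a, __m128 b, __m128 m) {
;     return _mm_blendv_ps(a, b, m); // SSE: blendvps, AVX: vblendvps
;   }
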
; SSE: ## %bb.0:
; SSE-NEXT: dppd $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x41,0xc1,0x07]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX: ## %bb.0:
; AVX-NEXT: vdppd $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x41,0xc1,0x07]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
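
; Hedged sketch (assumed _mm_dp_pd with the imm8 = 7 seen above, <smmintrin.h>):
;   __m128d dp_pd(__m128d a, __m128d b) {
;     return _mm_dp_pd(a, b, 7); // dppd/vdppd $7
;   }
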
; SSE: ## %bb.0:
; SSE-NEXT: dpps $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x40,0xc1,0x07]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX: ## %bb.0:
; AVX-NEXT: vdpps $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x40,0xc1,0x07]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
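
; Hedged sketch (assumed _mm_dp_ps with imm8 = 7, <smmintrin.h>):
;   __m128 dp_ps(__m128 a, __m128 b) {
;     return _mm_dp_ps(a, b, 7); // dpps/vdpps $7
;   }
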
; SSE: ## %bb.0:
; SSE-NEXT: insertps $17, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x21,0xc1,0x11]
; SSE-NEXT: ## xmm0 = zero,xmm1[0],xmm0[2,3]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX1: ## %bb.0:
; AVX1-NEXT: vinsertps $17, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
; AVX1-NEXT: ## xmm0 = zero,xmm1[0],xmm0[2,3]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX512: ## %bb.0:
; AVX512-NEXT: vinsertps $17, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x21,0xc1,0x11]
; AVX512-NEXT: ## xmm0 = zero,xmm1[0],xmm0[2,3]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
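
; Hedged sketch (assumed _mm_insert_ps with imm8 = 0x11 = 17, <smmintrin.h>).
; That immediate selects b[0] into lane 1 and zeroes lane 0, matching the
; "xmm0 = zero,xmm1[0],xmm0[2,3]" shuffle comment above:
;   __m128 insert_ps(__m128 a, __m128 b) {
;     return _mm_insert_ps(a, b, 0x11); // insertps/vinsertps $17
;   }
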
; SSE: ## %bb.0:
; SSE-NEXT: mpsadbw $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x42,0xc1,0x07]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX: ## %bb.0:
; AVX-NEXT: vmpsadbw $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x42,0xc1,0x07]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
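
; Hedged sketch (assumed _mm_mpsadbw_epu8 with imm8 = 7, <smmintrin.h>):
;   __m128i mpsadbw(__m128i a, __m128i b) {
;     return _mm_mpsadbw_epu8(a, b, 7); // mpsadbw/vmpsadbw $7
;   }
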
; X86-SSE: ## %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT: movdqa (%eax), %xmm1 ## encoding: [0x66,0x0f,0x6f,0x08]
; X86-SSE-NEXT: mpsadbw $7, %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x3a,0x42,0xc8,0x07]
; X86-SSE-NEXT: movdqa %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x6f,0xc1]
; X86-SSE-NEXT: retl ## encoding: [0xc3]

; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT: vmovdqa (%eax), %xmm1 ## encoding: [0xc5,0xf9,0x6f,0x08]
; X86-AVX1-NEXT: vmpsadbw $7, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x42,0xc0,0x07]
; X86-AVX1-NEXT: retl ## encoding: [0xc3]

; X86-AVX512: ## %bb.0:
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT: vmovdqa (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x08]
; X86-AVX512-NEXT: vmpsadbw $7, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x42,0xc0,0x07]
; X86-AVX512-NEXT: retl ## encoding: [0xc3]

; X64-SSE: ## %bb.0:
; X64-SSE-NEXT: movdqa (%rdi), %xmm1 ## encoding: [0x66,0x0f,0x6f,0x0f]
; X64-SSE-NEXT: mpsadbw $7, %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x3a,0x42,0xc8,0x07]
; X64-SSE-NEXT: movdqa %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x6f,0xc1]
; X64-SSE-NEXT: retq ## encoding: [0xc3]

; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vmovdqa (%rdi), %xmm1 ## encoding: [0xc5,0xf9,0x6f,0x0f]
; X64-AVX1-NEXT: vmpsadbw $7, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x42,0xc0,0x07]
; X64-AVX1-NEXT: retq ## encoding: [0xc3]

; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vmovdqa (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0x0f]
; X64-AVX512-NEXT: vmpsadbw $7, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x42,0xc0,0x07]
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
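
; Hedged sketch of the memory-operand variant above. The checks load one
; source from a pointer and use it as the destination operand; the argument
; order below is an assumption, and names are illustrative:
;   __m128i mpsadbw_load(const __m128i *p, __m128i b) {
;     return _mm_mpsadbw_epu8(_mm_load_si128(p), b, 7);
;   }
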
; SSE: ## %bb.0:
; SSE-NEXT: packusdw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x2b,0xc1]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX1: ## %bb.0:
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX512: ## %bb.0:
; AVX512-NEXT: vpackusdw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x2b,0xc1]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
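
; Hedged sketch (assumed _mm_packus_epi32, <smmintrin.h>):
;   __m128i packus_epi32(__m128i a, __m128i b) {
;     return _mm_packus_epi32(a, b); // packusdw/vpackusdw
;   }
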
; X86-SSE: ## %bb.0:
; X86-SSE-NEXT: movaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
; X86-SSE-NEXT: ## encoding: [0x0f,0x28,0x05,A,A,A,A]
; X86-SSE-NEXT: ## fixup A - offset: 3, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-SSE-NEXT: retl ## encoding: [0xc3]

; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
; X86-AVX1-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X86-AVX1-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX1-NEXT: retl ## encoding: [0xc3]

; X86-AVX512: ## %bb.0:
; X86-AVX512-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,0,4294967295,0]
; X86-AVX512-NEXT: ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x21,0x05,A,A,A,A]
; X86-AVX512-NEXT: ## fixup A - offset: 5, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512-NEXT: retl ## encoding: [0xc3]

; X64-SSE: ## %bb.0:
; X64-SSE-NEXT: movaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
; X64-SSE-NEXT: ## encoding: [0x0f,0x28,0x05,A,A,A,A]
; X64-SSE-NEXT: ## fixup A - offset: 3, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-SSE-NEXT: retq ## encoding: [0xc3]

; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vmovaps {{.*#+}} xmm0 = [0,0,0,0,65535,65535,0,0]
; X64-AVX1-NEXT: ## encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X64-AVX1-NEXT: ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX1-NEXT: retq ## encoding: [0xc3]

; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,0,4294967295,0]
; X64-AVX512-NEXT: ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x21,0x05,A,A,A,A]
; X64-AVX512-NEXT: ## fixup A - offset: 5, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
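
; Note: the block above constant-folds its result. SSE/AVX1 load the 16-bit
; pattern [0,0,0,0,65535,65535,0,0] from the constant pool, while AVX512
; materializes the same bits with a sign-extending vpmovsxbd load of the
; dword view [0,0,4294967295,0]. The source intrinsic is not recoverable
; from these matched lines alone.
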
; SSE: ## %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm3 ## encoding: [0x66,0x0f,0x6f,0xd8]
; SSE-NEXT: movaps %xmm2, %xmm0 ## encoding: [0x0f,0x28,0xc2]
; SSE-NEXT: pblendvb %xmm0, %xmm1, %xmm3 ## encoding: [0x66,0x0f,0x38,0x10,0xd9]
; SSE-NEXT: movdqa %xmm3, %xmm0 ## encoding: [0x66,0x0f,0x6f,0xc3]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX: ## %bb.0:
; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x4c,0xc1,0x20]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
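
; Hedged sketch (assumed _mm_blendv_epi8, <smmintrin.h>; names illustrative):
;   __m128i blendv_epi8(__m128i a, __m128i b, __m128i m) {
;     return _mm_blendv_epi8(a, b, m); // pblendvb/vpblendvb
;   }
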
; SSE: ## %bb.0:
; SSE-NEXT: phminposuw %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x38,0x41,0xc0]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX: ## %bb.0:
; AVX-NEXT: vphminposuw %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x41,0xc0]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
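
; Hedged sketch (assumed _mm_minpos_epu16, <smmintrin.h>):
;   __m128i minpos_epu16(__m128i a) {
;     return _mm_minpos_epu16(a); // phminposuw/vphminposuw
;   }
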
; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
; SSE-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX: ## %bb.0:
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT: vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
; AVX-NEXT: setb %al ## encoding: [0x0f,0x92,0xc0]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
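
; Hedged sketch: ptest followed by setb reads CF, which is what
; _mm_testc_si128 returns (<smmintrin.h>; names illustrative):
;   int testc(__m128i a, __m128i b) {
;     return _mm_testc_si128(a, b); // ptest + setb
;   }
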
; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
; SSE-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX: ## %bb.0:
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT: vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
; AVX-NEXT: seta %al ## encoding: [0x0f,0x97,0xc0]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
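
; Hedged sketch: ptest followed by seta tests ZF == 0 && CF == 0, matching
; _mm_testnzc_si128 (<smmintrin.h>; names illustrative):
;   int testnzc(__m128i a, __m128i b) {
;     return _mm_testnzc_si128(a, b); // ptest + seta
;   }
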
; SSE: ## %bb.0:
; SSE-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; SSE-NEXT: ptest %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x38,0x17,0xc1]
; SSE-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX: ## %bb.0:
; AVX-NEXT: xorl %eax, %eax ## encoding: [0x31,0xc0]
; AVX-NEXT: vptest %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x17,0xc1]
; AVX-NEXT: sete %al ## encoding: [0x0f,0x94,0xc0]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
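
; Hedged sketch: ptest followed by sete reads ZF, matching _mm_testz_si128
; (<smmintrin.h>; names illustrative):
;   int testz(__m128i a, __m128i b) {
;     return _mm_testz_si128(a, b); // ptest + sete
;   }
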
; SSE: ## %bb.0:
; SSE-NEXT: roundpd $7, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x09,0xc0,0x07]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX1: ## %bb.0:
; AVX1-NEXT: vroundpd $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x09,0xc0,0x07]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX512: ## %bb.0:
; AVX512-NEXT: vroundpd $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x09,0xc0,0x07]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
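
; Hedged sketch (assumed _mm_round_pd with the rounding immediate 7 seen
; above, <smmintrin.h>):
;   __m128d round_pd(__m128d a) {
;     return _mm_round_pd(a, 7); // roundpd/vroundpd $7
;   }
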
; SSE: ## %bb.0:
; SSE-NEXT: roundps $7, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x08,0xc0,0x07]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX1: ## %bb.0:
; AVX1-NEXT: vroundps $7, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x08,0xc0,0x07]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX512: ## %bb.0:
; AVX512-NEXT: vroundps $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x08,0xc0,0x07]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
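
; Hedged sketch (assumed _mm_round_ps with imm8 = 7, <smmintrin.h>):
;   __m128 round_ps(__m128 a) {
;     return _mm_round_ps(a, 7); // roundps/vroundps $7
;   }
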
; SSE: ## %bb.0:
; SSE-NEXT: roundsd $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0b,0xc1,0x07]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX1: ## %bb.0:
; AVX1-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0b,0xc1,0x07]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX512: ## %bb.0:
; AVX512-NEXT: vroundsd $7, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0b,0xc1,0x07]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
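
; Hedged sketch (assumed _mm_round_sd with imm8 = 7, <smmintrin.h>):
;   __m128d round_sd(__m128d a, __m128d b) {
;     return _mm_round_sd(a, b, 7); // roundsd/vroundsd $7
;   }
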
; X86-SSE: ## %bb.0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT: roundsd $7, (%eax), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0b,0x00,0x07]
; X86-SSE-NEXT: retl ## encoding: [0xc3]

; X86-AVX1: ## %bb.0:
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT: vroundsd $7, (%eax), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0b,0x00,0x07]
; X86-AVX1-NEXT: retl ## encoding: [0xc3]

; X86-AVX512: ## %bb.0:
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT: vroundsd $7, (%eax), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0b,0x00,0x07]
; X86-AVX512-NEXT: retl ## encoding: [0xc3]

; X64-SSE: ## %bb.0:
; X64-SSE-NEXT: roundsd $7, (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0b,0x07,0x07]
; X64-SSE-NEXT: retq ## encoding: [0xc3]

; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0b,0x07,0x07]
; X64-AVX1-NEXT: retq ## encoding: [0xc3]

; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vroundsd $7, (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0b,0x07,0x07]
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
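
; Hedged sketch of the folded-load variant above (assumed the scalar source
; comes from a double pointer; the load is folded into roundsd; names
; illustrative):
;   __m128d round_sd_load(__m128d a, const double *p) {
;     return _mm_round_sd(a, _mm_load_sd(p), 7);
;   }
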
; SSE: ## %bb.0:
; SSE-NEXT: roundss $7, %xmm1, %xmm0 ## encoding: [0x66,0x0f,0x3a,0x0a,0xc1,0x07]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX1: ## %bb.0:
; AVX1-NEXT: vroundss $7, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0a,0xc1,0x07]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]

; AVX512: ## %bb.0:
; AVX512-NEXT: vroundss $7, %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x0a,0xc1,0x07]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
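
; Hedged sketch (assumed _mm_round_ss with imm8 = 7, <smmintrin.h>):
;   __m128 round_ss(__m128 a, __m128 b) {
;     return _mm_round_ss(a, b, 7); // roundss/vroundss $7
;   }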