Lines matching 0x302
14 ; CHECK: # %bb.0:
27 ; CHECK: # %bb.0:
40 ; X86: # %bb.0:
41 ; X86-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
46 ; X64: # %bb.0:
47 ; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
59 ; X86: # %bb.0:
61 ; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
62 ; X86-NEXT: vextractps $1, %xmm0, {{[0-9]+}}(%esp)
64 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
65 ; X86-NEXT: movl $3, {{[0-9]+}}(%esp)
67 ; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
68 ; X86-NEXT: vextractps $3, %xmm0, {{[0-9]+}}(%esp)
70 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
71 ; X86-NEXT: movl $3, {{[0-9]+}}(%esp)
74 ; X86-NEXT: vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
76 ; X86-NEXT: vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
84 ; X64: # %bb.0:
86 ; X64-NEXT: movabsq $-6148914691236517205, %rcx # imm = 0xAAAAAAAAAAAAAAAB
92 ; X64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
105 ; X86: # %bb.0:
109 ; X86-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
113 ; X64: # %bb.0:
117 ; X64-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
129 ; X86: # %bb.0:
130 ; X86-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 # [10,10,10,10,10,10,10,10,10,10,10,1…
137 ; X64: # %bb.0:
138 ; X64-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 # [10,10,10,10,10,10,10,10,10,1…
153 ; X86: # %bb.0:
156 ; X86-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %ymm1, %ymm0
160 ; X64: # %bb.0:
163 ; X64-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm0
175 ; X86: # %bb.0:
178 ; X86-NEXT: vpternlogd $236, {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm1, %xmm0
182 ; X64: # %bb.0:
185 ; X64-NEXT: vpternlogd $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
197 ; X86: # %bb.0:
198 ; X86-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm1
206 ; X64: # %bb.0:
207 ; X64-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm1
223 ; X86: # %bb.0:
225 ; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
226 ; X86-NEXT: vextractps $1, %xmm0, {{[0-9]+}}(%esp)
228 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
229 ; X86-NEXT: movl $3, {{[0-9]+}}(%esp)
231 ; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
232 ; X86-NEXT: vextractps $3, %xmm0, {{[0-9]+}}(%esp)
234 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
235 ; X86-NEXT: movl $3, {{[0-9]+}}(%esp)
237 ; X86-NEXT: vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
239 ; X86-NEXT: vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
241 ; X86-NEXT: vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
242 ; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
243 ; X86-NEXT: vextractps $1, %xmm0, {{[0-9]+}}(%esp)
245 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
246 ; X86-NEXT: movl $770, {{[0-9]+}}(%esp) # imm = 0x302
248 ; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
249 ; X86-NEXT: vextractps $3, %xmm0, {{[0-9]+}}(%esp)
251 ; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
252 ; X86-NEXT: movl $770, {{[0-9]+}}(%esp) # imm = 0x302
255 ; X86-NEXT: vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
257 ; X86-NEXT: vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
260 ; X86-NEXT: vpsllq $56, {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Folded Reload
266 ; X64: # %bb.0:
268 ; X64-NEXT: movabsq $-6148914691236517205, %rdi # imm = 0xAAAAAAAAAAAAAAAB
276 ; X64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
278 ; X64-NEXT: movabsq $-6180857105216966645, %rdi # imm = 0xAA392F35DC17F00B
285 ; X64-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
300 ; CHECK: # %bb.0:
311 ; CHECK: # %bb.0:
322 ; CHECK: # %bb.0:
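For context, these matches are FileCheck assertions from an autogenerated llc test in LLVM's X86 backend. Below is a minimal sketch of the kind of test file such lines live in; the RUN lines, the function name mul_v8i32, and the IR splat constant are illustrative assumptions (the original constant is not recoverable from the matches), while the two vpmulld CHECK lines are copied verbatim from the matches above.

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=X64

; Hypothetical test function; the multiplier is a placeholder.
define <8 x i32> @mul_v8i32(<8 x i32> %a) {
; X86-LABEL: mul_v8i32:
; X86:       # %bb.0:
; X86-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
; X86-NEXT:    retl
;
; X64-LABEL: mul_v8i32:
; X64:       # %bb.0:
; X64-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
; X64-NEXT:    retq
  %r = mul <8 x i32> %a, <i32 1234, i32 1234, i32 1234, i32 1234, i32 1234, i32 1234, i32 1234, i32 1234>
  ret <8 x i32> %r
}

In assertions like these, the {{...}} spans are FileCheck regexes (here matching constant-pool labels such as .LCPI0_0, stack offsets, and the esp/ebp base register), and the -NEXT suffix requires the line to appear immediately after the previous match.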