; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s

; A combine forming X86ISD::VSHLI was missing a test and was not using a
; TargetConstant for the RHS (shift-amount) operand.
; https://bugs.chromium.org/p/chromium/issues/detail?id=1005750
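;
; A minimal sketch (not the in-tree code) of what "using TargetConstant for the
; RHS operand" means when a combine builds the shift node directly; Op, ShiftAmt,
; VT and DL are placeholder names:
;
;   // The shift-amount operand of X86ISD::VSHLI is expected to be an i8
;   // TargetConstant, which later combines will not try to rewrite.
;   SDValue Amt = DAG.getTargetConstant(ShiftAmt, DL, MVT::i8);
;   return DAG.getNode(X86ISD::VSHLI, DL, VT, Op, Amt);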

define <8 x i8> @vshli_target_constant(<8 x i16> %arg, <8 x i32> %arg1) {
; CHECK-LABEL: vshli_target_constant:
; CHECK:       # %bb.0: # %bb
; CHECK-NEXT:    movdqa {{.*#+}} xmm0 = [2863311531,2863311531,2863311531,2863311531]
; CHECK-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; CHECK-NEXT:    pmuludq %xmm0, %xmm1
; CHECK-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; CHECK-NEXT:    pmuludq %xmm0, %xmm3
; CHECK-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
; CHECK-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; CHECK-NEXT:    psrld $1, %xmm1
; CHECK-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; CHECK-NEXT:    pmuludq %xmm0, %xmm2
; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
; CHECK-NEXT:    pmuludq %xmm0, %xmm3
; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3]
; CHECK-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; CHECK-NEXT:    psrld $1, %xmm2
; CHECK-NEXT:    movdqa {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; CHECK-NEXT:    pand %xmm3, %xmm2
; CHECK-NEXT:    pand %xmm3, %xmm1
; CHECK-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    pxor %xmm4, %xmm4
; CHECK-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; CHECK-NEXT:    movdqa %xmm0, %xmm5
; CHECK-NEXT:    punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
; CHECK-NEXT:    pmaddwd %xmm2, %xmm5
; CHECK-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; CHECK-NEXT:    pmaddwd %xmm1, %xmm0
; CHECK-NEXT:    movdqa {{.*#+}} xmm1 = [128,128,128,128]
; CHECK-NEXT:    paddd %xmm1, %xmm5
; CHECK-NEXT:    paddd %xmm1, %xmm0
; CHECK-NEXT:    psrld $8, %xmm0
; CHECK-NEXT:    psrld $8, %xmm5
; CHECK-NEXT:    pand %xmm3, %xmm5
; CHECK-NEXT:    pand %xmm3, %xmm0
; CHECK-NEXT:    packuswb %xmm5, %xmm0
; CHECK-NEXT:    packuswb %xmm0, %xmm0
; CHECK-NEXT:    retq
bb:
  %tmp = udiv <8 x i32> %arg1, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
  %tmp2 = and <8 x i32> %tmp, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
  %tmp3 = load <8 x i8>, ptr undef, align 1
  %tmp4 = zext <8 x i8> %tmp3 to <8 x i32>
  %tmp5 = mul nuw nsw <8 x i32> %tmp2, %tmp4
  %tmp6 = add nuw nsw <8 x i32> %tmp5, <i32 128, i32 128, i32 128, i32 128, i32 128, i32 128, i32 128, i32 128>
  %tmp7 = lshr <8 x i32> %tmp6, <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
  %tmp8 = trunc <8 x i32> %tmp7 to <8 x i8>
  ret <8 x i8> %tmp8
}