; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX2
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX512

; These tests cover lowering of the "broadcast a scalar, AND each lane with a
; distinct power-of-two, compare the result against zero" idiom, i.e. turning
; one bit of the input per vector lane into an all-ones/all-zeros lane mask.
; The expected pattern is splat + AND-with-constant + compare-equal against the
; same constant (x & pow2 != 0  <=>  x & pow2 == pow2), rather than a compare
; against zero followed by a NOT.

; Splat an i8 across 8 lanes, test bits 7..0 (one bit per lane), and
; sign-extend the <8 x i1> result to <8 x i16>.
define <8 x i16> @pow2_mask_v16i8(i8 zeroext %0) {
; SSE2-LABEL: pow2_mask_v16i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movd %edi, %xmm0
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [128,128,64,64,32,32,16,16,8,8,4,4,2,2,1,1]
; SSE2-NEXT:    pand %xmm1, %xmm0
; SSE2-NEXT:    pcmpeqb %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: pow2_mask_v16i8:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movd %edi, %xmm0
; SSE41-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE41-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE41-NEXT:    movq {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,0,0,0,0,0,0,0,0]
; SSE41-NEXT:    pand %xmm1, %xmm0
; SSE41-NEXT:    pcmpeqb %xmm1, %xmm0
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
; SSE41-NEXT:    retq
;
; AVX2-LABEL: pow2_mask_v16i8:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovd %edi, %xmm0
; AVX2-NEXT:    vpbroadcastb %xmm0, %xmm0
; AVX2-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: pow2_mask_v16i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpbroadcastb %edi, %xmm0
; AVX512-NEXT:    vptestmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k0
; AVX512-NEXT:    vpmovm2w %k0, %xmm0
; AVX512-NEXT:    retq
  %vec = insertelement <1 x i8> poison, i8 %0, i64 0
  %splat = shufflevector <1 x i8> %vec, <1 x i8> poison, <8 x i32> zeroinitializer
  %mask = and <8 x i8> %splat, <i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1>
  %not = icmp ne <8 x i8> %mask, zeroinitializer
  %ext = sext <8 x i1> %not to <8 x i16>
  ret <8 x i16> %ext
}

; Same idiom at i16 granularity: splat an i16 across 16 lanes (a full 256-bit
; result), test bits 15..0, and sign-extend the <16 x i1> to <16 x i16>.
define <16 x i16> @pow2_mask_v16i16(i16 zeroext %0) {
; SSE2-LABEL: pow2_mask_v16i16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movd %edi, %xmm0
; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [128,64,32,16,8,4,2,1]
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    pand %xmm2, %xmm1
; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [32768,16384,8192,4096,2048,1024,512,256]
; SSE2-NEXT:    pand %xmm3, %xmm0
; SSE2-NEXT:    pcmpeqw %xmm3, %xmm0
; SSE2-NEXT:    pcmpeqw %xmm2, %xmm1
; SSE2-NEXT:    retq
;
; SSE41-LABEL: pow2_mask_v16i16:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movd %edi, %xmm0
; SSE41-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm2 = [128,64,32,16,8,4,2,1]
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    pand %xmm2, %xmm1
; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [32768,16384,8192,4096,2048,1024,512,256]
; SSE41-NEXT:    pand %xmm3, %xmm0
; SSE41-NEXT:    pcmpeqw %xmm3, %xmm0
; SSE41-NEXT:    pcmpeqw %xmm2, %xmm1
; SSE41-NEXT:    retq
;
; AVX2-LABEL: pow2_mask_v16i16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovd %edi, %xmm0
; AVX2-NEXT:    vpbroadcastw %xmm0, %ymm0
; AVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2,1]
; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: pow2_mask_v16i16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpbroadcastw %edi, %ymm0
; AVX512-NEXT:    vmovdqa {{.*#+}} ymm1 = [32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2,1]
; AVX512-NEXT:    vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX512-NEXT:    retq
  %vec = insertelement <1 x i16> poison, i16 %0, i64 0
  %splat = shufflevector <1 x i16> %vec, <1 x i16> poison, <16 x i32> zeroinitializer
  %mask = and <16 x i16> %splat, <i16 -32768, i16 16384, i16 8192, i16 4096, i16 2048, i16 1024, i16 512, i16 256, i16 128, i16 64, i16 32, i16 16, i16 8, i16 4, i16 2, i16 1>
  %not = icmp ne <16 x i16> %mask, zeroinitializer
  %ext = sext <16 x i1> %not to <16 x i16>
  ret <16 x i16> %ext
}

; PR78888
; As above but the <8 x i8> lane-mask result is bitcast to a scalar i64, so
; the whole computation must stay in a 64-bit vector and be moved out with a
; single movq rather than widened.
define i64 @pow2_mask_v8i8(i8 zeroext %0) {
; SSE-LABEL: pow2_mask_v8i8:
; SSE:       # %bb.0:
; SSE-NEXT:    movd %edi, %xmm0
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE-NEXT:    movq {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,0,0,0,0,0,0,0,0]
; SSE-NEXT:    pand %xmm1, %xmm0
; SSE-NEXT:    pcmpeqb %xmm1, %xmm0
; SSE-NEXT:    movq %xmm0, %rax
; SSE-NEXT:    retq
;
; AVX2-LABEL: pow2_mask_v8i8:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovd %edi, %xmm0
; AVX2-NEXT:    vpbroadcastb %xmm0, %xmm0
; AVX2-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovq %xmm0, %rax
; AVX2-NEXT:    retq
;
; AVX512-LABEL: pow2_mask_v8i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpbroadcastb %edi, %xmm0
; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
; AVX512-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovq %xmm0, %rax
; AVX512-NEXT:    retq
  %vec = insertelement <1 x i8> poison, i8 %0, i64 0
  %splat = shufflevector <1 x i8> %vec, <1 x i8> poison, <8 x i32> zeroinitializer
  %mask = and <8 x i8> %splat, <i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1>
  %not = icmp ne <8 x i8> %mask, zeroinitializer
  %ext = sext <8 x i1> %not to <8 x i8>
  %res = bitcast <8 x i8> %ext to i64
  ret i64 %res
}