; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2,X86-SSE
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE,SSE4,X86-SSE
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,X86-AVX
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2,X86-AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2,X64-SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE,SSE4,X64-SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,X64-AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2,X64-AVX

define <4 x i32> @trunc_ashr_v4i64(<4 x i64> %a) nounwind {
; SSE2-LABEL: trunc_ashr_v4i64:
; SSE2:       # %bb.0:
; SSE2-NEXT:    psrad $31, %xmm1
; SSE2-NEXT:    psrad $31, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE2-NEXT:    ret{{[l|q]}}
;
; SSE4-LABEL: trunc_ashr_v4i64:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pxor %xmm2, %xmm2
; SSE4-NEXT:    pxor %xmm3, %xmm3
; SSE4-NEXT:    pcmpgtq %xmm1, %xmm3
; SSE4-NEXT:    pcmpgtq %xmm0, %xmm2
; SSE4-NEXT:    packssdw %xmm3, %xmm2
; SSE4-NEXT:    movdqa %xmm2, %xmm0
; SSE4-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: trunc_ashr_v4i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm1
; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: trunc_ashr_v4i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = ashr <4 x i64> %a, <i64 63, i64 63, i64 63, i64 63>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  ret <4 x i32> %2
}

define <8 x i16> @trunc_ashr_v4i64_bitcast(<4 x i64> %a0) {
; SSE2-LABEL: trunc_ashr_v4i64_bitcast:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; SSE2-NEXT:    movdqa %xmm1, %xmm2
; SSE2-NEXT:    psrad $31, %xmm2
; SSE2-NEXT:    psrad $17, %xmm1
; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    psrad $31, %xmm2
; SSE2-NEXT:    psrad $17, %xmm0
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT:    packssdw %xmm1, %xmm0
; SSE2-NEXT:    ret{{[l|q]}}
;
; SSE4-LABEL: trunc_ashr_v4i64_bitcast:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE4-NEXT:    psrad $31, %xmm1
; SSE4-NEXT:    psrad $17, %xmm2
; SSE4-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE4-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE4-NEXT:    psrad $31, %xmm0
; SSE4-NEXT:    psrad $17, %xmm1
; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; SSE4-NEXT:    packssdw %xmm2, %xmm0
; SSE4-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: trunc_ashr_v4i64_bitcast:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm2
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX1-NEXT:    vpsrad $17, %xmm1, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm2
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX1-NEXT:    vpsrad $17, %xmm0, %xmm0
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: trunc_ashr_v4i64_bitcast:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm1
; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; AVX2-NEXT:    vpsrad $17, %ymm0, %ymm0
; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = ashr <4 x i64> %a0, <i64 49, i64 49, i64 49, i64 49>
  %2 = bitcast <4 x i64> %1 to <8 x i32>
  %3 = trunc <8 x i32> %2 to <8 x i16>
  ret <8 x i16> %3
}

define <8 x i16> @trunc_ashr_v8i32(<8 x i32> %a) nounwind {
; SSE-LABEL: trunc_ashr_v8i32:
; SSE:       # %bb.0:
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    psrad $31, %xmm0
; SSE-NEXT:    packssdw %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: trunc_ashr_v8i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: trunc_ashr_v8i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = ashr <8 x i32> %a, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
  %2 = trunc <8 x i32> %1 to <8 x i16>
  ret <8 x i16> %2
}

define <8 x i16> @trunc_ashr_v4i32_icmp_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; X86-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    psrad $31, %xmm0
; X86-SSE-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE-NEXT:    packssdw %xmm1, %xmm0
; X86-SSE-NEXT:    retl
;
; X86-AVX-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
; X86-AVX-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
; X86-AVX-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT:    retl
;
; X64-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    psrad $31, %xmm0
; X64-SSE-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-SSE-NEXT:    packssdw %xmm1, %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
; X64-AVX-NEXT:    vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; X64-AVX-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT:    retq
  %1 = ashr <4 x i32> %a, <i32 31, i32 31, i32 31, i32 31>
  %2 = icmp sgt <4 x i32> %b, <i32 1, i32 16, i32 255, i32 65535>
  %3 = sext <4 x i1> %2 to <4 x i32>
  %4 = shufflevector <4 x i32> %1, <4 x i32> %3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %5 = trunc <8 x i32> %4 to <8 x i16>
  ret <8 x i16> %5
}

define <8 x i16> @trunc_ashr_v4i64_demandedelts(<4 x i64> %a0) {
; SSE2-LABEL: trunc_ashr_v4i64_demandedelts:
; SSE2:       # %bb.0:
; SSE2-NEXT:    psllq $63, %xmm0
; SSE2-NEXT:    psllq $63, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
; SSE2-NEXT:    psrad $31, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT:    psrad $31, %xmm0
; SSE2-NEXT:    packssdw %xmm1, %xmm0
; SSE2-NEXT:    ret{{[l|q]}}
;
; SSE4-LABEL: trunc_ashr_v4i64_demandedelts:
; SSE4:       # %bb.0:
; SSE4-NEXT:    psllq $63, %xmm0
; SSE4-NEXT:    pxor %xmm2, %xmm2
; SSE4-NEXT:    pxor %xmm3, %xmm3
; SSE4-NEXT:    pcmpgtq %xmm0, %xmm3
; SSE4-NEXT:    psllq $63, %xmm1
; SSE4-NEXT:    pcmpgtq %xmm1, %xmm2
; SSE4-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,0,0,0]
; SSE4-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[0,0,0,0]
; SSE4-NEXT:    packssdw %xmm1, %xmm0
; SSE4-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: trunc_ashr_v4i64_demandedelts:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpsllq $63, %xmm1, %xmm1
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm1
; AVX1-NEXT:    vpsllq $63, %xmm0, %xmm0
; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: trunc_ashr_v4i64_demandedelts:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsllq $63, %ymm0, %ymm0
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = shl <4 x i64> %a0, <i64 63, i64 0, i64 63, i64 0>
  %2 = ashr <4 x i64> %1, <i64 63, i64 0, i64 63, i64 0>
  %3 = bitcast <4 x i64> %2 to <8 x i32>
  %4 = shufflevector <8 x i32> %3, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
  %5 = trunc <8 x i32> %4 to <8 x i16>
  ret <8 x i16> %5
}

define <16 x i8> @packsswb_icmp_zero_128(<8 x i16> %a0) {
; SSE-LABEL: packsswb_icmp_zero_128:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    pcmpeqw %xmm1, %xmm0
; SSE-NEXT:    packsswb %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: packsswb_icmp_zero_128:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
  %1 = icmp eq <8 x i16> %a0, zeroinitializer
  %2 = sext <8 x i1> %1 to <8 x i8>
  %3 = shufflevector <8 x i8> %2, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  ret <16 x i8> %3
}

define <16 x i8> @packsswb_icmp_zero_trunc_128(<8 x i16> %a0) {
; SSE-LABEL: packsswb_icmp_zero_trunc_128:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    pcmpeqw %xmm1, %xmm0
; SSE-NEXT:    packsswb %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: packsswb_icmp_zero_trunc_128:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
  %1 = icmp eq <8 x i16> %a0, zeroinitializer
  %2 = sext <8 x i1> %1 to <8 x i16>
  %3 = shufflevector <8 x i16> %2, <8 x i16> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %4 = trunc <16 x i16> %3 to <16 x i8>
  ret <16 x i8> %4
}

define <32 x i8> @packsswb_icmp_zero_256(<16 x i16> %a0) {
; SSE-LABEL: packsswb_icmp_zero_256:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pcmpeqw %xmm2, %xmm1
; SSE-NEXT:    pcmpeqw %xmm2, %xmm0
; SSE-NEXT:    pxor %xmm3, %xmm3
; SSE-NEXT:    packsswb %xmm0, %xmm3
; SSE-NEXT:    packsswb %xmm1, %xmm2
; SSE-NEXT:    movdqa %xmm3, %xmm0
; SSE-NEXT:    movdqa %xmm2, %xmm1
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: packsswb_icmp_zero_256:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpacksswb %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: packsswb_icmp_zero_256:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpacksswb %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = icmp eq <16 x i16> %a0, zeroinitializer
  %2 = sext <16 x i1> %1 to <16 x i16>
  %3 = bitcast <16 x i16> %2 to <32 x i8>
  %4 = shufflevector <32 x i8> zeroinitializer, <32 x i8> %3, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
  ret <32 x i8> %4
}

define <32 x i8> @packsswb_icmp_zero_trunc_256(<16 x i16> %a0) {
; SSE-LABEL: packsswb_icmp_zero_trunc_256:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pcmpeqw %xmm2, %xmm1
; SSE-NEXT:    pcmpeqw %xmm2, %xmm0
; SSE-NEXT:    pxor %xmm3, %xmm3
; SSE-NEXT:    packsswb %xmm0, %xmm3
; SSE-NEXT:    packsswb %xmm1, %xmm2
; SSE-NEXT:    movdqa %xmm3, %xmm0
; SSE-NEXT:    movdqa %xmm2, %xmm1
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: packsswb_icmp_zero_trunc_256:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpacksswb %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: packsswb_icmp_zero_trunc_256:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpacksswb %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3]
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = icmp eq <16 x i16> %a0, zeroinitializer
  %2 = sext <16 x i1> %1 to <16 x i16>
  %3 = shufflevector <16 x i16> zeroinitializer, <16 x i16> %2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  %4 = trunc <32 x i16> %3 to <32 x i8>
  ret <32 x i8> %4
}