; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mattr=+avx -mtriple=i686-pc-win32 | FileCheck %s --check-prefixes=AVX1
; RUN: llc < %s -mattr=+avx2 -mtriple=i686-pc-win32 | FileCheck %s --check-prefixes=AVX2

; The function name suggests this was reduced from a compile-time hang. The IR
; splats element 4 of a loaded <8 x i32> into lanes 0 and 15 of an otherwise
; zero <16 x i32> and stores the result; the assertions below pin the AVX1 and
; AVX2 lowerings.
define void @endless_loop() {
; AVX1-LABEL: endless_loop:
; AVX1:       # %bb.0: # %entry
; AVX1-NEXT:    vmovaps (%eax), %xmm0
; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm0[0,0,0,0]
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
; AVX1-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
; AVX1-NEXT:    vmovaps %ymm0, (%eax)
; AVX1-NEXT:    vmovaps %ymm1, (%eax)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retl
;
; AVX2-LABEL: endless_loop:
; AVX2:       # %bb.0: # %entry
; AVX2-NEXT:    vbroadcastss (%eax), %xmm0
; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vblendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; AVX2-NEXT:    vbroadcastss %xmm0, %ymm0
; AVX2-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5,6],ymm0[7]
; AVX2-NEXT:    vmovaps %ymm0, (%eax)
; AVX2-NEXT:    vmovaps %ymm1, (%eax)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retl
entry:
  %0 = load <8 x i32>, ptr addrspace(1) undef, align 32
  %1 = shufflevector <8 x i32> %0, <8 x i32> undef, <16 x i32> <i32 4, i32 4, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %2 = shufflevector <16 x i32> <i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 undef>, <16 x i32> %1, <16 x i32> <i32 16, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 17>
  store <16 x i32> %2, ptr addrspace(1) undef, align 64
  ret void
}