; xref: /llvm-project/llvm/test/CodeGen/X86/shuffle-combine-crash-4.ll (revision 02f8519502447de6ef69a85fa8de5732dd59d853)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx | FileCheck %s

; Make sure that we do not infinitely-loop combining shuffle vectors.

; 8-byte global written bytewise (values 0..7) by @infiloop below.
@test24_id5239 = dso_local local_unnamed_addr global i64 0, align 8
; Regression test for an infinite loop in DAGCombiner's shuffle combining.
; The CHECK block below is autogenerated by update_llc_test_checks.py; the
; interesting property is simply that llc terminates and emits code at all.
define void @infiloop() {
; CHECK-LABEL: infiloop:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rbp
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset %rbp, -16
; CHECK-NEXT:    movq %rsp, %rbp
; CHECK-NEXT:    .cfi_def_cfa_register %rbp
; CHECK-NEXT:    andq $-32, %rsp
; CHECK-NEXT:    subq $64, %rsp
; CHECK-NEXT:    movabsq $506097522914230528, %rax # imm = 0x706050403020100
; CHECK-NEXT:    movq %rax, test24_id5239(%rip)
; CHECK-NEXT:    vmovaps {{.*#+}} ymm0 = [4,1,6,7,6,7,2,3,6,7,4,5,6,7,2,3,6,7,2,3,2,3,2,3,4,5,4,5,2,3,0,1]
; CHECK-NEXT:    vmovaps %ymm0, (%rsp)
; CHECK-NEXT:    movq %rbp, %rsp
; CHECK-NEXT:    popq %rbp
; CHECK-NEXT:    .cfi_def_cfa %rsp, 8
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
entry:
  %id5230 = alloca <32 x i8>, align 32
  ; Write the byte pattern 0,1,...,7 into the i64 global one byte at a time.
  store i8 0, ptr @test24_id5239, align 8
  store i8 1, ptr getelementptr inbounds (i8, ptr @test24_id5239, i64 1), align 1
  store i8 2, ptr getelementptr inbounds (i8, ptr @test24_id5239, i64 2), align 2
  store i8 3, ptr getelementptr inbounds (i8, ptr @test24_id5239, i64 3), align 1
  store i8 4, ptr getelementptr inbounds (i8, ptr @test24_id5239, i64 4), align 4
  store i8 5, ptr getelementptr inbounds (i8, ptr @test24_id5239, i64 5), align 1
  store i8 6, ptr getelementptr inbounds (i8, ptr @test24_id5239, i64 6), align 2
  store i8 7, ptr getelementptr inbounds (i8, ptr @test24_id5239, i64 7), align 1
  ; Reload those bytes as a <4 x i16>, then widen via a repeating shuffle;
  ; the shuffle -> bitcast -> or chain is what triggered the combine loop.
  %0 = load <4 x i16>, ptr @test24_id5239, align 8
  %shuffle = shufflevector <4 x i16> %0, <4 x i16> poison, <16 x i32> <i32 0, i32 3, i32 3, i32 1, i32 3, i32 2, i32 3, i32 1, i32 3, i32 1, i32 1, i32 1, i32 2, i32 2, i32 1, i32 0>
  %1 = bitcast <16 x i16> %shuffle to <32 x i8>
  %2 = or <32 x i8> %1, <i8 4, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
  ; Volatile store keeps the whole chain live so nothing is optimized away.
  store volatile <32 x i8> %2, ptr %id5230, align 32
  ret void
}