; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: %if x86-registered-target %{ opt --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -o - -S < %s | FileCheck %s %}
; RUN: %if aarch64-registered-target %{ opt --passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu -o - -S < %s | FileCheck %s %}
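; Test for SLP vectorization of stores written in a jumbled (non-consecutive)
; order; this input previously triggered a crash in the vectorizer.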

@b = common dso_local global ptr null, align 8
@e = common dso_local global float 0.000000e+00, align 4
@c = common dso_local global float 0.000000e+00, align 4
@g = common dso_local global float 0.000000e+00, align 4
@d = common dso_local global float 0.000000e+00, align 4
@f = common dso_local global float 0.000000e+00, align 4
@a = common dso_local global i32 0, align 4
@h = common dso_local global float 0.000000e+00, align 4

define dso_local void @j() local_unnamed_addr {
; CHECK-LABEL: @j(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr @b, align 8
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 4
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 12
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
; CHECK-NEXT:    [[CONV19:%.*]] = sitofp i32 [[TMP1]] to float
; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i32>, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT:    [[TMP6:%.*]] = add nsw <2 x i32> [[TMP5]], [[TMP3]]
; CHECK-NEXT:    [[TMP7:%.*]] = sitofp <2 x i32> [[TMP6]] to <2 x float>
; CHECK-NEXT:    [[TMP8:%.*]] = fmul <2 x float> [[TMP7]], splat (float 1.000000e+01)
; CHECK-NEXT:    [[TMP9:%.*]] = fsub <2 x float> <float 1.000000e+00, float 0.000000e+00>, [[TMP8]]
; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <2 x float> [[TMP9]], <2 x float> poison, <4 x i32> <i32 1, i32 0, i32 1, i32 0>
; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <4 x float> [[TMP10]], i32 1
; CHECK-NEXT:    store float [[TMP11]], ptr @g, align 4
; CHECK-NEXT:    [[TMP12:%.*]] = fadd <4 x float> [[TMP10]], <float -1.000000e+00, float -1.000000e+00, float 1.000000e+00, float 1.000000e+00>
; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <4 x float> [[TMP12]], i32 2
; CHECK-NEXT:    store float [[TMP13]], ptr @c, align 4
; CHECK-NEXT:    [[TMP14:%.*]] = extractelement <4 x float> [[TMP12]], i32 0
; CHECK-NEXT:    store float [[TMP14]], ptr @d, align 4
; CHECK-NEXT:    [[TMP15:%.*]] = extractelement <4 x float> [[TMP12]], i32 3
; CHECK-NEXT:    store float [[TMP15]], ptr @e, align 4
; CHECK-NEXT:    [[TMP16:%.*]] = extractelement <4 x float> [[TMP12]], i32 1
; CHECK-NEXT:    store float [[TMP16]], ptr @f, align 4
; CHECK-NEXT:    [[TMP17:%.*]] = insertelement <4 x float> <float poison, float -1.000000e+00, float poison, float -1.000000e+00>, float [[CONV19]], i32 0
; CHECK-NEXT:    [[TMP18:%.*]] = shufflevector <2 x float> [[TMP9]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP19:%.*]] = shufflevector <4 x float> [[TMP17]], <4 x float> [[TMP18]], <4 x i32> <i32 0, i32 1, i32 5, i32 3>
; CHECK-NEXT:    [[TMP20:%.*]] = fsub <4 x float> [[TMP12]], [[TMP19]]
; CHECK-NEXT:    [[TMP21:%.*]] = fadd <4 x float> [[TMP12]], [[TMP19]]
; CHECK-NEXT:    [[TMP22:%.*]] = shufflevector <4 x float> [[TMP20]], <4 x float> [[TMP21]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
; CHECK-NEXT:    [[TMP23:%.*]] = fptosi <4 x float> [[TMP22]] to <4 x i32>
; CHECK-NEXT:    store <4 x i32> [[TMP23]], ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %0 = load ptr, ptr @b, align 8
  %arrayidx = getelementptr inbounds i32, ptr %0, i64 4
  %1 = load i32, ptr %arrayidx, align 4
  %arrayidx1 = getelementptr inbounds i32, ptr %0, i64 12
  %2 = load i32, ptr %arrayidx1, align 4
  %add = add nsw i32 %2, %1
  %conv = sitofp i32 %add to float
  %mul = fmul float %conv, 1.000000e+01
  %arrayidx2 = getelementptr inbounds i32, ptr %0, i64 5
  %3 = load i32, ptr %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds i32, ptr %0, i64 13
  %4 = load i32, ptr %arrayidx3, align 4
  %add4 = add nsw i32 %4, %3
  %conv5 = sitofp i32 %add4 to float
  %mul6 = fmul float %conv5, 1.000000e+01
  %sub = fsub float 0.000000e+00, %mul6
  %sub7 = fsub float 1.000000e+00, %mul
  store float %sub7, ptr @g, align 4
  %add9 = fadd float %sub, 1.000000e+00
  store float %add9, ptr @c, align 4
  %sub10 = fadd float %sub, -1.000000e+00
  store float %sub10, ptr @d, align 4
  %add11 = fadd float %sub7, 1.000000e+00
  store float %add11, ptr @e, align 4
  %sub12 = fadd float %sub7, -1.000000e+00
  store float %sub12, ptr @f, align 4
  %sub13 = fsub float %add9, %sub
  %conv14 = fptosi float %sub13 to i32
  %arrayidx15 = getelementptr inbounds i32, ptr %0, i64 14
  store i32 %conv14, ptr %arrayidx15, align 4
  %sub16 = fadd float %add11, -1.000000e+00
  %conv17 = fptosi float %sub16 to i32
  %arrayidx18 = getelementptr inbounds i32, ptr %0, i64 15
  store i32 %conv17, ptr %arrayidx18, align 4
  %5 = load i32, ptr @a, align 4
  %conv19 = sitofp i32 %5 to float
  %sub20 = fsub float %sub10, %conv19
  %conv21 = fptosi float %sub20 to i32
  store i32 %conv21, ptr %arrayidx1, align 4
  %sub23 = fadd float %sub12, -1.000000e+00
  %conv24 = fptosi float %sub23 to i32
  store i32 %conv24, ptr %arrayidx3, align 4
  ret void
}