; RUN: opt < %s -passes=globalopt -S -o - | FileCheck %s

%class.Class = type { i8, i8, i8, i8 }
@A = local_unnamed_addr global %class.Class undef, align 4
@B = local_unnamed_addr global %class.Class undef, align 4

@llvm.global_ctors = appending global [2 x { i32, ptr, ptr }] [
  { i32, ptr, ptr } { i32 65535, ptr @initA, ptr null },
  { i32, ptr, ptr } { i32 65535, ptr @initB, ptr null }
]
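
; globalopt evaluates these static constructors at compile time; when it can
; fold a constructor's stores into the global's initializer, it removes the
; constructor from this list and deletes the now-dead function.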

define internal void @initA() section "__TEXT,__StaticInit,regular,pure_instructions" {
entry:
  store i32 -1, ptr @A, align 4
  ret void
}

define internal void @initB() section "__TEXT,__StaticInit,regular,pure_instructions" {
entry:
  store i8 -1, ptr @B, align 4
  ret void
}
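
; Note the contrast: initA stores an i32 that spans all four i8 fields of
; %class.Class, while initB's i8 store fits entirely within the first field.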

; rdar://79503568
; Check that we don't miscompile when the store covers the whole struct.
; CHECK-NOT: @A = local_unnamed_addr global %class.Class { i8 -1, i8 undef, i8 undef, i8 undef }, align 4

; FIXME: We could optimize this to { i8 -1, i8 -1, i8 -1, i8 -1 } if constant folding were a little smarter.
; CHECK: @A = local_unnamed_addr global %class.Class undef, align 4

; Check that we still perform the transform when the store fits within the 0th element.
; CHECK: @B = local_unnamed_addr global %class.Class { i8 -1, i8 undef, i8 undef, i8 undef }, align 4

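; initA's store could not be folded, so its constructor must survive; initB's
; store was folded into @B's initializer, so the function is deleted as dead.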
; CHECK: define internal void @initA()
; CHECK-NOT: define internal void @initB()