; xref: /llvm-project/llvm/test/Analysis/GlobalsModRef/volatile-instrs.ll (revision 47f0b6630c78ab52c2197ec5e1c2d13a6acffed1)
; RUN: opt < %s -passes=dse -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

%struct.anon = type { i32, i32, i32 }
@b = global %struct.anon { i32 1, i32 0, i32 0 }, align 4
@c = common global i32 0, align 4
@a = common global %struct.anon zeroinitializer, align 4
@.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1

declare i32 @printf(ptr nocapture, ...) nounwind
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind


; Make sure that the initial memcpy call does not go away
; because the volatile load is in the way. PR12899

; CHECK: main_entry:
; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i64
; Regression test body for PR12899: DSE must NOT delete the first memcpy.
define i32 @main() nounwind uwtable ssp {
main_entry:
  ; First store of @a into @b. Naively this looks dead (overwritten by the
  ; second identical memcpy below), but the intervening volatile load of @b
  ; observes it, so DSE must keep it. The CHECK lines above pin this down.
  tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 @b, ptr align 4 @a, i64 12, i1 false)
  ; Volatile read of @b — this is the access that makes the memcpy live.
  %0 = load volatile i32, ptr @b, align 4
  store i32 %0, ptr @c, align 4
  ; Second, overwriting memcpy with the same source/dest/length.
  tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 @b, ptr align 4 @a, i64 12, i1 false) nounwind
  ; Keep %0 live past the stores so nothing here is trivially dead.
  %call = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %0) nounwind
  ret i32 0
}