; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -O2 -preserve-alignment-assumptions-during-inlining=0 < %s | FileCheck %s --check-prefixes=CHECK,ASSUMPTIONS-OFF
; RUN: opt -S -O2 -preserve-alignment-assumptions-during-inlining=1 < %s | FileCheck %s --check-prefixes=CHECK,ASSUMPTIONS-ON
; RUN: opt -S -O2 < %s | FileCheck %s --check-prefixes=CHECK,ASSUMPTIONS-OFF

target datalayout = "e-p:64:64-p5:32:32-A5"
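; Little-endian; pointers in the default address space are 64-bit, pointers
; in addrspace(5) are 32-bit, and allocas live in addrspace(5) (as on AMDGPU).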

; This illustrates an optimization difference caused by the inliner's
; instruction-counting heuristics, which are affected by the additional
; instructions of the alignment assumption.
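;
; When the assumption is preserved, inlining @callee1 into @caller1 emits an
; extra call of roughly this form (visible in the ASSUMPTIONS-ON lines below):
;   call void @llvm.assume(i1 true) [ "align"(ptr %ptr, i64 8) ]
; and that extra instruction feeds into the inliner's cost counting.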

define internal i1 @callee1(i1 %c, ptr align 8 %ptr) {
  store volatile i64 0, ptr %ptr
  ret i1 %c
}

define void @caller1(i1 %c, ptr align 1 %ptr) {
; ASSUMPTIONS-OFF-LABEL: @caller1(
; ASSUMPTIONS-OFF-NEXT:    br i1 [[C:%.*]], label [[COMMON_RET:%.*]], label [[FALSE2:%.*]]
; ASSUMPTIONS-OFF:       common.ret:
; ASSUMPTIONS-OFF-NEXT:    [[DOTSINK:%.*]] = phi i64 [ 3, [[FALSE2]] ], [ 2, [[TMP0:%.*]] ]
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 0, ptr [[PTR:%.*]], align 4
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 -1, ptr [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 -1, ptr [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 -1, ptr [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 -1, ptr [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 -1, ptr [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 [[DOTSINK]], ptr [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    ret void
; ASSUMPTIONS-OFF:       false2:
; ASSUMPTIONS-OFF-NEXT:    store volatile i64 1, ptr [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT:    br label [[COMMON_RET]]
;
; ASSUMPTIONS-ON-LABEL: @caller1(
; ASSUMPTIONS-ON-NEXT:    br i1 [[C:%.*]], label [[COMMON_RET:%.*]], label [[FALSE2:%.*]]
; ASSUMPTIONS-ON:       common.ret:
; ASSUMPTIONS-ON-NEXT:    [[DOTSINK:%.*]] = phi i64 [ 3, [[FALSE2]] ], [ 2, [[TMP0:%.*]] ]
; ASSUMPTIONS-ON-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[PTR:%.*]], i64 8) ]
; ASSUMPTIONS-ON-NEXT:    store volatile i64 0, ptr [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, ptr [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, ptr [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, ptr [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, ptr [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    store volatile i64 -1, ptr [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    store volatile i64 [[DOTSINK]], ptr [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT:    ret void
; ASSUMPTIONS-ON:       false2:
; ASSUMPTIONS-ON-NEXT:    store volatile i64 1, ptr [[PTR]], align 4
; ASSUMPTIONS-ON-NEXT:    br label [[COMMON_RET]]
;
  br i1 %c, label %true1, label %false1

true1:
  %c2 = call i1 @callee1(i1 %c, ptr %ptr)
  store volatile i64 -1, ptr %ptr
  store volatile i64 -1, ptr %ptr
  store volatile i64 -1, ptr %ptr
  store volatile i64 -1, ptr %ptr
  store volatile i64 -1, ptr %ptr
  br i1 %c2, label %true2, label %false2

false1:
  store volatile i64 1, ptr %ptr
  br label %true1

true2:
  store volatile i64 2, ptr %ptr
  ret void

false2:
  store volatile i64 3, ptr %ptr
  ret void
}

; This test checks that alignment assumptions do not prevent SROA.
; See PR45763.
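;
; Inlining @callee2 under -preserve-alignment-assumptions-during-inlining=1
; emits an alignment assume that uses the pointer; historically such uses
; could keep SROA from promoting %alloca. The shared CHECK lines verify that
; @caller2 still folds to a bare ret void in both modes.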

define internal void @callee2(ptr noalias sret(i64) align 32 %arg) {
  store i64 0, ptr %arg, align 8
  ret void
}

define amdgpu_kernel void @caller2() {
; CHECK-LABEL: @caller2(
; CHECK-NEXT:    ret void
;
  %alloca = alloca i64, align 8, addrspace(5)
  %cast = addrspacecast ptr addrspace(5) %alloca to ptr
  call void @callee2(ptr sret(i64) align 32 %cast)
  ret void
}