; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:4"
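; The ni:4 component of the datalayout declares pointers in address space 4 to be non-integral.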
target triple = "x86_64-unknown-linux-gnu"

define ptr addrspace(4) @f_0() {
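; The GEP from null is not folded to an inttoptr constant because addrspace(4) is
; non-integral; it stays a constant getelementptr expression (compare @f_1).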
; CHECK-LABEL: @f_0(
; CHECK-NEXT:    ret ptr addrspace(4) getelementptr (i8, ptr addrspace(4) null, i64 50)
;
  %result = getelementptr i8, ptr addrspace(4) null, i64 50
  ret ptr addrspace(4) %result
}

define ptr addrspace(3) @f_1() {
; inttoptr is fine here since addrspace(3) is integral.
; CHECK-LABEL: @f_1(
; CHECK-NEXT:    ret ptr addrspace(3) inttoptr (i64 50 to ptr addrspace(3))
;
  %result = getelementptr i8, ptr addrspace(3) null, i64 50
  ret ptr addrspace(3) %result
}

define void @f_2(ptr %ptr0, ptr %ptr1) {
; It is not okay to convert the load/store pair to load and store
; integers, since pointers in address space 4 are non-integral.
; CHECK-LABEL: @f_2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VAL:%.*]] = load ptr addrspace(4), ptr [[PTR0:%.*]], align 8
; CHECK-NEXT:    store ptr addrspace(4) [[VAL]], ptr [[PTR1:%.*]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %val = load ptr addrspace(4), ptr %ptr0
  store ptr addrspace(4) %val, ptr %ptr1
  ret void
}

define void @f_3(ptr %ptr0, ptr %ptr1) {
; It *is* okay to convert the load/store pair to load and store
; integers, since pointers in address space 3 are integral.
; CHECK-LABEL: @f_3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VAL:%.*]] = load ptr addrspace(3), ptr [[PTR0:%.*]], align 8
; CHECK-NEXT:    store ptr addrspace(3) [[VAL]], ptr [[PTR1:%.*]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %val = load ptr addrspace(3), ptr %ptr0
  store ptr addrspace(3) %val, ptr %ptr1
  ret void
}

define i64 @g(ptr %gp) {
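; The value stored through %v77 is a non-integral addrspace(4) pointer, so the
; following i64 load cannot be replaced with a ptrtoint of the stored value and
; must remain (contrast with @g2).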
; CHECK-LABEL: @g(
; CHECK-NEXT:    [[DOTPRE:%.*]] = load ptr addrspace(4), ptr [[GP:%.*]], align 8
; CHECK-NEXT:    [[V74:%.*]] = call ptr addrspace(4) @alloc()
; CHECK-NEXT:    [[V75:%.*]] = addrspacecast ptr addrspace(4) [[V74]] to ptr
; CHECK-NEXT:    [[V77:%.*]] = getelementptr i8, ptr [[V75]], i64 -8
; CHECK-NEXT:    store ptr addrspace(4) [[DOTPRE]], ptr [[V77]], align 8
; CHECK-NEXT:    [[V81:%.*]] = load i64, ptr [[V77]], align 8
; CHECK-NEXT:    ret i64 [[V81]]
;
  %.pre = load ptr addrspace(4), ptr %gp, align 8
  %v74 = call ptr addrspace(4) @alloc()
  %v75 = addrspacecast ptr addrspace(4) %v74 to ptr
  %v77 = getelementptr ptr addrspace(4), ptr %v75, i64 -1
  store ptr addrspace(4) %.pre, ptr %v77, align 8
  %v81 = load i64, ptr %v77, align 8
  ret i64 %v81
}

define i64 @g2(ptr addrspace(4) %gp) {
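; Here the stored value is an integral addrspace(0) pointer, so the i64 load can be
; forwarded as a ptrtoint of the stored pointer.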
; CHECK-LABEL: @g2(
; CHECK-NEXT:    [[DOTPRE:%.*]] = load ptr, ptr addrspace(4) [[GP:%.*]], align 8
; CHECK-NEXT:    [[V74:%.*]] = call ptr addrspace(4) @alloc()
; CHECK-NEXT:    [[V77:%.*]] = getelementptr i8, ptr addrspace(4) [[V74]], i64 -8
; CHECK-NEXT:    store ptr [[DOTPRE]], ptr addrspace(4) [[V77]], align 8
; CHECK-NEXT:    [[V81_CAST:%.*]] = ptrtoint ptr [[DOTPRE]] to i64
; CHECK-NEXT:    ret i64 [[V81_CAST]]
;
  %.pre = load ptr, ptr addrspace(4) %gp, align 8
  %v74 = call ptr addrspace(4) @alloc()
  %v77 = getelementptr ptr, ptr addrspace(4) %v74, i64 -1
  store ptr %.pre, ptr addrspace(4) %v77, align 8
  %v81 = load i64, ptr addrspace(4) %v77, align 8
  ret i64 %v81
}

declare ptr addrspace(4) @alloc()

define i64 @f_4(ptr addrspace(4) %v0) {
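; @f_5 is declared to take an i64, but the mismatched call is left untouched:
; fixing up the prototype would require a ptrtoint of the non-integral pointer argument.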
; CHECK-LABEL: @f_4(
; CHECK-NEXT:    [[V6:%.*]] = call i64 @f_5(ptr addrspace(4) [[V0:%.*]])
; CHECK-NEXT:    ret i64 [[V6]]
;
  %v6 = call i64 @f_5(ptr addrspace(4) %v0)
  ret i64 %v6
}

declare i64 @f_5(i64)