; xref: /llvm-project/llvm/test/Transforms/InstCombine/trunc-load.ll (revision 4ab40eca080965c65802710e39adbb78c4ce7bde)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S -data-layout="e-n16:32:64" | FileCheck %s
; RUN: opt < %s -passes=instcombine -S -data-layout="E-n16:32:64" | FileCheck %s

; Don't narrow if it would lose information about the dereferenceable range of the pointer.

define i32 @truncload_no_deref(ptr %ptr) {
; CHECK-LABEL: @truncload_no_deref(
; CHECK-NEXT:    [[X:%.*]] = load i64, ptr [[PTR:%.*]], align 4
; CHECK-NEXT:    [[R:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %x = load i64, ptr %ptr
  %r = trunc i64 %x to i32
  ret i32 %r
}
define i32 @truncload_small_deref(ptr dereferenceable(7) %ptr) {
; CHECK-LABEL: @truncload_small_deref(
; CHECK-NEXT:    [[X:%.*]] = load i64, ptr [[PTR:%.*]], align 4
; CHECK-NEXT:    [[R:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %x = load i64, ptr %ptr
  %r = trunc i64 %x to i32
  ret i32 %r
}

; On little-endian, we can narrow the load without an offset.

define i32 @truncload_deref(ptr dereferenceable(8) %ptr) {
; CHECK-LABEL: @truncload_deref(
; CHECK-NEXT:    [[X:%.*]] = load i64, ptr [[PTR:%.*]], align 4
; CHECK-NEXT:    [[R:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %x = load i64, ptr %ptr
  %r = trunc i64 %x to i32
  ret i32 %r
}

; Preserve alignment.

define i16 @truncload_align(ptr dereferenceable(14) %ptr) {
; CHECK-LABEL: @truncload_align(
; CHECK-NEXT:    [[X:%.*]] = load i32, ptr [[PTR:%.*]], align 16
; CHECK-NEXT:    [[R:%.*]] = trunc i32 [[X]] to i16
; CHECK-NEXT:    ret i16 [[R]]
;
  %x = load i32, ptr %ptr, align 16
  %r = trunc i32 %x to i16
  ret i16 %r
}

; Negative test - extra use means we would not eliminate the original load.

declare void @use(i64)

define i32 @truncload_extra_use(ptr dereferenceable(100) %ptr) {
; CHECK-LABEL: @truncload_extra_use(
; CHECK-NEXT:    [[X:%.*]] = load i64, ptr [[PTR:%.*]], align 2
; CHECK-NEXT:    call void @use(i64 [[X]])
; CHECK-NEXT:    [[R:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %x = load i64, ptr %ptr, align 2
  call void @use(i64 %x)
  %r = trunc i64 %x to i32
  ret i32 %r
}

; Negative test - don't create a load if the type is not allowed by the data-layout.

define i8 @truncload_type(ptr dereferenceable(9) %ptr) {
; CHECK-LABEL: @truncload_type(
; CHECK-NEXT:    [[X:%.*]] = load i64, ptr [[PTR:%.*]], align 2
; CHECK-NEXT:    [[R:%.*]] = trunc i64 [[X]] to i8
; CHECK-NEXT:    ret i8 [[R]]
;
  %x = load i64, ptr %ptr, align 2
  %r = trunc i64 %x to i8
  ret i8 %r
}

; Negative test - don't transform volatiles.

define i32 @truncload_volatile(ptr dereferenceable(8) %ptr) {
; CHECK-LABEL: @truncload_volatile(
; CHECK-NEXT:    [[X:%.*]] = load volatile i64, ptr [[PTR:%.*]], align 8
; CHECK-NEXT:    [[R:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %x = load volatile i64, ptr %ptr, align 8
  %r = trunc i64 %x to i32
  ret i32 %r
}

; Preserve address space.

define i32 @truncload_address_space(ptr addrspace(1) dereferenceable(8) %ptr) {
; CHECK-LABEL: @truncload_address_space(
; CHECK-NEXT:    [[X:%.*]] = load i64, ptr addrspace(1) [[PTR:%.*]], align 4
; CHECK-NEXT:    [[R:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %x = load i64, ptr addrspace(1) %ptr, align 4
  %r = trunc i64 %x to i32
  ret i32 %r
}