xref: /llvm-project/llvm/test/Transforms/InstCombine/load-bitcast-vec.ll (revision 4ab40eca080965c65802710e39adbb78c4ce7bde)
1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt -passes=instcombine -S < %s | FileCheck %s
3
; Scalar load whose type/alignment already match: the CHECK lines mirror the
; input exactly, so this asserts InstCombine leaves the load untouched.
define float @matching_scalar(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @matching_scalar(
; CHECK-NEXT:    [[R:%.*]] = load float, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %r = load float, ptr %p, align 16
  ret float %r
}
12
; i32 load from a 16-byte-dereferenceable pointer; expected output is
; identical to the input, i.e. no transform is performed.
define i32 @nonmatching_scalar(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @nonmatching_scalar(
; CHECK-NEXT:    [[R:%.*]] = load i32, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret i32 [[R]]
;
  %r = load i32, ptr %p, align 16
  ret i32 %r
}
21
; Wider (i64) scalar load; CHECK lines match the input, asserting the load
; is left as-is.
define i64 @larger_scalar(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @larger_scalar(
; CHECK-NEXT:    [[R:%.*]] = load i64, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret i64 [[R]]
;
  %r = load i64, ptr %p, align 16
  ret i64 %r
}
30
; Narrower (i8) scalar load with over-alignment (align 16); expected to be
; unchanged by InstCombine.
define i8 @smaller_scalar(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @smaller_scalar(
; CHECK-NEXT:    [[R:%.*]] = load i8, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret i8 [[R]]
;
  %r = load i8, ptr %p, align 16
  ret i8 %r
}
39
; Same as @smaller_scalar but with a weaker alignment (align 4); the align 4
; must be preserved in the output, as the CHECK line asserts.
define i8 @smaller_scalar_less_aligned(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @smaller_scalar_less_aligned(
; CHECK-NEXT:    [[R:%.*]] = load i8, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret i8 [[R]]
;
  %r = load i8, ptr %p, align 4
  ret i8 %r
}
48
; dereferenceable(15) — one byte short of a full 16-byte vector; load is
; expected to remain untouched.
define float @matching_scalar_small_deref(ptr dereferenceable(15) %p) {
; CHECK-LABEL: @matching_scalar_small_deref(
; CHECK-NEXT:    [[R:%.*]] = load float, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %r = load float, ptr %p, align 16
  ret float %r
}
57
; Minimal dereferenceable(1) guarantee; CHECK lines mirror the input, so no
; transform may occur.
define float @matching_scalar_smallest_deref(ptr dereferenceable(1) %p) {
; CHECK-LABEL: @matching_scalar_smallest_deref(
; CHECK-NEXT:    [[R:%.*]] = load float, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %r = load float, ptr %p, align 16
  ret float %r
}
66
; dereferenceable_or_null(1) variant: the pointer may be null, so the load
; must again be left unchanged (CHECK output equals input).
define float @matching_scalar_smallest_deref_or_null(ptr dereferenceable_or_null(1) %p) {
; CHECK-LABEL: @matching_scalar_smallest_deref_or_null(
; CHECK-NEXT:    [[R:%.*]] = load float, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %r = load float, ptr %p, align 16
  ret float %r
}
75
; Non-default address space (addrspace(4)) with dereferenceable(1); expected
; output is identical to the input.
define float @matching_scalar_smallest_deref_addrspace(ptr addrspace(4) dereferenceable(1) %p) {
; CHECK-LABEL: @matching_scalar_smallest_deref_addrspace(
; CHECK-NEXT:    [[R:%.*]] = load float, ptr addrspace(4) [[P:%.*]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %r = load float, ptr addrspace(4) %p, align 16
  ret float %r
}
84
; A null pointer can't be assumed inbounds in a non-default address space.
; (dereferenceable_or_null + addrspace(4): the CHECK lines assert the load
; is left exactly as written.)

define float @matching_scalar_smallest_deref_or_null_addrspace(ptr addrspace(4) dereferenceable_or_null(1) %p) {
; CHECK-LABEL: @matching_scalar_smallest_deref_or_null_addrspace(
; CHECK-NEXT:    [[R:%.*]] = load float, ptr addrspace(4) [[P:%.*]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %r = load float, ptr addrspace(4) %p, align 16
  ret float %r
}
95
; Volatile load: must never be rewritten; CHECK asserts the volatile load
; survives unchanged, including its align 16.
define float @matching_scalar_volatile(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @matching_scalar_volatile(
; CHECK-NEXT:    [[R:%.*]] = load volatile float, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %r = load volatile float, ptr %p, align 16
  ret float %r
}
104
; Plain scalar load with no vector involvement; identical in shape to
; @matching_scalar and likewise expected to be a no-op for InstCombine.
define float @nonvector(ptr dereferenceable(16) %p) {
; CHECK-LABEL: @nonvector(
; CHECK-NEXT:    [[R:%.*]] = load float, ptr [[P:%.*]], align 16
; CHECK-NEXT:    ret float [[R]]
;
  %r = load float, ptr %p, align 16
  ret float %r
}
113