; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_20 | FileCheck %s
; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64 -mcpu=sm_20 | %ptxas-verify %}

; Even though general vector types are not supported in PTX, we can still
; optimize loads/stores with pseudo-vector instructions of the form:
;
; ld.v2.f32 {%f0, %f1}, [%r0]
;
; which will load two floats at once into scalar registers.
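;
; Stores are combined the same way; the matching store form (register names
; illustrative, not taken from the output below) would be:
;
; st.v2.f32 [%r0], {%f0, %f1}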

; CHECK-LABEL: foo
define void @foo(ptr %a) {
; CHECK: ld.v2.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}}
  %t1 = load <2 x float>, ptr %a
  %t2 = fmul <2 x float> %t1, %t1
  store <2 x float> %t2, ptr %a
  ret void
}

; CHECK-LABEL: foo2
define void @foo2(ptr %a) {
; CHECK: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
  %t1 = load <4 x float>, ptr %a
  %t2 = fmul <4 x float> %t1, %t1
  store <4 x float> %t2, ptr %a
  ret void
}

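; An <8 x float> access exceeds the widest 16-byte PTX vector operation, so
; it is expected to split into two back-to-back v4 pieces (hence the
; CHECK-NEXT below).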
; CHECK-LABEL: foo3
define void @foo3(ptr %a) {
; CHECK: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
; CHECK-NEXT: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
  %t1 = load <8 x float>, ptr %a
  %t2 = fmul <8 x float> %t1, %t1
  store <8 x float> %t2, ptr %a
  ret void
}

; CHECK-LABEL: foo4
define void @foo4(ptr %a) {
; CHECK: ld.v2.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}}
  %t1 = load <2 x i32>, ptr %a
  %t2 = mul <2 x i32> %t1, %t1
  store <2 x i32> %t2, ptr %a
  ret void
}

; CHECK-LABEL: foo5
define void @foo5(ptr %a) {
; CHECK: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
  %t1 = load <4 x i32>, ptr %a
  %t2 = mul <4 x i32> %t1, %t1
  store <4 x i32> %t2, ptr %a
  ret void
}

; CHECK-LABEL: foo6
define void @foo6(ptr %a) {
; CHECK: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
; CHECK-NEXT: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
  %t1 = load <8 x i32>, ptr %a
  %t2 = mul <8 x i32> %t1, %t1
  store <8 x i32> %t2, ptr %a
  ret void
}

; The following test previously failed because the address computation was
; still too complex when the LoadStoreVectorizer (LSV) ran.
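; The two i8 loads below read adjacent bytes (`or disjoint` offsets 128 and
; 129 off the same base), so once the addressing has been simplified, LSV can
; merge them into the single ld.v2.u8 checked for below.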
declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.x() #0
declare i32 @llvm.nvvm.read.ptx.sreg.tid.x() #0
; CHECK-LABEL: foo_complex
define void @foo_complex(ptr nocapture readonly align 16 dereferenceable(134217728) %alloc0) {
  %t0 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x(), !range !1
  %t1 = tail call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
  %t2 = lshr i32 %t1, 8
  %t3 = shl nuw nsw i32 %t1, 9
  %ttile_origin.2 = and i32 %t3, 130560
  %tstart_offset_x_mul = shl nuw nsw i32 %t0, 1
  %t4 = or disjoint i32 %ttile_origin.2, %tstart_offset_x_mul
  %t6 = or disjoint i32 %t4, 1
  %t8 = or disjoint i32 %t4, 128
  %t9 = zext i32 %t8 to i64
  %t10 = or disjoint i32 %t4, 129
  %t11 = zext i32 %t10 to i64
  %t20 = zext i32 %t2 to i64
  %t27 = getelementptr inbounds [1024 x [131072 x i8]], ptr %alloc0, i64 0, i64 %t20, i64 %t9
; CHECK: ld.v2.u8
  %t28 = load i8, ptr %t27, align 2
  %t31 = getelementptr inbounds [1024 x [131072 x i8]], ptr %alloc0, i64 0, i64 %t20, i64 %t11
  %t32 = load i8, ptr %t31, align 1
  %t33 = icmp ult i8 %t28, %t32
  %t34 = select i1 %t33, i8 %t32, i8 %t28
  store i8 %t34, ptr %t31
; CHECK: ret
  ret void
}

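; The extv8f16_* tests load an <8 x half>: with 16-byte alignment the whole
; vector comes in as one v4.b32 load (each b32 packing an f16x2 pair), while
; 4-byte alignment only permits scalar b32 loads. Either way, the halves are
; then unpacked with mov.b32 and widened with cvt.f32.f16.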
; CHECK-LABEL: extv8f16_global_a16(
define void @extv8f16_global_a16(ptr addrspace(1) noalias align 16 %dst, ptr addrspace(1) noalias readonly align 16 %src) #0 {
; CHECK: ld.global.v4.b32 {%r
  %v = load <8 x half>, ptr addrspace(1) %src, align 16
; CHECK: mov.b32 {%rs
; CHECK: mov.b32 {%rs
; CHECK: mov.b32 {%rs
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
  %ext = fpext <8 x half> %v to <8 x float>
; CHECK: st.global.v4.f32
; CHECK: st.global.v4.f32
  store <8 x float> %ext, ptr addrspace(1) %dst, align 16
  ret void
}

; CHECK-LABEL: extv8f16_global_a4(
define void @extv8f16_global_a4(ptr addrspace(1) noalias align 16 %dst, ptr addrspace(1) noalias readonly align 16 %src) #0 {
; CHECK: ld.global.b32 %r
; CHECK: ld.global.b32 %r
; CHECK: ld.global.b32 %r
; CHECK: ld.global.b32 %r
  %v = load <8 x half>, ptr addrspace(1) %src, align 4
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
  %ext = fpext <8 x half> %v to <8 x float>
; CHECK: st.global.v4.f32
; CHECK: st.global.v4.f32
  store <8 x float> %ext, ptr addrspace(1) %dst, align 16
  ret void
}

; CHECK-LABEL: extv8f16_generic_a16(
define void @extv8f16_generic_a16(ptr noalias align 16 %dst, ptr noalias readonly align 16 %src) #0 {
; CHECK: ld.v4.b32 {%r
  %v = load <8 x half>, ptr %src, align 16
; CHECK: mov.b32 {%rs
; CHECK: mov.b32 {%rs
; CHECK: mov.b32 {%rs
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
  %ext = fpext <8 x half> %v to <8 x float>
; CHECK: st.v4.f32
; CHECK: st.v4.f32
  store <8 x float> %ext, ptr %dst, align 16
  ret void
}

; CHECK-LABEL: extv8f16_generic_a4(
define void @extv8f16_generic_a4(ptr noalias align 16 %dst, ptr noalias readonly align 16 %src) #0 {
; CHECK: ld.b32 %r
; CHECK: ld.b32 %r
; CHECK: ld.b32 %r
; CHECK: ld.b32 %r
  %v = load <8 x half>, ptr %src, align 4
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
  %ext = fpext <8 x half> %v to <8 x float>
; CHECK: st.v4.f32
; CHECK: st.v4.f32
  store <8 x float> %ext, ptr %dst, align 16
  ret void
}

!1 = !{i32 0, i32 64}
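; Minimal definition for the attribute group referenced above (assumed here
; so the module parses; the exact attributes are not load-bearing for these
; checks).
attributes #0 = { nounwind }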
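; A <4 x bfloat> access with 8-byte alignment should likewise be a single
; v4.b16 load and store.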
; CHECK-LABEL: bf16_v4_align_load_store
define dso_local void @bf16_v4_align_load_store(ptr noundef %0, ptr noundef %1) #0 {
  ; CHECK: ld.v4.b16
  ; CHECK: st.v4.b16
  %3 = load <4 x bfloat>, ptr %1, align 8
  store <4 x bfloat> %3, ptr %0, align 8
  ret void
}