; Verify register types we generate in PTX.
; RUN: llc -O0 < %s -mtriple=nvptx -mcpu=sm_20 | FileCheck %s
; RUN: llc -O0 < %s -mtriple=nvptx64 -mcpu=sm_20 | FileCheck %s
; RUN: llc -O0 < %s -mtriple=nvptx -mcpu=sm_20 | FileCheck %s -check-prefixes=NO8BIT
; RUN: llc -O0 < %s -mtriple=nvptx64 -mcpu=sm_20 | FileCheck %s -check-prefixes=NO8BIT
; RUN: %if ptxas && !ptxas-12.0 %{ llc -O0 < %s -mtriple=nvptx -mcpu=sm_20 | %ptxas-verify %}
; RUN: %if ptxas %{ llc -O0 < %s -mtriple=nvptx64 -mcpu=sm_20 | %ptxas-verify %}
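; The %if ptxas lines additionally assemble the generated PTX with ptxas when
; it is available; the 32-bit run is excluded under ptxas 12.0, presumably
; because that release dropped 32-bit compilation support.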

; CHECK-LABEL: .visible .func func()
; NO8BIT-LABEL: .visible .func func()
define void @func() {
entry:
  %s8 = alloca i8, align 1
  %u8 = alloca i8, align 1
  %s16 = alloca i16, align 2
  %u16 = alloca i16, align 2
; Both 8- and 16-bit integers are packed into 16-bit registers.
; CHECK-DAG: .reg .b16 %rs<
; We should not generate 8-bit registers.
; NO8BIT-NOT: .reg .{{[bsu]}}8
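; For illustration only (the exact register count is not matched), the expected
; declaration looks something like:
;   .reg .b16 %rs<5>;
; and no ".reg .b8", ".reg .s8", or ".reg .u8" declaration should appear.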
  %s32 = alloca i32, align 4
  %u32 = alloca i32, align 4
; CHECK-DAG: .reg .b32 %r<
  %s64 = alloca i64, align 8
  %u64 = alloca i64, align 8
; CHECK-DAG: .reg .b64 %rd<
  %f32 = alloca float, align 4
; CHECK-DAG: .reg .f32 %f<
  %f64 = alloca double, align 8
; CHECK-DAG: .reg .f64 %fd<
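; To summarize the register classes checked above:
;   %rs -> .b16 (holds i8 and i16), %r -> .b32, %rd -> .b64,
;   %f  -> .f32, %fd -> .f64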

; Verify that we use correct register types.
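; Each store below is expected to materialize its constant with a mov into a
; register of the matching class and then store it, along these lines
; (illustrative; the store's address operand is not matched):
;   mov.b16 %rs1, 1;
;   st.u8   [%SP+0], %rs1;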
  store i8 1, ptr %s8, align 1
; CHECK: mov.b16 [[R1:%rs[0-9]]], 1;
; CHECK-NEXT: st.u8 {{.*}}, [[R1]]
  store i8 2, ptr %u8, align 1
; CHECK: mov.b16 [[R2:%rs[0-9]]], 2;
; CHECK-NEXT: st.u8 {{.*}}, [[R2]]
  store i16 3, ptr %s16, align 2
; CHECK: mov.b16 [[R3:%rs[0-9]]], 3;
; CHECK-NEXT: st.u16 {{.*}}, [[R3]]
  store i16 4, ptr %u16, align 2
; CHECK: mov.b16 [[R4:%rs[0-9]]], 4;
; CHECK-NEXT: st.u16 {{.*}}, [[R4]]
  store i32 5, ptr %s32, align 4
; CHECK: mov.b32 [[R5:%r[0-9]]], 5;
; CHECK-NEXT: st.u32 {{.*}}, [[R5]]
  store i32 6, ptr %u32, align 4
; CHECK: mov.b32 [[R6:%r[0-9]]], 6;
; CHECK-NEXT: st.u32 {{.*}}, [[R6]]
  store i64 7, ptr %s64, align 8
; CHECK: mov.b64 [[R7:%rd[0-9]]], 7;
; CHECK-NEXT: st.u64 {{.*}}, [[R7]]
  store i64 8, ptr %u64, align 8
; CHECK: mov.b64 [[R8:%rd[0-9]]], 8;
; CHECK-NEXT: st.u64 {{.*}}, [[R8]]

; FP constants are stored via integer registers, but that's an
; implementation detail that's irrelevant here.
  store float 9.000000e+00, ptr %f32, align 4
  store double 1.000000e+01, ptr %f64, align 8
; Instead, we force a load into a register and then verify the register type.
  %f32v = load volatile float, ptr %f32, align 4
; CHECK: ld.volatile.f32         %f{{[0-9]+}}
  %f64v = load volatile double, ptr %f64, align 8
; CHECK: ld.volatile.f64         %fd{{[0-9]+}}
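; For illustration only (register numbers, mnemonics, and operands here are
; assumptions, not matched), the f32 path is expected to look like:
;   mov.u32 %r9, 1091567616;     // bit pattern of 9.0f (0x41100000)
;   st.u32  [%SP+16], %r9;
;   ...
;   ld.volatile.f32 %f1, [%SP+16];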
  ret void
; CHECK: ret;
; NO8BIT: ret;
}