xref: /llvm-project/llvm/test/Transforms/StraightLineStrengthReduce/NVPTX/reassociate-geps-and-slsr.ll (revision 133352feb30605ec51b15f77826ed3a2fbf8db56)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=separate-const-offset-from-gep,slsr,gvn -S | FileCheck %s
; RUN: llc < %s -mcpu=sm_35 | FileCheck %s --check-prefix=PTX

; 64-bit NVPTX module; llc lowers this to PTX for the PTX-prefixed checks.
target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
target triple = "nvptx64-unknown-unknown"

; arr[i + 5]
; arr[i * 2 + 5]
; arr[i * 3 + 5]
; arr[i * 4 + 5]
;
;   => reassociate-geps
;
; *(&arr[i] + 5)
; *(&arr[i * 2] + 5)
; *(&arr[i * 3] + 5)
; *(&arr[i * 4] + 5)
;
;   => slsr
;
; p1 = &arr[i]
; *(p1 + 5)
; p2 = p1 + i
; *(p2 + 5)
; p3 = p2 + i
; *(p3 + 5)
; p4 = p3 + i
; *(p4 + 5)
; Four loads at arr[i*k + 5] (k = 1..4). After separate-const-offset-from-gep
; + slsr + gvn, the CHECK lines verify each pointer is a single add of the
; shared stride (shl %i, 2 bytes) onto the previous base, plus a constant
; +20 byte offset; the PTX lines verify codegen keeps one mul.wide.s32 and a
; chain of add.s64, folding the +20 into the load's immediate offset.
define void @slsr_after_reassociate_geps(ptr %arr, i32 %i) {
; CHECK-LABEL: define void @slsr_after_reassociate_geps(
; CHECK-SAME: ptr [[ARR:%.*]], i32 [[I:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[I]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr float, ptr [[ARR]], i64 [[TMP1]]
; CHECK-NEXT:    [[P12:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 20
; CHECK-NEXT:    [[V1:%.*]] = load float, ptr [[P12]], align 4
; CHECK-NEXT:    call void @foo(float [[V1]])
; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP1]], 2
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr i8, ptr [[TMP2]], i64 [[TMP3]]
; CHECK-NEXT:    [[P24:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 20
; CHECK-NEXT:    [[V2:%.*]] = load float, ptr [[P24]], align 4
; CHECK-NEXT:    call void @foo(float [[V2]])
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i64 [[TMP3]]
; CHECK-NEXT:    [[P36:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 20
; CHECK-NEXT:    [[V3:%.*]] = load float, ptr [[P36]], align 4
; CHECK-NEXT:    call void @foo(float [[V3]])
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr i8, ptr [[TMP5]], i64 [[TMP3]]
; CHECK-NEXT:    [[P48:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 20
; CHECK-NEXT:    [[V4:%.*]] = load float, ptr [[P48]], align 4
; CHECK-NEXT:    call void @foo(float [[V4]])
; CHECK-NEXT:    ret void
;
; PTX-LABEL: .visible .func slsr_after_reassociate_geps(
; PTX: ld.param.u64 [[arr:%rd[0-9]+]], [slsr_after_reassociate_geps_param_0];
; PTX: ld.param.u32 [[i:%r[0-9]+]], [slsr_after_reassociate_geps_param_1];
  %i2 = shl nsw i32 %i, 1
  %i3 = mul nsw i32 %i, 3
  %i4 = shl nsw i32 %i, 2

  %j1 = add nsw i32 %i, 5
  %p1 = getelementptr inbounds float, ptr %arr, i32 %j1
; PTX: mul.wide.s32 [[i4:%rd[0-9]+]], [[i]], 4;
; PTX: add.s64 [[base1:%rd[0-9]+]], [[arr]], [[i4]];
  %v1 = load float, ptr %p1, align 4
; PTX: ld.f32 {{%f[0-9]+}}, [[[base1]]+20];
  call void @foo(float %v1)

  %j2 = add nsw i32 %i2, 5
  %p2 = getelementptr inbounds float, ptr %arr, i32 %j2
; PTX: add.s64 [[base2:%rd[0-9]+]], [[base1]], [[i4]];
  %v2 = load float, ptr %p2, align 4
; PTX: ld.f32 {{%f[0-9]+}}, [[[base2]]+20];
  call void @foo(float %v2)

  %j3 = add nsw i32 %i3, 5
  %p3 = getelementptr inbounds float, ptr %arr, i32 %j3
; PTX: add.s64 [[base3:%rd[0-9]+]], [[base2]], [[i4]];
  %v3 = load float, ptr %p3, align 4
; PTX: ld.f32 {{%f[0-9]+}}, [[[base3]]+20];
  call void @foo(float %v3)

  %j4 = add nsw i32 %i4, 5
  %p4 = getelementptr inbounds float, ptr %arr, i32 %j4
; PTX: add.s64 [[base4:%rd[0-9]+]], [[base3]], [[i4]];
  %v4 = load float, ptr %p4, align 4
; PTX: ld.f32 {{%f[0-9]+}}, [[[base4]]+20];
  call void @foo(float %v4)

  ret void
}

; Opaque external callee; each loaded float is passed to it above.
declare void @foo(float)