// RUN: %clang_cc1 -std=c++11 -triple x86_64-apple-darwin %s -emit-llvm -o - | FileCheck -check-prefixes=X64,CHECK %s
// RUN: %clang_cc1 -std=c++11 -triple amdgcn %s -emit-llvm -o - | FileCheck -check-prefixes=AMDGCN,CHECK %s
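
// Test IR generation for VLAs in C++: a VLA bound that triggers instantiation of a
// static data member, a multidimensional VLA typedef accessed through a reference,
// and range-based for loops over VLAs.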

template<typename T>
struct S {
  static int n;
};
template<typename T> int S<T>::n = 5;

int f() {
  // Make sure that the reference here is enough to trigger the instantiation of
  // the static data member.
  // CHECK: @_ZN1SIiE1nE = linkonce_odr{{.*}} global i32 5
  int a[S<int>::n];
  return sizeof a;
}

void test0(void *array, int n) {
  // CHECK-LABEL: define{{.*}} void @_Z5test0Pvi(
  // AMDGCN:        [[ARRAY0:%.*]] = alloca ptr, align 8, addrspace(5)
  // AMDGCN-NEXT:   [[N0:%.*]] = alloca i32, align 4, addrspace(5)
  // AMDGCN-NEXT:   [[REF0:%.*]] = alloca ptr, align 8, addrspace(5)
  // AMDGCN-NEXT:   [[S0:%.*]] = alloca i16, align 2, addrspace(5)
  // AMDGCN-NEXT:   [[ARRAY:%.*]] = addrspacecast ptr addrspace(5) [[ARRAY0]] to ptr
  // AMDGCN-NEXT:   [[N:%.*]] = addrspacecast ptr addrspace(5) [[N0]] to ptr
  // AMDGCN-NEXT:   [[REF:%.*]] = addrspacecast ptr addrspace(5) [[REF0]] to ptr
  // AMDGCN-NEXT:   [[S:%.*]] = addrspacecast ptr addrspace(5) [[S0]] to ptr
  // X64:        [[ARRAY:%.*]] = alloca ptr, align 8
  // X64-NEXT:   [[N:%.*]] = alloca i32, align 4
  // X64-NEXT:   [[REF:%.*]] = alloca ptr, align 8
  // X64-NEXT:   [[S:%.*]] = alloca i16, align 2
  // CHECK-NEXT: store ptr
  // CHECK-NEXT: store i32

  // Capture the bounds.
  // CHECK-NEXT: [[T0:%.*]] = load i32, ptr [[N]], align 4
  // CHECK-NEXT: [[DIM0:%.*]] = zext i32 [[T0]] to i64
  // CHECK-NEXT: [[T0:%.*]] = load i32, ptr [[N]], align 4
  // CHECK-NEXT: [[T1:%.*]] = add nsw i32 [[T0]], 1
  // CHECK-NEXT: [[DIM1:%.*]] = zext i32 [[T1]] to i64
  typedef short array_t[n][n+1];

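  // Binding the reference is just a pointer copy; the bounds captured above stay in
  // SSA values and are reused when ref is subscripted below.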
  // CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[ARRAY]], align 8
  // CHECK-NEXT: store ptr [[T0]], ptr [[REF]], align 8
  array_t &ref = *(array_t*) array;

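  // Subscripting scales the row index by the captured inner bound [[DIM1]] and then
  // adds the column index with a second GEP.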
  // CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[REF]]
  // CHECK-NEXT: [[T1:%.*]] = mul nsw i64 1, [[DIM1]]
  // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i16, ptr [[T0]], i64 [[T1]]
  // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i16, ptr [[T2]], i64 2
  // CHECK-NEXT: store i16 3, ptr [[T3]]
  ref[1][2] = 3;

  // CHECK-NEXT: [[T0:%.*]] = load ptr, ptr [[REF]]
  // CHECK-NEXT: [[T1:%.*]] = mul nsw i64 4, [[DIM1]]
  // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i16, ptr [[T0]], i64 [[T1]]
  // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i16, ptr [[T2]], i64 5
  // CHECK-NEXT: [[T4:%.*]] = load i16, ptr [[T3]]
  // CHECK-NEXT: store i16 [[T4]], ptr [[S]], align 2
  short s = ref[4][5];

  // CHECK-NEXT: ret void
}

void test2(int b) {
  // CHECK-LABEL: define{{.*}} void {{.*}}test2{{.*}}(i32 noundef %b)
  int varr[b];
  // AMDGCN: %__end1 = alloca ptr, align 8, addrspace(5)
  // AMDGCN: [[END:%.*]] = addrspacecast ptr addrspace(5) %__end1 to ptr
  // Find the address of b by matching the first store of %b into it.
  //CHECK: store i32 %b, ptr [[PTR_B:%.*]]

  // Get the VLA bound from the first load of PTR_B.
  //CHECK: [[VLA_NUM_ELEMENTS_PREZEXT:%.*]] = load i32, ptr [[PTR_B]]
  //CHECK-NEXT: [[VLA_NUM_ELEMENTS_PRE:%.*]] = zext i32 [[VLA_NUM_ELEMENTS_PREZEXT]]

  b = 15;
  //CHECK: store i32 15, ptr [[PTR_B]]

  // Now compute the sizeof, and then divide by the element size to get the element count.

  //CHECK: [[VLA_SIZEOF:%.*]] = mul nuw i64 4, [[VLA_NUM_ELEMENTS_PRE]]
  //CHECK-NEXT: [[VLA_NUM_ELEMENTS_POST:%.*]] = udiv i64 [[VLA_SIZEOF]], 4
  //CHECK-NEXT: [[VLA_END_PTR:%.*]] = getelementptr inbounds nuw i32, ptr {{%.*}}, i64 [[VLA_NUM_ELEMENTS_POST]]
  //X64-NEXT: store ptr [[VLA_END_PTR]], ptr %__end1
  //AMDGCN-NEXT: store ptr [[VLA_END_PTR]], ptr [[END]]
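  // Note that the end pointer is derived from [[VLA_NUM_ELEMENTS_PRE]], which was
  // loaded before the store of 15, so the loop bound is the value b had when varr
  // was declared.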
  for (int d : varr) 0;
}

void test3(int b, int c) {
  // CHECK-LABEL: define{{.*}} void {{.*}}test3{{.*}}(i32 noundef %b, i32 noundef %c)
  int varr[b][c];
  // AMDGCN: %__end1 = alloca ptr, align 8, addrspace(5)
  // AMDGCN: [[END:%.*]] = addrspacecast ptr addrspace(5) %__end1 to ptr
  // Find the addresses of b and c by matching the first stores of %b and %c.
  //CHECK: store i32 %b, ptr [[PTR_B:%.*]]
  //CHECK-NEXT: store i32 %c, ptr [[PTR_C:%.*]]

  // Get the VLA dimensions from the first loads of PTR_B and PTR_C.
  //CHECK: [[VLA_DIM1_PREZEXT:%.*]] = load i32, ptr [[PTR_B]]
  //CHECK-NEXT: [[VLA_DIM1_PRE:%.*]] = zext i32 [[VLA_DIM1_PREZEXT]]
  //CHECK: [[VLA_DIM2_PREZEXT:%.*]] = load i32, ptr [[PTR_C]]
  //CHECK-NEXT: [[VLA_DIM2_PRE:%.*]] = zext i32 [[VLA_DIM2_PREZEXT]]

  b = 15;
  c = 15;
  //CHECK: store i32 15, ptr [[PTR_B]]
  //CHECK: store i32 15, ptr [[PTR_C]]

  // Now compute the sizeof and then divide by the size of one inner row:
  // multiply the two dimensions, then by the element size, and then divide by the
  // byte size of a dim2 row.
  //CHECK: [[VLA_DIM1_X_DIM2:%.*]] = mul nuw i64 [[VLA_DIM1_PRE]], [[VLA_DIM2_PRE]]
  //CHECK-NEXT: [[VLA_SIZEOF:%.*]] = mul nuw i64 4, [[VLA_DIM1_X_DIM2]]
  //CHECK-NEXT: [[VLA_SIZEOF_DIM2:%.*]] = mul nuw i64 4, [[VLA_DIM2_PRE]]
  //CHECK-NEXT: [[VLA_NUM_ELEMENTS:%.*]] = udiv i64 [[VLA_SIZEOF]], [[VLA_SIZEOF_DIM2]]
  //CHECK-NEXT: [[VLA_END_INDEX:%.*]] = mul nsw i64 [[VLA_NUM_ELEMENTS]], [[VLA_DIM2_PRE]]
  //CHECK-NEXT: [[VLA_END_PTR:%.*]] = getelementptr inbounds nuw i32, ptr {{%.*}}, i64 [[VLA_END_INDEX]]
  //X64-NEXT: store ptr [[VLA_END_PTR]], ptr %__end
  //AMDGCN-NEXT: store ptr [[VLA_END_PTR]], ptr [[END]]
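  // As in test2, the end pointer comes from the dimensions captured at the
  // declaration of varr, not from the later stores of 15: the udiv recovers the
  // number of rows and the final mul rescales it by dim2 to count int elements.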

  for (auto &d : varr) 0;
}
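
// A minimal sketch of the bound-capture behaviour the two loops above depend on; it
// is illustrative only and not exercised by the FileCheck directives, and the name
// test4_sketch is not part of the original test. The VLA bound is evaluated once at
// the declaration, so the later write to b changes neither sizeof nor the trip count
// of a range-based for over varr.
unsigned long test4_sketch(int b) {
  int varr[b];        // bound of varr is captured here
  b = 15;             // does not change the captured bound
  return sizeof varr; // evaluated at run time from the bound captured above
}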