// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=OMP45 --check-prefix=CHECK
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -verify -triple x86_64-apple-darwin10 -fopenmp -fopenmp-version=45 -fexceptions -fcxx-exceptions -debug-info-kind=line-tables-only -x c++ -emit-llvm %s -o - | FileCheck %s --check-prefix=TERM_DEBUG

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -DOMP5 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=OMP50 --check-prefix=CHECK
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -DOMP5 -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -DOMP5 -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -verify -triple x86_64-apple-darwin10 -fopenmp -DOMP5 -fexceptions -fcxx-exceptions -debug-info-kind=line-tables-only -x c++ -emit-llvm %s -o - | FileCheck %s --check-prefix=TERM_DEBUG

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -no-opaque-pointers -verify -triple x86_64-apple-darwin10 -fopenmp-simd -fopenmp-version=45 -fexceptions -fcxx-exceptions -debug-info-kind=line-tables-only -x c++ -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -DOMP5 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -DOMP5 -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -DOMP5 -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -no-opaque-pointers -verify -triple x86_64-apple-darwin10 -fopenmp-simd -DOMP5 -fexceptions -fcxx-exceptions -debug-info-kind=line-tables-only -x c++ -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// expected-no-diagnostics
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
#ifndef HEADER
#define HEADER

long long get_val() { extern void mayThrow(); mayThrow(); return 0; }
double *g_ptr;

// CHECK-LABEL: define {{.*void}} @{{.*}}simple{{.*}}(float* noundef {{.+}}, float* noundef {{.+}}, float* noundef {{.+}}, float* noundef {{.+}})
void simple(float *a, float *b, float *c, float *d) {
// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: [[K0:%.+]] = call {{.*}}i64 @{{.*}}get_val
// CHECK-NEXT: store i64 [[K0]], i64* [[K_VAR:%[^,]+]]
// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: store i32 12, i32* [[LIN_VAR:%[^,]+]]
// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: store i32 -1, i32* [[A:%.+]],
// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
// CHECK: store i32 -1, i32* [[R:%[^,]+]],
// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
#pragma omp parallel for simd
// CHECK: call void @__kmpc_for_static_init_4(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], 5
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]:
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]:
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]:
// CHECK: [[UP:%.+]] = phi i32 [ 5, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV:%[^,]+]],

// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[SIMPLE_LOOP1_BODY:.+]], label %[[SIMPLE_LOOP1_END:[^,]+]]
  for (int i = 3; i < 32; i += 5) {
// CHECK: [[SIMPLE_LOOP1_BODY]]:
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 5
// CHECK-NEXT: [[CALC_I_2:%.+]] = add nsw i32 3, [[CALC_I_1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float*
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]
// br label %{{.+}}, !llvm.loop !{{.+}}
  }
// CHECK: [[SIMPLE_LOOP1_END]]:
// CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}})

  long long k = get_val();

#pragma omp parallel for simd linear(k : 3) schedule(dynamic)
// CHECK: [[K0LOAD:%.+]] = load i64, i64* [[K_VAR:%[^,]+]]
// CHECK-NEXT: store i64 [[K0LOAD]], i64* [[LIN0:%[^,]+]]

// CHECK: call void @__kmpc_dispatch_init_4(%struct.ident_t* {{.+}}, i32 %{{.+}}, i32 {{35|1073741859}}, i32 0, i32 8, i32 1, i32 1)
// CHECK: [[NEXT:%.+]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* {{.+}}, i32 %{{.+}}, i32* %{{.+}}, i32* [[LB:%.+]], i32* [[UB:%.+]], i32* %{{.+}})
// CHECK: [[COND:%.+]] = icmp ne i32 [[NEXT]], 0
// CHECK: br i1 [[COND]], label %[[CONT:.+]], label %[[END:.+]]
// CHECK: [[CONT]]:
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV2:%[^,]+]],

// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.access.group
// CHECK-NEXT: [[CMP2:%.+]] = icmp sle i32 [[IV2]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP2]], label %[[SIMPLE_LOOP2_BODY:.+]], label %[[SIMPLE_LOOP2_END:[^,]+]]
  for (int i = 10; i > 1; i--) {
// CHECK: [[SIMPLE_LOOP2_BODY]]:
// Start of body: calculate i from IV:
// CHECK: [[IV2_0:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group
// FIXME: Why is the following "mul 1" not constant folded?
// CHECK-NEXT: [[IV2_1:%.+]] = mul nsw i32 [[IV2_0]], 1
// CHECK-NEXT: [[LC_I_1:%.+]] = sub nsw i32 10, [[IV2_1]]
// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.access.group
//
// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.access.group
// CHECK-NEXT: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group
// CHECK-NEXT: [[LIN_MUL1:%.+]] = mul nsw i32 [[IV2_2]], 3
// CHECK-NEXT: [[LIN_EXT1:%.+]] = sext i32 [[LIN_MUL1]] to i64
// CHECK-NEXT: [[LIN_ADD1:%.+]] = add nsw i64 [[LIN0_1]], [[LIN_EXT1]]
// Update of the privatized version of the linear variable!
// CHECK-NEXT: store i64 [[LIN_ADD1]], i64* [[K_PRIVATIZED:%[^,]+]]
    a[k]++;
    k = k + 3;
// CHECK: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.access.group
// CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV2_2]], 1
// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV2]]{{.*}}!llvm.access.group
// br label {{.+}}, !llvm.loop ![[SIMPLE_LOOP2_ID]]
  }
// CHECK: [[SIMPLE_LOOP2_END]]:
//
// Update linear vars after loop, as the loop was operating on a private version.
// CHECK: [[LIN0_2:%.+]] = load i64, i64* [[K_PRIVATIZED]]
// CHECK-NEXT: store i64 [[LIN0_2]], i64* %{{.+}}

  int lin = 12;
#pragma omp parallel for simd linear(lin : get_val()), linear(g_ptr)

// CHECK: alloca i32,
// Init linear private var.
// CHECK: [[LIN_VAR:%.+]] = load i32*, i32** %
// CHECK: [[LIN_LOAD:%.+]] = load i32, i32* [[LIN_VAR]]
// CHECK-NEXT: store i32 [[LIN_LOAD]], i32* [[LIN_START:%[^,]+]]
// Remember linear step.
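// The step for 'lin' is the call get_val(); because the callee may throw, it is
// emitted as an invoke, evaluated once before the loop, and the result is cached
// in a step temporary that the per-iteration linear updates read.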
// CHECK: [[CALL_VAL:%.+]] = invoke
// CHECK: store i64 [[CALL_VAL]], i64* [[LIN_STEP:%[^,]+]]

// CHECK: [[GLIN_LOAD:%.+]] = load double*, double** [[GLIN_VAR:%.+]],
// CHECK-NEXT: store double* [[GLIN_LOAD]], double** [[GLIN_START:%[^,]+]]

// CHECK: call void @__kmpc_for_static_init_8u(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp ugt i64 [[UB_VAL]], 3
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]:
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]:
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]:
// CHECK: [[UP:%.+]] = phi i64 [ 3, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV3:%[^,]+]],

// CHECK: [[IV3:%.+]] = load i64, i64* [[OMP_IV3]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]]
// CHECK-NEXT: [[CMP3:%.+]] = icmp ule i64 [[IV3]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP3]], label %[[SIMPLE_LOOP3_BODY:.+]], label %[[SIMPLE_LOOP3_END:[^,]+]]
  for (unsigned long long it = 2000; it >= 600; it-=400) {
// CHECK: [[SIMPLE_LOOP3_BODY]]:
// Start of body: calculate it from IV:
// CHECK: [[IV3_0:%.+]] = load i64, i64* [[OMP_IV3]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul i64 [[IV3_0]], 400
// CHECK-NEXT: [[LC_IT_2:%.+]] = sub i64 2000, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* {{.+}}
//
// Linear start and step are used to calculate the current value of the linear variable.
// CHECK: [[LINSTART:.+]] = load i32, i32* [[LIN_START]]
// CHECK: [[LINSTEP:.+]] = load i64, i64* [[LIN_STEP]]
// CHECK-NOT: store i32 {{.+}}, i32* [[LIN_VAR]]
// CHECK: store i32 {{.+}}, i32* [[LIN_PRIV:%[^,]+]],
// CHECK: [[GLINSTART:.+]] = load double*, double** [[GLIN_START]]
// CHECK-NEXT: [[IV3_1:%.+]] = load i64, i64* [[OMP_IV3]]
// CHECK-NEXT: [[MUL:%.+]] = mul i64 [[IV3_1]], 1
// CHECK: [[GEP:%.+]] = getelementptr{{.*}}[[GLINSTART]]
// CHECK-NEXT: store double* [[GEP]], double** [[G_PTR_CUR:%[^,]+]]
    *g_ptr++ = 0.0;
// CHECK: [[GEP_VAL:%.+]] = load double{{.*}}[[G_PTR_CUR]]
// CHECK: store double{{.*}}[[GEP_VAL]]
    a[it + lin]++;
// CHECK: [[FLT_INC:%.+]] = fadd float
// CHECK-NEXT: store float [[FLT_INC]],
// CHECK: [[IV3_2:%.+]] = load i64, i64* [[OMP_IV3]]
// CHECK-NEXT: [[ADD3_2:%.+]] = add i64 [[IV3_2]], 1
// CHECK-NEXT: store i64 [[ADD3_2]], i64* [[OMP_IV3]]
  }
// CHECK: [[SIMPLE_LOOP3_END]]:
// CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}})
//
// Linear start and step are used to calculate the final value of the linear variables.
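// The original lin and g_ptr are only written back here, after the loop, from
// their privatized copies.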
// CHECK: [[LIN:%.+]] = load i32, i32* [[LIN_PRIV]]
// CHECK: store i32 [[LIN]], i32* [[LIN_VAR]],
// CHECK: [[GLIN:%.+]] = load double*, double** [[G_PTR_CUR]]
// CHECK: store double* [[GLIN]], double** [[GLIN_VAR]],

#pragma omp parallel for simd
// CHECK: call void @__kmpc_for_static_init_4(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], 3
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]:
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]:
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]:
// CHECK: [[UP:%.+]] = phi i32 [ 3, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV4:%[^,]+]],

// CHECK: [[IV4:%.+]] = load i32, i32* [[OMP_IV4]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]
// CHECK-NEXT: [[CMP4:%.+]] = icmp sle i32 [[IV4]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP4]], label %[[SIMPLE_LOOP4_BODY:.+]], label %[[SIMPLE_LOOP4_END:[^,]+]]
  for (short it = 6; it <= 20; it-=-4) {
// CHECK: [[SIMPLE_LOOP4_BODY]]:
// Start of body: calculate it from IV:
// CHECK: [[IV4_0:%.+]] = load i32, i32* [[OMP_IV4]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i32 [[IV4_0]], 4
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i32 6, [[LC_IT_1]]
// CHECK-NEXT: [[LC_IT_3:%.+]] = trunc i32 [[LC_IT_2]] to i16
// CHECK-NEXT: store i16 [[LC_IT_3]], i16*

// CHECK: [[IV4_2:%.+]] = load i32, i32* [[OMP_IV4]]
// CHECK-NEXT: [[ADD4_2:%.+]] = add nsw i32 [[IV4_2]], 1
// CHECK-NEXT: store i32 [[ADD4_2]], i32* [[OMP_IV4]]
  }
// CHECK: [[SIMPLE_LOOP4_END]]:
// CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}})

#pragma omp parallel for simd
// CHECK: call void @__kmpc_for_static_init_4(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], 25
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]:
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]:
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]:
// CHECK: [[UP:%.+]] = phi i32 [ 25, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV5:%[^,]+]],

// CHECK: [[IV5:%.+]] = load i32, i32* [[OMP_IV5]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]
// CHECK-NEXT: [[CMP5:%.+]] = icmp sle i32 [[IV5]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP5]], label %[[SIMPLE_LOOP5_BODY:.+]], label %[[SIMPLE_LOOP5_END:[^,]+]]
  for (unsigned char it = 'z'; it >= 'a'; it+=-1) {
// CHECK: [[SIMPLE_LOOP5_BODY]]:
// Start of body: calculate it from IV:
// CHECK: [[IV5_0:%.+]] = load i32, i32* [[OMP_IV5]]
// CHECK-NEXT: [[IV5_1:%.+]] = mul nsw i32 [[IV5_0]], 1
// CHECK-NEXT: [[LC_IT_1:%.+]] = sub nsw i32 122, [[IV5_1]]
// CHECK-NEXT: [[LC_IT_2:%.+]] = trunc i32 [[LC_IT_1]] to i8
// CHECK-NEXT: store i8 [[LC_IT_2]], i8* {{.+}},

// CHECK: [[IV5_2:%.+]] = load i32, i32* [[OMP_IV5]]
// CHECK-NEXT: [[ADD5_2:%.+]] = add nsw i32 [[IV5_2]], 1
// CHECK-NEXT: store i32 [[ADD5_2]], i32* [[OMP_IV5]]
  }
// CHECK: [[SIMPLE_LOOP5_END]]:
// CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}})

// CHECK-NOT: mul i32 %{{.+}}, 10
#pragma omp parallel for simd
  for (unsigned i=100; i<10; i+=10) {
  }

  int A;
  {
    A = -1;
#pragma omp parallel for simd lastprivate(A)
// CHECK: call void @__kmpc_for_static_init_8(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 6
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]:
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]:
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]:
// CHECK: [[UP:%.+]] = phi i64 [ 6, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV7:%[^,]+]],

// CHECK: br label %[[SIMD_LOOP7_COND:[^,]+]]
// CHECK: [[SIMD_LOOP7_COND]]:
// CHECK-NEXT: [[IV7:%.+]] = load i64, i64* [[OMP_IV7]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]]
// CHECK-NEXT: [[CMP7:%.+]] = icmp sle i64 [[IV7]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP7]], label %[[SIMPLE_LOOP7_BODY:.+]], label %[[SIMPLE_LOOP7_END:[^,]+]]
    for (long long i = -10; i < 10; i += 3) {
// CHECK: [[SIMPLE_LOOP7_BODY]]:
// Start of body: calculate i from IV:
// CHECK: [[IV7_0:%.+]] = load i64, i64* [[OMP_IV7]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i64 [[IV7_0]], 3
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i64 -10, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* [[LC:%[^,]+]],
// CHECK-NEXT: [[LC_VAL:%.+]] = load i64, i64* [[LC]]
// CHECK-NEXT: [[CONV:%.+]] = trunc i64 [[LC_VAL]] to i32
// CHECK-NEXT: store i32 [[CONV]], i32* [[A_PRIV:%[^,]+]],
      A = i;
// CHECK: [[IV7_2:%.+]] = load i64, i64* [[OMP_IV7]]
// CHECK-NEXT: [[ADD7_2:%.+]] = add nsw i64 [[IV7_2]], 1
// CHECK-NEXT: store i64 [[ADD7_2]], i64* [[OMP_IV7]]
    }
// CHECK: [[SIMPLE_LOOP7_END]]:
// CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}})
// CHECK: load i32, i32*
// CHECK: icmp ne i32 %{{.+}}, 0
// CHECK: br i1 %{{.+}}, label
// CHECK: [[A_PRIV_VAL:%.+]] = load i32, i32* [[A_PRIV]],
// CHECK-NEXT: store i32 [[A_PRIV_VAL]], i32* %{{.+}},
// CHECK-NEXT: br label
  }
  int R;
  {
    R = -1;
// CHECK: store i32 1, i32* [[R_PRIV:%[^,]+]],
#pragma omp parallel for simd reduction(*:R)
// CHECK: call void @__kmpc_for_static_init_8(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 6
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]:
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]:
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]:
// CHECK: [[UP:%.+]] = phi i64 [ 6, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV8:%[^,]+]],

// CHECK: br label %[[SIMD_LOOP8_COND:[^,]+]]
// CHECK: [[SIMD_LOOP8_COND]]:
// CHECK-NEXT: [[IV8:%.+]] = load i64, i64* [[OMP_IV8]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]]
// CHECK-NEXT: [[CMP8:%.+]] = icmp sle i64 [[IV8]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP8]], label %[[SIMPLE_LOOP8_BODY:.+]], label %[[SIMPLE_LOOP8_END:[^,]+]]
    for (long long i = -10; i < 10; i += 3) {
// CHECK: [[SIMPLE_LOOP8_BODY]]:
// Start of body: calculate i from IV:
// CHECK: [[IV8_0:%.+]] = load i64, i64* [[OMP_IV8]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i64 [[IV8_0]], 3
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i64 -10, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* [[LC:%[^,]+]],
// CHECK-NEXT: [[LC_VAL:%.+]] = load i64, i64* [[LC]]
// CHECK: store i32 %{{.+}}, i32* [[R_PRIV]],
      R *= i;
// CHECK: [[IV8_2:%.+]] = load i64, i64* [[OMP_IV8]]
// CHECK-NEXT: [[ADD8_2:%.+]] = add nsw i64 [[IV8_2]], 1
// CHECK-NEXT: store i64 [[ADD8_2]], i64* [[OMP_IV8]]
    }
// CHECK: [[SIMPLE_LOOP8_END]]:
// CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call i32 @__kmpc_reduce_nowait(
// CHECK: [[R_PRIV_VAL:%.+]] = load i32, i32* [[R_PRIV]],
// CHECK: [[RED:%.+]] = mul nsw i32 %{{.+}}, [[R_PRIV_VAL]]
// CHECK-NEXT: store i32 [[RED]], i32* %{{.+}},
// CHECK-NEXT: call void @__kmpc_end_reduce_nowait(
  }
}

template <class T, unsigned K> T tfoo(T a) { return a + K; }

template <typename T, unsigned N>
int templ1(T a, T *z) {
#pragma omp parallel for simd collapse(N)
  for (int i = 0; i < N * 2; i++) {
    for (long long j = 0; j < (N + N + N + N); j += 2) {
      z[i + j] = a + tfoo<T, N>(i + j);
    }
  }
  return 0;
}

// Instantiation of templ1<float,2>
// CHECK-LABEL: define {{.*i32}} @{{.*}}templ1{{.*}}(float noundef {{.+}}, float* noundef {{.+}})
// CHECK: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
void inst_templ1() {
  float a;
  float z[100];
  templ1<float,2> (a, z);
}

// OMP50: call void @__kmpc_for_static_init_8(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// OMP50: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// OMP50: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 15
// OMP50: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// OMP50: [[TRUE]]:
// OMP50: br label %[[SWITCH:[^,]+]]
// OMP50: [[FALSE]]:
// OMP50: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// OMP50: br label %[[SWITCH]]
// OMP50: [[SWITCH]]:
// OMP50: [[UP:%.+]] = phi i64 [ 15, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// OMP50: store i64 [[UP]], i64* [[UB]],
// OMP50: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// OMP50: store i64 [[LB_VAL]], i64* [[T1_OMP_IV:%[^,]+]],

// ...
// OMP50: [[IV:%.+]] = load i64, i64* [[T1_OMP_IV]]
// OMP50-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]]
// OMP50-NEXT: [[CMP1:%.+]] = icmp sle i64 [[IV]], [[UB_VAL]]
// OMP50-NEXT: br i1 [[CMP1]], label %[[T1_BODY:.+]], label %[[T1_END:[^,]+]]
// OMP50: [[T1_BODY]]:
// Loop counters i and j updates:
// OMP50: [[IV1:%.+]] = load i64, i64* [[T1_OMP_IV]]
// OMP50-NEXT: [[I_1:%.+]] = sdiv i64 [[IV1]], 4
// OMP50-NEXT: [[I_1_MUL1:%.+]] = mul nsw i64 [[I_1]], 1
// OMP50-NEXT: [[I_1_ADD0:%.+]] = add nsw i64 0, [[I_1_MUL1]]
// OMP50-NEXT: [[I_2:%.+]] = trunc i64 [[I_1_ADD0]] to i32
// OMP50-NEXT: store i32 [[I_2]], i32*
// OMP50: [[IV2:%.+]] = load i64, i64* [[T1_OMP_IV]]
// OMP50: [[IV2_1:%.+]] = load i64, i64* [[T1_OMP_IV]]
// OMP50-NEXT: [[DIV_1:%.+]] = sdiv i64 [[IV2_1]], 4
// OMP50-NEXT: [[MUL_1:%.+]] = mul nsw i64 [[DIV_1]], 4
// OMP50-NEXT: [[J_1:%.+]] = sub nsw i64 [[IV2]], [[MUL_1]]
// OMP50-NEXT: [[J_2:%.+]] = mul nsw i64 [[J_1]], 2
// OMP50-NEXT: [[J_2_ADD0:%.+]] = add nsw i64 0, [[J_2]]
// OMP50-NEXT: store i64 [[J_2_ADD0]], i64*
// simd.for.inc:
// OMP50: [[IV3:%.+]] = load i64, i64* [[T1_OMP_IV]]
// OMP50-NEXT: [[INC:%.+]] = add nsw i64 [[IV3]], 1
// OMP50-NEXT: store i64 [[INC]], i64*
// OMP50-NEXT: br label {{%.+}}
// OMP50: [[T1_END]]:
// OMP50: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}})
// OMP50: ret void
//

typedef int MyIdx;

class IterDouble {
  double *Ptr;
public:
  IterDouble operator++ () const {
    IterDouble n;
    n.Ptr = Ptr + 1;
    return n;
  }
  bool operator < (const IterDouble &that) const {
    return Ptr < that.Ptr;
  }
  double & operator *() const {
    return *Ptr;
  }
  MyIdx operator - (const IterDouble &that) const {
    return (MyIdx) (Ptr - that.Ptr);
  }
  IterDouble operator + (int Delta) {
    IterDouble re;
    re.Ptr = Ptr + Delta;
    return re;
  }

  ///~IterDouble() {}
};

// CHECK-LABEL: define {{.*void}} @{{.*}}iter_simple{{.*}}
void iter_simple(IterDouble ia, IterDouble ib, IterDouble ic) {
//
// Calculate number of iterations before the loop body.
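// The distance comes from IterDouble::operator-, and the generic
// (distance + step - 1) / step formula with step == 1 is applied, minus one to
// turn the iteration count into the last iteration index (DIFF1..DIFF5 below).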
// CHECK: [[DIFF1:%.+]] = invoke {{.*}}i32 @{{.*}}IterDouble{{.*}}
// CHECK: [[DIFF2:%.+]] = sub nsw i32 [[DIFF1]], 1
// CHECK-NEXT: [[DIFF3:%.+]] = add nsw i32 [[DIFF2]], 1
// CHECK-NEXT: [[DIFF4:%.+]] = sdiv i32 [[DIFF3]], 1
// CHECK-NEXT: [[DIFF5:%.+]] = sub nsw i32 [[DIFF4]], 1
// CHECK-NEXT: store i32 [[DIFF5]], i32* [[OMP_LAST_IT:%[^,]+]]{{.+}}
#pragma omp parallel for simd

// CHECK: call void @__kmpc_for_static_init_4(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK-DAG: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK-DAG: [[OMP_LAST_IT_VAL:%.+]] = load i32, i32* [[OMP_LAST_IT]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], [[OMP_LAST_IT_VAL]]
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]:
// CHECK: [[OMP_LAST_IT_VAL:%.+]] = load i32, i32* [[OMP_LAST_IT]],
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]:
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]:
// CHECK: [[UP:%.+]] = phi i32 [ [[OMP_LAST_IT_VAL]], %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[IT_OMP_IV:%[^,]+]],

// CHECK: [[IV:%.+]] = load i32, i32* [[IT_OMP_IV]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i32, i32* [[UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[IT_BODY:[^,]+]], label %[[IT_END:[^,]+]]
  for (IterDouble i = ia; i < ib; ++i) {
// CHECK: [[IT_BODY]]:
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i32, i32* [[IT_OMP_IV]]
// Call of operator+ (i, IV).
// CHECK: {{%.+}} = invoke {{.+}} @{{.*}}IterDouble{{.*}}
// ... loop body ...
    *i = *ic * 0.5;
// Float multiply and save result.
// CHECK: [[MULR:%.+]] = fmul double {{%.+}}, 5.000000e-01
// CHECK-NEXT: invoke {{.+}} @{{.*}}IterDouble{{.*}}
// CHECK: store double [[MULR:%.+]], double* [[RESULT_ADDR:%.+]]
    ++ic;
//
// CHECK: [[IV2:%.+]] = load i32, i32* [[IT_OMP_IV]]
// CHECK-NEXT: [[ADD2:%.+]] = add nsw i32 [[IV2]], 1
// CHECK-NEXT: store i32 [[ADD2]], i32* [[IT_OMP_IV]]
// br label %{{.*}}, !llvm.loop ![[ITER_LOOP_ID]]
  }
// CHECK: [[IT_END]]:
// CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}})
// CHECK: ret void
}


// CHECK-LABEL: define {{.*void}} @{{.*}}collapsed{{.*}}
void collapsed(float *a, float *b, float *c, float *d) {
  int i;      // outer loop counter
  unsigned j; // middle loop counter, leads to unsigned icmp in loop header.
  // k declared in the loop init below
  short l;    // inner loop counter
// CHECK: call void @__kmpc_for_static_init_4u(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp ugt i32 [[UB_VAL]], 119
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]:
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]:
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]:
// CHECK: [[UP:%.+]] = phi i32 [ 119, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV:%[^,]+]],
//
#pragma omp parallel for simd collapse(4)

// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp ule i32 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[COLL1_BODY:[^,]+]], label %[[COLL1_END:[^,]+]]
  for (i = 1; i < 3; i++)          // 2 iterations
    for (j = 2u; j < 5u; j++)      // 3 iterations
      for (int k = 3; k <= 6; k++) // 4 iterations
        for (l = 4; l < 9; ++l)    // 5 iterations
        {
// CHECK: [[COLL1_BODY]]:
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i32, i32* [[OMP_IV]]
// Calculation of the loop counter values.
// CHECK: [[CALC_I_1:%.+]] = udiv i32 [[IV1]], 60
// CHECK-NEXT: [[CALC_I_1_MUL1:%.+]] = mul i32 [[CALC_I_1]], 1
// CHECK-NEXT: [[CALC_I_2:%.+]] = add i32 1, [[CALC_I_1_MUL1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]

// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK: [[IV1_2_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_J_1:%.+]] = udiv i32 [[IV1_2_1]], 60
// CHECK-NEXT: [[MUL_1:%.+]] = mul i32 [[CALC_J_1]], 60
// CHECK-NEXT: [[SUB_3:%.+]] = sub i32 [[IV1_2]], [[MUL_1]]
// CHECK-NEXT: [[CALC_J_2:%.+]] = udiv i32 [[SUB_3]], 20
// CHECK-NEXT: [[CALC_J_2_MUL1:%.+]] = mul i32 [[CALC_J_2]], 1
// CHECK-NEXT: [[CALC_J_3:%.+]] = add i32 2, [[CALC_J_2_MUL1]]
// CHECK-NEXT: store i32 [[CALC_J_3]], i32* [[LC_J:.+]]

// CHECK: [[IV1_3:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK: [[IV1_3_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[DIV_1:%.+]] = udiv i32 [[IV1_3_1]], 60
// CHECK-NEXT: [[MUL_2:%.+]] = mul i32 [[DIV_1]], 60
// CHECK-NEXT: [[ADD_3:%.+]] = sub i32 [[IV1_3]], [[MUL_2]]

// CHECK: [[IV1_4:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK: [[IV1_4_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[DIV_2:%.+]] = udiv i32 [[IV1_4_1]], 60
// CHECK-NEXT: [[MUL_3:%.+]] = mul i32 [[DIV_2]], 60
// CHECK-NEXT: [[SUB_6:%.+]] = sub i32 [[IV1_4]], [[MUL_3]]
// CHECK-NEXT: [[DIV_3:%.+]] = udiv i32 [[SUB_6]], 20
// CHECK-NEXT: [[MUL_4:%.+]] = mul i32 [[DIV_3]], 20
// CHECK-NEXT: [[SUB_7:%.+]] = sub i32 [[ADD_3]], [[MUL_4]]
// CHECK-NEXT: [[DIV_4:%.+]] = udiv i32 [[SUB_7]], 5
// CHECK-NEXT: [[MUL_5:%.+]] = mul i32 [[DIV_4]], 1
// CHECK-NEXT: [[ADD_6:%.+]] = add i32 3, [[MUL_5]]
// CHECK-NEXT: store i32 [[ADD_6]], i32* [[LC_K:.+]]

// CHECK: [[IV1_5:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK: [[IV1_5_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[DIV_5:%.+]] = udiv i32 [[IV1_5_1]], 60
// CHECK-NEXT: [[MUL_6:%.+]] = mul i32 [[DIV_5]], 60
// CHECK-NEXT: [[ADD_7:%.+]] = sub i32 [[IV1_5]], [[MUL_6]]

// CHECK: [[IV1_6:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK: [[IV1_6_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[DIV_6:%.+]] = udiv i32 [[IV1_6_1]], 60
// CHECK-NEXT: [[MUL_7:%.+]] = mul i32 [[DIV_6]], 60
// CHECK-NEXT: [[SUB_10:%.+]] = sub i32 [[IV1_6]], [[MUL_7]]
// CHECK-NEXT: [[DIV_7:%.+]] = udiv i32 [[SUB_10]], 20
// CHECK-NEXT: [[MUL_8:%.+]] = mul i32 [[DIV_7]], 20
// CHECK-NEXT: [[ADD_9:%.+]] = sub i32 [[ADD_7]], [[MUL_8]]

// CHECK: [[IV1_7:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK: [[IV1_7_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[DIV_8:%.+]] = udiv i32 [[IV1_7_1]], 60
// CHECK-NEXT: [[MUL_9:%.+]] = mul i32 [[DIV_8]], 60
// CHECK-NEXT: [[ADD_10:%.+]] = sub i32 [[IV1_7]], [[MUL_9]]

// CHECK: [[IV1_8:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK: [[IV1_8_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[DIV_3:%.+]] = udiv i32 [[IV1_8_1]], 60
// CHECK-NEXT: [[MUL_4:%.+]] = mul i32 [[DIV_3]], 60
// CHECK-NEXT: [[SUB_7:%.+]] = sub i32 [[IV1_8]], [[MUL_4]]
// CHECK-NEXT: [[DIV_4:%.+]] = udiv i32 [[SUB_7]], 20
// CHECK-NEXT: [[MUL_5:%.+]] = mul i32 [[DIV_4]], 20
// CHECK-NEXT: [[SUB_8:%.+]] = sub i32 [[ADD_10]], [[MUL_5]]
// CHECK-NEXT: [[DIV_5:%.+]] = udiv i32 [[SUB_8]], 5
// CHECK-NEXT: [[MUL_6:%.+]] = mul i32 [[DIV_5]], 5
// CHECK-NEXT: [[SUB_9:%.+]] = sub i32 [[ADD_9]], [[MUL_6]]
// CHECK-NEXT: [[MUL_6:%.+]] = mul i32 [[SUB_9]], 1
// CHECK-NEXT: [[CALC_L_2:%.+]] = add i32 4, [[MUL_6]]
// CHECK-NEXT: [[CALC_L_3:%.+]] = trunc i32 [[CALC_L_2]] to i16
// CHECK-NEXT: store i16 [[CALC_L_3]], i16* [[LC_L:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]
          float res = b[j] * c[k];
          a[i] = res * d[l];
// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[ADD2:%.+]] = add i32 [[IV2]], 1
// CHECK-NEXT: store i32 [[ADD2]], i32* [[OMP_IV]]
// br label %{{[^,]+}}, !llvm.loop ![[COLL1_LOOP_ID]]
// CHECK: [[COLL1_END]]:
        }
// i,j,l are updated; k is not updated.
// CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}})
// CHECK: store i32 3, i32* [[I:%[^,]+]]
// CHECK: store i32 5, i32* [[I:%[^,]+]]
// CHECK: store i16 9, i16* [[I:%[^,]+]]
// CHECK: ret void
}

extern char foo();
extern double globalfloat;

// CHECK-LABEL: define {{.*void}} @{{.*}}widened{{.*}}
void widened(float *a, float *b, float *c, float *d) {
  int i;   // outer loop counter
  short j; // inner loop counter
  globalfloat = 1.0;
  int localint = 1;
// CHECK: store double {{.+}}, double* [[GLOBALFLOAT:@.+]]
// Counter is widened to 64 bits.
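// With collapse(2) the combined iteration space has 2 * foo() iterations and
// foo() is only known at run time, so the collapsed iteration variable is
// computed in 64 bits (the i64 mul/sub below) instead of the narrower types of
// the original counters.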
// CHECK: [[MUL:%.+]] = mul nsw i64 2, %{{.+}}
// CHECK-NEXT: [[SUB:%.+]] = sub nsw i64 [[MUL]], 1
// CHECK-NEXT: store i64 [[SUB]], i64* [[OMP_LAST_IT:%[^,]+]],
// CHECK: call void @__kmpc_for_static_init_8(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK-DAG: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK-DAG: [[OMP_LAST_IT_VAL:%.+]] = load i64, i64* [[OMP_LAST_IT]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], [[OMP_LAST_IT_VAL]]
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]:
// CHECK: [[OMP_LAST_IT_VAL:%.+]] = load i64, i64* [[OMP_LAST_IT]],
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]:
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]:
// CHECK: [[UP:%.+]] = phi i64 [ [[OMP_LAST_IT_VAL]], %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV:%[^,]+]],
//
#pragma omp parallel for simd collapse(2) private(globalfloat, localint)

// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i64 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[WIDE1_BODY:[^,]+]], label %[[WIDE1_END:[^,]+]]
  for (i = 1; i < 3; i++)       // 2 iterations
    for (j = 0; j < foo(); j++) // foo() iterations
    {
// CHECK: [[WIDE1_BODY]]:
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i64, i64* [[OMP_IV]]
// Calculation of the loop counter values...
// CHECK: store i32 {{[^,]+}}, i32* [[LC_I:.+]]
// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]
// CHECK: store i16 {{[^,]+}}, i16* [[LC_J:.+]]
// ... loop body ...
//
// Here we expect a store into the private double var, not the global.
// CHECK-NOT: store double {{.+}}, double* [[GLOBALFLOAT]]
      globalfloat = (float)j/i;
      float res = b[j] * c[j];
// Store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]
      a[i] = res * d[i];
// Then there's a store into the private var localint:
// CHECK: store i32 {{.+}}, i32* [[LOCALINT:%[^,]+]]
      localint = (int)j;
// CHECK: [[IV2:%.+]] = load i64, i64* [[OMP_IV]]
// CHECK-NEXT: [[ADD2:%.+]] = add nsw i64 [[IV2]], 1
// CHECK-NEXT: store i64 [[ADD2]], i64* [[OMP_IV]]
//
// br label %{{[^,]+}}, !llvm.loop ![[WIDE1_LOOP_ID]]
// CHECK: [[WIDE1_END]]:
    }
// i,j are updated.
// CHECK: store i32 3, i32* [[I:%[^,]+]]
// CHECK: store i16
//
// Here we expect a store into the original localint, not its privatized version.
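// Outside the construct the privatized copies are out of scope, so the
// assignment below has to target the function-local localint again; the
// negative check guards against reusing the privatized alloca here.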
// CHECK-NOT: store i32 {{.+}}, i32* [[LOCALINT]]
  localint = (int)j;
// CHECK: ret void
}

// CHECK-LABEL: if_clause
void if_clause(int a) {
#pragma omp parallel for simd if(a) schedule(static, 1)
  for (int i = 0; i < 10; ++i);
}
// CHECK: call void @__kmpc_for_static_init_4(
// OMP50: [[COND:%.+]] = trunc i8 %{{.+}} to i1
// OMP50: br i1 [[COND]], label {{%?}}[[THEN:.+]], label {{%?}}[[ELSE:.+]]

// OMP50: [[THEN]]:
// OMP45: br label {{.+}}, !llvm.loop ![[VECT:.+]]
// OMP50: br label {{.+}}, !llvm.loop ![[VECT:.+]]
// OMP50: [[ELSE]]:
// OMP50: br label {{.+}}, !llvm.loop ![[NOVECT:.+]]
// CHECK: call void @__kmpc_for_static_fini(

// OMP45: call void @__kmpc_for_static_init_8(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// OMP45: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// OMP45: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 15
// OMP45: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// OMP45: [[TRUE]]:
// OMP45: br label %[[SWITCH:[^,]+]]
// OMP45: [[FALSE]]:
// OMP45: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// OMP45: br label %[[SWITCH]]
// OMP45: [[SWITCH]]:
// OMP45: [[UP:%.+]] = phi i64 [ 15, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// OMP45: store i64 [[UP]], i64* [[UB]],
// OMP45: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// OMP45: store i64 [[LB_VAL]], i64* [[T1_OMP_IV:%[^,]+]],

// ...
// OMP45: [[IV:%.+]] = load i64, i64* [[T1_OMP_IV]]
// OMP45-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]]
// OMP45-NEXT: [[CMP1:%.+]] = icmp sle i64 [[IV]], [[UB_VAL]]
// OMP45-NEXT: br i1 [[CMP1]], label %[[T1_BODY:.+]], label %[[T1_END:[^,]+]]
// OMP45: [[T1_BODY]]:
// Loop counters i and j updates:
// OMP45: [[IV1:%.+]] = load i64, i64* [[T1_OMP_IV]]
// OMP45-NEXT: [[I_1:%.+]] = sdiv i64 [[IV1]], 4
// OMP45-NEXT: [[I_1_MUL1:%.+]] = mul nsw i64 [[I_1]], 1
// OMP45-NEXT: [[I_1_ADD0:%.+]] = add nsw i64 0, [[I_1_MUL1]]
// OMP45-NEXT: [[I_2:%.+]] = trunc i64 [[I_1_ADD0]] to i32
// OMP45-NEXT: store i32 [[I_2]], i32*
// OMP45: [[IV2:%.+]] = load i64, i64* [[T1_OMP_IV]]
// OMP45: [[IV2_1:%.+]] = load i64, i64* [[T1_OMP_IV]]
// OMP45-NEXT: [[DIV_1:%.+]] = sdiv i64 [[IV2_1]], 4
// OMP45-NEXT: [[MUL_1:%.+]] = mul nsw i64 [[DIV_1]], 4
// OMP45-NEXT: [[J_1:%.+]] = sub nsw i64 [[IV2]], [[MUL_1]]
// OMP45-NEXT: [[J_2:%.+]] = mul nsw i64 [[J_1]], 2
// OMP45-NEXT: [[J_2_ADD0:%.+]] = add nsw i64 0, [[J_2]]
// OMP45-NEXT: store i64 [[J_2_ADD0]], i64*
// simd.for.inc:
// OMP45: [[IV3:%.+]] = load i64, i64* [[T1_OMP_IV]]
// OMP45-NEXT: [[INC:%.+]] = add nsw i64 [[IV3]], 1
// OMP45-NEXT: store i64 [[INC]], i64*
// OMP45-NEXT: br label {{%.+}}
// OMP45: [[T1_END]]:
// OMP45: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}})
// OMP45: ret void

// OMP50-LABEL: parallel_simd_atomic
// OMP50: call i32 @__kmpc_global_thread_num
// OMP50: call void @__kmpc_push_num_threads
// OMP50: call void {{.*}} @__kmpc_fork_call
// OMP50-LABEL: define internal void @_Z20parallel_simd_atomici.omp_outlined
// OMP50: call void @__kmpc_for_static_init_4
// OMP50: omp.inner.for.body:
// OMP50: %{{[0-9]+}} = atomicrmw add i32* %{{[0-9]+}}, i32 1 monotonic, align 4, !llvm.access.group !47

// OMP45-NOT: !{!"llvm.loop.vectorize.enable", i1 false}
// OMP45-DAG: ![[VECT]] = distinct !{![[VECT]], ![[PA:.+]], ![[VM:.+]]}
// OMP45-DAG: ![[PA]] = !{!"llvm.loop.parallel_accesses", !{{.+}}}
// OMP45-DAG: ![[VM]] = !{!"llvm.loop.vectorize.enable", i1 true}
// OMP45-NOT: !{!"llvm.loop.vectorize.enable", i1 false}
// OMP50-DAG: ![[VECT]] = distinct !{![[VECT]], ![[PA:.+]], ![[VM:.+]]}
// OMP50-DAG: ![[PA]] = !{!"llvm.loop.parallel_accesses", !{{.+}}}
// OMP50-DAG: ![[VM]] = !{!"llvm.loop.vectorize.enable", i1 true}
// OMP50-DAG: ![[NOVECT]] = distinct !{![[NOVECT]], ![[NOVM:.+]]}
// OMP50-DAG: ![[NOVM]] = !{!"llvm.loop.vectorize.enable", i1 false}

// TERM_DEBUG-LABEL: bar
int bar() { extern void mayThrow(); mayThrow(); return 0; };

// TERM_DEBUG-LABEL: parallel_simd
void parallel_simd(float *a) {
#pragma omp parallel for simd
  // TERM_DEBUG-NOT: __kmpc_global_thread_num
  // TERM_DEBUG: invoke noundef i32 {{.*}}bar{{.*}}()
  // TERM_DEBUG: unwind label %[[TERM_LPAD:[a-zA-Z0-9\.]+]],
  // TERM_DEBUG-NOT: __kmpc_global_thread_num
  // TERM_DEBUG: [[TERM_LPAD]]
  // TERM_DEBUG: call void @__clang_call_terminate
  // TERM_DEBUG: unreachable
  for (unsigned i = 131071; i <= 2147483647; i += 127)
    a[i] += bar();
}
// TERM_DEBUG: !{{[0-9]+}} = !DILocation(line: [[@LINE-11]],
// TERM_DEBUG-NOT: line: 0,

#ifdef OMP5
void parallel_simd_atomic(int a) {
#pragma omp parallel for simd shared(a) num_threads(10)
  for (int i = 0; i < 100; i++) {
#pragma omp atomic update
    a += 1;
  }
}
#endif // OMP5

#endif // HEADER