// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics

#ifndef HEADER
#define HEADER

struct MyIterator {
  MyIterator(unsigned pos);
  MyIterator(const MyIterator &other);
  const MyIterator &operator=(const MyIterator &that);
  MyIterator &operator++();
  int operator-(const MyIterator &that) const;
  MyIterator &operator+=(unsigned a);
  MyIterator operator+(unsigned a) const;
  bool operator==(const MyIterator &that) const;
  bool operator!=(const MyIterator &that) const;
  unsigned operator*() const;
};

struct MyRange {
  MyRange(int n);

  MyIterator begin();
  MyIterator end();
};

extern "C" void workshareloop_rangefor(float *a, float *b, float *c) {
#pragma omp for
  for (unsigned i : MyRange(42)) {
    a[i] = b[i] * c[i];
  }
}

#endif // HEADER
// CHECK-LABEL: define {{[^@]+}}@workshareloop_rangefor
// CHECK-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]], ptr noundef [[C:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[__RANGE2:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_MYRANGE:%.*]], align 1
// CHECK-NEXT:    [[__BEGIN2:%.*]] = alloca [[STRUCT_MYITERATOR:%.*]], align 1
// CHECK-NEXT:    [[__END2:%.*]] = alloca [[STRUCT_MYITERATOR]], align 1
// CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK-NEXT:    [[AGG_CAPTURED1:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 1
// CHECK-NEXT:    [[DOTCOUNT_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT:    [[P_LASTITER:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[P_LOWERBOUND:%.*]] = alloca i64, align 8
// CHECK-NEXT:    [[P_UPPERBOUND:%.*]] = alloca i64, align 8
// CHECK-NEXT:    [[P_STRIDE:%.*]] = alloca i64, align 8
// CHECK-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK-NEXT:    store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK-NEXT:    store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK-NEXT:    call void @_ZN7MyRangeC1Ei(ptr noundef nonnull align 1 dereferenceable(1) [[REF_TMP]], i32 noundef 42)
// CHECK-NEXT:    store ptr [[REF_TMP]], ptr [[__RANGE2]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[__RANGE2]], align 8
// CHECK-NEXT:    call void @_ZN7MyRange5beginEv(ptr dead_on_unwind writable sret([[STRUCT_MYITERATOR]]) align 1 [[__BEGIN2]], ptr noundef nonnull align 1 dereferenceable(1) [[TMP0]])
// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[__RANGE2]], align 8
// CHECK-NEXT:    call void @_ZN7MyRange3endEv(ptr dead_on_unwind writable sret([[STRUCT_MYITERATOR]]) align 1 [[__END2]], ptr noundef nonnull align 1 dereferenceable(1) [[TMP1]])
// CHECK-NEXT:    [[CALL:%.*]] = call noundef i32 @_ZNK10MyIteratordeEv(ptr noundef nonnull align 1 dereferenceable(1) [[__BEGIN2]])
// CHECK-NEXT:    store i32 [[CALL]], ptr [[I]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT:    store ptr [[__BEGIN2]], ptr [[TMP2]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1
// CHECK-NEXT:    store ptr [[__END2]], ptr [[TMP3]], align 8
// CHECK-NEXT:    call void @_ZN10MyIteratorC1ERKS_(ptr noundef nonnull align 1 dereferenceable(1) [[AGG_CAPTURED1]], ptr noundef nonnull align 1 dereferenceable(1) [[__BEGIN2]])
// CHECK-NEXT:    call void @__captured_stmt(ptr [[DOTCOUNT_ADDR]], ptr [[AGG_CAPTURED]])
// CHECK-NEXT:    [[DOTCOUNT:%.*]] = load i64, ptr [[DOTCOUNT_ADDR]], align 8
// CHECK-NEXT:    br label [[OMP_LOOP_PREHEADER:%.*]]
// CHECK:       omp_loop.preheader:
// CHECK-NEXT:    store i64 0, ptr [[P_LOWERBOUND]], align 8
// CHECK-NEXT:    [[TMP4:%.*]] = sub i64 [[DOTCOUNT]], 1
// CHECK-NEXT:    store i64 [[TMP4]], ptr [[P_UPPERBOUND]], align 8
// CHECK-NEXT:    store i64 1, ptr [[P_STRIDE]], align 8
// CHECK-NEXT:    [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
// CHECK-NEXT:    call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 34, ptr [[P_LASTITER]], ptr [[P_LOWERBOUND]], ptr [[P_UPPERBOUND]], ptr [[P_STRIDE]], i64 1, i64 0)
// CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr [[P_LOWERBOUND]], align 8
// CHECK-NEXT:    [[TMP6:%.*]] = load i64, ptr [[P_UPPERBOUND]], align 8
// CHECK-NEXT:    [[TMP7:%.*]] = sub i64 [[TMP6]], [[TMP5]]
// CHECK-NEXT:    [[TMP8:%.*]] = add i64 [[TMP7]], 1
// CHECK-NEXT:    br label [[OMP_LOOP_HEADER:%.*]]
// CHECK:       omp_loop.header:
// CHECK-NEXT:    [[OMP_LOOP_IV:%.*]] = phi i64 [ 0, [[OMP_LOOP_PREHEADER]] ], [ [[OMP_LOOP_NEXT:%.*]], [[OMP_LOOP_INC:%.*]] ]
// CHECK-NEXT:    br label [[OMP_LOOP_COND:%.*]]
// CHECK:       omp_loop.cond:
// CHECK-NEXT:    [[OMP_LOOP_CMP:%.*]] = icmp ult i64 [[OMP_LOOP_IV]], [[TMP8]]
// CHECK-NEXT:    br i1 [[OMP_LOOP_CMP]], label [[OMP_LOOP_BODY:%.*]], label [[OMP_LOOP_EXIT:%.*]]
// CHECK:       omp_loop.body:
// CHECK-NEXT:    [[TMP9:%.*]] = add i64 [[OMP_LOOP_IV]], [[TMP5]]
// CHECK-NEXT:    call void @__captured_stmt.1(ptr [[I]], i64 [[TMP9]], ptr [[AGG_CAPTURED1]])
// CHECK-NEXT:    [[TMP10:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr [[I]], align 4
// CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[TMP11]] to i64
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[TMP10]], i64 [[IDXPROM]]
// CHECK-NEXT:    [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4
// CHECK-NEXT:    [[TMP13:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr [[I]], align 4
// CHECK-NEXT:    [[IDXPROM2:%.*]] = zext i32 [[TMP14]] to i64
// CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[IDXPROM2]]
// CHECK-NEXT:    [[TMP15:%.*]] = load float, ptr [[ARRAYIDX3]], align 4
// CHECK-NEXT:    [[MUL:%.*]] = fmul float [[TMP12]], [[TMP15]]
// CHECK-NEXT:    [[TMP16:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP17:%.*]] = load i32, ptr [[I]], align 4
// CHECK-NEXT:    [[IDXPROM4:%.*]] = zext i32 [[TMP17]] to i64
// CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP16]], i64 [[IDXPROM4]]
// CHECK-NEXT:    store float [[MUL]], ptr [[ARRAYIDX5]], align 4
// CHECK-NEXT:    br label [[OMP_LOOP_INC]]
// CHECK:       omp_loop.inc:
// CHECK-NEXT:    [[OMP_LOOP_NEXT]] = add nuw i64 [[OMP_LOOP_IV]], 1
// CHECK-NEXT:    br label [[OMP_LOOP_HEADER]]
// CHECK:       omp_loop.exit:
// CHECK-NEXT:    call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]])
// CHECK-NEXT:    [[OMP_GLOBAL_THREAD_NUM6:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-NEXT:    call void @__kmpc_barrier(ptr @[[GLOB2:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM6]])
// CHECK-NEXT:    br label [[OMP_LOOP_AFTER:%.*]]
// CHECK:       omp_loop.after:
// CHECK-NEXT:    ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__captured_stmt
// CHECK-SAME: (ptr noundef nonnull align 8 dereferenceable(8) [[DISTANCE:%.*]], ptr noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[DISTANCE_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[DOTSTART:%.*]] = alloca [[STRUCT_MYITERATOR:%.*]], align 1
// CHECK-NEXT:    [[DOTSTOP:%.*]] = alloca [[STRUCT_MYITERATOR]], align 1
// CHECK-NEXT:    [[DOTSTEP:%.*]] = alloca i64, align 8
// CHECK-NEXT:    store ptr [[DISTANCE]], ptr [[DISTANCE_ADDR]], align 8
// CHECK-NEXT:    store ptr [[__CONTEXT]], ptr [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT_ANON:%.*]], ptr [[TMP0]], i32 0, i32 0
// CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 8
// CHECK-NEXT:    call void @_ZN10MyIteratorC1ERKS_(ptr noundef nonnull align 1 dereferenceable(1) [[DOTSTART]], ptr noundef nonnull align 1 dereferenceable(1) [[TMP2]])
// CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT_ANON]], ptr [[TMP0]], i32 0, i32 1
// CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[TMP3]], align 8
// CHECK-NEXT:    call void @_ZN10MyIteratorC1ERKS_(ptr noundef nonnull align 1 dereferenceable(1) [[DOTSTOP]], ptr noundef nonnull align 1 dereferenceable(1) [[TMP4]])
// CHECK-NEXT:    store i64 1, ptr [[DOTSTEP]], align 8
// CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr [[DOTSTEP]], align 8
// CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[TMP5]], 0
// CHECK-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK:       cond.true:
// CHECK-NEXT:    [[CALL:%.*]] = call noundef i32 @_ZNK10MyIteratormiERKS_(ptr noundef nonnull align 1 dereferenceable(1) [[DOTSTART]], ptr noundef nonnull align 1 dereferenceable(1) [[DOTSTOP]])
// CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[CALL]] to i64
// CHECK-NEXT:    [[TMP6:%.*]] = load i64, ptr [[DOTSTEP]], align 8
// CHECK-NEXT:    [[SUB:%.*]] = sub i64 0, [[TMP6]]
// CHECK-NEXT:    [[DIV:%.*]] = udiv i64 [[CONV]], [[SUB]]
// CHECK-NEXT:    br label [[COND_END:%.*]]
// CHECK:       cond.false:
// CHECK-NEXT:    [[CALL1:%.*]] = call noundef i32 @_ZNK10MyIteratormiERKS_(ptr noundef nonnull align 1 dereferenceable(1) [[DOTSTOP]], ptr noundef nonnull align 1 dereferenceable(1) [[DOTSTART]])
// CHECK-NEXT:    [[CONV2:%.*]] = sext i32 [[CALL1]] to i64
// CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr [[DOTSTEP]], align 8
// CHECK-NEXT:    [[DIV3:%.*]] = udiv i64 [[CONV2]], [[TMP7]]
// CHECK-NEXT:    br label [[COND_END]]
// CHECK:       cond.end:
// CHECK-NEXT:    [[COND:%.*]] = phi i64 [ [[DIV]], [[COND_TRUE]] ], [ [[DIV3]], [[COND_FALSE]] ]
// CHECK-NEXT:    [[TMP8:%.*]] = load ptr, ptr [[DISTANCE_ADDR]], align 8
// CHECK-NEXT:    store i64 [[COND]], ptr [[TMP8]], align 8
// CHECK-NEXT:    ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__captured_stmt.1
// CHECK-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[LOOPVAR:%.*]], i64 noundef [[LOGICAL:%.*]], ptr noalias noundef [[__CONTEXT:%.*]]) #[[ATTR2]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[LOOPVAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[LOGICAL_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_MYITERATOR:%.*]], align 1
// CHECK-NEXT:    store ptr [[LOOPVAR]], ptr [[LOOPVAR_ADDR]], align 8
// CHECK-NEXT:    store i64 [[LOGICAL]], ptr [[LOGICAL_ADDR]], align 8
// CHECK-NEXT:    store ptr [[__CONTEXT]], ptr [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr [[LOGICAL_ADDR]], align 8
// CHECK-NEXT:    [[MUL:%.*]] = mul i64 1, [[TMP1]]
// CHECK-NEXT:    [[CONV:%.*]] = trunc i64 [[MUL]] to i32
// CHECK-NEXT:    call void @_ZNK10MyIteratorplEj(ptr dead_on_unwind writable sret([[STRUCT_MYITERATOR]]) align 1 [[REF_TMP]], ptr noundef nonnull align 1 dereferenceable(1) [[TMP0]], i32 noundef [[CONV]])
// CHECK-NEXT:    [[CALL:%.*]] = call noundef i32 @_ZNK10MyIteratordeEv(ptr noundef nonnull align 1 dereferenceable(1) [[REF_TMP]])
// CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[LOOPVAR_ADDR]], align 8
// CHECK-NEXT:    store i32 [[CALL]], ptr [[TMP2]], align 4
// CHECK-NEXT:    ret void
//