// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -fexceptions -fcxx-exceptions -debug-info-kind=line-tables-only -x c++ -emit-llvm %s -o - | FileCheck %s --check-prefix=TERM_DEBUG
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -O1 -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=CLEANUP

// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -fexceptions -fcxx-exceptions -debug-info-kind=line-tables-only -x c++ -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -O1 -fopenmp-simd -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics

// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck --check-prefix=OMP5 %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix=OMP5 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
//
// Codegen test for '#pragma omp parallel for' / '#pragma omp for': checks the
// runtime calls (__kmpc_for_static_init_*/__kmpc_dispatch_*) and loop
// structure emitted for each schedule kind, plus OpenMP 5.0 '!=' loop
// conditions under -DOMP5.
#ifndef HEADER
#define HEADER

#ifndef OMP5
// CHECK-DAG: [[IDENT_T_TY:%.+]] = type { i32, i32, i32, i32, i8* }
// CHECK-DAG: [[LOOP_LOC:@.+]] = private unnamed_addr global [[IDENT_T_TY]] { i32 0, i32 514, i32 0, i32 0, i8*

// CHECK-LABEL: with_var_schedule
void with_var_schedule() {
  double a = 5;
// CHECK: [[CHUNK_SIZE:%.+]] = fptosi double %{{.+}}to i8
// CHECK: store i8 %{{.+}}, i8* [[CHUNK:%.+]],
// CHECK: [[VAL:%.+]] = load i8, i8* [[CHUNK]],
// CHECK: store i8 [[VAL]], i8*
// CHECK: [[CHUNK:%.+]] = load i64, i64* %
// CHECK: call void {{.+}} @__kmpc_fork_call({{.+}}, i64 [[CHUNK]])

// CHECK: [[CHUNK_VAL:%.+]] = load i8, i8* %
// CHECK: [[CHUNK_SIZE:%.+]] = sext i8 [[CHUNK_VAL]] to i64
// CHECK: call void @__kmpc_for_static_init_8u([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID:%[^,]+]], i32 33, i32* [[IS_LAST:%[^,]+]], i64* [[OMP_LB:%[^,]+]], i64* [[OMP_UB:%[^,]+]], i64* [[OMP_ST:%[^,]+]], i64 1, i64 [[CHUNK_SIZE]])
// CHECK: call void @__kmpc_for_static_fini([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]])
#pragma omp parallel for schedule(static, char(a))
  for (unsigned long long i = 1; i < 2; ++i) {
  }
}

// CHECK-LABEL: define {{.*void}} @{{.*}}without_schedule_clause{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void without_schedule_clause(float *a, float *b, float *c, float *d) {
#pragma omp parallel for
// CHECK: call void ([[IDENT_T_TY]]*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* [[OMP_PARALLEL_FUNC:@.+]] to void (i32*, i32*, ...)*),
// CHECK: define internal void [[OMP_PARALLEL_FUNC]](i32* noalias [[GTID_PARAM_ADDR:%.+]], i32* noalias %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}})
// CHECK: store i32* [[GTID_PARAM_ADDR]], i32** [[GTID_REF_ADDR:%.+]],
// CHECK: call void @__kmpc_for_static_init_4([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID:%.+]], i32 34, i32* [[IS_LAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 1)
// UB = min(UB, GlobalUB)
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[UBCMP:%.+]] = icmp sgt i32 [[UB]], 4571423
// CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]]
// CHECK: [[UBRESULT:%.+]] = phi i32 [ 4571423, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ]
// CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]]
// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]
// Loop header
// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (int i = 33; i < 32000000; i += 7) {
// CHECK: [[LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 7
// CHECK-NEXT: [[CALC_I_2:%.+]] = add nsw i32 33, [[CALC_I_1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
  }
// CHECK: [[LOOP1_END]]
// CHECK: call void @__kmpc_for_static_fini([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]])
// CHECK: ret void
}

// CHECK-LABEL: define {{.*void}} @{{.*}}static_not_chunked{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void static_not_chunked(float *a, float *b, float *c, float *d) {
#pragma omp parallel for schedule(static)
// CHECK: call void ([[IDENT_T_TY]]*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* [[OMP_PARALLEL_FUNC:@.+]] to void (i32*, i32*, ...)*),
// CHECK: define internal void [[OMP_PARALLEL_FUNC]](i32* noalias [[GTID_PARAM_ADDR:%.+]], i32* noalias %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}})
// CHECK: store i32* [[GTID_PARAM_ADDR]], i32** [[GTID_REF_ADDR:%.+]],
// CHECK: call void @__kmpc_for_static_init_4([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID:%.+]], i32 34, i32* [[IS_LAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 1)
// UB = min(UB, GlobalUB)
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[UBCMP:%.+]] = icmp sgt i32 [[UB]], 4571423
// CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]]
// CHECK: [[UBRESULT:%.+]] = phi i32 [ 4571423, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ]
// CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]]
// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]
// Loop header
// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (int i = 32000000; i > 33; i += -7) {
// CHECK: [[LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 7
// CHECK-NEXT: [[CALC_I_2:%.+]] = sub nsw i32 32000000, [[CALC_I_1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
  }
// CHECK: [[LOOP1_END]]
// CHECK: call void @__kmpc_for_static_fini([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]])
// CHECK: ret void
}

// CHECK-LABEL: define {{.*void}} @{{.*}}static_chunked{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void static_chunked(float *a, float *b, float *c, float *d) {
#pragma omp parallel for schedule(static, 5)
// CHECK: call void ([[IDENT_T_TY]]*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* [[OMP_PARALLEL_FUNC:@.+]] to void (i32*, i32*, ...)*),
// CHECK: define internal void [[OMP_PARALLEL_FUNC]](i32* noalias [[GTID_PARAM_ADDR:%.+]], i32* noalias %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}})
// CHECK: store i32* [[GTID_PARAM_ADDR]], i32** [[GTID_REF_ADDR:%.+]],
// CHECK: call void @__kmpc_for_static_init_4u([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID:%.+]], i32 33, i32* [[IS_LAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 5)
// UB = min(UB, GlobalUB)
// CHECK: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[UBCMP:%.+]] = icmp ugt i32 [[UB]], 16908288
// CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]]
// CHECK: [[UBRESULT:%.+]] = phi i32 [ 16908288, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ]
// CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]]
// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]

// Outer loop header
// CHECK: [[O_IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[O_UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[O_CMP:%.+]] = icmp ule i32 [[O_IV]], [[O_UB]]
// CHECK-NEXT: br i1 [[O_CMP]], label %[[O_LOOP1_BODY:[^,]+]], label %[[O_LOOP1_END:[^,]+]]

// Loop header
// CHECK: [[O_LOOP1_BODY]]
// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp ule i32 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (unsigned i = 131071; i <= 2147483647; i += 127) {
// CHECK: [[LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]
// CHECK-NEXT: [[CALC_I_1:%.+]] = mul i32 [[IV1_1]], 127
// CHECK-NEXT: [[CALC_I_2:%.+]] = add i32 131071, [[CALC_I_1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
  }
// CHECK: [[LOOP1_END]]
// Update the counters, adding stride
// CHECK: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: [[ST:%.+]] = load i32, i32* [[OMP_ST]]
// CHECK-NEXT: [[ADD_LB:%.+]] = add i32 [[LB]], [[ST]]
// CHECK-NEXT: store i32 [[ADD_LB]], i32* [[OMP_LB]]
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[ST:%.+]] = load i32, i32* [[OMP_ST]]
// CHECK-NEXT: [[ADD_UB:%.+]] = add i32 [[UB]], [[ST]]
// CHECK-NEXT: store i32 [[ADD_UB]], i32* [[OMP_UB]]

// CHECK: [[O_LOOP1_END]]
// CHECK: call void @__kmpc_for_static_fini([[IDENT_T_TY]]* [[LOOP_LOC]], i32 [[GTID]])
// CHECK: ret void
}

// CHECK-LABEL: define {{.*void}} @{{.*}}dynamic1{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void dynamic1(float *a, float *b, float *c, float *d) {
#pragma omp parallel for schedule(dynamic)
// CHECK: call void ([[IDENT_T_TY]]*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* [[OMP_PARALLEL_FUNC:@.+]] to void (i32*, i32*, ...)*),
// CHECK: define internal void [[OMP_PARALLEL_FUNC]](i32* noalias [[GTID_PARAM_ADDR:%.+]], i32* noalias %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}})
// CHECK: store i32* [[GTID_PARAM_ADDR]], i32** [[GTID_REF_ADDR:%.+]],
// CHECK: call void @__kmpc_dispatch_init_8u([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID:%.+]], i32 35, i64 0, i64 16908287, i64 1, i64 1)
//
// CHECK: [[HASWORK:%.+]] = call i32 @__kmpc_dispatch_next_8u([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32* [[OMP_ISLAST:%[^,]+]], i64* [[OMP_LB:%[^,]+]], i64* [[OMP_UB:%[^,]+]], i64* [[OMP_ST:%[^,]+]])
// CHECK-NEXT: [[O_CMP:%.+]] = icmp ne i32 [[HASWORK]], 0
// CHECK-NEXT: br i1 [[O_CMP]], label %[[O_LOOP1_BODY:[^,]+]], label %[[O_LOOP1_END:[^,]+]]

// Loop header
// CHECK: [[O_LOOP1_BODY]]
// CHECK: [[LB:%.+]] = load i64, i64* [[OMP_LB]]
// CHECK-NEXT: store i64 [[LB]], i64* [[OMP_IV:[^,]+]]
// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]

// CHECK-NEXT: [[UB:%.+]] = load i64, i64* [[OMP_UB]]
// CHECK-NEXT: [[BOUND:%.+]] = add i64 [[UB]], 1
// CHECK-NEXT: [[CMP:%.+]] = icmp ult i64 [[IV]], [[BOUND]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (unsigned long long i = 131071; i < 2147483647; i += 127) {
// CHECK: [[LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i64, i64* [[OMP_IV]]
// CHECK-NEXT: [[CALC_I_1:%.+]] = mul i64 [[IV1_1]], 127
// CHECK-NEXT: [[CALC_I_2:%.+]] = add i64 131071, [[CALC_I_1]]
// CHECK-NEXT: store i64 [[CALC_I_2]], i64* [[LC_I:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add i64 [[IV1_2]], 1
// CHECK-NEXT: store i64 [[ADD1_2]], i64* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
  }
// CHECK: [[LOOP1_END]]
// CHECK: [[O_LOOP1_END]]
// CHECK: ret void
}

// CHECK-LABEL: define {{.*void}} @{{.*}}guided7{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void guided7(float *a, float *b, float *c, float *d) {
#pragma omp parallel for schedule(guided, 7)
// CHECK: call void ([[IDENT_T_TY]]*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* [[OMP_PARALLEL_FUNC:@.+]] to void (i32*, i32*, ...)*),
// CHECK: define internal void [[OMP_PARALLEL_FUNC]](i32* noalias [[GTID_PARAM_ADDR:%.+]], i32* noalias %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}})
// CHECK: store i32* [[GTID_PARAM_ADDR]], i32** [[GTID_REF_ADDR:%.+]],
// CHECK: call void @__kmpc_dispatch_init_8u([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID:%.+]], i32 36, i64 0, i64 16908287, i64 1, i64 7)
//
// CHECK: [[HASWORK:%.+]] = call i32 @__kmpc_dispatch_next_8u([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32* [[OMP_ISLAST:%[^,]+]], i64* [[OMP_LB:%[^,]+]], i64* [[OMP_UB:%[^,]+]], i64* [[OMP_ST:%[^,]+]])
// CHECK-NEXT: [[O_CMP:%.+]] = icmp ne i32 [[HASWORK]], 0
// CHECK-NEXT: br i1 [[O_CMP]], label %[[O_LOOP1_BODY:[^,]+]], label %[[O_LOOP1_END:[^,]+]]

// Loop header
// CHECK: [[O_LOOP1_BODY]]
// CHECK: [[LB:%.+]] = load i64, i64* [[OMP_LB]]
// CHECK-NEXT: store i64 [[LB]], i64* [[OMP_IV:[^,]+]]
// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]

// CHECK-NEXT: [[UB:%.+]] = load i64, i64* [[OMP_UB]]
// CHECK-NEXT: [[BOUND:%.+]] = add i64 [[UB]], 1
// CHECK-NEXT: [[CMP:%.+]] = icmp ult i64 [[IV]], [[BOUND]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (unsigned long long i = 131071; i < 2147483647; i += 127) {
// CHECK: [[LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i64, i64* [[OMP_IV]]
// CHECK-NEXT: [[CALC_I_1:%.+]] = mul i64 [[IV1_1]], 127
// CHECK-NEXT: [[CALC_I_2:%.+]] = add i64 131071, [[CALC_I_1]]
// CHECK-NEXT: store i64 [[CALC_I_2]], i64* [[LC_I:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add i64 [[IV1_2]], 1
// CHECK-NEXT: store i64 [[ADD1_2]], i64* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
  }
// CHECK: [[LOOP1_END]]
// CHECK: [[O_LOOP1_END]]
// CHECK: ret void
}

// CHECK-LABEL: define {{.*void}} @{{.*}}test_auto{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void test_auto(float *a, float *b, float *c, float *d) {
  unsigned int x = 0;
  unsigned int y = 0;
#pragma omp parallel for schedule(auto) collapse(2)
// CHECK: call void ([[IDENT_T_TY]]*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, float**, float**, float**, float**)* [[OMP_PARALLEL_FUNC:@.+]] to void (i32*, i32*, ...)*),
// CHECK: define internal void [[OMP_PARALLEL_FUNC]](i32* noalias [[GTID_PARAM_ADDR:%.+]], i32* noalias %{{.+}}, i32* dereferenceable(4) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}})
// CHECK: store i32* [[GTID_PARAM_ADDR]], i32** [[GTID_REF_ADDR:%.+]],
// CHECK: call void @__kmpc_dispatch_init_8([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID:%.+]], i32 38, i64 0, i64 [[LAST_ITER:%[^,]+]], i64 1, i64 1)
//
// CHECK: [[HASWORK:%.+]] = call i32 @__kmpc_dispatch_next_8([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32* [[OMP_ISLAST:%[^,]+]], i64* [[OMP_LB:%[^,]+]], i64* [[OMP_UB:%[^,]+]], i64* [[OMP_ST:%[^,]+]])
// CHECK-NEXT: [[O_CMP:%.+]] = icmp ne i32 [[HASWORK]], 0
// CHECK-NEXT: br i1 [[O_CMP]], label %[[O_LOOP1_BODY:[^,]+]], label %[[O_LOOP1_END:[^,]+]]

// Loop header
// CHECK: [[O_LOOP1_BODY]]
// CHECK: [[LB:%.+]] = load i64, i64* [[OMP_LB]]
// CHECK-NEXT: store i64 [[LB]], i64* [[OMP_IV:[^,]+]]
// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]

// CHECK-NEXT: [[UB:%.+]] = load i64, i64* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i64 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
// FIXME: When the iteration count of some nested loop is not a known constant,
// we should pre-calculate it, like we do for the total number of iterations!
  for (char i = static_cast<char>(y); i <= '9'; ++i)
    for (x = 11; x > 0; --x) {
// CHECK: [[LOOP1_BODY]]
// Start of body: indices are calculated from IV:
// CHECK: store i8 {{%[^,]+}}, i8* {{%[^,]+}}
// CHECK: store i32 {{%[^,]+}}, i32* {{%[^,]+}}
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
      a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i64 [[IV1_2]], 1
// CHECK-NEXT: store i64 [[ADD1_2]], i64* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
    }
// CHECK: [[LOOP1_END]]
// CHECK: [[O_LOOP1_END]]
// CHECK: ret void
}

// CHECK-LABEL: define {{.*void}} @{{.*}}runtime{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void runtime(float *a, float *b, float *c, float *d) {
  int x = 0;
#pragma omp parallel for collapse(2) schedule(runtime)
// CHECK: call void ([[IDENT_T_TY]]*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call([[IDENT_T_TY]]* [[DEFAULT_LOC:[@%].+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, float**, float**, float**)* [[OMP_PARALLEL_FUNC:@.+]] to void (i32*, i32*, ...)*),
// CHECK: define internal void [[OMP_PARALLEL_FUNC]](i32* noalias [[GTID_PARAM_ADDR:%.+]], i32* noalias %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}}, float** dereferenceable(8) %{{.+}})
// CHECK: store i32* [[GTID_PARAM_ADDR]], i32** [[GTID_REF_ADDR:%.+]],
// CHECK: call void @__kmpc_dispatch_init_4([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID:%.+]], i32 37, i32 0, i32 199, i32 1, i32 1)
//
// CHECK: [[HASWORK:%.+]] = call i32 @__kmpc_dispatch_next_4([[IDENT_T_TY]]* [[DEFAULT_LOC]], i32 [[GTID]], i32* [[OMP_ISLAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]])
// CHECK-NEXT: [[O_CMP:%.+]] = icmp ne i32 [[HASWORK]], 0
// CHECK-NEXT: br i1 [[O_CMP]], label %[[O_LOOP1_BODY:[^,]+]], label %[[O_LOOP1_END:[^,]+]]

// Loop header
// CHECK: [[O_LOOP1_BODY]]
// CHECK: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]
// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]

// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB]]
// CHECK-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (unsigned char i = '0' ; i <= '9'; ++i)
    for (x = -10; x < 10; ++x) {
// CHECK: [[LOOP1_BODY]]
// Start of body: indices are calculated from IV:
// CHECK: store i8 {{%[^,]+}}, i8* {{%[^,]+}}
// CHECK: store i32 {{%[^,]+}}, i32* {{%[^,]+}}
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}
      a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]
// CHECK-NEXT: br label %{{.+}}
    }
// CHECK: [[LOOP1_END]]
// CHECK: [[O_LOOP1_END]]
// CHECK: ret void
}

// TERM_DEBUG-LABEL: foo
int foo() {return 0;};

// TERM_DEBUG-LABEL: parallel_for
// CLEANUP: parallel_for
void parallel_for(float *a, const int n) {
  float arr[n];
#pragma omp parallel for schedule(static, 5) private(arr) default(none) firstprivate(n) shared(a)
// TERM_DEBUG-NOT: __kmpc_global_thread_num
// TERM_DEBUG: call void @__kmpc_for_static_init_4u({{.+}}), !dbg [[DBG_LOC_START:![0-9]+]]
// TERM_DEBUG: invoke i32 {{.*}}foo{{.*}}()
// TERM_DEBUG: unwind label %[[TERM_LPAD:.+]],
// TERM_DEBUG-NOT: __kmpc_global_thread_num
// TERM_DEBUG: call void @__kmpc_for_static_fini({{.+}}), !dbg [[DBG_LOC_END:![0-9]+]]
// TERM_DEBUG: [[TERM_LPAD]]
// TERM_DEBUG: call void @__clang_call_terminate
// TERM_DEBUG: unreachable
// CLEANUP-NOT: __kmpc_global_thread_num
// CLEANUP: call void @__kmpc_for_static_init_4u({{.+}})
// CLEANUP: call void @__kmpc_for_static_fini({{.+}})
  for (unsigned i = 131071; i <= 2147483647; i += 127)
    a[i] += foo() + arr[i] + n;
}
// Check source line corresponds to "#pragma omp parallel for schedule(static, 5)" above:
// TERM_DEBUG-DAG: [[DBG_LOC_START]] = !DILocation(line: [[@LINE-4]],
// TERM_DEBUG-DAG: [[DBG_LOC_END]] = !DILocation(line: [[@LINE-18]],

#else // OMP5
// OMP5-LABEL: increment
int increment () {
// OMP5: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[DEFAULT_LOC:[@%].+]])
#pragma omp for
// Determine UB = min(UB, GlobalUB)
// OMP5: call void @__kmpc_for_static_init_4(%struct.ident_t* [[LOOP_LOC:[@%].+]], i32 [[GTID]], i32 34, i32* [[IS_LAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 1)
// OMP5-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// OMP5-NEXT: [[UBCMP:%.+]] = icmp sgt i32 [[UB]], 4
// OMP5-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]]
// OMP5: [[UBRESULT:%.+]] = phi i32 [ 4, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ]
// OMP5-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]]
// OMP5-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// OMP5-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]
// OMP5-NEXT: br label %[[LOOP1_HEAD:.+]]

// Loop header
// OMP5: [[LOOP1_HEAD]]
// OMP5: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// OMP5-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// OMP5-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB]]
// OMP5-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]

  for (int i = 0 ; i != 5; ++i)
// Start of body: calculate i from IV:
// OMP5: [[LOOP1_BODY]]
// OMP5: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]
// OMP5-NEXT: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 1
// OMP5-NEXT: [[CALC_I_2:%.+]] = add nsw i32 0, [[CALC_I_1]]
// OMP5-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// OMP5: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}
// OMP5-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1
// OMP5-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]
// OMP5-NEXT: br label %[[LOOP1_HEAD]]
    ;
// OMP5: [[LOOP1_END]]
// OMP5: call void @__kmpc_for_static_fini(%struct.ident_t* [[LOOP_LOC]], i32 [[GTID]])
// OMP5: __kmpc_barrier
  return 0;
// OMP5: ret i32 0
}

// OMP5-LABEL: decrement_nowait
int decrement_nowait () {
// OMP5: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[DEFAULT_LOC:[@%].+]])
#pragma omp for nowait
// Determine UB = min(UB, GlobalUB)
// OMP5: call void @__kmpc_for_static_init_4(%struct.ident_t* [[LOOP_LOC]], i32 [[GTID]], i32 34, i32* [[IS_LAST:%[^,]+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 1)
// OMP5-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// OMP5-NEXT: [[UBCMP:%.+]] = icmp sgt i32 [[UB]], 4
// OMP5-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]]
// OMP5: [[UBRESULT:%.+]] = phi i32 [ 4, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ]
// OMP5-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]]
// OMP5-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// OMP5-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]
// OMP5-NEXT: br label %[[LOOP1_HEAD:.+]]

// Loop header
// OMP5: [[LOOP1_HEAD]]
// OMP5: [[IV:%.+]] = load i32, i32* [[OMP_IV]]
// OMP5-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// OMP5-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB]]
// OMP5-NEXT: br i1 [[CMP]], label %[[LOOP1_BODY:[^,]+]], label %[[LOOP1_END:[^,]+]]
  for (int j = 5 ; j != 0; --j)
// Start of body: calculate i from IV:
// OMP5: [[LOOP1_BODY]]
// OMP5: [[IV2_1:%.+]] = load i32, i32* [[OMP_IV]]
// OMP5-NEXT: [[CALC_II_1:%.+]] = mul nsw i32 [[IV2_1]], 1
// OMP5-NEXT: [[CALC_II_2:%.+]] = sub nsw i32 5, [[CALC_II_1]]
// OMP5-NEXT: store i32 [[CALC_II_2]], i32* [[LC_I:.+]]
// OMP5: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}
// OMP5-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV2_2]], 1
// OMP5-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV]]
// OMP5-NEXT: br label %[[LOOP1_HEAD]]
    ;
// OMP5: [[LOOP1_END]]
// OMP5: call void @__kmpc_for_static_fini(%struct.ident_t* [[LOOP_LOC]], i32 [[GTID]])
// OMP5-NOT: __kmpc_barrier
  return 0;
// OMP5: ret i32 0
}
#endif // OMP5

#endif // HEADER