// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// expected-no-diagnostics
// REQUIRES: x86-registered-target
//
// Tests IR generation for the 'lastprivate' clause on '#pragma omp for':
// per-thread private copies, the copy-back to the original variables guarded
// by the "is last iteration" flag, and the implicit barrier afterwards.
// Covered variants include class members (bit-field and reference members)
// inside a constructor, captures in lambdas and in blocks, loop control
// variables used as lastprivate, and aligned/volatile globals.
#ifndef HEADER
#define HEADER

// Class whose constructor runs worksharing loops with its data members
// (plain int, 4-bit bit-field, reference member) listed as lastprivate.
struct SS {
  int a;
  int b : 4;
  int &c;
  SS(int &d) : a(0), b(0), c(d) {
#pragma omp parallel
#pragma omp for lastprivate(a, b, c)
    for (int i = 0; i < 2; ++i)
#ifdef LAMBDA
      [&]() {
        ++this->a, --b, (this)->c /= 1;
#pragma omp parallel
#pragma omp for lastprivate(a, b, c)
        for (int i = 0; i < 2; ++i)
          ++(this)->a, --b, this->c /= 1;
      }();
#elif defined(BLOCKS)
      ^{
        ++a;
        --this->b;
        (this)->c /= 1;
#pragma omp parallel
#pragma omp for lastprivate(a, b, c)
        for (int i = 0; i < 2; ++i)
          ++(this)->a, --b, this->c /= 1;
      }();
#else
      ++this->a, --b, c /= 1;
#endif
#pragma omp for
    for (a = 0; a < 2; ++a)
#ifdef LAMBDA
      [&]() {
        ++this->a, --b, (this)->c /= 1;
#pragma omp parallel
#pragma omp for lastprivate(b)
        for (b = 0; b < 2; ++b)
          ++(this)->a, --b, this->c /= 1;
      }();
#elif defined(BLOCKS)
      ^{
        ++a;
        --this->b;
        (this)->c /= 1;
#pragma omp parallel
#pragma omp for
        for (c = 0; c < 2; ++c)
          ++(this)->a, --b, this->c /= 1;
      }();
#else
      ++this->a, --b, c /= 1;
#endif
  }
};

// Templated variant of the same pattern, with nested lambdas/blocks and the
// member itself used as the loop control variable.
template <typename T>
struct SST {
  T a;
  SST() : a(T()) {
#pragma omp parallel
#pragma omp for lastprivate(a)
    for (int i = 0; i < 2; ++i)
#ifdef LAMBDA
      [&]() {
        [&]() {
          ++this->a;
#pragma omp parallel
#pragma omp for lastprivate(a)
          for (int i = 0; i < 2; ++i)
            ++(this)->a;
        }();
      }();
#elif defined(BLOCKS)
      ^{
        ^{
          ++a;
#pragma omp parallel
#pragma omp for lastprivate(a)
          for (int i = 0; i < 2; ++i)
            ++(this)->a;
        }();
      }();
#else
      ++(this)->a;
#endif
#pragma omp for
    for (a = 0; a < 2; ++a)
#ifdef LAMBDA
      [&]() {
        ++this->a;
#pragma omp parallel
#pragma omp for
        for (a = 0; a < 2; ++(this)->a)
          ++(this)->a;
      }();
#elif defined(BLOCKS)
      ^{
        ++a;
#pragma omp parallel
#pragma omp for
        for (this->a = 0; a < 2; ++a)
          ++(this)->a;
      }();
#else
      ++(this)->a;
#endif
  }
};

// Non-trivial class type: default/converting constructors, user-declared copy
// assignment and destructor, so lastprivate copy-back must call operator=.
template <class T>
struct S {
  T f;
  S(T a) : f(a) {}
  S() : f() {}
  S<T> &operator=(const S<T> &);
  operator T() { return T(); }
  ~S() {}
};

volatile int g __attribute__((aligned(128)))= 1212;
volatile int &g1 = g;
float f;
char cnt;

// CHECK: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// LAMBDA: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// BLOCKS: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK: [[S_INT_TY:%.+]] = type { i32 }
// CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
// CHECK-DAG: [[X:@.+]] = global double 0.0
// CHECK-DAG: [[F:@.+]] = global float 0.0
// CHECK-DAG: [[CNT:@.+]] = global i8 0
template <typename T>
T tmain() {
  S<T> test;
  SST<T> sst;
  T t_var __attribute__((aligned(128))) = T();
  T vec[] __attribute__((aligned(128))) = {1, 2};
  S<T> s_arr[] __attribute__((aligned(128))) = {1, 2};
  S<T> &var __attribute__((aligned(128))) = test;
#pragma omp parallel
#pragma omp for lastprivate(t_var, vec, s_arr, var)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
  return T();
}

namespace A {
double x;
}
namespace B {
using A::x;
}

int main() {
  static int sivar;
  SS ss(sivar);
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // LAMBDA: [[SIVAR:@.+]] = internal global i{{[0-9]+}} 0,
  // LAMBDA-LABEL: @main
  // LAMBDA: alloca [[SS_TY]],
  // LAMBDA: alloca [[CAP_TY:%.+]],
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]]([[CAP_TY]]*
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i32* %{{.+}})
#pragma omp parallel
#pragma omp for lastprivate(g, g1, sivar)
  for (int i = 0; i < 2; ++i) {
    // LAMBDA: define {{.+}} @{{.+}}([[SS_TY]]*
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
    // LAMBDA: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
    // LAMBDA: store i8
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
    // LAMBDA: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
    // LAMBDA: call void @__kmpc_for_static_init_4(
    // LAMBDA-NOT: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
    // LAMBDA: call void {{.+}} [[SS_LAMBDA:@[^ ]+]]
    // LAMBDA: call void @__kmpc_for_static_fini(%
    // LAMBDA: ret

    // LAMBDA: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
    // LAMBDA: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 0
    // LAMBDA-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
    // LAMBDA: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 2
    // LAMBDA: call void @__kmpc_for_static_init_4(
    // LAMBDA-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]*
    // LAMBDA: call{{.*}} void
    // LAMBDA: call void @__kmpc_for_static_fini(
    // LAMBDA: br i1
    // LAMBDA: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
    // LAMBDA: store i8 %{{.+}}, i8* [[B_REF]],
    // LAMBDA: br label
    // LAMBDA: ret void

    // LAMBDA: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, i32* {{.+}}, i32* {{.+}}, i32* {{.+}})
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
    // LAMBDA: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
    // LAMBDA: call void @__kmpc_for_static_init_4(
    // LAMBDA: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
    // LAMBDA-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
    // LAMBDA-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
    // LAMBDA-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
    // LAMBDA-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
    // LAMBDA-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
    // LAMBDA-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
    // LAMBDA-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
    // LAMBDA: call void @__kmpc_for_static_fini(
    // LAMBDA: br i1
    // LAMBDA: br label
    // LAMBDA: ret void

    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* dereferenceable(4) [[SIVAR:%.+]])
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, align 128
    // LAMBDA: [[G1_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[SIVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[SIVAR_PRIVATE_ADDR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}},

    // LAMBDA: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // LAMBDA: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]

    // LAMBDA: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // LAMBDA: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store i{{[0-9]+}}* [[G_PRIVATE_ADDR]], i{{[0-9]+}}** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: [[SIVAR_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
    // LAMBDA: store i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]], i{{[0-9]+}}** [[SIVAR_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
    // LAMBDA: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g = 1;
    g1 = 1;
    sivar = 2;
    // Check for final copying of private values back to original vars.
    // LAMBDA: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
    // LAMBDA: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
    // LAMBDA: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
    // LAMBDA: [[LAST_THEN]]
    // Actual copying.

    // original g=private_g;
    // LAMBDA: [[G_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G]],

    // original sivar=private_sivar;
    // LAMBDA: [[SIVAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]],
    // LAMBDA: store i{{[0-9]+}} [[SIVAR_VAL]], i{{[0-9]+}}* %{{.+}},
    // LAMBDA: br label %[[LAST_DONE]]
    // LAMBDA: [[LAST_DONE]]
    // LAMBDA: call void @__kmpc_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      g1 = 2;
      sivar = 4;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_REF]]
      // LAMBDA: [[SIVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
      // LAMBDA: [[SIVAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SIVAR_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 4, i{{[0-9]+}}* [[SIVAR_REF]]
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // BLOCKS-LABEL: @main
  // BLOCKS: call
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp for lastprivate(g, g1, sivar)
  for (int i = 0; i < 2; ++i) {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* dereferenceable(4) [[SIVAR:%.+]])
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, align 128
    // BLOCKS: [[G1_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}, align 4
    // BLOCKS: [[SIVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // BLOCKS: store i{{[0-9]+}}* [[SIVAR]], i{{[0-9]+}}** [[SIVAR_ADDR:%.+]],
    // BLOCKS: {{.+}} = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SIVAR_ADDR]]
    // BLOCKS: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // BLOCKS: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
    // BLOCKS: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // BLOCKS: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: i{{[0-9]+}}* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: call void {{%.+}}(i8
    // BLOCKS: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g = 1;
    g1 = 1;
    sivar = 2;
    // Check for final copying of private values back to original vars.
    // BLOCKS: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
    // BLOCKS: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
    // BLOCKS: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
    // BLOCKS: [[LAST_THEN]]
    // Actual copying.

    // original g=private_g;
    // BLOCKS: [[G_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G]],
    // BLOCKS: [[SIVAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR_PRIVATE_ADDR]],
    // BLOCKS: store i{{[0-9]+}} [[SIVAR_VAL]], i{{[0-9]+}}* %{{.+}},
    // BLOCKS: br label %[[LAST_DONE]]
    // BLOCKS: [[LAST_DONE]]
    // BLOCKS: call void @__kmpc_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
    g = 1;
    g1 = 1;
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      g1 = 1;
      sivar = 4;
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: store i{{[0-9]+}} 2, i{{[0-9]+}}*
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS-NOT: [[SIVAR]]{{[[^:word:]]}}
      // BLOCKS: store i{{[0-9]+}} 4, i{{[0-9]+}}*
      // BLOCKS-NOT: [[SIVAR]]{{[[^:word:]]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
  // BLOCKS: define {{.+}} @{{.+}}([[SS_TY]]*
  // BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
  // BLOCKS: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
  // BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
  // BLOCKS: store i8
  // BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
  // BLOCKS: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
  // BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
  // BLOCKS: call void @__kmpc_for_static_init_4(
  // BLOCKS-NOT: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
  // BLOCKS: call void
  // BLOCKS: call void @__kmpc_for_static_fini(%
  // BLOCKS: ret

  // BLOCKS: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
  // BLOCKS: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 0
  // BLOCKS-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
  // BLOCKS: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 2
  // BLOCKS: call void @__kmpc_for_static_init_4(
  // BLOCKS-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]*
  // BLOCKS: call{{.*}} void
  // BLOCKS: call void @__kmpc_for_static_fini(
  // BLOCKS: br i1
  // BLOCKS: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
  // BLOCKS: store i8 %{{.+}}, i8* [[B_REF]],
  // BLOCKS: br label
  // BLOCKS: ret void

  // BLOCKS: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, i32* {{.+}}, i32* {{.+}}, i32* {{.+}})
  // BLOCKS: alloca i{{[0-9]+}},
  // BLOCKS: alloca i{{[0-9]+}},
  // BLOCKS: alloca i{{[0-9]+}},
  // BLOCKS: alloca i{{[0-9]+}},
  // BLOCKS: alloca i{{[0-9]+}},
  // BLOCKS: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
  // BLOCKS: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
  // BLOCKS: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
  // BLOCKS: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
  // BLOCKS: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
  // BLOCKS: call void @__kmpc_for_static_init_4(
  // BLOCKS: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
  // BLOCKS-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
  // BLOCKS-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
  // BLOCKS-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
  // BLOCKS-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
  // BLOCKS-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
  // BLOCKS-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
  // BLOCKS-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
  // BLOCKS-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
  // BLOCKS-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
  // BLOCKS-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
  // BLOCKS: call void @__kmpc_for_static_fini(
  // BLOCKS: br i1
  // BLOCKS: br label
  // BLOCKS: ret void
#else
  S<float> test;
  int t_var = 0;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3);
#pragma omp parallel
#pragma omp for lastprivate(t_var, vec, s_arr, var, sivar)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
    sivar += i;
  }
#pragma omp parallel
#pragma omp for lastprivate(A::x, B::x) firstprivate(f) lastprivate(f)
  for (int i = 0; i < 2; ++i) {
    A::x++;
  }
#pragma omp parallel
#pragma omp for firstprivate(f) lastprivate(f)
  for (int i = 0; i < 2; ++i) {
    A::x++;
  }
#pragma omp parallel
#pragma omp for lastprivate(cnt)
  for (cnt = 0; cnt < 2; ++cnt) {
    A::x++;
  }
  return tmain<int>();
#endif
}

// CHECK: define i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 5, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [2 x i32]*, [2 x [[S_FLOAT_TY]]]*, [[S_FLOAT_TY]]*, i32*)* [[MAIN_MICROTASK:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[MAIN_MICROTASK1:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[MAIN_MICROTASK2:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[MAIN_MICROTASK3:@.+]] to void
// CHECK: = call {{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call void [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret

// CHECK: define internal void [[MAIN_MICROTASK]](i32* noalias [[GTID_ADDR:%.+]], i32* noalias %{{.+}}, i32* dereferenceable(4) %{{.+}}, [2 x i32]* dereferenceable(8) %{{.+}}, [2 x [[S_FLOAT_TY]]]* dereferenceable(8) %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}],
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_FLOAT_TY]]],
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: [[SIVAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VEC_REF:%.+]] = load [2 x i32]*, [2 x i32]** %
// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_FLOAT_TY]]]*, [2 x [[S_FLOAT_TY]]]** %
// CHECK: [[VAR_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** %

// Check for default initialization.
// CHECK-NOT: [[T_VAR_PRIV]]
// CHECK-NOT: [[VEC_PRIV]]
// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_FLOAT_TY]]*
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[S_ARR_PRIV_ITEM]])
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 %{{.+}}, i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original t_var=private_t_var;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_REF]],

// original vec[]=private_vec[];
// CHECK: [[VEC_DEST:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_REF]] to i8*
// CHECK: [[VEC_SRC:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[VEC_DEST]], i8* [[VEC_SRC]],

// original s_arr[]=private_s_arr[];
// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[S_ARR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = bitcast [2 x [[S_FLOAT_TY]]]* [[S_ARR_PRIV]] to [[S_FLOAT_TY]]*
// CHECK: [[S_ARR_END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2
// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_END]]
// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
// CHECK: [[S_ARR_BODY]]
// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN:@.+]]([[S_FLOAT_TY]]* {{.+}}, [[S_FLOAT_TY]]* {{.+}})
// CHECK: br i1 {{.+}}, label %[[S_ARR_BODY_DONE]], label %[[S_ARR_BODY]]
// CHECK: [[S_ARR_BODY_DONE]]

// original var=private_var;
// CHECK: call {{.*}} [[S_FLOAT_TY_COPY_ASSIGN:@.+]]([[S_FLOAT_TY]]* [[VAR_REF]], [[S_FLOAT_TY]]* {{.*}} [[VAR_PRIV]])
// CHECK: [[SIVAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR_PRIV]],
// CHECK: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]
// CHECK-DAG: call void [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call void [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

//
// CHECK: define internal void [[MAIN_MICROTASK1]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
// CHECK: [[F_PRIV:%.+]] = alloca float,
// CHECK-NOT: alloca float
// CHECK: [[X_PRIV:%.+]] = alloca double,
// CHECK-NOT: alloca float
// CHECK-NOT: alloca double

// Check for default initialization.
// CHECK-NOT: [[X_PRIV]]
// CHECK: [[F_VAL:%.+]] = load float, float* [[F]],
// CHECK: store float [[F_VAL]], float* [[F_PRIV]],
// CHECK-NOT: [[X_PRIV]]

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original x=private_x;
// CHECK: [[X_VAL:%.+]] = load double, double* [[X_PRIV]],
// CHECK: store double [[X_VAL]], double* [[X]],

// original f=private_f;
// CHECK: [[F_VAL:%.+]] = load float, float* [[F_PRIV]],
// CHECK: store float [[F_VAL]], float* [[F]],

// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

// CHECK: define internal void [[MAIN_MICROTASK2]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
// CHECK: [[F_PRIV:%.+]] = alloca float,
// CHECK-NOT: alloca float

// Check for default initialization.
// CHECK: [[F_VAL:%.+]] = load float, float* [[F]],
// CHECK: store float [[F_VAL]], float* [[F_PRIV]],

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original f=private_f;
// CHECK: [[F_VAL:%.+]] = load float, float* [[F_PRIV]],
// CHECK: store float [[F_VAL]], float* [[F]],

// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

// CHECK: define internal void [[MAIN_MICROTASK3]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
// CHECK: [[CNT_PRIV:%.+]] = alloca i8,

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* [[OMP_LB:%[^,]+]], i32* [[OMP_UB:%[^,]+]], i32* [[OMP_ST:%[^,]+]], i32 1, i32 1)
// UB = min(UB, GlobalUB)
// CHECK-NEXT: [[UB:%.+]] = load i32, i32* [[OMP_UB]]
// CHECK-NEXT: [[UBCMP:%.+]] = icmp sgt i32 [[UB]], 1
// CHECK-NEXT: br i1 [[UBCMP]], label [[UB_TRUE:%[^,]+]], label [[UB_FALSE:%[^,]+]]
// CHECK: [[UBRESULT:%.+]] = phi i32 [ 1, [[UB_TRUE]] ], [ [[UBVAL:%[^,]+]], [[UB_FALSE]] ]
// CHECK-NEXT: store i32 [[UBRESULT]], i32* [[OMP_UB]]
// CHECK-NEXT: [[LB:%.+]] = load i32, i32* [[OMP_LB]]
// CHECK-NEXT: store i32 [[LB]], i32* [[OMP_IV:[^,]+]]
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]

// Calculate private cnt value.
// CHECK: store i8 2, i8* [[CNT_PRIV]]
// original cnt=private_cnt;
// CHECK: [[CNT_VAL:%.+]] = load i8, i8* [[CNT_PRIV]],
// CHECK: store i8 [[CNT_VAL]], i8* [[CNT]],

// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [2 x i32]*, [2 x [[S_INT_TY]]]*, [[S_INT_TY]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call void [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret

// CHECK: define {{.+}} @{{.+}}([[SS_TY]]*
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
// CHECK: store i8
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// CHECK: call void @__kmpc_for_static_init_4(
// CHECK-NOT: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
// CHECK: call void @__kmpc_for_static_fini(%
// CHECK: ret

// CHECK: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
// CHECK: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
// CHECK: call void @__kmpc_for_static_init_4(
// CHECK: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
// CHECK-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
// CHECK-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
// CHECK-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
// CHECK-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
// CHECK-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
// CHECK-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
// CHECK-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
// CHECK-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
// CHECK-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
// CHECK-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
// CHECK: call void @__kmpc_for_static_fini(
// CHECK: br i1
// CHECK: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
// CHECK: store i8 %{{.+}}, i8* [[B_REF]],
// CHECK: br label
// CHECK: ret void

// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i32* dereferenceable(4) %{{.+}}, [2 x i32]* dereferenceable(8) %{{.+}}, [2 x [[S_INT_TY]]]* dereferenceable(8) %{{.+}}, [[S_INT_TY]]* dereferenceable(4) %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}}, align 128
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}], align 128
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_INT_TY]]], align 128
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]], align 128
// CHECK: [[VAR_PRIV_REF:%.+]] = alloca [[S_INT_TY]]*,
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VEC_REF:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** %
// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_INT_TY]]]*, [2 x [[S_INT_TY]]]** %

// Check for default initialization.
710 // CHECK-NOT: [[T_VAR_PRIV]] 711 // CHECK-NOT: [[VEC_PRIV]] 712 // CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_INT_TY]]* 713 // CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[S_ARR_PRIV_ITEM]]) 714 // CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** % 715 // CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[VAR_PRIV]]) 716 // CHECK: store [[S_INT_TY]]* [[VAR_PRIV]], [[S_INT_TY]]** [[VAR_PRIV_REF]] 717 // CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 %{{.+}}, i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1) 718 // <Skip loop body> 719 // CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}}) 720 721 // Check for final copying of private values back to original vars. 722 // CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]], 723 // CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0 724 // CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]] 725 // CHECK: [[LAST_THEN]] 726 // Actual copying. 
727 728 // original t_var=private_t_var; 729 // CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]], 730 // CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_REF]], 731 732 // original vec[]=private_vec[]; 733 // CHECK: [[VEC_DEST:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_REF]] to i8* 734 // CHECK: [[VEC_SRC:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8* 735 // CHECK: call void @llvm.memcpy.{{.+}}(i8* [[VEC_DEST]], i8* [[VEC_SRC]], 736 737 // original s_arr[]=private_s_arr[]; 738 // CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[S_ARR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 739 // CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = bitcast [2 x [[S_INT_TY]]]* [[S_ARR_PRIV]] to [[S_INT_TY]]* 740 // CHECK: [[S_ARR_END:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2 741 // CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_INT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_END]] 742 // CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]] 743 // CHECK: [[S_ARR_BODY]] 744 // CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* {{.+}}, [[S_INT_TY]]* {{.+}}) 745 // CHECK: br i1 {{.+}}, label %[[S_ARR_BODY_DONE]], label %[[S_ARR_BODY]] 746 // CHECK: [[S_ARR_BODY_DONE]] 747 748 // original var=private_var; 749 // CHECK: [[VAR_PRIV1:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[VAR_PRIV_REF]], 750 // CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* {{.*}} [[VAR_PRIV1]]) 751 // CHECK: br label %[[LAST_DONE]] 752 // CHECK: [[LAST_DONE]] 753 // CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]]) 754 // CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]* 755 // CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]] 756 // CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]] 757 // CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]]) 758 // CHECK: 
ret void 759 #endif 760 761