// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

template <class T>
struct S {
  T f;
  S(T a) : f(a) {}
  S() : f() {}
  S<T> &operator=(const S<T> &);
  operator T() { return T(); }
  ~S() {}
};

volatile int g = 1212;
volatile int &g1 = g;
float f;
char cnt;

struct SS {
  int a;
  int b : 4;
  int &c;
  SS(int &d) : a(0), b(0), c(d) {
#pragma omp parallel
#pragma omp for linear(a, b, c)
    for (int i = 0; i < 2; ++i)
#ifdef LAMBDA
      [&]() {
        ++this->a, --b, (this)->c /= 1;
#pragma omp parallel
#pragma omp for linear(a, b) linear(ref(c))
        for (int i = 0; i < 2; ++i)
          ++(this)->a, --b, this->c /= 1;
      }();
#elif defined(BLOCKS)
      ^{
        ++a;
        --this->b;
        (this)->c /= 1;
#pragma omp parallel
#pragma omp for linear(a, b) linear(uval(c))
        for (int i = 0; i < 2; ++i)
          ++(this)->a, --b, this->c /= 1;
      }();
#else
      ++this->a, --b, c /= 1;
#endif
  }
};

template <typename T>
struct SST {
  T a;
  SST() : a(T()) {
#pragma omp parallel
#pragma omp for linear(a)
    for (int i = 0; i < 2; ++i)
#ifdef LAMBDA
      [&]() {
        [&]() {
          ++this->a;
#pragma omp parallel
#pragma omp for linear(a)
          for (int i = 0; i < 2; ++i)
            ++(this)->a;
        }();
      }();
#elif defined(BLOCKS)
      ^{
        ^{
          ++a;
#pragma omp parallel
#pragma omp for linear(a)
          for (int i = 0; i < 2; ++i)
            ++(this)->a;
        }();
      }();
#else
      ++(this)->a;
#endif
  }
};

// CHECK: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// LAMBDA: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// BLOCKS: [[SS_TY:%.+]] = type { i{{[0-9]+}}, i8
// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK: [[S_INT_TY:%.+]] = type { i32 }
// CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
// CHECK-DAG: [[F:@.+]] = global float 0.0
// CHECK-DAG: [[CNT:@.+]] = global i8 0
template <typename T>
T tmain() {
  S<T> test;
  SST<T> sst;
  T *pvar = &test.f;
  T &lvar = test.f;
#pragma omp parallel
#pragma omp for linear(pvar, lvar)
  for (int i = 0; i < 2; ++i) {
    ++pvar, ++lvar;
  }
  return T();
}

int main() {
  static int sivar;
  SS ss(sivar);
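  // The same source is built three ways: the plain C++ run (CHECK prefix),
  // -DLAMBDA, and -DBLOCKS. In every variant a 'linear' clause gives each
  // thread a private copy initialized from the captured start value plus the
  // iteration count times the step. Conceptually (a sketch with hypothetical
  // names, not the verbatim IR):
  //
  //   g_priv = g_start + iv * 5;   // linear(g, g1 : 5)
  //   ... loop body updates g_priv ...
  //   // after the loop the sequentially-last value is copied back to g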
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // LAMBDA-LABEL: @main
  // LAMBDA: alloca [[SS_TY]],
  // LAMBDA: alloca [[CAP_TY:%.+]],
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]]([[CAP_TY]]*
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 0, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp for linear(g, g1:5)
  for (int i = 0; i < 2; ++i) {
    // LAMBDA: define {{.+}} @{{.+}}([[SS_TY]]*
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
    // LAMBDA: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
    // LAMBDA: store i8
    // LAMBDA: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
    // LAMBDA: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
    // LAMBDA: ret

    // LAMBDA: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
    // LAMBDA: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 0
    // LAMBDA-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
    // LAMBDA: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 2
    // LAMBDA: call void @__kmpc_for_static_init_4(
    // LAMBDA-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]*
    // LAMBDA: call{{.*}} void
    // LAMBDA: call void @__kmpc_for_static_fini(
    // LAMBDA: br i1
    // LAMBDA: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
    // LAMBDA: store i8 %{{.+}}, i8* [[B_REF]],
    // LAMBDA: br label
    // LAMBDA: ret void

    // LAMBDA: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, i32* {{.+}}, i32* {{.+}}, i32* {{.+}})
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
    // LAMBDA: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
    // LAMBDA: call void @__kmpc_for_static_init_4(
    // LAMBDA: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
    // LAMBDA-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
    // LAMBDA-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
    // LAMBDA-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
    // LAMBDA-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
    // LAMBDA-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
    // LAMBDA-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
    // LAMBDA-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
    // LAMBDA-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
    // LAMBDA: call void @__kmpc_for_static_fini(
    // LAMBDA: br i1
    // LAMBDA: br label
    // LAMBDA: ret void

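    // The region outlined for the workshare loop below should derive the
    // private linear value from the captured start before each body run:
    // roughly g_priv = g_start + cnt * 5 (hypothetical names, a sketch of
    // the mul/add sequence matched next, not the verbatim IR).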
    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[G_START_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // LAMBDA: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
    // LAMBDA: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // LAMBDA: [[VAL:%.+]] = load i32, i32* [[G_START_ADDR]]
    // LAMBDA: [[CNT:%.+]] = load i32, i32*
    // LAMBDA: [[MUL:%.+]] = mul nsw i32 [[CNT]], 5
    // LAMBDA: [[ADD:%.+]] = add nsw i32 [[VAL]], [[MUL]]
    // LAMBDA: store i32 [[ADD]], i32* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[VAL:%.+]] = load i32, i32* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[ADD:%.+]] = add nsw i32 [[VAL]], 5
    // LAMBDA: store i32 [[ADD]], i32* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store i{{[0-9]+}}* [[G_PRIVATE_ADDR]], i{{[0-9]+}}** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
    // LAMBDA: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g += 5;
    g1 += 5;
    // LAMBDA: call void @__kmpc_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      g1 = 2;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_REF]]
    }();
  }
  }();
  return 0;
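  // The BLOCKS variant below mirrors the lambda variant using Apple blocks;
  // in SS it also swaps linear(ref(c)) for linear(uval(c)), so both
  // reference modifiers get codegen coverage.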
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // BLOCKS-LABEL: @main
  // BLOCKS: call
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 0, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp for linear(g, g1:5)
  for (int i = 0; i < 2; ++i) {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: [[G_START_ADDR:%.+]] = alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // BLOCKS: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // BLOCKS: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
    // BLOCKS: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // BLOCKS: [[VAL:%.+]] = load i32, i32* [[G_START_ADDR]]
    // BLOCKS: [[CNT:%.+]] = load i32, i32*
    // BLOCKS: [[MUL:%.+]] = mul nsw i32 [[CNT]], 5
    // BLOCKS: [[ADD:%.+]] = add nsw i32 [[VAL]], [[MUL]]
    // BLOCKS: store i32 [[ADD]], i32* [[G_PRIVATE_ADDR]],
    // BLOCKS: [[VAL:%.+]] = load i32, i32* [[G_PRIVATE_ADDR]],
    // BLOCKS: [[ADD:%.+]] = add nsw i32 [[VAL]], 5
    // BLOCKS: store i32 [[ADD]], i32* [[G_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[^[:word:]]}}
    // BLOCKS: i{{[0-9]+}}* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[^[:word:]]}}
    // BLOCKS: call void {{%.+}}(i8
    // BLOCKS: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g += 5;
    g1 += 5;
    // BLOCKS: call void @__kmpc_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
    g = 1;
    g1 = 5;
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      g1 = 2;
      // BLOCKS-NOT: [[G]]{{[^[:word:]]}}
      // BLOCKS: store i{{[0-9]+}} 2, i{{[0-9]+}}*
      // BLOCKS-NOT: [[G]]{{[^[:word:]]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
// BLOCKS: define {{.+}} @{{.+}}([[SS_TY]]*
// BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// BLOCKS: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
// BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
// BLOCKS: store i8
// BLOCKS: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
// BLOCKS: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
// BLOCKS: ret

// BLOCKS: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
// BLOCKS: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 0
// BLOCKS-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
// BLOCKS: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 2
// BLOCKS: call void @__kmpc_for_static_init_4(
// BLOCKS-NOT: getelementptr {{.*}}[[SS_TY]], [[SS_TY]]*
// BLOCKS: call{{.*}} void
// BLOCKS: call void @__kmpc_for_static_fini(
// BLOCKS: br i1
// BLOCKS: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
// BLOCKS: store i8 %{{.+}}, i8* [[B_REF]],
// BLOCKS: br label
// BLOCKS: ret void

// BLOCKS: define internal void @{{.+}}(i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}}, i32* {{.+}}, i32* {{.+}}, i32* {{.+}})
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: alloca i{{[0-9]+}},
// BLOCKS: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
// BLOCKS: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
// BLOCKS: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
// BLOCKS: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
// BLOCKS: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
// BLOCKS: call void @__kmpc_for_static_init_4(
// BLOCKS: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
// BLOCKS-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
// BLOCKS-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
// BLOCKS-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
// BLOCKS-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
// BLOCKS-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
// BLOCKS-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
// BLOCKS-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
// BLOCKS-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
// BLOCKS-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
// BLOCKS-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
// BLOCKS: call void @__kmpc_for_static_fini(
// BLOCKS: br i1
// BLOCKS: br label
// BLOCKS: ret void
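// The plain C++ variant exercises linear on a pointer and on a long long
// with an explicit step of 3, plus the templated tmain<int>() instance
// where lvar is a reference and the implicit step of 1 applies.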
#else
  S<float> test;
  float *pvar = &test.f;
  long long lvar = 0;
#pragma omp parallel
#pragma omp for linear(pvar, lvar : 3)
  for (int i = 0; i < 2; ++i) {
    pvar += 3, lvar += 3;
  }
  return tmain<int>();
#endif
}

// CHECK: define i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 2, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, float**, i64*)* [[MAIN_MICROTASK:@.+]] to void
// CHECK: = call {{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call void [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret

// CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, float** dereferenceable(8) %{{.+}}, i64* dereferenceable(8) %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[PVAR_START:%.+]] = alloca float*,
// CHECK: [[LVAR_START:%.+]] = alloca i64,
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[PVAR_PRIV:%.+]] = alloca float*,
// CHECK: [[LVAR_PRIV:%.+]] = alloca i64,
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// Check that the start values of the linear variables are captured.
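// For the pointer, the private value is advanced with a getelementptr, so
// the step is implicitly scaled by the element size; roughly (hypothetical
// names, a sketch of the pattern matched below):
//   pvar_priv = &pvar_start[iv * 3];
//   lvar_priv = lvar_start + (long long)(iv * 3);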
// CHECK: [[PVAR_REF:%.+]] = load float**, float*** %
// CHECK: [[LVAR_REF:%.+]] = load i64*, i64** %
// CHECK: [[PVAR_VAL:%.+]] = load float*, float** [[PVAR_REF]],
// CHECK: store float* [[PVAR_VAL]], float** [[PVAR_START]],
// CHECK: [[LVAR_VAL:%.+]] = load i64, i64* [[LVAR_REF]],
// CHECK: store i64 [[LVAR_VAL]], i64* [[LVAR_START]],
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID:%.+]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// CHECK: [[PVAR_VAL:%.+]] = load float*, float** [[PVAR_START]],
// CHECK: [[CNT:%.+]] = load i32, i32*
// CHECK: [[MUL:%.+]] = mul nsw i32 [[CNT]], 3
// CHECK: [[IDX:%.+]] = sext i32 [[MUL]] to i64
// CHECK: [[PTR:%.+]] = getelementptr inbounds float, float* [[PVAR_VAL]], i64 [[IDX]]
// CHECK: store float* [[PTR]], float** [[PVAR_PRIV]],
// CHECK: [[LVAR_VAL:%.+]] = load i64, i64* [[LVAR_START]],
// CHECK: [[CNT:%.+]] = load i32, i32*
// CHECK: [[MUL:%.+]] = mul nsw i32 [[CNT]], 3
// CHECK: [[CONV:%.+]] = sext i32 [[MUL]] to i64
// CHECK: [[VAL:%.+]] = add nsw i64 [[LVAR_VAL]], [[CONV]]
// CHECK: store i64 [[VAL]], i64* [[LVAR_PRIV]],
// CHECK: [[PVAR_VAL:%.+]] = load float*, float** [[PVAR_PRIV]]
// CHECK: [[PTR:%.+]] = getelementptr inbounds float, float* [[PVAR_VAL]], i64 3
// CHECK: store float* [[PTR]], float** [[PVAR_PRIV]],
// CHECK: [[LVAR_VAL:%.+]] = load i64, i64* [[LVAR_PRIV]],
// CHECK: [[ADD:%.+]] = add nsw i64 [[LVAR_VAL]], 3
// CHECK: store i64 [[ADD]], i64* [[LVAR_PRIV]],
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 2, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32**, i32*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call void [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret

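// SS::b is a 4-bit bitfield with no addressable storage of its own, so the
// outlined constructor body works on a plain private int; the checks below
// expect a and c to be reached through field GEPs 0 and 2, and b's final
// value to be merged back into the underlying i8 (the store to B_REF).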
// CHECK: define {{.+}} @{{.+}}([[SS_TY]]*
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 0
// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* %
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 1
// CHECK: store i8
// CHECK: getelementptr inbounds [[SS_TY]], [[SS_TY]]* %{{.+}}, i32 0, i32 2
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[SS_TY]]*)* [[SS_MICROTASK:@.+]] to void
// CHECK: ret

// CHECK: define internal void [[SS_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, [[SS_TY]]* %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[A_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[B_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[C_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: call void @__kmpc_barrier(
// CHECK: store i{{[0-9]+}}* [[A_PRIV]], i{{[0-9]+}}** [[REFA:%.+]],
// CHECK: store i{{[0-9]+}}* [[C_PRIV]], i{{[0-9]+}}** [[REFC:%.+]],
// CHECK: call void @__kmpc_for_static_init_4(
// CHECK: [[A_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFA]],
// CHECK-NEXT: [[A_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[A_PRIV]],
// CHECK-NEXT: [[INC:%.+]] = add nsw i{{[0-9]+}} [[A_VAL]], 1
// CHECK-NEXT: store i{{[0-9]+}} [[INC]], i{{[0-9]+}}* [[A_PRIV]],
// CHECK-NEXT: [[B_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[B_PRIV]],
// CHECK-NEXT: [[DEC:%.+]] = add nsw i{{[0-9]+}} [[B_VAL]], -1
// CHECK-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
// CHECK-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
// CHECK-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
// CHECK-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
// CHECK-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
// CHECK: call void @__kmpc_for_static_fini(
// CHECK: br i1
// CHECK: [[B_REF:%.+]] = getelementptr {{.*}}[[SS_TY]], [[SS_TY]]* %{{.*}}, i32 0, i32 1
// CHECK: store i8 %{{.+}}, i8* [[B_REF]],
// CHECK: br label
// CHECK: ret void

// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i32** dereferenceable(8) %{{.+}}, i32* dereferenceable(4) %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[PVAR_START:%.+]] = alloca i32*,
// CHECK: [[LVAR_START:%.+]] = alloca i32,
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[PVAR_PRIV:%.+]] = alloca i32*,
// CHECK: [[LVAR_PRIV:%.+]] = alloca i32,
// CHECK: [[LVAR_PRIV_REF:%.+]] = alloca i32*,
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// Check that the start values of the linear variables are captured.
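// In tmain<int>(), lvar is declared as T &lvar, so unlike in main() the
// private copy is reached through an extra pointer cell (LVAR_PRIV_REF in
// the checks below) that is dereferenced before each update.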
// CHECK: [[PVAR_REF:%.+]] = load i32**, i32*** %
// CHECK: [[PVAR_VAL:%.+]] = load i32*, i32** [[PVAR_REF]],
// CHECK: store i32* [[PVAR_VAL]], i32** [[PVAR_START]],
// CHECK: [[LVAR_REF:%.+]] = load i32*, i32** %
// CHECK: [[LVAR_VAL:%.+]] = load i32, i32* [[LVAR_REF]],
// CHECK: store i32 [[LVAR_VAL]], i32* [[LVAR_START]],
// CHECK: store i32* [[LVAR_PRIV]], i32** [[LVAR_PRIV_REF]],

// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID:%.+]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// CHECK: [[PVAR_VAL:%.+]] = load i32*, i32** [[PVAR_START]],
// CHECK: [[CNT:%.+]] = load i32, i32*
// CHECK: [[MUL:%.+]] = mul nsw i32 [[CNT]], 1
// CHECK: [[IDX:%.+]] = sext i32 [[MUL]] to i64
// CHECK: [[PTR:%.+]] = getelementptr inbounds i32, i32* [[PVAR_VAL]], i64 [[IDX]]
// CHECK: store i32* [[PTR]], i32** [[PVAR_PRIV]],
// CHECK: [[LVAR_VAL:%.+]] = load i32, i32* [[LVAR_START]],
// CHECK: [[CNT:%.+]] = load i32, i32*
// CHECK: [[MUL:%.+]] = mul nsw i32 [[CNT]], 1
// CHECK: [[VAL:%.+]] = add nsw i32 [[LVAR_VAL]], [[MUL]]
// CHECK: store i32 [[VAL]], i32* [[LVAR_PRIV]],
// CHECK: [[PVAR_VAL:%.+]] = load i32*, i32** [[PVAR_PRIV]]
// CHECK: [[PTR:%.+]] = getelementptr inbounds i32, i32* [[PVAR_VAL]], i32 1
// CHECK: store i32* [[PTR]], i32** [[PVAR_PRIV]],
// CHECK: [[LVAR_PRIV:%.+]] = load i32*, i32** [[LVAR_PRIV_REF]],
// CHECK: [[LVAR_VAL:%.+]] = load i32, i32* [[LVAR_PRIV]],
// CHECK: [[ADD:%.+]] = add nsw i32 [[LVAR_VAL]], 1
// CHECK: store i32 [[ADD]], i32* [[LVAR_PRIV]],
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void
#endif