// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s

// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

template <class T>
struct S {
  T f;
  S(T a) : f(a) {}
  S() : f() {}
  S<T> &operator=(const S<T> &);
  operator T() { return T(); }
  ~S() {}
};

volatile int g = 1212;

// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK [[CAP_MAIN_TY:%.+]] = type { i{{[0-9]+}}*, [2 x i{{[0-9]+}}]*, [2 x [[S_FLOAT_TY]]]*, [[S_FLOAT_TY]]*, i{{[0-9]+}}* }
// CHECK: [[S_INT_TY:%.+]] = type { i32 }
// CHECK-DAG: [[SECTIONS_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 194, i32 0, i32 0, i8*
// CHECK-DAG: [[X:@.+]] = global double 0.0
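// A quick reminder of the semantics under test (not part of the checked
// output): lastprivate gives each thread a private copy of each listed
// variable; when the sections region ends, the thread that executed the
// lexically last section writes its values back to the originals, and an
// implicit barrier follows.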
template <typename T>
T tmain() {
  S<T> test;
  T t_var = T();
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> var(3);
#pragma omp parallel
#pragma omp sections lastprivate(t_var, vec, s_arr, var)
  {
    vec[0] = t_var;
#pragma omp section
    s_arr[0] = var;
  }
  return T();
}

namespace A {
double x;
}
namespace B {
using A::x;
}

int main() {
  static int sivar;
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // LAMBDA-LABEL: @main
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]](
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp sections lastprivate(g, sivar)
  {
    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias [[GTID:%.+]], i32* noalias %{{.+}}, i32* dereferenceable(4) [[SIVAR_REF:%.+]])
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[SIVAR1_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},

    // LAMBDA: store i{{[0-9]+}}* [[SIVAR_REF]], i{{[0-9]+}}** %{{.+}},
    // LAMBDA: [[SIVAR_REF_ADDR:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}},

    // LAMBDA: [[GTID_ADDR:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}, align 8
    // LAMBDA: [[GTID_ADDR_REF:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_ADDR]], align 4

    // LAMBDA: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID_ADDR_REF]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // LAMBDA: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: store i{{[0-9]+}} 13, i{{[0-9]+}}* [[SIVAR1_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store i{{[0-9]+}}* [[G_PRIVATE_ADDR]], i{{[0-9]+}}** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: [[SIVAR_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
    // LAMBDA: store i{{[0-9]+}}* [[SIVAR1_PRIVATE_ADDR]], i{{[0-9]+}}** [[SIVAR_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
    // LAMBDA: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID_ADDR_REF]])
    {
      g = 1;
      sivar = 13;
    }
    // Check for final copying of private values back to original vars.
    // LAMBDA: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
    // LAMBDA: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
    // LAMBDA: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
    // LAMBDA: [[LAST_THEN]]
    // Actual copying.

    // original g=private_g;
    // LAMBDA: [[G_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G]],

    // original sivar = private sivar;
    // LAMBDA: [[SIVAR1_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR1_PRIVATE_ADDR]],
    // LAMBDA: store i{{[0-9]+}} [[SIVAR1_VAL]], i{{[0-9]+}}* [[SIVAR_REF_ADDR]],
    // LAMBDA: br label %[[LAST_DONE]]
    // LAMBDA: [[LAST_DONE]]
    // LAMBDA: call void @__kmpc_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID_ADDR_REF]])
#pragma omp section
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      sivar = 23;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_REF]]
      // LAMBDA: [[SIVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
      // LAMBDA: [[SIVAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SIVAR_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 23, i{{[0-9]+}}* [[SIVAR_REF]]
    }();
  }
  }();
  return 0;
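// The BLOCKS branch below mirrors the LAMBDA checks using blocks instead
// of lambdas; note it stores different constants (17 and 29 rather than
// 13 and 23).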
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // BLOCKS-LABEL: @main
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp sections lastprivate(g, sivar)
  {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias [[GTID:%.+]], i32* noalias %{{.+}}, i32* dereferenceable(4) [[SIVAR:%.+]])
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // BLOCKS: [[SIVAR1_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},

    // BLOCKS: store i{{[0-9]+}}* [[SIVAR]], i{{[0-9]+}}** [[SIVAR_ADDR:%.+]],
    // BLOCKS: [[SIVAR_REF_ADDR:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SIVAR_ADDR]],

    // BLOCKS: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID:%.+]], align 8
    // BLOCKS: [[GTID_ADDR_REF:%.+]] = load i32, i32* [[GTID_ADDR]], align 4
    // BLOCKS: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID_ADDR_REF]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // BLOCKS: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS: store i{{[0-9]+}} 17, i{{[0-9]+}}* [[SIVAR1_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[^a-zA-Z0-9_]}}
    // BLOCKS: i{{[0-9]+}}* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[^a-zA-Z0-9_]}}
    // BLOCKS-NOT: [[SIVAR]]{{[^a-zA-Z0-9_]}}
    // BLOCKS: i{{[0-9]+}}* [[SIVAR1_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[SIVAR]]{{[^a-zA-Z0-9_]}}
    // BLOCKS: call void {{%.+}}(i8
    // BLOCKS: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID_ADDR_REF]])
    {
      g = 1;
      sivar = 17;
    }
    // Check for final copying of private values back to original vars.
    // BLOCKS: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
    // BLOCKS: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
    // BLOCKS: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
    // BLOCKS: [[LAST_THEN]]
    // Actual copying.

    // original g=private_g;
    // BLOCKS: [[G_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G]],

    // original sivar = private sivar;
    // BLOCKS: [[SIVAR1_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR1_PRIVATE_ADDR]],
    // BLOCKS: store i{{[0-9]+}} [[SIVAR1_VAL]], i{{[0-9]+}}* [[SIVAR_REF_ADDR]],
    // BLOCKS: br label %[[LAST_DONE]]
    // BLOCKS: [[LAST_DONE]]
    // BLOCKS: call void @__kmpc_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID_ADDR_REF]])
#pragma omp section
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      sivar = 29;
      // BLOCKS-NOT: [[G]]{{[^a-zA-Z0-9_]}}
      // BLOCKS: store i{{[0-9]+}} 2, i{{[0-9]+}}*
      // BLOCKS-NOT: [[G]]{{[^a-zA-Z0-9_]}}
      // BLOCKS-NOT: [[SIVAR]]{{[^a-zA-Z0-9_]}}
      // BLOCKS: store i{{[0-9]+}} 29, i{{[0-9]+}}*
      // BLOCKS-NOT: [[SIVAR]]{{[^a-zA-Z0-9_]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
#else
  S<float> test;
  int t_var = 0;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3);
#pragma omp parallel
#pragma omp sections lastprivate(t_var, vec, s_arr, var, sivar)
  {
    {
      vec[0] = t_var;
      s_arr[0] = var;
      sivar = 31;
    }
  }
#pragma omp parallel
#pragma omp sections lastprivate(A::x, B::x)
  {
    A::x++;
#pragma omp section
    ;
  }
  return tmain<int>();
#endif
}

// CHECK: define i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
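// All five lastprivate variables (t_var, vec, s_arr, var, sivar) are
// passed to the outlined function by address, which is why fork_call
// receives an argument count of 5 below; the second region captures
// nothing (A::x is a global), hence the count of 0.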
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 5, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [2 x i32]*, [2 x [[S_FLOAT_TY]]]*, [[S_FLOAT_TY]]*, i{{[0-9]+}}*)* [[MAIN_MICROTASK:@.+]] to void

// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[MAIN_MICROTASK1:@.+]] to void
// CHECK: = call {{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call void [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret

// CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca [2 x i{{[0-9]+}}],
// CHECK: alloca [2 x [[S_FLOAT_TY]]],
// CHECK: alloca [[S_FLOAT_TY]],
// CHECK: alloca i{{[0-9]+}},
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]

// CHECK: call void @__kmpc_for_static_init_4(
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(

// CHECK-DAG: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*
// CHECK-DAG: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*

// CHECK: call void @__kmpc_barrier(
// CHECK: ret void

//
// CHECK: define internal void [[MAIN_MICROTASK1]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
// CHECK: [[X_PRIV:%.+]] = alloca double,
// CHECK-NOT: alloca double

// Check for default initialization.
// CHECK-NOT: [[X_PRIV]]

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original x=private_x;
// CHECK: [[X_VAL:%.+]] = load double, double* [[X_PRIV]],
// CHECK: store double [[X_VAL]], double* [[X]],
// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK: call void @__kmpc_barrier(%{{.+}}* [[SECTIONS_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
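// tmain's region lists only four lastprivate variables (no sivar), so its
// fork_call passes an argument count of 4.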
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [2 x i32]*, [2 x [[S_INT_TY]]]*, [[S_INT_TY]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call void [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}],
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_INT_TY]]],
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]],
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VEC_REF:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** %
// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_INT_TY]]]*, [2 x [[S_INT_TY]]]** %
// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %

// Check for default initialization.
// CHECK-NOT: [[T_VAR_PRIV]]
// CHECK-NOT: [[VEC_PRIV]]
// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_INT_TY]]*
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[S_ARR_PRIV_ITEM]])
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 %{{.+}}, i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.
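// The copy-back strategy depends on the type: a plain load/store for the
// scalar t_var, a memcpy for the trivially copyable array vec, and calls
// to the copy-assignment operator for the S<int> array and for var.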

// original t_var=private_t_var;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_REF]],

// original vec[]=private_vec[];
// CHECK: [[VEC_DEST:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_REF]] to i8*
// CHECK: [[VEC_SRC:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* align {{[0-9]+}} [[VEC_DEST]], i8* align {{[0-9]+}} [[VEC_SRC]],

// original s_arr[]=private_s_arr[];
// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[S_ARR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = bitcast [2 x [[S_INT_TY]]]* [[S_ARR_PRIV]] to [[S_INT_TY]]*
// CHECK: [[S_ARR_END:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2

// CHK: [[SIVAR_REF:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 4
// CHK: store i{{[0-9]+}}* [[SIVAR]], i{{[0-9]+}} [[SIVAR_REF]]

// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_INT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_END]]
// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
// CHECK: [[S_ARR_BODY]]
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* {{.+}}, [[S_INT_TY]]* {{.+}})
// CHECK: br i1 {{.+}}, label %[[S_ARR_BODY_DONE]], label %[[S_ARR_BODY]]
// CHECK: [[S_ARR_BODY_DONE]]

// original var=private_var;
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN]]([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* {{.*}} [[VAR_PRIV]])
// CHECK: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]*
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[SECTIONS_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void
#endif