// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

template <class T>
struct S {
  T f;
  S(T a) : f(a) {}
  S() : f() {}
  S<T> &operator=(const S<T> &);
  operator T() { return T(); }
  ~S() {}
};

volatile int g = 1212;

// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK: [[CAP_MAIN_TY:%.+]] = type { i{{[0-9]+}}*, [2 x i{{[0-9]+}}]*, [2 x [[S_FLOAT_TY]]]*, [[S_FLOAT_TY]]* }
// CHECK: [[S_INT_TY:%.+]] = type { i32 }
// CHECK: [[CAP_TMAIN_TY:%.+]] = type { i{{[0-9]+}}*, [2 x i{{[0-9]+}}]*, [2 x [[S_INT_TY]]]*, [[S_INT_TY]]* }
// CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
// CHECK-DAG: [[SINGLE_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 322, i32 0, i32 0, i8*
// CHECK-DAG: [[SECTIONS_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 194, i32 0, i32 0, i8*
// CHECK-DAG: [[X:@.+]] = global double 0.0
template <typename T>
T tmain() {
  S<T> test;
  T t_var = T();
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> var(3);
#pragma omp parallel
#pragma omp sections lastprivate(t_var, vec, s_arr, var)
  {
    vec[0] = t_var;
#pragma omp section
    s_arr[0] = var;
  }
  return T();
}

namespace A {
double x;
}
namespace B {
using A::x;
}

int main() {
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // LAMBDA-LABEL: @main
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]](
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* %{{.+}})
#pragma omp parallel
#pragma omp sections lastprivate(g)
  {
    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: store %{{.+}}* [[ARG]], %{{.+}}** [[ARG_REF:%.+]],
    // LAMBDA: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // LAMBDA: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
    // LAMBDA: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // LAMBDA: store volatile i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store i{{[0-9]+}}* [[G_PRIVATE_ADDR]], i{{[0-9]+}}** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
    // LAMBDA: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g = 1;
    // Check for final copying of private values back to original vars.
    // LAMBDA: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
    // LAMBDA: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
    // LAMBDA: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
    // LAMBDA: [[LAST_THEN]]
    // Actual copying.

    // original g=private_g;
    // LAMBDA: [[G_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G]],
    // LAMBDA: br label %[[LAST_DONE]]
    // LAMBDA: [[LAST_DONE]]
    // LAMBDA: call i32 @__kmpc_cancel_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
#pragma omp section
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]]
      // LAMBDA: store volatile i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_REF]]
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // BLOCKS-LABEL: @main
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* %{{.+}})
#pragma omp parallel
#pragma omp sections lastprivate(g)
  {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* [[ARG:%.+]])
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // BLOCKS: store %{{.+}}* [[ARG]], %{{.+}}** [[ARG_REF:%.+]],
    // BLOCKS: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}
    // BLOCKS: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
    // BLOCKS: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // BLOCKS: store volatile i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: i{{[0-9]+}}* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: call void {{%.+}}(i8
    // BLOCKS: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])
    g = 1;
    // Check for final copying of private values back to original vars.
    // BLOCKS: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
    // BLOCKS: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
    // BLOCKS: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
    // BLOCKS: [[LAST_THEN]]
    // Actual copying.
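    // Note: the copy-back below runs only on the thread that executed the last
    // section; the runtime reports this through the is-last flag loaded from
    // [[IS_LAST_ADDR]] and checked by the branch above.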

    // original g=private_g;
    // BLOCKS: [[G_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G]],
    // BLOCKS: br label %[[LAST_DONE]]
    // BLOCKS: [[LAST_DONE]]
    // BLOCKS: call i32 @__kmpc_cancel_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID]])
#pragma omp section
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: store volatile i{{[0-9]+}} 2, i{{[0-9]+}}*
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
#else
  S<float> test;
  int t_var = 0;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3);
#pragma omp parallel
#pragma omp sections lastprivate(t_var, vec, s_arr, var)
  {
    {
      vec[0] = t_var;
      s_arr[0] = var;
    }
  }
#pragma omp parallel
#pragma omp sections lastprivate(A::x, B::x)
  {
    A::x++;
#pragma omp section
    ;
  }
  return tmain<int>();
#endif
}

// CHECK: define i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: %{{.+}} = bitcast [[CAP_MAIN_TY]]*
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[CAP_MAIN_TY]]*)* [[MAIN_MICROTASK:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, %{{.+}}*)* [[MAIN_MICROTASK1:@.+]] to void
// CHECK: = call {{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call void [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret

// CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, [[CAP_MAIN_TY]]* %{{.+}})
// CHECK-NOT: alloca i{{[0-9]+}},
// CHECK-NOT: alloca [2 x i{{[0-9]+}}],
// CHECK-NOT: alloca [2 x [[S_FLOAT_TY]]],
// CHECK-NOT: alloca [[S_FLOAT_TY]],
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call i32 @__kmpc_single(

// CHECK-DAG: getelementptr inbounds [[CAP_MAIN_TY]], [[CAP_MAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK-DAG: getelementptr inbounds [[CAP_MAIN_TY]], [[CAP_MAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 1
// CHECK-DAG: getelementptr inbounds [[CAP_MAIN_TY]], [[CAP_MAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 2
// CHECK-DAG: getelementptr inbounds [[CAP_MAIN_TY]], [[CAP_MAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 3

// <Skip loop body>

// CHECK-NOT: call void [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK-NOT: call void [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*

// CHECK: call void @__kmpc_end_single(

// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[SINGLE_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

//
// CHECK: define internal void [[MAIN_MICROTASK1]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, %{{.+}}* %{{.+}})
// CHECK: [[X_PRIV:%.+]] = alloca double,
// CHECK-NOT: alloca double

// Check for default initialization.
// CHECK-NOT: [[X_PRIV]]

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original x=private_x;
// CHECK: [[X_VAL:%.+]] = load double, double* [[X_PRIV]],
// CHECK: store double [[X_VAL]], double* [[X]],
// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[SECTIONS_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[CAP_TMAIN_TY]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call void [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, [[CAP_TMAIN_TY]]* %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}],
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_INT_TY]]],
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]],
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// Check for default initialization.
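// The scalar privates (t_var, vec) get no explicit initializer, while the
// class-type privates (s_arr, var) are default-constructed.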
// CHECK: [[T_VAR_PTR_REF:%.+]] = getelementptr inbounds [[CAP_TMAIN_TY]], [[CAP_TMAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[T_VAR_PTR_REF]],
// CHECK-NOT: [[T_VAR_PRIV]]
// CHECK: [[VEC_PTR_REF:%.+]] = getelementptr inbounds [[CAP_TMAIN_TY]], [[CAP_TMAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 1
// CHECK: [[VEC_REF:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** [[VEC_PTR_REF:%.+]],
// CHECK-NOT: [[VEC_PRIV]]
// CHECK: [[S_ARR_REF_PTR:%.+]] = getelementptr inbounds [[CAP_TMAIN_TY]], [[CAP_TMAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 2
// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_INT_TY]]]*, [2 x [[S_INT_TY]]]** [[S_ARR_REF_PTR]],
// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_INT_TY]]*
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[S_ARR_PRIV_ITEM]])
// CHECK: [[VAR_REF_PTR:%.+]] = getelementptr inbounds [[CAP_TMAIN_TY]], [[CAP_TMAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} 3
// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[VAR_REF_PTR]],
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 %{{.+}}, i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER:%.+]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original t_var=private_t_var;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_REF]],

// original vec[]=private_vec[];
// CHECK: [[VEC_DEST:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_REF]] to i8*
// CHECK: [[VEC_SRC:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[VEC_DEST]], i8* [[VEC_SRC]],

// original s_arr[]=private_s_arr[];
// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[S_ARR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = bitcast [2 x [[S_INT_TY]]]* [[S_ARR_PRIV]] to [[S_INT_TY]]*
// CHECK: [[S_ARR_END:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2
// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_INT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_END]]
// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
// CHECK: [[S_ARR_BODY]]
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* {{.+}}, [[S_INT_TY]]* {{.+}})
// CHECK: br i1 {{.+}}, label %[[S_ARR_BODY_DONE]], label %[[S_ARR_BODY]]
// CHECK: [[S_ARR_BODY_DONE]]

// original var=private_var;
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* {{.*}} [[VAR_PRIV]])
// CHECK: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]*
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[SECTIONS_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void
#endif
