// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// expected-no-diagnostics
// REQUIRES: x86-registered-target
#ifndef HEADER
#define HEADER

volatile double g;

template <class T>
struct S {
  T f;
  S(T a) : f(a + g) {}
  S() : f(g) {}
  operator T() { return T(); }
  S &operator&(const S &) { return *this; }
  ~S() {}
};

// CHECK-DAG: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK-DAG: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
// CHECK-DAG: [[ATOMIC_REDUCE_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 18, i32 0, i32 0, i8*
// CHECK-DAG: [[SINGLE_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 322, i32 0, i32 0, i8*
// CHECK-DAG: [[REDUCTION_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 18, i32 0, i32 0, i8*
// CHECK-DAG: [[REDUCTION_LOCK:@.+]] = common global [8 x i32] zeroinitializer

template <typename T>
T tmain() {
  T t;
  S<T> test;
  T t_var = T(), t_var1;
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> var(3), var1;
#pragma omp parallel
#pragma omp sections reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1) nowait
  {
    vec[0] = t_var;
#pragma omp section
    s_arr[0] = var;
  }
  return T();
}

int main() {
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global double
  // LAMBDA-LABEL: @main
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]](
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 0, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp sections reduction(+:g)
  {
    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca double,

    // Reduction list for runtime.
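    // (An array of i8* slots, one per reduction variable; here it receives the
    // address of the private copy of 'g' and is passed to __kmpc_reduce below.)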
    // LAMBDA: [[RED_LIST:%.+]] = alloca [1 x i8*],

    // LAMBDA: store double 0.0{{.+}}, double* [[G_PRIVATE_ADDR]]
    // LAMBDA: call void @__kmpc_for_static_init_4(
    g = 1;
    // LAMBDA: store double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store double* [[G_PRIVATE_ADDR]], double** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
    // LAMBDA: call void @__kmpc_for_static_fini(

    // LAMBDA: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i64 0, i64 0
    // LAMBDA: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
    // LAMBDA: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
    // LAMBDA: call i32 @__kmpc_reduce(
    // LAMBDA: switch i32 %{{.+}}, label %[[REDUCTION_DONE:.+]] [
    // LAMBDA: i32 1, label %[[CASE1:.+]]
    // LAMBDA: i32 2, label %[[CASE2:.+]]
    // LAMBDA: [[CASE1]]
    // LAMBDA: [[G_VAL:%.+]] = load double, double* [[G]]
    // LAMBDA: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // LAMBDA: [[ADD:%.+]] = fadd double [[G_VAL]], [[G_PRIV_VAL]]
    // LAMBDA: store double [[ADD]], double* [[G]]
    // LAMBDA: call void @__kmpc_end_reduce(
    // LAMBDA: br label %[[REDUCTION_DONE]]
    // LAMBDA: [[CASE2]]
    // LAMBDA: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // LAMBDA: fadd double
    // LAMBDA: cmpxchg i64*
    // LAMBDA: call void @__kmpc_end_reduce(
    // LAMBDA: br label %[[REDUCTION_DONE]]
    // LAMBDA: [[REDUCTION_DONE]]
    // LAMBDA: ret void
#pragma omp section
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load double*, double** [[G_PTR_REF]]
      // LAMBDA: store double 2.0{{.+}}, double* [[G_REF]]
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global double
  // BLOCKS-LABEL: @main
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 0, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp sections reduction(-:g)
  {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca double,

    // Reduction list for runtime.
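    // (Note: for the floating-point '-' reduction the checks below expect the
    // private copy to be initialized to 0.0 and the final combine to use fadd,
    // the same shape as the '+' reduction.)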
    // BLOCKS: [[RED_LIST:%.+]] = alloca [1 x i8*],

    // BLOCKS: store double 0.0{{.+}}, double* [[G_PRIVATE_ADDR]]
    g = 1;
    // BLOCKS: call void @__kmpc_for_static_init_4(
    // BLOCKS: store double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: double* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: call void {{%.+}}(i8
    // BLOCKS: call void @__kmpc_for_static_fini(

    // BLOCKS: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i64 0, i64 0
    // BLOCKS: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
    // BLOCKS: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
    // BLOCKS: call i32 @__kmpc_reduce(
    // BLOCKS: switch i32 %{{.+}}, label %[[REDUCTION_DONE:.+]] [
    // BLOCKS: i32 1, label %[[CASE1:.+]]
    // BLOCKS: i32 2, label %[[CASE2:.+]]
    // BLOCKS: [[CASE1]]
    // BLOCKS: [[G_VAL:%.+]] = load double, double* [[G]]
    // BLOCKS: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // BLOCKS: [[ADD:%.+]] = fadd double [[G_VAL]], [[G_PRIV_VAL]]
    // BLOCKS: store double [[ADD]], double* [[G]]
    // BLOCKS: call void @__kmpc_end_reduce(
    // BLOCKS: br label %[[REDUCTION_DONE]]
    // BLOCKS: [[CASE2]]
    // BLOCKS: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // BLOCKS: fadd double
    // BLOCKS: cmpxchg i64*
    // BLOCKS: call void @__kmpc_end_reduce(
    // BLOCKS: br label %[[REDUCTION_DONE]]
    // BLOCKS: [[REDUCTION_DONE]]
    // BLOCKS: ret void
#pragma omp section
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: store double 2.0{{.+}}, double*
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
#else
  S<float> test;
  float t_var = 0, t_var1;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3), var1;
#pragma omp parallel
#pragma omp sections reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1)
  {
    {
      vec[0] = t_var;
      s_arr[0] = var;
      vec[1] = t_var1;
      s_arr[1] = var1;
    }
  }
  return tmain<int>();
#endif
}

// CHECK: define {{.*}}i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 6, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, float*, [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]*, float*, [2 x i32]*, [2 x [[S_FLOAT_TY]]]*)* [[MAIN_MICROTASK:@.+]] to void
// CHECK: = call {{.*}}i{{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call {{.*}} [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}},
// CHECK-NOT: alloca float,
// CHECK-NOT: alloca [[S_FLOAT_TY]],
// CHECK-NOT: alloca [[S_FLOAT_TY]],
// CHECK-NOT: alloca float,

// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call i32 @__kmpc_single(

// CHECK-NOT: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK-NOT: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*

// CHECK: call void @__kmpc_end_single(

// CHECK: call void @__kmpc_barrier(%{{.+}}* [[SINGLE_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])

// CHECK: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 6, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [[S_INT_TY]]*, [[S_INT_TY]]*, i32*, [2 x i32]*, [2 x [[S_INT_TY]]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call {{.*}} [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]],
// CHECK: [[VAR1_PRIV:%.+]] = alloca [[S_INT_TY]],
// CHECK: [[T_VAR1_PRIV:%.+]] = alloca i{{[0-9]+}},

// Reduction list for runtime.
// CHECK: [[RED_LIST:%.+]] = alloca [4 x i8*],

// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
// CHECK: [[VAR1_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
// CHECK: [[T_VAR1_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %

// For the + reduction operation the initial value of the private variable is 0.
// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* [[T_VAR_PRIV]],

// For the & reduction operation the initial value of the private variable has all bits set to ones.
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[VAR_PRIV]])

// For the && reduction operation the initial value of the private variable is 1.0.
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[VAR1_PRIV]])

// For the min reduction operation the initial value of the private variable is the largest representable value.
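// (For the 32-bit signed t_var1 used here that is INT_MAX, i.e. 2147483647, as checked below.)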
// CHECK: store i{{[0-9]+}} 2147483647, i{{[0-9]+}}* [[T_VAR1_PRIV]],

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_for_static_init_4(
// Skip checks for internal operations.
// CHECK: call void @__kmpc_for_static_fini(

// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};

// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0
// CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]],
// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1
// CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]],
// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2
// CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]],
// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3
// CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]],

// res = __kmpc_reduce_nowait(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);

// CHECK: [[BITCAST:%.+]] = bitcast [4 x i8*]* [[RED_LIST]] to i8*
// CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 4, i64 32, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]])

// switch(res)
// CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [
// CHECK: i32 1, label %[[CASE1:.+]]
// CHECK: i32 2, label %[[CASE2:.+]]
// CHECK: ]

// case 1:
// t_var += t_var_reduction;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_REF]],
// CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: [[UP:%.+]] = add nsw i{{[0-9]+}} [[T_VAR_VAL]], [[T_VAR_PRIV_VAL]]
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR_REF]],

// var = var.operator &(var_reduction);
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* dereferenceable(4) [[VAR_PRIV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// var1 = var1.operator &&(var1_reduction);
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_REF]])
// CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_PRIV]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
// CHECK: call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// t_var1 = min(t_var1, t_var1_reduction);
// CHECK: [[T_VAR1_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_REF]],
// CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_PRIV]],
// CHECK: [[CMP:%.+]] = icmp slt i{{[0-9]+}} [[T_VAR1_VAL]], [[T_VAR1_PRIV_VAL]]
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi i32
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR1_REF]],

// __kmpc_end_reduce_nowait(<loc>, <gtid>, &<lock>);
// CHECK: call void @__kmpc_end_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]])

// break;
// CHECK: br label %[[RED_DONE]]

// case 2:
// t_var += t_var_reduction;
// CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]]
// CHECK: atomicrmw add i32* [[T_VAR_REF]], i32 [[T_VAR_PRIV_VAL]] monotonic

// var = var.operator &(var_reduction);
// CHECK: call void @__kmpc_critical(
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* dereferenceable(4) [[VAR_PRIV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
// CHECK: call void @__kmpc_end_critical(

// var1 = var1.operator &&(var1_reduction);
// CHECK: call void @__kmpc_critical(
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_REF]])
// CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_PRIV]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
// CHECK: call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
// CHECK: call void @__kmpc_end_critical(

// t_var1 = min(t_var1, t_var1_reduction);
// CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_PRIV]]
// CHECK: atomicrmw min i32* [[T_VAR1_REF]], i32 [[T_VAR1_PRIV_VAL]] monotonic

// break;
// CHECK: br label %[[RED_DONE]]
// CHECK: [[RED_DONE]]
// CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]*
// CHECK: ret void

// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
//   *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
//   ...
//   *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
//                                                     *(Type<n>-1*)rhs[<n>-1]);
// }
// CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*)
// t_var_lhs = (i{{[0-9]+}}*)lhs[0];
// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]],
// CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to i{{[0-9]+}}*
// t_var_rhs = (i{{[0-9]+}}*)rhs[0];
// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]],
// CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to i{{[0-9]+}}*

// var_lhs = (S<i{{[0-9]+}}>*)lhs[1];
// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 1
// CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]],
// CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_INT_TY]]*
// var_rhs = (S<i{{[0-9]+}}>*)rhs[1];
// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1
// CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]],
// CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_INT_TY]]*

// var1_lhs = (S<i{{[0-9]+}}>*)lhs[2];
// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2
// CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]],
// CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_INT_TY]]*
// var1_rhs = (S<i{{[0-9]+}}>*)rhs[2];
// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2
// CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]],
// CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_INT_TY]]*

// t_var1_lhs = (i{{[0-9]+}}*)lhs[3];
// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 3
// CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]],
// CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to i{{[0-9]+}}*
// t_var1_rhs = (i{{[0-9]+}}*)rhs[3];
// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3
// CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]],
// CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to i{{[0-9]+}}*

// t_var_lhs += t_var_rhs;
// CHECK: [[T_VAR_LHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_LHS]],
// CHECK: [[T_VAR_RHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_RHS]],
// CHECK: [[UP:%.+]] = add nsw i{{[0-9]+}} [[T_VAR_LHS_VAL]], [[T_VAR_RHS_VAL]]
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR_LHS]],

// var_lhs = var_lhs.operator &(var_rhs);
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_LHS]], [[S_INT_TY]]* dereferenceable(4) [[VAR_RHS]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_LHS]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// var1_lhs = var1_lhs.operator &&(var1_rhs);
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_LHS]])
// CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_RHS]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
// CHECK: call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_LHS]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// t_var1_lhs = min(t_var1_lhs, t_var1_rhs);
// CHECK: [[T_VAR1_LHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_LHS]],
// CHECK: [[T_VAR1_RHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_RHS]],
// CHECK: [[CMP:%.+]] = icmp slt i{{[0-9]+}} [[T_VAR1_LHS_VAL]], [[T_VAR1_RHS_VAL]]
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi i32
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR1_LHS]],
// CHECK: ret void

#endif