// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// expected-no-diagnostics
// REQUIRES: x86-registered-target
#ifndef HEADER
#define HEADER

volatile double g;

template <class T>
struct S {
  T f;
  S(T a) : f(a + g) {}
  S() : f(g) {}
  operator T() { return T(); }
  S &operator&(const S &) { return *this; }
  ~S() {}
};

// CHECK-DAG: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK-DAG: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
// CHECK-DAG: [[CAP_MAIN_TY:%.+]] = type { float*, [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]*, float*, [2 x i{{[0-9]+}}]*, [2 x [[S_FLOAT_TY]]]* }
// CHECK-DAG: [[CAP_TMAIN_TY:%.+]] = type { i{{[0-9]+}}*, [[S_INT_TY]]*, [[S_INT_TY]]*, i{{[0-9]+}}*, [2 x i{{[0-9]+}}]*, [2 x [[S_INT_TY]]]* }
// CHECK-DAG: [[ATOMIC_REDUCE_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 18, i32 0, i32 0, i8*
// CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
// CHECK-DAG: [[REDUCTION_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 18, i32 0, i32 0, i8*
// CHECK-DAG: [[REDUCTION_LOCK:@.+]] = common global [8 x i32] zeroinitializer

template <typename T>
T tmain() {
  T t;
  S<T> test;
  T t_var = T(), t_var1;
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> var(3), var1;
#pragma omp parallel
#pragma omp for reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1) nowait
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
#pragma omp parallel
#pragma omp for reduction(&& : t_var)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
  return T();
}

int main() {
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global double
  // LAMBDA-LABEL: @main
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]](
  [&]() {
    // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
    // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* %{{.+}})
#pragma omp parallel
#pragma omp for reduction(+:g)
    for (int i = 0; i < 2; ++i) {
      // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* %{{.+}})
      // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca double,

      // Reduction list for runtime.
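      // The compiler gathers the addresses of the private reduction copies
      // into an array of i8* that is later handed to __kmpc_reduce; with a
      // single reduction variable that is, roughly:
      //   void *RedList[1] = {&g_private};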
      // LAMBDA: [[RED_LIST:%.+]] = alloca [1 x i8*],

      // LAMBDA: store double 0.0{{.+}}, double* [[G_PRIVATE_ADDR]]
      // LAMBDA: call void @__kmpc_for_static_init_4(
      g = 1;
      // LAMBDA: store volatile double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
      // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: store double* [[G_PRIVATE_ADDR]], double** [[G_PRIVATE_ADDR_REF]]
      // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
      // LAMBDA: call void @__kmpc_for_static_fini(

      // LAMBDA: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i32 0, i32 0
      // LAMBDA: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
      // LAMBDA: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
      // LAMBDA: call i32 @__kmpc_reduce(
      // LAMBDA: switch i32 %{{.+}}, label %[[REDUCTION_DONE:.+]] [
      // LAMBDA: i32 1, label %[[CASE1:.+]]
      // LAMBDA: i32 2, label %[[CASE2:.+]]
      // LAMBDA: [[CASE1]]
      // LAMBDA: [[G_VAL:%.+]] = load double, double* [[G]]
      // LAMBDA: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
      // LAMBDA: [[ADD:%.+]] = fadd double [[G_VAL]], [[G_PRIV_VAL]]
      // LAMBDA: store double [[ADD]], double* [[G]]
      // LAMBDA: call void @__kmpc_end_reduce(
      // LAMBDA: br label %[[REDUCTION_DONE]]
      // LAMBDA: [[CASE2]]
      // LAMBDA: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
      // LAMBDA: fadd double
      // LAMBDA: cmpxchg i64*
      // LAMBDA: call void @__kmpc_end_reduce(
      // LAMBDA: br label %[[REDUCTION_DONE]]
      // LAMBDA: [[REDUCTION_DONE]]
      // LAMBDA: ret void
      [&]() {
        // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
        // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
        g = 2;
        // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
        // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
        // LAMBDA: [[G_REF:%.+]] = load double*, double** [[G_PTR_REF]]
        // LAMBDA: store volatile double 2.0{{.+}}, double* [[G_REF]]
      }();
    }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global double
  // BLOCKS-LABEL: @main
  // BLOCKS: call void {{%.+}}(i8
  ^{
    // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
    // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i8* %{{.+}})
#pragma omp parallel
#pragma omp for reduction(-:g)
    for (int i = 0; i < 2; ++i) {
      // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* %{{.+}}, i32* %{{.+}}, %{{.+}}* %{{.+}})
      // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca double,

      // Reduction list for runtime.
      // BLOCKS: [[RED_LIST:%.+]] = alloca [1 x i8*],

      // BLOCKS: store double 0.0{{.+}}, double* [[G_PRIVATE_ADDR]]
      g = 1;
      // BLOCKS: call void @__kmpc_for_static_init_4(
      // BLOCKS: store volatile double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: double* [[G_PRIVATE_ADDR]]
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: call void {{%.+}}(i8
      // BLOCKS: call void @__kmpc_for_static_fini(

      // BLOCKS: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i32 0, i32 0
      // BLOCKS: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
      // BLOCKS: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
      // BLOCKS: call i32 @__kmpc_reduce(
      // BLOCKS: switch i32 %{{.+}}, label %[[REDUCTION_DONE:.+]] [
      // BLOCKS: i32 1, label %[[CASE1:.+]]
      // BLOCKS: i32 2, label %[[CASE2:.+]]
      // BLOCKS: [[CASE1]]
      // BLOCKS: [[G_VAL:%.+]] = load double, double* [[G]]
      // BLOCKS: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
      // BLOCKS: [[ADD:%.+]] = fadd double [[G_VAL]], [[G_PRIV_VAL]]
      // BLOCKS: store double [[ADD]], double* [[G]]
      // BLOCKS: call void @__kmpc_end_reduce(
      // BLOCKS: br label %[[REDUCTION_DONE]]
      // BLOCKS: [[CASE2]]
      // BLOCKS: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
      // BLOCKS: fadd double
      // BLOCKS: cmpxchg i64*
      // BLOCKS: call void @__kmpc_end_reduce(
      // BLOCKS: br label %[[REDUCTION_DONE]]
      // BLOCKS: [[REDUCTION_DONE]]
      // BLOCKS: ret void
      ^{
        // BLOCKS: define {{.+}} void {{@.+}}(i8*
        g = 2;
        // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
        // BLOCKS: store volatile double 2.0{{.+}}, double*
        // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
        // BLOCKS: ret
      }();
    }
  }();
  return 0;
#else
  S<float> test;
  float t_var = 0, t_var1;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3), var1;
#pragma omp parallel
#pragma omp for reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
  return tmain<int>();
#endif
}

// CHECK: define {{.*}}i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: %{{.+}} = bitcast [[CAP_MAIN_TY]]*
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[CAP_MAIN_TY]]*)* [[MAIN_MICROTASK:@.+]] to void
// CHECK: = call {{.*}}i{{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call {{.*}} [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, [[CAP_MAIN_TY]]* %{{.+}})
// CHECK: [[T_VAR_PRIV:%.+]] = alloca float,
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: [[VAR1_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: [[T_VAR1_PRIV:%.+]] = alloca float,

// Reduction list for runtime.
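// With four reduction clauses (+: t_var, &: var, &&: var1, min: t_var1) the
// list below has four slots, filled in clause order just before the
// __kmpc_reduce call further down.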
// CHECK: [[RED_LIST:%.+]] = alloca [4 x i8*],

// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],

// CHECK: [[T_VAR_PTR_REF:%.+]] = getelementptr inbounds [[CAP_MAIN_TY]], [[CAP_MAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} {{[0-9]+}}
// CHECK: [[T_VAR_REF:%.+]] = load float*, float** [[T_VAR_PTR_REF]],
// For the + reduction operation the initial value of the private variable is 0.
// CHECK: store float 0.0{{.+}}, float* [[T_VAR_PRIV]],

// CHECK: [[VAR_PTR_REF:%.+]] = getelementptr inbounds [[CAP_MAIN_TY]], [[CAP_MAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} {{[0-9]+}}
// CHECK: [[VAR_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[VAR_PTR_REF:%.+]],
// For the & reduction operation the initial value of the private variable has all bits set.
// CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[VAR_PRIV]])

// CHECK: [[VAR1_PTR_REF:%.+]] = getelementptr inbounds [[CAP_MAIN_TY]], [[CAP_MAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} {{[0-9]+}}
// CHECK: [[VAR1_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[VAR_PTR_REF:%.+]],
// For the && reduction operation the initial value of the private variable is 1.0.
// CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[VAR1_PRIV]])

// CHECK: [[T_VAR1_PTR_REF:%.+]] = getelementptr inbounds [[CAP_MAIN_TY]], [[CAP_MAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} {{[0-9]+}}
// CHECK: [[T_VAR1_REF:%.+]] = load float*, float** [[T_VAR1_PTR_REF]],
// For the min reduction operation the initial value of the private variable is the largest representable value.
// CHECK: store float 0x47EFFFFFE0000000, float* [[T_VAR1_PRIV]],


// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_for_static_init_4(
// Skip checks for internal operations.
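// After the worksharing loop finishes, the generated code runs the runtime
// reduction protocol that the directives below verify. A rough sketch:
//
//   switch (__kmpc_reduce(<loc>, <gtid>, 4, sizeof(RedList), RedList,
//                         reduce_func, &<lock>)) {
//   case 1: // this thread combines the private copies into the originals
//     <non-atomic combine>; __kmpc_end_reduce(<loc>, <gtid>, &<lock>); break;
//   case 2: // combine using atomics or a critical section
//     <atomic combine>; __kmpc_end_reduce(<loc>, <gtid>, &<lock>); break;
//   default: // nothing left for this thread to do
//     break;
//   }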
// CHECK: call void @__kmpc_for_static_fini(

// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};

// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 0
// CHECK: [[BITCAST:%.+]] = bitcast float* [[T_VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]],
// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 1
// CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]],
// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 2
// CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]],
// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 3
// CHECK: [[BITCAST:%.+]] = bitcast float* [[T_VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]],

// res = __kmpc_reduce(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);

// CHECK: [[BITCAST:%.+]] = bitcast [4 x i8*]* [[RED_LIST]] to i8*
// CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 4, i64 32, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]])

// switch(res)
// CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [
// CHECK: i32 1, label %[[CASE1:.+]]
// CHECK: i32 2, label %[[CASE2:.+]]
// CHECK: ]

// case 1:
// t_var += t_var_reduction;
// CHECK: [[T_VAR_VAL:%.+]] = load float, float* [[T_VAR_REF]],
// CHECK: [[T_VAR_PRIV_VAL:%.+]] = load float, float* [[T_VAR_PRIV]],
// CHECK: [[UP:%.+]] = fadd float [[T_VAR_VAL]], [[T_VAR_PRIV_VAL]]
// CHECK: store float [[UP]], float* [[T_VAR_REF]],

// var = var.operator &(var_reduction);
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @{{.+}}([[S_FLOAT_TY]]* [[VAR_REF]], [[S_FLOAT_TY]]* dereferenceable(4) [[VAR_PRIV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// var1 = var1.operator &&(var1_reduction);
// CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_REF]])
// CHECK: [[VAR1_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_PRIV]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = uitofp i1 [[COND_LVALUE]] to float
// CHECK: call void @{{.+}}([[S_FLOAT_TY]]* [[COND_LVALUE:%.+]], float [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// t_var1 = min(t_var1, t_var1_reduction);
// CHECK: [[T_VAR1_VAL:%.+]] = load float, float* [[T_VAR1_REF]],
// CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load float, float* [[T_VAR1_PRIV]],
// CHECK: [[CMP:%.+]] = fcmp olt float [[T_VAR1_VAL]], [[T_VAR1_PRIV_VAL]]
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi float
// CHECK: store float [[UP]], float* [[T_VAR1_REF]],

// __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
// CHECK: call void @__kmpc_end_reduce(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]])

// break;
// CHECK: br label %[[RED_DONE]]

// case 2:
// t_var += t_var_reduction;
// CHECK: load float, float* [[T_VAR_PRIV]]
// CHECK: [[T_VAR_REF_INT:%.+]] = bitcast float* [[T_VAR_REF]] to i32*
// CHECK: [[OLD1:%.+]] = load atomic i32, i32* [[T_VAR_REF_INT]] monotonic,
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[ORIG_OLD_INT:%.+]] = phi i32 [ [[OLD1]], %{{.+}} ], [ [[OLD2:%.+]], %[[CONT]] ]
// CHECK: fadd float
// CHECK: [[UP_INT:%.+]] = load i32, i32*
// CHECK: [[T_VAR_REF_INT:%.+]] = bitcast float* [[T_VAR_REF]] to i32*
// CHECK: [[RES:%.+]] = cmpxchg i32* [[T_VAR_REF_INT]], i32 [[ORIG_OLD_INT]], i32 [[UP_INT]] monotonic monotonic
// CHECK: [[OLD2:%.+]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[ATOMIC_DONE:.+]], label %[[CONT]]
// CHECK: [[ATOMIC_DONE]]

// var = var.operator &(var_reduction);
// CHECK: call void @__kmpc_critical(
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @{{.+}}([[S_FLOAT_TY]]* [[VAR_REF]], [[S_FLOAT_TY]]* dereferenceable(4) [[VAR_PRIV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
// CHECK: call void @__kmpc_end_critical(

// var1 = var1.operator &&(var1_reduction);
// CHECK: call void @__kmpc_critical(
// CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_REF]])
// CHECK: [[VAR1_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_PRIV]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = uitofp i1 [[COND_LVALUE]] to float
// CHECK: call void @{{.+}}([[S_FLOAT_TY]]* [[COND_LVALUE:%.+]], float [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
// CHECK: call void @__kmpc_end_critical(

// t_var1 = min(t_var1, t_var1_reduction);
// CHECK: load float, float* [[T_VAR1_PRIV]]
// CHECK: [[T_VAR1_REF_INT:%.+]] = bitcast float* [[T_VAR1_REF]] to i32*
// CHECK: [[OLD1:%.+]] = load atomic i32, i32* [[T_VAR1_REF_INT]] monotonic,
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[ORIG_OLD_INT:%.+]] = phi i32 [ [[OLD1]], %{{.+}} ], [ [[OLD2:%.+]], %{{.+}} ]
// CHECK: [[CMP:%.+]] = fcmp olt float
// CHECK: br i1 [[CMP]]
// CHECK: phi float
// CHECK: [[UP_INT:%.+]] = load i32
// CHECK: [[T_VAR1_REF_INT:%.+]] = bitcast float* [[T_VAR1_REF]] to i32*
// CHECK: [[RES:%.+]] = cmpxchg i32* [[T_VAR1_REF_INT]], i32 [[ORIG_OLD_INT]], i32 [[UP_INT]] monotonic monotonic
// CHECK: [[OLD2:%.+]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[ATOMIC_DONE:.+]], label %[[CONT]]
// CHECK: [[ATOMIC_DONE]]
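// There is no floating-point form of atomicrmw available here, so for the
// float reductions above (fadd and min) codegen falls back to a
// load/compute/cmpxchg retry loop; the integer instantiation in the tmain
// microtask below can use plain 'atomicrmw add'/'atomicrmw min' instead.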

// __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
// CHECK: call void @__kmpc_end_reduce(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]])

// break;
// CHECK: br label %[[RED_DONE]]
// CHECK: [[RED_DONE]]
// CHECK-DAG: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*
// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])

// CHECK: ret void

// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
// *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
// ...
// *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
// *(Type<n>-1*)rhs[<n>-1]);
// }
// CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*)
// t_var_lhs = (float*)lhs[0];
// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i32 0, i32 0
// CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]],
// CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to float*
// t_var_rhs = (float*)rhs[0];
// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i32 0, i32 0
// CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]],
// CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to float*

// var_lhs = (S<float>*)lhs[1];
// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 1
// CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]],
// CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_FLOAT_TY]]*
// var_rhs = (S<float>*)rhs[1];
// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 1
// CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]],
// CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_FLOAT_TY]]*

// var1_lhs = (S<float>*)lhs[2];
// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 2
// CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]],
// CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_FLOAT_TY]]*
// var1_rhs = (S<float>*)rhs[2];
// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 2
// CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]],
// CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_FLOAT_TY]]*

// t_var1_lhs = (float*)lhs[3];
// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 3
// CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]],
// CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to float*
// t_var1_rhs = (float*)rhs[3];
// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 3
// CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]],
// CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to float*

// t_var_lhs += t_var_rhs;
// CHECK: [[T_VAR_LHS_VAL:%.+]] = load float, float* [[T_VAR_LHS]],
// CHECK: [[T_VAR_RHS_VAL:%.+]] = load float, float* [[T_VAR_RHS]],
// CHECK: [[UP:%.+]] = fadd float [[T_VAR_LHS_VAL]], [[T_VAR_RHS_VAL]]
// CHECK: store float [[UP]], float* [[T_VAR_LHS]],

// var_lhs = var_lhs.operator &(var_rhs);
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @{{.+}}([[S_FLOAT_TY]]* [[VAR_LHS]], [[S_FLOAT_TY]]* dereferenceable(4) [[VAR_RHS]])
// CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_LHS]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// var1_lhs = var1_lhs.operator &&(var1_rhs);
// CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_LHS]])
// CHECK: [[VAR1_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_RHS]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = uitofp i1 [[COND_LVALUE]] to float
// CHECK: call void @{{.+}}([[S_FLOAT_TY]]* [[COND_LVALUE:%.+]], float [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_LHS]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// t_var1_lhs = min(t_var1_lhs, t_var1_rhs);
// CHECK: [[T_VAR1_LHS_VAL:%.+]] = load float, float* [[T_VAR1_LHS]],
// CHECK: [[T_VAR1_RHS_VAL:%.+]] = load float, float* [[T_VAR1_RHS]],
// CHECK: [[CMP:%.+]] = fcmp olt float [[T_VAR1_LHS_VAL]], [[T_VAR1_RHS_VAL]]
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi float
// CHECK: store float [[UP]], float* [[T_VAR1_LHS]],
// CHECK: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: %{{.+}} = bitcast [[CAP_TMAIN_TY]]*
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [[CAP_TMAIN_TY]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call {{.*}} [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* [[GTID_ADDR:%.+]], i{{[0-9]+}}* %{{.+}}, [[CAP_TMAIN_TY]]* %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]],
// CHECK: [[VAR1_PRIV:%.+]] = alloca [[S_INT_TY]],
// CHECK: [[T_VAR1_PRIV:%.+]] = alloca i{{[0-9]+}},

// Reduction list for runtime.
// CHECK: [[RED_LIST:%.+]] = alloca [4 x i8*],

// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],

// CHECK: [[T_VAR_PTR_REF:%.+]] = getelementptr inbounds [[CAP_TMAIN_TY]], [[CAP_TMAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} {{[0-9]+}}
// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[T_VAR_PTR_REF]],
// For the + reduction operation the initial value of the private variable is 0.
// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* [[T_VAR_PRIV]],

// CHECK: [[VAR_PTR_REF:%.+]] = getelementptr inbounds [[CAP_TMAIN_TY]], [[CAP_TMAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} {{[0-9]+}}
// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[VAR_PTR_REF:%.+]],
// For the & reduction operation the initial value of the private variable has all bits set.
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[VAR_PRIV]])

// CHECK: [[VAR1_PTR_REF:%.+]] = getelementptr inbounds [[CAP_TMAIN_TY]], [[CAP_TMAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} {{[0-9]+}}
// CHECK: [[VAR1_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[VAR_PTR_REF:%.+]],
// For the && reduction operation the initial value of the private variable is 1.0.
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[VAR1_PRIV]])

// CHECK: [[T_VAR1_PTR_REF:%.+]] = getelementptr inbounds [[CAP_TMAIN_TY]], [[CAP_TMAIN_TY]]* %{{.+}}, i{{[0-9]+}} 0, i{{[0-9]+}} {{[0-9]+}}
// CHECK: [[T_VAR1_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[T_VAR1_PTR_REF]],
// For the min reduction operation the initial value of the private variable is the largest representable value.
// CHECK: store i{{[0-9]+}} 2147483647, i{{[0-9]+}}* [[T_VAR1_PRIV]],

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_for_static_init_4(
// Skip checks for internal operations.
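// Same protocol as in [[MAIN_MICROTASK]], except that the first worksharing
// directive in tmain carries a 'nowait' clause, so the runtime entry points
// are __kmpc_reduce_nowait/__kmpc_end_reduce_nowait, and the atomic branch
// (case 2) does not emit an __kmpc_end_reduce_nowait call at all.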
// CHECK: call void @__kmpc_for_static_fini(

// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};

// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 0
// CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]],
// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 1
// CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]],
// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 2
// CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]],
// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i32 0, i32 3
// CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]],

// res = __kmpc_reduce_nowait(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);

// CHECK: [[BITCAST:%.+]] = bitcast [4 x i8*]* [[RED_LIST]] to i8*
// CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 4, i64 32, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]])

// switch(res)
// CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [
// CHECK: i32 1, label %[[CASE1:.+]]
// CHECK: i32 2, label %[[CASE2:.+]]
// CHECK: ]

// case 1:
// t_var += t_var_reduction;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_REF]],
// CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: [[UP:%.+]] = add nsw i{{[0-9]+}} [[T_VAR_VAL]], [[T_VAR_PRIV_VAL]]
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR_REF]],

// var = var.operator &(var_reduction);
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* dereferenceable(4) [[VAR_PRIV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// var1 = var1.operator &&(var1_reduction);
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_REF]])
// CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_PRIV]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
// CHECK: call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// t_var1 = min(t_var1, t_var1_reduction);
// CHECK: [[T_VAR1_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_REF]],
// CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_PRIV]],
// CHECK: [[CMP:%.+]] = icmp slt i{{[0-9]+}} [[T_VAR1_VAL]], [[T_VAR1_PRIV_VAL]]
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi i32
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR1_REF]],

// __kmpc_end_reduce_nowait(<loc>, <gtid>, &<lock>);
// CHECK: call void @__kmpc_end_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]])

// break;
// CHECK: br label %[[RED_DONE]]

// case 2:
// t_var += t_var_reduction;
// CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]]
// CHECK: atomicrmw add i32* [[T_VAR_REF]], i32 [[T_VAR_PRIV_VAL]] monotonic

// var = var.operator &(var_reduction);
// CHECK: call void @__kmpc_critical(
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* dereferenceable(4) [[VAR_PRIV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
// CHECK: call void @__kmpc_end_critical(

// var1 = var1.operator &&(var1_reduction);
// CHECK: call void @__kmpc_critical(
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_REF]])
// CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_PRIV]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
// CHECK: call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
// CHECK: call void @__kmpc_end_critical(

// t_var1 = min(t_var1, t_var1_reduction);
// CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_PRIV]]
// CHECK: atomicrmw min i32* [[T_VAR1_REF]], i32 [[T_VAR1_PRIV_VAL]] monotonic

// break;
// CHECK: br label %[[RED_DONE]]
// CHECK: [[RED_DONE]]
// CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]*
// CHECK: call i32 @__kmpc_cancel_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
// *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
// ...
// *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
// *(Type<n>-1*)rhs[<n>-1]);
// }
// CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*)
// t_var_lhs = (i{{[0-9]+}}*)lhs[0];
// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i32 0, i32 0
// CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]],
// CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to i{{[0-9]+}}*
// t_var_rhs = (i{{[0-9]+}}*)rhs[0];
// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i32 0, i32 0
// CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]],
// CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to i{{[0-9]+}}*

// var_lhs = (S<i{{[0-9]+}}>*)lhs[1];
// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 1
// CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]],
// CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_INT_TY]]*
// var_rhs = (S<i{{[0-9]+}}>*)rhs[1];
// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 1
// CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]],
// CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_INT_TY]]*

// var1_lhs = (S<i{{[0-9]+}}>*)lhs[2];
// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 2
// CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]],
// CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_INT_TY]]*
// var1_rhs = (S<i{{[0-9]+}}>*)rhs[2];
// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 2
// CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]],
// CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_INT_TY]]*

// t_var1_lhs = (i{{[0-9]+}}*)lhs[3];
// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i32 0, i32 3
// CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]],
// CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to i{{[0-9]+}}*
// t_var1_rhs = (i{{[0-9]+}}*)rhs[3];
// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i32 0, i32 3
// CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]],
// CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to i{{[0-9]+}}*

// t_var_lhs += t_var_rhs;
// CHECK: [[T_VAR_LHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_LHS]],
// CHECK: [[T_VAR_RHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_RHS]],
// CHECK: [[UP:%.+]] = add nsw i{{[0-9]+}} [[T_VAR_LHS_VAL]], [[T_VAR_RHS_VAL]]
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR_LHS]],

// var_lhs = var_lhs.operator &(var_rhs);
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_LHS]], [[S_INT_TY]]* dereferenceable(4) [[VAR_RHS]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_LHS]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// var1_lhs = var1_lhs.operator &&(var1_rhs);
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_LHS]])
// CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_RHS]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
// CHECK: call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_LHS]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// t_var1_lhs = min(t_var1_lhs, t_var1_rhs);
// CHECK: [[T_VAR1_LHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_LHS]],
// CHECK: [[T_VAR1_RHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_RHS]],
// CHECK: [[CMP:%.+]] = icmp slt i{{[0-9]+}} [[T_VAR1_LHS_VAL]], [[T_VAR1_RHS_VAL]]
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi i32
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR1_LHS]],
// CHECK: ret void

#endif