// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// REQUIRES: x86-registered-target
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

volatile int g __attribute__((aligned(128))) = 1212;

template <class T>
struct S {
  T f;
  S(T a) : f(a + g) {}
  S() : f(g) {}
  operator T() { return T(); }
  S &operator&(const S &) { return *this; }
  ~S() {}
};

// CHECK-DAG: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK-DAG: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
// CHECK-DAG: [[REDUCTION_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 18, i32 0, i32 0, i8*
// CHECK-DAG: [[REDUCTION_LOCK:@.+]] = common global [8 x i32] zeroinitializer

template <typename T>
T tmain() {
  T t;
  S<T> test;
  T t_var __attribute__((aligned(128))) = T(), t_var1 __attribute__((aligned(128)));
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> var __attribute__((aligned(128))) (3), var1 __attribute__((aligned(128)));
#pragma omp parallel reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1)
  {
    vec[0] = t_var;
    s_arr[0] = var;
  }
  return T();
}

int main() {
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // LAMBDA-LABEL: @main
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]](
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i32* [[G]])
#pragma omp parallel reduction(+:g)
  {
    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* dereferenceable(4) %{{.+}})
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},

    // Reduction list for runtime.
    // LAMBDA: [[RED_LIST:%.+]] = alloca [1 x i8*],

    // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_REF_ADDR:%.+]]
    // LAMBDA: store i{{[0-9]+}} 0, i{{[0-9]+}}* [[G_PRIVATE_ADDR]], align 128
    g = 1;
    // LAMBDA: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]], align 128
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store i{{[0-9]+}}* [[G_PRIVATE_ADDR]], i{{[0-9]+}}** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])

    // LAMBDA: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i64 0, i64 0
    // LAMBDA: [[BITCAST:%.+]] = bitcast i32* [[G_PRIVATE_ADDR]] to i8*
    // LAMBDA: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
    // LAMBDA: call i32 @__kmpc_reduce_nowait(
    // LAMBDA: switch i32 %{{.+}}, label %[[REDUCTION_DONE:.+]] [
    // LAMBDA: i32 1, label %[[CASE1:.+]]
    // LAMBDA: i32 2, label %[[CASE2:.+]]
    // LAMBDA: [[CASE1]]
    // LAMBDA: [[G_VAL:%.+]] = load i32, i32* [[G_REF]]
    // LAMBDA: [[G_PRIV_VAL:%.+]] = load i32, i32* [[G_PRIVATE_ADDR]]
    // LAMBDA: [[ADD:%.+]] = add nsw i32 [[G_VAL]], [[G_PRIV_VAL]]
    // LAMBDA: store i32 [[ADD]], i32* [[G_REF]]
    // LAMBDA: call void @__kmpc_end_reduce_nowait(
    // LAMBDA: br label %[[REDUCTION_DONE]]
    // LAMBDA: [[CASE2]]
    // LAMBDA: [[G_PRIV_VAL:%.+]] = load i32, i32* [[G_PRIVATE_ADDR]]
    // LAMBDA: atomicrmw add i32* [[G_REF]], i32 [[G_PRIV_VAL]] monotonic
    // LAMBDA: br label %[[REDUCTION_DONE]]
    // LAMBDA: [[REDUCTION_DONE]]
    // LAMBDA: ret void
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_REF]]
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // BLOCKS-LABEL: @main
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}}, i32* [[G]])
#pragma omp parallel reduction(-:g)
  {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* dereferenceable(4) %{{.+}})
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},

    // Reduction list for runtime.
    // BLOCKS: [[RED_LIST:%.+]] = alloca [1 x i8*],

    // BLOCKS: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_REF_ADDR:%.+]]
    // BLOCKS: store i{{[0-9]+}} 0, i{{[0-9]+}}* [[G_PRIVATE_ADDR]], align 128
    g = 1;
    // BLOCKS: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]], align 128
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: i{{[0-9]+}}* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: call void {{%.+}}(i8

    // BLOCKS: [[G_PRIV_REF:%.+]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[RED_LIST]], i64 0, i64 0
    // BLOCKS: [[BITCAST:%.+]] = bitcast i32* [[G_PRIVATE_ADDR]] to i8*
    // BLOCKS: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
    // BLOCKS: call i32 @__kmpc_reduce_nowait(
    // BLOCKS: switch i32 %{{.+}}, label %[[REDUCTION_DONE:.+]] [
    // BLOCKS: i32 1, label %[[CASE1:.+]]
    // BLOCKS: i32 2, label %[[CASE2:.+]]
    // BLOCKS: [[CASE1]]
    // BLOCKS: [[G_VAL:%.+]] = load i32, i32* [[G_REF]]
    // BLOCKS: [[G_PRIV_VAL:%.+]] = load i32, i32* [[G_PRIVATE_ADDR]]
    // BLOCKS: [[ADD:%.+]] = add nsw i32 [[G_VAL]], [[G_PRIV_VAL]]
    // BLOCKS: store i32 [[ADD]], i32* [[G_REF]]
    // BLOCKS: call void @__kmpc_end_reduce_nowait(
    // BLOCKS: br label %[[REDUCTION_DONE]]
    // BLOCKS: [[CASE2]]
    // BLOCKS: [[G_PRIV_VAL:%.+]] = load i32, i32* [[G_PRIVATE_ADDR]]
    // BLOCKS: atomicrmw add i32* [[G_REF]], i32 [[G_PRIV_VAL]] monotonic
    // BLOCKS: br label %[[REDUCTION_DONE]]
    // BLOCKS: [[REDUCTION_DONE]]
    // BLOCKS: ret void
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: store i{{[0-9]+}} 2, i{{[0-9]+}}*
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
#else
  S<float> test;
  float t_var = 0, t_var1;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3), var1;
  float _Complex cf;
#pragma omp parallel reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1)
  {
    vec[0] = t_var;
    s_arr[0] = var;
  }
  if (var1)
#pragma omp parallel reduction(+ : t_var) reduction(& : var) reduction(&& : var1) reduction(min : t_var1)
    while (1) {
      vec[0] = t_var;
      s_arr[0] = var;
    }
#pragma omp parallel reduction(+ : cf)
  ;
  return tmain<int>();
#endif
}

// CHECK: define {{.*}}i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 6, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [2 x i32]*, float*, [2 x [[S_FLOAT_TY]]]*, [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]*, float*)* [[MAIN_MICROTASK:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 6, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [2 x i32]*, float*, [2 x [[S_FLOAT_TY]]]*, [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]*, float*)* [[MAIN_MICROTASK1:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 1, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, { float, float }*)* [[MAIN_MICROTASK2:@.+]] to void
// CHECK: = call {{.*}}i{{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call {{.*}} [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca float,
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: [[VAR1_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: [[T_VAR1_PRIV:%.+]] = alloca float,

// Reduction list for runtime.
// CHECK: [[RED_LIST:%.+]] = alloca [4 x i8*],

// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],

// CHECK: [[T_VAR_REF:%.+]] = load float*, float** %
// CHECK: [[VAR_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** %
// CHECK: [[VAR1_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** %
// CHECK: [[T_VAR1_REF:%.+]] = load float*, float** %

// For + reduction operation initial value of private variable is 0.
// CHECK: store float 0.0{{.+}}, float* [[T_VAR_PRIV]],

// For & reduction operation initial value of private variable is ones in all bits.
// CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[VAR_PRIV]])

// For && reduction operation initial value of private variable is 1.0.
// CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[VAR1_PRIV]])

// For min reduction operation initial value of private variable is largest representable value.
// CHECK: store float 0x47EFFFFFE0000000, float* [[T_VAR1_PRIV]],

// Skip checks for internal operations.
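
// For reference, the runtime entry point exercised below has roughly this C
// signature in the LLVM OpenMP runtime (types and parameter names here are
// illustrative and are not checked by this test):
//
//   kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32 gtid,
//                                  kmp_int32 num_vars, size_t reduce_size,
//                                  void *reduce_data,
//                                  void (*reduce_func)(void *lhs, void *rhs),
//                                  kmp_critical_name *lck);
//
// It returns 1 when the caller should reduce directly into the original
// variables (and then call __kmpc_end_reduce_nowait), 2 when it should use
// the atomic/critical fallback, and 0 when no further work is needed.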

// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};

// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0
// CHECK: [[BITCAST:%.+]] = bitcast float* [[T_VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]],
// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1
// CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]],
// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2
// CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]],
// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3
// CHECK: [[BITCAST:%.+]] = bitcast float* [[T_VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]],

// res = __kmpc_reduce_nowait(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast [4 x i8*]* [[RED_LIST]] to i8*
// CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 4, i64 32, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]])

// switch(res)
// CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [
// CHECK: i32 1, label %[[CASE1:.+]]
// CHECK: i32 2, label %[[CASE2:.+]]
// CHECK: ]

// case 1:
// t_var += t_var_reduction;
// CHECK: [[T_VAR_VAL:%.+]] = load float, float* [[T_VAR_REF]],
// CHECK: [[T_VAR_PRIV_VAL:%.+]] = load float, float* [[T_VAR_PRIV]],
// CHECK: [[UP:%.+]] = fadd float [[T_VAR_VAL]], [[T_VAR_PRIV_VAL]]
// CHECK: store float [[UP]], float* [[T_VAR_REF]],

// var = var.operator &(var_reduction);
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @{{.+}}([[S_FLOAT_TY]]* [[VAR_REF]], [[S_FLOAT_TY]]* dereferenceable(4) [[VAR_PRIV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// var1 = var1.operator &&(var1_reduction);
// CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_REF]])
// CHECK: [[VAR1_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_PRIV]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = uitofp i1 [[COND_LVALUE]] to float
// CHECK: call void @{{.+}}([[S_FLOAT_TY]]* [[COND_LVALUE:%.+]], float [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// t_var1 = min(t_var1, t_var1_reduction);
// CHECK: [[T_VAR1_VAL:%.+]] = load float, float* [[T_VAR1_REF]],
// CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load float, float* [[T_VAR1_PRIV]],
// CHECK: [[CMP:%.+]] = fcmp olt float [[T_VAR1_VAL]], [[T_VAR1_PRIV_VAL]]
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi float
// CHECK: store float [[UP]], float* [[T_VAR1_REF]],

// __kmpc_end_reduce_nowait(<loc>, <gtid>, &<lock>);
// CHECK: call void @__kmpc_end_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]])

// break;
// CHECK: br label %[[RED_DONE]]

// case 2:
// t_var += t_var_reduction;
// CHECK: load float, float* [[T_VAR_PRIV]]
// CHECK: [[T_VAR_REF_INT:%.+]] = bitcast float* [[T_VAR_REF]] to i32*
// CHECK: [[OLD1:%.+]] = load atomic i32, i32* [[T_VAR_REF_INT]] monotonic,
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[ORIG_OLD_INT:%.+]] = phi i32 [ [[OLD1]], %{{.+}} ], [ [[OLD2:%.+]], %[[CONT]] ]
// CHECK: fadd float
// CHECK: [[UP_INT:%.+]] = load i32
// CHECK: [[T_VAR_REF_INT:%.+]] = bitcast float* [[T_VAR_REF]] to i32*
// CHECK: [[RES:%.+]] = cmpxchg i32* [[T_VAR_REF_INT]], i32 [[ORIG_OLD_INT]], i32 [[UP_INT]] monotonic monotonic
// CHECK: [[OLD2:%.+]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[ATOMIC_DONE:.+]], label %[[CONT]]
// CHECK: [[ATOMIC_DONE]]

// var = var.operator &(var_reduction);
// CHECK: call void @__kmpc_critical(
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @{{.+}}([[S_FLOAT_TY]]* [[VAR_REF]], [[S_FLOAT_TY]]* dereferenceable(4) [[VAR_PRIV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
// CHECK: call void @__kmpc_end_critical(

// var1 = var1.operator &&(var1_reduction);
// CHECK: call void @__kmpc_critical(
// CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_REF]])
// CHECK: [[VAR1_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_PRIV]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = uitofp i1 [[COND_LVALUE]] to float
// CHECK: call void @{{.+}}([[S_FLOAT_TY]]* [[COND_LVALUE:%.+]], float [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
// CHECK: call void @__kmpc_end_critical(

// t_var1 = min(t_var1, t_var1_reduction);
// CHECK: load float, float* [[T_VAR1_PRIV]]
// CHECK: [[T_VAR1_REF_INT:%.+]] = bitcast float* [[T_VAR1_REF]] to i32*
// CHECK: [[OLD1:%.+]] = load atomic i32, i32* [[T_VAR1_REF_INT]] monotonic,
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[ORIG_OLD_INT:%.+]] = phi i32 [ [[OLD1]], %{{.+}} ], [ [[OLD2:%.+]], %{{.+}} ]
// CHECK: [[CMP:%.+]] = fcmp olt float
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi float
// CHECK: [[UP_INT:%.+]] = load i32
// CHECK: [[T_VAR1_REF_INT:%.+]] = bitcast float* [[T_VAR1_REF]] to i32*
// CHECK: [[RES:%.+]] = cmpxchg i32* [[T_VAR1_REF_INT]], i32 [[ORIG_OLD_INT]], i32 [[UP_INT]] monotonic monotonic
// CHECK: [[OLD2:%.+]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[ATOMIC_DONE:.+]], label %[[CONT]]
// CHECK: [[ATOMIC_DONE]]

// break;
// CHECK: br label %[[RED_DONE]]
// CHECK: [[RED_DONE]]

// CHECK-DAG: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*
// CHECK: ret void

// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
//  *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
//  ...
//  *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
//  *(Type<n>-1*)rhs[<n>-1]);
// }
// CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*)
// t_var_lhs = (float*)lhs[0];
// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]],
// CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to float*
// t_var_rhs = (float*)rhs[0];
// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]],
// CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to float*

// var_lhs = (S<float>*)lhs[1];
// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 1
// CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]],
// CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_FLOAT_TY]]*
// var_rhs = (S<float>*)rhs[1];
// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1
// CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]],
// CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_FLOAT_TY]]*

// var1_lhs = (S<float>*)lhs[2];
// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2
// CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]],
// CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_FLOAT_TY]]*
// var1_rhs = (S<float>*)rhs[2];
// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2
// CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]],
// CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_FLOAT_TY]]*

// t_var1_lhs = (float*)lhs[3];
// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 3
// CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]],
// CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to float*
// t_var1_rhs = (float*)rhs[3];
// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3
// CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]],
// CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to float*

// t_var_lhs += t_var_rhs;
// CHECK: [[T_VAR_LHS_VAL:%.+]] = load float, float* [[T_VAR_LHS]],
// CHECK: [[T_VAR_RHS_VAL:%.+]] = load float, float* [[T_VAR_RHS]],
// CHECK: [[UP:%.+]] = fadd float [[T_VAR_LHS_VAL]], [[T_VAR_RHS_VAL]]
// CHECK: store float [[UP]], float* [[T_VAR_LHS]],

// var_lhs = var_lhs.operator &(var_rhs);
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @{{.+}}([[S_FLOAT_TY]]* [[VAR_LHS]], [[S_FLOAT_TY]]* dereferenceable(4) [[VAR_RHS]])
// CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_LHS]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// var1_lhs = var1_lhs.operator &&(var1_rhs);
// CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_LHS]])
// CHECK: [[VAR1_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_RHS]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = uitofp i1 [[COND_LVALUE]] to float
// CHECK: call void @{{.+}}([[S_FLOAT_TY]]* [[COND_LVALUE:%.+]], float [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_LHS]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// t_var1_lhs = min(t_var1_lhs, t_var1_rhs);
// CHECK: [[T_VAR1_LHS_VAL:%.+]] = load float, float* [[T_VAR1_LHS]],
// CHECK: [[T_VAR1_RHS_VAL:%.+]] = load float, float* [[T_VAR1_RHS]],
// CHECK: [[CMP:%.+]] = fcmp olt float [[T_VAR1_LHS_VAL]], [[T_VAR1_RHS_VAL]]
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi float
// CHECK: store float [[UP]], float* [[T_VAR1_LHS]],
// CHECK: ret void

// CHECK: define internal void [[MAIN_MICROTASK1]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca float,
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: [[VAR1_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: [[T_VAR1_PRIV:%.+]] = alloca float,

// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],

// CHECK: [[T_VAR_REF:%.+]] = load float*, float** %
// CHECK: [[VAR_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** %
// CHECK: [[VAR1_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** %
// CHECK: [[T_VAR1_REF:%.+]] = load float*, float** %

// For + reduction operation initial value of private variable is 0.
// CHECK: store float 0.0{{.+}}, float* [[T_VAR_PRIV]],

// For & reduction operation initial value of private variable is ones in all bits.
// CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[VAR_PRIV]])

// For && reduction operation initial value of private variable is 1.0.
// CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[VAR1_PRIV]])

// For min reduction operation initial value of private variable is largest representable value.
// CHECK: store float 0x47EFFFFFE0000000, float* [[T_VAR1_PRIV]],

// CHECK-NOT: call i32 @__kmpc_reduce

// CHECK: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 6, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, [2 x i32]*, i32*, [2 x [[S_INT_TY]]]*, [[S_INT_TY]]*, [[S_INT_TY]]*, i32*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call {{.*}} [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}}, align 128
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]], align 128
// CHECK: [[VAR1_PRIV:%.+]] = alloca [[S_INT_TY]], align 128
// CHECK: [[T_VAR1_PRIV:%.+]] = alloca i{{[0-9]+}}, align 128

// Reduction list for runtime.
// CHECK: [[RED_LIST:%.+]] = alloca [4 x i8*],

// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
// CHECK: [[VAR1_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
// CHECK: [[T_VAR1_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %

// For + reduction operation initial value of private variable is 0.
// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* [[T_VAR_PRIV]],

// For & reduction operation initial value of private variable is ones in all bits.
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[VAR_PRIV]])

// For && reduction operation initial value of private variable is 1.0.
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[VAR1_PRIV]])

// For min reduction operation initial value of private variable is largest representable value.
// CHECK: store i{{[0-9]+}} 2147483647, i{{[0-9]+}}* [[T_VAR1_PRIV]],

// Skip checks for internal operations.
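
// As an illustrative sketch only (not matched by FileCheck), the epilogue
// verified below behaves roughly like the following pseudo-C for the <int>
// instantiation; local names are invented for readability:
//
//   void *red_list[4] = {&t_var_priv, &var_priv, &var1_priv, &t_var1_priv};
//   switch (__kmpc_reduce_nowait(loc, gtid, 4, sizeof red_list, red_list,
//                                reduce_func, &reduction_lock)) {
//   case 1: // reduce into the shared copies, then signal completion
//     t_var += t_var_priv;
//     var = var & var_priv;
//     var1 = var1 && var1_priv;
//     t_var1 = t_var1 < t_var1_priv ? t_var1 : t_var1_priv;
//     __kmpc_end_reduce_nowait(loc, gtid, &reduction_lock);
//     break;
//   case 2: // contended fallback: atomics where possible, critical otherwise
//     // atomicrmw add for t_var, atomicrmw min for t_var1,
//     // __kmpc_critical/__kmpc_end_critical around the class-type operators.
//     break;
//   }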

// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};

// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0
// CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]],
// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1
// CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]],
// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2
// CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]],
// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3
// CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]],

// res = __kmpc_reduce_nowait(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast [4 x i8*]* [[RED_LIST]] to i8*
// CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 4, i64 32, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]])

// switch(res)
// CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [
// CHECK: i32 1, label %[[CASE1:.+]]
// CHECK: i32 2, label %[[CASE2:.+]]
// CHECK: ]

// case 1:
// t_var += t_var_reduction;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_REF]],
// CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: [[UP:%.+]] = add nsw i{{[0-9]+}} [[T_VAR_VAL]], [[T_VAR_PRIV_VAL]]
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR_REF]],

// var = var.operator &(var_reduction);
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* dereferenceable(4) [[VAR_PRIV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// var1 = var1.operator &&(var1_reduction);
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_REF]])
// CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_PRIV]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
// CHECK: call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// t_var1 = min(t_var1, t_var1_reduction);
// CHECK: [[T_VAR1_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_REF]],
// CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_PRIV]],
// CHECK: [[CMP:%.+]] = icmp slt i{{[0-9]+}} [[T_VAR1_VAL]], [[T_VAR1_PRIV_VAL]]
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi i32
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR1_REF]],

// __kmpc_end_reduce_nowait(<loc>, <gtid>, &<lock>);
// CHECK: call void @__kmpc_end_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]])

// break;
// CHECK: br label %[[RED_DONE]]

// case 2:
// t_var += t_var_reduction;
// CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]]
// CHECK: atomicrmw add i32* [[T_VAR_REF]], i32 [[T_VAR_PRIV_VAL]] monotonic

// var = var.operator &(var_reduction);
// CHECK: call void @__kmpc_critical(
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* dereferenceable(4) [[VAR_PRIV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
// CHECK: call void @__kmpc_end_critical(

// var1 = var1.operator &&(var1_reduction);
// CHECK: call void @__kmpc_critical(
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_REF]])
// CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_PRIV]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
// CHECK: call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
// CHECK: call void @__kmpc_end_critical(

// t_var1 = min(t_var1, t_var1_reduction);
// CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_PRIV]]
// CHECK: atomicrmw min i32* [[T_VAR1_REF]], i32 [[T_VAR1_PRIV_VAL]] monotonic

// break;
// CHECK: br label %[[RED_DONE]]
// CHECK: [[RED_DONE]]

// CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]*
// CHECK: ret void

// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
//  *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
//  ...
//  *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
//  *(Type<n>-1*)rhs[<n>-1]);
// }
// CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*)
// t_var_lhs = (i{{[0-9]+}}*)lhs[0];
// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]],
// CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to i{{[0-9]+}}*
// t_var_rhs = (i{{[0-9]+}}*)rhs[0];
// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]],
// CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to i{{[0-9]+}}*

// var_lhs = (S<i{{[0-9]+}}>*)lhs[1];
// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 1
// CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]],
// CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_INT_TY]]*
// var_rhs = (S<i{{[0-9]+}}>*)rhs[1];
// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1
// CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]],
// CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_INT_TY]]*

// var1_lhs = (S<i{{[0-9]+}}>*)lhs[2];
// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2
// CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]],
// CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_INT_TY]]*
// var1_rhs = (S<i{{[0-9]+}}>*)rhs[2];
// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2
// CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]],
// CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_INT_TY]]*

// t_var1_lhs = (i{{[0-9]+}}*)lhs[3];
// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 3
// CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]],
// CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to i{{[0-9]+}}*
// t_var1_rhs = (i{{[0-9]+}}*)rhs[3];
// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3
// CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]],
// CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to i{{[0-9]+}}*

// t_var_lhs += t_var_rhs;
// CHECK: [[T_VAR_LHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_LHS]],
// CHECK: [[T_VAR_RHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_RHS]],
// CHECK: [[UP:%.+]] = add nsw i{{[0-9]+}} [[T_VAR_LHS_VAL]], [[T_VAR_RHS_VAL]]
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR_LHS]],

// var_lhs = var_lhs.operator &(var_rhs);
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_LHS]], [[S_INT_TY]]* dereferenceable(4) [[VAR_RHS]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_LHS]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// var1_lhs = var1_lhs.operator &&(var1_rhs);
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_LHS]])
// CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_RHS]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32
// CHECK: call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_LHS]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// t_var1_lhs = min(t_var1_lhs, t_var1_rhs);
// CHECK: [[T_VAR1_LHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_LHS]],
// CHECK: [[T_VAR1_RHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_RHS]],
// CHECK: [[CMP:%.+]] = icmp slt i{{[0-9]+}} [[T_VAR1_LHS_VAL]], [[T_VAR1_RHS_VAL]]
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi i32
// CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR1_LHS]],
// CHECK: ret void

#endif
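
// An illustrative (unchecked) pseudo-C rendering of the reduce_func described
// by the checks above, specialized to the float instantiation; parameter and
// index names are invented for readability:
//
//   static void reduce_func(void *lhs[4], void *rhs[4]) {
//     *(float *)lhs[0] += *(float *)rhs[0];                                    // +
//     *(S<float> *)lhs[1] = *(S<float> *)lhs[1] & *(S<float> *)rhs[1];         // operator&
//     *(S<float> *)lhs[2] = *(S<float> *)lhs[2] && *(S<float> *)rhs[2];        // operator&&
//     *(float *)lhs[3] = *(float *)lhs[3] < *(float *)rhs[3] ? *(float *)lhs[3]
//                                                            : *(float *)rhs[3]; // min
//   }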