// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// expected-no-diagnostics
// REQUIRES: x86-registered-target
#ifndef HEADER
#define HEADER

volatile double g, g_orig;
volatile double &g1 = g_orig;

template <class T>
struct S {
  T f;
  S(T a) : f(a + g) {}
  S() : f(g) {}
  operator T() { return T(); }
  S &operator&(const S &) { return *this; }
  ~S() {}
};

// CHECK-DAG: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK-DAG: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
// CHECK-DAG: [[ATOMIC_REDUCE_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 18, i32 0, i32 0, i8*
// CHECK-DAG: [[IMPLICIT_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 66, i32 0, i32 0, i8*
// CHECK-DAG: [[REDUCTION_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 18, i32 0, i32 0, i8*
// CHECK-DAG: [[REDUCTION_LOCK:@.+]] = common global [8 x i32] zeroinitializer

template <typename T>
T tmain() {
  T t;
  S<T> test;
  T t_var = T(), t_var1;
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> &var = test;
  S<T> var1;
#pragma omp parallel
#pragma omp for reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1) nowait
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
#pragma omp parallel
#pragma omp for reduction(&& : t_var)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
  return T();
}

int main() {
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global double
  // LAMBDA-LABEL: @main
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]](
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 0, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp for reduction(+:g, g1)
  for (int i = 0; i < 2; ++i) {
    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca double,

    // Reduction list for runtime.
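    // A minimal sketch of what the outlined region does here (illustrative
    // pseudocode with made-up names, not literal IR):
    //   void *RedList[2] = { &g_private, &g1_private };
    //   res = __kmpc_reduce(<loc>, <gtid>, 2, sizeof(RedList), RedList, reduce_func, &<lock>);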
    // LAMBDA: [[RED_LIST:%.+]] = alloca [2 x i8*],

    // LAMBDA: store double 0.0{{.+}}, double* [[G_PRIVATE_ADDR]]
    // LAMBDA: call void @__kmpc_for_static_init_4(
    g = 1;
    g1 = 1;
    // LAMBDA: store double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store double* [[G_PRIVATE_ADDR]], double** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
    // LAMBDA: call void @__kmpc_for_static_fini(

    // LAMBDA: [[G_PRIV_REF:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[RED_LIST]], i64 0, i64 0
    // LAMBDA: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
    // LAMBDA: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
    // LAMBDA: call i32 @__kmpc_reduce(
    // LAMBDA: switch i32 %{{.+}}, label %[[REDUCTION_DONE:.+]] [
    // LAMBDA: i32 1, label %[[CASE1:.+]]
    // LAMBDA: i32 2, label %[[CASE2:.+]]
    // LAMBDA: [[CASE1]]
    // LAMBDA: [[G_VAL:%.+]] = load double, double* [[G]]
    // LAMBDA: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // LAMBDA: [[ADD:%.+]] = fadd double [[G_VAL]], [[G_PRIV_VAL]]
    // LAMBDA: store double [[ADD]], double* [[G]]
    // LAMBDA: call void @__kmpc_end_reduce(
    // LAMBDA: br label %[[REDUCTION_DONE]]
    // LAMBDA: [[CASE2]]
    // LAMBDA: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // LAMBDA: fadd double
    // LAMBDA: cmpxchg i64*
    // LAMBDA: call void @__kmpc_end_reduce(
    // LAMBDA: br label %[[REDUCTION_DONE]]
    // LAMBDA: [[REDUCTION_DONE]]
    // LAMBDA: ret void
    [&]() {
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      g1 = 2;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load double*, double** [[G_PTR_REF]]
      // LAMBDA: store double 2.0{{.+}}, double* [[G_REF]]
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  // BLOCKS: [[G:@.+]] = global double
  // BLOCKS-LABEL: @main
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 0, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp for reduction(-:g, g1)
  for (int i = 0; i < 2; ++i) {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias %{{.+}}, i32* noalias %{{.+}})
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca double,

    // Reduction list for runtime.
    // BLOCKS: [[RED_LIST:%.+]] = alloca [2 x i8*],

    // BLOCKS: store double 0.0{{.+}}, double* [[G_PRIVATE_ADDR]]
    g = 1;
    g1 = 1;
    // BLOCKS: call void @__kmpc_for_static_init_4(
    // BLOCKS: store double 1.0{{.+}}, double* [[G_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: double* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
    // BLOCKS: call void {{%.+}}(i8
    // BLOCKS: call void @__kmpc_for_static_fini(

    // BLOCKS: [[G_PRIV_REF:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[RED_LIST]], i64 0, i64 0
    // BLOCKS: [[BITCAST:%.+]] = bitcast double* [[G_PRIVATE_ADDR]] to i8*
    // BLOCKS: store i8* [[BITCAST]], i8** [[G_PRIV_REF]],
    // BLOCKS: call i32 @__kmpc_reduce(
    // BLOCKS: switch i32 %{{.+}}, label %[[REDUCTION_DONE:.+]] [
    // BLOCKS: i32 1, label %[[CASE1:.+]]
    // BLOCKS: i32 2, label %[[CASE2:.+]]
    // BLOCKS: [[CASE1]]
    // BLOCKS: [[G_VAL:%.+]] = load double, double* [[G]]
    // BLOCKS: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // BLOCKS: [[ADD:%.+]] = fadd double [[G_VAL]], [[G_PRIV_VAL]]
    // BLOCKS: store double [[ADD]], double* [[G]]
    // BLOCKS: call void @__kmpc_end_reduce(
    // BLOCKS: br label %[[REDUCTION_DONE]]
    // BLOCKS: [[CASE2]]
    // BLOCKS: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE_ADDR]]
    // BLOCKS: fadd double
    // BLOCKS: cmpxchg i64*
    // BLOCKS: call void @__kmpc_end_reduce(
    // BLOCKS: br label %[[REDUCTION_DONE]]
    // BLOCKS: [[REDUCTION_DONE]]
    // BLOCKS: ret void
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      g1 = 2;
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: store double 2.0{{.+}}, double*
      // BLOCKS-NOT: [[G]]{{[[^:word:]]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
#else
  S<float> test;
  float t_var = 0, t_var1;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> &var = test;
  S<float> var1, arrs[10][4];
#pragma omp parallel
#pragma omp for reduction(+:t_var) reduction(&:var) reduction(&& : var1) reduction(min: t_var1)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
  int arr[10][vec[1]];
#pragma omp parallel for reduction(+:arr[1][:vec[1]]) reduction(&:arrs[1:vec[1]][1:2])
  for (int i = 0; i < 10; ++i)
    ++arr[1][i];
#pragma omp parallel
#pragma omp for reduction(+:arr) reduction(&:arrs)
  for (int i = 0; i < 10; ++i)
    ++arr[1][i];
  return tmain<int>();
#endif
}

// CHECK: define {{.*}}i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 6, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, float*, [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]*, float*, [2 x i32]*, [2 x [[S_FLOAT_TY]]]*)* [[MAIN_MICROTASK:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 5, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i64, i64, i32*, [2 x i32]*, [10 x [4 x [[S_FLOAT_TY]]]]*)* [[MAIN_MICROTASK1:@.+]] to void
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i64, i64, i32*, [10 x [4 x [[S_FLOAT_TY]]]]*)* [[MAIN_MICROTASK2:@.+]] to void
// CHECK: = call {{.*}}i{{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call {{.*}} [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, float* dereferenceable(4) %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}}, float* dereferenceable(4) %{{.+}}, [2 x i32]* dereferenceable(8) %vec, [2 x [[S_FLOAT_TY]]]* dereferenceable(8) %{{.+}})
// CHECK: [[T_VAR_PRIV:%.+]] = alloca float,
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: [[VAR1_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: [[T_VAR1_PRIV:%.+]] = alloca float,

// Reduction list for runtime.
// CHECK: [[RED_LIST:%.+]] = alloca [4 x i8*],

// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],

// CHECK: [[T_VAR_REF:%.+]] = load float*, float** %
// CHECK: [[VAR1_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** %
// CHECK: [[T_VAR1_REF:%.+]] = load float*, float** %

// For the + reduction operation the initial value of the private variable is 0.
// CHECK: store float 0.0{{.+}}, float* [[T_VAR_PRIV]],

// For the & reduction operation the initial value of the private variable is all ones (every bit set).
// CHECK: [[VAR_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** %
// CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[VAR_PRIV]])

// For the && reduction operation the initial value of the private variable is 1.0.
// CHECK: call {{.*}} [[S_FLOAT_TY_CONSTR:@.+]]([[S_FLOAT_TY]]* [[VAR1_PRIV]])

// For the min reduction operation the initial value of the private variable is the largest representable value.
// CHECK: store float 0x47EFFFFFE0000000, float* [[T_VAR1_PRIV]],


// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_for_static_init_4(
// Skip checks for internal operations.
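// The checks below cover the reduction epilogue. In rough pseudocode (illustrative
// names only, not literal IR), the outlined function does:
//   void *RedList[4] = { &t_var_priv, &var_priv, &var1_priv, &t_var1_priv };
//   switch (__kmpc_reduce(<loc>, <gtid>, 4, sizeof(RedList), RedList, reduce_func, &<lock>)) {
//   case 1: <combine privates into the originals>; __kmpc_end_reduce(<loc>, <gtid>, &<lock>); break;
//   case 2: <atomic/critical per-variable updates>; break;
//   }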
// CHECK: call void @__kmpc_for_static_fini(

// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};

// CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0
// CHECK: [[BITCAST:%.+]] = bitcast float* [[T_VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]],
// CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1
// CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]],
// CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2
// CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]],
// CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3
// CHECK: [[BITCAST:%.+]] = bitcast float* [[T_VAR1_PRIV]] to i8*
// CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]],

// res = __kmpc_reduce(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>);

// CHECK: [[BITCAST:%.+]] = bitcast [4 x i8*]* [[RED_LIST]] to i8*
// CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 4, i64 32, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]])

// switch(res)
// CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [
// CHECK: i32 1, label %[[CASE1:.+]]
// CHECK: i32 2, label %[[CASE2:.+]]
// CHECK: ]

// case 1:
// t_var += t_var_reduction;
// CHECK: [[T_VAR_VAL:%.+]] = load float, float* [[T_VAR_REF]],
// CHECK: [[T_VAR_PRIV_VAL:%.+]] = load float, float* [[T_VAR_PRIV]],
// CHECK: [[UP:%.+]] = fadd float [[T_VAR_VAL]], [[T_VAR_PRIV_VAL]]
// CHECK: store float [[UP]], float* [[T_VAR_REF]],

// var = var.operator &(var_reduction);
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @{{.+}}([[S_FLOAT_TY]]* [[VAR_REF]], [[S_FLOAT_TY]]* dereferenceable(4) [[VAR_PRIV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// var1 = var1.operator &&(var1_reduction);
// CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_REF]])
// CHECK: [[VAR1_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_PRIV]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = uitofp i1 [[COND_LVALUE]] to float
// CHECK: call void @{{.+}}([[S_FLOAT_TY]]* [[COND_LVALUE:%.+]], float [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// t_var1 = min(t_var1, t_var1_reduction);
// CHECK: [[T_VAR1_VAL:%.+]] = load float, float* [[T_VAR1_REF]],
// CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load float, float* [[T_VAR1_PRIV]],
// CHECK: [[CMP:%.+]] = fcmp olt float [[T_VAR1_VAL]], [[T_VAR1_PRIV_VAL]]
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi float
// CHECK: store float [[UP]], float* [[T_VAR1_REF]],

// __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
// CHECK: call void @__kmpc_end_reduce(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]])

// break;
// CHECK: br label %[[RED_DONE]]

// case 2:
// t_var += t_var_reduction;
// CHECK: load float, float* [[T_VAR_PRIV]]
// CHECK: [[T_VAR_REF_INT:%.+]] = bitcast float* [[T_VAR_REF]] to i32*
// CHECK: [[OLD1:%.+]] = load atomic i32, i32* [[T_VAR_REF_INT]] monotonic,
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[ORIG_OLD_INT:%.+]] = phi i32 [ [[OLD1]], %{{.+}} ], [ [[OLD2:%.+]], %[[CONT]] ]
// CHECK: fadd float
// CHECK: [[UP_INT:%.+]] = load i32, i32*
// CHECK: [[T_VAR_REF_INT:%.+]] = bitcast float* [[T_VAR_REF]] to i32*
// CHECK: [[RES:%.+]] = cmpxchg i32* [[T_VAR_REF_INT]], i32 [[ORIG_OLD_INT]], i32 [[UP_INT]] monotonic monotonic
// CHECK: [[OLD2:%.+]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[ATOMIC_DONE:.+]], label %[[CONT]]
// CHECK: [[ATOMIC_DONE]]

// var = var.operator &(var_reduction);
// CHECK: call void @__kmpc_critical(
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @{{.+}}([[S_FLOAT_TY]]* [[VAR_REF]], [[S_FLOAT_TY]]* dereferenceable(4) [[VAR_PRIV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
// CHECK: call void @__kmpc_end_critical(

// var1 = var1.operator &&(var1_reduction);
// CHECK: call void @__kmpc_critical(
// CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_REF]])
// CHECK: [[VAR1_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_PRIV]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = uitofp i1 [[COND_LVALUE]] to float
// CHECK: call void @{{.+}}([[S_FLOAT_TY]]* [[COND_LVALUE:%.+]], float [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_REF]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)
// CHECK: call void @__kmpc_end_critical(

// t_var1 = min(t_var1, t_var1_reduction);
// CHECK: load float, float* [[T_VAR1_PRIV]]
// CHECK: [[T_VAR1_REF_INT:%.+]] = bitcast float* [[T_VAR1_REF]] to i32*
// CHECK: [[OLD1:%.+]] = load atomic i32, i32* [[T_VAR1_REF_INT]] monotonic,
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[ORIG_OLD_INT:%.+]] = phi i32 [ [[OLD1]], %{{.+}} ], [ [[OLD2:%.+]], %{{.+}} ]
// CHECK: [[CMP:%.+]] = fcmp olt float
// CHECK: br i1 [[CMP]]
// CHECK: phi float
// CHECK: [[UP_INT:%.+]] = load i32
// CHECK: [[T_VAR1_REF_INT:%.+]] = bitcast float* [[T_VAR1_REF]] to i32*
// CHECK: [[RES:%.+]] = cmpxchg i32* [[T_VAR1_REF_INT]], i32 [[ORIG_OLD_INT]], i32 [[UP_INT]] monotonic monotonic
// CHECK: [[OLD2:%.+]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[ATOMIC_DONE:.+]], label %[[CONT]]
// CHECK: [[ATOMIC_DONE]]

// __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
// CHECK: call void @__kmpc_end_reduce(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]])

// break;
// CHECK: br label %[[RED_DONE]]
// CHECK: [[RED_DONE]]
// CHECK-DAG: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[IMPLICIT_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])

// CHECK: ret void

// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
//  *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
//  ...
//  *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
//  *(Type<n>-1*)rhs[<n>-1]);
// }
// CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*)
// t_var_lhs = (float*)lhs[0];
// CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]],
// CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to float*
// t_var_rhs = (float*)rhs[0];
// CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0
// CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]],
// CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to float*

// var_lhs = (S<float>*)lhs[1];
// CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 1
// CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]],
// CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_FLOAT_TY]]*
// var_rhs = (S<float>*)rhs[1];
// CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1
// CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]],
// CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_FLOAT_TY]]*

// var1_lhs = (S<float>*)lhs[2];
// CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2
// CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]],
// CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_FLOAT_TY]]*
// var1_rhs = (S<float>*)rhs[2];
// CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2
// CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]],
// CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_FLOAT_TY]]*

// t_var1_lhs = (float*)lhs[3];
// CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 3
// CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]],
// CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to float*
// t_var1_rhs = (float*)rhs[3];
// CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3
// CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]],
// CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to float*

// t_var_lhs += t_var_rhs;
// CHECK: [[T_VAR_LHS_VAL:%.+]] = load float, float* [[T_VAR_LHS]],
// CHECK: [[T_VAR_RHS_VAL:%.+]] = load float, float* [[T_VAR_RHS]],
// CHECK: [[UP:%.+]] = fadd float [[T_VAR_LHS_VAL]], [[T_VAR_RHS_VAL]]
// CHECK: store float [[UP]], float* [[T_VAR_LHS]],

// var_lhs = var_lhs.operator &(var_rhs);
// CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @{{.+}}([[S_FLOAT_TY]]* [[VAR_LHS]], [[S_FLOAT_TY]]* dereferenceable(4) [[VAR_RHS]])
// CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR_LHS]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[UP]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// var1_lhs = var1_lhs.operator &&(var1_rhs);
// CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_LHS]])
// CHECK: [[VAR1_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
// CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]]
// CHECK: [[TRUE]]
// CHECK: [[TO_FLOAT:%.+]] = call float @{{.+}}([[S_FLOAT_TY]]* [[VAR1_RHS]])
// CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = fcmp une float [[TO_FLOAT]], 0.0
// CHECK: br label %[[END2]]
// CHECK: [[END2]]
// CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ]
// CHECK: [[CONV:%.+]] = uitofp i1 [[COND_LVALUE]] to float
// CHECK: call void @{{.+}}([[S_FLOAT_TY]]* [[COND_LVALUE:%.+]], float [[CONV]])
// CHECK: [[BC1:%.+]] = bitcast [[S_FLOAT_TY]]* [[VAR1_LHS]] to i8*
// CHECK: [[BC2:%.+]] = bitcast [[S_FLOAT_TY]]* [[COND_LVALUE]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false)

// t_var1_lhs = min(t_var1_lhs, t_var1_rhs);
// CHECK: [[T_VAR1_LHS_VAL:%.+]] = load float, float* [[T_VAR1_LHS]],
// CHECK: [[T_VAR1_RHS_VAL:%.+]] = load float, float* [[T_VAR1_RHS]],
// CHECK: [[CMP:%.+]] = fcmp olt float [[T_VAR1_LHS_VAL]], [[T_VAR1_RHS_VAL]]
// CHECK: br i1 [[CMP]]
// CHECK: [[UP:%.+]] = phi float
// CHECK: store float [[UP]], float* [[T_VAR1_LHS]],
// CHECK: ret void

// CHECK: define internal void [[MAIN_MICROTASK1]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i64 %{{.+}}, i64 %{{.+}}, i32* nonnull %{{.+}}, [2 x i32]* dereferenceable(8) %{{.+}}, [10 x [4 x [[S_FLOAT_TY]]]]* dereferenceable(160) %{{.+}})

// Reduction list for runtime.
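// For the array-section reductions handled below, the runtime list interleaves
// pointers and sizes, roughly { arr_priv, (void *)arr_size, arrs_priv, (void *)arrs_size }.
// Illustrative pseudocode only; the section length is an i64 passed through a void*
// slot, as the inttoptr/ptrtoint checks that follow show.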
473 // CHECK: [[RED_LIST:%.+]] = alloca [4 x i8*], 474 475 // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]], 476 477 // CHECK: [[IDX1:%.+]] = mul nsw i64 1, %{{.+}} 478 // CHECK: [[LB1:%.+]] = getelementptr inbounds i32, i32* %{{.+}}, i64 [[IDX1]] 479 // CHECK: [[LB1_0:%.+]] = getelementptr inbounds i32, i32* [[LB1]], i64 0 480 // CHECK: [[IDX1:%.+]] = mul nsw i64 1, %{{.+}} 481 // CHECK: [[UB1:%.+]] = getelementptr inbounds i32, i32* %{{.+}}, i64 [[IDX1]] 482 // CHECK: [[UB1_UP:%.+]] = getelementptr inbounds i32, i32* [[UB1]], i64 % 483 // CHECK: [[UB_CAST:%.+]] = ptrtoint i32* [[UB1_UP]] to i64 484 // CHECK: [[LB_CAST:%.+]] = ptrtoint i32* [[LB1_0]] to i64 485 // CHECK: [[DIFF:%.+]] = sub i64 [[UB_CAST]], [[LB_CAST]] 486 // CHECK: [[SIZE_1:%.+]] = sdiv exact i64 [[DIFF]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) 487 // CHECK: [[ARR_SIZE:%.+]] = add nuw i64 [[SIZE_1]], 1 488 // CHECK: call i8* @llvm.stacksave() 489 // CHECK: [[ARR_PRIV:%.+]] = alloca i32, i64 [[ARR_SIZE]], 490 491 // Check initialization of private copy. 492 // CHECK: [[END:%.+]] = getelementptr i32, i32* [[ARR_PRIV]], i64 [[ARR_SIZE]] 493 // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[ARR_PRIV]], [[END]] 494 // CHECK: br i1 [[ISEMPTY]], 495 // CHECK: phi i32* 496 // CHECK: store i32 0, i32* % 497 // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]] 498 // CHECK: br i1 [[DONE]], 499 500 // CHECK: [[ARRS_PRIV:%.+]] = alloca [[S_FLOAT_TY]], i64 [[ARRS_SIZE:%.+]], 501 502 // Check initialization of private copy. 503 // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[ARRS_PRIV]], i64 [[ARRS_SIZE]] 504 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[ARRS_PRIV]], [[END]] 505 // CHECK: br i1 [[ISEMPTY]], 506 // CHECK: phi [[S_FLOAT_TY]]* 507 // CHECK: call void @_ZN1SIfEC1Ev([[S_FLOAT_TY]]* % 508 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]] 509 // CHECK: br i1 [[DONE]], 510 511 // CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]] 512 // CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]] 513 // CHECK: call void @__kmpc_for_static_init_4( 514 // Skip checks for internal operations. 
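// Note: this reduction comes from the combined 'parallel for' directive, and the checks
// below use the nowait entry points (__kmpc_reduce_nowait / __kmpc_end_reduce_nowait).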
515 // CHECK: call void @__kmpc_for_static_fini( 516 517 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]}; 518 519 // CHECK: [[ARR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0 520 // CHECK: [[BITCAST:%.+]] = bitcast i32* [[ARR_PRIV]] to i8* 521 // CHECK: store i8* [[BITCAST]], i8** [[ARR_PRIV_REF]], 522 // CHECK: [[ARR_SIZE_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1 523 // CHECK: [[BITCAST:%.+]] = inttoptr i64 [[ARR_SIZE]] to i8* 524 // CHECK: store i8* [[BITCAST]], i8** [[ARR_SIZE_REF]], 525 // CHECK: [[ARRS_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2 526 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[ARRS_PRIV]] to i8* 527 // CHECK: store i8* [[BITCAST]], i8** [[ARRS_PRIV_REF]], 528 // CHECK: [[ARRS_SIZE_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3 529 // CHECK: [[BITCAST:%.+]] = inttoptr i64 [[ARRS_SIZE]] to i8* 530 // CHECK: store i8* [[BITCAST]], i8** [[ARRS_SIZE_REF]], 531 532 // res = __kmpc_reduce(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>); 533 534 // CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]] 535 // CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]] 536 // CHECK: [[BITCAST:%.+]] = bitcast [4 x i8*]* [[RED_LIST]] to i8* 537 // CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 2, i64 32, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]]) 538 539 // switch(res) 540 // CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [ 541 // CHECK: i32 1, label %[[CASE1:.+]] 542 // CHECK: i32 2, label %[[CASE2:.+]] 543 // CHECK: ] 544 545 // case 1: 546 // CHECK: [[CASE1]] 547 548 // arr[:] += arr_reduction[:]; 549 // CHECK: [[END:%.+]] = getelementptr i32, i32* [[LB1_0]], i64 [[ARR_SIZE]] 550 // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[LB1_0]], [[END]] 551 // CHECK: br i1 [[ISEMPTY]], 552 // CHECK: phi i32* 553 // CHECK: [[ADD:%.+]] = add nsw i32 % 554 // CHECK: store i32 [[ADD]], i32* % 555 // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]] 556 // CHECK: br i1 [[DONE]], 557 558 // arrs[:] = var.operator &(arrs_reduction[:]); 559 // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[ARRS_LB:%.+]], i64 [[ARRS_SIZE]] 560 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[ARRS_LB]], [[END]] 561 // CHECK: br i1 [[ISEMPTY]], 562 // CHECK: phi [[S_FLOAT_TY]]* 563 // CHECK: [[AND:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @_ZN1SIfEanERKS0_([[S_FLOAT_TY]]* %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}}) 564 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[AND]] to i8* 565 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %{{.+}}, i8* [[BITCAST]], i64 4, i32 4, i1 false) 566 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]] 567 // CHECK: br i1 [[DONE]], 568 569 // __kmpc_end_reduce(<loc>, <gtid>, &<lock>); 570 // CHECK: call void @__kmpc_end_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]]) 571 572 // break; 573 // CHECK: br label %[[RED_DONE]] 574 575 // case 2: 576 // CHECK: [[CASE2]] 577 578 // arr[:] += arr_reduction[:]; 579 // CHECK: [[END:%.+]] = getelementptr i32, i32* [[LB1_0]], i64 [[ARR_SIZE]] 580 // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[LB1_0]], [[END]] 581 // CHECK: br i1 [[ISEMPTY]], 582 // CHECK: phi i32* 583 // CHECK: atomicrmw add i32* 
%{{.+}}, i32 %{{.+}} monotonic 584 // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]] 585 // CHECK: br i1 [[DONE]], 586 587 // arrs[:] = var.operator &(arrs_reduction[:]); 588 // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[ARRS_LB:%.+]], i64 [[ARRS_SIZE]] 589 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[ARRS_LB]], [[END]] 590 // CHECK: br i1 [[ISEMPTY]], 591 // CHECK: phi [[S_FLOAT_TY]]* 592 // CHECK: call void @__kmpc_critical( 593 // CHECK: [[AND:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @_ZN1SIfEanERKS0_([[S_FLOAT_TY]]* %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}}) 594 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[AND]] to i8* 595 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %{{.+}}, i8* [[BITCAST]], i64 4, i32 4, i1 false) 596 // CHECK: call void @__kmpc_end_critical( 597 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]] 598 // CHECK: br i1 [[DONE]], 599 600 // break; 601 // CHECK: br label %[[RED_DONE]] 602 // CHECK: [[RED_DONE]] 603 604 // Check destruction of private copy. 605 // CHECK: [[END:%.+]] = getelementptr inbounds [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[ARRS_PRIV]], i64 [[ARRS_SIZE]] 606 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[ARRS_PRIV]], [[END]] 607 // CHECK: br i1 [[ISEMPTY]], 608 // CHECK: phi [[S_FLOAT_TY]]* 609 // CHECK: call void @_ZN1SIfED1Ev([[S_FLOAT_TY]]* % 610 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[ARRS_PRIV]] 611 // CHECK: br i1 [[DONE]], 612 // CHECK: call void @llvm.stackrestore(i8* 613 614 // CHECK: ret void 615 616 // void reduce_func(void *lhs[<n>], void *rhs[<n>]) { 617 // *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]); 618 // ... 619 // *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1], 620 // *(Type<n>-1*)rhs[<n>-1]); 621 // } 622 // CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*) 623 // arr_rhs = (int*)rhs[0]; 624 // CHECK: [[ARR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0 625 // CHECK: [[ARR_RHS_VOID:%.+]] = load i8*, i8** [[ARR_RHS_REF]], 626 // CHECK: [[ARR_RHS:%.+]] = bitcast i8* [[ARR_RHS_VOID]] to i32* 627 // arr_lhs = (int*)lhs[0]; 628 // CHECK: [[ARR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0 629 // CHECK: [[ARR_LHS_VOID:%.+]] = load i8*, i8** [[ARR_LHS_REF]], 630 // CHECK: [[ARR_LHS:%.+]] = bitcast i8* [[ARR_LHS_VOID]] to i32* 631 632 // arr_size = (size_t)lhs[1]; 633 // CHECK: [[ARR_SIZE_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1 634 // CHECK: [[ARR_SIZE_VOID:%.+]] = load i8*, i8** [[ARR_SIZE_REF]], 635 // CHECK: [[ARR_SIZE:%.+]] = ptrtoint i8* [[ARR_SIZE_VOID]] to i64 636 637 // arrs_rhs = (S<float>*)rhs[2]; 638 // CHECK: [[ARRS_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2 639 // CHECK: [[ARRS_RHS_VOID:%.+]] = load i8*, i8** [[ARRS_RHS_REF]], 640 // CHECK: [[ARRS_RHS:%.+]] = bitcast i8* [[ARRS_RHS_VOID]] to [[S_FLOAT_TY]]* 641 // arrs_lhs = (S<float>*)lhs[2]; 642 // CHECK: [[ARRS_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2 643 // CHECK: [[ARRS_LHS_VOID:%.+]] = load i8*, i8** [[ARRS_LHS_REF]], 644 // CHECK: [[ARRS_LHS:%.+]] = bitcast i8* [[ARRS_LHS_VOID]] to [[S_FLOAT_TY]]* 645 646 // arrs_size = (size_t)lhs[3]; 647 // CHECK: [[ARRS_SIZE_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3 648 // CHECK: 
[[ARRS_SIZE_VOID:%.+]] = load i8*, i8** [[ARRS_SIZE_REF]], 649 // CHECK: [[ARRS_SIZE:%.+]] = ptrtoint i8* [[ARRS_SIZE_VOID]] to i64 650 651 // arr_lhs[:] += arr_rhs[:]; 652 // CHECK: [[END:%.+]] = getelementptr i32, i32* [[ARR_LHS]], i64 [[ARR_SIZE]] 653 // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[ARR_LHS]], [[END]] 654 // CHECK: br i1 [[ISEMPTY]], 655 // CHECK: phi i32* 656 // CHECK: [[ADD:%.+]] = add nsw i32 % 657 // CHECK: store i32 [[ADD]], i32* % 658 // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]] 659 // CHECK: br i1 [[DONE]], 660 661 // arrs_lhs = arrs_lhs.operator &(arrs_rhs); 662 // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[ARRS_LB:%.+]], i64 [[ARRS_SIZE]] 663 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[ARRS_LB]], [[END]] 664 // CHECK: br i1 [[ISEMPTY]], 665 // CHECK: phi [[S_FLOAT_TY]]* 666 // CHECK: [[AND:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @_ZN1SIfEanERKS0_([[S_FLOAT_TY]]* %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}}) 667 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[AND]] to i8* 668 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %{{.+}}, i8* [[BITCAST]], i64 4, i32 4, i1 false) 669 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]] 670 // CHECK: br i1 [[DONE]], 671 672 // CHECK: ret void 673 674 // CHECK: define internal void [[MAIN_MICROTASK2]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i64 %{{.+}}, i64 %{{.+}}, i32* nonnull %{{.+}}, [10 x [4 x [[S_FLOAT_TY]]]]* dereferenceable(160) %{{.+}}) 675 676 // CHECK: [[ARRS_PRIV:%.+]] = alloca [10 x [4 x [[S_FLOAT_TY]]]], 677 678 // Reduction list for runtime. 679 // CHECK: [[RED_LIST:%.+]] = alloca [3 x i8*], 680 681 // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]], 682 683 // CHECK: [[ARR_SIZE:%.+]] = mul nuw i64 %{{.+}}, 4 684 // CHECK: call i8* @llvm.stacksave() 685 // CHECK: [[ARR_PRIV:%.+]] = alloca i32, i64 [[ARR_SIZE]], 686 687 // Check initialization of private copy. 688 // CHECK: [[END:%.+]] = getelementptr i32, i32* [[ARR_PRIV]], i64 [[ARR_SIZE]] 689 // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[ARR_PRIV]], [[END]] 690 // CHECK: br i1 [[ISEMPTY]], 691 // CHECK: phi i32* 692 // CHECK: store i32 0, i32* % 693 // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]] 694 // CHECK: br i1 [[DONE]], 695 696 // Check initialization of private copy. 697 // CHECK: [[LHS_BEGIN:%.+]] = bitcast [10 x [4 x [[S_FLOAT_TY]]]]* %{{.+}} to [[S_FLOAT_TY]]* 698 // CHECK: [[BEGIN:%.+]] = getelementptr inbounds [10 x [4 x [[S_FLOAT_TY]]]], [10 x [4 x [[S_FLOAT_TY]]]]* [[ARRS_PRIV]], i32 0, i32 0, i32 0 699 // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[BEGIN]], i64 40 700 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[BEGIN]], [[END]] 701 // CHECK: br i1 [[ISEMPTY]], 702 // CHECK: phi [[S_FLOAT_TY]]* 703 // CHECK: call void @_ZN1SIfEC1Ev([[S_FLOAT_TY]]* % 704 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]] 705 // CHECK: br i1 [[DONE]], 706 // CHECK: [[ARRS_PRIV_BEGIN:%.+]] = bitcast [10 x [4 x [[S_FLOAT_TY]]]]* [[ARRS_PRIV]] to [[S_FLOAT_TY]]* 707 708 // CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]] 709 // CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]] 710 // CHECK: call void @__kmpc_for_static_init_4( 711 // Skip checks for internal operations. 
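// For the whole-array reductions (reduction(+:arr) reduction(&:arrs)) the runtime list
// below has three entries, roughly { arr_priv, (void *)arr_size, arrs_priv }: only the
// VLA 'arr' needs its dynamic size, while the extent of the fixed-size 10x4 'arrs' is
// known statically. Illustrative only; the exact IR is matched by the checks that follow.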
712 // CHECK: call void @__kmpc_for_static_fini( 713 714 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]}; 715 716 // CHECK: [[ARR_PRIV_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST]], i64 0, i64 0 717 // CHECK: [[BITCAST:%.+]] = bitcast i32* [[ARR_PRIV]] to i8* 718 // CHECK: store i8* [[BITCAST]], i8** [[ARR_PRIV_REF]], 719 // CHECK: [[ARR_SIZE_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST]], i64 0, i64 1 720 // CHECK: [[BITCAST:%.+]] = inttoptr i64 [[ARR_SIZE]] to i8* 721 // CHECK: store i8* [[BITCAST]], i8** [[ARR_SIZE_REF]], 722 // CHECK: [[ARRS_PRIV_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST]], i64 0, i64 2 723 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[ARRS_PRIV_BEGIN]] to i8* 724 // CHECK: store i8* [[BITCAST]], i8** [[ARRS_PRIV_REF]], 725 726 // res = __kmpc_reduce(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>); 727 728 // CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]] 729 // CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]] 730 // CHECK: [[BITCAST:%.+]] = bitcast [3 x i8*]* [[RED_LIST]] to i8* 731 // CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 2, i64 24, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]]) 732 733 // switch(res) 734 // CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [ 735 // CHECK: i32 1, label %[[CASE1:.+]] 736 // CHECK: i32 2, label %[[CASE2:.+]] 737 // CHECK: ] 738 739 // case 1: 740 // CHECK: [[CASE1]] 741 742 // arr[:] += arr_reduction[:]; 743 // CHECK: [[END:%.+]] = getelementptr i32, i32* [[LB1_0:%.+]], i64 [[ARR_SIZE]] 744 // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[LB1_0]], [[END]] 745 // CHECK: br i1 [[ISEMPTY]], 746 // CHECK: phi i32* 747 // CHECK: [[ADD:%.+]] = add nsw i32 % 748 // CHECK: store i32 [[ADD]], i32* % 749 // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]] 750 // CHECK: br i1 [[DONE]], 751 752 // arrs[:] = var.operator &(arrs_reduction[:]); 753 // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[LHS_BEGIN]], i64 40 754 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[LHS_BEGIN]], [[END]] 755 // CHECK: br i1 [[ISEMPTY]], 756 // CHECK: phi [[S_FLOAT_TY]]* 757 // CHECK: [[AND:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @_ZN1SIfEanERKS0_([[S_FLOAT_TY]]* %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}}) 758 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[AND]] to i8* 759 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %{{.+}}, i8* [[BITCAST]], i64 4, i32 4, i1 false) 760 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]] 761 // CHECK: br i1 [[DONE]], 762 763 // __kmpc_end_reduce(<loc>, <gtid>, &<lock>); 764 // CHECK: call void @__kmpc_end_reduce(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]]) 765 766 // break; 767 // CHECK: br label %[[RED_DONE]] 768 769 // case 2: 770 // CHECK: [[CASE2]] 771 772 // arr[:] += arr_reduction[:]; 773 // CHECK: [[END:%.+]] = getelementptr i32, i32* [[LB1_0]], i64 [[ARR_SIZE]] 774 // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[LB1_0]], [[END]] 775 // CHECK: br i1 [[ISEMPTY]], 776 // CHECK: phi i32* 777 // CHECK: atomicrmw add i32* %{{.+}}, i32 %{{.+}} monotonic 778 // CHECK: [[DONE:%.+]] = icmp eq i32* %{{.+}}, [[END]] 779 // CHECK: br i1 [[DONE]], 780 781 // arrs[:] = var.operator &(arrs_reduction[:]); 782 // CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* 
[[LHS_BEGIN]], i64 40 783 // CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[LHS_BEGIN]], [[END]] 784 // CHECK: br i1 [[ISEMPTY]], 785 // CHECK: phi [[S_FLOAT_TY]]* 786 // CHECK: call void @__kmpc_critical( 787 // CHECK: [[AND:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @_ZN1SIfEanERKS0_([[S_FLOAT_TY]]* %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}}) 788 // CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[AND]] to i8* 789 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %{{.+}}, i8* [[BITCAST]], i64 4, i32 4, i1 false) 790 // CHECK: call void @__kmpc_end_critical( 791 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]] 792 // CHECK: br i1 [[DONE]], 793 794 // break; 795 // CHECK: br label %[[RED_DONE]] 796 // CHECK: [[RED_DONE]] 797 798 // Check destruction of private copy. 799 // CHECK: [[BEGIN:%.+]] = getelementptr inbounds [10 x [4 x [[S_FLOAT_TY]]]], [10 x [4 x [[S_FLOAT_TY]]]]* [[ARRS_PRIV]], i32 0, i32 0, i32 0 800 // CHECK: [[END:%.+]] = getelementptr inbounds [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[BEGIN]], i64 40 801 // CHECK: br 802 // CHECK: phi [[S_FLOAT_TY]]* 803 // CHECK: call void @_ZN1SIfED1Ev([[S_FLOAT_TY]]* % 804 // CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[BEGIN]] 805 // CHECK: br i1 [[DONE]], 806 // CHECK: call void @llvm.stackrestore(i8* 807 // CHECK: call void @__kmpc_barrier( 808 809 // CHECK: ret void 810 811 // void reduce_func(void *lhs[<n>], void *rhs[<n>]) { 812 // *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]); 813 // ... 814 // *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1], 815 // *(Type<n>-1*)rhs[<n>-1]); 816 // } 817 // CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*) 818 // arr_rhs = (int*)rhs[0]; 819 // CHECK: [[ARR_RHS_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0 820 // CHECK: [[ARR_RHS_VOID:%.+]] = load i8*, i8** [[ARR_RHS_REF]], 821 // CHECK: [[ARR_RHS:%.+]] = bitcast i8* [[ARR_RHS_VOID]] to i32* 822 // arr_lhs = (int*)lhs[0]; 823 // CHECK: [[ARR_LHS_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0 824 // CHECK: [[ARR_LHS_VOID:%.+]] = load i8*, i8** [[ARR_LHS_REF]], 825 // CHECK: [[ARR_LHS:%.+]] = bitcast i8* [[ARR_LHS_VOID]] to i32* 826 827 // arr_size = (size_t)lhs[1]; 828 // CHECK: [[ARR_SIZE_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1 829 // CHECK: [[ARR_SIZE_VOID:%.+]] = load i8*, i8** [[ARR_SIZE_REF]], 830 // CHECK: [[ARR_SIZE:%.+]] = ptrtoint i8* [[ARR_SIZE_VOID]] to i64 831 832 // arrs_rhs = (S<float>*)rhs[2]; 833 // CHECK: [[ARRS_RHS_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2 834 // CHECK: [[ARRS_RHS_VOID:%.+]] = load i8*, i8** [[ARRS_RHS_REF]], 835 // CHECK: [[ARRS_RHS:%.+]] = bitcast i8* [[ARRS_RHS_VOID]] to [[S_FLOAT_TY]]* 836 // arrs_lhs = (S<float>*)lhs[2]; 837 // CHECK: [[ARRS_LHS_REF:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2 838 // CHECK: [[ARRS_LHS_VOID:%.+]] = load i8*, i8** [[ARRS_LHS_REF]], 839 // CHECK: [[ARRS_LHS:%.+]] = bitcast i8* [[ARRS_LHS_VOID]] to [[S_FLOAT_TY]]* 840 841 // arr_lhs[:] += arr_rhs[:]; 842 // CHECK: [[END:%.+]] = getelementptr i32, i32* [[ARR_LHS]], i64 [[ARR_SIZE]] 843 // CHECK: [[ISEMPTY:%.+]] = icmp eq i32* [[ARR_LHS]], [[END]] 844 // CHECK: br i1 [[ISEMPTY]], 845 // CHECK: phi i32* 846 // CHECK: [[ADD:%.+]] = add nsw i32 % 847 // CHECK: store i32 [[ADD]], i32* % 848 // CHECK: [[DONE:%.+]] = 
icmp eq i32* %{{.+}}, [[END]]
// CHECK: br i1 [[DONE]],

// arrs_lhs = arrs_lhs.operator &(arrs_rhs);
// CHECK: [[END:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[ARRS_LB:%.+]], i64 40
// CHECK: [[ISEMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[ARRS_LB]], [[END]]
// CHECK: br i1 [[ISEMPTY]],
// CHECK: phi [[S_FLOAT_TY]]*
// CHECK: [[AND:%.+]] = call dereferenceable(4) [[S_FLOAT_TY]]* @_ZN1SIfEanERKS0_([[S_FLOAT_TY]]* %{{.+}}, [[S_FLOAT_TY]]* dereferenceable(4) %{{.+}})
// CHECK: [[BITCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[AND]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %{{.+}}, i8* [[BITCAST]], i64 4, i32 4, i1 false)
// CHECK: [[DONE:%.+]] = icmp eq [[S_FLOAT_TY]]* %{{.+}}, [[END]]
// CHECK: br i1 [[DONE]],

// CHECK: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 6, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [[S_INT_TY]]*, [[S_INT_TY]]*, i32*, [2 x i32]*, [2 x [[S_INT_TY]]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call {{.*}} [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i32* dereferenceable(4) %{{.+}}, [[S_INT_TY]]* dereferenceable(4) %{{.+}}, [[S_INT_TY]]* dereferenceable(4) %{{.+}}, i32* dereferenceable(4) %{{.+}}, [2 x i32]* dereferenceable(8) %{{.+}}, [2 x [[S_INT_TY]]]* dereferenceable(8) %{{.+}})
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]],
// CHECK: [[VAR1_PRIV:%.+]] = alloca [[S_INT_TY]],
// CHECK: [[T_VAR1_PRIV:%.+]] = alloca i{{[0-9]+}},

// Reduction list for runtime.
// CHECK: [[RED_LIST:%.+]] = alloca [4 x i8*],

// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VAR1_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
// CHECK: [[T_VAR1_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %

// For the + reduction operation the initial value of the private variable is 0.
// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* [[T_VAR_PRIV]],

// For the & reduction operation the initial value of the private variable is all ones (every bit set).
// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[VAR_PRIV]])

// For the && reduction operation the initial value of the private variable is 1.0.
// CHECK: call {{.*}} [[S_INT_TY_CONSTR:@.+]]([[S_INT_TY]]* [[VAR1_PRIV]])

// For the min reduction operation the initial value of the private variable is the largest representable value.
// CHECK: store i{{[0-9]+}} 2147483647, i{{[0-9]+}}* [[T_VAR1_PRIV]],

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_for_static_init_4(
// Skip checks for internal operations.
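// The int instantiation follows the same epilogue shape; note that in the atomic path
// (case 2) the checks below expect 'atomicrmw add' for '+' and 'atomicrmw min' for 'min'
// instead of the float cmpxchg loops used in the float version above.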
908 // CHECK: call void @__kmpc_for_static_fini( 909 910 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]}; 911 912 // CHECK: [[T_VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 0 913 // CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR_PRIV]] to i8* 914 // CHECK: store i8* [[BITCAST]], i8** [[T_VAR_PRIV_REF]], 915 // CHECK: [[VAR_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 1 916 // CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR_PRIV]] to i8* 917 // CHECK: store i8* [[BITCAST]], i8** [[VAR_PRIV_REF]], 918 // CHECK: [[VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 2 919 // CHECK: [[BITCAST:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_PRIV]] to i8* 920 // CHECK: store i8* [[BITCAST]], i8** [[VAR1_PRIV_REF]], 921 // CHECK: [[T_VAR1_PRIV_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST]], i64 0, i64 3 922 // CHECK: [[BITCAST:%.+]] = bitcast i{{[0-9]+}}* [[T_VAR1_PRIV]] to i8* 923 // CHECK: store i8* [[BITCAST]], i8** [[T_VAR1_PRIV_REF]], 924 925 // res = __kmpc_reduce_nowait(<loc>, <gtid>, <n>, sizeof(RedList), RedList, reduce_func, &<lock>); 926 927 // CHECK: [[BITCAST:%.+]] = bitcast [4 x i8*]* [[RED_LIST]] to i8* 928 // CHECK: [[RES:%.+]] = call i32 @__kmpc_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], i32 4, i64 32, i8* [[BITCAST]], void (i8*, i8*)* [[REDUCTION_FUNC:@.+]], [8 x i32]* [[REDUCTION_LOCK]]) 929 930 // switch(res) 931 // CHECK: switch i32 [[RES]], label %[[RED_DONE:.+]] [ 932 // CHECK: i32 1, label %[[CASE1:.+]] 933 // CHECK: i32 2, label %[[CASE2:.+]] 934 // CHECK: ] 935 936 // case 1: 937 // t_var += t_var_reduction; 938 // CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_REF]], 939 // CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]], 940 // CHECK: [[UP:%.+]] = add nsw i{{[0-9]+}} [[T_VAR_VAL]], [[T_VAR_PRIV_VAL]] 941 // CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR_REF]], 942 943 // var = var.operator &(var_reduction); 944 // CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* dereferenceable(4) [[VAR_PRIV]]) 945 // CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_REF]] to i8* 946 // CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8* 947 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false) 948 949 // var1 = var1.operator &&(var1_reduction); 950 // CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_REF]]) 951 // CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0 952 // CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]] 953 // CHECK: [[TRUE]] 954 // CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_PRIV]]) 955 // CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0 956 // CHECK: br label %[[END2]] 957 // CHECK: [[END2]] 958 // CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ] 959 // CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32 960 // CHECK: call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]]) 961 // CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_REF]] to i8* 962 // CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8* 963 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false) 964 965 // t_var1 = min(t_var1, t_var1_reduction); 966 // 
CHECK: [[T_VAR1_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_REF]], 967 // CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_PRIV]], 968 // CHECK: [[CMP:%.+]] = icmp slt i{{[0-9]+}} [[T_VAR1_VAL]], [[T_VAR1_PRIV_VAL]] 969 // CHECK: br i1 [[CMP]] 970 // CHECK: [[UP:%.+]] = phi i32 971 // CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR1_REF]], 972 973 // __kmpc_end_reduce_nowait(<loc>, <gtid>, &<lock>); 974 // CHECK: call void @__kmpc_end_reduce_nowait(%{{.+}}* [[REDUCTION_LOC]], i32 [[GTID]], [8 x i32]* [[REDUCTION_LOCK]]) 975 976 // break; 977 // CHECK: br label %[[RED_DONE]] 978 979 // case 2: 980 // t_var += t_var_reduction; 981 // CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]] 982 // CHECK: atomicrmw add i32* [[T_VAR_REF]], i32 [[T_VAR_PRIV_VAL]] monotonic 983 984 // var = var.operator &(var_reduction); 985 // CHECK: call void @__kmpc_critical( 986 // CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* dereferenceable(4) [[VAR_PRIV]]) 987 // CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_REF]] to i8* 988 // CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8* 989 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false) 990 // CHECK: call void @__kmpc_end_critical( 991 992 // var1 = var1.operator &&(var1_reduction); 993 // CHECK: call void @__kmpc_critical( 994 // CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_REF]]) 995 // CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0 996 // CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]] 997 // CHECK: [[TRUE]] 998 // CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_PRIV]]) 999 // CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0 1000 // CHECK: br label %[[END2]] 1001 // CHECK: [[END2]] 1002 // CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ] 1003 // CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32 1004 // CHECK: call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]]) 1005 // CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_REF]] to i8* 1006 // CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8* 1007 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false) 1008 // CHECK: call void @__kmpc_end_critical( 1009 1010 // t_var1 = min(t_var1, t_var1_reduction); 1011 // CHECK: [[T_VAR1_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_PRIV]] 1012 // CHECK: atomicrmw min i32* [[T_VAR1_REF]], i32 [[T_VAR1_PRIV_VAL]] monotonic 1013 1014 // break; 1015 // CHECK: br label %[[RED_DONE]] 1016 // CHECK: [[RED_DONE]] 1017 // CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]]) 1018 // CHECK-DAG: call {{.*}} [[S_INT_TY_DESTR]]([[S_INT_TY]]* 1019 // CHECK: ret void 1020 1021 // void reduce_func(void *lhs[<n>], void *rhs[<n>]) { 1022 // *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]); 1023 // ... 
1024 // *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1], 1025 // *(Type<n>-1*)rhs[<n>-1]); 1026 // } 1027 // CHECK: define internal void [[REDUCTION_FUNC]](i8*, i8*) 1028 // t_var_lhs = (i{{[0-9]+}}*)lhs[0]; 1029 // CHECK: [[T_VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS:%.+]], i64 0, i64 0 1030 // CHECK: [[T_VAR_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR_RHS_REF]], 1031 // CHECK: [[T_VAR_RHS:%.+]] = bitcast i8* [[T_VAR_RHS_VOID]] to i{{[0-9]+}}* 1032 // t_var_rhs = (i{{[0-9]+}}*)rhs[0]; 1033 // CHECK: [[T_VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS:%.+]], i64 0, i64 0 1034 // CHECK: [[T_VAR_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR_LHS_REF]], 1035 // CHECK: [[T_VAR_LHS:%.+]] = bitcast i8* [[T_VAR_LHS_VOID]] to i{{[0-9]+}}* 1036 1037 // var_lhs = (S<i{{[0-9]+}}>*)lhs[1]; 1038 // CHECK: [[VAR_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 1 1039 // CHECK: [[VAR_RHS_VOID:%.+]] = load i8*, i8** [[VAR_RHS_REF]], 1040 // CHECK: [[VAR_RHS:%.+]] = bitcast i8* [[VAR_RHS_VOID]] to [[S_INT_TY]]* 1041 // var_rhs = (S<i{{[0-9]+}}>*)rhs[1]; 1042 // CHECK: [[VAR_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 1 1043 // CHECK: [[VAR_LHS_VOID:%.+]] = load i8*, i8** [[VAR_LHS_REF]], 1044 // CHECK: [[VAR_LHS:%.+]] = bitcast i8* [[VAR_LHS_VOID]] to [[S_INT_TY]]* 1045 1046 // var1_lhs = (S<i{{[0-9]+}}>*)lhs[2]; 1047 // CHECK: [[VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 2 1048 // CHECK: [[VAR1_RHS_VOID:%.+]] = load i8*, i8** [[VAR1_RHS_REF]], 1049 // CHECK: [[VAR1_RHS:%.+]] = bitcast i8* [[VAR1_RHS_VOID]] to [[S_INT_TY]]* 1050 // var1_rhs = (S<i{{[0-9]+}}>*)rhs[2]; 1051 // CHECK: [[VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 2 1052 // CHECK: [[VAR1_LHS_VOID:%.+]] = load i8*, i8** [[VAR1_LHS_REF]], 1053 // CHECK: [[VAR1_LHS:%.+]] = bitcast i8* [[VAR1_LHS_VOID]] to [[S_INT_TY]]* 1054 1055 // t_var1_lhs = (i{{[0-9]+}}*)lhs[3]; 1056 // CHECK: [[T_VAR1_RHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_RHS]], i64 0, i64 3 1057 // CHECK: [[T_VAR1_RHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_RHS_REF]], 1058 // CHECK: [[T_VAR1_RHS:%.+]] = bitcast i8* [[T_VAR1_RHS_VOID]] to i{{[0-9]+}}* 1059 // t_var1_rhs = (i{{[0-9]+}}*)rhs[3]; 1060 // CHECK: [[T_VAR1_LHS_REF:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[RED_LIST_LHS]], i64 0, i64 3 1061 // CHECK: [[T_VAR1_LHS_VOID:%.+]] = load i8*, i8** [[T_VAR1_LHS_REF]], 1062 // CHECK: [[T_VAR1_LHS:%.+]] = bitcast i8* [[T_VAR1_LHS_VOID]] to i{{[0-9]+}}* 1063 1064 // t_var_lhs += t_var_rhs; 1065 // CHECK: [[T_VAR_LHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_LHS]], 1066 // CHECK: [[T_VAR_RHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_RHS]], 1067 // CHECK: [[UP:%.+]] = add nsw i{{[0-9]+}} [[T_VAR_LHS_VAL]], [[T_VAR_RHS_VAL]] 1068 // CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR_LHS]], 1069 1070 // var_lhs = var_lhs.operator &(var_rhs); 1071 // CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_LHS]], [[S_INT_TY]]* dereferenceable(4) [[VAR_RHS]]) 1072 // CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_LHS]] to i8* 1073 // CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8* 1074 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false) 1075 1076 // var1_lhs = var1_lhs.operator 
&&(var1_rhs); 1077 // CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_LHS]]) 1078 // CHECK: [[VAR1_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0 1079 // CHECK: br i1 [[VAR1_BOOL]], label %[[TRUE:.+]], label %[[END2:.+]] 1080 // CHECK: [[TRUE]] 1081 // CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_RHS]]) 1082 // CHECK: [[VAR1_REDUCTION_BOOL:%.+]] = icmp ne i{{[0-9]+}} [[TO_INT]], 0 1083 // CHECK: br label %[[END2]] 1084 // CHECK: [[END2]] 1085 // CHECK: [[COND_LVALUE:%.+]] = phi i1 [ false, %{{.+}} ], [ [[VAR1_REDUCTION_BOOL]], %[[TRUE]] ] 1086 // CHECK: [[CONV:%.+]] = zext i1 [[COND_LVALUE]] to i32 1087 // CHECK: call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]]) 1088 // CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_LHS]] to i8* 1089 // CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8* 1090 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[BC1]], i8* [[BC2]], i64 4, i32 4, i1 false) 1091 1092 // t_var1_lhs = min(t_var1_lhs, t_var1_rhs); 1093 // CHECK: [[T_VAR1_LHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_LHS]], 1094 // CHECK: [[T_VAR1_RHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_RHS]], 1095 // CHECK: [[CMP:%.+]] = icmp slt i{{[0-9]+}} [[T_VAR1_LHS_VAL]], [[T_VAR1_RHS_VAL]] 1096 // CHECK: br i1 [[CMP]] 1097 // CHECK: [[UP:%.+]] = phi i32 1098 // CHECK: store i{{[0-9]+}} [[UP]], i{{[0-9]+}}* [[T_VAR1_LHS]], 1099 // CHECK: ret void 1100 1101 #endif 1102 1103