// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-64
// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-64
// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-32
// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix LAMBDA --check-prefix LAMBDA-32

// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
// expected-no-diagnostics
//
// IR codegen test for the lastprivate() clause on '#pragma omp distribute'
// inside a target/teams region.  The same source is compiled both directly
// and through a PCH (the -include-pch RUN lines), on 64-bit and 32-bit
// targets, and with/without -DLAMBDA to also exercise capture of the
// privatized variables by a nested lambda.  The code below is the fixture;
// the directive comments interleaved with it are the FileCheck assertions
// on the generated LLVM IR and must not be edited independently of the
// codegen they pin down.
#ifndef HEADER
#define HEADER

// Non-trivial class template used so the lastprivate copy-out of s_arr/var
// must go through element-wise copies rather than a scalar store.
template <class T>
struct S {
  T f;
  S(T a) : f(a) {}
  S() : f() {}
  operator T() { return T(); }
  ~S() {}
};

// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK: [[S_INT_TY:%.+]] = type { i{{[0-9]+}} }
// Template version of the fixture, instantiated as tmain<int>() from main.
// Note the deliberately repeated list items (s_arr twice, var twice): the
// duplicates verify that repeated lastprivate entries are handled once.
template <typename T>
T tmain() {
  S<T> test;
  T t_var = T();
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> &var = test;
#pragma omp target
#pragma omp teams
#pragma omp distribute lastprivate(t_var, vec, s_arr, s_arr, var, var)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
  return T();
}

int main() {
  static int svar;
  volatile double g;
  volatile double &g1 = g;

#ifdef LAMBDA
  // LAMBDA-LABEL: @main
  // LAMBDA: call{{.*}} void [[OUTER_LAMBDA:@.+]](
  [&]() {
    static float sfvar;
    // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
    // LAMBDA: call i{{[0-9]+}} @__tgt_target_teams(
    // LAMBDA: call void [[OFFLOADING_FUN:@.+]](

    // LAMBDA: define{{.+}} void [[OFFLOADING_FUN]](
    // LAMBDA: call {{.*}}void {{.+}} @__kmpc_fork_teams({{.+}}, i32 4, {{.+}}* [[OMP_OUTLINED:@.+]] to {{.+}})
#pragma omp target
#pragma omp teams
#pragma omp distribute lastprivate(g, g1, svar, sfvar)
    for (int i = 0; i < 2; ++i) {
      // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_OUTLINED]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, double*{{.+}} [[G_IN:%.+]], double*{{.+}} [[G1_IN:%.+]], i{{[0-9]+}}*{{.+}} [[SVAR_IN:%.+]], float*{{.+}} [[SFVAR_IN:%.+]])
      // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca double*,
      // LAMBDA: [[G1_PRIVATE_ADDR:%.+]] = alloca double*,
      // LAMBDA: [[SVAR_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}}*,
      // LAMBDA: [[SFVAR_PRIVATE_ADDR:%.+]] = alloca float*,
      // LAMBDA: [[TMP_G1:%.+]] = alloca double*,
      // loop variables
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: {{.+}} = alloca i{{[0-9]+}},
      // LAMBDA: [[OMP_IS_LAST:%.+]] = alloca i{{[0-9]+}},
      // LAMBDA: [[G_PRIVATE:%.+]] = alloca double,
      // LAMBDA: [[G1_PRIVATE:%.+]] = alloca double,
      // LAMBDA: [[TMP_G1_PRIVATE:%.+]] = alloca double*,
      // LAMBDA: [[SVAR_PRIVATE:%.+]] = alloca i{{[0-9]+}},
      // LAMBDA: [[SFVAR_PRIVATE:%.+]] = alloca float,
      // LAMBDA: store double* [[G_IN]], double** [[G_PRIVATE_ADDR]],
      // LAMBDA: store double* [[G1_IN]], double** [[G1_PRIVATE_ADDR]],
      // LAMBDA: store i{{[0-9]+}}* [[SVAR_IN]], i{{[0-9]+}}** [[SVAR_PRIVATE_ADDR]],
      // LAMBDA: store float* [[SFVAR_IN]], float** [[SFVAR_PRIVATE_ADDR]],

      // init private variables
      // LAMBDA: [[G_IN_REF:%.+]] = load double*, double** [[G_PRIVATE_ADDR]],
      // LAMBDA: [[SVAR_IN_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SVAR_PRIVATE_ADDR]],
      // LAMBDA: [[SFVAR_IN_REF:%.+]] = load float*, float** [[SFVAR_PRIVATE_ADDR]],
      // LAMBDA: [[G1_IN_REF:%.+]] = load double*, double** [[G1_PRIVATE_ADDR]],
      // LAMBDA: store double* [[G1_IN_REF]], double** [[TMP_G1]],
      // LAMBDA: [[TMP_G1_VAL:%.+]] = load double*, double** [[TMP_G1]],
      // LAMBDA: store double* [[G1_PRIVATE]], double** [[TMP_G1_PRIVATE]],
      g = 1;
      g1 = 1;
      svar = 3;
      sfvar = 4.0;
      // LAMBDA: call {{.*}}void @__kmpc_for_static_init_4(
      // LAMBDA: store double 1.0{{.+}}, double* [[G_PRIVATE]],
      // LAMBDA: [[TMP_G1_REF:%.+]] = load double*, double** [[TMP_G1_PRIVATE]],
      // LAMBDA: store{{.+}} double 1.0{{.+}}, double* [[TMP_G1_REF]],
      // LAMBDA: store i{{[0-9]+}} 3, i{{[0-9]+}}* [[SVAR_PRIVATE]],
      // LAMBDA: store float 4.0{{.+}}, float* [[SFVAR_PRIVATE]],
      // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: store double* [[G_PRIVATE]], double** [[G_PRIVATE_ADDR_REF]],
      // LAMBDA: [[TMP_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
      // LAMBDA: [[G1_PRIVATE_ADDR_FROM_TMP:%.+]] = load double*, double** [[TMP_G1_PRIVATE]],
      // LAMBDA: store double* [[G1_PRIVATE_ADDR_FROM_TMP]], double** [[TMP_PRIVATE_ADDR_REF]],
      // LAMBDA: [[SVAR_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 2
      // LAMBDA: store i{{[0-9]+}}* [[SVAR_PRIVATE]], i{{[0-9]+}}** [[SVAR_PRIVATE_ADDR_REF]]
      // LAMBDA: [[SFVAR_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 3
      // LAMBDA: store float* [[SFVAR_PRIVATE]], float** [[SFVAR_PRIVATE_ADDR_REF]]
      // LAMBDA: call{{.*}} void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
      // LAMBDA: call {{.*}}void @__kmpc_for_static_fini(
      // LAMBDA: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST]],
      // LAMBDA: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0
      // LAMBDA: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]]

      // LAMBDA: [[OMP_LASTPRIV_BLOCK]]:
      // LAMBDA: [[G_PRIV_VAL:%.+]] = load double, double* [[G_PRIVATE]],
      // LAMBDA: store{{.*}} double [[G_PRIV_VAL]], double* [[G_IN_REF]],
      // LAMBDA: [[TMP_G1_PRIV_REF:%.+]] = load double*, double** [[TMP_G1_PRIVATE]],
      // LAMBDA: [[TMP_G1_PRIV_VAL:%.+]] = load double, double* [[TMP_G1_PRIV_REF]],
      // LAMBDA: store{{.*}} double [[TMP_G1_PRIV_VAL]], double* [[TMP_G1_VAL]],

      // LAMBDA: [[SVAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SVAR_PRIVATE]],
      // LAMBDA: store i{{[0-9]+}} [[SVAR_PRIV_VAL]], i{{[0-9]+}}* [[SVAR_IN_REF]],
      // LAMBDA: [[SFVAR_PRIV_VAL:%.+]] = load float, float* [[SFVAR_PRIVATE]],
      // LAMBDA: store float [[SFVAR_PRIV_VAL]], float* [[SFVAR_IN_REF]],
      // LAMBDA: br label %[[OMP_LASTPRIV_DONE]]
      // LAMBDA: [[OMP_LASTPRIV_DONE]]:
      // LAMBDA: ret
      [&]() {
        // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
        // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
        g = 2;
        g1 = 2;
        svar = 4;
        sfvar = 8.0;
        // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
        // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
        // LAMBDA: [[G_REF:%.+]] = load double*, double** [[G_PTR_REF]]
        // LAMBDA: store double 2.0{{.+}}, double* [[G_REF]]

        // LAMBDA: [[TMP_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
        // LAMBDA: [[G1_REF:%.+]] = load double*, double** [[TMP_PTR_REF]]
        // LAMBDA: store double 2.0{{.+}}, double* [[G1_REF]],
        // LAMBDA: [[SVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 2
        // LAMBDA: [[SVAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SVAR_PTR_REF]]
        // LAMBDA: store i{{[0-9]+}} 4, i{{[0-9]+}}* [[SVAR_REF]]
        // LAMBDA: [[SFVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 3
        // LAMBDA: [[SFVAR_REF:%.+]] = load float*, float** [[SFVAR_PTR_REF]]
        // LAMBDA: store float 8.0{{.+}}, float* [[SFVAR_REF]]
      }();
    }
  }();
  return 0;
#else
  // Non-lambda variant: same pattern with a float specialization of S,
  // plus the static local 'svar' in the lastprivate list.
  S<float> test;
  int t_var = 0;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> &var = test;

#pragma omp target
#pragma omp teams
#pragma omp distribute lastprivate(t_var, vec, s_arr, s_arr, var, var, svar)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
  int i;

  return tmain<int>();
#endif
}

// CHECK: define{{.*}} i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])
// CHECK: call i{{[0-9]+}} @__tgt_target_teams(
// CHECK: call void [[OFFLOAD_FUN:@.+]](i{{[0-9]+}} {{.+}}, [2 x i{{[0-9]+}}]* {{.+}}, [2 x [[S_FLOAT_TY]]]* {{.+}}, [[S_FLOAT_TY]]* {{.+}}, i{{[0-9]+}} {{.+}})
// CHECK: ret

// CHECK: define{{.+}} [[OFFLOAD_FUN]](i{{[0-9]+}} {{.+}}, [2 x i{{[0-9]+}}]*{{.+}} {{.+}}, [2 x [[S_FLOAT_TY]]]*{{.+}} {{.+}}, [[S_FLOAT_TY]]*{{.+}} {{.+}}, i{{[0-9]+}} {{.+}})
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_teams(
// CHECK: ret
//
// CHECK: define internal void [[OMP_OUTLINED:@.+]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i{{[0-9]+}}*{{.+}} [[T_VAR_IN:%.+]], [2 x i{{[0-9]+}}]*{{.+}} [[VEC_IN:%.+]], [2 x [[S_FLOAT_TY]]]*{{.+}} [[S_ARR_IN:%.+]], [[S_FLOAT_TY]]*{{.+}} [[VAR_IN:%.+]], i{{[0-9]+}}*{{.*}} [[S_VAR_IN:%.+]])
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: [[T_VAR_ADDR:%.+]] = alloca i{{[0-9]+}}*,
// CHECK: [[VEC_ADDR:%.+]] = alloca [2 x i{{[0-9]+}}]*,
// CHECK: [[S_ARR_ADDR:%.+]] = alloca [2 x [[S_FLOAT_TY]]]*,
// CHECK: [[VAR_ADDR:%.+]] = alloca [[S_FLOAT_TY]]*,
// CHECK: [[SVAR_ADDR:%.+]] = alloca i{{[0-9]+}}*,
// CHECK: [[TMP:%.*]] = alloca [[S_FLOAT_TY]]*,
// skip loop variables
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: [[OMP_IS_LAST:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}],
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_FLOAT_TY]]],
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: [[TMP_PRIV:%.+]] = alloca [[S_FLOAT_TY]]*,
// CHECK: [[S_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},

// copy from parameters to local address variables
// CHECK: store i{{[0-9]+}}* [[T_VAR_IN]], i{{[0-9]+}}** [[T_VAR_ADDR]],
// CHECK: store [2 x i{{[0-9]+}}]* [[VEC_IN]], [2 x i{{[0-9]+}}]** [[VEC_ADDR]],
// CHECK: store [2 x [[S_FLOAT_TY]]]* [[S_ARR_IN]], [2 x [[S_FLOAT_TY]]]** [[S_ARR_ADDR]],
// CHECK: store [[S_FLOAT_TY]]* [[VAR_IN]], [[S_FLOAT_TY]]** [[VAR_ADDR]],
// CHECK: store i{{[0-9]+}}* [[S_VAR_IN]], i{{[0-9]+}}** [[SVAR_ADDR]],

// load content of local address variables
// CHECK: [[T_VAR_ADDR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[T_VAR_ADDR]],
// CHECK: [[VEC_ADDR_REF:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** [[VEC_ADDR]],
// CHECK: [[S_ARR_ADDR_REF:%.+]] = load [2 x [[S_FLOAT_TY]]]*, [2 x [[S_FLOAT_TY]]]** [[S_ARR_ADDR]],
// CHECK: [[SVAR_ADDR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SVAR_ADDR]],
// CHECK: [[VAR_ADDR_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[VAR_ADDR]],
// CHECK: store [[S_FLOAT_TY]]* [[VAR_ADDR_REF]], [[S_FLOAT_TY]]** [[TMP]],
// CHECK: store i{{[0-9]+}} 0, i{{[0-9]+}}* [[OMP_IS_LAST]],
// CHECK: [[TMP_REF:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[TMP]],
// the distribute loop
// CHECK: call void @__kmpc_for_static_init_4(
// assignment: vec[i] = t_var;
// the following is extremely weak, because it assumes ordering of this load with no reference after the call to static_init: fix!!!!
// CHECK: [[IV_VAL:%.+]] =
// CHECK: [[T_VAR_PRIV_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: [[VEC_PTR:%.+]] = getelementptr inbounds [2 x i{{[0-9]+}}], [2 x i{{[0-9]+}}]* [[VEC_PRIV]], i{{[0-9]+}} 0, i{{[0-9]+}} {{.+}}
// CHECK: store i{{[0-9]+}} [[T_VAR_PRIV_VAL]], i{{[0-9]+}}* [[VEC_PTR]],

// assignment: s_arr[i] = var;
// CHECK-DAG: [[S_ARR_PTR:%.+]] = getelementptr inbounds [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[S_ARR_PRIV]],
// CHECK-DAG: [[TMP_VAL:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[TMP_PRIV]],
// CHECK-DAG: [[S_ARR_PTR_BCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[S_ARR_PTR]] to i8*
// CHECK-DAG: [[TMP_VAL_BCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[TMP_VAL]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[S_ARR_PTR_BCAST]], i8* [[TMP_VAL_BCAST]],
// CHECK: call void @__kmpc_for_static_fini(

// lastprivates
// CHECK: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST]],
// CHECK: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]]

// CHECK: [[OMP_LASTPRIV_BLOCK]]:
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_ADDR_REF]],
// CHECK: [[BCAST_VEC_ADDR_REF:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_ADDR_REF]] to i8*
// CHECK: [[BCAST_VEC_PRIV:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[BCAST_VEC_ADDR_REF]], i8* [[BCAST_VEC_PRIV]],
// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_FLOAT_TY]]], [2 x [[S_FLOAT_TY]]]* [[S_ARR_ADDR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[S_ARR_PRIV_BCAST:%.+]] = bitcast [2 x [[S_FLOAT_TY]]]* [[S_ARR_PRIV]] to [[S_FLOAT_TY]]*
// CHECK: [[S_ARR_BEGIN_GEP:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2
// CHECK: [[S_ARR_IS_EMPTY:%.+]] = icmp eq [[S_FLOAT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_BEGIN_GEP]]
// CHECK: br i1 [[S_ARR_IS_EMPTY]], label %[[S_ARR_COPY_DONE:.+]], label %[[S_ARR_COPY_BLOCK:.+]]
// CHECK: [[S_ARR_COPY_BLOCK]]:
// CHECK: [[S_ARR_SRC_EL:%.+]] = phi [[S_FLOAT_TY]]*{{.+}}
// CHECK: [[S_ARR_DST_EL:%.+]] = phi [[S_FLOAT_TY]]*{{.+}}
// CHECK: [[S_ARR_DST_BCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[S_ARR_DST_EL]] to i8*
// CHECK: [[S_ARR_SRC_BCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[S_ARR_SRC_EL]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[S_ARR_DST_BCAST]], i8* [[S_ARR_SRC_BCAST]]{{.+}})
// CHECK: [[S_ARR_DST_NEXT:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[S_ARR_DST_EL]], i{{[0-9]+}} 1
// CHECK: [[S_ARR_SRC_NEXT:%.+]] = getelementptr{{.+}}
// CHECK: [[CPY_IS_FINISHED:%.+]] = icmp eq [[S_FLOAT_TY]]* [[S_ARR_DST_NEXT]], [[S_ARR_BEGIN_GEP]]
// CHECK: br i1 [[CPY_IS_FINISHED]], label %[[S_ARR_COPY_DONE]], label %[[S_ARR_COPY_BLOCK]]
// CHECK: [[S_ARR_COPY_DONE]]:
// CHECK: [[TMP_VAL1:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[TMP_PRIV]],
// CHECK: [[VAR_ADDR_REF_BCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[TMP_REF]] to i8*
// CHECK: [[TMP_VAL1_BCAST:%.+]] = bitcast [[S_FLOAT_TY]]* [[TMP_VAL1]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[VAR_ADDR_REF_BCAST]], i8* [[TMP_VAL1_BCAST]],{{.+}})
// CHECK: [[SVAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[S_VAR_PRIV]],
// CHECK: store i{{[0-9]+}} [[SVAR_VAL]], i{{[0-9]+}}* [[SVAR_ADDR_REF]],
// CHECK: ret void

// template tmain
// CHECK: define{{.*}} i{{[0-9]+}} [[TMAIN_INT:@.+]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call i{{[0-9]+}} @__tgt_target_teams(
// CHECK: call void [[OFFLOAD_FUN_1:@.+]](i{{[0-9]+}} {{.+}}, [2 x i{{[0-9]+}}]* {{.+}}, [2 x [[S_INT_TY]]]* {{.+}}, [[S_INT_TY]]* {{.+}})
// CHECK: ret


// CHECK: define internal void [[OFFLOAD_FUN_1]](
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_teams(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4,
// CHECK: ret

// CHECK: define internal void [[OMP_OUTLINED_1:@.+]](i{{[0-9]+}}* noalias [[GTID_ADDR1:%.+]], i{{[0-9]+}}* noalias %{{.+}}, i{{[0-9]+}}*{{.+}} [[T_VAR_IN1:%.+]], [2 x i{{[0-9]+}}]*{{.+}} [[VEC_IN1:%.+]], [2 x [[S_INT_TY]]]*{{.+}} [[S_ARR_IN1:%.+]], [[S_INT_TY]]*{{.+}} [[VAR_IN1:%.+]])
// skip alloca of global_tid and bound_tid
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: {{.+}} = alloca i{{[0-9]+}}*,
// CHECK: [[T_VAR_ADDR1:%.+]] = alloca i{{[0-9]+}}*,
// CHECK: [[VEC_ADDR1:%.+]] = alloca [2 x i{{[0-9]+}}]*,
// CHECK: [[S_ARR_ADDR1:%.+]] = alloca [2 x [[S_INT_TY]]]*,
// CHECK: [[VAR_ADDR1:%.+]] = alloca [[S_INT_TY]]*,
// CHECK: [[TMP:%.+]] = alloca [[S_INT_TY]]*,
// skip loop variables
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: {{.+}} = alloca i{{[0-9]+}},
// CHECK: [[OMP_IS_LAST1:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV1:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VEC_PRIV1:%.+]] = alloca [2 x i{{[0-9]+}}],
// CHECK: [[S_ARR_PRIV1:%.+]] = alloca [2 x [[S_INT_TY]]],
// CHECK: [[VAR_PRIV1:%.+]] = alloca [[S_INT_TY]],
// CHECK: [[TMP_PRIV1:%.+]] = alloca [[S_INT_TY]]*,

// skip init of bound and global tid
// CHECK: store i{{[0-9]+}}* {{.*}},
// CHECK: store i{{[0-9]+}}* {{.*}},
// copy from parameters to local address variables
// CHECK: store i{{[0-9]+}}* [[T_VAR_IN1]], i{{[0-9]+}}** [[T_VAR_ADDR1]],
// CHECK: store [2 x i{{[0-9]+}}]* [[VEC_IN1]], [2 x i{{[0-9]+}}]** [[VEC_ADDR1]],
// CHECK: store [2 x [[S_INT_TY]]]* [[S_ARR_IN1]], [2 x [[S_INT_TY]]]** [[S_ARR_ADDR1]],
// CHECK: store [[S_INT_TY]]* [[VAR_IN1]], [[S_INT_TY]]** [[VAR_ADDR1]],

// load content of local address variables
// CHECK: [[T_VAR_ADDR_REF1:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[T_VAR_ADDR1]],
// CHECK: [[VEC_ADDR_REF1:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** [[VEC_ADDR1]],
// CHECK: [[S_ARR_ADDR_REF1:%.+]] = load [2 x [[S_INT_TY]]]*, [2 x [[S_INT_TY]]]** [[S_ARR_ADDR1]],
// CHECK: [[VAR_ADDR1_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[VAR_ADDR1]],
// CHECK-DAG: store [[S_INT_TY]]* [[VAR_ADDR1_REF]], [[S_INT_TY]]** [[TMP]],
// CHECK-DAG: store i{{[0-9]+}} 0, i{{[0-9]+}}* [[OMP_IS_LAST1]],
// CHECK-DAG: [[TMP_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[TMP]],
// CHECK: call void @__kmpc_for_static_init_4(
// assignment: vec[i] = t_var;
// CHECK: [[IV_VAL1:%.+]] =
// CHECK: [[T_VAR_PRIV_VAL1:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV1]],
// CHECK: [[VEC_PTR1:%.+]] = getelementptr inbounds [2 x i{{[0-9]+}}], [2 x i{{[0-9]+}}]* [[VEC_PRIV1]], i{{[0-9]+}} 0, i{{[0-9]+}} {{.+}}
// CHECK: store i{{[0-9]+}} [[T_VAR_PRIV_VAL1]], i{{[0-9]+}}* [[VEC_PTR1]],

// assignment: s_arr[i] = var;
// CHECK-DAG: [[S_ARR_PTR1:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[S_ARR_PRIV1]],
// CHECK-DAG: [[TMP_VAL1:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[TMP_PRIV1]],
// CHECK-DAG: [[S_ARR_PTR_BCAST1:%.+]] = bitcast [[S_INT_TY]]* [[S_ARR_PTR1]] to i8*
// CHECK-DAG: [[TMP_VAL_BCAST1:%.+]] = bitcast [[S_INT_TY]]* [[TMP_VAL1]] to i8*
// CHECK-DAG: call void @llvm.memcpy.{{.+}}(i8* [[S_ARR_PTR_BCAST1]], i8* [[TMP_VAL_BCAST1]],
// CHECK: call void @__kmpc_for_static_fini(

// lastprivates
// CHECK: [[OMP_IS_LAST_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[OMP_IS_LAST1]],
// CHECK: [[IS_LAST_IT:%.+]] = icmp ne i{{[0-9]+}} [[OMP_IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_IT]], label %[[OMP_LASTPRIV_BLOCK:.+]], label %[[OMP_LASTPRIV_DONE:.+]]

// CHECK: [[OMP_LASTPRIV_BLOCK]]:
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV1]],
// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_ADDR_REF1]],
// CHECK: [[BCAST_VEC_ADDR_REF:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_ADDR_REF1]] to i8*
// CHECK: [[BCAST_VEC_PRIV:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV1]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[BCAST_VEC_ADDR_REF]], i8* [[BCAST_VEC_PRIV]],
// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[S_ARR_ADDR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[S_ARR_PRIV_BCAST:%.+]] = bitcast [2 x [[S_INT_TY]]]* [[S_ARR_PRIV1]] to [[S_INT_TY]]*
// CHECK: [[S_ARR_BEGIN_GEP:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2
// CHECK: [[S_ARR_IS_EMPTY:%.+]] = icmp eq [[S_INT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_BEGIN_GEP]]
// CHECK: br i1 [[S_ARR_IS_EMPTY]], label %[[S_ARR_COPY_DONE:.+]], label %[[S_ARR_COPY_BLOCK:.+]]
// CHECK: [[S_ARR_COPY_BLOCK]]:
// CHECK: [[S_ARR_SRC_EL:%.+]] = phi [[S_INT_TY]]*{{.+}}
// CHECK: [[S_ARR_DST_EL:%.+]] = phi [[S_INT_TY]]*{{.+}}
// CHECK: [[S_ARR_DST_BCAST:%.+]] = bitcast [[S_INT_TY]]* [[S_ARR_DST_EL]] to i8*
// CHECK: [[S_ARR_SRC_BCAST:%.+]] = bitcast [[S_INT_TY]]* [[S_ARR_SRC_EL]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[S_ARR_DST_BCAST]], i8* [[S_ARR_SRC_BCAST]]{{.+}})
// CHECK: [[S_ARR_DST_NEXT:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_DST_EL]], i{{[0-9]+}} 1
// CHECK: [[S_ARR_SRC_NEXT:%.+]] = getelementptr{{.+}}
// CHECK: [[CPY_IS_FINISHED:%.+]] = icmp eq [[S_INT_TY]]* [[S_ARR_DST_NEXT]], [[S_ARR_BEGIN_GEP]]
// CHECK: br i1 [[CPY_IS_FINISHED]], label %[[S_ARR_COPY_DONE]], label %[[S_ARR_COPY_BLOCK]]
// CHECK: [[S_ARR_COPY_DONE]]:
// CHECK: [[TMP_VAL1:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** [[TMP_PRIV1]],
// CHECK: [[VAR_ADDR_REF_BCAST:%.+]] = bitcast [[S_INT_TY]]* [[TMP_REF]] to i8*
// CHECK: [[TMP_VAL1_BCAST:%.+]] = bitcast [[S_INT_TY]]* [[TMP_VAL1]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[VAR_ADDR_REF_BCAST]], i8* [[TMP_VAL1_BCAST]],{{.+}})
// CHECK: ret void
#endif