// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK4

// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK7
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK8
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

struct St {
  int a, b;
  St() : a(0), b(0) {}
  St(const St &st) : a(st.a + st.b), b(0) {}
  ~St() {}
};

volatile int g = 1212;
volatile int &g1 = g;

template <class T>
struct S {
  T f;
  S(T a) : f(a + g) {}
  S() : f(g) {}
  S(const S &s, St t = St()) : f(s.f + t.a) {}
  operator T() { return T(); }
  ~S() {}
};

template <typename T>
T tmain() {
  S<T> test;
  T t_var = T();
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> &var = test;
#pragma omp parallel
#pragma omp for firstprivate(t_var, vec, s_arr, var)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
  }
  return T();
}

S<float> test;
int t_var = 333;
int vec[] = {1, 2};
S<float> s_arr[] = {1, 2};
S<float> var(3);

int main() {
  static int sivar;
#ifdef LAMBDA
  [&]() {
#pragma omp parallel
#pragma omp for firstprivate(g, g1, sivar)
    for (int i = 0; i < 2; ++i) {
      // Skip temp vars for loop

      g = 1;
      g1 = 2;
      sivar = 3;

      [&]() {
        g = 4;
        g1 = 5;
        sivar = 6;
      }();
    }
  }();
  return 0;
#elif defined(BLOCKS)
  ^{
#pragma omp parallel
#pragma omp for firstprivate(g, g1, sivar)
    for (int i = 0; i < 2; ++i) {
      // Skip temp vars for loop

      g = 1;
      g1 = 1;
      sivar = 2;
      ^{
        g = 2;
        g1 = 2;
        sivar = 4;
      }();
    }
  }();
  return 0;
#else
#pragma omp for firstprivate(t_var, vec, s_arr, var, sivar)
  for (int i = 0; i < 2; ++i) {
    vec[i] = t_var;
    s_arr[i] = var;
    sivar += i;
  }
  return tmain<int>();
#endif
}

// Skip temp vars for loop

// firstprivate t_var(t_var)

// firstprivate vec(vec)

// firstprivate s_arr(s_arr)

// firstprivate var(var)
// firstprivate (sivar)

// Synchronization for initialization.

// ~(firstprivate var), ~(firstprivate s_arr)

// Skip temp vars for loop

// firstprivate vec(vec)

// firstprivate s_arr(s_arr)

// firstprivate var(var)

// No synchronization for initialization.

// ~(firstprivate var), ~(firstprivate s_arr)
#endif

// CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull dereferenceable(4) @test)
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]]
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
// CHECK1-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* nonnull dereferenceable(4) [[THIS1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
// CHECK1-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SIfED2Ev(%struct.S* nonnull dereferenceable(4) [[THIS1]]) #[[ATTR2]]
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
// CHECK1-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
// CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
// CHECK1-NEXT: store float [[CONV]], float* [[F]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
// CHECK1-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
// CHECK1-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float 1.000000e+00)
// CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float 2.000000e+00)
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]]
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
// CHECK1-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store float [[A]], float* [[A_ADDR]], align 4
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
// CHECK1-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* nonnull dereferenceable(4) [[THIS1]], float [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
// CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK1: arraydestroy.body:
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
// CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0)
// CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK1: arraydestroy.done1:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
// CHECK1-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float, align 4
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store float [[A]], float* [[A_ADDR]], align 4
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
// CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
// CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
// CHECK1-NEXT: store
float [[ADD]], float* [[F]], align 4 253 // CHECK1-NEXT: ret void 254 // 255 // 256 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init.2 257 // CHECK1-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 258 // CHECK1-NEXT: entry: 259 // CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) @var, float 3.000000e+00) 260 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]] 261 // CHECK1-NEXT: ret void 262 // 263 // 264 // CHECK1-LABEL: define {{[^@]+}}@main 265 // CHECK1-SAME: () #[[ATTR3:[0-9]+]] { 266 // CHECK1-NEXT: entry: 267 // CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 268 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 269 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 270 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 271 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 272 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 273 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 274 // CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4 275 // CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4 276 // CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4 277 // CHECK1-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_ST:%.*]], align 4 278 // CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S:%.*]], align 4 279 // CHECK1-NEXT: [[AGG_TMP2:%.*]] = alloca [[STRUCT_ST]], align 4 280 // CHECK1-NEXT: [[SIVAR:%.*]] = alloca i32, align 4 281 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 282 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) 283 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4 284 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 285 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4 286 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 287 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 288 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* @t_var, align 4 289 // CHECK1-NEXT: store i32 [[TMP1]], i32* [[T_VAR]], align 4 290 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast [2 x i32]* [[VEC]] to i8* 291 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 bitcast ([2 x i32]* @vec to i8*), i64 8, i1 false) 292 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0 293 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2 294 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN]], [[TMP3]] 295 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 296 // CHECK1: omp.arraycpy.body: 297 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 298 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 299 // CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* nonnull dereferenceable(8) [[AGG_TMP]]) 300 // CHECK1-NEXT: call void @_ZN1SIfEC1ERKS0_2St(%struct.S* nonnull dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* nonnull align 4 dereferenceable(4) 
[[OMP_ARRAYCPY_SRCELEMENTPAST]], %struct.St* [[AGG_TMP]]) 301 // CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* nonnull dereferenceable(8) [[AGG_TMP]]) #[[ATTR2]] 302 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 303 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 304 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP3]] 305 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]] 306 // CHECK1: omp.arraycpy.done1: 307 // CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* nonnull dereferenceable(8) [[AGG_TMP2]]) 308 // CHECK1-NEXT: call void @_ZN1SIfEC1ERKS0_2St(%struct.S* nonnull dereferenceable(4) [[VAR]], %struct.S* nonnull align 4 dereferenceable(4) @var, %struct.St* [[AGG_TMP2]]) 309 // CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* nonnull dereferenceable(8) [[AGG_TMP2]]) #[[ATTR2]] 310 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4 311 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[SIVAR]], align 4 312 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP0]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 313 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 314 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1 315 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 316 // CHECK1: cond.true: 317 // CHECK1-NEXT: br label [[COND_END:%.*]] 318 // CHECK1: cond.false: 319 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 320 // CHECK1-NEXT: br label [[COND_END]] 321 // CHECK1: cond.end: 322 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] 323 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 324 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 325 // CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4 326 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 327 // CHECK1: omp.inner.for.cond: 328 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 329 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 330 // CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 331 // CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] 332 // CHECK1: omp.inner.for.cond.cleanup: 333 // CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]] 334 // CHECK1: omp.inner.for.body: 335 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 336 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 337 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 338 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4 339 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[T_VAR]], align 4 340 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4 341 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64 342 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 [[IDXPROM]] 343 // CHECK1-NEXT: store i32 [[TMP11]], i32* [[ARRAYIDX]], align 4 344 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4 345 // CHECK1-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP13]] to i64 346 // CHECK1-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [2 x 
%struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 [[IDXPROM4]] 347 // CHECK1-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[ARRAYIDX5]] to i8* 348 // CHECK1-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[VAR]] to i8* 349 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP14]], i8* align 4 [[TMP15]], i64 4, i1 false) 350 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4 351 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[SIVAR]], align 4 352 // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP17]], [[TMP16]] 353 // CHECK1-NEXT: store i32 [[ADD6]], i32* [[SIVAR]], align 4 354 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 355 // CHECK1: omp.body.continue: 356 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 357 // CHECK1: omp.inner.for.inc: 358 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 359 // CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP18]], 1 360 // CHECK1-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4 361 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] 362 // CHECK1: omp.inner.for.end: 363 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 364 // CHECK1: omp.loop.exit: 365 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]) 366 // CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull dereferenceable(4) [[VAR]]) #[[ATTR2]] 367 // CHECK1-NEXT: [[ARRAY_BEGIN8:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0 368 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN8]], i64 2 369 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 370 // CHECK1: arraydestroy.body: 371 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP19]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 372 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 373 // CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 374 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN8]] 375 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE9:%.*]], label [[ARRAYDESTROY_BODY]] 376 // CHECK1: arraydestroy.done9: 377 // CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP0]]) 378 // CHECK1-NEXT: [[CALL:%.*]] = call i32 @_Z5tmainIiET_v() 379 // CHECK1-NEXT: ret i32 [[CALL]] 380 // 381 // 382 // CHECK1-LABEL: define {{[^@]+}}@_ZN2StC1Ev 383 // CHECK1-SAME: (%struct.St* nonnull dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 384 // CHECK1-NEXT: entry: 385 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8 386 // CHECK1-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8 387 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8 388 // CHECK1-NEXT: call void @_ZN2StC2Ev(%struct.St* nonnull dereferenceable(8) [[THIS1]]) 389 // CHECK1-NEXT: ret void 390 // 391 // 392 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1ERKS0_2St 393 // CHECK1-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 394 // CHECK1-NEXT: entry: 395 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 396 // CHECK1-NEXT: [[S_ADDR:%.*]] = alloca %struct.S*, align 8 397 // CHECK1-NEXT: store %struct.S* [[THIS]], 
%struct.S** [[THIS_ADDR]], align 8 398 // CHECK1-NEXT: store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8 399 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 400 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8 401 // CHECK1-NEXT: call void @_ZN1SIfEC2ERKS0_2St(%struct.S* nonnull dereferenceable(4) [[THIS1]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0]], %struct.St* [[T]]) 402 // CHECK1-NEXT: ret void 403 // 404 // 405 // CHECK1-LABEL: define {{[^@]+}}@_ZN2StD1Ev 406 // CHECK1-SAME: (%struct.St* nonnull dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 407 // CHECK1-NEXT: entry: 408 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8 409 // CHECK1-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8 410 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8 411 // CHECK1-NEXT: call void @_ZN2StD2Ev(%struct.St* nonnull dereferenceable(8) [[THIS1]]) #[[ATTR2]] 412 // CHECK1-NEXT: ret void 413 // 414 // 415 // CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v 416 // CHECK1-SAME: () #[[ATTR6:[0-9]+]] { 417 // CHECK1-NEXT: entry: 418 // CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 419 // CHECK1-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4 420 // CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4 421 // CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4 422 // CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4 423 // CHECK1-NEXT: [[VAR:%.*]] = alloca %struct.S.0*, align 8 424 // CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull dereferenceable(4) [[TEST]]) 425 // CHECK1-NEXT: store i32 0, i32* [[T_VAR]], align 4 426 // CHECK1-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8* 427 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false) 428 // CHECK1-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0 429 // CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1) 430 // CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1 431 // CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2) 432 // CHECK1-NEXT: store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 8 433 // CHECK1-NEXT: [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 8 434 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, [2 x i32]*, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined. 
to void (i32*, i32*, ...)*), i32* [[T_VAR]], [2 x i32]* [[VEC]], [2 x %struct.S.0]* [[S_ARR]], %struct.S.0* [[TMP1]]) 435 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4 436 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0 437 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2 438 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 439 // CHECK1: arraydestroy.body: 440 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP2]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 441 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 442 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 443 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]] 444 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]] 445 // CHECK1: arraydestroy.done1: 446 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull dereferenceable(4) [[TEST]]) #[[ATTR2]] 447 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[RETVAL]], align 4 448 // CHECK1-NEXT: ret i32 [[TMP3]] 449 // 450 // 451 // CHECK1-LABEL: define {{[^@]+}}@_ZN2StC2Ev 452 // CHECK1-SAME: (%struct.St* nonnull dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 453 // CHECK1-NEXT: entry: 454 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8 455 // CHECK1-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8 456 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8 457 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[THIS1]], i32 0, i32 0 458 // CHECK1-NEXT: store i32 0, i32* [[A]], align 4 459 // CHECK1-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[THIS1]], i32 0, i32 1 460 // CHECK1-NEXT: store i32 0, i32* [[B]], align 4 461 // CHECK1-NEXT: ret void 462 // 463 // 464 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2ERKS0_2St 465 // CHECK1-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 466 // CHECK1-NEXT: entry: 467 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 468 // CHECK1-NEXT: [[S_ADDR:%.*]] = alloca %struct.S*, align 8 469 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 470 // CHECK1-NEXT: store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8 471 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 472 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 473 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8 474 // CHECK1-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 0 475 // CHECK1-NEXT: [[TMP1:%.*]] = load float, float* [[F2]], align 4 476 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[T]], i32 0, i32 0 477 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 478 // CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to float 479 // CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP1]], [[CONV]] 480 // CHECK1-NEXT: store float [[ADD]], float* 
[[F]], align 4 481 // CHECK1-NEXT: ret void 482 // 483 // 484 // CHECK1-LABEL: define {{[^@]+}}@_ZN2StD2Ev 485 // CHECK1-SAME: (%struct.St* nonnull dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 486 // CHECK1-NEXT: entry: 487 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8 488 // CHECK1-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8 489 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8 490 // CHECK1-NEXT: ret void 491 // 492 // 493 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev 494 // CHECK1-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 495 // CHECK1-NEXT: entry: 496 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 497 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 498 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 499 // CHECK1-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull dereferenceable(4) [[THIS1]]) 500 // CHECK1-NEXT: ret void 501 // 502 // 503 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei 504 // CHECK1-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 505 // CHECK1-NEXT: entry: 506 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 507 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 508 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 509 // CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 510 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 511 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 512 // CHECK1-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull dereferenceable(4) [[THIS1]], i32 [[TMP0]]) 513 // CHECK1-NEXT: ret void 514 // 515 // 516 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined. 
517 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR7:[0-9]+]] { 518 // CHECK1-NEXT: entry: 519 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 520 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 521 // CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32*, align 8 522 // CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8 523 // CHECK1-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8 524 // CHECK1-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8 525 // CHECK1-NEXT: [[TMP:%.*]] = alloca %struct.S.0*, align 8 526 // CHECK1-NEXT: [[_TMP1:%.*]] = alloca %struct.S.0*, align 8 527 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 528 // CHECK1-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 529 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 530 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 531 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 532 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 533 // CHECK1-NEXT: [[T_VAR3:%.*]] = alloca i32, align 4 534 // CHECK1-NEXT: [[VEC4:%.*]] = alloca [2 x i32], align 4 535 // CHECK1-NEXT: [[S_ARR5:%.*]] = alloca [2 x %struct.S.0], align 4 536 // CHECK1-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_ST:%.*]], align 4 537 // CHECK1-NEXT: [[VAR7:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4 538 // CHECK1-NEXT: [[AGG_TMP8:%.*]] = alloca [[STRUCT_ST]], align 4 539 // CHECK1-NEXT: [[_TMP9:%.*]] = alloca %struct.S.0*, align 8 540 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 541 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 542 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 543 // CHECK1-NEXT: store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8 544 // CHECK1-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8 545 // CHECK1-NEXT: store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8 546 // CHECK1-NEXT: store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8 547 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8 548 // CHECK1-NEXT: [[TMP1:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8 549 // CHECK1-NEXT: [[TMP2:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8 550 // CHECK1-NEXT: [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8 551 // CHECK1-NEXT: store %struct.S.0* [[TMP3]], %struct.S.0** [[TMP]], align 8 552 // CHECK1-NEXT: [[TMP4:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8 553 // CHECK1-NEXT: store %struct.S.0* [[TMP4]], %struct.S.0** [[_TMP1]], align 8 554 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 555 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4 556 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 557 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 558 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4 559 // CHECK1-NEXT: store i32 [[TMP5]], i32* [[T_VAR3]], align 4 560 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast [2 x i32]* [[VEC4]] to i8* 561 // CHECK1-NEXT: [[TMP7:%.*]] = bitcast [2 x i32]* [[TMP1]] to i8* 562 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP6]], i8* align 4 [[TMP7]], 
i64 8, i1 false) 563 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR5]], i32 0, i32 0 564 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast [2 x %struct.S.0]* [[TMP2]] to %struct.S.0* 565 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2 566 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN]], [[TMP9]] 567 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 568 // CHECK1: omp.arraycpy.body: 569 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 570 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 571 // CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* nonnull dereferenceable(8) [[AGG_TMP]]) 572 // CHECK1-NEXT: call void @_ZN1SIiEC1ERKS0_2St(%struct.S.0* nonnull dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S.0* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]], %struct.St* [[AGG_TMP]]) 573 // CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* nonnull dereferenceable(8) [[AGG_TMP]]) #[[ATTR2]] 574 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 575 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 576 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP9]] 577 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]] 578 // CHECK1: omp.arraycpy.done6: 579 // CHECK1-NEXT: [[TMP10:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP1]], align 8 580 // CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* nonnull dereferenceable(8) [[AGG_TMP8]]) 581 // CHECK1-NEXT: call void @_ZN1SIiEC1ERKS0_2St(%struct.S.0* nonnull dereferenceable(4) [[VAR7]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP10]], %struct.St* [[AGG_TMP8]]) 582 // CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* nonnull dereferenceable(8) [[AGG_TMP8]]) #[[ATTR2]] 583 // CHECK1-NEXT: store %struct.S.0* [[VAR7]], %struct.S.0** [[_TMP9]], align 8 584 // CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 585 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4 586 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP12]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 587 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 588 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP13]], 1 589 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 590 // CHECK1: cond.true: 591 // CHECK1-NEXT: br label [[COND_END:%.*]] 592 // CHECK1: cond.false: 593 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 594 // CHECK1-NEXT: br label [[COND_END]] 595 // CHECK1: cond.end: 596 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ] 597 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 598 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 599 // CHECK1-NEXT: store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4 
600 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 601 // CHECK1: omp.inner.for.cond: 602 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 603 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 604 // CHECK1-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] 605 // CHECK1-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] 606 // CHECK1: omp.inner.for.cond.cleanup: 607 // CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]] 608 // CHECK1: omp.inner.for.body: 609 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 610 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 611 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 612 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4 613 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[T_VAR3]], align 4 614 // CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[I]], align 4 615 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64 616 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC4]], i64 0, i64 [[IDXPROM]] 617 // CHECK1-NEXT: store i32 [[TMP19]], i32* [[ARRAYIDX]], align 4 618 // CHECK1-NEXT: [[TMP21:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP9]], align 8 619 // CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[I]], align 4 620 // CHECK1-NEXT: [[IDXPROM11:%.*]] = sext i32 [[TMP22]] to i64 621 // CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR5]], i64 0, i64 [[IDXPROM11]] 622 // CHECK1-NEXT: [[TMP23:%.*]] = bitcast %struct.S.0* [[ARRAYIDX12]] to i8* 623 // CHECK1-NEXT: [[TMP24:%.*]] = bitcast %struct.S.0* [[TMP21]] to i8* 624 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP23]], i8* align 4 [[TMP24]], i64 4, i1 false) 625 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 626 // CHECK1: omp.body.continue: 627 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 628 // CHECK1: omp.inner.for.inc: 629 // CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 630 // CHECK1-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP25]], 1 631 // CHECK1-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4 632 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] 633 // CHECK1: omp.inner.for.end: 634 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 635 // CHECK1: omp.loop.exit: 636 // CHECK1-NEXT: [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 637 // CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4 638 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]]) 639 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull dereferenceable(4) [[VAR7]]) #[[ATTR2]] 640 // CHECK1-NEXT: [[ARRAY_BEGIN14:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR5]], i32 0, i32 0 641 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN14]], i64 2 642 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 643 // CHECK1: arraydestroy.body: 644 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP28]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 645 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 646 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 647 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* 
[[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN14]] 648 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY]] 649 // CHECK1: arraydestroy.done15: 650 // CHECK1-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 651 // CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 652 // CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]]) 653 // CHECK1-NEXT: ret void 654 // 655 // 656 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1ERKS0_2St 657 // CHECK1-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 658 // CHECK1-NEXT: entry: 659 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 660 // CHECK1-NEXT: [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8 661 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 662 // CHECK1-NEXT: store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8 663 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 664 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8 665 // CHECK1-NEXT: call void @_ZN1SIiEC2ERKS0_2St(%struct.S.0* nonnull dereferenceable(4) [[THIS1]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP0]], %struct.St* [[T]]) 666 // CHECK1-NEXT: ret void 667 // 668 // 669 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev 670 // CHECK1-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 671 // CHECK1-NEXT: entry: 672 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 673 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 674 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 675 // CHECK1-NEXT: call void @_ZN1SIiED2Ev(%struct.S.0* nonnull dereferenceable(4) [[THIS1]]) #[[ATTR2]] 676 // CHECK1-NEXT: ret void 677 // 678 // 679 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev 680 // CHECK1-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 681 // CHECK1-NEXT: entry: 682 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 683 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 684 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 685 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0 686 // CHECK1-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4 687 // CHECK1-NEXT: store i32 [[TMP0]], i32* [[F]], align 4 688 // CHECK1-NEXT: ret void 689 // 690 // 691 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei 692 // CHECK1-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 693 // CHECK1-NEXT: entry: 694 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 695 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 696 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 697 // CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 698 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 699 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0 700 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 701 // CHECK1-NEXT: [[TMP1:%.*]] = load 
volatile i32, i32* @g, align 4 702 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]] 703 // CHECK1-NEXT: store i32 [[ADD]], i32* [[F]], align 4 704 // CHECK1-NEXT: ret void 705 // 706 // 707 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2ERKS0_2St 708 // CHECK1-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 709 // CHECK1-NEXT: entry: 710 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 711 // CHECK1-NEXT: [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8 712 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 713 // CHECK1-NEXT: store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8 714 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 715 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0 716 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8 717 // CHECK1-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[TMP0]], i32 0, i32 0 718 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[F2]], align 4 719 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[T]], i32 0, i32 0 720 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 721 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]] 722 // CHECK1-NEXT: store i32 [[ADD]], i32* [[F]], align 4 723 // CHECK1-NEXT: ret void 724 // 725 // 726 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev 727 // CHECK1-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 728 // CHECK1-NEXT: entry: 729 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 730 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 731 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 732 // CHECK1-NEXT: ret void 733 // 734 // 735 // CHECK1-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_for_firstprivate_codegen.cpp 736 // CHECK1-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 737 // CHECK1-NEXT: entry: 738 // CHECK1-NEXT: call void @__cxx_global_var_init() 739 // CHECK1-NEXT: call void @__cxx_global_var_init.1() 740 // CHECK1-NEXT: call void @__cxx_global_var_init.2() 741 // CHECK1-NEXT: ret void 742 // 743 // 744 // CHECK2-LABEL: define {{[^@]+}}@__cxx_global_var_init 745 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" { 746 // CHECK2-NEXT: entry: 747 // CHECK2-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull dereferenceable(4) @test) 748 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]] 749 // CHECK2-NEXT: ret void 750 // 751 // 752 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev 753 // CHECK2-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 { 754 // CHECK2-NEXT: entry: 755 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 756 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 757 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 758 // CHECK2-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* nonnull dereferenceable(4) [[THIS1]]) 759 // CHECK2-NEXT: ret void 760 // 761 
// 762 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev 763 // CHECK2-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 764 // CHECK2-NEXT: entry: 765 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 766 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 767 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 768 // CHECK2-NEXT: call void @_ZN1SIfED2Ev(%struct.S* nonnull dereferenceable(4) [[THIS1]]) #[[ATTR2]] 769 // CHECK2-NEXT: ret void 770 // 771 // 772 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev 773 // CHECK2-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 774 // CHECK2-NEXT: entry: 775 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 776 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 777 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 778 // CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 779 // CHECK2-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4 780 // CHECK2-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float 781 // CHECK2-NEXT: store float [[CONV]], float* [[F]], align 4 782 // CHECK2-NEXT: ret void 783 // 784 // 785 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev 786 // CHECK2-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 787 // CHECK2-NEXT: entry: 788 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 789 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 790 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 791 // CHECK2-NEXT: ret void 792 // 793 // 794 // CHECK2-LABEL: define {{[^@]+}}@__cxx_global_var_init.1 795 // CHECK2-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 796 // CHECK2-NEXT: entry: 797 // CHECK2-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float 1.000000e+00) 798 // CHECK2-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float 2.000000e+00) 799 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]] 800 // CHECK2-NEXT: ret void 801 // 802 // 803 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef 804 // CHECK2-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 805 // CHECK2-NEXT: entry: 806 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 807 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float, align 4 808 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 809 // CHECK2-NEXT: store float [[A]], float* [[A_ADDR]], align 4 810 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 811 // CHECK2-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 812 // CHECK2-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* nonnull dereferenceable(4) [[THIS1]], float [[TMP0]]) 813 // CHECK2-NEXT: ret void 814 // 815 // 816 // CHECK2-LABEL: define {{[^@]+}}@__cxx_global_array_dtor 817 // CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 818 // CHECK2-NEXT: entry: 819 // CHECK2-NEXT: 
[[DOTADDR:%.*]] = alloca i8*, align 8 820 // CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 821 // CHECK2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 822 // CHECK2: arraydestroy.body: 823 // CHECK2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 824 // CHECK2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 825 // CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 826 // CHECK2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0) 827 // CHECK2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]] 828 // CHECK2: arraydestroy.done1: 829 // CHECK2-NEXT: ret void 830 // 831 // 832 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef 833 // CHECK2-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 834 // CHECK2-NEXT: entry: 835 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 836 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float, align 4 837 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 838 // CHECK2-NEXT: store float [[A]], float* [[A_ADDR]], align 4 839 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 840 // CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 841 // CHECK2-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 842 // CHECK2-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4 843 // CHECK2-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float 844 // CHECK2-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]] 845 // CHECK2-NEXT: store float [[ADD]], float* [[F]], align 4 846 // CHECK2-NEXT: ret void 847 // 848 // 849 // CHECK2-LABEL: define {{[^@]+}}@__cxx_global_var_init.2 850 // CHECK2-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 851 // CHECK2-NEXT: entry: 852 // CHECK2-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) @var, float 3.000000e+00) 853 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]] 854 // CHECK2-NEXT: ret void 855 // 856 // 857 // CHECK2-LABEL: define {{[^@]+}}@main 858 // CHECK2-SAME: () #[[ATTR3:[0-9]+]] { 859 // CHECK2-NEXT: entry: 860 // CHECK2-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 861 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 862 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 863 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 864 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 865 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 866 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 867 // CHECK2-NEXT: [[T_VAR:%.*]] = alloca i32, align 4 868 // CHECK2-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4 869 // CHECK2-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4 870 // CHECK2-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_ST:%.*]], align 4 871 // CHECK2-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S:%.*]], align 4 872 // CHECK2-NEXT: 
[[AGG_TMP2:%.*]] = alloca [[STRUCT_ST]], align 4 873 // CHECK2-NEXT: [[SIVAR:%.*]] = alloca i32, align 4 874 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 875 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) 876 // CHECK2-NEXT: store i32 0, i32* [[RETVAL]], align 4 877 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 878 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4 879 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 880 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 881 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* @t_var, align 4 882 // CHECK2-NEXT: store i32 [[TMP1]], i32* [[T_VAR]], align 4 883 // CHECK2-NEXT: [[TMP2:%.*]] = bitcast [2 x i32]* [[VEC]] to i8* 884 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 bitcast ([2 x i32]* @vec to i8*), i64 8, i1 false) 885 // CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0 886 // CHECK2-NEXT: [[TMP3:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2 887 // CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN]], [[TMP3]] 888 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 889 // CHECK2: omp.arraycpy.body: 890 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 891 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 892 // CHECK2-NEXT: call void @_ZN2StC1Ev(%struct.St* nonnull dereferenceable(8) [[AGG_TMP]]) 893 // CHECK2-NEXT: call void @_ZN1SIfEC1ERKS0_2St(%struct.S* nonnull dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]], %struct.St* [[AGG_TMP]]) 894 // CHECK2-NEXT: call void @_ZN2StD1Ev(%struct.St* nonnull dereferenceable(8) [[AGG_TMP]]) #[[ATTR2]] 895 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 896 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 897 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP3]] 898 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]] 899 // CHECK2: omp.arraycpy.done1: 900 // CHECK2-NEXT: call void @_ZN2StC1Ev(%struct.St* nonnull dereferenceable(8) [[AGG_TMP2]]) 901 // CHECK2-NEXT: call void @_ZN1SIfEC1ERKS0_2St(%struct.S* nonnull dereferenceable(4) [[VAR]], %struct.S* nonnull align 4 dereferenceable(4) @var, %struct.St* [[AGG_TMP2]]) 902 // CHECK2-NEXT: call void @_ZN2StD1Ev(%struct.St* nonnull dereferenceable(8) [[AGG_TMP2]]) #[[ATTR2]] 903 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4 904 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[SIVAR]], align 4 905 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP0]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 906 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 907 // CHECK2-NEXT: 
[[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1 908 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 909 // CHECK2: cond.true: 910 // CHECK2-NEXT: br label [[COND_END:%.*]] 911 // CHECK2: cond.false: 912 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 913 // CHECK2-NEXT: br label [[COND_END]] 914 // CHECK2: cond.end: 915 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] 916 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 917 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 918 // CHECK2-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4 919 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 920 // CHECK2: omp.inner.for.cond: 921 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 922 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 923 // CHECK2-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 924 // CHECK2-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] 925 // CHECK2: omp.inner.for.cond.cleanup: 926 // CHECK2-NEXT: br label [[OMP_INNER_FOR_END:%.*]] 927 // CHECK2: omp.inner.for.body: 928 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 929 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 930 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 931 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4 932 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[T_VAR]], align 4 933 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4 934 // CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64 935 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 [[IDXPROM]] 936 // CHECK2-NEXT: store i32 [[TMP11]], i32* [[ARRAYIDX]], align 4 937 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4 938 // CHECK2-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP13]] to i64 939 // CHECK2-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 [[IDXPROM4]] 940 // CHECK2-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[ARRAYIDX5]] to i8* 941 // CHECK2-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[VAR]] to i8* 942 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP14]], i8* align 4 [[TMP15]], i64 4, i1 false) 943 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4 944 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[SIVAR]], align 4 945 // CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP17]], [[TMP16]] 946 // CHECK2-NEXT: store i32 [[ADD6]], i32* [[SIVAR]], align 4 947 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 948 // CHECK2: omp.body.continue: 949 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 950 // CHECK2: omp.inner.for.inc: 951 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 952 // CHECK2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP18]], 1 953 // CHECK2-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4 954 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] 955 // CHECK2: omp.inner.for.end: 956 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 957 // CHECK2: omp.loop.exit: 958 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]) 959 // CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull dereferenceable(4) [[VAR]]) #[[ATTR2]] 960 // CHECK2-NEXT: [[ARRAY_BEGIN8:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0 961 // CHECK2-NEXT: [[TMP19:%.*]] = 
getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN8]], i64 2 962 // CHECK2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 963 // CHECK2: arraydestroy.body: 964 // CHECK2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP19]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 965 // CHECK2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 966 // CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 967 // CHECK2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN8]] 968 // CHECK2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE9:%.*]], label [[ARRAYDESTROY_BODY]] 969 // CHECK2: arraydestroy.done9: 970 // CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP0]]) 971 // CHECK2-NEXT: [[CALL:%.*]] = call i32 @_Z5tmainIiET_v() 972 // CHECK2-NEXT: ret i32 [[CALL]] 973 // 974 // 975 // CHECK2-LABEL: define {{[^@]+}}@_ZN2StC1Ev 976 // CHECK2-SAME: (%struct.St* nonnull dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 977 // CHECK2-NEXT: entry: 978 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8 979 // CHECK2-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8 980 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8 981 // CHECK2-NEXT: call void @_ZN2StC2Ev(%struct.St* nonnull dereferenceable(8) [[THIS1]]) 982 // CHECK2-NEXT: ret void 983 // 984 // 985 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC1ERKS0_2St 986 // CHECK2-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 987 // CHECK2-NEXT: entry: 988 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 989 // CHECK2-NEXT: [[S_ADDR:%.*]] = alloca %struct.S*, align 8 990 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 991 // CHECK2-NEXT: store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8 992 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 993 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8 994 // CHECK2-NEXT: call void @_ZN1SIfEC2ERKS0_2St(%struct.S* nonnull dereferenceable(4) [[THIS1]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0]], %struct.St* [[T]]) 995 // CHECK2-NEXT: ret void 996 // 997 // 998 // CHECK2-LABEL: define {{[^@]+}}@_ZN2StD1Ev 999 // CHECK2-SAME: (%struct.St* nonnull dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1000 // CHECK2-NEXT: entry: 1001 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8 1002 // CHECK2-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8 1003 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8 1004 // CHECK2-NEXT: call void @_ZN2StD2Ev(%struct.St* nonnull dereferenceable(8) [[THIS1]]) #[[ATTR2]] 1005 // CHECK2-NEXT: ret void 1006 // 1007 // 1008 // CHECK2-LABEL: define {{[^@]+}}@_Z5tmainIiET_v 1009 // CHECK2-SAME: () #[[ATTR6:[0-9]+]] { 1010 // CHECK2-NEXT: entry: 1011 // CHECK2-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 1012 // CHECK2-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4 1013 // CHECK2-NEXT: [[T_VAR:%.*]] = alloca i32, align 4 1014 // CHECK2-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4 1015 // CHECK2-NEXT: [[S_ARR:%.*]] = alloca [2 x 
%struct.S.0], align 4 1016 // CHECK2-NEXT: [[VAR:%.*]] = alloca %struct.S.0*, align 8 1017 // CHECK2-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull dereferenceable(4) [[TEST]]) 1018 // CHECK2-NEXT: store i32 0, i32* [[T_VAR]], align 4 1019 // CHECK2-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8* 1020 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false) 1021 // CHECK2-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0 1022 // CHECK2-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1) 1023 // CHECK2-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1 1024 // CHECK2-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2) 1025 // CHECK2-NEXT: store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 8 1026 // CHECK2-NEXT: [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 8 1027 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, [2 x i32]*, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[T_VAR]], [2 x i32]* [[VEC]], [2 x %struct.S.0]* [[S_ARR]], %struct.S.0* [[TMP1]]) 1028 // CHECK2-NEXT: store i32 0, i32* [[RETVAL]], align 4 1029 // CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0 1030 // CHECK2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2 1031 // CHECK2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 1032 // CHECK2: arraydestroy.body: 1033 // CHECK2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP2]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 1034 // CHECK2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 1035 // CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 1036 // CHECK2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]] 1037 // CHECK2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]] 1038 // CHECK2: arraydestroy.done1: 1039 // CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull dereferenceable(4) [[TEST]]) #[[ATTR2]] 1040 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[RETVAL]], align 4 1041 // CHECK2-NEXT: ret i32 [[TMP3]] 1042 // 1043 // 1044 // CHECK2-LABEL: define {{[^@]+}}@_ZN2StC2Ev 1045 // CHECK2-SAME: (%struct.St* nonnull dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1046 // CHECK2-NEXT: entry: 1047 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8 1048 // CHECK2-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8 1049 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8 1050 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[THIS1]], i32 0, i32 0 1051 // CHECK2-NEXT: store i32 0, i32* [[A]], align 4 1052 // CHECK2-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[THIS1]], i32 0, i32 1 1053 // CHECK2-NEXT: store i32 0, i32* [[B]], 
align 4 1054 // CHECK2-NEXT: ret void 1055 // 1056 // 1057 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC2ERKS0_2St 1058 // CHECK2-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1059 // CHECK2-NEXT: entry: 1060 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1061 // CHECK2-NEXT: [[S_ADDR:%.*]] = alloca %struct.S*, align 8 1062 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1063 // CHECK2-NEXT: store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8 1064 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1065 // CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 1066 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8 1067 // CHECK2-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 0 1068 // CHECK2-NEXT: [[TMP1:%.*]] = load float, float* [[F2]], align 4 1069 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[T]], i32 0, i32 0 1070 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 1071 // CHECK2-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to float 1072 // CHECK2-NEXT: [[ADD:%.*]] = fadd float [[TMP1]], [[CONV]] 1073 // CHECK2-NEXT: store float [[ADD]], float* [[F]], align 4 1074 // CHECK2-NEXT: ret void 1075 // 1076 // 1077 // CHECK2-LABEL: define {{[^@]+}}@_ZN2StD2Ev 1078 // CHECK2-SAME: (%struct.St* nonnull dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1079 // CHECK2-NEXT: entry: 1080 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8 1081 // CHECK2-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8 1082 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8 1083 // CHECK2-NEXT: ret void 1084 // 1085 // 1086 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev 1087 // CHECK2-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1088 // CHECK2-NEXT: entry: 1089 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 1090 // CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 1091 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 1092 // CHECK2-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull dereferenceable(4) [[THIS1]]) 1093 // CHECK2-NEXT: ret void 1094 // 1095 // 1096 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei 1097 // CHECK2-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1098 // CHECK2-NEXT: entry: 1099 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 1100 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 1101 // CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 1102 // CHECK2-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 1103 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 1104 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 1105 // CHECK2-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull dereferenceable(4) [[THIS1]], i32 [[TMP0]]) 1106 // CHECK2-NEXT: ret void 1107 // 1108 // 1109 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined. 
1110 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR7:[0-9]+]] { 1111 // CHECK2-NEXT: entry: 1112 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1113 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1114 // CHECK2-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32*, align 8 1115 // CHECK2-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8 1116 // CHECK2-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8 1117 // CHECK2-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8 1118 // CHECK2-NEXT: [[TMP:%.*]] = alloca %struct.S.0*, align 8 1119 // CHECK2-NEXT: [[_TMP1:%.*]] = alloca %struct.S.0*, align 8 1120 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1121 // CHECK2-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 1122 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1123 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1124 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1125 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1126 // CHECK2-NEXT: [[T_VAR3:%.*]] = alloca i32, align 4 1127 // CHECK2-NEXT: [[VEC4:%.*]] = alloca [2 x i32], align 4 1128 // CHECK2-NEXT: [[S_ARR5:%.*]] = alloca [2 x %struct.S.0], align 4 1129 // CHECK2-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_ST:%.*]], align 4 1130 // CHECK2-NEXT: [[VAR7:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4 1131 // CHECK2-NEXT: [[AGG_TMP8:%.*]] = alloca [[STRUCT_ST]], align 4 1132 // CHECK2-NEXT: [[_TMP9:%.*]] = alloca %struct.S.0*, align 8 1133 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 1134 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1135 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1136 // CHECK2-NEXT: store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8 1137 // CHECK2-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8 1138 // CHECK2-NEXT: store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8 1139 // CHECK2-NEXT: store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8 1140 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8 1141 // CHECK2-NEXT: [[TMP1:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8 1142 // CHECK2-NEXT: [[TMP2:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8 1143 // CHECK2-NEXT: [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8 1144 // CHECK2-NEXT: store %struct.S.0* [[TMP3]], %struct.S.0** [[TMP]], align 8 1145 // CHECK2-NEXT: [[TMP4:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8 1146 // CHECK2-NEXT: store %struct.S.0* [[TMP4]], %struct.S.0** [[_TMP1]], align 8 1147 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1148 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4 1149 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1150 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1151 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4 1152 // CHECK2-NEXT: store i32 [[TMP5]], i32* [[T_VAR3]], align 4 1153 // CHECK2-NEXT: [[TMP6:%.*]] = bitcast [2 x i32]* [[VEC4]] to i8* 1154 // CHECK2-NEXT: [[TMP7:%.*]] = bitcast [2 x i32]* [[TMP1]] to i8* 1155 // CHECK2-NEXT: call void 
@llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP6]], i8* align 4 [[TMP7]], i64 8, i1 false) 1156 // CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR5]], i32 0, i32 0 1157 // CHECK2-NEXT: [[TMP8:%.*]] = bitcast [2 x %struct.S.0]* [[TMP2]] to %struct.S.0* 1158 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2 1159 // CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN]], [[TMP9]] 1160 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 1161 // CHECK2: omp.arraycpy.body: 1162 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 1163 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 1164 // CHECK2-NEXT: call void @_ZN2StC1Ev(%struct.St* nonnull dereferenceable(8) [[AGG_TMP]]) 1165 // CHECK2-NEXT: call void @_ZN1SIiEC1ERKS0_2St(%struct.S.0* nonnull dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S.0* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]], %struct.St* [[AGG_TMP]]) 1166 // CHECK2-NEXT: call void @_ZN2StD1Ev(%struct.St* nonnull dereferenceable(8) [[AGG_TMP]]) #[[ATTR2]] 1167 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 1168 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 1169 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP9]] 1170 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]] 1171 // CHECK2: omp.arraycpy.done6: 1172 // CHECK2-NEXT: [[TMP10:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP1]], align 8 1173 // CHECK2-NEXT: call void @_ZN2StC1Ev(%struct.St* nonnull dereferenceable(8) [[AGG_TMP8]]) 1174 // CHECK2-NEXT: call void @_ZN1SIiEC1ERKS0_2St(%struct.S.0* nonnull dereferenceable(4) [[VAR7]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP10]], %struct.St* [[AGG_TMP8]]) 1175 // CHECK2-NEXT: call void @_ZN2StD1Ev(%struct.St* nonnull dereferenceable(8) [[AGG_TMP8]]) #[[ATTR2]] 1176 // CHECK2-NEXT: store %struct.S.0* [[VAR7]], %struct.S.0** [[_TMP9]], align 8 1177 // CHECK2-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1178 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4 1179 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP12]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1180 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1181 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP13]], 1 1182 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1183 // CHECK2: cond.true: 1184 // CHECK2-NEXT: br label [[COND_END:%.*]] 1185 // CHECK2: cond.false: 1186 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1187 // CHECK2-NEXT: br label [[COND_END]] 1188 // CHECK2: cond.end: 1189 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ] 1190 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1191 // CHECK2-NEXT: [[TMP15:%.*]] = 
load i32, i32* [[DOTOMP_LB]], align 4 1192 // CHECK2-NEXT: store i32 [[TMP15]], i32* [[DOTOMP_IV]], align 4 1193 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1194 // CHECK2: omp.inner.for.cond: 1195 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1196 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1197 // CHECK2-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] 1198 // CHECK2-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] 1199 // CHECK2: omp.inner.for.cond.cleanup: 1200 // CHECK2-NEXT: br label [[OMP_INNER_FOR_END:%.*]] 1201 // CHECK2: omp.inner.for.body: 1202 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1203 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 1204 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1205 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4 1206 // CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[T_VAR3]], align 4 1207 // CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[I]], align 4 1208 // CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64 1209 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC4]], i64 0, i64 [[IDXPROM]] 1210 // CHECK2-NEXT: store i32 [[TMP19]], i32* [[ARRAYIDX]], align 4 1211 // CHECK2-NEXT: [[TMP21:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP9]], align 8 1212 // CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[I]], align 4 1213 // CHECK2-NEXT: [[IDXPROM11:%.*]] = sext i32 [[TMP22]] to i64 1214 // CHECK2-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR5]], i64 0, i64 [[IDXPROM11]] 1215 // CHECK2-NEXT: [[TMP23:%.*]] = bitcast %struct.S.0* [[ARRAYIDX12]] to i8* 1216 // CHECK2-NEXT: [[TMP24:%.*]] = bitcast %struct.S.0* [[TMP21]] to i8* 1217 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP23]], i8* align 4 [[TMP24]], i64 4, i1 false) 1218 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1219 // CHECK2: omp.body.continue: 1220 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1221 // CHECK2: omp.inner.for.inc: 1222 // CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1223 // CHECK2-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP25]], 1 1224 // CHECK2-NEXT: store i32 [[ADD13]], i32* [[DOTOMP_IV]], align 4 1225 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] 1226 // CHECK2: omp.inner.for.end: 1227 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1228 // CHECK2: omp.loop.exit: 1229 // CHECK2-NEXT: [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1230 // CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4 1231 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]]) 1232 // CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull dereferenceable(4) [[VAR7]]) #[[ATTR2]] 1233 // CHECK2-NEXT: [[ARRAY_BEGIN14:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR5]], i32 0, i32 0 1234 // CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN14]], i64 2 1235 // CHECK2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 1236 // CHECK2: arraydestroy.body: 1237 // CHECK2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP28]], [[OMP_LOOP_EXIT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 1238 // CHECK2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 1239 // CHECK2-NEXT: call void 
@_ZN1SIiED1Ev(%struct.S.0* nonnull dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 1240 // CHECK2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN14]] 1241 // CHECK2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY]] 1242 // CHECK2: arraydestroy.done15: 1243 // CHECK2-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1244 // CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 1245 // CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]]) 1246 // CHECK2-NEXT: ret void 1247 // 1248 // 1249 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC1ERKS0_2St 1250 // CHECK2-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1251 // CHECK2-NEXT: entry: 1252 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 1253 // CHECK2-NEXT: [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8 1254 // CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 1255 // CHECK2-NEXT: store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8 1256 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 1257 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8 1258 // CHECK2-NEXT: call void @_ZN1SIiEC2ERKS0_2St(%struct.S.0* nonnull dereferenceable(4) [[THIS1]], %struct.S.0* nonnull align 4 dereferenceable(4) [[TMP0]], %struct.St* [[T]]) 1259 // CHECK2-NEXT: ret void 1260 // 1261 // 1262 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev 1263 // CHECK2-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1264 // CHECK2-NEXT: entry: 1265 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 1266 // CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 1267 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 1268 // CHECK2-NEXT: call void @_ZN1SIiED2Ev(%struct.S.0* nonnull dereferenceable(4) [[THIS1]]) #[[ATTR2]] 1269 // CHECK2-NEXT: ret void 1270 // 1271 // 1272 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev 1273 // CHECK2-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1274 // CHECK2-NEXT: entry: 1275 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 1276 // CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 1277 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 1278 // CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0 1279 // CHECK2-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4 1280 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[F]], align 4 1281 // CHECK2-NEXT: ret void 1282 // 1283 // 1284 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei 1285 // CHECK2-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1286 // CHECK2-NEXT: entry: 1287 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 1288 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 1289 // CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 1290 // CHECK2-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 1291 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 1292 // 
CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0 1293 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 1294 // CHECK2-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4 1295 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]] 1296 // CHECK2-NEXT: store i32 [[ADD]], i32* [[F]], align 4 1297 // CHECK2-NEXT: ret void 1298 // 1299 // 1300 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC2ERKS0_2St 1301 // CHECK2-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1302 // CHECK2-NEXT: entry: 1303 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 1304 // CHECK2-NEXT: [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8 1305 // CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 1306 // CHECK2-NEXT: store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8 1307 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 1308 // CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0 1309 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8 1310 // CHECK2-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[TMP0]], i32 0, i32 0 1311 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[F2]], align 4 1312 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[T]], i32 0, i32 0 1313 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 1314 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]] 1315 // CHECK2-NEXT: store i32 [[ADD]], i32* [[F]], align 4 1316 // CHECK2-NEXT: ret void 1317 // 1318 // 1319 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev 1320 // CHECK2-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1321 // CHECK2-NEXT: entry: 1322 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 1323 // CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 1324 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 1325 // CHECK2-NEXT: ret void 1326 // 1327 // 1328 // CHECK2-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_for_firstprivate_codegen.cpp 1329 // CHECK2-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1330 // CHECK2-NEXT: entry: 1331 // CHECK2-NEXT: call void @__cxx_global_var_init() 1332 // CHECK2-NEXT: call void @__cxx_global_var_init.1() 1333 // CHECK2-NEXT: call void @__cxx_global_var_init.2() 1334 // CHECK2-NEXT: ret void 1335 // 1336 // 1337 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_var_init 1338 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1339 // CHECK3-NEXT: entry: 1340 // CHECK3-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull dereferenceable(4) @test) 1341 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]] 1342 // CHECK3-NEXT: ret void 1343 // 1344 // 1345 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev 1346 // CHECK3-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 { 1347 // CHECK3-NEXT: entry: 1348 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1349 // CHECK3-NEXT: store 
%struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1350 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1351 // CHECK3-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* nonnull dereferenceable(4) [[THIS1]]) 1352 // CHECK3-NEXT: ret void 1353 // 1354 // 1355 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev 1356 // CHECK3-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1357 // CHECK3-NEXT: entry: 1358 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1359 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1360 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1361 // CHECK3-NEXT: call void @_ZN1SIfED2Ev(%struct.S* nonnull dereferenceable(4) [[THIS1]]) #[[ATTR2]] 1362 // CHECK3-NEXT: ret void 1363 // 1364 // 1365 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev 1366 // CHECK3-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1367 // CHECK3-NEXT: entry: 1368 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1369 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1370 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1371 // CHECK3-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 1372 // CHECK3-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4 1373 // CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float 1374 // CHECK3-NEXT: store float [[CONV]], float* [[F]], align 4 1375 // CHECK3-NEXT: ret void 1376 // 1377 // 1378 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev 1379 // CHECK3-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1380 // CHECK3-NEXT: entry: 1381 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1382 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1383 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1384 // CHECK3-NEXT: ret void 1385 // 1386 // 1387 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_var_init.1 1388 // CHECK3-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1389 // CHECK3-NEXT: entry: 1390 // CHECK3-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float 1.000000e+00) 1391 // CHECK3-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float 2.000000e+00) 1392 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]] 1393 // CHECK3-NEXT: ret void 1394 // 1395 // 1396 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef 1397 // CHECK3-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1398 // CHECK3-NEXT: entry: 1399 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1400 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float, align 4 1401 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1402 // CHECK3-NEXT: store float [[A]], float* [[A_ADDR]], align 4 1403 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1404 // CHECK3-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 1405 // CHECK3-NEXT: call void 
@_ZN1SIfEC2Ef(%struct.S* nonnull dereferenceable(4) [[THIS1]], float [[TMP0]]) 1406 // CHECK3-NEXT: ret void 1407 // 1408 // 1409 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_array_dtor 1410 // CHECK3-SAME: (i8* [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1411 // CHECK3-NEXT: entry: 1412 // CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 1413 // CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 1414 // CHECK3-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 1415 // CHECK3: arraydestroy.body: 1416 // CHECK3-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 1417 // CHECK3-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 1418 // CHECK3-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 1419 // CHECK3-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0) 1420 // CHECK3-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]] 1421 // CHECK3: arraydestroy.done1: 1422 // CHECK3-NEXT: ret void 1423 // 1424 // 1425 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef 1426 // CHECK3-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1427 // CHECK3-NEXT: entry: 1428 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1429 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float, align 4 1430 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1431 // CHECK3-NEXT: store float [[A]], float* [[A_ADDR]], align 4 1432 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1433 // CHECK3-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 1434 // CHECK3-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 1435 // CHECK3-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4 1436 // CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float 1437 // CHECK3-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]] 1438 // CHECK3-NEXT: store float [[ADD]], float* [[F]], align 4 1439 // CHECK3-NEXT: ret void 1440 // 1441 // 1442 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_var_init.2 1443 // CHECK3-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1444 // CHECK3-NEXT: entry: 1445 // CHECK3-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) @var, float 3.000000e+00) 1446 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]] 1447 // CHECK3-NEXT: ret void 1448 // 1449 // 1450 // CHECK3-LABEL: define {{[^@]+}}@main 1451 // CHECK3-SAME: () #[[ATTR3:[0-9]+]] { 1452 // CHECK3-NEXT: entry: 1453 // CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 1454 // CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8 1455 // CHECK3-NEXT: store i32 0, i32* [[RETVAL]], align 4 1456 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0 1457 // CHECK3-NEXT: store i32* @_ZZ4mainE5sivar, i32** [[TMP0]], align 8 
1458 // CHECK3-NEXT: call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull dereferenceable(8) [[REF_TMP]]) 1459 // CHECK3-NEXT: ret i32 0 1460 // 1461 // 1462 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined. 1463 // CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR5:[0-9]+]] { 1464 // CHECK3-NEXT: entry: 1465 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1466 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1467 // CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 8 1468 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32*, align 8 1469 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1470 // CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 1471 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1472 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1473 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1474 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1475 // CHECK3-NEXT: [[G:%.*]] = alloca i32, align 4 1476 // CHECK3-NEXT: [[G1:%.*]] = alloca i32, align 4 1477 // CHECK3-NEXT: [[_TMP2:%.*]] = alloca i32*, align 8 1478 // CHECK3-NEXT: [[SIVAR3:%.*]] = alloca i32, align 4 1479 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 1480 // CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8 1481 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1482 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1483 // CHECK3-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8 1484 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8 1485 // CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** @g1, align 8 1486 // CHECK3-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8 1487 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1488 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4 1489 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1490 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1491 // CHECK3-NEXT: [[TMP2:%.*]] = load volatile i32, i32* @g, align 4 1492 // CHECK3-NEXT: store i32 [[TMP2]], i32* [[G]], align 4 1493 // CHECK3-NEXT: [[TMP3:%.*]] = load volatile i32, i32* @g, align 4 1494 // CHECK3-NEXT: store i32 [[TMP3]], i32* [[G1]], align 4 1495 // CHECK3-NEXT: store i32* [[G1]], i32** [[_TMP2]], align 8 1496 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4 1497 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[SIVAR3]], align 4 1498 // CHECK3-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1499 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4 1500 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1501 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1502 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 1 1503 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1504 // CHECK3: cond.true: 1505 // CHECK3-NEXT: br label [[COND_END:%.*]] 1506 // CHECK3: cond.false: 1507 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1508 // CHECK3-NEXT: br label [[COND_END]] 1509 // CHECK3: cond.end: 1510 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] 1511 // CHECK3-NEXT: store i32 [[COND]], i32* 
[[DOTOMP_UB]], align 4 1512 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1513 // CHECK3-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4 1514 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1515 // CHECK3: omp.inner.for.cond: 1516 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1517 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1518 // CHECK3-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]] 1519 // CHECK3-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1520 // CHECK3: omp.inner.for.body: 1521 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1522 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1 1523 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1524 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4 1525 // CHECK3-NEXT: store i32 1, i32* [[G]], align 4 1526 // CHECK3-NEXT: [[TMP13:%.*]] = load i32*, i32** [[_TMP2]], align 8 1527 // CHECK3-NEXT: store volatile i32 2, i32* [[TMP13]], align 4 1528 // CHECK3-NEXT: store i32 3, i32* [[SIVAR3]], align 4 1529 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0 1530 // CHECK3-NEXT: store i32* [[G]], i32** [[TMP14]], align 8 1531 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1 1532 // CHECK3-NEXT: [[TMP16:%.*]] = load i32*, i32** [[_TMP2]], align 8 1533 // CHECK3-NEXT: store i32* [[TMP16]], i32** [[TMP15]], align 8 1534 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2 1535 // CHECK3-NEXT: store i32* [[SIVAR3]], i32** [[TMP17]], align 8 1536 // CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull dereferenceable(24) [[REF_TMP]]) 1537 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1538 // CHECK3: omp.body.continue: 1539 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1540 // CHECK3: omp.inner.for.inc: 1541 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1542 // CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP18]], 1 1543 // CHECK3-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4 1544 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] 1545 // CHECK3: omp.inner.for.end: 1546 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1547 // CHECK3: omp.loop.exit: 1548 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]]) 1549 // CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP6]]) 1550 // CHECK3-NEXT: ret void 1551 // 1552 // 1553 // CHECK3-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_for_firstprivate_codegen.cpp 1554 // CHECK3-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1555 // CHECK3-NEXT: entry: 1556 // CHECK3-NEXT: call void @__cxx_global_var_init() 1557 // CHECK3-NEXT: call void @__cxx_global_var_init.1() 1558 // CHECK3-NEXT: call void @__cxx_global_var_init.2() 1559 // CHECK3-NEXT: ret void 1560 // 1561 // 1562 // CHECK4-LABEL: define {{[^@]+}}@__cxx_global_var_init 1563 // CHECK4-SAME: () #[[ATTR0:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1564 // CHECK4-NEXT: entry: 1565 // CHECK4-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull dereferenceable(4) @test) 1566 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* 
@__dso_handle) #[[ATTR2:[0-9]+]] 1567 // CHECK4-NEXT: ret void 1568 // 1569 // 1570 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev 1571 // CHECK4-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 { 1572 // CHECK4-NEXT: entry: 1573 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1574 // CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1575 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1576 // CHECK4-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* nonnull dereferenceable(4) [[THIS1]]) 1577 // CHECK4-NEXT: ret void 1578 // 1579 // 1580 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev 1581 // CHECK4-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1582 // CHECK4-NEXT: entry: 1583 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1584 // CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1585 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1586 // CHECK4-NEXT: call void @_ZN1SIfED2Ev(%struct.S* nonnull dereferenceable(4) [[THIS1]]) #[[ATTR2]] 1587 // CHECK4-NEXT: ret void 1588 // 1589 // 1590 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev 1591 // CHECK4-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1592 // CHECK4-NEXT: entry: 1593 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1594 // CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1595 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1596 // CHECK4-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 1597 // CHECK4-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4 1598 // CHECK4-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float 1599 // CHECK4-NEXT: store float [[CONV]], float* [[F]], align 4 1600 // CHECK4-NEXT: ret void 1601 // 1602 // 1603 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev 1604 // CHECK4-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1605 // CHECK4-NEXT: entry: 1606 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1607 // CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1608 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1609 // CHECK4-NEXT: ret void 1610 // 1611 // 1612 // CHECK4-LABEL: define {{[^@]+}}@__cxx_global_var_init.1 1613 // CHECK4-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1614 // CHECK4-NEXT: entry: 1615 // CHECK4-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float 1.000000e+00) 1616 // CHECK4-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float 2.000000e+00) 1617 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]] 1618 // CHECK4-NEXT: ret void 1619 // 1620 // 1621 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef 1622 // CHECK4-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1623 // CHECK4-NEXT: entry: 1624 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1625 // CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float, 
align 4 1626 // CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1627 // CHECK4-NEXT: store float [[A]], float* [[A_ADDR]], align 4 1628 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1629 // CHECK4-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 1630 // CHECK4-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* nonnull dereferenceable(4) [[THIS1]], float [[TMP0]]) 1631 // CHECK4-NEXT: ret void 1632 // 1633 // 1634 // CHECK4-LABEL: define {{[^@]+}}@__cxx_global_array_dtor 1635 // CHECK4-SAME: (i8* [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1636 // CHECK4-NEXT: entry: 1637 // CHECK4-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 1638 // CHECK4-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 1639 // CHECK4-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 1640 // CHECK4: arraydestroy.body: 1641 // CHECK4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 1642 // CHECK4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 1643 // CHECK4-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 1644 // CHECK4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0) 1645 // CHECK4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]] 1646 // CHECK4: arraydestroy.done1: 1647 // CHECK4-NEXT: ret void 1648 // 1649 // 1650 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef 1651 // CHECK4-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1652 // CHECK4-NEXT: entry: 1653 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1654 // CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float, align 4 1655 // CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1656 // CHECK4-NEXT: store float [[A]], float* [[A_ADDR]], align 4 1657 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1658 // CHECK4-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 1659 // CHECK4-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 1660 // CHECK4-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4 1661 // CHECK4-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float 1662 // CHECK4-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]] 1663 // CHECK4-NEXT: store float [[ADD]], float* [[F]], align 4 1664 // CHECK4-NEXT: ret void 1665 // 1666 // 1667 // CHECK4-LABEL: define {{[^@]+}}@__cxx_global_var_init.2 1668 // CHECK4-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1669 // CHECK4-NEXT: entry: 1670 // CHECK4-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) @var, float 3.000000e+00) 1671 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]] 1672 // CHECK4-NEXT: ret void 1673 // 1674 // 1675 // CHECK4-LABEL: define {{[^@]+}}@main 1676 // CHECK4-SAME: () #[[ATTR3:[0-9]+]] { 1677 // CHECK4-NEXT: entry: 1678 // CHECK4-NEXT: 
[[RETVAL:%.*]] = alloca i32, align 4 1679 // CHECK4-NEXT: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, align 8 1680 // CHECK4-NEXT: store i32 0, i32* [[RETVAL]], align 4 1681 // CHECK4-NEXT: [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 0 1682 // CHECK4-NEXT: store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8 1683 // CHECK4-NEXT: [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 1 1684 // CHECK4-NEXT: store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8 1685 // CHECK4-NEXT: [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 2 1686 // CHECK4-NEXT: store i32 0, i32* [[BLOCK_RESERVED]], align 4 1687 // CHECK4-NEXT: [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 3 1688 // CHECK4-NEXT: store i8* bitcast (void (i8*)* @__main_block_invoke to i8*), i8** [[BLOCK_INVOKE]], align 8 1689 // CHECK4-NEXT: [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 4 1690 // CHECK4-NEXT: store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.3 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8 1691 // CHECK4-NEXT: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 5 1692 // CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4 1693 // CHECK4-NEXT: store i32 [[TMP0]], i32* [[BLOCK_CAPTURED]], align 8 1694 // CHECK4-NEXT: [[TMP1:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]] to void ()* 1695 // CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP1]] to %struct.__block_literal_generic* 1696 // CHECK4-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3 1697 // CHECK4-NEXT: [[TMP3:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8* 1698 // CHECK4-NEXT: [[TMP4:%.*]] = load i8*, i8** [[TMP2]], align 8 1699 // CHECK4-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to void (i8*)* 1700 // CHECK4-NEXT: call void [[TMP5]](i8* [[TMP3]]) 1701 // CHECK4-NEXT: ret i32 0 1702 // 1703 // 1704 // CHECK4-LABEL: define {{[^@]+}}@__main_block_invoke 1705 // CHECK4-SAME: (i8* [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR1]] { 1706 // CHECK4-NEXT: entry: 1707 // CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8 1708 // CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>*, align 8 1709 // CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8 1710 // CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* 1711 // CHECK4-NEXT: store <{ i8*, 
i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>** [[BLOCK_ADDR]], align 8
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* @_ZZ4mainE5sivar)
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[G:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[G1:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[_TMP2:%.*]] = alloca i32*, align 8
// CHECK4-NEXT: [[SIVAR3:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, align 8
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK4-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** @g1, align 8
// CHECK4-NEXT: store i32* [[TMP1]], i32** [[TMP]], align 8
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load volatile i32, i32* @g, align 4
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[G]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load volatile i32, i32* @g, align 4
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[G1]], align 4
// CHECK4-NEXT: store i32* [[G1]], i32** [[_TMP2]], align 8
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT: store i32 [[TMP4]], i32* [[SIVAR3]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 1
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK4-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK4-NEXT: store i32 1, i32* [[G]], align 4
// CHECK4-NEXT: [[TMP13:%.*]] = load i32*, i32** [[_TMP2]], align 8
// CHECK4-NEXT: store volatile i32 1, i32* [[TMP13]], align 4
// CHECK4-NEXT: store i32 2, i32* [[SIVAR3]], align 4
// CHECK4-NEXT: [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 0
// CHECK4-NEXT: store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8
// CHECK4-NEXT: [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 1
// CHECK4-NEXT: store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8
// CHECK4-NEXT: [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 2
// CHECK4-NEXT: store i32 0, i32* [[BLOCK_RESERVED]], align 4
// CHECK4-NEXT: [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 3
// CHECK4-NEXT: store i8* bitcast (void (i8*)* @var_block_invoke to i8*), i8** [[BLOCK_INVOKE]], align 8
// CHECK4-NEXT: [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 4
// CHECK4-NEXT: store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 6
// CHECK4-NEXT: [[TMP14:%.*]] = load volatile i32, i32* [[G]], align 4
// CHECK4-NEXT: store volatile i32 [[TMP14]], i32* [[BLOCK_CAPTURED]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED5:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT: [[TMP15:%.*]] = load i32*, i32** [[_TMP2]], align 8
// CHECK4-NEXT: store i32* [[TMP15]], i32** [[BLOCK_CAPTURED5]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURED6:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 7
// CHECK4-NEXT: [[TMP16:%.*]] = load i32, i32* [[SIVAR3]], align 4
// CHECK4-NEXT: store i32 [[TMP16]], i32* [[BLOCK_CAPTURED6]], align 4
// CHECK4-NEXT: [[TMP17:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]] to void ()*
// CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP17]] to %struct.__block_literal_generic*
// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK4-NEXT: [[TMP19:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK4-NEXT: [[TMP20:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK4-NEXT: [[TMP21:%.*]] = bitcast i8* [[TMP20]] to void (i8*)*
// CHECK4-NEXT: call void [[TMP21]](i8* [[TMP19]])
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP22]], 1
// CHECK4-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
// CHECK4-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP6]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@var_block_invoke
// CHECK4-SAME: (i8* [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>*, align 8
// CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>*
// CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>** [[BLOCK_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 6
// CHECK4-NEXT: store i32 2, i32* [[BLOCK_CAPTURE_ADDR]], align 8
// CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR1:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[BLOCK_CAPTURE_ADDR1]], align 8
// CHECK4-NEXT: store i32 2, i32* [[TMP0]], align 4
// CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR2:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 7
// CHECK4-NEXT: store i32 4, i32* [[BLOCK_CAPTURE_ADDR2]], align 4
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_for_firstprivate_codegen.cpp
// CHECK4-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: call void @__cxx_global_var_init()
// CHECK4-NEXT: call void @__cxx_global_var_init.1()
// CHECK4-NEXT: call void @__cxx_global_var_init.2()
// CHECK4-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@__cxx_global_var_init
// CHECK5-SAME: () #[[ATTR0:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull dereferenceable(4) @test)
// CHECK5-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]]
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
// CHECK5-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK5-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK5-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* nonnull dereferenceable(4) [[THIS1]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
// CHECK5-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK5-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK5-NEXT: call void @_ZN1SIfED2Ev(%struct.S* nonnull dereferenceable(4) [[THIS1]]) #[[ATTR2]]
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
// CHECK5-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float 1.000000e+00)
// CHECK5-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float 2.000000e+00)
// CHECK5-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]]
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
//
CHECK5-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1891 // CHECK5-NEXT: entry: 1892 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1893 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float, align 4 1894 // CHECK5-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1895 // CHECK5-NEXT: store float [[A]], float* [[A_ADDR]], align 4 1896 // CHECK5-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1897 // CHECK5-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 1898 // CHECK5-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* nonnull dereferenceable(4) [[THIS1]], float [[TMP0]]) 1899 // CHECK5-NEXT: ret void 1900 // 1901 // 1902 // CHECK5-LABEL: define {{[^@]+}}@__cxx_global_array_dtor 1903 // CHECK5-SAME: (i8* [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1904 // CHECK5-NEXT: entry: 1905 // CHECK5-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 1906 // CHECK5-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 1907 // CHECK5-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 1908 // CHECK5: arraydestroy.body: 1909 // CHECK5-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 1910 // CHECK5-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 1911 // CHECK5-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 1912 // CHECK5-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0) 1913 // CHECK5-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]] 1914 // CHECK5: arraydestroy.done1: 1915 // CHECK5-NEXT: ret void 1916 // 1917 // 1918 // CHECK5-LABEL: define {{[^@]+}}@__cxx_global_var_init.2 1919 // CHECK5-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1920 // CHECK5-NEXT: entry: 1921 // CHECK5-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) @var, float 3.000000e+00) 1922 // CHECK5-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]] 1923 // CHECK5-NEXT: ret void 1924 // 1925 // 1926 // CHECK5-LABEL: define {{[^@]+}}@main 1927 // CHECK5-SAME: () #[[ATTR3:[0-9]+]] { 1928 // CHECK5-NEXT: entry: 1929 // CHECK5-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 1930 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 1931 // CHECK5-NEXT: store i32 0, i32* [[RETVAL]], align 4 1932 // CHECK5-NEXT: store i32 0, i32* [[I]], align 4 1933 // CHECK5-NEXT: br label [[FOR_COND:%.*]] 1934 // CHECK5: for.cond: 1935 // CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[I]], align 4 1936 // CHECK5-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], 2 1937 // CHECK5-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] 1938 // CHECK5: for.body: 1939 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* @t_var, align 4 1940 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[I]], align 4 1941 // CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP2]] to i64 1942 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* @vec, i64 0, i64 
[[IDXPROM]] 1943 // CHECK5-NEXT: store i32 [[TMP1]], i32* [[ARRAYIDX]], align 4 1944 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[I]], align 4 1945 // CHECK5-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP3]] to i64 1946 // CHECK5-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 [[IDXPROM1]] 1947 // CHECK5-NEXT: [[TMP4:%.*]] = bitcast %struct.S* [[ARRAYIDX2]] to i8* 1948 // CHECK5-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP4]], i8* align 4 bitcast (%struct.S* @var to i8*), i64 4, i1 false) 1949 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[I]], align 4 1950 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4 1951 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP5]] 1952 // CHECK5-NEXT: store i32 [[ADD]], i32* @_ZZ4mainE5sivar, align 4 1953 // CHECK5-NEXT: br label [[FOR_INC:%.*]] 1954 // CHECK5: for.inc: 1955 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[I]], align 4 1956 // CHECK5-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1 1957 // CHECK5-NEXT: store i32 [[INC]], i32* [[I]], align 4 1958 // CHECK5-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]] 1959 // CHECK5: for.end: 1960 // CHECK5-NEXT: [[CALL:%.*]] = call i32 @_Z5tmainIiET_v() 1961 // CHECK5-NEXT: ret i32 [[CALL]] 1962 // 1963 // 1964 // CHECK5-LABEL: define {{[^@]+}}@_Z5tmainIiET_v 1965 // CHECK5-SAME: () #[[ATTR5:[0-9]+]] { 1966 // CHECK5-NEXT: entry: 1967 // CHECK5-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 1968 // CHECK5-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4 1969 // CHECK5-NEXT: [[T_VAR:%.*]] = alloca i32, align 4 1970 // CHECK5-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4 1971 // CHECK5-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4 1972 // CHECK5-NEXT: [[VAR:%.*]] = alloca %struct.S.0*, align 8 1973 // CHECK5-NEXT: [[TMP:%.*]] = alloca %struct.S.0*, align 8 1974 // CHECK5-NEXT: [[_TMP1:%.*]] = alloca %struct.S.0*, align 8 1975 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 1976 // CHECK5-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull dereferenceable(4) [[TEST]]) 1977 // CHECK5-NEXT: store i32 0, i32* [[T_VAR]], align 4 1978 // CHECK5-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8* 1979 // CHECK5-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false) 1980 // CHECK5-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0 1981 // CHECK5-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1) 1982 // CHECK5-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1 1983 // CHECK5-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2) 1984 // CHECK5-NEXT: store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 8 1985 // CHECK5-NEXT: [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 8 1986 // CHECK5-NEXT: store %struct.S.0* [[TMP1]], %struct.S.0** [[TMP]], align 8 1987 // CHECK5-NEXT: [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8 1988 // CHECK5-NEXT: store %struct.S.0* [[TMP2]], %struct.S.0** [[_TMP1]], align 8 1989 // CHECK5-NEXT: store i32 0, i32* [[I]], align 4 1990 // CHECK5-NEXT: br label [[FOR_COND:%.*]] 1991 // CHECK5: for.cond: 1992 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[I]], align 4 1993 // CHECK5-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP3]], 2 1994 
// CHECK5-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] 1995 // CHECK5: for.body: 1996 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[T_VAR]], align 4 1997 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[I]], align 4 1998 // CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP5]] to i64 1999 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 [[IDXPROM]] 2000 // CHECK5-NEXT: store i32 [[TMP4]], i32* [[ARRAYIDX]], align 4 2001 // CHECK5-NEXT: [[TMP6:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP1]], align 8 2002 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[I]], align 4 2003 // CHECK5-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP7]] to i64 2004 // CHECK5-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 [[IDXPROM2]] 2005 // CHECK5-NEXT: [[TMP8:%.*]] = bitcast %struct.S.0* [[ARRAYIDX3]] to i8* 2006 // CHECK5-NEXT: [[TMP9:%.*]] = bitcast %struct.S.0* [[TMP6]] to i8* 2007 // CHECK5-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP8]], i8* align 4 [[TMP9]], i64 4, i1 false) 2008 // CHECK5-NEXT: br label [[FOR_INC:%.*]] 2009 // CHECK5: for.inc: 2010 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4 2011 // CHECK5-NEXT: [[INC:%.*]] = add nsw i32 [[TMP10]], 1 2012 // CHECK5-NEXT: store i32 [[INC]], i32* [[I]], align 4 2013 // CHECK5-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] 2014 // CHECK5: for.end: 2015 // CHECK5-NEXT: store i32 0, i32* [[RETVAL]], align 4 2016 // CHECK5-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0 2017 // CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2 2018 // CHECK5-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 2019 // CHECK5: arraydestroy.body: 2020 // CHECK5-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP11]], [[FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 2021 // CHECK5-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 2022 // CHECK5-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 2023 // CHECK5-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]] 2024 // CHECK5-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY]] 2025 // CHECK5: arraydestroy.done4: 2026 // CHECK5-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull dereferenceable(4) [[TEST]]) #[[ATTR2]] 2027 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[RETVAL]], align 4 2028 // CHECK5-NEXT: ret i32 [[TMP12]] 2029 // 2030 // 2031 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev 2032 // CHECK5-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2033 // CHECK5-NEXT: entry: 2034 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 2035 // CHECK5-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 2036 // CHECK5-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 2037 // CHECK5-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 2038 // CHECK5-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4 2039 // CHECK5-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float 2040 // CHECK5-NEXT: store float [[CONV]], float* [[F]], align 4 2041 // CHECK5-NEXT: ret void 2042 // 2043 // 
2044 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev 2045 // CHECK5-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2046 // CHECK5-NEXT: entry: 2047 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 2048 // CHECK5-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 2049 // CHECK5-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 2050 // CHECK5-NEXT: ret void 2051 // 2052 // 2053 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef 2054 // CHECK5-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2055 // CHECK5-NEXT: entry: 2056 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 2057 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca float, align 4 2058 // CHECK5-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 2059 // CHECK5-NEXT: store float [[A]], float* [[A_ADDR]], align 4 2060 // CHECK5-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 2061 // CHECK5-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 2062 // CHECK5-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 2063 // CHECK5-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4 2064 // CHECK5-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float 2065 // CHECK5-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]] 2066 // CHECK5-NEXT: store float [[ADD]], float* [[F]], align 4 2067 // CHECK5-NEXT: ret void 2068 // 2069 // 2070 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev 2071 // CHECK5-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2072 // CHECK5-NEXT: entry: 2073 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 2074 // CHECK5-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 2075 // CHECK5-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 2076 // CHECK5-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull dereferenceable(4) [[THIS1]]) 2077 // CHECK5-NEXT: ret void 2078 // 2079 // 2080 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei 2081 // CHECK5-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2082 // CHECK5-NEXT: entry: 2083 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 2084 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 2085 // CHECK5-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 2086 // CHECK5-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 2087 // CHECK5-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 2088 // CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 2089 // CHECK5-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull dereferenceable(4) [[THIS1]], i32 [[TMP0]]) 2090 // CHECK5-NEXT: ret void 2091 // 2092 // 2093 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev 2094 // CHECK5-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2095 // CHECK5-NEXT: entry: 2096 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 2097 // CHECK5-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 2098 // CHECK5-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 2099 // CHECK5-NEXT: call void @_ZN1SIiED2Ev(%struct.S.0* nonnull dereferenceable(4) [[THIS1]]) #[[ATTR2]] 2100 // CHECK5-NEXT: ret void 2101 // 2102 // 2103 // CHECK5-LABEL: define 
{{[^@]+}}@_ZN1SIiEC2Ev 2104 // CHECK5-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2105 // CHECK5-NEXT: entry: 2106 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 2107 // CHECK5-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 2108 // CHECK5-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 2109 // CHECK5-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0 2110 // CHECK5-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4 2111 // CHECK5-NEXT: store i32 [[TMP0]], i32* [[F]], align 4 2112 // CHECK5-NEXT: ret void 2113 // 2114 // 2115 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei 2116 // CHECK5-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2117 // CHECK5-NEXT: entry: 2118 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 2119 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 2120 // CHECK5-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 2121 // CHECK5-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 2122 // CHECK5-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 2123 // CHECK5-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0 2124 // CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 2125 // CHECK5-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4 2126 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]] 2127 // CHECK5-NEXT: store i32 [[ADD]], i32* [[F]], align 4 2128 // CHECK5-NEXT: ret void 2129 // 2130 // 2131 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev 2132 // CHECK5-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2133 // CHECK5-NEXT: entry: 2134 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 2135 // CHECK5-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 2136 // CHECK5-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 2137 // CHECK5-NEXT: ret void 2138 // 2139 // 2140 // CHECK5-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_for_firstprivate_codegen.cpp 2141 // CHECK5-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 2142 // CHECK5-NEXT: entry: 2143 // CHECK5-NEXT: call void @__cxx_global_var_init() 2144 // CHECK5-NEXT: call void @__cxx_global_var_init.1() 2145 // CHECK5-NEXT: call void @__cxx_global_var_init.2() 2146 // CHECK5-NEXT: ret void 2147 // 2148 // 2149 // CHECK6-LABEL: define {{[^@]+}}@__cxx_global_var_init 2150 // CHECK6-SAME: () #[[ATTR0:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" { 2151 // CHECK6-NEXT: entry: 2152 // CHECK6-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull dereferenceable(4) @test) 2153 // CHECK6-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]] 2154 // CHECK6-NEXT: ret void 2155 // 2156 // 2157 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev 2158 // CHECK6-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 { 2159 // CHECK6-NEXT: entry: 2160 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 2161 // CHECK6-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 2162 // CHECK6-NEXT: [[THIS1:%.*]] = load %struct.S*, 
%struct.S** [[THIS_ADDR]], align 8 2163 // CHECK6-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* nonnull dereferenceable(4) [[THIS1]]) 2164 // CHECK6-NEXT: ret void 2165 // 2166 // 2167 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev 2168 // CHECK6-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2169 // CHECK6-NEXT: entry: 2170 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 2171 // CHECK6-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 2172 // CHECK6-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 2173 // CHECK6-NEXT: call void @_ZN1SIfED2Ev(%struct.S* nonnull dereferenceable(4) [[THIS1]]) #[[ATTR2]] 2174 // CHECK6-NEXT: ret void 2175 // 2176 // 2177 // CHECK6-LABEL: define {{[^@]+}}@__cxx_global_var_init.1 2178 // CHECK6-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 2179 // CHECK6-NEXT: entry: 2180 // CHECK6-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float 1.000000e+00) 2181 // CHECK6-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float 2.000000e+00) 2182 // CHECK6-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]] 2183 // CHECK6-NEXT: ret void 2184 // 2185 // 2186 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef 2187 // CHECK6-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2188 // CHECK6-NEXT: entry: 2189 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 2190 // CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float, align 4 2191 // CHECK6-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 2192 // CHECK6-NEXT: store float [[A]], float* [[A_ADDR]], align 4 2193 // CHECK6-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 2194 // CHECK6-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 2195 // CHECK6-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* nonnull dereferenceable(4) [[THIS1]], float [[TMP0]]) 2196 // CHECK6-NEXT: ret void 2197 // 2198 // 2199 // CHECK6-LABEL: define {{[^@]+}}@__cxx_global_array_dtor 2200 // CHECK6-SAME: (i8* [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 2201 // CHECK6-NEXT: entry: 2202 // CHECK6-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 2203 // CHECK6-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 2204 // CHECK6-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 2205 // CHECK6: arraydestroy.body: 2206 // CHECK6-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 2207 // CHECK6-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 2208 // CHECK6-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 2209 // CHECK6-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0) 2210 // CHECK6-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]] 2211 // CHECK6: 
arraydestroy.done1: 2212 // CHECK6-NEXT: ret void 2213 // 2214 // 2215 // CHECK6-LABEL: define {{[^@]+}}@__cxx_global_var_init.2 2216 // CHECK6-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 2217 // CHECK6-NEXT: entry: 2218 // CHECK6-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) @var, float 3.000000e+00) 2219 // CHECK6-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]] 2220 // CHECK6-NEXT: ret void 2221 // 2222 // 2223 // CHECK6-LABEL: define {{[^@]+}}@main 2224 // CHECK6-SAME: () #[[ATTR3:[0-9]+]] { 2225 // CHECK6-NEXT: entry: 2226 // CHECK6-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 2227 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4 2228 // CHECK6-NEXT: store i32 0, i32* [[RETVAL]], align 4 2229 // CHECK6-NEXT: store i32 0, i32* [[I]], align 4 2230 // CHECK6-NEXT: br label [[FOR_COND:%.*]] 2231 // CHECK6: for.cond: 2232 // CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[I]], align 4 2233 // CHECK6-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], 2 2234 // CHECK6-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] 2235 // CHECK6: for.body: 2236 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* @t_var, align 4 2237 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[I]], align 4 2238 // CHECK6-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP2]] to i64 2239 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* @vec, i64 0, i64 [[IDXPROM]] 2240 // CHECK6-NEXT: store i32 [[TMP1]], i32* [[ARRAYIDX]], align 4 2241 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[I]], align 4 2242 // CHECK6-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP3]] to i64 2243 // CHECK6-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 [[IDXPROM1]] 2244 // CHECK6-NEXT: [[TMP4:%.*]] = bitcast %struct.S* [[ARRAYIDX2]] to i8* 2245 // CHECK6-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP4]], i8* align 4 bitcast (%struct.S* @var to i8*), i64 4, i1 false) 2246 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[I]], align 4 2247 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4 2248 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP5]] 2249 // CHECK6-NEXT: store i32 [[ADD]], i32* @_ZZ4mainE5sivar, align 4 2250 // CHECK6-NEXT: br label [[FOR_INC:%.*]] 2251 // CHECK6: for.inc: 2252 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[I]], align 4 2253 // CHECK6-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1 2254 // CHECK6-NEXT: store i32 [[INC]], i32* [[I]], align 4 2255 // CHECK6-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]] 2256 // CHECK6: for.end: 2257 // CHECK6-NEXT: [[CALL:%.*]] = call i32 @_Z5tmainIiET_v() 2258 // CHECK6-NEXT: ret i32 [[CALL]] 2259 // 2260 // 2261 // CHECK6-LABEL: define {{[^@]+}}@_Z5tmainIiET_v 2262 // CHECK6-SAME: () #[[ATTR5:[0-9]+]] { 2263 // CHECK6-NEXT: entry: 2264 // CHECK6-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 2265 // CHECK6-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4 2266 // CHECK6-NEXT: [[T_VAR:%.*]] = alloca i32, align 4 2267 // CHECK6-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4 2268 // CHECK6-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4 2269 // CHECK6-NEXT: [[VAR:%.*]] = alloca %struct.S.0*, align 8 2270 // CHECK6-NEXT: [[TMP:%.*]] = alloca %struct.S.0*, align 8 2271 // CHECK6-NEXT: [[_TMP1:%.*]] = alloca %struct.S.0*, align 8 2272 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4 
2273 // CHECK6-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* nonnull dereferenceable(4) [[TEST]]) 2274 // CHECK6-NEXT: store i32 0, i32* [[T_VAR]], align 4 2275 // CHECK6-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8* 2276 // CHECK6-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false) 2277 // CHECK6-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0 2278 // CHECK6-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 1) 2279 // CHECK6-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1 2280 // CHECK6-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* nonnull dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 2) 2281 // CHECK6-NEXT: store %struct.S.0* [[TEST]], %struct.S.0** [[VAR]], align 8 2282 // CHECK6-NEXT: [[TMP1:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR]], align 8 2283 // CHECK6-NEXT: store %struct.S.0* [[TMP1]], %struct.S.0** [[TMP]], align 8 2284 // CHECK6-NEXT: [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8 2285 // CHECK6-NEXT: store %struct.S.0* [[TMP2]], %struct.S.0** [[_TMP1]], align 8 2286 // CHECK6-NEXT: store i32 0, i32* [[I]], align 4 2287 // CHECK6-NEXT: br label [[FOR_COND:%.*]] 2288 // CHECK6: for.cond: 2289 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[I]], align 4 2290 // CHECK6-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP3]], 2 2291 // CHECK6-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] 2292 // CHECK6: for.body: 2293 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[T_VAR]], align 4 2294 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[I]], align 4 2295 // CHECK6-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP5]] to i64 2296 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 [[IDXPROM]] 2297 // CHECK6-NEXT: store i32 [[TMP4]], i32* [[ARRAYIDX]], align 4 2298 // CHECK6-NEXT: [[TMP6:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP1]], align 8 2299 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[I]], align 4 2300 // CHECK6-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP7]] to i64 2301 // CHECK6-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 [[IDXPROM2]] 2302 // CHECK6-NEXT: [[TMP8:%.*]] = bitcast %struct.S.0* [[ARRAYIDX3]] to i8* 2303 // CHECK6-NEXT: [[TMP9:%.*]] = bitcast %struct.S.0* [[TMP6]] to i8* 2304 // CHECK6-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP8]], i8* align 4 [[TMP9]], i64 4, i1 false) 2305 // CHECK6-NEXT: br label [[FOR_INC:%.*]] 2306 // CHECK6: for.inc: 2307 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4 2308 // CHECK6-NEXT: [[INC:%.*]] = add nsw i32 [[TMP10]], 1 2309 // CHECK6-NEXT: store i32 [[INC]], i32* [[I]], align 4 2310 // CHECK6-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] 2311 // CHECK6: for.end: 2312 // CHECK6-NEXT: store i32 0, i32* [[RETVAL]], align 4 2313 // CHECK6-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0 2314 // CHECK6-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2 2315 // CHECK6-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 2316 // CHECK6: arraydestroy.body: 2317 // CHECK6-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP11]], [[FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], 
[[ARRAYDESTROY_BODY]] ] 2318 // CHECK6-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 2319 // CHECK6-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 2320 // CHECK6-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]] 2321 // CHECK6-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY]] 2322 // CHECK6: arraydestroy.done4: 2323 // CHECK6-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* nonnull dereferenceable(4) [[TEST]]) #[[ATTR2]] 2324 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[RETVAL]], align 4 2325 // CHECK6-NEXT: ret i32 [[TMP12]] 2326 // 2327 // 2328 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev 2329 // CHECK6-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2330 // CHECK6-NEXT: entry: 2331 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 2332 // CHECK6-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 2333 // CHECK6-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 2334 // CHECK6-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 2335 // CHECK6-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4 2336 // CHECK6-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float 2337 // CHECK6-NEXT: store float [[CONV]], float* [[F]], align 4 2338 // CHECK6-NEXT: ret void 2339 // 2340 // 2341 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev 2342 // CHECK6-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2343 // CHECK6-NEXT: entry: 2344 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 2345 // CHECK6-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 2346 // CHECK6-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 2347 // CHECK6-NEXT: ret void 2348 // 2349 // 2350 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef 2351 // CHECK6-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2352 // CHECK6-NEXT: entry: 2353 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 2354 // CHECK6-NEXT: [[A_ADDR:%.*]] = alloca float, align 4 2355 // CHECK6-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 2356 // CHECK6-NEXT: store float [[A]], float* [[A_ADDR]], align 4 2357 // CHECK6-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 2358 // CHECK6-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 2359 // CHECK6-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 2360 // CHECK6-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4 2361 // CHECK6-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float 2362 // CHECK6-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]] 2363 // CHECK6-NEXT: store float [[ADD]], float* [[F]], align 4 2364 // CHECK6-NEXT: ret void 2365 // 2366 // 2367 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev 2368 // CHECK6-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2369 // CHECK6-NEXT: entry: 2370 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 2371 // CHECK6-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 2372 // CHECK6-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 2373 // 
CHECK6-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.0* nonnull dereferenceable(4) [[THIS1]]) 2374 // CHECK6-NEXT: ret void 2375 // 2376 // 2377 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei 2378 // CHECK6-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2379 // CHECK6-NEXT: entry: 2380 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 2381 // CHECK6-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 2382 // CHECK6-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 2383 // CHECK6-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 2384 // CHECK6-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 2385 // CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 2386 // CHECK6-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.0* nonnull dereferenceable(4) [[THIS1]], i32 [[TMP0]]) 2387 // CHECK6-NEXT: ret void 2388 // 2389 // 2390 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev 2391 // CHECK6-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2392 // CHECK6-NEXT: entry: 2393 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 2394 // CHECK6-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 2395 // CHECK6-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 2396 // CHECK6-NEXT: call void @_ZN1SIiED2Ev(%struct.S.0* nonnull dereferenceable(4) [[THIS1]]) #[[ATTR2]] 2397 // CHECK6-NEXT: ret void 2398 // 2399 // 2400 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev 2401 // CHECK6-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2402 // CHECK6-NEXT: entry: 2403 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 2404 // CHECK6-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 2405 // CHECK6-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 2406 // CHECK6-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0 2407 // CHECK6-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4 2408 // CHECK6-NEXT: store i32 [[TMP0]], i32* [[F]], align 4 2409 // CHECK6-NEXT: ret void 2410 // 2411 // 2412 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei 2413 // CHECK6-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]], i32 [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2414 // CHECK6-NEXT: entry: 2415 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 2416 // CHECK6-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 2417 // CHECK6-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 2418 // CHECK6-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 2419 // CHECK6-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 2420 // CHECK6-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0 2421 // CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 2422 // CHECK6-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4 2423 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]] 2424 // CHECK6-NEXT: store i32 [[ADD]], i32* [[F]], align 4 2425 // CHECK6-NEXT: ret void 2426 // 2427 // 2428 // CHECK6-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev 2429 // CHECK6-SAME: (%struct.S.0* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2430 // CHECK6-NEXT: entry: 2431 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 2432 // CHECK6-NEXT: store 
%struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 2433 // CHECK6-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 2434 // CHECK6-NEXT: ret void 2435 // 2436 // 2437 // CHECK6-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_for_firstprivate_codegen.cpp 2438 // CHECK6-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 2439 // CHECK6-NEXT: entry: 2440 // CHECK6-NEXT: call void @__cxx_global_var_init() 2441 // CHECK6-NEXT: call void @__cxx_global_var_init.1() 2442 // CHECK6-NEXT: call void @__cxx_global_var_init.2() 2443 // CHECK6-NEXT: ret void 2444 // 2445 // 2446 // CHECK7-LABEL: define {{[^@]+}}@__cxx_global_var_init 2447 // CHECK7-SAME: () #[[ATTR0:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" { 2448 // CHECK7-NEXT: entry: 2449 // CHECK7-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* nonnull dereferenceable(4) @test) 2450 // CHECK7-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]] 2451 // CHECK7-NEXT: ret void 2452 // 2453 // 2454 // CHECK7-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev 2455 // CHECK7-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 { 2456 // CHECK7-NEXT: entry: 2457 // CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 2458 // CHECK7-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 2459 // CHECK7-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 2460 // CHECK7-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* nonnull dereferenceable(4) [[THIS1]]) 2461 // CHECK7-NEXT: ret void 2462 // 2463 // 2464 // CHECK7-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev 2465 // CHECK7-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2466 // CHECK7-NEXT: entry: 2467 // CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 2468 // CHECK7-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 2469 // CHECK7-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 2470 // CHECK7-NEXT: call void @_ZN1SIfED2Ev(%struct.S* nonnull dereferenceable(4) [[THIS1]]) #[[ATTR2]] 2471 // CHECK7-NEXT: ret void 2472 // 2473 // 2474 // CHECK7-LABEL: define {{[^@]+}}@__cxx_global_var_init.1 2475 // CHECK7-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 2476 // CHECK7-NEXT: entry: 2477 // CHECK7-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float 1.000000e+00) 2478 // CHECK7-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float 2.000000e+00) 2479 // CHECK7-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]] 2480 // CHECK7-NEXT: ret void 2481 // 2482 // 2483 // CHECK7-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef 2484 // CHECK7-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2485 // CHECK7-NEXT: entry: 2486 // CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 2487 // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca float, align 4 2488 // CHECK7-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 2489 // CHECK7-NEXT: store float [[A]], float* [[A_ADDR]], align 4 2490 // CHECK7-NEXT: 
[[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 2491 // CHECK7-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 2492 // CHECK7-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* nonnull dereferenceable(4) [[THIS1]], float [[TMP0]]) 2493 // CHECK7-NEXT: ret void 2494 // 2495 // 2496 // CHECK7-LABEL: define {{[^@]+}}@__cxx_global_array_dtor 2497 // CHECK7-SAME: (i8* [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 2498 // CHECK7-NEXT: entry: 2499 // CHECK7-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 2500 // CHECK7-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 2501 // CHECK7-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 2502 // CHECK7: arraydestroy.body: 2503 // CHECK7-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 2504 // CHECK7-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 2505 // CHECK7-NEXT: call void @_ZN1SIfED1Ev(%struct.S* nonnull dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 2506 // CHECK7-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0) 2507 // CHECK7-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]] 2508 // CHECK7: arraydestroy.done1: 2509 // CHECK7-NEXT: ret void 2510 // 2511 // 2512 // CHECK7-LABEL: define {{[^@]+}}@__cxx_global_var_init.2 2513 // CHECK7-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 2514 // CHECK7-NEXT: entry: 2515 // CHECK7-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) @var, float 3.000000e+00) 2516 // CHECK7-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]] 2517 // CHECK7-NEXT: ret void 2518 // 2519 // 2520 // CHECK7-LABEL: define {{[^@]+}}@main 2521 // CHECK7-SAME: () #[[ATTR3:[0-9]+]] { 2522 // CHECK7-NEXT: entry: 2523 // CHECK7-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 2524 // CHECK7-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8 2525 // CHECK7-NEXT: store i32 0, i32* [[RETVAL]], align 4 2526 // CHECK7-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0 2527 // CHECK7-NEXT: store i32* @_ZZ4mainE5sivar, i32** [[TMP0]], align 8 2528 // CHECK7-NEXT: call void @"_ZZ4mainENK3$_0clEv"(%class.anon* nonnull dereferenceable(8) [[REF_TMP]]) 2529 // CHECK7-NEXT: ret i32 0 2530 // 2531 // 2532 // CHECK7-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev 2533 // CHECK7-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 2534 // CHECK7-NEXT: entry: 2535 // CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 2536 // CHECK7-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 2537 // CHECK7-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 2538 // CHECK7-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 2539 // CHECK7-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4 2540 // CHECK7-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float 2541 // CHECK7-NEXT: store float [[CONV]], float* [[F]], align 4 2542 
// CHECK7-NEXT:    ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
// CHECK7-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK7-NEXT:  entry:
// CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK7-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK7-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK7-NEXT:    ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
// CHECK7-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK7-NEXT:  entry:
// CHECK7-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK7-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
// CHECK7-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK7-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
// CHECK7-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK7-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK7-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
// CHECK7-NEXT:    [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
// CHECK7-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
// CHECK7-NEXT:    [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
// CHECK7-NEXT:    store float [[ADD]], float* [[F]], align 4
// CHECK7-NEXT:    ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_for_firstprivate_codegen.cpp
// CHECK7-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
// CHECK7-NEXT:  entry:
// CHECK7-NEXT:    call void @__cxx_global_var_init()
// CHECK7-NEXT:    call void @__cxx_global_var_init.1()
// CHECK7-NEXT:    call void @__cxx_global_var_init.2()
// CHECK7-NEXT:    ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@__cxx_global_var_init
// CHECK8-SAME: () #[[ATTR0:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" {
// CHECK8-NEXT:  entry:
// CHECK8-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* nonnull dereferenceable(4) @test)
// CHECK8-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]]
// CHECK8-NEXT:    ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
// CHECK8-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
// CHECK8-NEXT:  entry:
// CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK8-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK8-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* nonnull dereferenceable(4) [[THIS1]])
// CHECK8-NEXT:    ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
// CHECK8-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK8-NEXT:  entry:
// CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK8-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK8-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* nonnull dereferenceable(4) [[THIS1]]) #[[ATTR2]]
// CHECK8-NEXT:    ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
// CHECK8-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
// CHECK8-NEXT:  entry:
// CHECK8-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float 1.000000e+00)
// CHECK8-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float 2.000000e+00)
// CHECK8-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]]
// CHECK8-NEXT:    ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef
// CHECK8-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK8-NEXT:  entry:
// CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
// CHECK8-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK8-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
// CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK8-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
// CHECK8-NEXT:    call void @_ZN1SIfEC2Ef(%struct.S* nonnull dereferenceable(4) [[THIS1]], float [[TMP0]])
// CHECK8-NEXT:    ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
// CHECK8-SAME: (i8* [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
// CHECK8-NEXT:  entry:
// CHECK8-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK8-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK8-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK8:       arraydestroy.body:
// CHECK8-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK8-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK8-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* nonnull dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]]
// CHECK8-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0)
// CHECK8-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK8:       arraydestroy.done1:
// CHECK8-NEXT:    ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
// CHECK8-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
// CHECK8-NEXT:  entry:
// CHECK8-NEXT:    call void @_ZN1SIfEC1Ef(%struct.S* nonnull dereferenceable(4) @var, float 3.000000e+00)
// CHECK8-NEXT:    [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]]
// CHECK8-NEXT:    ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@main
// CHECK8-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK8-NEXT:  entry:
// CHECK8-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK8-NEXT:    [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, align 8
// CHECK8-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK8-NEXT:    [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 0
// CHECK8-NEXT:    store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8
// CHECK8-NEXT:    [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 1
// CHECK8-NEXT:    store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8
// CHECK8-NEXT:    [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 2
// CHECK8-NEXT:    store i32 0, i32* [[BLOCK_RESERVED]], align 4
// CHECK8-NEXT:    [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 3
// CHECK8-NEXT:    store i8* bitcast (void (i8*)* @__main_block_invoke to i8*), i8** [[BLOCK_INVOKE]], align 8
// CHECK8-NEXT:    [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 4
// CHECK8-NEXT:    store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.3 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
// CHECK8-NEXT:    [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 5
// CHECK8-NEXT:    [[TMP0:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
// CHECK8-NEXT:    store i32 [[TMP0]], i32* [[BLOCK_CAPTURED]], align 8
// CHECK8-NEXT:    [[TMP1:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]] to void ()*
// CHECK8-NEXT:    [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP1]] to %struct.__block_literal_generic*
// CHECK8-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK8-NEXT:    [[TMP3:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK8-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[TMP2]], align 8
// CHECK8-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to void (i8*)*
// CHECK8-NEXT:    call void [[TMP5]](i8* [[TMP3]])
// CHECK8-NEXT:    ret i32 0
//
//
// CHECK8-LABEL: define {{[^@]+}}@__main_block_invoke
// CHECK8-SAME: (i8* [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR1]] {
// CHECK8-NEXT:  entry:
// CHECK8-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
// CHECK8-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>*, align 8
// CHECK8-NEXT:    [[TMP:%.*]] = alloca i32*, align 8
// CHECK8-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK8-NEXT:    [[BLOCK1:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, align 8
// CHECK8-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
// CHECK8-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>*
// CHECK8-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>** [[BLOCK_ADDR]], align 8
// CHECK8-NEXT:    [[BLOCK_CAPTURE_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 5
// CHECK8-NEXT:    [[TMP0:%.*]] = load i32*, i32** @g1, align 8
// CHECK8-NEXT:    store i32* [[TMP0]], i32** [[TMP]], align 8
// CHECK8-NEXT:    store i32 0, i32* [[I]], align 4
// CHECK8-NEXT:    br label [[FOR_COND:%.*]]
// CHECK8:       for.cond:
// CHECK8-NEXT:    [[TMP1:%.*]] = load i32, i32* [[I]], align 4
// CHECK8-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP1]], 2
// CHECK8-NEXT:    br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK8:       for.body:
// CHECK8-NEXT:    store i32 1, i32* @g, align 4
// CHECK8-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK8-NEXT:    store volatile i32 1, i32* [[TMP2]], align 4
// CHECK8-NEXT:    store i32 2, i32* [[BLOCK_CAPTURE_ADDR]], align 8
// CHECK8-NEXT:    [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK1]], i32 0, i32 0
// CHECK8-NEXT:    store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8
// CHECK8-NEXT:    [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK1]], i32 0, i32 1
// CHECK8-NEXT:    store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8
// CHECK8-NEXT:    [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK1]], i32 0, i32 2
// CHECK8-NEXT:    store i32 0, i32* [[BLOCK_RESERVED]], align 4
// CHECK8-NEXT:    [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK1]], i32 0, i32 3
// CHECK8-NEXT:    store i8* bitcast (void (i8*)* @__main_block_invoke_2 to i8*), i8** [[BLOCK_INVOKE]], align 8
// CHECK8-NEXT:    [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK1]], i32 0, i32 4
// CHECK8-NEXT:    store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
// CHECK8-NEXT:    [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK1]], i32 0, i32 6
// CHECK8-NEXT:    [[TMP3:%.*]] = load volatile i32, i32* @g, align 4
// CHECK8-NEXT:    store volatile i32 [[TMP3]], i32* [[BLOCK_CAPTURED]], align 8
// CHECK8-NEXT:    [[BLOCK_CAPTURED2:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK1]], i32 0, i32 5
// CHECK8-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[TMP]], align 8
// CHECK8-NEXT:    store i32* [[TMP4]], i32** [[BLOCK_CAPTURED2]], align 8
// CHECK8-NEXT:    [[BLOCK_CAPTURED3:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK1]], i32 0, i32 7
// CHECK8-NEXT:    [[TMP5:%.*]] = load i32, i32* [[BLOCK_CAPTURE_ADDR]], align 8
// CHECK8-NEXT:    store i32 [[TMP5]], i32* [[BLOCK_CAPTURED3]], align 4
// CHECK8-NEXT:    [[TMP6:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK1]] to void ()*
// CHECK8-NEXT:    [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP6]] to %struct.__block_literal_generic*
// CHECK8-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK8-NEXT:    [[TMP8:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK8-NEXT:    [[TMP9:%.*]] = load i8*, i8** [[TMP7]], align 8
// CHECK8-NEXT:    [[TMP10:%.*]] = bitcast i8* [[TMP9]] to void (i8*)*
// CHECK8-NEXT:    call void [[TMP10]](i8* [[TMP8]])
// CHECK8-NEXT:    br label [[FOR_INC:%.*]]
// CHECK8:       for.inc:
// CHECK8-NEXT:    [[TMP11:%.*]] = load i32, i32* [[I]], align 4
// CHECK8-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK8-NEXT:    store i32 [[INC]], i32* [[I]], align 4
// CHECK8-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]]
// CHECK8:       for.end:
// CHECK8-NEXT:    ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@__main_block_invoke_2
// CHECK8-SAME: (i8* [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR1]] {
// CHECK8-NEXT:  entry:
// CHECK8-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
// CHECK8-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>*, align 8
// CHECK8-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
// CHECK8-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>*
// CHECK8-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>** [[BLOCK_ADDR]], align 8
// CHECK8-NEXT:    [[BLOCK_CAPTURE_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 6
// CHECK8-NEXT:    store i32 2, i32* [[BLOCK_CAPTURE_ADDR]], align 8
// CHECK8-NEXT:    [[BLOCK_CAPTURE_ADDR1:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 5
// CHECK8-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[BLOCK_CAPTURE_ADDR1]], align 8
// CHECK8-NEXT:    store i32 2, i32* [[TMP0]], align 4
// CHECK8-NEXT:    [[BLOCK_CAPTURE_ADDR2:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32*, i32, i32 }>* [[BLOCK]], i32 0, i32 7
// CHECK8-NEXT:    store i32 4, i32* [[BLOCK_CAPTURE_ADDR2]], align 4
// CHECK8-NEXT:    ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
// CHECK8-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK8-NEXT:  entry:
// CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK8-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK8-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK8-NEXT:    [[TMP0:%.*]] = load volatile i32, i32* @g, align 4
// CHECK8-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
// CHECK8-NEXT:    store float [[CONV]], float* [[F]], align 4
// CHECK8-NEXT:    ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
// CHECK8-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK8-NEXT:  entry:
// CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK8-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK8-NEXT:    ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef
// CHECK8-SAME: (%struct.S* nonnull dereferenceable(4) [[THIS:%.*]], float [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK8-NEXT:  entry:
// CHECK8-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK8-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
// CHECK8-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK8-NEXT:    store float [[A]], float* [[A_ADDR]], align 4
// CHECK8-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK8-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK8-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
// CHECK8-NEXT:    [[TMP1:%.*]] = load volatile i32, i32* @g, align 4
// CHECK8-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP1]] to float
// CHECK8-NEXT:    [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]]
// CHECK8-NEXT:    store float [[ADD]], float* [[F]], align 4
// CHECK8-NEXT:    ret void
//
//
// CHECK8-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_for_firstprivate_codegen.cpp
// CHECK8-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" {
// CHECK8-NEXT:  entry:
// CHECK8-NEXT:    call void @__cxx_global_var_init()
// CHECK8-NEXT:    call void @__cxx_global_var_init.1()
// CHECK8-NEXT:    call void @__cxx_global_var_init.2()
// CHECK8-NEXT:    ret void
//