1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // expected-no-diagnostics
3 #ifndef HEADER
4 #define HEADER
5 
6 #ifdef CK1
7 ///==========================================================================///
8 // RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK1
9 // RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
10 // RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
11 
12 // RUN: %clang_cc1 -DCK1 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK3
13 // RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
14 // RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4
15 
16 
// Helper that calls an extern function which may throw; with -fexceptions the
// call inside the outlined master region must be emitted as invoke/landingpad.
void foo() { extern void mayThrow(); mayThrow(); }
18 
// Basic combined 'parallel master': the body is outlined via __kmpc_fork_call
// and foo() runs guarded by __kmpc_master/__kmpc_end_master (see CHECK1/CHECK2).
void parallel_master() {
#pragma omp parallel master
  foo();
}
23 
24 
25 
26 #endif
27 
28 #ifdef CK2
29 ///==========================================================================///
30 // RUN: %clang_cc1 -DCK2 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK5
31 // RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
32 // RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6
33 
34 // RUN: %clang_cc1 -DCK2 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK7
35 // RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
36 // RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK8
37 
38 
// 'parallel master' with private(a): the outlined function gets its own
// uninitialized alloca for 'a'; nothing is passed in from the caller
// (see CHECK5: fork_call takes 0 captured args).
void parallel_master_private() {
  int a;
#pragma omp parallel master private(a)
  a++;
}
44 
45 
46 
47 #endif
48 
49 #ifdef CK3
50 ///==========================================================================///
51 // RUN: %clang_cc1 -DCK3 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK9
52 // RUN: %clang_cc1 -DCK3 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
53 // RUN: %clang_cc1 -DCK3 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK10
54 
55 // RUN: %clang_cc1 -DCK3 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK11
56 // RUN: %clang_cc1 -DCK3 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
57 // RUN: %clang_cc1 -DCK3 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK12
58 
59 
// 'parallel master' with default(shared): 'a' stays shared, so its address is
// captured and passed to the outlined function (see CHECK9: fork_call with
// i32 1 and an i32* argument).
void parallel_master_private() {
  int a;
#pragma omp parallel master default(shared)
  a++;
}
65 
66 
67 
68 #endif
69 
70 #ifdef CK31
71 ///==========================================================================///
72 // RUN: %clang_cc1 -DCK31 -fopenmp-version=51 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK13
73 // RUN: %clang_cc1 -DCK31 -fopenmp-version=51 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
74 // RUN: %clang_cc1 -DCK31 -fopenmp-version=51 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK14
75 
76 // RUN: %clang_cc1 -DCK31 -fopenmp-version=51 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK15
77 // RUN: %clang_cc1 -DCK31 -fopenmp-version=51 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
78 // RUN: %clang_cc1 -DCK31 -fopenmp-version=51 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK16
79 
80 
// OpenMP 5.1 default(firstprivate) on a scalar: each thread should receive a
// copy of 'a' initialized from the master's value (CHECK13-CHECK16; the
// assertions for this case are below the visible range — confirm there).
void parallel_master_default_firstprivate() {
  int a;
#pragma omp parallel master default(firstprivate)
  a++;
}
86 
87 
88 
89 
90 
91 #endif
92 
93 #ifdef CK32
94 ///==========================================================================///
95 // RUN: %clang_cc1 -DCK32 -fopenmp-version=51 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK17
96 // RUN: %clang_cc1 -DCK32 -fopenmp-version=51 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
97 // RUN: %clang_cc1 -DCK32 -fopenmp-version=51 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK18
98 
99 // RUN: %clang_cc1 -DCK32 -fopenmp-version=51 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK19
100 // RUN: %clang_cc1 -DCK32 -fopenmp-version=51 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
101 // RUN: %clang_cc1 -DCK32 -fopenmp-version=51 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK20
102 
103 
// Non-trivial class used by the CK32 default(firstprivate) test: has a
// user-provided constructor/destructor (so firstprivate copies need ctor/dtor
// calls) and a static member 'y' to check static data is not privatized.
struct St {
  int a, b;
  static int y;
  St() : a(0), b(0) {}
  ~St() {}
};
int St::y = 0;
111 
// OpenMP 5.1 default(firstprivate) with a non-trivial class object plus
// statics: 'a' should be copy-constructed per thread, while the function-local
// static 'y' and the static member St::y are referenced directly, not
// privatized (CHECK17-CHECK20; assertions are below the visible range).
void parallel_master_default_firstprivate() {
  St a = St();
  static int y = 0;
#pragma omp parallel master default(firstprivate)
  {
    a.a += 1;
    a.b += 1;
    y++;
    a.y++;
  }
}
123 
124 
125 
126 
127 
128 
129 
130 
131 #endif
132 
133 #ifdef CK4
134 ///==========================================================================///
135 // RUN: %clang_cc1 -DCK4 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK21
136 // RUN: %clang_cc1 -DCK4 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
137 // RUN: %clang_cc1 -DCK4 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK22
138 
139 // RUN: %clang_cc1 -DCK4 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK23
140 // RUN: %clang_cc1 -DCK4 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
141 // RUN: %clang_cc1 -DCK4 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK24
142 
143 
// Explicit firstprivate(a) on 'parallel master': the value of 'a' is captured
// at the fork and each thread's copy is initialized from it.
void parallel_master_firstprivate() {
  int a;
#pragma omp parallel master firstprivate(a)
  a++;
}
149 
150 
151 
152 #endif
153 
154 #ifdef CK5
155 ///==========================================================================///
// RUN: %clang_cc1 -DCK5 -verify -fopenmp -fnoopenmp-use-tls -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK25
// RUN: %clang_cc1 -DCK5 -fopenmp -fnoopenmp-use-tls -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK5 -fopenmp -fnoopenmp-use-tls -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK26
159 
160 // RUN: %clang_cc1 -DCK5 -verify -fopenmp-simd -fnoopenmp-use-tls -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK27
161 // RUN: %clang_cc1 -DCK5 -fopenmp-simd -fnoopenmp-use-tls -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
162 // RUN: %clang_cc1 -DCK5 -fopenmp-simd -fnoopenmp-use-tls -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK28
163 
164 // RUN: %clang_cc1 -DCK5 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK29
165 // RUN: %clang_cc1 -DCK5 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -emit-pch -o %t %s
166 // RUN: %clang_cc1 -DCK5 -fopenmp -x c++ -triple x86_64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK30
167 
// RUN: %clang_cc1 -DCK5 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK31
169 // RUN: %clang_cc1 -DCK5 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -emit-pch -o %t %s
170 // RUN: %clang_cc1 -DCK5 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK32
171 
172 
// Threadprivate global combined with copyin(a): on entry to the parallel
// region every thread's threadprivate copy of 'a' is assigned from the master
// thread's copy. RUN lines above exercise both -fnoopenmp-use-tls
// (runtime-cache based threadprivate) and the default TLS lowering.
int a;
#pragma omp threadprivate(a)

void parallel_master_copyin() {
#pragma omp parallel master copyin(a)
  a++;
}
180 
181 
182 
183 
184 
185 
186 // TLC-CHECK-DAG:   [[INC:%.+]] = add nsw i32 [[TEN]], 1
187 // TLC-CHECK-DAG:   store i32 [[INC]], i32* [[TEN]]
188 
189 #endif
190 #ifdef CK6
191 ///==========================================================================///
192 // RUN: %clang_cc1 -DCK6 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK33
193 // RUN: %clang_cc1 -DCK6 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -emit-pch -o %t %s
194 // RUN: %clang_cc1 -DCK6 -fopenmp -x c++ -triple x86_64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK34
195 
196 // RUN: %clang_cc1 -DCK6 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK35
197 // RUN: %clang_cc1 -DCK6 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -emit-pch -o %t %s
198 // RUN: %clang_cc1 -DCK6 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK36
199 
200 
// reduction(+:g) on 'parallel master': each thread works on a private copy of
// 'g'; the partial results are combined by the runtime at region end (the
// switch / case 1: / case 2: markers below correspond to the atomic vs.
// critical reduction paths emitted by __kmpc_reduce_nowait).
void parallel_master_reduction() {
  int g;
#pragma omp parallel master reduction(+:g)
  g = 1;
}
206 
207 
208 
209 
210 
211 // switch
212 
213 // case 1:
214 
215 // case 2:
216 
217 #endif
218 #ifdef CK7
219 ///==========================================================================///
220 // RUN: %clang_cc1 -DCK7 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK37
221 // RUN: %clang_cc1 -DCK7 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
222 // RUN: %clang_cc1 -DCK7 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK38
223 
224 // RUN: %clang_cc1 -DCK7 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK39
225 // RUN: %clang_cc1 -DCK7 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
226 // RUN: %clang_cc1 -DCK7 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK40
227 
228 
// if(parallel: false) forces the parallel region to execute serialized (at
// most one thread); the recursive call exercises codegen of the serialized
// runtime path rather than a plain fork.
void parallel_master_if() {
#pragma omp parallel master if (parallel: false)
  parallel_master_if();
}
233 
234 
235 
236 #endif
237 #ifdef CK8
238 ///==========================================================================///
239 // RUN: %clang_cc1 -DCK8 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK41
240 // RUN: %clang_cc1 -DCK8 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
241 // RUN: %clang_cc1 -DCK8 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK42
242 
243 // RUN: %clang_cc1 -DCK8 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK43
244 // RUN: %clang_cc1 -DCK8 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
245 // RUN: %clang_cc1 -DCK8 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK44
246 
247 typedef __INTPTR_TYPE__ intptr_t;
248 
249 
250 void foo();
251 
// Class with a non-trivial ctor/dtor and a conversion operator; present in
// the CK8 proc_bind test TU to keep codegen honest around non-POD types.
struct S {
  intptr_t a, b, c;
  S(intptr_t a) : a(a) {}
  operator char() { return a; }
  ~S() {}
};
258 
// Template instantiation path for proc_bind(master): the clause must survive
// template codegen (instantiated as tmain<int> from main below).
template <typename T>
T tmain() {
#pragma omp parallel master proc_bind(master)
  foo();
  return T();
}
265 
// Exercises the remaining proc_bind policies (spread, close) on
// 'parallel master', then the templated master policy via tmain<int>.
int main() {
#pragma omp parallel master proc_bind(spread)
  foo();
#pragma omp parallel master proc_bind(close)
  foo();
  return tmain<int>();
}
273 
274 
275 
276 
277 #endif
278 #ifdef CK9
279 ///==========================================================================///
280 // RUN: %clang_cc1 -DCK9 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK45
281 // RUN: %clang_cc1 -DCK9 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
282 // RUN: %clang_cc1 -DCK9 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK46
283 
284 // RUN: %clang_cc1 -DCK9 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK47
285 // RUN: %clang_cc1 -DCK9 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
286 // RUN: %clang_cc1 -DCK9 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK48
287 typedef void **omp_allocator_handle_t;
288 extern const omp_allocator_handle_t omp_null_allocator;
289 extern const omp_allocator_handle_t omp_default_mem_alloc;
290 extern const omp_allocator_handle_t omp_large_cap_mem_alloc;
291 extern const omp_allocator_handle_t omp_const_mem_alloc;
292 extern const omp_allocator_handle_t omp_high_bw_mem_alloc;
293 extern const omp_allocator_handle_t omp_low_lat_mem_alloc;
294 extern const omp_allocator_handle_t omp_cgroup_mem_alloc;
295 extern const omp_allocator_handle_t omp_pteam_mem_alloc;
296 extern const omp_allocator_handle_t omp_thread_mem_alloc;
297 
// allocate(myalloc:a) with firstprivate(a): the thread-private copy of 'a'
// must be obtained through the user-supplied allocator handle (runtime
// __kmpc_alloc/__kmpc_free path) instead of a plain stack alloca.
void parallel_master_allocate() {
  int a;
  omp_allocator_handle_t myalloc = nullptr;
#pragma omp parallel master firstprivate(a) allocate(myalloc:a)
  a++;
}
304 
305 
306 #endif
307 #endif
308 // CHECK1-LABEL: define {{[^@]+}}@_Z3foov
309 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
310 // CHECK1-NEXT:  entry:
311 // CHECK1-NEXT:    call void @_Z8mayThrowv()
312 // CHECK1-NEXT:    ret void
313 //
314 //
315 // CHECK1-LABEL: define {{[^@]+}}@_Z15parallel_masterv
316 // CHECK1-SAME: () #[[ATTR2:[0-9]+]] {
317 // CHECK1-NEXT:  entry:
318 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
319 // CHECK1-NEXT:    ret void
320 //
321 //
322 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
323 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
324 // CHECK1-NEXT:  entry:
325 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
326 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
327 // CHECK1-NEXT:    [[EXN_SLOT:%.*]] = alloca i8*, align 8
328 // CHECK1-NEXT:    [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
329 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
330 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
331 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
332 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
333 // CHECK1-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
334 // CHECK1-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
335 // CHECK1-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
336 // CHECK1:       omp_if.then:
337 // CHECK1-NEXT:    invoke void @_Z3foov()
338 // CHECK1-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
339 // CHECK1:       invoke.cont:
340 // CHECK1-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
341 // CHECK1-NEXT:    br label [[OMP_IF_END]]
342 // CHECK1:       lpad:
343 // CHECK1-NEXT:    [[TMP4:%.*]] = landingpad { i8*, i32 }
344 // CHECK1-NEXT:    catch i8* null
345 // CHECK1-NEXT:    [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0
346 // CHECK1-NEXT:    store i8* [[TMP5]], i8** [[EXN_SLOT]], align 8
347 // CHECK1-NEXT:    [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 1
348 // CHECK1-NEXT:    store i32 [[TMP6]], i32* [[EHSELECTOR_SLOT]], align 4
349 // CHECK1-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
350 // CHECK1-NEXT:    br label [[TERMINATE_HANDLER:%.*]]
351 // CHECK1:       omp_if.end:
352 // CHECK1-NEXT:    ret void
353 // CHECK1:       terminate.handler:
354 // CHECK1-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8
355 // CHECK1-NEXT:    call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR6:[0-9]+]]
356 // CHECK1-NEXT:    unreachable
357 //
358 //
359 // CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate
360 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat {
361 // CHECK1-NEXT:    [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR4:[0-9]+]]
362 // CHECK1-NEXT:    call void @_ZSt9terminatev() #[[ATTR6]]
363 // CHECK1-NEXT:    unreachable
364 //
365 //
366 // CHECK2-LABEL: define {{[^@]+}}@_Z3foov
367 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
368 // CHECK2-NEXT:  entry:
369 // CHECK2-NEXT:    call void @_Z8mayThrowv()
370 // CHECK2-NEXT:    ret void
371 //
372 //
373 // CHECK2-LABEL: define {{[^@]+}}@_Z15parallel_masterv
374 // CHECK2-SAME: () #[[ATTR2:[0-9]+]] {
375 // CHECK2-NEXT:  entry:
376 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
377 // CHECK2-NEXT:    ret void
378 //
379 //
380 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
381 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
382 // CHECK2-NEXT:  entry:
383 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
384 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
385 // CHECK2-NEXT:    [[EXN_SLOT:%.*]] = alloca i8*, align 8
386 // CHECK2-NEXT:    [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
387 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
388 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
389 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
390 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
391 // CHECK2-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
392 // CHECK2-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
393 // CHECK2-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
394 // CHECK2:       omp_if.then:
395 // CHECK2-NEXT:    invoke void @_Z3foov()
396 // CHECK2-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
397 // CHECK2:       invoke.cont:
398 // CHECK2-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
399 // CHECK2-NEXT:    br label [[OMP_IF_END]]
400 // CHECK2:       lpad:
401 // CHECK2-NEXT:    [[TMP4:%.*]] = landingpad { i8*, i32 }
402 // CHECK2-NEXT:    catch i8* null
403 // CHECK2-NEXT:    [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0
404 // CHECK2-NEXT:    store i8* [[TMP5]], i8** [[EXN_SLOT]], align 8
405 // CHECK2-NEXT:    [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 1
406 // CHECK2-NEXT:    store i32 [[TMP6]], i32* [[EHSELECTOR_SLOT]], align 4
407 // CHECK2-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
408 // CHECK2-NEXT:    br label [[TERMINATE_HANDLER:%.*]]
409 // CHECK2:       omp_if.end:
410 // CHECK2-NEXT:    ret void
411 // CHECK2:       terminate.handler:
412 // CHECK2-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8
413 // CHECK2-NEXT:    call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR6:[0-9]+]]
414 // CHECK2-NEXT:    unreachable
415 //
416 //
417 // CHECK2-LABEL: define {{[^@]+}}@__clang_call_terminate
418 // CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat {
419 // CHECK2-NEXT:    [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR4:[0-9]+]]
420 // CHECK2-NEXT:    call void @_ZSt9terminatev() #[[ATTR6]]
421 // CHECK2-NEXT:    unreachable
422 //
423 //
424 // CHECK3-LABEL: define {{[^@]+}}@_Z3foov
425 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
426 // CHECK3-NEXT:  entry:
427 // CHECK3-NEXT:    call void @_Z8mayThrowv()
428 // CHECK3-NEXT:    ret void
429 //
430 //
431 // CHECK3-LABEL: define {{[^@]+}}@_Z15parallel_masterv
432 // CHECK3-SAME: () #[[ATTR2:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
433 // CHECK3-NEXT:  entry:
434 // CHECK3-NEXT:    invoke void @_Z3foov()
435 // CHECK3-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
436 // CHECK3:       invoke.cont:
437 // CHECK3-NEXT:    ret void
438 // CHECK3:       terminate.lpad:
439 // CHECK3-NEXT:    [[TMP0:%.*]] = landingpad { i8*, i32 }
440 // CHECK3-NEXT:    catch i8* null
441 // CHECK3-NEXT:    [[TMP1:%.*]] = extractvalue { i8*, i32 } [[TMP0]], 0
442 // CHECK3-NEXT:    call void @__clang_call_terminate(i8* [[TMP1]]) #[[ATTR4:[0-9]+]]
443 // CHECK3-NEXT:    unreachable
444 //
445 //
446 // CHECK3-LABEL: define {{[^@]+}}@__clang_call_terminate
447 // CHECK3-SAME: (i8* [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] comdat {
448 // CHECK3-NEXT:    [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR5:[0-9]+]]
449 // CHECK3-NEXT:    call void @_ZSt9terminatev() #[[ATTR4]]
450 // CHECK3-NEXT:    unreachable
451 //
452 //
453 // CHECK4-LABEL: define {{[^@]+}}@_Z3foov
454 // CHECK4-SAME: () #[[ATTR0:[0-9]+]] {
455 // CHECK4-NEXT:  entry:
456 // CHECK4-NEXT:    call void @_Z8mayThrowv()
457 // CHECK4-NEXT:    ret void
458 //
459 //
460 // CHECK4-LABEL: define {{[^@]+}}@_Z15parallel_masterv
461 // CHECK4-SAME: () #[[ATTR2:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
462 // CHECK4-NEXT:  entry:
463 // CHECK4-NEXT:    invoke void @_Z3foov()
464 // CHECK4-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
465 // CHECK4:       invoke.cont:
466 // CHECK4-NEXT:    ret void
467 // CHECK4:       terminate.lpad:
468 // CHECK4-NEXT:    [[TMP0:%.*]] = landingpad { i8*, i32 }
469 // CHECK4-NEXT:    catch i8* null
470 // CHECK4-NEXT:    [[TMP1:%.*]] = extractvalue { i8*, i32 } [[TMP0]], 0
471 // CHECK4-NEXT:    call void @__clang_call_terminate(i8* [[TMP1]]) #[[ATTR4:[0-9]+]]
472 // CHECK4-NEXT:    unreachable
473 //
474 //
475 // CHECK4-LABEL: define {{[^@]+}}@__clang_call_terminate
476 // CHECK4-SAME: (i8* [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] comdat {
477 // CHECK4-NEXT:    [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR5:[0-9]+]]
478 // CHECK4-NEXT:    call void @_ZSt9terminatev() #[[ATTR4]]
479 // CHECK4-NEXT:    unreachable
480 //
481 //
482 // CHECK5-LABEL: define {{[^@]+}}@_Z23parallel_master_privatev
483 // CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
484 // CHECK5-NEXT:  entry:
485 // CHECK5-NEXT:    [[A:%.*]] = alloca i32, align 4
486 // CHECK5-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
487 // CHECK5-NEXT:    ret void
488 //
489 //
490 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined.
491 // CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
492 // CHECK5-NEXT:  entry:
493 // CHECK5-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
494 // CHECK5-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
495 // CHECK5-NEXT:    [[A:%.*]] = alloca i32, align 4
496 // CHECK5-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
497 // CHECK5-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
498 // CHECK5-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
499 // CHECK5-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
500 // CHECK5-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
501 // CHECK5-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
502 // CHECK5-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
503 // CHECK5:       omp_if.then:
504 // CHECK5-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4
505 // CHECK5-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP4]], 1
506 // CHECK5-NEXT:    store i32 [[INC]], i32* [[A]], align 4
507 // CHECK5-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
508 // CHECK5-NEXT:    br label [[OMP_IF_END]]
509 // CHECK5:       omp_if.end:
510 // CHECK5-NEXT:    ret void
511 //
512 //
513 // CHECK6-LABEL: define {{[^@]+}}@_Z23parallel_master_privatev
514 // CHECK6-SAME: () #[[ATTR0:[0-9]+]] {
515 // CHECK6-NEXT:  entry:
516 // CHECK6-NEXT:    [[A:%.*]] = alloca i32, align 4
517 // CHECK6-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
518 // CHECK6-NEXT:    ret void
519 //
520 //
521 // CHECK6-LABEL: define {{[^@]+}}@.omp_outlined.
522 // CHECK6-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
523 // CHECK6-NEXT:  entry:
524 // CHECK6-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
525 // CHECK6-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
526 // CHECK6-NEXT:    [[A:%.*]] = alloca i32, align 4
527 // CHECK6-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
528 // CHECK6-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
529 // CHECK6-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
530 // CHECK6-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
531 // CHECK6-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
532 // CHECK6-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
533 // CHECK6-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
534 // CHECK6:       omp_if.then:
535 // CHECK6-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A]], align 4
536 // CHECK6-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP4]], 1
537 // CHECK6-NEXT:    store i32 [[INC]], i32* [[A]], align 4
538 // CHECK6-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
539 // CHECK6-NEXT:    br label [[OMP_IF_END]]
540 // CHECK6:       omp_if.end:
541 // CHECK6-NEXT:    ret void
542 //
543 //
544 // CHECK7-LABEL: define {{[^@]+}}@_Z23parallel_master_privatev
545 // CHECK7-SAME: () #[[ATTR0:[0-9]+]] {
546 // CHECK7-NEXT:  entry:
547 // CHECK7-NEXT:    [[A:%.*]] = alloca i32, align 4
548 // CHECK7-NEXT:    [[A1:%.*]] = alloca i32, align 4
549 // CHECK7-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A1]], align 4
550 // CHECK7-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
551 // CHECK7-NEXT:    store i32 [[INC]], i32* [[A1]], align 4
552 // CHECK7-NEXT:    ret void
553 //
554 //
555 // CHECK8-LABEL: define {{[^@]+}}@_Z23parallel_master_privatev
556 // CHECK8-SAME: () #[[ATTR0:[0-9]+]] {
557 // CHECK8-NEXT:  entry:
558 // CHECK8-NEXT:    [[A:%.*]] = alloca i32, align 4
559 // CHECK8-NEXT:    [[A1:%.*]] = alloca i32, align 4
560 // CHECK8-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A1]], align 4
561 // CHECK8-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
562 // CHECK8-NEXT:    store i32 [[INC]], i32* [[A1]], align 4
563 // CHECK8-NEXT:    ret void
564 //
565 //
566 // CHECK9-LABEL: define {{[^@]+}}@_Z23parallel_master_privatev
567 // CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
568 // CHECK9-NEXT:  entry:
569 // CHECK9-NEXT:    [[A:%.*]] = alloca i32, align 4
570 // CHECK9-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[A]])
571 // CHECK9-NEXT:    ret void
572 //
573 //
574 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined.
575 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1:[0-9]+]] {
576 // CHECK9-NEXT:  entry:
577 // CHECK9-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
578 // CHECK9-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
579 // CHECK9-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
580 // CHECK9-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
581 // CHECK9-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
582 // CHECK9-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
583 // CHECK9-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
584 // CHECK9-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
585 // CHECK9-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
586 // CHECK9-NEXT:    [[TMP3:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
587 // CHECK9-NEXT:    [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
588 // CHECK9-NEXT:    br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
589 // CHECK9:       omp_if.then:
590 // CHECK9-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
591 // CHECK9-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP5]], 1
592 // CHECK9-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
593 // CHECK9-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
594 // CHECK9-NEXT:    br label [[OMP_IF_END]]
595 // CHECK9:       omp_if.end:
596 // CHECK9-NEXT:    ret void
597 //
598 //
599 // CHECK10-LABEL: define {{[^@]+}}@_Z23parallel_master_privatev
600 // CHECK10-SAME: () #[[ATTR0:[0-9]+]] {
601 // CHECK10-NEXT:  entry:
602 // CHECK10-NEXT:    [[A:%.*]] = alloca i32, align 4
603 // CHECK10-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[A]])
604 // CHECK10-NEXT:    ret void
605 //
606 //
607 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined.
608 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1:[0-9]+]] {
609 // CHECK10-NEXT:  entry:
610 // CHECK10-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
611 // CHECK10-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
612 // CHECK10-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
613 // CHECK10-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
614 // CHECK10-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
615 // CHECK10-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
616 // CHECK10-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
617 // CHECK10-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
618 // CHECK10-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
619 // CHECK10-NEXT:    [[TMP3:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
620 // CHECK10-NEXT:    [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
621 // CHECK10-NEXT:    br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
622 // CHECK10:       omp_if.then:
623 // CHECK10-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP0]], align 4
624 // CHECK10-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP5]], 1
625 // CHECK10-NEXT:    store i32 [[INC]], i32* [[TMP0]], align 4
626 // CHECK10-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
627 // CHECK10-NEXT:    br label [[OMP_IF_END]]
628 // CHECK10:       omp_if.end:
629 // CHECK10-NEXT:    ret void
630 //
631 //
632 // CHECK11-LABEL: define {{[^@]+}}@_Z23parallel_master_privatev
633 // CHECK11-SAME: () #[[ATTR0:[0-9]+]] {
634 // CHECK11-NEXT:  entry:
635 // CHECK11-NEXT:    [[A:%.*]] = alloca i32, align 4
636 // CHECK11-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
637 // CHECK11-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
638 // CHECK11-NEXT:    store i32 [[INC]], i32* [[A]], align 4
639 // CHECK11-NEXT:    ret void
640 //
641 //
642 // CHECK12-LABEL: define {{[^@]+}}@_Z23parallel_master_privatev
643 // CHECK12-SAME: () #[[ATTR0:[0-9]+]] {
644 // CHECK12-NEXT:  entry:
645 // CHECK12-NEXT:    [[A:%.*]] = alloca i32, align 4
646 // CHECK12-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
647 // CHECK12-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
648 // CHECK12-NEXT:    store i32 [[INC]], i32* [[A]], align 4
649 // CHECK12-NEXT:    ret void
650 //
651 //
652 // CHECK13-LABEL: define {{[^@]+}}@_Z36parallel_master_default_firstprivatev
653 // CHECK13-SAME: () #[[ATTR0:[0-9]+]] {
654 // CHECK13-NEXT:  entry:
655 // CHECK13-NEXT:    [[A:%.*]] = alloca i32, align 4
656 // CHECK13-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
657 // CHECK13-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
658 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
659 // CHECK13-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
660 // CHECK13-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
661 // CHECK13-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]])
662 // CHECK13-NEXT:    ret void
663 //
664 //
665 // CHECK13-LABEL: define {{[^@]+}}@.omp_outlined.
666 // CHECK13-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]]) #[[ATTR1:[0-9]+]] {
667 // CHECK13-NEXT:  entry:
668 // CHECK13-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
669 // CHECK13-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
670 // CHECK13-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
671 // CHECK13-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
672 // CHECK13-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
673 // CHECK13-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
674 // CHECK13-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
675 // CHECK13-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
676 // CHECK13-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
677 // CHECK13-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
678 // CHECK13-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
679 // CHECK13-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
680 // CHECK13:       omp_if.then:
681 // CHECK13-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
682 // CHECK13-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP4]], 1
683 // CHECK13-NEXT:    store i32 [[INC]], i32* [[CONV]], align 8
684 // CHECK13-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
685 // CHECK13-NEXT:    br label [[OMP_IF_END]]
686 // CHECK13:       omp_if.end:
687 // CHECK13-NEXT:    ret void
688 //
689 //
690 // CHECK14-LABEL: define {{[^@]+}}@_Z36parallel_master_default_firstprivatev
691 // CHECK14-SAME: () #[[ATTR0:[0-9]+]] {
692 // CHECK14-NEXT:  entry:
693 // CHECK14-NEXT:    [[A:%.*]] = alloca i32, align 4
694 // CHECK14-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
695 // CHECK14-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
696 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
697 // CHECK14-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
698 // CHECK14-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
699 // CHECK14-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]])
700 // CHECK14-NEXT:    ret void
701 //
702 //
703 // CHECK14-LABEL: define {{[^@]+}}@.omp_outlined.
704 // CHECK14-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]]) #[[ATTR1:[0-9]+]] {
705 // CHECK14-NEXT:  entry:
706 // CHECK14-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
707 // CHECK14-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
708 // CHECK14-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
709 // CHECK14-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
710 // CHECK14-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
711 // CHECK14-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
712 // CHECK14-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
713 // CHECK14-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
714 // CHECK14-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
715 // CHECK14-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
716 // CHECK14-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
717 // CHECK14-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
718 // CHECK14:       omp_if.then:
719 // CHECK14-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
720 // CHECK14-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP4]], 1
721 // CHECK14-NEXT:    store i32 [[INC]], i32* [[CONV]], align 8
722 // CHECK14-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
723 // CHECK14-NEXT:    br label [[OMP_IF_END]]
724 // CHECK14:       omp_if.end:
725 // CHECK14-NEXT:    ret void
726 //
727 //
728 // CHECK15-LABEL: define {{[^@]+}}@_Z36parallel_master_default_firstprivatev
729 // CHECK15-SAME: () #[[ATTR0:[0-9]+]] {
730 // CHECK15-NEXT:  entry:
731 // CHECK15-NEXT:    [[A:%.*]] = alloca i32, align 4
732 // CHECK15-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
733 // CHECK15-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
734 // CHECK15-NEXT:    store i32 [[INC]], i32* [[A]], align 4
735 // CHECK15-NEXT:    ret void
736 //
737 //
738 // CHECK16-LABEL: define {{[^@]+}}@_Z36parallel_master_default_firstprivatev
739 // CHECK16-SAME: () #[[ATTR0:[0-9]+]] {
740 // CHECK16-NEXT:  entry:
741 // CHECK16-NEXT:    [[A:%.*]] = alloca i32, align 4
742 // CHECK16-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
743 // CHECK16-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
744 // CHECK16-NEXT:    store i32 [[INC]], i32* [[A]], align 4
745 // CHECK16-NEXT:    ret void
746 //
747 //
748 // CHECK17-LABEL: define {{[^@]+}}@_Z36parallel_master_default_firstprivatev
749 // CHECK17-SAME: () #[[ATTR0:[0-9]+]] {
750 // CHECK17-NEXT:  entry:
751 // CHECK17-NEXT:    [[A:%.*]] = alloca [[STRUCT_ST:%.*]], align 4
752 // CHECK17-NEXT:    [[Y_CASTED:%.*]] = alloca i64, align 8
753 // CHECK17-NEXT:    call void @_ZN2StC1Ev(%struct.St* nonnull align 4 dereferenceable(8) [[A]])
754 // CHECK17-NEXT:    [[TMP0:%.*]] = load i32, i32* @_ZZ36parallel_master_default_firstprivatevE1y, align 4
755 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[Y_CASTED]] to i32*
756 // CHECK17-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
757 // CHECK17-NEXT:    [[TMP1:%.*]] = load i64, i64* [[Y_CASTED]], align 8
758 // CHECK17-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.St*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.St* [[A]], i64 [[TMP1]])
759 // CHECK17-NEXT:    call void @_ZN2StD1Ev(%struct.St* nonnull align 4 dereferenceable(8) [[A]]) #[[ATTR3:[0-9]+]]
760 // CHECK17-NEXT:    ret void
761 //
762 //
763 // CHECK17-LABEL: define {{[^@]+}}@_ZN2StC1Ev
764 // CHECK17-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
765 // CHECK17-NEXT:  entry:
766 // CHECK17-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
767 // CHECK17-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
768 // CHECK17-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
769 // CHECK17-NEXT:    call void @_ZN2StC2Ev(%struct.St* nonnull align 4 dereferenceable(8) [[THIS1]])
770 // CHECK17-NEXT:    ret void
771 //
772 //
773 // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined.
774 // CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.St* nonnull align 4 dereferenceable(8) [[A:%.*]], i64 [[Y:%.*]]) #[[ATTR2:[0-9]+]] {
775 // CHECK17-NEXT:  entry:
776 // CHECK17-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
777 // CHECK17-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
778 // CHECK17-NEXT:    [[A_ADDR:%.*]] = alloca %struct.St*, align 8
779 // CHECK17-NEXT:    [[Y_ADDR:%.*]] = alloca i64, align 8
780 // CHECK17-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
781 // CHECK17-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
782 // CHECK17-NEXT:    store %struct.St* [[A]], %struct.St** [[A_ADDR]], align 8
783 // CHECK17-NEXT:    store i64 [[Y]], i64* [[Y_ADDR]], align 8
784 // CHECK17-NEXT:    [[TMP0:%.*]] = load %struct.St*, %struct.St** [[A_ADDR]], align 8
785 // CHECK17-NEXT:    [[CONV:%.*]] = bitcast i64* [[Y_ADDR]] to i32*
786 // CHECK17-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
787 // CHECK17-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
788 // CHECK17-NEXT:    [[TMP3:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
789 // CHECK17-NEXT:    [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
790 // CHECK17-NEXT:    br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
791 // CHECK17:       omp_if.then:
792 // CHECK17-NEXT:    [[A1:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[TMP0]], i32 0, i32 0
793 // CHECK17-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A1]], align 4
794 // CHECK17-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP5]], 1
795 // CHECK17-NEXT:    store i32 [[ADD]], i32* [[A1]], align 4
796 // CHECK17-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[TMP0]], i32 0, i32 1
797 // CHECK17-NEXT:    [[TMP6:%.*]] = load i32, i32* [[B]], align 4
798 // CHECK17-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
799 // CHECK17-NEXT:    store i32 [[ADD2]], i32* [[B]], align 4
800 // CHECK17-NEXT:    [[TMP7:%.*]] = load i32, i32* [[CONV]], align 8
801 // CHECK17-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP7]], 1
802 // CHECK17-NEXT:    store i32 [[INC]], i32* [[CONV]], align 8
803 // CHECK17-NEXT:    [[TMP8:%.*]] = load i32, i32* @_ZN2St1yE, align 4
804 // CHECK17-NEXT:    [[INC3:%.*]] = add nsw i32 [[TMP8]], 1
805 // CHECK17-NEXT:    store i32 [[INC3]], i32* @_ZN2St1yE, align 4
806 // CHECK17-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
807 // CHECK17-NEXT:    br label [[OMP_IF_END]]
808 // CHECK17:       omp_if.end:
809 // CHECK17-NEXT:    ret void
810 //
811 //
812 // CHECK17-LABEL: define {{[^@]+}}@_ZN2StD1Ev
813 // CHECK17-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4:[0-9]+]] comdat align 2 {
814 // CHECK17-NEXT:  entry:
815 // CHECK17-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
816 // CHECK17-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
817 // CHECK17-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
818 // CHECK17-NEXT:    call void @_ZN2StD2Ev(%struct.St* nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]]
819 // CHECK17-NEXT:    ret void
820 //
821 //
822 // CHECK17-LABEL: define {{[^@]+}}@_ZN2StC2Ev
823 // CHECK17-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 {
824 // CHECK17-NEXT:  entry:
825 // CHECK17-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
826 // CHECK17-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
827 // CHECK17-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
828 // CHECK17-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[THIS1]], i32 0, i32 0
829 // CHECK17-NEXT:    store i32 0, i32* [[A]], align 4
830 // CHECK17-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[THIS1]], i32 0, i32 1
831 // CHECK17-NEXT:    store i32 0, i32* [[B]], align 4
832 // CHECK17-NEXT:    ret void
833 //
834 //
835 // CHECK17-LABEL: define {{[^@]+}}@_ZN2StD2Ev
836 // CHECK17-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 {
837 // CHECK17-NEXT:  entry:
838 // CHECK17-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
839 // CHECK17-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
840 // CHECK17-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
841 // CHECK17-NEXT:    ret void
842 //
843 //
844 // CHECK18-LABEL: define {{[^@]+}}@_Z36parallel_master_default_firstprivatev
845 // CHECK18-SAME: () #[[ATTR0:[0-9]+]] {
846 // CHECK18-NEXT:  entry:
847 // CHECK18-NEXT:    [[A:%.*]] = alloca [[STRUCT_ST:%.*]], align 4
848 // CHECK18-NEXT:    [[Y_CASTED:%.*]] = alloca i64, align 8
849 // CHECK18-NEXT:    call void @_ZN2StC1Ev(%struct.St* nonnull align 4 dereferenceable(8) [[A]])
850 // CHECK18-NEXT:    [[TMP0:%.*]] = load i32, i32* @_ZZ36parallel_master_default_firstprivatevE1y, align 4
851 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[Y_CASTED]] to i32*
852 // CHECK18-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
853 // CHECK18-NEXT:    [[TMP1:%.*]] = load i64, i64* [[Y_CASTED]], align 8
854 // CHECK18-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.St*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.St* [[A]], i64 [[TMP1]])
855 // CHECK18-NEXT:    call void @_ZN2StD1Ev(%struct.St* nonnull align 4 dereferenceable(8) [[A]]) #[[ATTR3:[0-9]+]]
856 // CHECK18-NEXT:    ret void
857 //
858 //
859 // CHECK18-LABEL: define {{[^@]+}}@_ZN2StC1Ev
860 // CHECK18-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
861 // CHECK18-NEXT:  entry:
862 // CHECK18-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
863 // CHECK18-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
864 // CHECK18-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
865 // CHECK18-NEXT:    call void @_ZN2StC2Ev(%struct.St* nonnull align 4 dereferenceable(8) [[THIS1]])
866 // CHECK18-NEXT:    ret void
867 //
868 //
869 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined.
870 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.St* nonnull align 4 dereferenceable(8) [[A:%.*]], i64 [[Y:%.*]]) #[[ATTR2:[0-9]+]] {
871 // CHECK18-NEXT:  entry:
872 // CHECK18-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
873 // CHECK18-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
874 // CHECK18-NEXT:    [[A_ADDR:%.*]] = alloca %struct.St*, align 8
875 // CHECK18-NEXT:    [[Y_ADDR:%.*]] = alloca i64, align 8
876 // CHECK18-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
877 // CHECK18-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
878 // CHECK18-NEXT:    store %struct.St* [[A]], %struct.St** [[A_ADDR]], align 8
879 // CHECK18-NEXT:    store i64 [[Y]], i64* [[Y_ADDR]], align 8
880 // CHECK18-NEXT:    [[TMP0:%.*]] = load %struct.St*, %struct.St** [[A_ADDR]], align 8
881 // CHECK18-NEXT:    [[CONV:%.*]] = bitcast i64* [[Y_ADDR]] to i32*
882 // CHECK18-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
883 // CHECK18-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
884 // CHECK18-NEXT:    [[TMP3:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
885 // CHECK18-NEXT:    [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
886 // CHECK18-NEXT:    br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
887 // CHECK18:       omp_if.then:
888 // CHECK18-NEXT:    [[A1:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[TMP0]], i32 0, i32 0
889 // CHECK18-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A1]], align 4
890 // CHECK18-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP5]], 1
891 // CHECK18-NEXT:    store i32 [[ADD]], i32* [[A1]], align 4
892 // CHECK18-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[TMP0]], i32 0, i32 1
893 // CHECK18-NEXT:    [[TMP6:%.*]] = load i32, i32* [[B]], align 4
894 // CHECK18-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
895 // CHECK18-NEXT:    store i32 [[ADD2]], i32* [[B]], align 4
896 // CHECK18-NEXT:    [[TMP7:%.*]] = load i32, i32* [[CONV]], align 8
897 // CHECK18-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP7]], 1
898 // CHECK18-NEXT:    store i32 [[INC]], i32* [[CONV]], align 8
899 // CHECK18-NEXT:    [[TMP8:%.*]] = load i32, i32* @_ZN2St1yE, align 4
900 // CHECK18-NEXT:    [[INC3:%.*]] = add nsw i32 [[TMP8]], 1
901 // CHECK18-NEXT:    store i32 [[INC3]], i32* @_ZN2St1yE, align 4
902 // CHECK18-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
903 // CHECK18-NEXT:    br label [[OMP_IF_END]]
904 // CHECK18:       omp_if.end:
905 // CHECK18-NEXT:    ret void
906 //
907 //
908 // CHECK18-LABEL: define {{[^@]+}}@_ZN2StD1Ev
909 // CHECK18-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4:[0-9]+]] comdat align 2 {
910 // CHECK18-NEXT:  entry:
911 // CHECK18-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
912 // CHECK18-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
913 // CHECK18-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
914 // CHECK18-NEXT:    call void @_ZN2StD2Ev(%struct.St* nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]]
915 // CHECK18-NEXT:    ret void
916 //
917 //
918 // CHECK18-LABEL: define {{[^@]+}}@_ZN2StC2Ev
919 // CHECK18-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 {
920 // CHECK18-NEXT:  entry:
921 // CHECK18-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
922 // CHECK18-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
923 // CHECK18-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
924 // CHECK18-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[THIS1]], i32 0, i32 0
925 // CHECK18-NEXT:    store i32 0, i32* [[A]], align 4
926 // CHECK18-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[THIS1]], i32 0, i32 1
927 // CHECK18-NEXT:    store i32 0, i32* [[B]], align 4
928 // CHECK18-NEXT:    ret void
929 //
930 //
931 // CHECK18-LABEL: define {{[^@]+}}@_ZN2StD2Ev
932 // CHECK18-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 {
933 // CHECK18-NEXT:  entry:
934 // CHECK18-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
935 // CHECK18-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
936 // CHECK18-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
937 // CHECK18-NEXT:    ret void
938 //
939 //
940 // CHECK19-LABEL: define {{[^@]+}}@_Z36parallel_master_default_firstprivatev
941 // CHECK19-SAME: () #[[ATTR0:[0-9]+]] {
942 // CHECK19-NEXT:  entry:
943 // CHECK19-NEXT:    [[A:%.*]] = alloca [[STRUCT_ST:%.*]], align 4
944 // CHECK19-NEXT:    call void @_ZN2StC1Ev(%struct.St* nonnull align 4 dereferenceable(8) [[A]])
945 // CHECK19-NEXT:    [[A1:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[A]], i32 0, i32 0
946 // CHECK19-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A1]], align 4
947 // CHECK19-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
948 // CHECK19-NEXT:    store i32 [[ADD]], i32* [[A1]], align 4
949 // CHECK19-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[A]], i32 0, i32 1
950 // CHECK19-NEXT:    [[TMP1:%.*]] = load i32, i32* [[B]], align 4
951 // CHECK19-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP1]], 1
952 // CHECK19-NEXT:    store i32 [[ADD2]], i32* [[B]], align 4
953 // CHECK19-NEXT:    [[TMP2:%.*]] = load i32, i32* @_ZZ36parallel_master_default_firstprivatevE1y, align 4
954 // CHECK19-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP2]], 1
955 // CHECK19-NEXT:    store i32 [[INC]], i32* @_ZZ36parallel_master_default_firstprivatevE1y, align 4
956 // CHECK19-NEXT:    [[TMP3:%.*]] = load i32, i32* @_ZN2St1yE, align 4
957 // CHECK19-NEXT:    [[INC3:%.*]] = add nsw i32 [[TMP3]], 1
958 // CHECK19-NEXT:    store i32 [[INC3]], i32* @_ZN2St1yE, align 4
959 // CHECK19-NEXT:    call void @_ZN2StD1Ev(%struct.St* nonnull align 4 dereferenceable(8) [[A]]) #[[ATTR3:[0-9]+]]
960 // CHECK19-NEXT:    ret void
961 //
962 //
963 // CHECK19-LABEL: define {{[^@]+}}@_ZN2StC1Ev
964 // CHECK19-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
965 // CHECK19-NEXT:  entry:
966 // CHECK19-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
967 // CHECK19-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
968 // CHECK19-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
969 // CHECK19-NEXT:    call void @_ZN2StC2Ev(%struct.St* nonnull align 4 dereferenceable(8) [[THIS1]])
970 // CHECK19-NEXT:    ret void
971 //
972 //
973 // CHECK19-LABEL: define {{[^@]+}}@_ZN2StD1Ev
974 // CHECK19-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 {
975 // CHECK19-NEXT:  entry:
976 // CHECK19-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
977 // CHECK19-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
978 // CHECK19-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
979 // CHECK19-NEXT:    call void @_ZN2StD2Ev(%struct.St* nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]]
980 // CHECK19-NEXT:    ret void
981 //
982 //
983 // CHECK19-LABEL: define {{[^@]+}}@_ZN2StC2Ev
984 // CHECK19-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 {
985 // CHECK19-NEXT:  entry:
986 // CHECK19-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
987 // CHECK19-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
988 // CHECK19-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
989 // CHECK19-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[THIS1]], i32 0, i32 0
990 // CHECK19-NEXT:    store i32 0, i32* [[A]], align 4
991 // CHECK19-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[THIS1]], i32 0, i32 1
992 // CHECK19-NEXT:    store i32 0, i32* [[B]], align 4
993 // CHECK19-NEXT:    ret void
994 //
995 //
996 // CHECK19-LABEL: define {{[^@]+}}@_ZN2StD2Ev
997 // CHECK19-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 {
998 // CHECK19-NEXT:  entry:
999 // CHECK19-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
1000 // CHECK19-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
1001 // CHECK19-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
1002 // CHECK19-NEXT:    ret void
1003 //
1004 //
1005 // CHECK20-LABEL: define {{[^@]+}}@_Z36parallel_master_default_firstprivatev
1006 // CHECK20-SAME: () #[[ATTR0:[0-9]+]] {
1007 // CHECK20-NEXT:  entry:
1008 // CHECK20-NEXT:    [[A:%.*]] = alloca [[STRUCT_ST:%.*]], align 4
1009 // CHECK20-NEXT:    call void @_ZN2StC1Ev(%struct.St* nonnull align 4 dereferenceable(8) [[A]])
1010 // CHECK20-NEXT:    [[A1:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[A]], i32 0, i32 0
1011 // CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A1]], align 4
1012 // CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
1013 // CHECK20-NEXT:    store i32 [[ADD]], i32* [[A1]], align 4
1014 // CHECK20-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[A]], i32 0, i32 1
1015 // CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[B]], align 4
1016 // CHECK20-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP1]], 1
1017 // CHECK20-NEXT:    store i32 [[ADD2]], i32* [[B]], align 4
1018 // CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* @_ZZ36parallel_master_default_firstprivatevE1y, align 4
1019 // CHECK20-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP2]], 1
1020 // CHECK20-NEXT:    store i32 [[INC]], i32* @_ZZ36parallel_master_default_firstprivatevE1y, align 4
1021 // CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* @_ZN2St1yE, align 4
1022 // CHECK20-NEXT:    [[INC3:%.*]] = add nsw i32 [[TMP3]], 1
1023 // CHECK20-NEXT:    store i32 [[INC3]], i32* @_ZN2St1yE, align 4
1024 // CHECK20-NEXT:    call void @_ZN2StD1Ev(%struct.St* nonnull align 4 dereferenceable(8) [[A]]) #[[ATTR3:[0-9]+]]
1025 // CHECK20-NEXT:    ret void
1026 //
1027 //
1028 // CHECK20-LABEL: define {{[^@]+}}@_ZN2StC1Ev
1029 // CHECK20-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
1030 // CHECK20-NEXT:  entry:
1031 // CHECK20-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
1032 // CHECK20-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
1033 // CHECK20-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
1034 // CHECK20-NEXT:    call void @_ZN2StC2Ev(%struct.St* nonnull align 4 dereferenceable(8) [[THIS1]])
1035 // CHECK20-NEXT:    ret void
1036 //
1037 //
1038 // CHECK20-LABEL: define {{[^@]+}}@_ZN2StD1Ev
1039 // CHECK20-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 {
1040 // CHECK20-NEXT:  entry:
1041 // CHECK20-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
1042 // CHECK20-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
1043 // CHECK20-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
1044 // CHECK20-NEXT:    call void @_ZN2StD2Ev(%struct.St* nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]]
1045 // CHECK20-NEXT:    ret void
1046 //
1047 //
1048 // CHECK20-LABEL: define {{[^@]+}}@_ZN2StC2Ev
1049 // CHECK20-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 {
1050 // CHECK20-NEXT:  entry:
1051 // CHECK20-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
1052 // CHECK20-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
1053 // CHECK20-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
1054 // CHECK20-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[THIS1]], i32 0, i32 0
1055 // CHECK20-NEXT:    store i32 0, i32* [[A]], align 4
1056 // CHECK20-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[THIS1]], i32 0, i32 1
1057 // CHECK20-NEXT:    store i32 0, i32* [[B]], align 4
1058 // CHECK20-NEXT:    ret void
1059 //
1060 //
1061 // CHECK20-LABEL: define {{[^@]+}}@_ZN2StD2Ev
1062 // CHECK20-SAME: (%struct.St* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 {
1063 // CHECK20-NEXT:  entry:
1064 // CHECK20-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8
1065 // CHECK20-NEXT:    store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8
1066 // CHECK20-NEXT:    [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8
1067 // CHECK20-NEXT:    ret void
1068 //
1069 //
1070 // CHECK21-LABEL: define {{[^@]+}}@_Z28parallel_master_firstprivatev
1071 // CHECK21-SAME: () #[[ATTR0:[0-9]+]] {
1072 // CHECK21-NEXT:  entry:
1073 // CHECK21-NEXT:    [[A:%.*]] = alloca i32, align 4
1074 // CHECK21-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
1075 // CHECK21-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
1076 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
1077 // CHECK21-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
1078 // CHECK21-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
1079 // CHECK21-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]])
1080 // CHECK21-NEXT:    ret void
1081 //
1082 //
1083 // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined.
1084 // CHECK21-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]]) #[[ATTR1:[0-9]+]] {
1085 // CHECK21-NEXT:  entry:
1086 // CHECK21-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1087 // CHECK21-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1088 // CHECK21-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
1089 // CHECK21-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1090 // CHECK21-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1091 // CHECK21-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
1092 // CHECK21-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
1093 // CHECK21-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1094 // CHECK21-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1095 // CHECK21-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1096 // CHECK21-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
1097 // CHECK21-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1098 // CHECK21:       omp_if.then:
1099 // CHECK21-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
1100 // CHECK21-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP4]], 1
1101 // CHECK21-NEXT:    store i32 [[INC]], i32* [[CONV]], align 8
1102 // CHECK21-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1103 // CHECK21-NEXT:    br label [[OMP_IF_END]]
1104 // CHECK21:       omp_if.end:
1105 // CHECK21-NEXT:    ret void
1106 //
1107 //
1108 // CHECK22-LABEL: define {{[^@]+}}@_Z28parallel_master_firstprivatev
1109 // CHECK22-SAME: () #[[ATTR0:[0-9]+]] {
1110 // CHECK22-NEXT:  entry:
1111 // CHECK22-NEXT:    [[A:%.*]] = alloca i32, align 4
1112 // CHECK22-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
1113 // CHECK22-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
1114 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
1115 // CHECK22-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
1116 // CHECK22-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
1117 // CHECK22-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]])
1118 // CHECK22-NEXT:    ret void
1119 //
1120 //
1121 // CHECK22-LABEL: define {{[^@]+}}@.omp_outlined.
1122 // CHECK22-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]]) #[[ATTR1:[0-9]+]] {
1123 // CHECK22-NEXT:  entry:
1124 // CHECK22-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1125 // CHECK22-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1126 // CHECK22-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
1127 // CHECK22-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1128 // CHECK22-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1129 // CHECK22-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
1130 // CHECK22-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
1131 // CHECK22-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1132 // CHECK22-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1133 // CHECK22-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1134 // CHECK22-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
1135 // CHECK22-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1136 // CHECK22:       omp_if.then:
1137 // CHECK22-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
1138 // CHECK22-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP4]], 1
1139 // CHECK22-NEXT:    store i32 [[INC]], i32* [[CONV]], align 8
1140 // CHECK22-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1141 // CHECK22-NEXT:    br label [[OMP_IF_END]]
1142 // CHECK22:       omp_if.end:
1143 // CHECK22-NEXT:    ret void
1144 //
1145 //
1146 // CHECK23-LABEL: define {{[^@]+}}@_Z28parallel_master_firstprivatev
1147 // CHECK23-SAME: () #[[ATTR0:[0-9]+]] {
1148 // CHECK23-NEXT:  entry:
1149 // CHECK23-NEXT:    [[A:%.*]] = alloca i32, align 4
1150 // CHECK23-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
1151 // CHECK23-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
1152 // CHECK23-NEXT:    store i32 [[INC]], i32* [[A]], align 4
1153 // CHECK23-NEXT:    ret void
1154 //
1155 //
1156 // CHECK24-LABEL: define {{[^@]+}}@_Z28parallel_master_firstprivatev
1157 // CHECK24-SAME: () #[[ATTR0:[0-9]+]] {
1158 // CHECK24-NEXT:  entry:
1159 // CHECK24-NEXT:    [[A:%.*]] = alloca i32, align 4
1160 // CHECK24-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
1161 // CHECK24-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
1162 // CHECK24-NEXT:    store i32 [[INC]], i32* [[A]], align 4
1163 // CHECK24-NEXT:    ret void
1164 //
1165 //
1166 // CHECK25-LABEL: define {{[^@]+}}@_Z22parallel_master_copyinv
1167 // CHECK25-SAME: () #[[ATTR0:[0-9]+]] {
1168 // CHECK25-NEXT:  entry:
1169 // CHECK25-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
1170 // CHECK25-NEXT:    ret void
1171 //
1172 //
1173 // CHECK25-LABEL: define {{[^@]+}}@.omp_outlined.
1174 // CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
1175 // CHECK25-NEXT:  entry:
1176 // CHECK25-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1177 // CHECK25-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1178 // CHECK25-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1179 // CHECK25-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1180 // CHECK25-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1181 // CHECK25-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1182 // CHECK25-NEXT:    [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @a to i8*), i64 4, i8*** @a.cache.)
1183 // CHECK25-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
1184 // CHECK25-NEXT:    [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64
1185 // CHECK25-NEXT:    [[TMP5:%.*]] = icmp ne i64 ptrtoint (i32* @a to i64), [[TMP4]]
1186 // CHECK25-NEXT:    br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
1187 // CHECK25:       copyin.not.master:
1188 // CHECK25-NEXT:    [[TMP6:%.*]] = load i32, i32* @a, align 4
1189 // CHECK25-NEXT:    store i32 [[TMP6]], i32* [[TMP3]], align 4
1190 // CHECK25-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
1191 // CHECK25:       copyin.not.master.end:
1192 // CHECK25-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
1193 // CHECK25-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1194 // CHECK25-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
1195 // CHECK25-NEXT:    br i1 [[TMP8]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1196 // CHECK25:       omp_if.then:
1197 // CHECK25-NEXT:    [[TMP9:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @a to i8*), i64 4, i8*** @a.cache.)
1198 // CHECK25-NEXT:    [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
1199 // CHECK25-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1200 // CHECK25-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP11]], 1
1201 // CHECK25-NEXT:    store i32 [[INC]], i32* [[TMP10]], align 4
1202 // CHECK25-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1203 // CHECK25-NEXT:    br label [[OMP_IF_END]]
1204 // CHECK25:       omp_if.end:
1205 // CHECK25-NEXT:    ret void
1206 //
1207 //
1208 // CHECK26-LABEL: define {{[^@]+}}@_Z22parallel_master_copyinv
1209 // CHECK26-SAME: () #[[ATTR0:[0-9]+]] {
1210 // CHECK26-NEXT:  entry:
1211 // CHECK26-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
1212 // CHECK26-NEXT:    ret void
1213 //
1214 //
1215 // CHECK26-LABEL: define {{[^@]+}}@.omp_outlined.
1216 // CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
1217 // CHECK26-NEXT:  entry:
1218 // CHECK26-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1219 // CHECK26-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1220 // CHECK26-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1221 // CHECK26-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1222 // CHECK26-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1223 // CHECK26-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1224 // CHECK26-NEXT:    [[TMP2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @a to i8*), i64 4, i8*** @a.cache.)
1225 // CHECK26-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
1226 // CHECK26-NEXT:    [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64
1227 // CHECK26-NEXT:    [[TMP5:%.*]] = icmp ne i64 ptrtoint (i32* @a to i64), [[TMP4]]
1228 // CHECK26-NEXT:    br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
1229 // CHECK26:       copyin.not.master:
1230 // CHECK26-NEXT:    [[TMP6:%.*]] = load i32, i32* @a, align 4
1231 // CHECK26-NEXT:    store i32 [[TMP6]], i32* [[TMP3]], align 4
1232 // CHECK26-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
1233 // CHECK26:       copyin.not.master.end:
1234 // CHECK26-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
1235 // CHECK26-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1236 // CHECK26-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
1237 // CHECK26-NEXT:    br i1 [[TMP8]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1238 // CHECK26:       omp_if.then:
1239 // CHECK26-NEXT:    [[TMP9:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* bitcast (i32* @a to i8*), i64 4, i8*** @a.cache.)
1240 // CHECK26-NEXT:    [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
1241 // CHECK26-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
1242 // CHECK26-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP11]], 1
1243 // CHECK26-NEXT:    store i32 [[INC]], i32* [[TMP10]], align 4
1244 // CHECK26-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1245 // CHECK26-NEXT:    br label [[OMP_IF_END]]
1246 // CHECK26:       omp_if.end:
1247 // CHECK26-NEXT:    ret void
1248 //
1249 //
1250 // CHECK27-LABEL: define {{[^@]+}}@_Z22parallel_master_copyinv
1251 // CHECK27-SAME: () #[[ATTR0:[0-9]+]] {
1252 // CHECK27-NEXT:  entry:
1253 // CHECK27-NEXT:    [[TMP0:%.*]] = load i32, i32* @a, align 4
1254 // CHECK27-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
1255 // CHECK27-NEXT:    store i32 [[INC]], i32* @a, align 4
1256 // CHECK27-NEXT:    ret void
1257 //
1258 //
1259 // CHECK28-LABEL: define {{[^@]+}}@_Z22parallel_master_copyinv
1260 // CHECK28-SAME: () #[[ATTR0:[0-9]+]] {
1261 // CHECK28-NEXT:  entry:
1262 // CHECK28-NEXT:    [[TMP0:%.*]] = load i32, i32* @a, align 4
1263 // CHECK28-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
1264 // CHECK28-NEXT:    store i32 [[INC]], i32* @a, align 4
1265 // CHECK28-NEXT:    ret void
1266 //
1267 //
1268 // CHECK29-LABEL: define {{[^@]+}}@_Z22parallel_master_copyinv
1269 // CHECK29-SAME: () #[[ATTR0:[0-9]+]] {
1270 // CHECK29-NEXT:  entry:
1271 // CHECK29-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* @a)
1272 // CHECK29-NEXT:    ret void
1273 //
1274 //
1275 // CHECK29-LABEL: define {{[^@]+}}@.omp_outlined.
1276 // CHECK29-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1:[0-9]+]] {
1277 // CHECK29-NEXT:  entry:
1278 // CHECK29-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1279 // CHECK29-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1280 // CHECK29-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
1281 // CHECK29-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1282 // CHECK29-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1283 // CHECK29-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
1284 // CHECK29-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
1285 // CHECK29-NEXT:    [[TMP1:%.*]] = ptrtoint i32* [[TMP0]] to i64
1286 // CHECK29-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[TMP1]], ptrtoint (i32* @a to i64)
1287 // CHECK29-NEXT:    br i1 [[TMP2]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
1288 // CHECK29:       copyin.not.master:
1289 // CHECK29-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
1290 // CHECK29-NEXT:    store i32 [[TMP3]], i32* @a, align 4
1291 // CHECK29-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
1292 // CHECK29:       copyin.not.master.end:
1293 // CHECK29-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1294 // CHECK29-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
1295 // CHECK29-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP5]])
1296 // CHECK29-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1297 // CHECK29-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
1298 // CHECK29-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]])
1299 // CHECK29-NEXT:    [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
1300 // CHECK29-NEXT:    br i1 [[TMP9]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1301 // CHECK29:       omp_if.then:
1302 // CHECK29-NEXT:    [[TMP10:%.*]] = load i32, i32* @a, align 4
1303 // CHECK29-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP10]], 1
1304 // CHECK29-NEXT:    store i32 [[INC]], i32* @a, align 4
1305 // CHECK29-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]])
1306 // CHECK29-NEXT:    br label [[OMP_IF_END]]
1307 // CHECK29:       omp_if.end:
1308 // CHECK29-NEXT:    ret void
1309 //
1310 //
1311 // CHECK29-LABEL: define {{[^@]+}}@_ZTW1a
1312 // CHECK29-SAME: () #[[ATTR4:[0-9]+]] comdat {
1313 // CHECK29-NEXT:    ret i32* @a
1314 //
1315 //
1316 // CHECK30-LABEL: define {{[^@]+}}@_Z22parallel_master_copyinv
1317 // CHECK30-SAME: () #[[ATTR0:[0-9]+]] {
1318 // CHECK30-NEXT:  entry:
1319 // CHECK30-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* @a)
1320 // CHECK30-NEXT:    ret void
1321 //
1322 //
1323 // CHECK30-LABEL: define {{[^@]+}}@.omp_outlined.
1324 // CHECK30-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR1:[0-9]+]] {
1325 // CHECK30-NEXT:  entry:
1326 // CHECK30-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1327 // CHECK30-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1328 // CHECK30-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
1329 // CHECK30-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1330 // CHECK30-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1331 // CHECK30-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
1332 // CHECK30-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
1333 // CHECK30-NEXT:    [[TMP1:%.*]] = ptrtoint i32* [[TMP0]] to i64
1334 // CHECK30-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[TMP1]], ptrtoint (i32* @a to i64)
1335 // CHECK30-NEXT:    br i1 [[TMP2]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
1336 // CHECK30:       copyin.not.master:
1337 // CHECK30-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
1338 // CHECK30-NEXT:    store i32 [[TMP3]], i32* @a, align 4
1339 // CHECK30-NEXT:    br label [[COPYIN_NOT_MASTER_END]]
1340 // CHECK30:       copyin.not.master.end:
1341 // CHECK30-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1342 // CHECK30-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
1343 // CHECK30-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP5]])
1344 // CHECK30-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1345 // CHECK30-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
1346 // CHECK30-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]])
1347 // CHECK30-NEXT:    [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
1348 // CHECK30-NEXT:    br i1 [[TMP9]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1349 // CHECK30:       omp_if.then:
1350 // CHECK30-NEXT:    [[TMP10:%.*]] = load i32, i32* @a, align 4
1351 // CHECK30-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP10]], 1
1352 // CHECK30-NEXT:    store i32 [[INC]], i32* @a, align 4
1353 // CHECK30-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]])
1354 // CHECK30-NEXT:    br label [[OMP_IF_END]]
1355 // CHECK30:       omp_if.end:
1356 // CHECK30-NEXT:    ret void
1357 //
1358 //
1359 // CHECK30-LABEL: define {{[^@]+}}@_ZTW1a
1360 // CHECK30-SAME: () #[[ATTR4:[0-9]+]] comdat {
1361 // CHECK30-NEXT:    ret i32* @a
1362 //
1363 //
1364 // CHECK31-LABEL: define {{[^@]+}}@_Z22parallel_master_copyinv
1365 // CHECK31-SAME: () #[[ATTR0:[0-9]+]] {
1366 // CHECK31-NEXT:  entry:
1367 // CHECK31-NEXT:    [[TMP0:%.*]] = load i32, i32* @a, align 4
1368 // CHECK31-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
1369 // CHECK31-NEXT:    store i32 [[INC]], i32* @a, align 4
1370 // CHECK31-NEXT:    ret void
1371 //
1372 //
1373 // CHECK32-LABEL: define {{[^@]+}}@_Z22parallel_master_copyinv
1374 // CHECK32-SAME: () #[[ATTR0:[0-9]+]] {
1375 // CHECK32-NEXT:  entry:
1376 // CHECK32-NEXT:    [[TMP0:%.*]] = load i32, i32* @a, align 4
1377 // CHECK32-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
1378 // CHECK32-NEXT:    store i32 [[INC]], i32* @a, align 4
1379 // CHECK32-NEXT:    ret void
1380 //
1381 //
1382 // CHECK33-LABEL: define {{[^@]+}}@_Z25parallel_master_reductionv
1383 // CHECK33-SAME: () #[[ATTR0:[0-9]+]] {
1384 // CHECK33-NEXT:  entry:
1385 // CHECK33-NEXT:    [[G:%.*]] = alloca i32, align 4
1386 // CHECK33-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[G]])
1387 // CHECK33-NEXT:    ret void
1388 //
1389 //
1390 // CHECK33-LABEL: define {{[^@]+}}@.omp_outlined.
1391 // CHECK33-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[G:%.*]]) #[[ATTR1:[0-9]+]] {
1392 // CHECK33-NEXT:  entry:
1393 // CHECK33-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1394 // CHECK33-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1395 // CHECK33-NEXT:    [[G_ADDR:%.*]] = alloca i32*, align 8
1396 // CHECK33-NEXT:    [[G1:%.*]] = alloca i32, align 4
1397 // CHECK33-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
1398 // CHECK33-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1399 // CHECK33-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1400 // CHECK33-NEXT:    store i32* [[G]], i32** [[G_ADDR]], align 8
1401 // CHECK33-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[G_ADDR]], align 8
1402 // CHECK33-NEXT:    store i32 0, i32* [[G1]], align 4
1403 // CHECK33-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1404 // CHECK33-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
1405 // CHECK33-NEXT:    [[TMP3:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1406 // CHECK33-NEXT:    [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
1407 // CHECK33-NEXT:    br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1408 // CHECK33:       omp_if.then:
1409 // CHECK33-NEXT:    store i32 1, i32* [[G1]], align 4
1410 // CHECK33-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1411 // CHECK33-NEXT:    br label [[OMP_IF_END]]
1412 // CHECK33:       omp_if.end:
1413 // CHECK33-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
1414 // CHECK33-NEXT:    [[TMP6:%.*]] = bitcast i32* [[G1]] to i8*
1415 // CHECK33-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 8
1416 // CHECK33-NEXT:    [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1417 // CHECK33-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
1418 // CHECK33-NEXT:    switch i32 [[TMP8]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1419 // CHECK33-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1420 // CHECK33-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1421 // CHECK33-NEXT:    ]
1422 // CHECK33:       .omp.reduction.case1:
1423 // CHECK33-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
1424 // CHECK33-NEXT:    [[TMP10:%.*]] = load i32, i32* [[G1]], align 4
1425 // CHECK33-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP9]], [[TMP10]]
1426 // CHECK33-NEXT:    store i32 [[ADD]], i32* [[TMP0]], align 4
1427 // CHECK33-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1428 // CHECK33-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1429 // CHECK33:       .omp.reduction.case2:
1430 // CHECK33-NEXT:    [[TMP11:%.*]] = load i32, i32* [[G1]], align 4
1431 // CHECK33-NEXT:    [[TMP12:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP11]] monotonic, align 4
1432 // CHECK33-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1433 // CHECK33:       .omp.reduction.default:
1434 // CHECK33-NEXT:    ret void
1435 //
1436 //
1437 // CHECK33-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
1438 // CHECK33-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
1439 // CHECK33-NEXT:  entry:
1440 // CHECK33-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1441 // CHECK33-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
1442 // CHECK33-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1443 // CHECK33-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
1444 // CHECK33-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1445 // CHECK33-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
1446 // CHECK33-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
1447 // CHECK33-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
1448 // CHECK33-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
1449 // CHECK33-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1450 // CHECK33-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
1451 // CHECK33-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
1452 // CHECK33-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
1453 // CHECK33-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
1454 // CHECK33-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
1455 // CHECK33-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
1456 // CHECK33-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1457 // CHECK33-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
1458 // CHECK33-NEXT:    ret void
1459 //
1460 //
1461 // CHECK34-LABEL: define {{[^@]+}}@_Z25parallel_master_reductionv
1462 // CHECK34-SAME: () #[[ATTR0:[0-9]+]] {
1463 // CHECK34-NEXT:  entry:
1464 // CHECK34-NEXT:    [[G:%.*]] = alloca i32, align 4
1465 // CHECK34-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[G]])
1466 // CHECK34-NEXT:    ret void
1467 //
1468 //
1469 // CHECK34-LABEL: define {{[^@]+}}@.omp_outlined.
1470 // CHECK34-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[G:%.*]]) #[[ATTR1:[0-9]+]] {
1471 // CHECK34-NEXT:  entry:
1472 // CHECK34-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1473 // CHECK34-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1474 // CHECK34-NEXT:    [[G_ADDR:%.*]] = alloca i32*, align 8
1475 // CHECK34-NEXT:    [[G1:%.*]] = alloca i32, align 4
1476 // CHECK34-NEXT:    [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
1477 // CHECK34-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1478 // CHECK34-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1479 // CHECK34-NEXT:    store i32* [[G]], i32** [[G_ADDR]], align 8
1480 // CHECK34-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[G_ADDR]], align 8
1481 // CHECK34-NEXT:    store i32 0, i32* [[G1]], align 4
1482 // CHECK34-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1483 // CHECK34-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
1484 // CHECK34-NEXT:    [[TMP3:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1485 // CHECK34-NEXT:    [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
1486 // CHECK34-NEXT:    br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1487 // CHECK34:       omp_if.then:
1488 // CHECK34-NEXT:    store i32 1, i32* [[G1]], align 4
1489 // CHECK34-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1490 // CHECK34-NEXT:    br label [[OMP_IF_END]]
1491 // CHECK34:       omp_if.end:
1492 // CHECK34-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
1493 // CHECK34-NEXT:    [[TMP6:%.*]] = bitcast i32* [[G1]] to i8*
1494 // CHECK34-NEXT:    store i8* [[TMP6]], i8** [[TMP5]], align 8
1495 // CHECK34-NEXT:    [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
1496 // CHECK34-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
1497 // CHECK34-NEXT:    switch i32 [[TMP8]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
1498 // CHECK34-NEXT:    i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
1499 // CHECK34-NEXT:    i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
1500 // CHECK34-NEXT:    ]
1501 // CHECK34:       .omp.reduction.case1:
1502 // CHECK34-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP0]], align 4
1503 // CHECK34-NEXT:    [[TMP10:%.*]] = load i32, i32* [[G1]], align 4
1504 // CHECK34-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP9]], [[TMP10]]
1505 // CHECK34-NEXT:    store i32 [[ADD]], i32* [[TMP0]], align 4
1506 // CHECK34-NEXT:    call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], [8 x i32]* @.gomp_critical_user_.reduction.var)
1507 // CHECK34-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1508 // CHECK34:       .omp.reduction.case2:
1509 // CHECK34-NEXT:    [[TMP11:%.*]] = load i32, i32* [[G1]], align 4
1510 // CHECK34-NEXT:    [[TMP12:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP11]] monotonic, align 4
1511 // CHECK34-NEXT:    br label [[DOTOMP_REDUCTION_DEFAULT]]
1512 // CHECK34:       .omp.reduction.default:
1513 // CHECK34-NEXT:    ret void
1514 //
1515 //
1516 // CHECK34-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
1517 // CHECK34-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
1518 // CHECK34-NEXT:  entry:
1519 // CHECK34-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1520 // CHECK34-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
1521 // CHECK34-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1522 // CHECK34-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
1523 // CHECK34-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1524 // CHECK34-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
1525 // CHECK34-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
1526 // CHECK34-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
1527 // CHECK34-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
1528 // CHECK34-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1529 // CHECK34-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
1530 // CHECK34-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
1531 // CHECK34-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
1532 // CHECK34-NEXT:    [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
1533 // CHECK34-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
1534 // CHECK34-NEXT:    [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
1535 // CHECK34-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1536 // CHECK34-NEXT:    store i32 [[ADD]], i32* [[TMP11]], align 4
1537 // CHECK34-NEXT:    ret void
1538 //
1539 //
1540 // CHECK35-LABEL: define {{[^@]+}}@_Z25parallel_master_reductionv
1541 // CHECK35-SAME: () #[[ATTR0:[0-9]+]] {
1542 // CHECK35-NEXT:  entry:
1543 // CHECK35-NEXT:    [[G:%.*]] = alloca i32, align 4
1544 // CHECK35-NEXT:    store i32 1, i32* [[G]], align 4
1545 // CHECK35-NEXT:    ret void
1546 //
1547 //
1548 // CHECK36-LABEL: define {{[^@]+}}@_Z25parallel_master_reductionv
1549 // CHECK36-SAME: () #[[ATTR0:[0-9]+]] {
1550 // CHECK36-NEXT:  entry:
1551 // CHECK36-NEXT:    [[G:%.*]] = alloca i32, align 4
1552 // CHECK36-NEXT:    store i32 1, i32* [[G]], align 4
1553 // CHECK36-NEXT:    ret void
1554 //
1555 //
1556 // CHECK37-LABEL: define {{[^@]+}}@_Z18parallel_master_ifv
1557 // CHECK37-SAME: () #[[ATTR0:[0-9]+]] {
1558 // CHECK37-NEXT:  entry:
1559 // CHECK37-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1560 // CHECK37-NEXT:    [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
1561 // CHECK37-NEXT:    store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
1562 // CHECK37-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
1563 // CHECK37-NEXT:    call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
1564 // CHECK37-NEXT:    store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
1565 // CHECK37-NEXT:    call void @.omp_outlined.(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]]) #[[ATTR2:[0-9]+]]
1566 // CHECK37-NEXT:    call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
1567 // CHECK37-NEXT:    ret void
1568 //
1569 //
1570 // CHECK37-LABEL: define {{[^@]+}}@.omp_outlined.
1571 // CHECK37-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
1572 // CHECK37-NEXT:  entry:
1573 // CHECK37-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1574 // CHECK37-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1575 // CHECK37-NEXT:    [[EXN_SLOT:%.*]] = alloca i8*, align 8
1576 // CHECK37-NEXT:    [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
1577 // CHECK37-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1578 // CHECK37-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1579 // CHECK37-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1580 // CHECK37-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1581 // CHECK37-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1582 // CHECK37-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
1583 // CHECK37-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1584 // CHECK37:       omp_if.then:
1585 // CHECK37-NEXT:    invoke void @_Z18parallel_master_ifv()
1586 // CHECK37-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
1587 // CHECK37:       invoke.cont:
1588 // CHECK37-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1589 // CHECK37-NEXT:    br label [[OMP_IF_END]]
1590 // CHECK37:       lpad:
1591 // CHECK37-NEXT:    [[TMP4:%.*]] = landingpad { i8*, i32 }
1592 // CHECK37-NEXT:    catch i8* null
1593 // CHECK37-NEXT:    [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0
1594 // CHECK37-NEXT:    store i8* [[TMP5]], i8** [[EXN_SLOT]], align 8
1595 // CHECK37-NEXT:    [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 1
1596 // CHECK37-NEXT:    store i32 [[TMP6]], i32* [[EHSELECTOR_SLOT]], align 4
1597 // CHECK37-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1598 // CHECK37-NEXT:    br label [[TERMINATE_HANDLER:%.*]]
1599 // CHECK37:       omp_if.end:
1600 // CHECK37-NEXT:    ret void
1601 // CHECK37:       terminate.handler:
1602 // CHECK37-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8
1603 // CHECK37-NEXT:    call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR4:[0-9]+]]
1604 // CHECK37-NEXT:    unreachable
1605 //
1606 //
1607 // CHECK37-LABEL: define {{[^@]+}}@__clang_call_terminate
1608 // CHECK37-SAME: (i8* [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] comdat {
1609 // CHECK37-NEXT:    [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR2]]
1610 // CHECK37-NEXT:    call void @_ZSt9terminatev() #[[ATTR4]]
1611 // CHECK37-NEXT:    unreachable
1612 //
1613 //
1614 // CHECK38-LABEL: define {{[^@]+}}@_Z18parallel_master_ifv
1615 // CHECK38-SAME: () #[[ATTR0:[0-9]+]] {
1616 // CHECK38-NEXT:  entry:
1617 // CHECK38-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1618 // CHECK38-NEXT:    [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
1619 // CHECK38-NEXT:    store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
1620 // CHECK38-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
1621 // CHECK38-NEXT:    call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
1622 // CHECK38-NEXT:    store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
1623 // CHECK38-NEXT:    call void @.omp_outlined.(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]]) #[[ATTR2:[0-9]+]]
1624 // CHECK38-NEXT:    call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
1625 // CHECK38-NEXT:    ret void
1626 //
1627 //
1628 // CHECK38-LABEL: define {{[^@]+}}@.omp_outlined.
1629 // CHECK38-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
1630 // CHECK38-NEXT:  entry:
1631 // CHECK38-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1632 // CHECK38-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1633 // CHECK38-NEXT:    [[EXN_SLOT:%.*]] = alloca i8*, align 8
1634 // CHECK38-NEXT:    [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
1635 // CHECK38-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1636 // CHECK38-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1637 // CHECK38-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1638 // CHECK38-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1639 // CHECK38-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1640 // CHECK38-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
1641 // CHECK38-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1642 // CHECK38:       omp_if.then:
1643 // CHECK38-NEXT:    invoke void @_Z18parallel_master_ifv()
1644 // CHECK38-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
1645 // CHECK38:       invoke.cont:
1646 // CHECK38-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1647 // CHECK38-NEXT:    br label [[OMP_IF_END]]
1648 // CHECK38:       lpad:
1649 // CHECK38-NEXT:    [[TMP4:%.*]] = landingpad { i8*, i32 }
1650 // CHECK38-NEXT:    catch i8* null
1651 // CHECK38-NEXT:    [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0
1652 // CHECK38-NEXT:    store i8* [[TMP5]], i8** [[EXN_SLOT]], align 8
1653 // CHECK38-NEXT:    [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 1
1654 // CHECK38-NEXT:    store i32 [[TMP6]], i32* [[EHSELECTOR_SLOT]], align 4
1655 // CHECK38-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1656 // CHECK38-NEXT:    br label [[TERMINATE_HANDLER:%.*]]
1657 // CHECK38:       omp_if.end:
1658 // CHECK38-NEXT:    ret void
1659 // CHECK38:       terminate.handler:
1660 // CHECK38-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8
1661 // CHECK38-NEXT:    call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR4:[0-9]+]]
1662 // CHECK38-NEXT:    unreachable
1663 //
1664 //
1665 // CHECK38-LABEL: define {{[^@]+}}@__clang_call_terminate
1666 // CHECK38-SAME: (i8* [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] comdat {
1667 // CHECK38-NEXT:    [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR2]]
1668 // CHECK38-NEXT:    call void @_ZSt9terminatev() #[[ATTR4]]
1669 // CHECK38-NEXT:    unreachable
1670 //
1671 //
1672 // CHECK39-LABEL: define {{[^@]+}}@_Z18parallel_master_ifv
1673 // CHECK39-SAME: () #[[ATTR0:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
1674 // CHECK39-NEXT:  entry:
1675 // CHECK39-NEXT:    invoke void @_Z18parallel_master_ifv()
1676 // CHECK39-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
1677 // CHECK39:       invoke.cont:
1678 // CHECK39-NEXT:    ret void
1679 // CHECK39:       terminate.lpad:
1680 // CHECK39-NEXT:    [[TMP0:%.*]] = landingpad { i8*, i32 }
1681 // CHECK39-NEXT:    catch i8* null
1682 // CHECK39-NEXT:    [[TMP1:%.*]] = extractvalue { i8*, i32 } [[TMP0]], 0
1683 // CHECK39-NEXT:    call void @__clang_call_terminate(i8* [[TMP1]]) #[[ATTR2:[0-9]+]]
1684 // CHECK39-NEXT:    unreachable
1685 //
1686 //
1687 // CHECK39-LABEL: define {{[^@]+}}@__clang_call_terminate
1688 // CHECK39-SAME: (i8* [[TMP0:%.*]]) #[[ATTR1:[0-9]+]] comdat {
1689 // CHECK39-NEXT:    [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR3:[0-9]+]]
1690 // CHECK39-NEXT:    call void @_ZSt9terminatev() #[[ATTR2]]
1691 // CHECK39-NEXT:    unreachable
1692 //
1693 //
1694 // CHECK40-LABEL: define {{[^@]+}}@_Z18parallel_master_ifv
1695 // CHECK40-SAME: () #[[ATTR0:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
1696 // CHECK40-NEXT:  entry:
1697 // CHECK40-NEXT:    invoke void @_Z18parallel_master_ifv()
1698 // CHECK40-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
1699 // CHECK40:       invoke.cont:
1700 // CHECK40-NEXT:    ret void
1701 // CHECK40:       terminate.lpad:
1702 // CHECK40-NEXT:    [[TMP0:%.*]] = landingpad { i8*, i32 }
1703 // CHECK40-NEXT:    catch i8* null
1704 // CHECK40-NEXT:    [[TMP1:%.*]] = extractvalue { i8*, i32 } [[TMP0]], 0
1705 // CHECK40-NEXT:    call void @__clang_call_terminate(i8* [[TMP1]]) #[[ATTR2:[0-9]+]]
1706 // CHECK40-NEXT:    unreachable
1707 //
1708 //
1709 // CHECK40-LABEL: define {{[^@]+}}@__clang_call_terminate
1710 // CHECK40-SAME: (i8* [[TMP0:%.*]]) #[[ATTR1:[0-9]+]] comdat {
1711 // CHECK40-NEXT:    [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR3:[0-9]+]]
1712 // CHECK40-NEXT:    call void @_ZSt9terminatev() #[[ATTR2]]
1713 // CHECK40-NEXT:    unreachable
1714 //
1715 //
1716 // CHECK41-LABEL: define {{[^@]+}}@main
1717 // CHECK41-SAME: () #[[ATTR0:[0-9]+]] {
1718 // CHECK41-NEXT:  entry:
1719 // CHECK41-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1720 // CHECK41-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
1721 // CHECK41-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1722 // CHECK41-NEXT:    call void @__kmpc_push_proc_bind(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 4)
1723 // CHECK41-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
1724 // CHECK41-NEXT:    call void @__kmpc_push_proc_bind(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 3)
1725 // CHECK41-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*))
1726 // CHECK41-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
1727 // CHECK41-NEXT:    ret i32 [[CALL]]
1728 //
1729 //
1730 // CHECK41-LABEL: define {{[^@]+}}@.omp_outlined.
1731 // CHECK41-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
1732 // CHECK41-NEXT:  entry:
1733 // CHECK41-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1734 // CHECK41-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1735 // CHECK41-NEXT:    [[EXN_SLOT:%.*]] = alloca i8*, align 8
1736 // CHECK41-NEXT:    [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
1737 // CHECK41-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1738 // CHECK41-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1739 // CHECK41-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1740 // CHECK41-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1741 // CHECK41-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1742 // CHECK41-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
1743 // CHECK41-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1744 // CHECK41:       omp_if.then:
1745 // CHECK41-NEXT:    invoke void @_Z3foov()
1746 // CHECK41-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
1747 // CHECK41:       invoke.cont:
1748 // CHECK41-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1749 // CHECK41-NEXT:    br label [[OMP_IF_END]]
1750 // CHECK41:       lpad:
1751 // CHECK41-NEXT:    [[TMP4:%.*]] = landingpad { i8*, i32 }
1752 // CHECK41-NEXT:    catch i8* null
1753 // CHECK41-NEXT:    [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0
1754 // CHECK41-NEXT:    store i8* [[TMP5]], i8** [[EXN_SLOT]], align 8
1755 // CHECK41-NEXT:    [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 1
1756 // CHECK41-NEXT:    store i32 [[TMP6]], i32* [[EHSELECTOR_SLOT]], align 4
1757 // CHECK41-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1758 // CHECK41-NEXT:    br label [[TERMINATE_HANDLER:%.*]]
1759 // CHECK41:       omp_if.end:
1760 // CHECK41-NEXT:    ret void
1761 // CHECK41:       terminate.handler:
1762 // CHECK41-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8
1763 // CHECK41-NEXT:    call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR6:[0-9]+]]
1764 // CHECK41-NEXT:    unreachable
1765 //
1766 //
1767 // CHECK41-LABEL: define {{[^@]+}}@__clang_call_terminate
1768 // CHECK41-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4:[0-9]+]] comdat {
1769 // CHECK41-NEXT:    [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR2:[0-9]+]]
1770 // CHECK41-NEXT:    call void @_ZSt9terminatev() #[[ATTR6]]
1771 // CHECK41-NEXT:    unreachable
1772 //
1773 //
1774 // CHECK41-LABEL: define {{[^@]+}}@.omp_outlined..1
1775 // CHECK41-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
1776 // CHECK41-NEXT:  entry:
1777 // CHECK41-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1778 // CHECK41-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1779 // CHECK41-NEXT:    [[EXN_SLOT:%.*]] = alloca i8*, align 8
1780 // CHECK41-NEXT:    [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
1781 // CHECK41-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1782 // CHECK41-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1783 // CHECK41-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1784 // CHECK41-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1785 // CHECK41-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1786 // CHECK41-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
1787 // CHECK41-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1788 // CHECK41:       omp_if.then:
1789 // CHECK41-NEXT:    invoke void @_Z3foov()
1790 // CHECK41-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
1791 // CHECK41:       invoke.cont:
1792 // CHECK41-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1793 // CHECK41-NEXT:    br label [[OMP_IF_END]]
1794 // CHECK41:       lpad:
1795 // CHECK41-NEXT:    [[TMP4:%.*]] = landingpad { i8*, i32 }
1796 // CHECK41-NEXT:    catch i8* null
1797 // CHECK41-NEXT:    [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0
1798 // CHECK41-NEXT:    store i8* [[TMP5]], i8** [[EXN_SLOT]], align 8
1799 // CHECK41-NEXT:    [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 1
1800 // CHECK41-NEXT:    store i32 [[TMP6]], i32* [[EHSELECTOR_SLOT]], align 4
1801 // CHECK41-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1802 // CHECK41-NEXT:    br label [[TERMINATE_HANDLER:%.*]]
1803 // CHECK41:       omp_if.end:
1804 // CHECK41-NEXT:    ret void
1805 // CHECK41:       terminate.handler:
1806 // CHECK41-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8
1807 // CHECK41-NEXT:    call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR6]]
1808 // CHECK41-NEXT:    unreachable
1809 //
1810 //
1811 // CHECK41-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
1812 // CHECK41-SAME: () #[[ATTR5:[0-9]+]] comdat {
1813 // CHECK41-NEXT:  entry:
1814 // CHECK41-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1815 // CHECK41-NEXT:    call void @__kmpc_push_proc_bind(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 2)
1816 // CHECK41-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*))
1817 // CHECK41-NEXT:    ret i32 0
1818 //
1819 //
1820 // CHECK41-LABEL: define {{[^@]+}}@.omp_outlined..2
1821 // CHECK41-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
1822 // CHECK41-NEXT:  entry:
1823 // CHECK41-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1824 // CHECK41-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1825 // CHECK41-NEXT:    [[EXN_SLOT:%.*]] = alloca i8*, align 8
1826 // CHECK41-NEXT:    [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
1827 // CHECK41-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1828 // CHECK41-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1829 // CHECK41-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1830 // CHECK41-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1831 // CHECK41-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1832 // CHECK41-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
1833 // CHECK41-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1834 // CHECK41:       omp_if.then:
1835 // CHECK41-NEXT:    invoke void @_Z3foov()
1836 // CHECK41-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
1837 // CHECK41:       invoke.cont:
1838 // CHECK41-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1839 // CHECK41-NEXT:    br label [[OMP_IF_END]]
1840 // CHECK41:       lpad:
1841 // CHECK41-NEXT:    [[TMP4:%.*]] = landingpad { i8*, i32 }
1842 // CHECK41-NEXT:    catch i8* null
1843 // CHECK41-NEXT:    [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0
1844 // CHECK41-NEXT:    store i8* [[TMP5]], i8** [[EXN_SLOT]], align 8
1845 // CHECK41-NEXT:    [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 1
1846 // CHECK41-NEXT:    store i32 [[TMP6]], i32* [[EHSELECTOR_SLOT]], align 4
1847 // CHECK41-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1848 // CHECK41-NEXT:    br label [[TERMINATE_HANDLER:%.*]]
1849 // CHECK41:       omp_if.end:
1850 // CHECK41-NEXT:    ret void
1851 // CHECK41:       terminate.handler:
1852 // CHECK41-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8
1853 // CHECK41-NEXT:    call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR6]]
1854 // CHECK41-NEXT:    unreachable
1855 //
1856 //
1857 // CHECK42-LABEL: define {{[^@]+}}@main
1858 // CHECK42-SAME: () #[[ATTR0:[0-9]+]] {
1859 // CHECK42-NEXT:  entry:
1860 // CHECK42-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1861 // CHECK42-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
1862 // CHECK42-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1863 // CHECK42-NEXT:    call void @__kmpc_push_proc_bind(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 4)
1864 // CHECK42-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
1865 // CHECK42-NEXT:    call void @__kmpc_push_proc_bind(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 3)
1866 // CHECK42-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*))
1867 // CHECK42-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
1868 // CHECK42-NEXT:    ret i32 [[CALL]]
1869 //
1870 //
1871 // CHECK42-LABEL: define {{[^@]+}}@.omp_outlined.
1872 // CHECK42-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
1873 // CHECK42-NEXT:  entry:
1874 // CHECK42-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1875 // CHECK42-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1876 // CHECK42-NEXT:    [[EXN_SLOT:%.*]] = alloca i8*, align 8
1877 // CHECK42-NEXT:    [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
1878 // CHECK42-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1879 // CHECK42-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1880 // CHECK42-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1881 // CHECK42-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1882 // CHECK42-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1883 // CHECK42-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
1884 // CHECK42-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1885 // CHECK42:       omp_if.then:
1886 // CHECK42-NEXT:    invoke void @_Z3foov()
1887 // CHECK42-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
1888 // CHECK42:       invoke.cont:
1889 // CHECK42-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1890 // CHECK42-NEXT:    br label [[OMP_IF_END]]
1891 // CHECK42:       lpad:
1892 // CHECK42-NEXT:    [[TMP4:%.*]] = landingpad { i8*, i32 }
1893 // CHECK42-NEXT:    catch i8* null
1894 // CHECK42-NEXT:    [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0
1895 // CHECK42-NEXT:    store i8* [[TMP5]], i8** [[EXN_SLOT]], align 8
1896 // CHECK42-NEXT:    [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 1
1897 // CHECK42-NEXT:    store i32 [[TMP6]], i32* [[EHSELECTOR_SLOT]], align 4
1898 // CHECK42-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1899 // CHECK42-NEXT:    br label [[TERMINATE_HANDLER:%.*]]
1900 // CHECK42:       omp_if.end:
1901 // CHECK42-NEXT:    ret void
1902 // CHECK42:       terminate.handler:
1903 // CHECK42-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8
1904 // CHECK42-NEXT:    call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR6:[0-9]+]]
1905 // CHECK42-NEXT:    unreachable
1906 //
1907 //
1908 // CHECK42-LABEL: define {{[^@]+}}@__clang_call_terminate
1909 // CHECK42-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4:[0-9]+]] comdat {
1910 // CHECK42-NEXT:    [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR2:[0-9]+]]
1911 // CHECK42-NEXT:    call void @_ZSt9terminatev() #[[ATTR6]]
1912 // CHECK42-NEXT:    unreachable
1913 //
1914 //
1915 // CHECK42-LABEL: define {{[^@]+}}@.omp_outlined..1
1916 // CHECK42-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
1917 // CHECK42-NEXT:  entry:
1918 // CHECK42-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1919 // CHECK42-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1920 // CHECK42-NEXT:    [[EXN_SLOT:%.*]] = alloca i8*, align 8
1921 // CHECK42-NEXT:    [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
1922 // CHECK42-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1923 // CHECK42-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1924 // CHECK42-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1925 // CHECK42-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1926 // CHECK42-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1927 // CHECK42-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
1928 // CHECK42-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1929 // CHECK42:       omp_if.then:
1930 // CHECK42-NEXT:    invoke void @_Z3foov()
1931 // CHECK42-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
1932 // CHECK42:       invoke.cont:
1933 // CHECK42-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1934 // CHECK42-NEXT:    br label [[OMP_IF_END]]
1935 // CHECK42:       lpad:
1936 // CHECK42-NEXT:    [[TMP4:%.*]] = landingpad { i8*, i32 }
1937 // CHECK42-NEXT:    catch i8* null
1938 // CHECK42-NEXT:    [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0
1939 // CHECK42-NEXT:    store i8* [[TMP5]], i8** [[EXN_SLOT]], align 8
1940 // CHECK42-NEXT:    [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 1
1941 // CHECK42-NEXT:    store i32 [[TMP6]], i32* [[EHSELECTOR_SLOT]], align 4
1942 // CHECK42-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1943 // CHECK42-NEXT:    br label [[TERMINATE_HANDLER:%.*]]
1944 // CHECK42:       omp_if.end:
1945 // CHECK42-NEXT:    ret void
1946 // CHECK42:       terminate.handler:
1947 // CHECK42-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8
1948 // CHECK42-NEXT:    call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR6]]
1949 // CHECK42-NEXT:    unreachable
1950 //
1951 //
1952 // CHECK42-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
1953 // CHECK42-SAME: () #[[ATTR5:[0-9]+]] comdat {
1954 // CHECK42-NEXT:  entry:
1955 // CHECK42-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1956 // CHECK42-NEXT:    call void @__kmpc_push_proc_bind(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 2)
1957 // CHECK42-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*))
1958 // CHECK42-NEXT:    ret i32 0
1959 //
1960 //
1961 // CHECK42-LABEL: define {{[^@]+}}@.omp_outlined..2
1962 // CHECK42-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
1963 // CHECK42-NEXT:  entry:
1964 // CHECK42-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1965 // CHECK42-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1966 // CHECK42-NEXT:    [[EXN_SLOT:%.*]] = alloca i8*, align 8
1967 // CHECK42-NEXT:    [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
1968 // CHECK42-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1969 // CHECK42-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1970 // CHECK42-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1971 // CHECK42-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1972 // CHECK42-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1973 // CHECK42-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
1974 // CHECK42-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1975 // CHECK42:       omp_if.then:
1976 // CHECK42-NEXT:    invoke void @_Z3foov()
1977 // CHECK42-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
1978 // CHECK42:       invoke.cont:
1979 // CHECK42-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1980 // CHECK42-NEXT:    br label [[OMP_IF_END]]
1981 // CHECK42:       lpad:
1982 // CHECK42-NEXT:    [[TMP4:%.*]] = landingpad { i8*, i32 }
1983 // CHECK42-NEXT:    catch i8* null
1984 // CHECK42-NEXT:    [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0
1985 // CHECK42-NEXT:    store i8* [[TMP5]], i8** [[EXN_SLOT]], align 8
1986 // CHECK42-NEXT:    [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 1
1987 // CHECK42-NEXT:    store i32 [[TMP6]], i32* [[EHSELECTOR_SLOT]], align 4
1988 // CHECK42-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1989 // CHECK42-NEXT:    br label [[TERMINATE_HANDLER:%.*]]
1990 // CHECK42:       omp_if.end:
1991 // CHECK42-NEXT:    ret void
1992 // CHECK42:       terminate.handler:
1993 // CHECK42-NEXT:    [[EXN:%.*]] = load i8*, i8** [[EXN_SLOT]], align 8
1994 // CHECK42-NEXT:    call void @__clang_call_terminate(i8* [[EXN]]) #[[ATTR6]]
1995 // CHECK42-NEXT:    unreachable
1996 //
1997 //
1998 // CHECK43-LABEL: define {{[^@]+}}@main
1999 // CHECK43-SAME: () #[[ATTR0:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
2000 // CHECK43-NEXT:  entry:
2001 // CHECK43-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2002 // CHECK43-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2003 // CHECK43-NEXT:    invoke void @_Z3foov()
2004 // CHECK43-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
2005 // CHECK43:       invoke.cont:
2006 // CHECK43-NEXT:    invoke void @_Z3foov()
2007 // CHECK43-NEXT:    to label [[INVOKE_CONT1:%.*]] unwind label [[TERMINATE_LPAD]]
2008 // CHECK43:       invoke.cont1:
2009 // CHECK43-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
2010 // CHECK43-NEXT:    ret i32 [[CALL]]
2011 // CHECK43:       terminate.lpad:
2012 // CHECK43-NEXT:    [[TMP0:%.*]] = landingpad { i8*, i32 }
2013 // CHECK43-NEXT:    catch i8* null
2014 // CHECK43-NEXT:    [[TMP1:%.*]] = extractvalue { i8*, i32 } [[TMP0]], 0
2015 // CHECK43-NEXT:    call void @__clang_call_terminate(i8* [[TMP1]]) #[[ATTR4:[0-9]+]]
2016 // CHECK43-NEXT:    unreachable
2017 //
2018 //
2019 // CHECK43-LABEL: define {{[^@]+}}@__clang_call_terminate
2020 // CHECK43-SAME: (i8* [[TMP0:%.*]]) #[[ATTR2:[0-9]+]] comdat {
2021 // CHECK43-NEXT:    [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR5:[0-9]+]]
2022 // CHECK43-NEXT:    call void @_ZSt9terminatev() #[[ATTR4]]
2023 // CHECK43-NEXT:    unreachable
2024 //
2025 //
2026 // CHECK43-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
2027 // CHECK43-SAME: () #[[ATTR3:[0-9]+]] comdat personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
2028 // CHECK43-NEXT:  entry:
2029 // CHECK43-NEXT:    invoke void @_Z3foov()
2030 // CHECK43-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
2031 // CHECK43:       invoke.cont:
2032 // CHECK43-NEXT:    ret i32 0
2033 // CHECK43:       terminate.lpad:
2034 // CHECK43-NEXT:    [[TMP0:%.*]] = landingpad { i8*, i32 }
2035 // CHECK43-NEXT:    catch i8* null
2036 // CHECK43-NEXT:    [[TMP1:%.*]] = extractvalue { i8*, i32 } [[TMP0]], 0
2037 // CHECK43-NEXT:    call void @__clang_call_terminate(i8* [[TMP1]]) #[[ATTR4]]
2038 // CHECK43-NEXT:    unreachable
2039 //
2040 //
2041 // CHECK44-LABEL: define {{[^@]+}}@main
2042 // CHECK44-SAME: () #[[ATTR0:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
2043 // CHECK44-NEXT:  entry:
2044 // CHECK44-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2045 // CHECK44-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2046 // CHECK44-NEXT:    invoke void @_Z3foov()
2047 // CHECK44-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
2048 // CHECK44:       invoke.cont:
2049 // CHECK44-NEXT:    invoke void @_Z3foov()
2050 // CHECK44-NEXT:    to label [[INVOKE_CONT1:%.*]] unwind label [[TERMINATE_LPAD]]
2051 // CHECK44:       invoke.cont1:
2052 // CHECK44-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIiET_v()
2053 // CHECK44-NEXT:    ret i32 [[CALL]]
2054 // CHECK44:       terminate.lpad:
2055 // CHECK44-NEXT:    [[TMP0:%.*]] = landingpad { i8*, i32 }
2056 // CHECK44-NEXT:    catch i8* null
2057 // CHECK44-NEXT:    [[TMP1:%.*]] = extractvalue { i8*, i32 } [[TMP0]], 0
2058 // CHECK44-NEXT:    call void @__clang_call_terminate(i8* [[TMP1]]) #[[ATTR4:[0-9]+]]
2059 // CHECK44-NEXT:    unreachable
2060 //
2061 //
2062 // CHECK44-LABEL: define {{[^@]+}}@__clang_call_terminate
2063 // CHECK44-SAME: (i8* [[TMP0:%.*]]) #[[ATTR2:[0-9]+]] comdat {
2064 // CHECK44-NEXT:    [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR5:[0-9]+]]
2065 // CHECK44-NEXT:    call void @_ZSt9terminatev() #[[ATTR4]]
2066 // CHECK44-NEXT:    unreachable
2067 //
2068 //
2069 // CHECK44-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
2070 // CHECK44-SAME: () #[[ATTR3:[0-9]+]] comdat personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
2071 // CHECK44-NEXT:  entry:
2072 // CHECK44-NEXT:    invoke void @_Z3foov()
2073 // CHECK44-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
2074 // CHECK44:       invoke.cont:
2075 // CHECK44-NEXT:    ret i32 0
2076 // CHECK44:       terminate.lpad:
2077 // CHECK44-NEXT:    [[TMP0:%.*]] = landingpad { i8*, i32 }
2078 // CHECK44-NEXT:    catch i8* null
2079 // CHECK44-NEXT:    [[TMP1:%.*]] = extractvalue { i8*, i32 } [[TMP0]], 0
2080 // CHECK44-NEXT:    call void @__clang_call_terminate(i8* [[TMP1]]) #[[ATTR4]]
2081 // CHECK44-NEXT:    unreachable
2082 //
2083 //
2084 // CHECK45-LABEL: define {{[^@]+}}@_Z24parallel_master_allocatev
2085 // CHECK45-SAME: () #[[ATTR0:[0-9]+]] {
2086 // CHECK45-NEXT:  entry:
2087 // CHECK45-NEXT:    [[A:%.*]] = alloca i32, align 4
2088 // CHECK45-NEXT:    [[MYALLOC:%.*]] = alloca i8**, align 8
2089 // CHECK45-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
2090 // CHECK45-NEXT:    store i8** null, i8*** [[MYALLOC]], align 8
2091 // CHECK45-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
2092 // CHECK45-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
2093 // CHECK45-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
2094 // CHECK45-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
2095 // CHECK45-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i8***)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]], i8*** [[MYALLOC]])
2096 // CHECK45-NEXT:    ret void
2097 //
2098 //
2099 // CHECK45-LABEL: define {{[^@]+}}@.omp_outlined.
2100 // CHECK45-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i8*** nonnull align 8 dereferenceable(8) [[MYALLOC:%.*]]) #[[ATTR1:[0-9]+]] {
2101 // CHECK45-NEXT:  entry:
2102 // CHECK45-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2103 // CHECK45-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2104 // CHECK45-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
2105 // CHECK45-NEXT:    [[MYALLOC_ADDR:%.*]] = alloca i8***, align 8
2106 // CHECK45-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2107 // CHECK45-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2108 // CHECK45-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
2109 // CHECK45-NEXT:    store i8*** [[MYALLOC]], i8**** [[MYALLOC_ADDR]], align 8
2110 // CHECK45-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
2111 // CHECK45-NEXT:    [[TMP0:%.*]] = load i8***, i8**** [[MYALLOC_ADDR]], align 8
2112 // CHECK45-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2113 // CHECK45-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
2114 // CHECK45-NEXT:    [[TMP3:%.*]] = load i8**, i8*** [[TMP0]], align 8
2115 // CHECK45-NEXT:    [[CONV1:%.*]] = bitcast i8** [[TMP3]] to i8*
2116 // CHECK45-NEXT:    [[DOTA__VOID_ADDR:%.*]] = call i8* @__kmpc_alloc(i32 [[TMP2]], i64 4, i8* [[CONV1]])
2117 // CHECK45-NEXT:    [[DOTA__ADDR:%.*]] = bitcast i8* [[DOTA__VOID_ADDR]] to i32*
2118 // CHECK45-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
2119 // CHECK45-NEXT:    store i32 [[TMP4]], i32* [[DOTA__ADDR]], align 4
2120 // CHECK45-NEXT:    [[TMP5:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
2121 // CHECK45-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
2122 // CHECK45-NEXT:    br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
2123 // CHECK45:       omp_if.then:
2124 // CHECK45-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTA__ADDR]], align 4
2125 // CHECK45-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP7]], 1
2126 // CHECK45-NEXT:    store i32 [[INC]], i32* [[DOTA__ADDR]], align 4
2127 // CHECK45-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
2128 // CHECK45-NEXT:    br label [[OMP_IF_END]]
2129 // CHECK45:       omp_if.end:
2130 // CHECK45-NEXT:    [[TMP8:%.*]] = bitcast i32* [[DOTA__ADDR]] to i8*
2131 // CHECK45-NEXT:    [[TMP9:%.*]] = load i8**, i8*** [[TMP0]], align 8
2132 // CHECK45-NEXT:    [[CONV2:%.*]] = bitcast i8** [[TMP9]] to i8*
2133 // CHECK45-NEXT:    call void @__kmpc_free(i32 [[TMP2]], i8* [[TMP8]], i8* [[CONV2]])
2134 // CHECK45-NEXT:    ret void
2135 //
2136 //
2137 // CHECK46-LABEL: define {{[^@]+}}@_Z24parallel_master_allocatev
2138 // CHECK46-SAME: () #[[ATTR0:[0-9]+]] {
2139 // CHECK46-NEXT:  entry:
2140 // CHECK46-NEXT:    [[A:%.*]] = alloca i32, align 4
2141 // CHECK46-NEXT:    [[MYALLOC:%.*]] = alloca i8**, align 8
2142 // CHECK46-NEXT:    [[A_CASTED:%.*]] = alloca i64, align 8
2143 // CHECK46-NEXT:    store i8** null, i8*** [[MYALLOC]], align 8
2144 // CHECK46-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
2145 // CHECK46-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
2146 // CHECK46-NEXT:    store i32 [[TMP0]], i32* [[CONV]], align 4
2147 // CHECK46-NEXT:    [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
2148 // CHECK46-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i8***)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]], i8*** [[MYALLOC]])
2149 // CHECK46-NEXT:    ret void
2150 //
2151 //
2152 // CHECK46-LABEL: define {{[^@]+}}@.omp_outlined.
2153 // CHECK46-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i8*** nonnull align 8 dereferenceable(8) [[MYALLOC:%.*]]) #[[ATTR1:[0-9]+]] {
2154 // CHECK46-NEXT:  entry:
2155 // CHECK46-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2156 // CHECK46-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2157 // CHECK46-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
2158 // CHECK46-NEXT:    [[MYALLOC_ADDR:%.*]] = alloca i8***, align 8
2159 // CHECK46-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2160 // CHECK46-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2161 // CHECK46-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
2162 // CHECK46-NEXT:    store i8*** [[MYALLOC]], i8**** [[MYALLOC_ADDR]], align 8
2163 // CHECK46-NEXT:    [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
2164 // CHECK46-NEXT:    [[TMP0:%.*]] = load i8***, i8**** [[MYALLOC_ADDR]], align 8
2165 // CHECK46-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2166 // CHECK46-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
2167 // CHECK46-NEXT:    [[TMP3:%.*]] = load i8**, i8*** [[TMP0]], align 8
2168 // CHECK46-NEXT:    [[CONV1:%.*]] = bitcast i8** [[TMP3]] to i8*
2169 // CHECK46-NEXT:    [[DOTA__VOID_ADDR:%.*]] = call i8* @__kmpc_alloc(i32 [[TMP2]], i64 4, i8* [[CONV1]])
2170 // CHECK46-NEXT:    [[DOTA__ADDR:%.*]] = bitcast i8* [[DOTA__VOID_ADDR]] to i32*
2171 // CHECK46-NEXT:    [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
2172 // CHECK46-NEXT:    store i32 [[TMP4]], i32* [[DOTA__ADDR]], align 4
2173 // CHECK46-NEXT:    [[TMP5:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
2174 // CHECK46-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
2175 // CHECK46-NEXT:    br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
2176 // CHECK46:       omp_if.then:
2177 // CHECK46-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTA__ADDR]], align 4
2178 // CHECK46-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP7]], 1
2179 // CHECK46-NEXT:    store i32 [[INC]], i32* [[DOTA__ADDR]], align 4
2180 // CHECK46-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
2181 // CHECK46-NEXT:    br label [[OMP_IF_END]]
2182 // CHECK46:       omp_if.end:
2183 // CHECK46-NEXT:    [[TMP8:%.*]] = bitcast i32* [[DOTA__ADDR]] to i8*
2184 // CHECK46-NEXT:    [[TMP9:%.*]] = load i8**, i8*** [[TMP0]], align 8
2185 // CHECK46-NEXT:    [[CONV2:%.*]] = bitcast i8** [[TMP9]] to i8*
2186 // CHECK46-NEXT:    call void @__kmpc_free(i32 [[TMP2]], i8* [[TMP8]], i8* [[CONV2]])
2187 // CHECK46-NEXT:    ret void
2188 //
2189 //
2190 // CHECK47-LABEL: define {{[^@]+}}@_Z24parallel_master_allocatev
2191 // CHECK47-SAME: () #[[ATTR0:[0-9]+]] {
2192 // CHECK47-NEXT:  entry:
2193 // CHECK47-NEXT:    [[A:%.*]] = alloca i32, align 4
2194 // CHECK47-NEXT:    [[MYALLOC:%.*]] = alloca i8**, align 8
2195 // CHECK47-NEXT:    store i8** null, i8*** [[MYALLOC]], align 8
2196 // CHECK47-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
2197 // CHECK47-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
2198 // CHECK47-NEXT:    store i32 [[INC]], i32* [[A]], align 4
2199 // CHECK47-NEXT:    ret void
2200 //
2201 //
2202 // CHECK48-LABEL: define {{[^@]+}}@_Z24parallel_master_allocatev
2203 // CHECK48-SAME: () #[[ATTR0:[0-9]+]] {
2204 // CHECK48-NEXT:  entry:
2205 // CHECK48-NEXT:    [[A:%.*]] = alloca i32, align 4
2206 // CHECK48-NEXT:    [[MYALLOC:%.*]] = alloca i8**, align 8
2207 // CHECK48-NEXT:    store i8** null, i8*** [[MYALLOC]], align 8
2208 // CHECK48-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 4
2209 // CHECK48-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
2210 // CHECK48-NEXT:    store i32 [[INC]], i32* [[A]], align 4
2211 // CHECK48-NEXT:    ret void
2212 //
2213