1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // RUN: %clang_cc1 -verify -fopenmp -x c++ -emit-llvm %s -triple x86_64-unknown-linux -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK1
3 // RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
4 // RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
5 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-enable-irbuilder -DIRBUILDER -x c++ -emit-llvm %s -triple x86_64-unknown-linux -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK3
6 // RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -DIRBUILDER -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
7 // RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -DIRBUILDER -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -gno-column-info -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4
8 
9 // RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -emit-llvm %s -triple x86_64-unknown-linux -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
10 // RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
11 // RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
12 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-enable-irbuilder -x c++ -emit-llvm %s -triple x86_64-unknown-linux -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
13 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-enable-irbuilder -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
14 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-enable-irbuilder -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
15 // expected-no-diagnostics
16 #ifndef HEADER
17 #define HEADER
18 
19 
template <class T>
void foo(T argc) {} // No-op template; instantiated from inside the parallel regions so the outlined bodies contain a (potentially throwing) call, forcing invoke/terminate.lpad codegen under -fexceptions.
22 
template <typename T>
int tmain(T argc) {
  typedef double (*chunk_t)[argc[0][0]]; // Variably-modified type: the array bound depends on runtime data, so the bound must be captured into the outlined region's context struct.
#pragma omp parallel
  {
  foo(argc);
  chunk_t var;(void)var[0][0]; // Indexing through the VM type inside the region requires the captured VLA bound (checked as the i64 field of %struct.anon.4).
  }
  return 0;
}
33 
int global; // File-scope variable named in shared()/private() clauses; shared uses point at @global directly, private ones get a fresh alloca.
int main (int argc, char **argv) {
  int a[argc]; // VLA: both the array pointer and its i64 bound must be captured into the outlined context struct.
#pragma omp parallel shared(global, a) default(none)
  foo(a[1]), a[1] = global; // Region 1: shared VLA + shared global; checks {bound, i32*} capture and a direct load from @global.
#ifndef IRBUILDER
// TODO: Support for privates in IRBuilder.
#pragma omp parallel private(global, a) default(none)
#pragma omp parallel shared(global, a) default(none)
  foo(a[1]), a[1] = global; // Region 2 (nested): outer privatizes global and a, so the inner region captures the private copies (i32* for GLOBAL in %struct.anon.1).
// FIXME: IRBuilder crashes in void llvm::OpenMPIRBuilder::finalize()
// Assertion `Extractor.isEligible() && "Expected OpenMP outlining to be possible!"' failed.
#pragma omp parallel shared(global, a) default(none)
#pragma omp parallel shared(global, a) default(none)
  foo(a[1]), a[1] = global; // Region 3 (nested, shared): inner context struct is rebuilt from the outer region's captured values.
#endif // IRBUILDER
  return tmain(argv); // Instantiates tmain<char**>, exercising variably-modified type capture.
}
52 
53 
54 
55 
56 
57 
58 
59 
60 
61 
62 
63 
64 // Note that OpenMPIRBuilder puts the trailing arguments in a different order:
65 // arguments that are wrapped into additional pointers precede the other
66 // arguments. This is expected and not problematic because both the call and the
67 // function are generated from the same place, and the function is internal.
68 
69 
70 
71 
72 #endif
73 // CHECK1-LABEL: define {{[^@]+}}@main
74 // CHECK1-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
75 // CHECK1-NEXT:  entry:
76 // CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
77 // CHECK1-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
78 // CHECK1-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
79 // CHECK1-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
80 // CHECK1-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
81 // CHECK1-NEXT:    [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
82 // CHECK1-NEXT:    [[OMP_OUTLINED_ARG_AGG_1:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
83 // CHECK1-NEXT:    [[OMP_OUTLINED_ARG_AGG_2:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 8
84 // CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
85 // CHECK1-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
86 // CHECK1-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
87 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
88 // CHECK1-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
89 // CHECK1-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
90 // CHECK1-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
91 // CHECK1-NEXT:    [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16
92 // CHECK1-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
93 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
94 // CHECK1-NEXT:    store i64 [[TMP1]], i64* [[TMP3]], align 8
95 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
96 // CHECK1-NEXT:    store i32* [[VLA]], i32** [[TMP4]], align 8
97 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]])
98 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]], i32 0, i32 0
99 // CHECK1-NEXT:    store i64 [[TMP1]], i64* [[TMP5]], align 8
100 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]])
101 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_2]], i32 0, i32 0
102 // CHECK1-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8
103 // CHECK1-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_2]], i32 0, i32 1
104 // CHECK1-NEXT:    store i32* [[VLA]], i32** [[TMP7]], align 8
105 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.2*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_2]])
106 // CHECK1-NEXT:    [[TMP8:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
107 // CHECK1-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIPPcEiT_(i8** [[TMP8]])
108 // CHECK1-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
109 // CHECK1-NEXT:    [[TMP9:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
110 // CHECK1-NEXT:    call void @llvm.stackrestore(i8* [[TMP9]])
111 // CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[RETVAL]], align 4
112 // CHECK1-NEXT:    ret i32 [[TMP10]]
113 //
114 //
115 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
116 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon* noalias [[__CONTEXT:%.*]]) #[[ATTR2:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
117 // CHECK1-NEXT:  entry:
118 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
119 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
120 // CHECK1-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
121 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
122 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
123 // CHECK1-NEXT:    store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
124 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
125 // CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
126 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8
127 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1
128 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
129 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1
130 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
131 // CHECK1-NEXT:    invoke void @_Z3fooIiEvT_(i32 [[TMP5]])
132 // CHECK1-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
133 // CHECK1:       invoke.cont:
134 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* @global, align 4
135 // CHECK1-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1
136 // CHECK1-NEXT:    store i32 [[TMP6]], i32* [[ARRAYIDX1]], align 4
137 // CHECK1-NEXT:    ret void
138 // CHECK1:       terminate.lpad:
139 // CHECK1-NEXT:    [[TMP7:%.*]] = landingpad { i8*, i32 }
140 // CHECK1-NEXT:    catch i8* null
141 // CHECK1-NEXT:    [[TMP8:%.*]] = extractvalue { i8*, i32 } [[TMP7]], 0
142 // CHECK1-NEXT:    call void @__clang_call_terminate(i8* [[TMP8]]) #[[ATTR6:[0-9]+]]
143 // CHECK1-NEXT:    unreachable
144 //
145 //
146 // CHECK1-LABEL: define {{[^@]+}}@_Z3fooIiEvT_
147 // CHECK1-SAME: (i32 [[ARGC:%.*]]) #[[ATTR3:[0-9]+]] comdat {
148 // CHECK1-NEXT:  entry:
149 // CHECK1-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
150 // CHECK1-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
151 // CHECK1-NEXT:    ret void
152 //
153 //
154 // CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate
155 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4:[0-9]+]] comdat {
156 // CHECK1-NEXT:    [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR5:[0-9]+]]
157 // CHECK1-NEXT:    call void @_ZSt9terminatev() #[[ATTR6]]
158 // CHECK1-NEXT:    unreachable
159 //
160 //
161 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
162 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias [[__CONTEXT:%.*]]) #[[ATTR2]] {
163 // CHECK1-NEXT:  entry:
164 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
165 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
166 // CHECK1-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
167 // CHECK1-NEXT:    [[GLOBAL:%.*]] = alloca i32, align 4
168 // CHECK1-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
169 // CHECK1-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
170 // CHECK1-NEXT:    [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 8
171 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
172 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
173 // CHECK1-NEXT:    store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
174 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
175 // CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
176 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8
177 // CHECK1-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
178 // CHECK1-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
179 // CHECK1-NEXT:    [[VLA:%.*]] = alloca i32, i64 [[TMP2]], align 16
180 // CHECK1-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
181 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
182 // CHECK1-NEXT:    store i64 [[TMP2]], i64* [[TMP4]], align 8
183 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
184 // CHECK1-NEXT:    store i32* [[VLA]], i32** [[TMP5]], align 8
185 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2
186 // CHECK1-NEXT:    store i32* [[GLOBAL]], i32** [[TMP6]], align 8
187 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.1*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]])
188 // CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
189 // CHECK1-NEXT:    call void @llvm.stackrestore(i8* [[TMP7]])
190 // CHECK1-NEXT:    ret void
191 //
192 //
193 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
194 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias [[__CONTEXT:%.*]]) #[[ATTR2]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
195 // CHECK1-NEXT:  entry:
196 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
197 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
198 // CHECK1-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 8
199 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
200 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
201 // CHECK1-NEXT:    store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 8
202 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 8
203 // CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP0]], i32 0, i32 0
204 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8
205 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 1
206 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
207 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 2
208 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[TMP5]], align 8
209 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1
210 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
211 // CHECK1-NEXT:    invoke void @_Z3fooIiEvT_(i32 [[TMP7]])
212 // CHECK1-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
213 // CHECK1:       invoke.cont:
214 // CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP6]], align 4
215 // CHECK1-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1
216 // CHECK1-NEXT:    store i32 [[TMP8]], i32* [[ARRAYIDX1]], align 4
217 // CHECK1-NEXT:    ret void
218 // CHECK1:       terminate.lpad:
219 // CHECK1-NEXT:    [[TMP9:%.*]] = landingpad { i8*, i32 }
220 // CHECK1-NEXT:    catch i8* null
221 // CHECK1-NEXT:    [[TMP10:%.*]] = extractvalue { i8*, i32 } [[TMP9]], 0
222 // CHECK1-NEXT:    call void @__clang_call_terminate(i8* [[TMP10]]) #[[ATTR6]]
223 // CHECK1-NEXT:    unreachable
224 //
225 //
226 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
227 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon.2* noalias [[__CONTEXT:%.*]]) #[[ATTR2]] {
228 // CHECK1-NEXT:  entry:
229 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
230 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
231 // CHECK1-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.2*, align 8
232 // CHECK1-NEXT:    [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_3:%.*]], align 8
233 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
234 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
235 // CHECK1-NEXT:    store %struct.anon.2* [[__CONTEXT]], %struct.anon.2** [[__CONTEXT_ADDR]], align 8
236 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.anon.2*, %struct.anon.2** [[__CONTEXT_ADDR]], align 8
237 // CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_2:%.*]], %struct.anon.2* [[TMP0]], i32 0, i32 0
238 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8
239 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[TMP0]], i32 0, i32 1
240 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
241 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
242 // CHECK1-NEXT:    store i64 [[TMP2]], i64* [[TMP5]], align 8
243 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
244 // CHECK1-NEXT:    store i32* [[TMP4]], i32** [[TMP6]], align 8
245 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.3*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]])
246 // CHECK1-NEXT:    ret void
247 //
248 //
249 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4
250 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon.3* noalias [[__CONTEXT:%.*]]) #[[ATTR2]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
251 // CHECK1-NEXT:  entry:
252 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
253 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
254 // CHECK1-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.3*, align 8
255 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
256 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
257 // CHECK1-NEXT:    store %struct.anon.3* [[__CONTEXT]], %struct.anon.3** [[__CONTEXT_ADDR]], align 8
258 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.anon.3*, %struct.anon.3** [[__CONTEXT_ADDR]], align 8
259 // CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_3:%.*]], %struct.anon.3* [[TMP0]], i32 0, i32 0
260 // CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8
261 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[TMP0]], i32 0, i32 1
262 // CHECK1-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8
263 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1
264 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
265 // CHECK1-NEXT:    invoke void @_Z3fooIiEvT_(i32 [[TMP5]])
266 // CHECK1-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
267 // CHECK1:       invoke.cont:
268 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* @global, align 4
269 // CHECK1-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1
270 // CHECK1-NEXT:    store i32 [[TMP6]], i32* [[ARRAYIDX1]], align 4
271 // CHECK1-NEXT:    ret void
272 // CHECK1:       terminate.lpad:
273 // CHECK1-NEXT:    [[TMP7:%.*]] = landingpad { i8*, i32 }
274 // CHECK1-NEXT:    catch i8* null
275 // CHECK1-NEXT:    [[TMP8:%.*]] = extractvalue { i8*, i32 } [[TMP7]], 0
276 // CHECK1-NEXT:    call void @__clang_call_terminate(i8* [[TMP8]]) #[[ATTR6]]
277 // CHECK1-NEXT:    unreachable
278 //
279 //
280 // CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_
281 // CHECK1-SAME: (i8** [[ARGC:%.*]]) #[[ATTR3]] comdat {
282 // CHECK1-NEXT:  entry:
283 // CHECK1-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 8
284 // CHECK1-NEXT:    [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_4:%.*]], align 8
285 // CHECK1-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
286 // CHECK1-NEXT:    [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8
287 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0
288 // CHECK1-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
289 // CHECK1-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0
290 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1
291 // CHECK1-NEXT:    [[TMP3:%.*]] = zext i8 [[TMP2]] to i64
292 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0
293 // CHECK1-NEXT:    store i8*** [[ARGC_ADDR]], i8**** [[TMP4]], align 8
294 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1
295 // CHECK1-NEXT:    store i64 [[TMP3]], i64* [[TMP5]], align 8
296 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.4*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]])
297 // CHECK1-NEXT:    ret i32 0
298 //
299 //
300 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..5
301 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon.4* noalias [[__CONTEXT:%.*]]) #[[ATTR2]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
302 // CHECK1-NEXT:  entry:
303 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
304 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
305 // CHECK1-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.4*, align 8
306 // CHECK1-NEXT:    [[VAR:%.*]] = alloca double*, align 8
307 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
308 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
309 // CHECK1-NEXT:    store %struct.anon.4* [[__CONTEXT]], %struct.anon.4** [[__CONTEXT_ADDR]], align 8
310 // CHECK1-NEXT:    [[TMP0:%.*]] = load %struct.anon.4*, %struct.anon.4** [[__CONTEXT_ADDR]], align 8
311 // CHECK1-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_4:%.*]], %struct.anon.4* [[TMP0]], i32 0, i32 0
312 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8***, i8**** [[TMP1]], align 8
313 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[TMP0]], i32 0, i32 1
314 // CHECK1-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP3]], align 8
315 // CHECK1-NEXT:    [[TMP5:%.*]] = load i8**, i8*** [[TMP2]], align 8
316 // CHECK1-NEXT:    invoke void @_Z3fooIPPcEvT_(i8** [[TMP5]])
317 // CHECK1-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]]
318 // CHECK1:       invoke.cont:
319 // CHECK1-NEXT:    [[TMP6:%.*]] = load double*, double** [[VAR]], align 8
320 // CHECK1-NEXT:    [[TMP7:%.*]] = mul nsw i64 0, [[TMP4]]
321 // CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP7]]
322 // CHECK1-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX]], i64 0
323 // CHECK1-NEXT:    ret void
324 // CHECK1:       terminate.lpad:
325 // CHECK1-NEXT:    [[TMP8:%.*]] = landingpad { i8*, i32 }
326 // CHECK1-NEXT:    catch i8* null
327 // CHECK1-NEXT:    [[TMP9:%.*]] = extractvalue { i8*, i32 } [[TMP8]], 0
328 // CHECK1-NEXT:    call void @__clang_call_terminate(i8* [[TMP9]]) #[[ATTR6]]
329 // CHECK1-NEXT:    unreachable
330 //
331 //
332 // CHECK1-LABEL: define {{[^@]+}}@_Z3fooIPPcEvT_
333 // CHECK1-SAME: (i8** [[ARGC:%.*]]) #[[ATTR3]] comdat {
334 // CHECK1-NEXT:  entry:
335 // CHECK1-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 8
336 // CHECK1-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
337 // CHECK1-NEXT:    ret void
338 //
339 //
340 // CHECK2-LABEL: define {{[^@]+}}@main
341 // CHECK2-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] !dbg [[DBG11:![0-9]+]] {
342 // CHECK2-NEXT:  entry:
343 // CHECK2-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
344 // CHECK2-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
345 // CHECK2-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
346 // CHECK2-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
347 // CHECK2-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
348 // CHECK2-NEXT:    [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
349 // CHECK2-NEXT:    [[OMP_OUTLINED_ARG_AGG_1:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
350 // CHECK2-NEXT:    [[OMP_OUTLINED_ARG_AGG_2:%.*]] = alloca [[STRUCT_ANON_2:%.*]], align 8
351 // CHECK2-NEXT:    store i32 0, i32* [[RETVAL]], align 4
352 // CHECK2-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
353 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META17:![0-9]+]], metadata !DIExpression()), !dbg [[DBG18:![0-9]+]]
354 // CHECK2-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
355 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i8*** [[ARGV_ADDR]], metadata [[META19:![0-9]+]], metadata !DIExpression()), !dbg [[DBG20:![0-9]+]]
356 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4, !dbg [[DBG21:![0-9]+]]
357 // CHECK2-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64, !dbg [[DBG22:![0-9]+]]
358 // CHECK2-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG22]]
359 // CHECK2-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8, !dbg [[DBG22]]
360 // CHECK2-NEXT:    [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16, !dbg [[DBG22]]
361 // CHECK2-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8, !dbg [[DBG22]]
362 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i64* [[__VLA_EXPR0]], metadata [[META23:![0-9]+]], metadata !DIExpression()), !dbg [[DBG25:![0-9]+]]
363 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32* [[VLA]], metadata [[META26:![0-9]+]], metadata !DIExpression()), !dbg [[DBG30:![0-9]+]]
364 // CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0, !dbg [[DBG31:![0-9]+]]
365 // CHECK2-NEXT:    store i64 [[TMP1]], i64* [[TMP3]], align 8, !dbg [[DBG31]]
366 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1, !dbg [[DBG31]]
367 // CHECK2-NEXT:    store i32* [[VLA]], i32** [[TMP4]], align 8, !dbg [[DBG31]]
368 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.anon* [[OMP_OUTLINED_ARG_AGG_]]), !dbg [[DBG31]]
369 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]], i32 0, i32 0, !dbg [[DBG32:![0-9]+]]
370 // CHECK2-NEXT:    store i64 [[TMP1]], i64* [[TMP5]], align 8, !dbg [[DBG32]]
371 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB5:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.0*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.anon.0* [[OMP_OUTLINED_ARG_AGG_1]]), !dbg [[DBG32]]
372 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_2]], i32 0, i32 0, !dbg [[DBG33:![0-9]+]]
373 // CHECK2-NEXT:    store i64 [[TMP1]], i64* [[TMP6]], align 8, !dbg [[DBG33]]
374 // CHECK2-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_2]], i32 0, i32 1, !dbg [[DBG33]]
375 // CHECK2-NEXT:    store i32* [[VLA]], i32** [[TMP7]], align 8, !dbg [[DBG33]]
376 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB9:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.2*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), %struct.anon.2* [[OMP_OUTLINED_ARG_AGG_2]]), !dbg [[DBG33]]
377 // CHECK2-NEXT:    [[TMP8:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8, !dbg [[DBG34:![0-9]+]]
378 // CHECK2-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIPPcEiT_(i8** [[TMP8]]), !dbg [[DBG35:![0-9]+]]
379 // CHECK2-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4, !dbg [[DBG36:![0-9]+]]
380 // CHECK2-NEXT:    [[TMP9:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG37:![0-9]+]]
381 // CHECK2-NEXT:    call void @llvm.stackrestore(i8* [[TMP9]]), !dbg [[DBG37]]
382 // CHECK2-NEXT:    [[TMP10:%.*]] = load i32, i32* [[RETVAL]], align 4, !dbg [[DBG37]]
383 // CHECK2-NEXT:    ret i32 [[TMP10]], !dbg [[DBG37]]
384 //
385 //
386 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
387 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon* noalias [[__CONTEXT:%.*]]) #[[ATTR3:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG38:![0-9]+]] {
388 // CHECK2-NEXT:  entry:
389 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
390 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
391 // CHECK2-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
392 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
393 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META49:![0-9]+]], metadata !DIExpression()), !dbg [[DBG50:![0-9]+]]
394 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
395 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META51:![0-9]+]], metadata !DIExpression()), !dbg [[DBG50]]
396 // CHECK2-NEXT:    store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
397 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata %struct.anon** [[__CONTEXT_ADDR]], metadata [[META52:![0-9]+]], metadata !DIExpression()), !dbg [[DBG50]]
398 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8, !dbg [[DBG53:![0-9]+]]
399 // CHECK2-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0, !dbg [[DBG53]]
400 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8, !dbg [[DBG53]]
401 // CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP0]], i32 0, i32 1, !dbg [[DBG53]]
402 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8, !dbg [[DBG53]]
403 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1, !dbg [[DBG54:![0-9]+]]
404 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG54]]
405 // CHECK2-NEXT:    invoke void @_Z3fooIiEvT_(i32 [[TMP5]])
406 // CHECK2-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG55:![0-9]+]]
407 // CHECK2:       invoke.cont:
408 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* @global, align 4, !dbg [[DBG56:![0-9]+]]
409 // CHECK2-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1, !dbg [[DBG57:![0-9]+]]
410 // CHECK2-NEXT:    store i32 [[TMP6]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG58:![0-9]+]]
411 // CHECK2-NEXT:    ret void, !dbg [[DBG56]]
412 // CHECK2:       terminate.lpad:
413 // CHECK2-NEXT:    [[TMP7:%.*]] = landingpad { i8*, i32 }
414 // CHECK2-NEXT:    catch i8* null, !dbg [[DBG55]]
415 // CHECK2-NEXT:    [[TMP8:%.*]] = extractvalue { i8*, i32 } [[TMP7]], 0, !dbg [[DBG55]]
416 // CHECK2-NEXT:    call void @__clang_call_terminate(i8* [[TMP8]]) #[[ATTR7:[0-9]+]], !dbg [[DBG55]]
417 // CHECK2-NEXT:    unreachable, !dbg [[DBG55]]
418 //
419 //
420 // CHECK2-LABEL: define {{[^@]+}}@_Z3fooIiEvT_
421 // CHECK2-SAME: (i32 [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat !dbg [[DBG59:![0-9]+]] {
422 // CHECK2-NEXT:  entry:
423 // CHECK2-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
424 // CHECK2-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
425 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META64:![0-9]+]], metadata !DIExpression()), !dbg [[DBG65:![0-9]+]]
426 // CHECK2-NEXT:    ret void, !dbg [[DBG66:![0-9]+]]
427 //
428 //
429 // CHECK2-LABEL: define {{[^@]+}}@__clang_call_terminate
430 // CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat {
431 // CHECK2-NEXT:    [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR6:[0-9]+]]
432 // CHECK2-NEXT:    call void @_ZSt9terminatev() #[[ATTR7]]
433 // CHECK2-NEXT:    unreachable
434 //
435 //
436 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1
437 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon.0* noalias [[__CONTEXT:%.*]]) #[[ATTR3]] !dbg [[DBG69:![0-9]+]] {
438 // CHECK2-NEXT:  entry:
439 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
440 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
441 // CHECK2-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
442 // CHECK2-NEXT:    [[GLOBAL:%.*]] = alloca i32, align 4
443 // CHECK2-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
444 // CHECK2-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
445 // CHECK2-NEXT:    [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 8
446 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
447 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META76:![0-9]+]], metadata !DIExpression()), !dbg [[DBG77:![0-9]+]]
448 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
449 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META78:![0-9]+]], metadata !DIExpression()), !dbg [[DBG77]]
450 // CHECK2-NEXT:    store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
451 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata %struct.anon.0** [[__CONTEXT_ADDR]], metadata [[META79:![0-9]+]], metadata !DIExpression()), !dbg [[DBG77]]
452 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8, !dbg [[DBG80:![0-9]+]]
453 // CHECK2-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0, !dbg [[DBG80]]
454 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8, !dbg [[DBG80]]
455 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32* [[GLOBAL]], metadata [[META81:![0-9]+]], metadata !DIExpression()), !dbg [[DBG77]]
456 // CHECK2-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG80]]
457 // CHECK2-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8, !dbg [[DBG80]]
458 // CHECK2-NEXT:    [[VLA:%.*]] = alloca i32, i64 [[TMP2]], align 16, !dbg [[DBG80]]
459 // CHECK2-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8, !dbg [[DBG80]]
460 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i64* [[__VLA_EXPR0]], metadata [[META82:![0-9]+]], metadata !DIExpression()), !dbg [[DBG77]]
461 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32* [[VLA]], metadata [[META83:![0-9]+]], metadata !DIExpression()), !dbg [[DBG77]]
462 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0, !dbg [[DBG84:![0-9]+]]
463 // CHECK2-NEXT:    store i64 [[TMP2]], i64* [[TMP4]], align 8, !dbg [[DBG84]]
464 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1, !dbg [[DBG84]]
465 // CHECK2-NEXT:    store i32* [[VLA]], i32** [[TMP5]], align 8, !dbg [[DBG84]]
466 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 2, !dbg [[DBG84]]
467 // CHECK2-NEXT:    store i32* [[GLOBAL]], i32** [[TMP6]], align 8, !dbg [[DBG84]]
468 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.1*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), %struct.anon.1* [[OMP_OUTLINED_ARG_AGG_]]), !dbg [[DBG84]]
469 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG85:![0-9]+]]
470 // CHECK2-NEXT:    call void @llvm.stackrestore(i8* [[TMP7]]), !dbg [[DBG85]]
471 // CHECK2-NEXT:    ret void, !dbg [[DBG87:![0-9]+]]
472 //
473 //
474 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..2
475 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias [[__CONTEXT:%.*]]) #[[ATTR3]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG88:![0-9]+]] {
476 // CHECK2-NEXT:  entry:
477 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
478 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
479 // CHECK2-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.1*, align 8
480 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
481 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META95:![0-9]+]], metadata !DIExpression()), !dbg [[DBG96:![0-9]+]]
482 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
483 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META97:![0-9]+]], metadata !DIExpression()), !dbg [[DBG96]]
484 // CHECK2-NEXT:    store %struct.anon.1* [[__CONTEXT]], %struct.anon.1** [[__CONTEXT_ADDR]], align 8
485 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata %struct.anon.1** [[__CONTEXT_ADDR]], metadata [[META98:![0-9]+]], metadata !DIExpression()), !dbg [[DBG96]]
486 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR]], align 8, !dbg [[DBG99:![0-9]+]]
487 // CHECK2-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP0]], i32 0, i32 0, !dbg [[DBG99]]
488 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8, !dbg [[DBG99]]
489 // CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 1, !dbg [[DBG99]]
490 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8, !dbg [[DBG99]]
491 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP0]], i32 0, i32 2, !dbg [[DBG99]]
492 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32*, i32** [[TMP5]], align 8, !dbg [[DBG99]]
493 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1, !dbg [[DBG100:![0-9]+]]
494 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG100]]
495 // CHECK2-NEXT:    invoke void @_Z3fooIiEvT_(i32 [[TMP7]])
496 // CHECK2-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG101:![0-9]+]]
497 // CHECK2:       invoke.cont:
498 // CHECK2-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP6]], align 4, !dbg [[DBG102:![0-9]+]]
499 // CHECK2-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1, !dbg [[DBG103:![0-9]+]]
500 // CHECK2-NEXT:    store i32 [[TMP8]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG104:![0-9]+]]
501 // CHECK2-NEXT:    ret void, !dbg [[DBG102]]
502 // CHECK2:       terminate.lpad:
503 // CHECK2-NEXT:    [[TMP9:%.*]] = landingpad { i8*, i32 }
504 // CHECK2-NEXT:    catch i8* null, !dbg [[DBG101]]
505 // CHECK2-NEXT:    [[TMP10:%.*]] = extractvalue { i8*, i32 } [[TMP9]], 0, !dbg [[DBG101]]
506 // CHECK2-NEXT:    call void @__clang_call_terminate(i8* [[TMP10]]) #[[ATTR7]], !dbg [[DBG101]]
507 // CHECK2-NEXT:    unreachable, !dbg [[DBG101]]
508 //
509 //
510 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3
511 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon.2* noalias [[__CONTEXT:%.*]]) #[[ATTR3]] !dbg [[DBG105:![0-9]+]] {
512 // CHECK2-NEXT:  entry:
513 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
514 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
515 // CHECK2-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.2*, align 8
516 // CHECK2-NEXT:    [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_3:%.*]], align 8
517 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
518 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META112:![0-9]+]], metadata !DIExpression()), !dbg [[DBG113:![0-9]+]]
519 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
520 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META114:![0-9]+]], metadata !DIExpression()), !dbg [[DBG113]]
521 // CHECK2-NEXT:    store %struct.anon.2* [[__CONTEXT]], %struct.anon.2** [[__CONTEXT_ADDR]], align 8
522 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata %struct.anon.2** [[__CONTEXT_ADDR]], metadata [[META115:![0-9]+]], metadata !DIExpression()), !dbg [[DBG113]]
523 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.anon.2*, %struct.anon.2** [[__CONTEXT_ADDR]], align 8, !dbg [[DBG116:![0-9]+]]
524 // CHECK2-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_2:%.*]], %struct.anon.2* [[TMP0]], i32 0, i32 0, !dbg [[DBG116]]
525 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8, !dbg [[DBG116]]
526 // CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], %struct.anon.2* [[TMP0]], i32 0, i32 1, !dbg [[DBG116]]
527 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8, !dbg [[DBG116]]
528 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0, !dbg [[DBG117:![0-9]+]]
529 // CHECK2-NEXT:    store i64 [[TMP2]], i64* [[TMP5]], align 8, !dbg [[DBG117]]
530 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1, !dbg [[DBG117]]
531 // CHECK2-NEXT:    store i32* [[TMP4]], i32** [[TMP6]], align 8, !dbg [[DBG117]]
532 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB7:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.3*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), %struct.anon.3* [[OMP_OUTLINED_ARG_AGG_]]), !dbg [[DBG117]]
533 // CHECK2-NEXT:    ret void, !dbg [[DBG118:![0-9]+]]
534 //
535 //
536 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4
537 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon.3* noalias [[__CONTEXT:%.*]]) #[[ATTR3]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG119:![0-9]+]] {
538 // CHECK2-NEXT:  entry:
539 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
540 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
541 // CHECK2-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.3*, align 8
542 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
543 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META126:![0-9]+]], metadata !DIExpression()), !dbg [[DBG127:![0-9]+]]
544 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
545 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META128:![0-9]+]], metadata !DIExpression()), !dbg [[DBG127]]
546 // CHECK2-NEXT:    store %struct.anon.3* [[__CONTEXT]], %struct.anon.3** [[__CONTEXT_ADDR]], align 8
547 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata %struct.anon.3** [[__CONTEXT_ADDR]], metadata [[META129:![0-9]+]], metadata !DIExpression()), !dbg [[DBG127]]
548 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.anon.3*, %struct.anon.3** [[__CONTEXT_ADDR]], align 8, !dbg [[DBG130:![0-9]+]]
549 // CHECK2-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_3:%.*]], %struct.anon.3* [[TMP0]], i32 0, i32 0, !dbg [[DBG130]]
550 // CHECK2-NEXT:    [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 8, !dbg [[DBG130]]
551 // CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_3]], %struct.anon.3* [[TMP0]], i32 0, i32 1, !dbg [[DBG130]]
552 // CHECK2-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[TMP3]], align 8, !dbg [[DBG130]]
553 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1, !dbg [[DBG131:![0-9]+]]
554 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG131]]
555 // CHECK2-NEXT:    invoke void @_Z3fooIiEvT_(i32 [[TMP5]])
556 // CHECK2-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG132:![0-9]+]]
557 // CHECK2:       invoke.cont:
558 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* @global, align 4, !dbg [[DBG133:![0-9]+]]
559 // CHECK2-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 1, !dbg [[DBG134:![0-9]+]]
560 // CHECK2-NEXT:    store i32 [[TMP6]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG135:![0-9]+]]
561 // CHECK2-NEXT:    ret void, !dbg [[DBG133]]
562 // CHECK2:       terminate.lpad:
563 // CHECK2-NEXT:    [[TMP7:%.*]] = landingpad { i8*, i32 }
564 // CHECK2-NEXT:    catch i8* null, !dbg [[DBG132]]
565 // CHECK2-NEXT:    [[TMP8:%.*]] = extractvalue { i8*, i32 } [[TMP7]], 0, !dbg [[DBG132]]
566 // CHECK2-NEXT:    call void @__clang_call_terminate(i8* [[TMP8]]) #[[ATTR7]], !dbg [[DBG132]]
567 // CHECK2-NEXT:    unreachable, !dbg [[DBG132]]
568 //
569 //
570 // CHECK2-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_
571 // CHECK2-SAME: (i8** [[ARGC:%.*]]) #[[ATTR4]] comdat !dbg [[DBG136:![0-9]+]] {
572 // CHECK2-NEXT:  entry:
573 // CHECK2-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 8
574 // CHECK2-NEXT:    [[OMP_OUTLINED_ARG_AGG_:%.*]] = alloca [[STRUCT_ANON_4:%.*]], align 8
575 // CHECK2-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
576 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META141:![0-9]+]], metadata !DIExpression()), !dbg [[DBG142:![0-9]+]]
577 // CHECK2-NEXT:    [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8, !dbg [[DBG143:![0-9]+]]
578 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0, !dbg [[DBG143]]
579 // CHECK2-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8, !dbg [[DBG143]]
580 // CHECK2-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0, !dbg [[DBG143]]
581 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1, !dbg [[DBG143]]
582 // CHECK2-NEXT:    [[TMP3:%.*]] = zext i8 [[TMP2]] to i64, !dbg [[DBG144:![0-9]+]]
583 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 0, !dbg [[DBG145:![0-9]+]]
584 // CHECK2-NEXT:    store i8*** [[ARGC_ADDR]], i8**** [[TMP4]], align 8, !dbg [[DBG145]]
585 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]], i32 0, i32 1, !dbg [[DBG145]]
586 // CHECK2-NEXT:    store i64 [[TMP3]], i64* [[TMP5]], align 8, !dbg [[DBG145]]
587 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB11:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.anon.4*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.anon.4* [[OMP_OUTLINED_ARG_AGG_]]), !dbg [[DBG145]]
588 // CHECK2-NEXT:    ret i32 0, !dbg [[DBG146:![0-9]+]]
589 //
590 //
591 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..5
592 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon.4* noalias [[__CONTEXT:%.*]]) #[[ATTR3]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG147:![0-9]+]] {
593 // CHECK2-NEXT:  entry:
594 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
595 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
596 // CHECK2-NEXT:    [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.4*, align 8
597 // CHECK2-NEXT:    [[VAR:%.*]] = alloca double*, align 8
598 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
599 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META154:![0-9]+]], metadata !DIExpression()), !dbg [[DBG155:![0-9]+]]
600 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
601 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META156:![0-9]+]], metadata !DIExpression()), !dbg [[DBG155]]
602 // CHECK2-NEXT:    store %struct.anon.4* [[__CONTEXT]], %struct.anon.4** [[__CONTEXT_ADDR]], align 8
603 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata %struct.anon.4** [[__CONTEXT_ADDR]], metadata [[META157:![0-9]+]], metadata !DIExpression()), !dbg [[DBG155]]
604 // CHECK2-NEXT:    [[TMP0:%.*]] = load %struct.anon.4*, %struct.anon.4** [[__CONTEXT_ADDR]], align 8, !dbg [[DBG158:![0-9]+]]
605 // CHECK2-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_4:%.*]], %struct.anon.4* [[TMP0]], i32 0, i32 0, !dbg [[DBG158]]
606 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8***, i8**** [[TMP1]], align 8, !dbg [[DBG158]]
607 // CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[TMP0]], i32 0, i32 1, !dbg [[DBG158]]
608 // CHECK2-NEXT:    [[TMP4:%.*]] = load i64, i64* [[TMP3]], align 8, !dbg [[DBG158]]
609 // CHECK2-NEXT:    [[TMP5:%.*]] = load i8**, i8*** [[TMP2]], align 8, !dbg [[DBG159:![0-9]+]]
610 // CHECK2-NEXT:    invoke void @_Z3fooIPPcEvT_(i8** [[TMP5]])
611 // CHECK2-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG161:![0-9]+]]
612 // CHECK2:       invoke.cont:
613 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata double** [[VAR]], metadata [[META162:![0-9]+]], metadata !DIExpression()), !dbg [[DBG169:![0-9]+]]
614 // CHECK2-NEXT:    [[TMP6:%.*]] = load double*, double** [[VAR]], align 8, !dbg [[DBG170:![0-9]+]]
615 // CHECK2-NEXT:    [[TMP7:%.*]] = mul nsw i64 0, [[TMP4]], !dbg [[DBG170]]
616 // CHECK2-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP7]], !dbg [[DBG170]]
617 // CHECK2-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX]], i64 0, !dbg [[DBG170]]
618 // CHECK2-NEXT:    ret void, !dbg [[DBG171:![0-9]+]]
619 // CHECK2:       terminate.lpad:
620 // CHECK2-NEXT:    [[TMP8:%.*]] = landingpad { i8*, i32 }
621 // CHECK2-NEXT:    catch i8* null, !dbg [[DBG161]]
622 // CHECK2-NEXT:    [[TMP9:%.*]] = extractvalue { i8*, i32 } [[TMP8]], 0, !dbg [[DBG161]]
623 // CHECK2-NEXT:    call void @__clang_call_terminate(i8* [[TMP9]]) #[[ATTR7]], !dbg [[DBG161]]
624 // CHECK2-NEXT:    unreachable, !dbg [[DBG161]]
625 //
626 //
627 // CHECK2-LABEL: define {{[^@]+}}@_Z3fooIPPcEvT_
628 // CHECK2-SAME: (i8** [[ARGC:%.*]]) #[[ATTR4]] comdat !dbg [[DBG172:![0-9]+]] {
629 // CHECK2-NEXT:  entry:
630 // CHECK2-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 8
631 // CHECK2-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
632 // CHECK2-NEXT:    call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META175:![0-9]+]], metadata !DIExpression()), !dbg [[DBG176:![0-9]+]]
633 // CHECK2-NEXT:    ret void, !dbg [[DBG177:![0-9]+]]
634 //
635 //
636 // CHECK3-LABEL: define {{[^@]+}}@main
637 // CHECK3-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
638 // CHECK3-NEXT:  entry:
639 // CHECK3-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
640 // CHECK3-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
641 // CHECK3-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
642 // CHECK3-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
643 // CHECK3-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
644 // CHECK3-NEXT:    store i32 0, i32* [[RETVAL]], align 4
645 // CHECK3-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
646 // CHECK3-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
647 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
648 // CHECK3-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
649 // CHECK3-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
650 // CHECK3-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
651 // CHECK3-NEXT:    [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16
652 // CHECK3-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
653 // CHECK3-NEXT:    [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
654 // CHECK3-NEXT:    br label [[OMP_PARALLEL:%.*]]
655 // CHECK3:       omp_parallel:
656 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @main..omp_par to void (i32*, i32*, ...)*), i32* [[VLA]])
657 // CHECK3-NEXT:    br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
658 // CHECK3:       omp.par.outlined.exit:
659 // CHECK3-NEXT:    br label [[OMP_PAR_EXIT_SPLIT:%.*]]
660 // CHECK3:       omp.par.exit.split:
661 // CHECK3-NEXT:    [[TMP3:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8
662 // CHECK3-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIPPcEiT_(i8** [[TMP3]])
663 // CHECK3-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
664 // CHECK3-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
665 // CHECK3-NEXT:    call void @llvm.stackrestore(i8* [[TMP4]])
666 // CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[RETVAL]], align 4
667 // CHECK3-NEXT:    ret i32 [[TMP5]]
668 //
669 //
670 // CHECK3-LABEL: define {{[^@]+}}@main..omp_par
671 // CHECK3-SAME: (i32* noalias [[TID_ADDR:%.*]], i32* noalias [[ZERO_ADDR:%.*]], i32* [[VLA:%.*]]) #[[ATTR1:[0-9]+]] {
672 // CHECK3-NEXT:  omp.par.entry:
673 // CHECK3-NEXT:    [[TID_ADDR_LOCAL:%.*]] = alloca i32, align 4
674 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[TID_ADDR]], align 4
675 // CHECK3-NEXT:    store i32 [[TMP0]], i32* [[TID_ADDR_LOCAL]], align 4
676 // CHECK3-NEXT:    [[TID:%.*]] = load i32, i32* [[TID_ADDR_LOCAL]], align 4
677 // CHECK3-NEXT:    br label [[OMP_PAR_REGION:%.*]]
678 // CHECK3:       omp.par.outlined.exit.exitStub:
679 // CHECK3-NEXT:    ret void
680 // CHECK3:       omp.par.region:
681 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1
682 // CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
683 // CHECK3-NEXT:    call void @_Z3fooIiEvT_(i32 [[TMP1]])
684 // CHECK3-NEXT:    [[TMP2:%.*]] = load i32, i32* @global, align 4
685 // CHECK3-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1
686 // CHECK3-NEXT:    store i32 [[TMP2]], i32* [[ARRAYIDX1]], align 4
687 // CHECK3-NEXT:    br label [[OMP_PAR_PRE_FINALIZE:%.*]]
688 // CHECK3:       omp.par.pre_finalize:
689 // CHECK3-NEXT:    br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]]
690 //
691 //
692 // CHECK3-LABEL: define {{[^@]+}}@_Z3fooIiEvT_
693 // CHECK3-SAME: (i32 [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat {
694 // CHECK3-NEXT:  entry:
695 // CHECK3-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
696 // CHECK3-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
697 // CHECK3-NEXT:    ret void
698 //
699 //
700 // CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_
701 // CHECK3-SAME: (i8** [[ARGC:%.*]]) #[[ATTR5:[0-9]+]] comdat {
702 // CHECK3-NEXT:  entry:
703 // CHECK3-NEXT:    [[DOTRELOADED:%.*]] = alloca i64, align 8
704 // CHECK3-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 8
705 // CHECK3-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
706 // CHECK3-NEXT:    [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8
707 // CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0
708 // CHECK3-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8
709 // CHECK3-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0
710 // CHECK3-NEXT:    [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1
711 // CHECK3-NEXT:    [[TMP3:%.*]] = zext i8 [[TMP2]] to i64
712 // CHECK3-NEXT:    [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
713 // CHECK3-NEXT:    store i64 [[TMP3]], i64* [[DOTRELOADED]], align 8
714 // CHECK3-NEXT:    br label [[OMP_PARALLEL:%.*]]
715 // CHECK3:       omp_parallel:
716 // CHECK3-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64*, i8***)* @_Z5tmainIPPcEiT_..omp_par to void (i32*, i32*, ...)*), i64* [[DOTRELOADED]], i8*** [[ARGC_ADDR]])
717 // CHECK3-NEXT:    br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
718 // CHECK3:       omp.par.outlined.exit:
719 // CHECK3-NEXT:    br label [[OMP_PAR_EXIT_SPLIT:%.*]]
720 // CHECK3:       omp.par.exit.split:
721 // CHECK3-NEXT:    ret i32 0
722 //
723 //
724 // CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_..omp_par
725 // CHECK3-SAME: (i32* noalias [[TID_ADDR:%.*]], i32* noalias [[ZERO_ADDR:%.*]], i64* [[DOTRELOADED:%.*]], i8*** [[ARGC_ADDR:%.*]]) #[[ATTR1]] {
726 // CHECK3-NEXT:  omp.par.entry:
727 // CHECK3-NEXT:    [[TID_ADDR_LOCAL:%.*]] = alloca i32, align 4
728 // CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[TID_ADDR]], align 4
729 // CHECK3-NEXT:    store i32 [[TMP0]], i32* [[TID_ADDR_LOCAL]], align 4
730 // CHECK3-NEXT:    [[TID:%.*]] = load i32, i32* [[TID_ADDR_LOCAL]], align 4
731 // CHECK3-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTRELOADED]], align 8
732 // CHECK3-NEXT:    [[VAR:%.*]] = alloca double*, align 8
733 // CHECK3-NEXT:    br label [[OMP_PAR_REGION:%.*]]
734 // CHECK3:       omp.par.outlined.exit.exitStub:
735 // CHECK3-NEXT:    ret void
736 // CHECK3:       omp.par.region:
737 // CHECK3-NEXT:    [[TMP2:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8
738 // CHECK3-NEXT:    call void @_Z3fooIPPcEvT_(i8** [[TMP2]])
739 // CHECK3-NEXT:    [[TMP3:%.*]] = load double*, double** [[VAR]], align 8
740 // CHECK3-NEXT:    [[TMP4:%.*]] = mul nsw i64 0, [[TMP1]]
741 // CHECK3-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[TMP3]], i64 [[TMP4]]
742 // CHECK3-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX2]], i64 0
743 // CHECK3-NEXT:    br label [[OMP_PAR_PRE_FINALIZE:%.*]]
744 // CHECK3:       omp.par.pre_finalize:
745 // CHECK3-NEXT:    br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]]
746 //
747 //
748 // CHECK3-LABEL: define {{[^@]+}}@_Z3fooIPPcEvT_
749 // CHECK3-SAME: (i8** [[ARGC:%.*]]) #[[ATTR4]] comdat {
750 // CHECK3-NEXT:  entry:
751 // CHECK3-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 8
752 // CHECK3-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
753 // CHECK3-NEXT:    ret void
754 //
755 //
756 // CHECK4-LABEL: define {{[^@]+}}@main
757 // CHECK4-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] !dbg [[DBG11:![0-9]+]] {
758 // CHECK4-NEXT:  entry:
759 // CHECK4-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
760 // CHECK4-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
761 // CHECK4-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
762 // CHECK4-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
763 // CHECK4-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
764 // CHECK4-NEXT:    store i32 0, i32* [[RETVAL]], align 4
765 // CHECK4-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
766 // CHECK4-NEXT:    call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META17:![0-9]+]], metadata !DIExpression()), !dbg [[DBG18:![0-9]+]]
767 // CHECK4-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
768 // CHECK4-NEXT:    call void @llvm.dbg.declare(metadata i8*** [[ARGV_ADDR]], metadata [[META19:![0-9]+]], metadata !DIExpression()), !dbg [[DBG18]]
769 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4, !dbg [[DBG20:![0-9]+]]
770 // CHECK4-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64, !dbg [[DBG20]]
771 // CHECK4-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG20]]
772 // CHECK4-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8, !dbg [[DBG20]]
773 // CHECK4-NEXT:    [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16, !dbg [[DBG20]]
774 // CHECK4-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8, !dbg [[DBG20]]
775 // CHECK4-NEXT:    call void @llvm.dbg.declare(metadata i64* [[__VLA_EXPR0]], metadata [[META21:![0-9]+]], metadata !DIExpression()), !dbg [[DBG23:![0-9]+]]
776 // CHECK4-NEXT:    call void @llvm.dbg.declare(metadata i32* [[VLA]], metadata [[META24:![0-9]+]], metadata !DIExpression()), !dbg [[DBG20]]
777 // CHECK4-NEXT:    [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]), !dbg [[DBG28:![0-9]+]]
778 // CHECK4-NEXT:    br label [[OMP_PARALLEL:%.*]]
779 // CHECK4:       omp_parallel:
780 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @main..omp_par to void (i32*, i32*, ...)*), i32* [[VLA]]), !dbg [[DBG29:![0-9]+]]
781 // CHECK4-NEXT:    br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
782 // CHECK4:       omp.par.outlined.exit:
783 // CHECK4-NEXT:    br label [[OMP_PAR_EXIT_SPLIT:%.*]]
784 // CHECK4:       omp.par.exit.split:
785 // CHECK4-NEXT:    [[TMP3:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8, !dbg [[DBG30:![0-9]+]]
786 // CHECK4-NEXT:    [[CALL:%.*]] = call i32 @_Z5tmainIPPcEiT_(i8** [[TMP3]]), !dbg [[DBG30]]
787 // CHECK4-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4, !dbg [[DBG30]]
788 // CHECK4-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG31:![0-9]+]]
789 // CHECK4-NEXT:    call void @llvm.stackrestore(i8* [[TMP4]]), !dbg [[DBG31]]
790 // CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[RETVAL]], align 4, !dbg [[DBG31]]
791 // CHECK4-NEXT:    ret i32 [[TMP5]], !dbg [[DBG31]]
792 //
793 //
794 // CHECK4-LABEL: define {{[^@]+}}@main..omp_par
795 // CHECK4-SAME: (i32* noalias [[TID_ADDR:%.*]], i32* noalias [[ZERO_ADDR:%.*]], i32* [[VLA:%.*]]) #[[ATTR1:[0-9]+]] !dbg [[DBG32:![0-9]+]] {
796 // CHECK4-NEXT:  omp.par.entry:
797 // CHECK4-NEXT:    [[TID_ADDR_LOCAL:%.*]] = alloca i32, align 4
798 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[TID_ADDR]], align 4
799 // CHECK4-NEXT:    store i32 [[TMP0]], i32* [[TID_ADDR_LOCAL]], align 4
800 // CHECK4-NEXT:    [[TID:%.*]] = load i32, i32* [[TID_ADDR_LOCAL]], align 4
801 // CHECK4-NEXT:    br label [[OMP_PAR_REGION:%.*]]
802 // CHECK4:       omp.par.outlined.exit.exitStub:
803 // CHECK4-NEXT:    ret void
804 // CHECK4:       omp.par.region:
805 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1, !dbg [[DBG34:![0-9]+]]
806 // CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG34]]
807 // CHECK4-NEXT:    call void @_Z3fooIiEvT_(i32 [[TMP1]]), !dbg [[DBG34]]
808 // CHECK4-NEXT:    [[TMP2:%.*]] = load i32, i32* @global, align 4, !dbg [[DBG34]]
809 // CHECK4-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1, !dbg [[DBG34]]
810 // CHECK4-NEXT:    store i32 [[TMP2]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG34]]
811 // CHECK4-NEXT:    br label [[OMP_PAR_PRE_FINALIZE:%.*]], !dbg [[DBG34]]
812 // CHECK4:       omp.par.pre_finalize:
813 // CHECK4-NEXT:    br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]], !dbg [[DBG34]]
814 //
815 //
816 // CHECK4-LABEL: define {{[^@]+}}@_Z3fooIiEvT_
817 // CHECK4-SAME: (i32 [[ARGC:%.*]]) #[[ATTR5:[0-9]+]] comdat !dbg [[DBG35:![0-9]+]] {
818 // CHECK4-NEXT:  entry:
819 // CHECK4-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
820 // CHECK4-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
821 // CHECK4-NEXT:    call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META40:![0-9]+]], metadata !DIExpression()), !dbg [[DBG41:![0-9]+]]
822 // CHECK4-NEXT:    ret void, !dbg [[DBG41]]
823 //
824 //
825 // CHECK4-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_
826 // CHECK4-SAME: (i8** [[ARGC:%.*]]) #[[ATTR6:[0-9]+]] comdat !dbg [[DBG44:![0-9]+]] {
827 // CHECK4-NEXT:  entry:
828 // CHECK4-NEXT:    [[DOTRELOADED:%.*]] = alloca i64, align 8
829 // CHECK4-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 8
830 // CHECK4-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
831 // CHECK4-NEXT:    call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META49:![0-9]+]], metadata !DIExpression()), !dbg [[DBG50:![0-9]+]]
832 // CHECK4-NEXT:    [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8, !dbg [[DBG51:![0-9]+]]
833 // CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0, !dbg [[DBG51]]
834 // CHECK4-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8, !dbg [[DBG51]]
835 // CHECK4-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0, !dbg [[DBG51]]
836 // CHECK4-NEXT:    [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1, !dbg [[DBG51]]
837 // CHECK4-NEXT:    [[TMP3:%.*]] = zext i8 [[TMP2]] to i64, !dbg [[DBG51]]
838 // CHECK4-NEXT:    [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]]), !dbg [[DBG52:![0-9]+]]
839 // CHECK4-NEXT:    store i64 [[TMP3]], i64* [[DOTRELOADED]], align 8
840 // CHECK4-NEXT:    br label [[OMP_PARALLEL:%.*]]
841 // CHECK4:       omp_parallel:
842 // CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64*, i8***)* @_Z5tmainIPPcEiT_..omp_par to void (i32*, i32*, ...)*), i64* [[DOTRELOADED]], i8*** [[ARGC_ADDR]]), !dbg [[DBG53:![0-9]+]]
843 // CHECK4-NEXT:    br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
844 // CHECK4:       omp.par.outlined.exit:
845 // CHECK4-NEXT:    br label [[OMP_PAR_EXIT_SPLIT:%.*]]
846 // CHECK4:       omp.par.exit.split:
847 // CHECK4-NEXT:    ret i32 0, !dbg [[DBG55:![0-9]+]]
848 //
849 //
850 // CHECK4-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_..omp_par
851 // CHECK4-SAME: (i32* noalias [[TID_ADDR:%.*]], i32* noalias [[ZERO_ADDR:%.*]], i64* [[DOTRELOADED:%.*]], i8*** [[ARGC_ADDR:%.*]]) #[[ATTR1]] !dbg [[DBG56:![0-9]+]] {
852 // CHECK4-NEXT:  omp.par.entry:
853 // CHECK4-NEXT:    [[TID_ADDR_LOCAL:%.*]] = alloca i32, align 4
854 // CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[TID_ADDR]], align 4
855 // CHECK4-NEXT:    store i32 [[TMP0]], i32* [[TID_ADDR_LOCAL]], align 4
856 // CHECK4-NEXT:    [[TID:%.*]] = load i32, i32* [[TID_ADDR_LOCAL]], align 4
857 // CHECK4-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTRELOADED]], align 8
858 // CHECK4-NEXT:    [[VAR:%.*]] = alloca double*, align 8
859 // CHECK4-NEXT:    br label [[OMP_PAR_REGION:%.*]]
860 // CHECK4:       omp.par.outlined.exit.exitStub:
861 // CHECK4-NEXT:    ret void
862 // CHECK4:       omp.par.region:
863 // CHECK4-NEXT:    [[TMP2:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8, !dbg [[DBG57:![0-9]+]]
864 // CHECK4-NEXT:    call void @_Z3fooIPPcEvT_(i8** [[TMP2]]), !dbg [[DBG57]]
865 // CHECK4-NEXT:    call void @llvm.dbg.declare(metadata double** [[VAR]], metadata [[META58:![0-9]+]], metadata !DIExpression()), !dbg [[DBG65:![0-9]+]]
866 // CHECK4-NEXT:    [[TMP3:%.*]] = load double*, double** [[VAR]], align 8, !dbg [[DBG65]]
867 // CHECK4-NEXT:    [[TMP4:%.*]] = mul nsw i64 0, [[TMP1]], !dbg [[DBG65]]
868 // CHECK4-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[TMP3]], i64 [[TMP4]], !dbg [[DBG65]]
869 // CHECK4-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX2]], i64 0, !dbg [[DBG65]]
870 // CHECK4-NEXT:    br label [[OMP_PAR_PRE_FINALIZE:%.*]], !dbg [[DBG66:![0-9]+]]
871 // CHECK4:       omp.par.pre_finalize:
872 // CHECK4-NEXT:    br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]], !dbg [[DBG66]]
873 //
874 //
875 // CHECK4-LABEL: define {{[^@]+}}@_Z3fooIPPcEvT_
876 // CHECK4-SAME: (i8** [[ARGC:%.*]]) #[[ATTR5]] comdat !dbg [[DBG67:![0-9]+]] {
877 // CHECK4-NEXT:  entry:
878 // CHECK4-NEXT:    [[ARGC_ADDR:%.*]] = alloca i8**, align 8
879 // CHECK4-NEXT:    store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8
880 // CHECK4-NEXT:    call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META70:![0-9]+]], metadata !DIExpression()), !dbg [[DBG71:![0-9]+]]
881 // CHECK4-NEXT:    ret void, !dbg [[DBG71]]
882 //
883