1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ 2 // RUN: %clang_cc1 -verify -fopenmp -x c++ -emit-llvm %s -triple x86_64-unknown-linux -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK1 3 // RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s 4 // RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2 5 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-enable-irbuilder -DIRBUILDER -x c++ -emit-llvm %s -triple x86_64-unknown-linux -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK3 6 // RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -DIRBUILDER -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s 7 // RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -DIRBUILDER -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -gno-column-info -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4 8 9 // RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -emit-llvm %s -triple x86_64-unknown-linux -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK5 10 // RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s 11 // RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6 12 // RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-enable-irbuilder -x c++ -emit-llvm %s -triple x86_64-unknown-linux -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK7 13 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-enable-irbuilder -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s 14 // RUN: %clang_cc1 -fopenmp-simd -fopenmp-enable-irbuilder -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK8 15 // expected-no-diagnostics 16 #ifndef HEADER 17 #define HEADER 18 19 20 template <class T> 21 void foo(T argc) {} 22 23 template <typename T> 24 int tmain(T argc) { 25 typedef double (*chunk_t)[argc[0][0]]; 26 #pragma omp parallel 27 { 28 foo(argc); 29 chunk_t var;(void)var[0][0]; 30 } 31 return 0; 32 } 33 34 int global; 35 int main (int argc, char **argv) { 36 int a[argc]; 37 #pragma omp parallel shared(global, a) default(none) 38 foo(a[1]), a[1] = global; 39 #ifndef IRBUILDER 40 // TODO: Support for privates in IRBuilder. 41 #pragma omp parallel private(global, a) default(none) 42 #pragma omp parallel shared(global, a) default(none) 43 foo(a[1]), a[1] = global; 44 // FIXME: IRBuilder crashes in void llvm::OpenMPIRBuilder::finalize() 45 // Assertion `Extractor.isEligible() && "Expected OpenMP outlining to be possible!"' failed. 46 #pragma omp parallel shared(global, a) default(none) 47 #pragma omp parallel shared(global, a) default(none) 48 foo(a[1]), a[1] = global; 49 #endif // IRBUILDER 50 return tmain(argv); 51 } 52 53 54 55 56 57 58 59 60 61 62 63 64 // Note that OpenMPIRBuilder puts the trailing arguments in a different order: 65 // arguments that are wrapped into additional pointers precede the other 66 // arguments. This is expected and not problematic because both the call and the 67 // function are generated from the same place, and the function is internal. 68 69 70 71 72 #endif 73 // CHECK1-LABEL: define {{[^@]+}}@main 74 // CHECK1-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] { 75 // CHECK1-NEXT: entry: 76 // CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 77 // CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 78 // CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8 79 // CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 80 // CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 81 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4 82 // CHECK1-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 83 // CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 84 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 85 // CHECK1-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 86 // CHECK1-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 87 // CHECK1-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8 88 // CHECK1-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16 89 // CHECK1-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8 90 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]], i32* [[VLA]]) 91 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]]) 92 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]], i32* [[VLA]]) 93 // CHECK1-NEXT: [[TMP3:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8 94 // CHECK1-NEXT: [[CALL:%.*]] = call i32 @_Z5tmainIPPcEiT_(i8** [[TMP3]]) 95 // CHECK1-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 96 // CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 97 // CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP4]]) 98 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[RETVAL]], align 4 99 // CHECK1-NEXT: ret i32 [[TMP5]] 100 // 101 // 102 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined. 103 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { 104 // CHECK1-NEXT: entry: 105 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 106 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 107 // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 108 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 109 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 110 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 111 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 112 // CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 113 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 114 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8 115 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1 116 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 117 // CHECK1-NEXT: invoke void @_Z3fooIiEvT_(i32 [[TMP2]]) 118 // CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] 119 // CHECK1: invoke.cont: 120 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* @global, align 4 121 // CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1 122 // CHECK1-NEXT: store i32 [[TMP3]], i32* [[ARRAYIDX1]], align 4 123 // CHECK1-NEXT: ret void 124 // CHECK1: terminate.lpad: 125 // CHECK1-NEXT: [[TMP4:%.*]] = landingpad { i8*, i32 } 126 // CHECK1-NEXT: catch i8* null 127 // CHECK1-NEXT: [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0 128 // CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP5]]) #[[ATTR6:[0-9]+]] 129 // CHECK1-NEXT: unreachable 130 // 131 // 132 // CHECK1-LABEL: define {{[^@]+}}@_Z3fooIiEvT_ 133 // CHECK1-SAME: (i32 [[ARGC:%.*]]) #[[ATTR3:[0-9]+]] comdat { 134 // CHECK1-NEXT: entry: 135 // CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 136 // CHECK1-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 137 // CHECK1-NEXT: ret void 138 // 139 // 140 // CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate 141 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4:[0-9]+]] comdat { 142 // CHECK1-NEXT: [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR5:[0-9]+]] 143 // CHECK1-NEXT: call void @_ZSt9terminatev() #[[ATTR6]] 144 // CHECK1-NEXT: unreachable 145 // 146 // 147 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1 148 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]]) #[[ATTR2]] { 149 // CHECK1-NEXT: entry: 150 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 151 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 152 // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 153 // CHECK1-NEXT: [[GLOBAL:%.*]] = alloca i32, align 4 154 // CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 155 // CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 156 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 157 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 158 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 159 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 160 // CHECK1-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave() 161 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 8 162 // CHECK1-NEXT: [[VLA1:%.*]] = alloca i32, i64 [[TMP0]], align 16 163 // CHECK1-NEXT: store i64 [[TMP0]], i64* [[__VLA_EXPR0]], align 8 164 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP0]], i32* [[VLA1]], i32* [[GLOBAL]]) 165 // CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 166 // CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP2]]) 167 // CHECK1-NEXT: ret void 168 // 169 // 170 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2 171 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32* nonnull align 4 dereferenceable(4) [[GLOBAL:%.*]]) #[[ATTR2]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { 172 // CHECK1-NEXT: entry: 173 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 174 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 175 // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 176 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 177 // CHECK1-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i32*, align 8 178 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 179 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 180 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 181 // CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 182 // CHECK1-NEXT: store i32* [[GLOBAL]], i32** [[GLOBAL_ADDR]], align 8 183 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 184 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8 185 // CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[GLOBAL_ADDR]], align 8 186 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1 187 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 188 // CHECK1-NEXT: invoke void @_Z3fooIiEvT_(i32 [[TMP3]]) 189 // CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] 190 // CHECK1: invoke.cont: 191 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4 192 // CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1 193 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[ARRAYIDX1]], align 4 194 // CHECK1-NEXT: ret void 195 // CHECK1: terminate.lpad: 196 // CHECK1-NEXT: [[TMP5:%.*]] = landingpad { i8*, i32 } 197 // CHECK1-NEXT: catch i8* null 198 // CHECK1-NEXT: [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP5]], 0 199 // CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP6]]) #[[ATTR6]] 200 // CHECK1-NEXT: unreachable 201 // 202 // 203 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3 204 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { 205 // CHECK1-NEXT: entry: 206 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 207 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 208 // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 209 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 210 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 211 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 212 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 213 // CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 214 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 215 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8 216 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP0]], i32* [[TMP1]]) 217 // CHECK1-NEXT: ret void 218 // 219 // 220 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4 221 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { 222 // CHECK1-NEXT: entry: 223 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 224 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 225 // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 226 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 227 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 228 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 229 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 230 // CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 231 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 232 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8 233 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1 234 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 235 // CHECK1-NEXT: invoke void @_Z3fooIiEvT_(i32 [[TMP2]]) 236 // CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] 237 // CHECK1: invoke.cont: 238 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* @global, align 4 239 // CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1 240 // CHECK1-NEXT: store i32 [[TMP3]], i32* [[ARRAYIDX1]], align 4 241 // CHECK1-NEXT: ret void 242 // CHECK1: terminate.lpad: 243 // CHECK1-NEXT: [[TMP4:%.*]] = landingpad { i8*, i32 } 244 // CHECK1-NEXT: catch i8* null 245 // CHECK1-NEXT: [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0 246 // CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP5]]) #[[ATTR6]] 247 // CHECK1-NEXT: unreachable 248 // 249 // 250 // CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_ 251 // CHECK1-SAME: (i8** [[ARGC:%.*]]) #[[ATTR3]] comdat { 252 // CHECK1-NEXT: entry: 253 // CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8 254 // CHECK1-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8 255 // CHECK1-NEXT: [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8 256 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0 257 // CHECK1-NEXT: [[TMP1:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8 258 // CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0 259 // CHECK1-NEXT: [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1 260 // CHECK1-NEXT: [[TMP3:%.*]] = zext i8 [[TMP2]] to i64 261 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i8***, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i8*** [[ARGC_ADDR]], i64 [[TMP3]]) 262 // CHECK1-NEXT: ret i32 0 263 // 264 // 265 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..5 266 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i8*** nonnull align 8 dereferenceable(8) [[ARGC:%.*]], i64 [[VLA:%.*]]) #[[ATTR2]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { 267 // CHECK1-NEXT: entry: 268 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 269 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 270 // CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i8***, align 8 271 // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 272 // CHECK1-NEXT: [[VAR:%.*]] = alloca double*, align 8 273 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 274 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 275 // CHECK1-NEXT: store i8*** [[ARGC]], i8**** [[ARGC_ADDR]], align 8 276 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 277 // CHECK1-NEXT: [[TMP0:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 8 278 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 279 // CHECK1-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[TMP0]], align 8 280 // CHECK1-NEXT: invoke void @_Z3fooIPPcEvT_(i8** [[TMP2]]) 281 // CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] 282 // CHECK1: invoke.cont: 283 // CHECK1-NEXT: [[TMP3:%.*]] = load double*, double** [[VAR]], align 8 284 // CHECK1-NEXT: [[TMP4:%.*]] = mul nsw i64 0, [[TMP1]] 285 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP3]], i64 [[TMP4]] 286 // CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX]], i64 0 287 // CHECK1-NEXT: ret void 288 // CHECK1: terminate.lpad: 289 // CHECK1-NEXT: [[TMP5:%.*]] = landingpad { i8*, i32 } 290 // CHECK1-NEXT: catch i8* null 291 // CHECK1-NEXT: [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP5]], 0 292 // CHECK1-NEXT: call void @__clang_call_terminate(i8* [[TMP6]]) #[[ATTR6]] 293 // CHECK1-NEXT: unreachable 294 // 295 // 296 // CHECK1-LABEL: define {{[^@]+}}@_Z3fooIPPcEvT_ 297 // CHECK1-SAME: (i8** [[ARGC:%.*]]) #[[ATTR3]] comdat { 298 // CHECK1-NEXT: entry: 299 // CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8 300 // CHECK1-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8 301 // CHECK1-NEXT: ret void 302 // 303 // 304 // CHECK2-LABEL: define {{[^@]+}}@main 305 // CHECK2-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] !dbg [[DBG11:![0-9]+]] { 306 // CHECK2-NEXT: entry: 307 // CHECK2-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 308 // CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 309 // CHECK2-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8 310 // CHECK2-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 311 // CHECK2-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 312 // CHECK2-NEXT: store i32 0, i32* [[RETVAL]], align 4 313 // CHECK2-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 314 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META17:![0-9]+]], metadata !DIExpression()), !dbg [[DBG18:![0-9]+]] 315 // CHECK2-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 316 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i8*** [[ARGV_ADDR]], metadata [[META19:![0-9]+]], metadata !DIExpression()), !dbg [[DBG20:![0-9]+]] 317 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4, !dbg [[DBG21:![0-9]+]] 318 // CHECK2-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64, !dbg [[DBG22:![0-9]+]] 319 // CHECK2-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG22]] 320 // CHECK2-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8, !dbg [[DBG22]] 321 // CHECK2-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16, !dbg [[DBG22]] 322 // CHECK2-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8, !dbg [[DBG22]] 323 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[__VLA_EXPR0]], metadata [[META23:![0-9]+]], metadata !DIExpression()), !dbg [[DBG25:![0-9]+]] 324 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32* [[VLA]], metadata [[META26:![0-9]+]], metadata !DIExpression()), !dbg [[DBG30:![0-9]+]] 325 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]], i32* [[VLA]]), !dbg [[DBG31:![0-9]+]] 326 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB5:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]]), !dbg [[DBG32:![0-9]+]] 327 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB9:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP1]], i32* [[VLA]]), !dbg [[DBG33:![0-9]+]] 328 // CHECK2-NEXT: [[TMP3:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8, !dbg [[DBG34:![0-9]+]] 329 // CHECK2-NEXT: [[CALL:%.*]] = call i32 @_Z5tmainIPPcEiT_(i8** [[TMP3]]), !dbg [[DBG35:![0-9]+]] 330 // CHECK2-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4, !dbg [[DBG36:![0-9]+]] 331 // CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG37:![0-9]+]] 332 // CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP4]]), !dbg [[DBG37]] 333 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[RETVAL]], align 4, !dbg [[DBG37]] 334 // CHECK2-NEXT: ret i32 [[TMP5]], !dbg [[DBG37]] 335 // 336 // 337 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined._debug__ 338 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG38:![0-9]+]] { 339 // CHECK2-NEXT: entry: 340 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 341 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 342 // CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 343 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 344 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 345 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META46:![0-9]+]], metadata !DIExpression()), !dbg [[DBG47:![0-9]+]] 346 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 347 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META48:![0-9]+]], metadata !DIExpression()), !dbg [[DBG47]] 348 // CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 349 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META49:![0-9]+]], metadata !DIExpression()), !dbg [[DBG47]] 350 // CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 351 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META50:![0-9]+]], metadata !DIExpression()), !dbg [[DBG51:![0-9]+]] 352 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG52:![0-9]+]] 353 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG52]] 354 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1, !dbg [[DBG53:![0-9]+]] 355 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG53]] 356 // CHECK2-NEXT: invoke void @_Z3fooIiEvT_(i32 [[TMP2]]) 357 // CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG52]] 358 // CHECK2: invoke.cont: 359 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* @global, align 4, !dbg [[DBG54:![0-9]+]] 360 // CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1, !dbg [[DBG55:![0-9]+]] 361 // CHECK2-NEXT: store i32 [[TMP3]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG56:![0-9]+]] 362 // CHECK2-NEXT: ret void, !dbg [[DBG54]] 363 // CHECK2: terminate.lpad: 364 // CHECK2-NEXT: [[TMP4:%.*]] = landingpad { i8*, i32 } 365 // CHECK2-NEXT: catch i8* null, !dbg [[DBG52]] 366 // CHECK2-NEXT: [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0, !dbg [[DBG52]] 367 // CHECK2-NEXT: call void @__clang_call_terminate(i8* [[TMP5]]) #[[ATTR7:[0-9]+]], !dbg [[DBG52]] 368 // CHECK2-NEXT: unreachable, !dbg [[DBG52]] 369 // 370 // 371 // CHECK2-LABEL: define {{[^@]+}}@_Z3fooIiEvT_ 372 // CHECK2-SAME: (i32 [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat !dbg [[DBG57:![0-9]+]] { 373 // CHECK2-NEXT: entry: 374 // CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 375 // CHECK2-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 376 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META62:![0-9]+]], metadata !DIExpression()), !dbg [[DBG63:![0-9]+]] 377 // CHECK2-NEXT: ret void, !dbg [[DBG64:![0-9]+]] 378 // 379 // 380 // CHECK2-LABEL: define {{[^@]+}}@__clang_call_terminate 381 // CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { 382 // CHECK2-NEXT: [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR6:[0-9]+]] 383 // CHECK2-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] 384 // CHECK2-NEXT: unreachable 385 // 386 // 387 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined. 388 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] !dbg [[DBG65:![0-9]+]] { 389 // CHECK2-NEXT: entry: 390 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 391 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 392 // CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 393 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 394 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 395 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META66:![0-9]+]], metadata !DIExpression()), !dbg [[DBG67:![0-9]+]] 396 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 397 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META68:![0-9]+]], metadata !DIExpression()), !dbg [[DBG67]] 398 // CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 399 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META69:![0-9]+]], metadata !DIExpression()), !dbg [[DBG67]] 400 // CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 401 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META70:![0-9]+]], metadata !DIExpression()), !dbg [[DBG67]] 402 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG71:![0-9]+]] 403 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG71]] 404 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG71]] 405 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG71]] 406 // CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG71]] 407 // CHECK2-NEXT: call void @.omp_outlined._debug__(i32* [[TMP2]], i32* [[TMP3]], i64 [[TMP0]], i32* [[TMP4]]) #[[ATTR6]], !dbg [[DBG71]] 408 // CHECK2-NEXT: ret void, !dbg [[DBG71]] 409 // 410 // 411 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined._debug__.1 412 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]]) #[[ATTR3]] !dbg [[DBG74:![0-9]+]] { 413 // CHECK2-NEXT: entry: 414 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 415 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 416 // CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 417 // CHECK2-NEXT: [[GLOBAL:%.*]] = alloca i32, align 4 418 // CHECK2-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 419 // CHECK2-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 420 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 421 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META77:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78:![0-9]+]] 422 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 423 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META79:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78]] 424 // CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 425 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META80:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78]] 426 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG81:![0-9]+]] 427 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32* [[GLOBAL]], metadata [[META82:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78]] 428 // CHECK2-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG81]] 429 // CHECK2-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 8, !dbg [[DBG81]] 430 // CHECK2-NEXT: [[VLA1:%.*]] = alloca i32, i64 [[TMP0]], align 16, !dbg [[DBG81]] 431 // CHECK2-NEXT: store i64 [[TMP0]], i64* [[__VLA_EXPR0]], align 8, !dbg [[DBG81]] 432 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[__VLA_EXPR0]], metadata [[META83:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78]] 433 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32* [[VLA1]], metadata [[META84:![0-9]+]], metadata !DIExpression()), !dbg [[DBG78]] 434 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP0]], i32* [[VLA1]], i32* [[GLOBAL]]), !dbg [[DBG81]] 435 // CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG85:![0-9]+]] 436 // CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP2]]), !dbg [[DBG85]] 437 // CHECK2-NEXT: ret void, !dbg [[DBG87:![0-9]+]] 438 // 439 // 440 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined._debug__.2 441 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32* nonnull align 4 dereferenceable(4) [[GLOBAL:%.*]]) #[[ATTR3]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG88:![0-9]+]] { 442 // CHECK2-NEXT: entry: 443 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 444 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 445 // CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 446 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 447 // CHECK2-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i32*, align 8 448 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 449 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META91:![0-9]+]], metadata !DIExpression()), !dbg [[DBG92:![0-9]+]] 450 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 451 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META93:![0-9]+]], metadata !DIExpression()), !dbg [[DBG92]] 452 // CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 453 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META94:![0-9]+]], metadata !DIExpression()), !dbg [[DBG92]] 454 // CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 455 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META95:![0-9]+]], metadata !DIExpression()), !dbg [[DBG96:![0-9]+]] 456 // CHECK2-NEXT: store i32* [[GLOBAL]], i32** [[GLOBAL_ADDR]], align 8 457 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[GLOBAL_ADDR]], metadata [[META97:![0-9]+]], metadata !DIExpression()), !dbg [[DBG98:![0-9]+]] 458 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG99:![0-9]+]] 459 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG99]] 460 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[GLOBAL_ADDR]], align 8, !dbg [[DBG99]] 461 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1, !dbg [[DBG100:![0-9]+]] 462 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG100]] 463 // CHECK2-NEXT: invoke void @_Z3fooIiEvT_(i32 [[TMP3]]) 464 // CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG99]] 465 // CHECK2: invoke.cont: 466 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]], align 4, !dbg [[DBG101:![0-9]+]] 467 // CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1, !dbg [[DBG102:![0-9]+]] 468 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG103:![0-9]+]] 469 // CHECK2-NEXT: ret void, !dbg [[DBG101]] 470 // CHECK2: terminate.lpad: 471 // CHECK2-NEXT: [[TMP5:%.*]] = landingpad { i8*, i32 } 472 // CHECK2-NEXT: catch i8* null, !dbg [[DBG99]] 473 // CHECK2-NEXT: [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP5]], 0, !dbg [[DBG99]] 474 // CHECK2-NEXT: call void @__clang_call_terminate(i8* [[TMP6]]) #[[ATTR7]], !dbg [[DBG99]] 475 // CHECK2-NEXT: unreachable, !dbg [[DBG99]] 476 // 477 // 478 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3 479 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i32* nonnull align 4 dereferenceable(4) [[GLOBAL:%.*]]) #[[ATTR3]] !dbg [[DBG104:![0-9]+]] { 480 // CHECK2-NEXT: entry: 481 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 482 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 483 // CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 484 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 485 // CHECK2-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i32*, align 8 486 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 487 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META105:![0-9]+]], metadata !DIExpression()), !dbg [[DBG106:![0-9]+]] 488 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 489 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META107:![0-9]+]], metadata !DIExpression()), !dbg [[DBG106]] 490 // CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 491 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META108:![0-9]+]], metadata !DIExpression()), !dbg [[DBG106]] 492 // CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 493 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META109:![0-9]+]], metadata !DIExpression()), !dbg [[DBG106]] 494 // CHECK2-NEXT: store i32* [[GLOBAL]], i32** [[GLOBAL_ADDR]], align 8 495 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[GLOBAL_ADDR]], metadata [[META110:![0-9]+]], metadata !DIExpression()), !dbg [[DBG106]] 496 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG111:![0-9]+]] 497 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG111]] 498 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[GLOBAL_ADDR]], align 8, !dbg [[DBG111]] 499 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG111]] 500 // CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG111]] 501 // CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG111]] 502 // CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[GLOBAL_ADDR]], align 8, !dbg [[DBG111]] 503 // CHECK2-NEXT: call void @.omp_outlined._debug__.2(i32* [[TMP3]], i32* [[TMP4]], i64 [[TMP0]], i32* [[TMP5]], i32* [[TMP6]]) #[[ATTR6]], !dbg [[DBG111]] 504 // CHECK2-NEXT: ret void, !dbg [[DBG111]] 505 // 506 // 507 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4 508 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]]) #[[ATTR3]] !dbg [[DBG112:![0-9]+]] { 509 // CHECK2-NEXT: entry: 510 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 511 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 512 // CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 513 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 514 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META113:![0-9]+]], metadata !DIExpression()), !dbg [[DBG114:![0-9]+]] 515 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 516 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META115:![0-9]+]], metadata !DIExpression()), !dbg [[DBG114]] 517 // CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 518 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META116:![0-9]+]], metadata !DIExpression()), !dbg [[DBG114]] 519 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG117:![0-9]+]] 520 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG117]] 521 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG117]] 522 // CHECK2-NEXT: call void @.omp_outlined._debug__.1(i32* [[TMP1]], i32* [[TMP2]], i64 [[TMP0]]) #[[ATTR6]], !dbg [[DBG117]] 523 // CHECK2-NEXT: ret void, !dbg [[DBG117]] 524 // 525 // 526 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined._debug__.5 527 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] !dbg [[DBG118:![0-9]+]] { 528 // CHECK2-NEXT: entry: 529 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 530 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 531 // CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 532 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 533 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 534 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META119:![0-9]+]], metadata !DIExpression()), !dbg [[DBG120:![0-9]+]] 535 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 536 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META121:![0-9]+]], metadata !DIExpression()), !dbg [[DBG120]] 537 // CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 538 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META122:![0-9]+]], metadata !DIExpression()), !dbg [[DBG120]] 539 // CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 540 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META123:![0-9]+]], metadata !DIExpression()), !dbg [[DBG124:![0-9]+]] 541 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG125:![0-9]+]] 542 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG125]] 543 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB7:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i32*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP0]], i32* [[TMP1]]), !dbg [[DBG125]] 544 // CHECK2-NEXT: ret void, !dbg [[DBG126:![0-9]+]] 545 // 546 // 547 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined._debug__.6 548 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG127:![0-9]+]] { 549 // CHECK2-NEXT: entry: 550 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 551 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 552 // CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 553 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 554 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 555 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META128:![0-9]+]], metadata !DIExpression()), !dbg [[DBG129:![0-9]+]] 556 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 557 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META130:![0-9]+]], metadata !DIExpression()), !dbg [[DBG129]] 558 // CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 559 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META131:![0-9]+]], metadata !DIExpression()), !dbg [[DBG129]] 560 // CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 561 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META132:![0-9]+]], metadata !DIExpression()), !dbg [[DBG133:![0-9]+]] 562 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG134:![0-9]+]] 563 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG134]] 564 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1, !dbg [[DBG135:![0-9]+]] 565 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG135]] 566 // CHECK2-NEXT: invoke void @_Z3fooIiEvT_(i32 [[TMP2]]) 567 // CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG134]] 568 // CHECK2: invoke.cont: 569 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* @global, align 4, !dbg [[DBG136:![0-9]+]] 570 // CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i64 1, !dbg [[DBG137:![0-9]+]] 571 // CHECK2-NEXT: store i32 [[TMP3]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG138:![0-9]+]] 572 // CHECK2-NEXT: ret void, !dbg [[DBG136]] 573 // CHECK2: terminate.lpad: 574 // CHECK2-NEXT: [[TMP4:%.*]] = landingpad { i8*, i32 } 575 // CHECK2-NEXT: catch i8* null, !dbg [[DBG134]] 576 // CHECK2-NEXT: [[TMP5:%.*]] = extractvalue { i8*, i32 } [[TMP4]], 0, !dbg [[DBG134]] 577 // CHECK2-NEXT: call void @__clang_call_terminate(i8* [[TMP5]]) #[[ATTR7]], !dbg [[DBG134]] 578 // CHECK2-NEXT: unreachable, !dbg [[DBG134]] 579 // 580 // 581 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..7 582 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] !dbg [[DBG139:![0-9]+]] { 583 // CHECK2-NEXT: entry: 584 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 585 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 586 // CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 587 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 588 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 589 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META140:![0-9]+]], metadata !DIExpression()), !dbg [[DBG141:![0-9]+]] 590 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 591 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META142:![0-9]+]], metadata !DIExpression()), !dbg [[DBG141]] 592 // CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 593 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META143:![0-9]+]], metadata !DIExpression()), !dbg [[DBG141]] 594 // CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 595 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META144:![0-9]+]], metadata !DIExpression()), !dbg [[DBG141]] 596 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG145:![0-9]+]] 597 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG145]] 598 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG145]] 599 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG145]] 600 // CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG145]] 601 // CHECK2-NEXT: call void @.omp_outlined._debug__.6(i32* [[TMP2]], i32* [[TMP3]], i64 [[TMP0]], i32* [[TMP4]]) #[[ATTR6]], !dbg [[DBG145]] 602 // CHECK2-NEXT: ret void, !dbg [[DBG145]] 603 // 604 // 605 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..8 606 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] !dbg [[DBG146:![0-9]+]] { 607 // CHECK2-NEXT: entry: 608 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 609 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 610 // CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 611 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 612 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 613 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META147:![0-9]+]], metadata !DIExpression()), !dbg [[DBG148:![0-9]+]] 614 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 615 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META149:![0-9]+]], metadata !DIExpression()), !dbg [[DBG148]] 616 // CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 617 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META150:![0-9]+]], metadata !DIExpression()), !dbg [[DBG148]] 618 // CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 619 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[A_ADDR]], metadata [[META151:![0-9]+]], metadata !DIExpression()), !dbg [[DBG148]] 620 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG152:![0-9]+]] 621 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG152]] 622 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG152]] 623 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG152]] 624 // CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[A_ADDR]], align 8, !dbg [[DBG152]] 625 // CHECK2-NEXT: call void @.omp_outlined._debug__.5(i32* [[TMP2]], i32* [[TMP3]], i64 [[TMP0]], i32* [[TMP4]]) #[[ATTR6]], !dbg [[DBG152]] 626 // CHECK2-NEXT: ret void, !dbg [[DBG152]] 627 // 628 // 629 // CHECK2-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_ 630 // CHECK2-SAME: (i8** [[ARGC:%.*]]) #[[ATTR4]] comdat !dbg [[DBG153:![0-9]+]] { 631 // CHECK2-NEXT: entry: 632 // CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8 633 // CHECK2-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8 634 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META158:![0-9]+]], metadata !DIExpression()), !dbg [[DBG159:![0-9]+]] 635 // CHECK2-NEXT: [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8, !dbg [[DBG160:![0-9]+]] 636 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0, !dbg [[DBG160]] 637 // CHECK2-NEXT: [[TMP1:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8, !dbg [[DBG160]] 638 // CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0, !dbg [[DBG160]] 639 // CHECK2-NEXT: [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1, !dbg [[DBG160]] 640 // CHECK2-NEXT: [[TMP3:%.*]] = zext i8 [[TMP2]] to i64, !dbg [[DBG161:![0-9]+]] 641 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB11:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i8***, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i8*** [[ARGC_ADDR]], i64 [[TMP3]]), !dbg [[DBG162:![0-9]+]] 642 // CHECK2-NEXT: ret i32 0, !dbg [[DBG163:![0-9]+]] 643 // 644 // 645 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined._debug__.9 646 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i8*** nonnull align 8 dereferenceable(8) [[ARGC:%.*]], i64 [[VLA:%.*]]) #[[ATTR3]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG164:![0-9]+]] { 647 // CHECK2-NEXT: entry: 648 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 649 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 650 // CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i8***, align 8 651 // CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 652 // CHECK2-NEXT: [[VAR:%.*]] = alloca double*, align 8 653 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 654 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META168:![0-9]+]], metadata !DIExpression()), !dbg [[DBG169:![0-9]+]] 655 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 656 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META170:![0-9]+]], metadata !DIExpression()), !dbg [[DBG169]] 657 // CHECK2-NEXT: store i8*** [[ARGC]], i8**** [[ARGC_ADDR]], align 8 658 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i8**** [[ARGC_ADDR]], metadata [[META171:![0-9]+]], metadata !DIExpression()), !dbg [[DBG172:![0-9]+]] 659 // CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 660 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META173:![0-9]+]], metadata !DIExpression()), !dbg [[DBG169]] 661 // CHECK2-NEXT: [[TMP0:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 8, !dbg [[DBG174:![0-9]+]] 662 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG174]] 663 // CHECK2-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[TMP0]], align 8, !dbg [[DBG175:![0-9]+]] 664 // CHECK2-NEXT: invoke void @_Z3fooIPPcEvT_(i8** [[TMP2]]) 665 // CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG177:![0-9]+]] 666 // CHECK2: invoke.cont: 667 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata double** [[VAR]], metadata [[META178:![0-9]+]], metadata !DIExpression()), !dbg [[DBG185:![0-9]+]] 668 // CHECK2-NEXT: [[TMP3:%.*]] = load double*, double** [[VAR]], align 8, !dbg [[DBG186:![0-9]+]] 669 // CHECK2-NEXT: [[TMP4:%.*]] = mul nsw i64 0, [[TMP1]], !dbg [[DBG186]] 670 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP3]], i64 [[TMP4]], !dbg [[DBG186]] 671 // CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX]], i64 0, !dbg [[DBG186]] 672 // CHECK2-NEXT: ret void, !dbg [[DBG187:![0-9]+]] 673 // CHECK2: terminate.lpad: 674 // CHECK2-NEXT: [[TMP5:%.*]] = landingpad { i8*, i32 } 675 // CHECK2-NEXT: catch i8* null, !dbg [[DBG177]] 676 // CHECK2-NEXT: [[TMP6:%.*]] = extractvalue { i8*, i32 } [[TMP5]], 0, !dbg [[DBG177]] 677 // CHECK2-NEXT: call void @__clang_call_terminate(i8* [[TMP6]]) #[[ATTR7]], !dbg [[DBG177]] 678 // CHECK2-NEXT: unreachable, !dbg [[DBG177]] 679 // 680 // 681 // CHECK2-LABEL: define {{[^@]+}}@_Z3fooIPPcEvT_ 682 // CHECK2-SAME: (i8** [[ARGC:%.*]]) #[[ATTR4]] comdat !dbg [[DBG188:![0-9]+]] { 683 // CHECK2-NEXT: entry: 684 // CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8 685 // CHECK2-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8 686 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META191:![0-9]+]], metadata !DIExpression()), !dbg [[DBG192:![0-9]+]] 687 // CHECK2-NEXT: ret void, !dbg [[DBG193:![0-9]+]] 688 // 689 // 690 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..10 691 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i8*** nonnull align 8 dereferenceable(8) [[ARGC:%.*]], i64 [[VLA:%.*]]) #[[ATTR3]] !dbg [[DBG194:![0-9]+]] { 692 // CHECK2-NEXT: entry: 693 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 694 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 695 // CHECK2-NEXT: [[ARGC_ADDR:%.*]] = alloca i8***, align 8 696 // CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 697 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 698 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTGLOBAL_TID__ADDR]], metadata [[META195:![0-9]+]], metadata !DIExpression()), !dbg [[DBG196:![0-9]+]] 699 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 700 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i32** [[DOTBOUND_TID__ADDR]], metadata [[META197:![0-9]+]], metadata !DIExpression()), !dbg [[DBG196]] 701 // CHECK2-NEXT: store i8*** [[ARGC]], i8**** [[ARGC_ADDR]], align 8 702 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i8**** [[ARGC_ADDR]], metadata [[META198:![0-9]+]], metadata !DIExpression()), !dbg [[DBG196]] 703 // CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 704 // CHECK2-NEXT: call void @llvm.dbg.declare(metadata i64* [[VLA_ADDR]], metadata [[META199:![0-9]+]], metadata !DIExpression()), !dbg [[DBG196]] 705 // CHECK2-NEXT: [[TMP0:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 8, !dbg [[DBG200:![0-9]+]] 706 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8, !dbg [[DBG200]] 707 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !dbg [[DBG200]] 708 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTBOUND_TID__ADDR]], align 8, !dbg [[DBG200]] 709 // CHECK2-NEXT: [[TMP4:%.*]] = load i8***, i8**** [[ARGC_ADDR]], align 8, !dbg [[DBG200]] 710 // CHECK2-NEXT: call void @.omp_outlined._debug__.9(i32* [[TMP2]], i32* [[TMP3]], i8*** [[TMP4]], i64 [[TMP1]]) #[[ATTR6]], !dbg [[DBG200]] 711 // CHECK2-NEXT: ret void, !dbg [[DBG200]] 712 // 713 // 714 // CHECK3-LABEL: define {{[^@]+}}@main 715 // CHECK3-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] { 716 // CHECK3-NEXT: entry: 717 // CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 718 // CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 719 // CHECK3-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8 720 // CHECK3-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 721 // CHECK3-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 722 // CHECK3-NEXT: store i32 0, i32* [[RETVAL]], align 4 723 // CHECK3-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 724 // CHECK3-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 725 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 726 // CHECK3-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 727 // CHECK3-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 728 // CHECK3-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8 729 // CHECK3-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16 730 // CHECK3-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8 731 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) 732 // CHECK3-NEXT: br label [[OMP_PARALLEL:%.*]] 733 // CHECK3: omp_parallel: 734 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @main..omp_par to void (i32*, i32*, ...)*), i32* [[VLA]]) 735 // CHECK3-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]] 736 // CHECK3: omp.par.outlined.exit: 737 // CHECK3-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]] 738 // CHECK3: omp.par.exit.split: 739 // CHECK3-NEXT: [[TMP3:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8 740 // CHECK3-NEXT: [[CALL:%.*]] = call i32 @_Z5tmainIPPcEiT_(i8** [[TMP3]]) 741 // CHECK3-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 742 // CHECK3-NEXT: [[TMP4:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 743 // CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP4]]) 744 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[RETVAL]], align 4 745 // CHECK3-NEXT: ret i32 [[TMP5]] 746 // 747 // 748 // CHECK3-LABEL: define {{[^@]+}}@main..omp_par 749 // CHECK3-SAME: (i32* noalias [[TID_ADDR:%.*]], i32* noalias [[ZERO_ADDR:%.*]], i32* [[VLA:%.*]]) #[[ATTR1:[0-9]+]] { 750 // CHECK3-NEXT: omp.par.entry: 751 // CHECK3-NEXT: [[TID_ADDR_LOCAL:%.*]] = alloca i32, align 4 752 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[TID_ADDR]], align 4 753 // CHECK3-NEXT: store i32 [[TMP0]], i32* [[TID_ADDR_LOCAL]], align 4 754 // CHECK3-NEXT: [[TID:%.*]] = load i32, i32* [[TID_ADDR_LOCAL]], align 4 755 // CHECK3-NEXT: br label [[OMP_PAR_REGION:%.*]] 756 // CHECK3: omp.par.outlined.exit.exitStub: 757 // CHECK3-NEXT: ret void 758 // CHECK3: omp.par.region: 759 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1 760 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 761 // CHECK3-NEXT: call void @_Z3fooIiEvT_(i32 [[TMP1]]) 762 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* @global, align 4 763 // CHECK3-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1 764 // CHECK3-NEXT: store i32 [[TMP2]], i32* [[ARRAYIDX1]], align 4 765 // CHECK3-NEXT: br label [[OMP_PAR_PRE_FINALIZE:%.*]] 766 // CHECK3: omp.par.pre_finalize: 767 // CHECK3-NEXT: br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]] 768 // 769 // 770 // CHECK3-LABEL: define {{[^@]+}}@_Z3fooIiEvT_ 771 // CHECK3-SAME: (i32 [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat { 772 // CHECK3-NEXT: entry: 773 // CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 774 // CHECK3-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 775 // CHECK3-NEXT: ret void 776 // 777 // 778 // CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_ 779 // CHECK3-SAME: (i8** [[ARGC:%.*]]) #[[ATTR5:[0-9]+]] comdat { 780 // CHECK3-NEXT: entry: 781 // CHECK3-NEXT: [[DOTRELOADED:%.*]] = alloca i64, align 8 782 // CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8 783 // CHECK3-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8 784 // CHECK3-NEXT: [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8 785 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0 786 // CHECK3-NEXT: [[TMP1:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8 787 // CHECK3-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0 788 // CHECK3-NEXT: [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1 789 // CHECK3-NEXT: [[TMP3:%.*]] = zext i8 [[TMP2]] to i64 790 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 791 // CHECK3-NEXT: store i64 [[TMP3]], i64* [[DOTRELOADED]], align 8 792 // CHECK3-NEXT: br label [[OMP_PARALLEL:%.*]] 793 // CHECK3: omp_parallel: 794 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64*, i8***)* @_Z5tmainIPPcEiT_..omp_par to void (i32*, i32*, ...)*), i64* [[DOTRELOADED]], i8*** [[ARGC_ADDR]]) 795 // CHECK3-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]] 796 // CHECK3: omp.par.outlined.exit: 797 // CHECK3-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]] 798 // CHECK3: omp.par.exit.split: 799 // CHECK3-NEXT: ret i32 0 800 // 801 // 802 // CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_..omp_par 803 // CHECK3-SAME: (i32* noalias [[TID_ADDR:%.*]], i32* noalias [[ZERO_ADDR:%.*]], i64* [[DOTRELOADED:%.*]], i8*** [[ARGC_ADDR:%.*]]) #[[ATTR1]] { 804 // CHECK3-NEXT: omp.par.entry: 805 // CHECK3-NEXT: [[TID_ADDR_LOCAL:%.*]] = alloca i32, align 4 806 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[TID_ADDR]], align 4 807 // CHECK3-NEXT: store i32 [[TMP0]], i32* [[TID_ADDR_LOCAL]], align 4 808 // CHECK3-NEXT: [[TID:%.*]] = load i32, i32* [[TID_ADDR_LOCAL]], align 4 809 // CHECK3-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTRELOADED]], align 8 810 // CHECK3-NEXT: [[VAR:%.*]] = alloca double*, align 8 811 // CHECK3-NEXT: br label [[OMP_PAR_REGION:%.*]] 812 // CHECK3: omp.par.outlined.exit.exitStub: 813 // CHECK3-NEXT: ret void 814 // CHECK3: omp.par.region: 815 // CHECK3-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8 816 // CHECK3-NEXT: call void @_Z3fooIPPcEvT_(i8** [[TMP2]]) 817 // CHECK3-NEXT: [[TMP3:%.*]] = load double*, double** [[VAR]], align 8 818 // CHECK3-NEXT: [[TMP4:%.*]] = mul nsw i64 0, [[TMP1]] 819 // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[TMP3]], i64 [[TMP4]] 820 // CHECK3-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX2]], i64 0 821 // CHECK3-NEXT: br label [[OMP_PAR_PRE_FINALIZE:%.*]] 822 // CHECK3: omp.par.pre_finalize: 823 // CHECK3-NEXT: br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]] 824 // 825 // 826 // CHECK3-LABEL: define {{[^@]+}}@_Z3fooIPPcEvT_ 827 // CHECK3-SAME: (i8** [[ARGC:%.*]]) #[[ATTR4]] comdat { 828 // CHECK3-NEXT: entry: 829 // CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8 830 // CHECK3-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8 831 // CHECK3-NEXT: ret void 832 // 833 // 834 // CHECK4-LABEL: define {{[^@]+}}@main 835 // CHECK4-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] !dbg [[DBG11:![0-9]+]] { 836 // CHECK4-NEXT: entry: 837 // CHECK4-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 838 // CHECK4-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 839 // CHECK4-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8 840 // CHECK4-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 841 // CHECK4-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 842 // CHECK4-NEXT: store i32 0, i32* [[RETVAL]], align 4 843 // CHECK4-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 844 // CHECK4-NEXT: call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META17:![0-9]+]], metadata !DIExpression()), !dbg [[DBG18:![0-9]+]] 845 // CHECK4-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 846 // CHECK4-NEXT: call void @llvm.dbg.declare(metadata i8*** [[ARGV_ADDR]], metadata [[META19:![0-9]+]], metadata !DIExpression()), !dbg [[DBG18]] 847 // CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4, !dbg [[DBG20:![0-9]+]] 848 // CHECK4-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64, !dbg [[DBG20]] 849 // CHECK4-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG20]] 850 // CHECK4-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8, !dbg [[DBG20]] 851 // CHECK4-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16, !dbg [[DBG20]] 852 // CHECK4-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8, !dbg [[DBG20]] 853 // CHECK4-NEXT: call void @llvm.dbg.declare(metadata i64* [[__VLA_EXPR0]], metadata [[META21:![0-9]+]], metadata !DIExpression()), !dbg [[DBG23:![0-9]+]] 854 // CHECK4-NEXT: call void @llvm.dbg.declare(metadata i32* [[VLA]], metadata [[META24:![0-9]+]], metadata !DIExpression()), !dbg [[DBG20]] 855 // CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]), !dbg [[DBG28:![0-9]+]] 856 // CHECK4-NEXT: br label [[OMP_PARALLEL:%.*]] 857 // CHECK4: omp_parallel: 858 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @main..omp_par to void (i32*, i32*, ...)*), i32* [[VLA]]), !dbg [[DBG29:![0-9]+]] 859 // CHECK4-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]] 860 // CHECK4: omp.par.outlined.exit: 861 // CHECK4-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]] 862 // CHECK4: omp.par.exit.split: 863 // CHECK4-NEXT: [[TMP3:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8, !dbg [[DBG30:![0-9]+]] 864 // CHECK4-NEXT: [[CALL:%.*]] = call i32 @_Z5tmainIPPcEiT_(i8** [[TMP3]]), !dbg [[DBG30]] 865 // CHECK4-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4, !dbg [[DBG30]] 866 // CHECK4-NEXT: [[TMP4:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG31:![0-9]+]] 867 // CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP4]]), !dbg [[DBG31]] 868 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[RETVAL]], align 4, !dbg [[DBG31]] 869 // CHECK4-NEXT: ret i32 [[TMP5]], !dbg [[DBG31]] 870 // 871 // 872 // CHECK4-LABEL: define {{[^@]+}}@main..omp_par 873 // CHECK4-SAME: (i32* noalias [[TID_ADDR:%.*]], i32* noalias [[ZERO_ADDR:%.*]], i32* [[VLA:%.*]]) #[[ATTR1:[0-9]+]] !dbg [[DBG32:![0-9]+]] { 874 // CHECK4-NEXT: omp.par.entry: 875 // CHECK4-NEXT: [[TID_ADDR_LOCAL:%.*]] = alloca i32, align 4 876 // CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[TID_ADDR]], align 4 877 // CHECK4-NEXT: store i32 [[TMP0]], i32* [[TID_ADDR_LOCAL]], align 4 878 // CHECK4-NEXT: [[TID:%.*]] = load i32, i32* [[TID_ADDR_LOCAL]], align 4 879 // CHECK4-NEXT: br label [[OMP_PAR_REGION:%.*]] 880 // CHECK4: omp.par.outlined.exit.exitStub: 881 // CHECK4-NEXT: ret void 882 // CHECK4: omp.par.region: 883 // CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1, !dbg [[DBG34:![0-9]+]] 884 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG34]] 885 // CHECK4-NEXT: call void @_Z3fooIiEvT_(i32 [[TMP1]]), !dbg [[DBG34]] 886 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* @global, align 4, !dbg [[DBG34]] 887 // CHECK4-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1, !dbg [[DBG34]] 888 // CHECK4-NEXT: store i32 [[TMP2]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG34]] 889 // CHECK4-NEXT: br label [[OMP_PAR_PRE_FINALIZE:%.*]], !dbg [[DBG34]] 890 // CHECK4: omp.par.pre_finalize: 891 // CHECK4-NEXT: br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]], !dbg [[DBG34]] 892 // 893 // 894 // CHECK4-LABEL: define {{[^@]+}}@_Z3fooIiEvT_ 895 // CHECK4-SAME: (i32 [[ARGC:%.*]]) #[[ATTR5:[0-9]+]] comdat !dbg [[DBG35:![0-9]+]] { 896 // CHECK4-NEXT: entry: 897 // CHECK4-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 898 // CHECK4-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 899 // CHECK4-NEXT: call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META40:![0-9]+]], metadata !DIExpression()), !dbg [[DBG41:![0-9]+]] 900 // CHECK4-NEXT: ret void, !dbg [[DBG41]] 901 // 902 // 903 // CHECK4-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_ 904 // CHECK4-SAME: (i8** [[ARGC:%.*]]) #[[ATTR6:[0-9]+]] comdat !dbg [[DBG44:![0-9]+]] { 905 // CHECK4-NEXT: entry: 906 // CHECK4-NEXT: [[DOTRELOADED:%.*]] = alloca i64, align 8 907 // CHECK4-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8 908 // CHECK4-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8 909 // CHECK4-NEXT: call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META49:![0-9]+]], metadata !DIExpression()), !dbg [[DBG50:![0-9]+]] 910 // CHECK4-NEXT: [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8, !dbg [[DBG51:![0-9]+]] 911 // CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0, !dbg [[DBG51]] 912 // CHECK4-NEXT: [[TMP1:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8, !dbg [[DBG51]] 913 // CHECK4-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0, !dbg [[DBG51]] 914 // CHECK4-NEXT: [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1, !dbg [[DBG51]] 915 // CHECK4-NEXT: [[TMP3:%.*]] = zext i8 [[TMP2]] to i64, !dbg [[DBG51]] 916 // CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]]), !dbg [[DBG52:![0-9]+]] 917 // CHECK4-NEXT: store i64 [[TMP3]], i64* [[DOTRELOADED]], align 8 918 // CHECK4-NEXT: br label [[OMP_PARALLEL:%.*]] 919 // CHECK4: omp_parallel: 920 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64*, i8***)* @_Z5tmainIPPcEiT_..omp_par to void (i32*, i32*, ...)*), i64* [[DOTRELOADED]], i8*** [[ARGC_ADDR]]), !dbg [[DBG53:![0-9]+]] 921 // CHECK4-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]] 922 // CHECK4: omp.par.outlined.exit: 923 // CHECK4-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]] 924 // CHECK4: omp.par.exit.split: 925 // CHECK4-NEXT: ret i32 0, !dbg [[DBG55:![0-9]+]] 926 // 927 // 928 // CHECK4-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_..omp_par 929 // CHECK4-SAME: (i32* noalias [[TID_ADDR:%.*]], i32* noalias [[ZERO_ADDR:%.*]], i64* [[DOTRELOADED:%.*]], i8*** [[ARGC_ADDR:%.*]]) #[[ATTR1]] !dbg [[DBG56:![0-9]+]] { 930 // CHECK4-NEXT: omp.par.entry: 931 // CHECK4-NEXT: [[TID_ADDR_LOCAL:%.*]] = alloca i32, align 4 932 // CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[TID_ADDR]], align 4 933 // CHECK4-NEXT: store i32 [[TMP0]], i32* [[TID_ADDR_LOCAL]], align 4 934 // CHECK4-NEXT: [[TID:%.*]] = load i32, i32* [[TID_ADDR_LOCAL]], align 4 935 // CHECK4-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTRELOADED]], align 8 936 // CHECK4-NEXT: [[VAR:%.*]] = alloca double*, align 8 937 // CHECK4-NEXT: br label [[OMP_PAR_REGION:%.*]] 938 // CHECK4: omp.par.outlined.exit.exitStub: 939 // CHECK4-NEXT: ret void 940 // CHECK4: omp.par.region: 941 // CHECK4-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8, !dbg [[DBG57:![0-9]+]] 942 // CHECK4-NEXT: call void @_Z3fooIPPcEvT_(i8** [[TMP2]]), !dbg [[DBG57]] 943 // CHECK4-NEXT: call void @llvm.dbg.declare(metadata double** [[VAR]], metadata [[META58:![0-9]+]], metadata !DIExpression()), !dbg [[DBG65:![0-9]+]] 944 // CHECK4-NEXT: [[TMP3:%.*]] = load double*, double** [[VAR]], align 8, !dbg [[DBG65]] 945 // CHECK4-NEXT: [[TMP4:%.*]] = mul nsw i64 0, [[TMP1]], !dbg [[DBG65]] 946 // CHECK4-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[TMP3]], i64 [[TMP4]], !dbg [[DBG65]] 947 // CHECK4-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX2]], i64 0, !dbg [[DBG65]] 948 // CHECK4-NEXT: br label [[OMP_PAR_PRE_FINALIZE:%.*]], !dbg [[DBG66:![0-9]+]] 949 // CHECK4: omp.par.pre_finalize: 950 // CHECK4-NEXT: br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]], !dbg [[DBG66]] 951 // 952 // 953 // CHECK4-LABEL: define {{[^@]+}}@_Z3fooIPPcEvT_ 954 // CHECK4-SAME: (i8** [[ARGC:%.*]]) #[[ATTR5]] comdat !dbg [[DBG67:![0-9]+]] { 955 // CHECK4-NEXT: entry: 956 // CHECK4-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8 957 // CHECK4-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8 958 // CHECK4-NEXT: call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META70:![0-9]+]], metadata !DIExpression()), !dbg [[DBG71:![0-9]+]] 959 // CHECK4-NEXT: ret void, !dbg [[DBG71]] 960 // 961 // 962 // CHECK5-LABEL: define {{[^@]+}}@main 963 // CHECK5-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { 964 // CHECK5-NEXT: entry: 965 // CHECK5-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 966 // CHECK5-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 967 // CHECK5-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8 968 // CHECK5-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 969 // CHECK5-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 970 // CHECK5-NEXT: [[GLOBAL:%.*]] = alloca i32, align 4 971 // CHECK5-NEXT: [[SAVED_STACK2:%.*]] = alloca i8*, align 8 972 // CHECK5-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8 973 // CHECK5-NEXT: store i32 0, i32* [[RETVAL]], align 4 974 // CHECK5-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 975 // CHECK5-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 976 // CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 977 // CHECK5-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 978 // CHECK5-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 979 // CHECK5-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8 980 // CHECK5-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16 981 // CHECK5-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8 982 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1 983 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 984 // CHECK5-NEXT: invoke void @_Z3fooIiEvT_(i32 [[TMP3]]) 985 // CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] 986 // CHECK5: invoke.cont: 987 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* @global, align 4 988 // CHECK5-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1 989 // CHECK5-NEXT: store i32 [[TMP4]], i32* [[ARRAYIDX1]], align 4 990 // CHECK5-NEXT: [[TMP5:%.*]] = call i8* @llvm.stacksave() 991 // CHECK5-NEXT: store i8* [[TMP5]], i8** [[SAVED_STACK2]], align 8 992 // CHECK5-NEXT: [[VLA3:%.*]] = alloca i32, i64 [[TMP1]], align 16 993 // CHECK5-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR1]], align 8 994 // CHECK5-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[VLA3]], i64 1 995 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX4]], align 4 996 // CHECK5-NEXT: invoke void @_Z3fooIiEvT_(i32 [[TMP6]]) 997 // CHECK5-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[TERMINATE_LPAD]] 998 // CHECK5: invoke.cont5: 999 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[GLOBAL]], align 4 1000 // CHECK5-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[VLA3]], i64 1 1001 // CHECK5-NEXT: store i32 [[TMP7]], i32* [[ARRAYIDX6]], align 4 1002 // CHECK5-NEXT: [[TMP8:%.*]] = load i8*, i8** [[SAVED_STACK2]], align 8 1003 // CHECK5-NEXT: call void @llvm.stackrestore(i8* [[TMP8]]) 1004 // CHECK5-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1 1005 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX7]], align 4 1006 // CHECK5-NEXT: invoke void @_Z3fooIiEvT_(i32 [[TMP9]]) 1007 // CHECK5-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[TERMINATE_LPAD]] 1008 // CHECK5: invoke.cont8: 1009 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* @global, align 4 1010 // CHECK5-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1 1011 // CHECK5-NEXT: store i32 [[TMP10]], i32* [[ARRAYIDX9]], align 4 1012 // CHECK5-NEXT: [[TMP11:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8 1013 // CHECK5-NEXT: [[CALL:%.*]] = call i32 @_Z5tmainIPPcEiT_(i8** [[TMP11]]) 1014 // CHECK5-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 1015 // CHECK5-NEXT: [[TMP12:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 1016 // CHECK5-NEXT: call void @llvm.stackrestore(i8* [[TMP12]]) 1017 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[RETVAL]], align 4 1018 // CHECK5-NEXT: ret i32 [[TMP13]] 1019 // CHECK5: terminate.lpad: 1020 // CHECK5-NEXT: [[TMP14:%.*]] = landingpad { i8*, i32 } 1021 // CHECK5-NEXT: catch i8* null 1022 // CHECK5-NEXT: [[TMP15:%.*]] = extractvalue { i8*, i32 } [[TMP14]], 0 1023 // CHECK5-NEXT: call void @__clang_call_terminate(i8* [[TMP15]]) #[[ATTR4:[0-9]+]] 1024 // CHECK5-NEXT: unreachable 1025 // 1026 // 1027 // CHECK5-LABEL: define {{[^@]+}}@_Z3fooIiEvT_ 1028 // CHECK5-SAME: (i32 [[ARGC:%.*]]) #[[ATTR2:[0-9]+]] comdat { 1029 // CHECK5-NEXT: entry: 1030 // CHECK5-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 1031 // CHECK5-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 1032 // CHECK5-NEXT: ret void 1033 // 1034 // 1035 // CHECK5-LABEL: define {{[^@]+}}@__clang_call_terminate 1036 // CHECK5-SAME: (i8* [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] comdat { 1037 // CHECK5-NEXT: [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR5:[0-9]+]] 1038 // CHECK5-NEXT: call void @_ZSt9terminatev() #[[ATTR4]] 1039 // CHECK5-NEXT: unreachable 1040 // 1041 // 1042 // CHECK5-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_ 1043 // CHECK5-SAME: (i8** [[ARGC:%.*]]) #[[ATTR2]] comdat personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { 1044 // CHECK5-NEXT: entry: 1045 // CHECK5-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8 1046 // CHECK5-NEXT: [[VAR:%.*]] = alloca double*, align 8 1047 // CHECK5-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8 1048 // CHECK5-NEXT: [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8 1049 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0 1050 // CHECK5-NEXT: [[TMP1:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8 1051 // CHECK5-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0 1052 // CHECK5-NEXT: [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1 1053 // CHECK5-NEXT: [[TMP3:%.*]] = zext i8 [[TMP2]] to i64 1054 // CHECK5-NEXT: [[TMP4:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8 1055 // CHECK5-NEXT: invoke void @_Z3fooIPPcEvT_(i8** [[TMP4]]) 1056 // CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] 1057 // CHECK5: invoke.cont: 1058 // CHECK5-NEXT: [[TMP5:%.*]] = load double*, double** [[VAR]], align 8 1059 // CHECK5-NEXT: [[TMP6:%.*]] = mul nsw i64 0, [[TMP3]] 1060 // CHECK5-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[TMP5]], i64 [[TMP6]] 1061 // CHECK5-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX2]], i64 0 1062 // CHECK5-NEXT: ret i32 0 1063 // CHECK5: terminate.lpad: 1064 // CHECK5-NEXT: [[TMP7:%.*]] = landingpad { i8*, i32 } 1065 // CHECK5-NEXT: catch i8* null 1066 // CHECK5-NEXT: [[TMP8:%.*]] = extractvalue { i8*, i32 } [[TMP7]], 0 1067 // CHECK5-NEXT: call void @__clang_call_terminate(i8* [[TMP8]]) #[[ATTR4]] 1068 // CHECK5-NEXT: unreachable 1069 // 1070 // 1071 // CHECK5-LABEL: define {{[^@]+}}@_Z3fooIPPcEvT_ 1072 // CHECK5-SAME: (i8** [[ARGC:%.*]]) #[[ATTR2]] comdat { 1073 // CHECK5-NEXT: entry: 1074 // CHECK5-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8 1075 // CHECK5-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8 1076 // CHECK5-NEXT: ret void 1077 // 1078 // 1079 // CHECK6-LABEL: define {{[^@]+}}@main 1080 // CHECK6-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG11:![0-9]+]] { 1081 // CHECK6-NEXT: entry: 1082 // CHECK6-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 1083 // CHECK6-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 1084 // CHECK6-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8 1085 // CHECK6-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 1086 // CHECK6-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 1087 // CHECK6-NEXT: [[GLOBAL:%.*]] = alloca i32, align 4 1088 // CHECK6-NEXT: [[SAVED_STACK2:%.*]] = alloca i8*, align 8 1089 // CHECK6-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8 1090 // CHECK6-NEXT: store i32 0, i32* [[RETVAL]], align 4 1091 // CHECK6-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 1092 // CHECK6-NEXT: call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META17:![0-9]+]], metadata !DIExpression()), !dbg [[DBG18:![0-9]+]] 1093 // CHECK6-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 1094 // CHECK6-NEXT: call void @llvm.dbg.declare(metadata i8*** [[ARGV_ADDR]], metadata [[META19:![0-9]+]], metadata !DIExpression()), !dbg [[DBG20:![0-9]+]] 1095 // CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4, !dbg [[DBG21:![0-9]+]] 1096 // CHECK6-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64, !dbg [[DBG22:![0-9]+]] 1097 // CHECK6-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG22]] 1098 // CHECK6-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8, !dbg [[DBG22]] 1099 // CHECK6-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16, !dbg [[DBG22]] 1100 // CHECK6-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8, !dbg [[DBG22]] 1101 // CHECK6-NEXT: call void @llvm.dbg.declare(metadata i64* [[__VLA_EXPR0]], metadata [[META23:![0-9]+]], metadata !DIExpression()), !dbg [[DBG25:![0-9]+]] 1102 // CHECK6-NEXT: call void @llvm.dbg.declare(metadata i32* [[VLA]], metadata [[META26:![0-9]+]], metadata !DIExpression()), !dbg [[DBG30:![0-9]+]] 1103 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1, !dbg [[DBG31:![0-9]+]] 1104 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG31]] 1105 // CHECK6-NEXT: invoke void @_Z3fooIiEvT_(i32 [[TMP3]]) 1106 // CHECK6-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG33:![0-9]+]] 1107 // CHECK6: invoke.cont: 1108 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* @global, align 4, !dbg [[DBG34:![0-9]+]] 1109 // CHECK6-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1, !dbg [[DBG35:![0-9]+]] 1110 // CHECK6-NEXT: store i32 [[TMP4]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG36:![0-9]+]] 1111 // CHECK6-NEXT: call void @llvm.dbg.declare(metadata i32* [[GLOBAL]], metadata [[META37:![0-9]+]], metadata !DIExpression()), !dbg [[DBG39:![0-9]+]] 1112 // CHECK6-NEXT: [[TMP5:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG40:![0-9]+]] 1113 // CHECK6-NEXT: store i8* [[TMP5]], i8** [[SAVED_STACK2]], align 8, !dbg [[DBG40]] 1114 // CHECK6-NEXT: [[VLA3:%.*]] = alloca i32, i64 [[TMP1]], align 16, !dbg [[DBG40]] 1115 // CHECK6-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR1]], align 8, !dbg [[DBG40]] 1116 // CHECK6-NEXT: call void @llvm.dbg.declare(metadata i64* [[__VLA_EXPR1]], metadata [[META41:![0-9]+]], metadata !DIExpression()), !dbg [[DBG39]] 1117 // CHECK6-NEXT: call void @llvm.dbg.declare(metadata i32* [[VLA3]], metadata [[META42:![0-9]+]], metadata !DIExpression()), !dbg [[DBG39]] 1118 // CHECK6-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[VLA3]], i64 1, !dbg [[DBG43:![0-9]+]] 1119 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX4]], align 4, !dbg [[DBG43]] 1120 // CHECK6-NEXT: invoke void @_Z3fooIiEvT_(i32 [[TMP6]]) 1121 // CHECK6-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[TERMINATE_LPAD]], !dbg [[DBG45:![0-9]+]] 1122 // CHECK6: invoke.cont5: 1123 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[GLOBAL]], align 4, !dbg [[DBG46:![0-9]+]] 1124 // CHECK6-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[VLA3]], i64 1, !dbg [[DBG47:![0-9]+]] 1125 // CHECK6-NEXT: store i32 [[TMP7]], i32* [[ARRAYIDX6]], align 4, !dbg [[DBG48:![0-9]+]] 1126 // CHECK6-NEXT: [[TMP8:%.*]] = load i8*, i8** [[SAVED_STACK2]], align 8, !dbg [[DBG49:![0-9]+]] 1127 // CHECK6-NEXT: call void @llvm.stackrestore(i8* [[TMP8]]), !dbg [[DBG49]] 1128 // CHECK6-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1, !dbg [[DBG50:![0-9]+]] 1129 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX7]], align 4, !dbg [[DBG50]] 1130 // CHECK6-NEXT: invoke void @_Z3fooIiEvT_(i32 [[TMP9]]) 1131 // CHECK6-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[TERMINATE_LPAD]], !dbg [[DBG53:![0-9]+]] 1132 // CHECK6: invoke.cont8: 1133 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* @global, align 4, !dbg [[DBG54:![0-9]+]] 1134 // CHECK6-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1, !dbg [[DBG55:![0-9]+]] 1135 // CHECK6-NEXT: store i32 [[TMP10]], i32* [[ARRAYIDX9]], align 4, !dbg [[DBG56:![0-9]+]] 1136 // CHECK6-NEXT: [[TMP11:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8, !dbg [[DBG57:![0-9]+]] 1137 // CHECK6-NEXT: [[CALL:%.*]] = call i32 @_Z5tmainIPPcEiT_(i8** [[TMP11]]), !dbg [[DBG58:![0-9]+]] 1138 // CHECK6-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4, !dbg [[DBG59:![0-9]+]] 1139 // CHECK6-NEXT: [[TMP12:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG60:![0-9]+]] 1140 // CHECK6-NEXT: call void @llvm.stackrestore(i8* [[TMP12]]), !dbg [[DBG60]] 1141 // CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[RETVAL]], align 4, !dbg [[DBG60]] 1142 // CHECK6-NEXT: ret i32 [[TMP13]], !dbg [[DBG60]] 1143 // CHECK6: terminate.lpad: 1144 // CHECK6-NEXT: [[TMP14:%.*]] = landingpad { i8*, i32 } 1145 // CHECK6-NEXT: catch i8* null, !dbg [[DBG33]] 1146 // CHECK6-NEXT: [[TMP15:%.*]] = extractvalue { i8*, i32 } [[TMP14]], 0, !dbg [[DBG33]] 1147 // CHECK6-NEXT: call void @__clang_call_terminate(i8* [[TMP15]]) #[[ATTR5:[0-9]+]], !dbg [[DBG33]] 1148 // CHECK6-NEXT: unreachable, !dbg [[DBG33]] 1149 // 1150 // 1151 // CHECK6-LABEL: define {{[^@]+}}@_Z3fooIiEvT_ 1152 // CHECK6-SAME: (i32 [[ARGC:%.*]]) #[[ATTR3:[0-9]+]] comdat !dbg [[DBG61:![0-9]+]] { 1153 // CHECK6-NEXT: entry: 1154 // CHECK6-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 1155 // CHECK6-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 1156 // CHECK6-NEXT: call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META66:![0-9]+]], metadata !DIExpression()), !dbg [[DBG67:![0-9]+]] 1157 // CHECK6-NEXT: ret void, !dbg [[DBG68:![0-9]+]] 1158 // 1159 // 1160 // CHECK6-LABEL: define {{[^@]+}}@__clang_call_terminate 1161 // CHECK6-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4:[0-9]+]] comdat { 1162 // CHECK6-NEXT: [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR6:[0-9]+]] 1163 // CHECK6-NEXT: call void @_ZSt9terminatev() #[[ATTR5]] 1164 // CHECK6-NEXT: unreachable 1165 // 1166 // 1167 // CHECK6-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_ 1168 // CHECK6-SAME: (i8** [[ARGC:%.*]]) #[[ATTR3]] comdat personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG69:![0-9]+]] { 1169 // CHECK6-NEXT: entry: 1170 // CHECK6-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8 1171 // CHECK6-NEXT: [[VAR:%.*]] = alloca double*, align 8 1172 // CHECK6-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8 1173 // CHECK6-NEXT: call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META74:![0-9]+]], metadata !DIExpression()), !dbg [[DBG75:![0-9]+]] 1174 // CHECK6-NEXT: [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8, !dbg [[DBG76:![0-9]+]] 1175 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0, !dbg [[DBG76]] 1176 // CHECK6-NEXT: [[TMP1:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8, !dbg [[DBG76]] 1177 // CHECK6-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0, !dbg [[DBG76]] 1178 // CHECK6-NEXT: [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1, !dbg [[DBG76]] 1179 // CHECK6-NEXT: [[TMP3:%.*]] = zext i8 [[TMP2]] to i64, !dbg [[DBG77:![0-9]+]] 1180 // CHECK6-NEXT: [[TMP4:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8, !dbg [[DBG78:![0-9]+]] 1181 // CHECK6-NEXT: invoke void @_Z3fooIPPcEvT_(i8** [[TMP4]]) 1182 // CHECK6-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG81:![0-9]+]] 1183 // CHECK6: invoke.cont: 1184 // CHECK6-NEXT: call void @llvm.dbg.declare(metadata double** [[VAR]], metadata [[META82:![0-9]+]], metadata !DIExpression()), !dbg [[DBG89:![0-9]+]] 1185 // CHECK6-NEXT: [[TMP5:%.*]] = load double*, double** [[VAR]], align 8, !dbg [[DBG90:![0-9]+]] 1186 // CHECK6-NEXT: [[TMP6:%.*]] = mul nsw i64 0, [[TMP3]], !dbg [[DBG90]] 1187 // CHECK6-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[TMP5]], i64 [[TMP6]], !dbg [[DBG90]] 1188 // CHECK6-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX2]], i64 0, !dbg [[DBG90]] 1189 // CHECK6-NEXT: ret i32 0, !dbg [[DBG91:![0-9]+]] 1190 // CHECK6: terminate.lpad: 1191 // CHECK6-NEXT: [[TMP7:%.*]] = landingpad { i8*, i32 } 1192 // CHECK6-NEXT: catch i8* null, !dbg [[DBG81]] 1193 // CHECK6-NEXT: [[TMP8:%.*]] = extractvalue { i8*, i32 } [[TMP7]], 0, !dbg [[DBG81]] 1194 // CHECK6-NEXT: call void @__clang_call_terminate(i8* [[TMP8]]) #[[ATTR5]], !dbg [[DBG81]] 1195 // CHECK6-NEXT: unreachable, !dbg [[DBG81]] 1196 // 1197 // 1198 // CHECK6-LABEL: define {{[^@]+}}@_Z3fooIPPcEvT_ 1199 // CHECK6-SAME: (i8** [[ARGC:%.*]]) #[[ATTR3]] comdat !dbg [[DBG92:![0-9]+]] { 1200 // CHECK6-NEXT: entry: 1201 // CHECK6-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8 1202 // CHECK6-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8 1203 // CHECK6-NEXT: call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META95:![0-9]+]], metadata !DIExpression()), !dbg [[DBG96:![0-9]+]] 1204 // CHECK6-NEXT: ret void, !dbg [[DBG97:![0-9]+]] 1205 // 1206 // 1207 // CHECK7-LABEL: define {{[^@]+}}@main 1208 // CHECK7-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { 1209 // CHECK7-NEXT: entry: 1210 // CHECK7-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 1211 // CHECK7-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 1212 // CHECK7-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8 1213 // CHECK7-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 1214 // CHECK7-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 1215 // CHECK7-NEXT: [[GLOBAL:%.*]] = alloca i32, align 4 1216 // CHECK7-NEXT: [[SAVED_STACK2:%.*]] = alloca i8*, align 8 1217 // CHECK7-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8 1218 // CHECK7-NEXT: store i32 0, i32* [[RETVAL]], align 4 1219 // CHECK7-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 1220 // CHECK7-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 1221 // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 1222 // CHECK7-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 1223 // CHECK7-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 1224 // CHECK7-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8 1225 // CHECK7-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16 1226 // CHECK7-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8 1227 // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1 1228 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 1229 // CHECK7-NEXT: invoke void @_Z3fooIiEvT_(i32 [[TMP3]]) 1230 // CHECK7-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] 1231 // CHECK7: invoke.cont: 1232 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* @global, align 4 1233 // CHECK7-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1 1234 // CHECK7-NEXT: store i32 [[TMP4]], i32* [[ARRAYIDX1]], align 4 1235 // CHECK7-NEXT: [[TMP5:%.*]] = call i8* @llvm.stacksave() 1236 // CHECK7-NEXT: store i8* [[TMP5]], i8** [[SAVED_STACK2]], align 8 1237 // CHECK7-NEXT: [[VLA3:%.*]] = alloca i32, i64 [[TMP1]], align 16 1238 // CHECK7-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR1]], align 8 1239 // CHECK7-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[VLA3]], i64 1 1240 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX4]], align 4 1241 // CHECK7-NEXT: invoke void @_Z3fooIiEvT_(i32 [[TMP6]]) 1242 // CHECK7-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[TERMINATE_LPAD]] 1243 // CHECK7: invoke.cont5: 1244 // CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[GLOBAL]], align 4 1245 // CHECK7-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[VLA3]], i64 1 1246 // CHECK7-NEXT: store i32 [[TMP7]], i32* [[ARRAYIDX6]], align 4 1247 // CHECK7-NEXT: [[TMP8:%.*]] = load i8*, i8** [[SAVED_STACK2]], align 8 1248 // CHECK7-NEXT: call void @llvm.stackrestore(i8* [[TMP8]]) 1249 // CHECK7-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1 1250 // CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX7]], align 4 1251 // CHECK7-NEXT: invoke void @_Z3fooIiEvT_(i32 [[TMP9]]) 1252 // CHECK7-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[TERMINATE_LPAD]] 1253 // CHECK7: invoke.cont8: 1254 // CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* @global, align 4 1255 // CHECK7-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1 1256 // CHECK7-NEXT: store i32 [[TMP10]], i32* [[ARRAYIDX9]], align 4 1257 // CHECK7-NEXT: [[TMP11:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8 1258 // CHECK7-NEXT: [[CALL:%.*]] = call i32 @_Z5tmainIPPcEiT_(i8** [[TMP11]]) 1259 // CHECK7-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 1260 // CHECK7-NEXT: [[TMP12:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 1261 // CHECK7-NEXT: call void @llvm.stackrestore(i8* [[TMP12]]) 1262 // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[RETVAL]], align 4 1263 // CHECK7-NEXT: ret i32 [[TMP13]] 1264 // CHECK7: terminate.lpad: 1265 // CHECK7-NEXT: [[TMP14:%.*]] = landingpad { i8*, i32 } 1266 // CHECK7-NEXT: catch i8* null 1267 // CHECK7-NEXT: [[TMP15:%.*]] = extractvalue { i8*, i32 } [[TMP14]], 0 1268 // CHECK7-NEXT: call void @__clang_call_terminate(i8* [[TMP15]]) #[[ATTR4:[0-9]+]] 1269 // CHECK7-NEXT: unreachable 1270 // 1271 // 1272 // CHECK7-LABEL: define {{[^@]+}}@_Z3fooIiEvT_ 1273 // CHECK7-SAME: (i32 [[ARGC:%.*]]) #[[ATTR2:[0-9]+]] comdat { 1274 // CHECK7-NEXT: entry: 1275 // CHECK7-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 1276 // CHECK7-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 1277 // CHECK7-NEXT: ret void 1278 // 1279 // 1280 // CHECK7-LABEL: define {{[^@]+}}@__clang_call_terminate 1281 // CHECK7-SAME: (i8* [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] comdat { 1282 // CHECK7-NEXT: [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR5:[0-9]+]] 1283 // CHECK7-NEXT: call void @_ZSt9terminatev() #[[ATTR4]] 1284 // CHECK7-NEXT: unreachable 1285 // 1286 // 1287 // CHECK7-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_ 1288 // CHECK7-SAME: (i8** [[ARGC:%.*]]) #[[ATTR2]] comdat personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { 1289 // CHECK7-NEXT: entry: 1290 // CHECK7-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8 1291 // CHECK7-NEXT: [[VAR:%.*]] = alloca double*, align 8 1292 // CHECK7-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8 1293 // CHECK7-NEXT: [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8 1294 // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0 1295 // CHECK7-NEXT: [[TMP1:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8 1296 // CHECK7-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0 1297 // CHECK7-NEXT: [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1 1298 // CHECK7-NEXT: [[TMP3:%.*]] = zext i8 [[TMP2]] to i64 1299 // CHECK7-NEXT: [[TMP4:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8 1300 // CHECK7-NEXT: invoke void @_Z3fooIPPcEvT_(i8** [[TMP4]]) 1301 // CHECK7-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] 1302 // CHECK7: invoke.cont: 1303 // CHECK7-NEXT: [[TMP5:%.*]] = load double*, double** [[VAR]], align 8 1304 // CHECK7-NEXT: [[TMP6:%.*]] = mul nsw i64 0, [[TMP3]] 1305 // CHECK7-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[TMP5]], i64 [[TMP6]] 1306 // CHECK7-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX2]], i64 0 1307 // CHECK7-NEXT: ret i32 0 1308 // CHECK7: terminate.lpad: 1309 // CHECK7-NEXT: [[TMP7:%.*]] = landingpad { i8*, i32 } 1310 // CHECK7-NEXT: catch i8* null 1311 // CHECK7-NEXT: [[TMP8:%.*]] = extractvalue { i8*, i32 } [[TMP7]], 0 1312 // CHECK7-NEXT: call void @__clang_call_terminate(i8* [[TMP8]]) #[[ATTR4]] 1313 // CHECK7-NEXT: unreachable 1314 // 1315 // 1316 // CHECK7-LABEL: define {{[^@]+}}@_Z3fooIPPcEvT_ 1317 // CHECK7-SAME: (i8** [[ARGC:%.*]]) #[[ATTR2]] comdat { 1318 // CHECK7-NEXT: entry: 1319 // CHECK7-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8 1320 // CHECK7-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8 1321 // CHECK7-NEXT: ret void 1322 // 1323 // 1324 // CHECK8-LABEL: define {{[^@]+}}@main 1325 // CHECK8-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG11:![0-9]+]] { 1326 // CHECK8-NEXT: entry: 1327 // CHECK8-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 1328 // CHECK8-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 1329 // CHECK8-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8 1330 // CHECK8-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 1331 // CHECK8-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 1332 // CHECK8-NEXT: [[GLOBAL:%.*]] = alloca i32, align 4 1333 // CHECK8-NEXT: [[SAVED_STACK2:%.*]] = alloca i8*, align 8 1334 // CHECK8-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8 1335 // CHECK8-NEXT: store i32 0, i32* [[RETVAL]], align 4 1336 // CHECK8-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 1337 // CHECK8-NEXT: call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META17:![0-9]+]], metadata !DIExpression()), !dbg [[DBG18:![0-9]+]] 1338 // CHECK8-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 1339 // CHECK8-NEXT: call void @llvm.dbg.declare(metadata i8*** [[ARGV_ADDR]], metadata [[META19:![0-9]+]], metadata !DIExpression()), !dbg [[DBG20:![0-9]+]] 1340 // CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4, !dbg [[DBG21:![0-9]+]] 1341 // CHECK8-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64, !dbg [[DBG22:![0-9]+]] 1342 // CHECK8-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG22]] 1343 // CHECK8-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8, !dbg [[DBG22]] 1344 // CHECK8-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16, !dbg [[DBG22]] 1345 // CHECK8-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8, !dbg [[DBG22]] 1346 // CHECK8-NEXT: call void @llvm.dbg.declare(metadata i64* [[__VLA_EXPR0]], metadata [[META23:![0-9]+]], metadata !DIExpression()), !dbg [[DBG25:![0-9]+]] 1347 // CHECK8-NEXT: call void @llvm.dbg.declare(metadata i32* [[VLA]], metadata [[META26:![0-9]+]], metadata !DIExpression()), !dbg [[DBG30:![0-9]+]] 1348 // CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1, !dbg [[DBG31:![0-9]+]] 1349 // CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !dbg [[DBG31]] 1350 // CHECK8-NEXT: invoke void @_Z3fooIiEvT_(i32 [[TMP3]]) 1351 // CHECK8-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG33:![0-9]+]] 1352 // CHECK8: invoke.cont: 1353 // CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* @global, align 4, !dbg [[DBG34:![0-9]+]] 1354 // CHECK8-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1, !dbg [[DBG35:![0-9]+]] 1355 // CHECK8-NEXT: store i32 [[TMP4]], i32* [[ARRAYIDX1]], align 4, !dbg [[DBG36:![0-9]+]] 1356 // CHECK8-NEXT: call void @llvm.dbg.declare(metadata i32* [[GLOBAL]], metadata [[META37:![0-9]+]], metadata !DIExpression()), !dbg [[DBG39:![0-9]+]] 1357 // CHECK8-NEXT: [[TMP5:%.*]] = call i8* @llvm.stacksave(), !dbg [[DBG40:![0-9]+]] 1358 // CHECK8-NEXT: store i8* [[TMP5]], i8** [[SAVED_STACK2]], align 8, !dbg [[DBG40]] 1359 // CHECK8-NEXT: [[VLA3:%.*]] = alloca i32, i64 [[TMP1]], align 16, !dbg [[DBG40]] 1360 // CHECK8-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR1]], align 8, !dbg [[DBG40]] 1361 // CHECK8-NEXT: call void @llvm.dbg.declare(metadata i64* [[__VLA_EXPR1]], metadata [[META41:![0-9]+]], metadata !DIExpression()), !dbg [[DBG39]] 1362 // CHECK8-NEXT: call void @llvm.dbg.declare(metadata i32* [[VLA3]], metadata [[META42:![0-9]+]], metadata !DIExpression()), !dbg [[DBG39]] 1363 // CHECK8-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[VLA3]], i64 1, !dbg [[DBG43:![0-9]+]] 1364 // CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX4]], align 4, !dbg [[DBG43]] 1365 // CHECK8-NEXT: invoke void @_Z3fooIiEvT_(i32 [[TMP6]]) 1366 // CHECK8-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[TERMINATE_LPAD]], !dbg [[DBG45:![0-9]+]] 1367 // CHECK8: invoke.cont5: 1368 // CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[GLOBAL]], align 4, !dbg [[DBG46:![0-9]+]] 1369 // CHECK8-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[VLA3]], i64 1, !dbg [[DBG47:![0-9]+]] 1370 // CHECK8-NEXT: store i32 [[TMP7]], i32* [[ARRAYIDX6]], align 4, !dbg [[DBG48:![0-9]+]] 1371 // CHECK8-NEXT: [[TMP8:%.*]] = load i8*, i8** [[SAVED_STACK2]], align 8, !dbg [[DBG49:![0-9]+]] 1372 // CHECK8-NEXT: call void @llvm.stackrestore(i8* [[TMP8]]), !dbg [[DBG49]] 1373 // CHECK8-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1, !dbg [[DBG50:![0-9]+]] 1374 // CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX7]], align 4, !dbg [[DBG50]] 1375 // CHECK8-NEXT: invoke void @_Z3fooIiEvT_(i32 [[TMP9]]) 1376 // CHECK8-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[TERMINATE_LPAD]], !dbg [[DBG53:![0-9]+]] 1377 // CHECK8: invoke.cont8: 1378 // CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* @global, align 4, !dbg [[DBG54:![0-9]+]] 1379 // CHECK8-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 1, !dbg [[DBG55:![0-9]+]] 1380 // CHECK8-NEXT: store i32 [[TMP10]], i32* [[ARRAYIDX9]], align 4, !dbg [[DBG56:![0-9]+]] 1381 // CHECK8-NEXT: [[TMP11:%.*]] = load i8**, i8*** [[ARGV_ADDR]], align 8, !dbg [[DBG57:![0-9]+]] 1382 // CHECK8-NEXT: [[CALL:%.*]] = call i32 @_Z5tmainIPPcEiT_(i8** [[TMP11]]), !dbg [[DBG58:![0-9]+]] 1383 // CHECK8-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4, !dbg [[DBG59:![0-9]+]] 1384 // CHECK8-NEXT: [[TMP12:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8, !dbg [[DBG60:![0-9]+]] 1385 // CHECK8-NEXT: call void @llvm.stackrestore(i8* [[TMP12]]), !dbg [[DBG60]] 1386 // CHECK8-NEXT: [[TMP13:%.*]] = load i32, i32* [[RETVAL]], align 4, !dbg [[DBG60]] 1387 // CHECK8-NEXT: ret i32 [[TMP13]], !dbg [[DBG60]] 1388 // CHECK8: terminate.lpad: 1389 // CHECK8-NEXT: [[TMP14:%.*]] = landingpad { i8*, i32 } 1390 // CHECK8-NEXT: catch i8* null, !dbg [[DBG33]] 1391 // CHECK8-NEXT: [[TMP15:%.*]] = extractvalue { i8*, i32 } [[TMP14]], 0, !dbg [[DBG33]] 1392 // CHECK8-NEXT: call void @__clang_call_terminate(i8* [[TMP15]]) #[[ATTR5:[0-9]+]], !dbg [[DBG33]] 1393 // CHECK8-NEXT: unreachable, !dbg [[DBG33]] 1394 // 1395 // 1396 // CHECK8-LABEL: define {{[^@]+}}@_Z3fooIiEvT_ 1397 // CHECK8-SAME: (i32 [[ARGC:%.*]]) #[[ATTR3:[0-9]+]] comdat !dbg [[DBG61:![0-9]+]] { 1398 // CHECK8-NEXT: entry: 1399 // CHECK8-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 1400 // CHECK8-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 1401 // CHECK8-NEXT: call void @llvm.dbg.declare(metadata i32* [[ARGC_ADDR]], metadata [[META66:![0-9]+]], metadata !DIExpression()), !dbg [[DBG67:![0-9]+]] 1402 // CHECK8-NEXT: ret void, !dbg [[DBG68:![0-9]+]] 1403 // 1404 // 1405 // CHECK8-LABEL: define {{[^@]+}}@__clang_call_terminate 1406 // CHECK8-SAME: (i8* [[TMP0:%.*]]) #[[ATTR4:[0-9]+]] comdat { 1407 // CHECK8-NEXT: [[TMP2:%.*]] = call i8* @__cxa_begin_catch(i8* [[TMP0]]) #[[ATTR6:[0-9]+]] 1408 // CHECK8-NEXT: call void @_ZSt9terminatev() #[[ATTR5]] 1409 // CHECK8-NEXT: unreachable 1410 // 1411 // 1412 // CHECK8-LABEL: define {{[^@]+}}@_Z5tmainIPPcEiT_ 1413 // CHECK8-SAME: (i8** [[ARGC:%.*]]) #[[ATTR3]] comdat personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg [[DBG69:![0-9]+]] { 1414 // CHECK8-NEXT: entry: 1415 // CHECK8-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8 1416 // CHECK8-NEXT: [[VAR:%.*]] = alloca double*, align 8 1417 // CHECK8-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8 1418 // CHECK8-NEXT: call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META74:![0-9]+]], metadata !DIExpression()), !dbg [[DBG75:![0-9]+]] 1419 // CHECK8-NEXT: [[TMP0:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8, !dbg [[DBG76:![0-9]+]] 1420 // CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8*, i8** [[TMP0]], i64 0, !dbg [[DBG76]] 1421 // CHECK8-NEXT: [[TMP1:%.*]] = load i8*, i8** [[ARRAYIDX]], align 8, !dbg [[DBG76]] 1422 // CHECK8-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, i8* [[TMP1]], i64 0, !dbg [[DBG76]] 1423 // CHECK8-NEXT: [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX1]], align 1, !dbg [[DBG76]] 1424 // CHECK8-NEXT: [[TMP3:%.*]] = zext i8 [[TMP2]] to i64, !dbg [[DBG77:![0-9]+]] 1425 // CHECK8-NEXT: [[TMP4:%.*]] = load i8**, i8*** [[ARGC_ADDR]], align 8, !dbg [[DBG78:![0-9]+]] 1426 // CHECK8-NEXT: invoke void @_Z3fooIPPcEvT_(i8** [[TMP4]]) 1427 // CHECK8-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !dbg [[DBG81:![0-9]+]] 1428 // CHECK8: invoke.cont: 1429 // CHECK8-NEXT: call void @llvm.dbg.declare(metadata double** [[VAR]], metadata [[META82:![0-9]+]], metadata !DIExpression()), !dbg [[DBG89:![0-9]+]] 1430 // CHECK8-NEXT: [[TMP5:%.*]] = load double*, double** [[VAR]], align 8, !dbg [[DBG90:![0-9]+]] 1431 // CHECK8-NEXT: [[TMP6:%.*]] = mul nsw i64 0, [[TMP3]], !dbg [[DBG90]] 1432 // CHECK8-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[TMP5]], i64 [[TMP6]], !dbg [[DBG90]] 1433 // CHECK8-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX2]], i64 0, !dbg [[DBG90]] 1434 // CHECK8-NEXT: ret i32 0, !dbg [[DBG91:![0-9]+]] 1435 // CHECK8: terminate.lpad: 1436 // CHECK8-NEXT: [[TMP7:%.*]] = landingpad { i8*, i32 } 1437 // CHECK8-NEXT: catch i8* null, !dbg [[DBG81]] 1438 // CHECK8-NEXT: [[TMP8:%.*]] = extractvalue { i8*, i32 } [[TMP7]], 0, !dbg [[DBG81]] 1439 // CHECK8-NEXT: call void @__clang_call_terminate(i8* [[TMP8]]) #[[ATTR5]], !dbg [[DBG81]] 1440 // CHECK8-NEXT: unreachable, !dbg [[DBG81]] 1441 // 1442 // 1443 // CHECK8-LABEL: define {{[^@]+}}@_Z3fooIPPcEvT_ 1444 // CHECK8-SAME: (i8** [[ARGC:%.*]]) #[[ATTR3]] comdat !dbg [[DBG92:![0-9]+]] { 1445 // CHECK8-NEXT: entry: 1446 // CHECK8-NEXT: [[ARGC_ADDR:%.*]] = alloca i8**, align 8 1447 // CHECK8-NEXT: store i8** [[ARGC]], i8*** [[ARGC_ADDR]], align 8 1448 // CHECK8-NEXT: call void @llvm.dbg.declare(metadata i8*** [[ARGC_ADDR]], metadata [[META95:![0-9]+]], metadata !DIExpression()), !dbg [[DBG96:![0-9]+]] 1449 // CHECK8-NEXT: ret void, !dbg [[DBG97:![0-9]+]] 1450 // 1451