// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -no-opaque-pointers -no-enable-noundef-analysis -verify -triple x86_64-apple-darwin10 -fopenmp -x c++ -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -no-enable-noundef-analysis -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -no-enable-noundef-analysis -fopenmp -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1

// RUN: %clang_cc1 -no-opaque-pointers -no-enable-noundef-analysis -verify -triple x86_64-apple-darwin10 -fopenmp-simd -x c++ -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -no-enable-noundef-analysis -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -no-enable-noundef-analysis -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// expected-no-diagnostics
#ifndef HEADER
#define HEADER


struct S {
  int a;
  S() : a(0) {}
  S(const S&) {}
  S& operator=(const S&) {return *this;}
  ~S() {}
  friend S operator+(const S&a, const S&b) {return a;}
};


int main(int argc, char **argv) {
  int a;
  float b;
  S c[5];
  short d[argc];
#pragma omp taskgroup task_reduction(+: a, b, argc)
  {
#pragma omp taskgroup task_reduction(-:c, d)
#pragma omp parallel
#pragma omp master taskloop in_reduction(+:a) in_reduction(-:d)
    for (int i = 0; i < 5; ++i)
      a += d[a];
  }
  return 0;
}



#endif
// CHECK1-LABEL: define {{[^@]+}}@main
// CHECK1-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[B:%.*]] = alloca float, align 4
// CHECK1-NEXT: [[C:%.*]] = alloca [5 x %struct.S], align 16
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTRD_INPUT_:%.*]] = alloca [3 x %struct.kmp_taskred_input_t], align 8
// CHECK1-NEXT: [[DOTTASK_RED_:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTRD_INPUT_3:%.*]] = alloca [2 x %struct.kmp_taskred_input_t.0], align 8
// CHECK1-NEXT: [[DOTTASK_RED_6:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[C]], i32 0, i32 0
// CHECK1-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 5
// CHECK1-NEXT: br label [[ARRAYCTOR_LOOP:%.*]]
// CHECK1: arrayctor.loop:
// CHECK1-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi
%struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ] 67 // CHECK1-NEXT: call void @_ZN1SC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]]) 68 // CHECK1-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i64 1 69 // CHECK1-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]] 70 // CHECK1-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]] 71 // CHECK1: arrayctor.cont: 72 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 73 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 74 // CHECK1-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave() 75 // CHECK1-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8 76 // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP2]], align 16 77 // CHECK1-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8 78 // CHECK1-NEXT: call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]) 79 // CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [3 x %struct.kmp_taskred_input_t], [3 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 0 80 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0 81 // CHECK1-NEXT: [[TMP5:%.*]] = bitcast i32* [[A]] to i8* 82 // CHECK1-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 8 83 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1 84 // CHECK1-NEXT: [[TMP7:%.*]] = bitcast i32* [[A]] to i8* 85 // CHECK1-NEXT: store i8* [[TMP7]], i8** [[TMP6]], align 8 86 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2 87 // CHECK1-NEXT: store i64 4, i64* [[TMP8]], align 8 88 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3 89 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP9]], align 8 90 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4 91 // CHECK1-NEXT: store i8* null, i8** [[TMP10]], align 8 92 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5 93 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb. 
to i8*), i8** [[TMP11]], align 8 94 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6 95 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i32* [[TMP12]] to i8* 96 // CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP13]], i8 0, i64 4, i1 false) 97 // CHECK1-NEXT: [[DOTRD_INPUT_GEP_1:%.*]] = getelementptr inbounds [3 x %struct.kmp_taskred_input_t], [3 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 1 98 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 0 99 // CHECK1-NEXT: [[TMP15:%.*]] = bitcast float* [[B]] to i8* 100 // CHECK1-NEXT: store i8* [[TMP15]], i8** [[TMP14]], align 8 101 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 1 102 // CHECK1-NEXT: [[TMP17:%.*]] = bitcast float* [[B]] to i8* 103 // CHECK1-NEXT: store i8* [[TMP17]], i8** [[TMP16]], align 8 104 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 2 105 // CHECK1-NEXT: store i64 4, i64* [[TMP18]], align 8 106 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 3 107 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..1 to i8*), i8** [[TMP19]], align 8 108 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 4 109 // CHECK1-NEXT: store i8* null, i8** [[TMP20]], align 8 110 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 5 111 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..2 to i8*), i8** [[TMP21]], align 8 112 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 6 113 // CHECK1-NEXT: [[TMP23:%.*]] = bitcast i32* [[TMP22]] to i8* 114 // CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP23]], i8 0, i64 4, i1 false) 115 // CHECK1-NEXT: [[DOTRD_INPUT_GEP_2:%.*]] = getelementptr inbounds [3 x %struct.kmp_taskred_input_t], [3 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 2 116 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 0 117 // CHECK1-NEXT: [[TMP25:%.*]] = bitcast i32* [[ARGC_ADDR]] to i8* 118 // CHECK1-NEXT: store i8* [[TMP25]], i8** [[TMP24]], align 8 119 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 1 120 // CHECK1-NEXT: [[TMP27:%.*]] = bitcast i32* [[ARGC_ADDR]] to i8* 121 // CHECK1-NEXT: store i8* [[TMP27]], i8** [[TMP26]], align 8 122 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 2 123 // CHECK1-NEXT: store i64 4, i64* [[TMP28]], align 8 124 // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 3 125 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..3 to 
i8*), i8** [[TMP29]], align 8 126 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 4 127 // CHECK1-NEXT: store i8* null, i8** [[TMP30]], align 8 128 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 5 129 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..4 to i8*), i8** [[TMP31]], align 8 130 // CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 6 131 // CHECK1-NEXT: [[TMP33:%.*]] = bitcast i32* [[TMP32]] to i8* 132 // CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP33]], i8 0, i64 4, i1 false) 133 // CHECK1-NEXT: [[TMP34:%.*]] = bitcast [3 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8* 134 // CHECK1-NEXT: [[TMP35:%.*]] = call i8* @__kmpc_taskred_init(i32 [[TMP0]], i32 3, i8* [[TMP34]]) 135 // CHECK1-NEXT: store i8* [[TMP35]], i8** [[DOTTASK_RED_]], align 8 136 // CHECK1-NEXT: call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]) 137 // CHECK1-NEXT: [[DOTRD_INPUT_GEP_4:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t.0], [2 x %struct.kmp_taskred_input_t.0]* [[DOTRD_INPUT_3]], i64 0, i64 0 138 // CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0:%.*]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 0 139 // CHECK1-NEXT: [[TMP37:%.*]] = bitcast [5 x %struct.S]* [[C]] to i8* 140 // CHECK1-NEXT: store i8* [[TMP37]], i8** [[TMP36]], align 8 141 // CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 1 142 // CHECK1-NEXT: [[TMP39:%.*]] = bitcast [5 x %struct.S]* [[C]] to i8* 143 // CHECK1-NEXT: store i8* [[TMP39]], i8** [[TMP38]], align 8 144 // CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 2 145 // CHECK1-NEXT: store i64 20, i64* [[TMP40]], align 8 146 // CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 3 147 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..5 to i8*), i8** [[TMP41]], align 8 148 // CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 4 149 // CHECK1-NEXT: store i8* bitcast (void (i8*)* @.red_fini. 
to i8*), i8** [[TMP42]], align 8 150 // CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 5 151 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..6 to i8*), i8** [[TMP43]], align 8 152 // CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 6 153 // CHECK1-NEXT: [[TMP45:%.*]] = bitcast i32* [[TMP44]] to i8* 154 // CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP45]], i8 0, i64 4, i1 false) 155 // CHECK1-NEXT: [[DOTRD_INPUT_GEP_5:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t.0], [2 x %struct.kmp_taskred_input_t.0]* [[DOTRD_INPUT_3]], i64 0, i64 1 156 // CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 0 157 // CHECK1-NEXT: [[TMP47:%.*]] = bitcast i16* [[VLA]] to i8* 158 // CHECK1-NEXT: store i8* [[TMP47]], i8** [[TMP46]], align 8 159 // CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 1 160 // CHECK1-NEXT: [[TMP49:%.*]] = bitcast i16* [[VLA]] to i8* 161 // CHECK1-NEXT: store i8* [[TMP49]], i8** [[TMP48]], align 8 162 // CHECK1-NEXT: [[TMP50:%.*]] = mul nuw i64 [[TMP2]], 2 163 // CHECK1-NEXT: [[TMP51:%.*]] = udiv exact i64 [[TMP50]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64) 164 // CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 2 165 // CHECK1-NEXT: store i64 [[TMP50]], i64* [[TMP52]], align 8 166 // CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 3 167 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..7 to i8*), i8** [[TMP53]], align 8 168 // CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 4 169 // CHECK1-NEXT: store i8* null, i8** [[TMP54]], align 8 170 // CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 5 171 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..8 to i8*), i8** [[TMP55]], align 8 172 // CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 6 173 // CHECK1-NEXT: store i32 1, i32* [[TMP56]], align 8 174 // CHECK1-NEXT: [[TMP57:%.*]] = bitcast [2 x %struct.kmp_taskred_input_t.0]* [[DOTRD_INPUT_3]] to i8* 175 // CHECK1-NEXT: [[TMP58:%.*]] = call i8* @__kmpc_taskred_init(i32 [[TMP0]], i32 2, i8* [[TMP57]]) 176 // CHECK1-NEXT: store i8* [[TMP58]], i8** [[DOTTASK_RED_6]], align 8 177 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i16*, i8**, i8**)* @.omp_outlined. 
to void (i32*, i32*, ...)*), i32* [[A]], i64 [[TMP2]], i16* [[VLA]], i8** [[DOTTASK_RED_]], i8** [[DOTTASK_RED_6]]) 178 // CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]) 179 // CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]) 180 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4 181 // CHECK1-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 182 // CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP59]]) 183 // CHECK1-NEXT: [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[C]], i32 0, i32 0 184 // CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN7]], i64 5 185 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 186 // CHECK1: arraydestroy.body: 187 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP60]], [[ARRAYCTOR_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 188 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 189 // CHECK1-NEXT: call void @_ZN1SD1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3:[0-9]+]] 190 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN7]] 191 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE8:%.*]], label [[ARRAYDESTROY_BODY]] 192 // CHECK1: arraydestroy.done8: 193 // CHECK1-NEXT: [[TMP61:%.*]] = load i32, i32* [[RETVAL]], align 4 194 // CHECK1-NEXT: ret i32 [[TMP61]] 195 // 196 // 197 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1Ev 198 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 { 199 // CHECK1-NEXT: entry: 200 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 201 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 202 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 203 // CHECK1-NEXT: call void @_ZN1SC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) 204 // CHECK1-NEXT: ret void 205 // 206 // 207 // CHECK1-LABEL: define {{[^@]+}}@.red_init. 208 // CHECK1-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] { 209 // CHECK1-NEXT: entry: 210 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 211 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 212 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 213 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 214 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to i32** 215 // CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP2]], align 8 216 // CHECK1-NEXT: store i32 0, i32* [[TMP3]], align 4 217 // CHECK1-NEXT: ret void 218 // 219 // 220 // CHECK1-LABEL: define {{[^@]+}}@.red_comb. 
221 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] { 222 // CHECK1-NEXT: entry: 223 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 224 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 225 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 226 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 227 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to i32** 228 // CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP2]], align 8 229 // CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8** [[DOTADDR1]] to i32** 230 // CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8 231 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP3]], align 4 232 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP5]], align 4 233 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]] 234 // CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP3]], align 4 235 // CHECK1-NEXT: ret void 236 // 237 // 238 // CHECK1-LABEL: define {{[^@]+}}@.red_init..1 239 // CHECK1-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5]] { 240 // CHECK1-NEXT: entry: 241 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 242 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 243 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 244 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 245 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to float** 246 // CHECK1-NEXT: [[TMP3:%.*]] = load float*, float** [[TMP2]], align 8 247 // CHECK1-NEXT: store float 0.000000e+00, float* [[TMP3]], align 4 248 // CHECK1-NEXT: ret void 249 // 250 // 251 // CHECK1-LABEL: define {{[^@]+}}@.red_comb..2 252 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] { 253 // CHECK1-NEXT: entry: 254 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 255 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 256 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 257 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 258 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to float** 259 // CHECK1-NEXT: [[TMP3:%.*]] = load float*, float** [[TMP2]], align 8 260 // CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8** [[DOTADDR1]] to float** 261 // CHECK1-NEXT: [[TMP5:%.*]] = load float*, float** [[TMP4]], align 8 262 // CHECK1-NEXT: [[TMP6:%.*]] = load float, float* [[TMP3]], align 4 263 // CHECK1-NEXT: [[TMP7:%.*]] = load float, float* [[TMP5]], align 4 264 // CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP6]], [[TMP7]] 265 // CHECK1-NEXT: store float [[ADD]], float* [[TMP3]], align 4 266 // CHECK1-NEXT: ret void 267 // 268 // 269 // CHECK1-LABEL: define {{[^@]+}}@.red_init..3 270 // CHECK1-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5]] { 271 // CHECK1-NEXT: entry: 272 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 273 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 274 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 275 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 276 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to i32** 277 // CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP2]], align 8 278 // CHECK1-NEXT: store i32 0, i32* [[TMP3]], align 4 279 // CHECK1-NEXT: ret void 280 // 281 // 282 // CHECK1-LABEL: define {{[^@]+}}@.red_comb..4 283 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] { 284 // CHECK1-NEXT: entry: 285 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 286 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 287 // CHECK1-NEXT: store 
i8* [[TMP0]], i8** [[DOTADDR]], align 8 288 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 289 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to i32** 290 // CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP2]], align 8 291 // CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8** [[DOTADDR1]] to i32** 292 // CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8 293 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP3]], align 4 294 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP5]], align 4 295 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]] 296 // CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP3]], align 4 297 // CHECK1-NEXT: ret void 298 // 299 // 300 // CHECK1-LABEL: define {{[^@]+}}@.red_init..5 301 // CHECK1-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5]] { 302 // CHECK1-NEXT: entry: 303 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 304 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 305 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 306 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 307 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to [5 x %struct.S]** 308 // CHECK1-NEXT: [[TMP3:%.*]] = load [5 x %struct.S]*, [5 x %struct.S]** [[TMP2]], align 8 309 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[TMP3]], i32 0, i32 0 310 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 5 311 // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN]], [[TMP4]] 312 // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] 313 // CHECK1: omp.arrayinit.body: 314 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] 315 // CHECK1-NEXT: call void @_ZN1SC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]]) 316 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 317 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]] 318 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] 319 // CHECK1: omp.arrayinit.done: 320 // CHECK1-NEXT: ret void 321 // 322 // 323 // CHECK1-LABEL: define {{[^@]+}}@.red_fini. 
324 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR5]] { 325 // CHECK1-NEXT: entry: 326 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 327 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 328 // CHECK1-NEXT: [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8 329 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to [5 x %struct.S]* 330 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[TMP2]], i32 0, i32 0 331 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 5 332 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 333 // CHECK1: arraydestroy.body: 334 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP3]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 335 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 336 // CHECK1-NEXT: call void @_ZN1SD1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]] 337 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]] 338 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]] 339 // CHECK1: arraydestroy.done1: 340 // CHECK1-NEXT: ret void 341 // 342 // 343 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SD1Ev 344 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 345 // CHECK1-NEXT: entry: 346 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 347 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 348 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 349 // CHECK1-NEXT: call void @_ZN1SD2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]] 350 // CHECK1-NEXT: ret void 351 // 352 // 353 // CHECK1-LABEL: define {{[^@]+}}@.red_comb..6 354 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] { 355 // CHECK1-NEXT: entry: 356 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 357 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 358 // CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 4 359 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 360 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 361 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to %struct.S** 362 // CHECK1-NEXT: [[TMP3:%.*]] = load %struct.S*, %struct.S** [[TMP2]], align 8 363 // CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8** [[DOTADDR1]] to %struct.S** 364 // CHECK1-NEXT: [[TMP5:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8 365 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[TMP3]], i64 5 366 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[TMP3]], [[TMP6]] 367 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 368 // CHECK1: omp.arraycpy.body: 369 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP5]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 370 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP3]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 371 // CHECK1-NEXT: call void @_ZplRK1SS1_(%struct.S* sret([[STRUCT_S]]) align 4 [[REF_TMP]], %struct.S* nonnull align 4 dereferenceable(4) 
[[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]]) 372 // CHECK1-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SaSERKS_(%struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) 373 // CHECK1-NEXT: call void @_ZN1SD1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR3]] 374 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 375 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 376 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP6]] 377 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] 378 // CHECK1: omp.arraycpy.done2: 379 // CHECK1-NEXT: ret void 380 // 381 // 382 // CHECK1-LABEL: define {{[^@]+}}@_ZplRK1SS1_ 383 // CHECK1-SAME: (%struct.S* noalias sret([[STRUCT_S:%.*]]) align 4 [[AGG_RESULT:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[A:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR7:[0-9]+]] { 384 // CHECK1-NEXT: entry: 385 // CHECK1-NEXT: [[RESULT_PTR:%.*]] = alloca i8*, align 8 386 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca %struct.S*, align 8 387 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 8 388 // CHECK1-NEXT: [[TMP0:%.*]] = bitcast %struct.S* [[AGG_RESULT]] to i8* 389 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[RESULT_PTR]], align 8 390 // CHECK1-NEXT: store %struct.S* [[A]], %struct.S** [[A_ADDR]], align 8 391 // CHECK1-NEXT: store %struct.S* [[B]], %struct.S** [[B_ADDR]], align 8 392 // CHECK1-NEXT: [[TMP1:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 8 393 // CHECK1-NEXT: call void @_ZN1SC1ERKS_(%struct.S* nonnull align 4 dereferenceable(4) [[AGG_RESULT]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP1]]) 394 // CHECK1-NEXT: ret void 395 // 396 // 397 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SaSERKS_ 398 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR7]] align 2 { 399 // CHECK1-NEXT: entry: 400 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 401 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca %struct.S*, align 8 402 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 403 // CHECK1-NEXT: store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8 404 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 405 // CHECK1-NEXT: ret %struct.S* [[THIS1]] 406 // 407 // 408 // CHECK1-LABEL: define {{[^@]+}}@.red_init..7 409 // CHECK1-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5]] { 410 // CHECK1-NEXT: entry: 411 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 412 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 413 // CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 414 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 415 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 416 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[DOTADDR]] to i16** 417 // CHECK1-NEXT: [[TMP4:%.*]] = load i16*, i16** [[TMP3]], align 8 418 // CHECK1-NEXT: [[TMP5:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i8* bitcast 
(i64* @{{reduction_size[.].+[.]}}) 419 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i64* 420 // CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* [[TMP6]], align 8 421 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr i16, i16* [[TMP4]], i64 [[TMP7]] 422 // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i16* [[TMP4]], [[TMP8]] 423 // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] 424 // CHECK1: omp.arrayinit.body: 425 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i16* [ [[TMP4]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] 426 // CHECK1-NEXT: store i16 0, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2 427 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 428 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP8]] 429 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] 430 // CHECK1: omp.arrayinit.done: 431 // CHECK1-NEXT: ret void 432 // 433 // 434 // CHECK1-LABEL: define {{[^@]+}}@.red_comb..8 435 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] { 436 // CHECK1-NEXT: entry: 437 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 438 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 439 // CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) 440 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 441 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 442 // CHECK1-NEXT: [[TMP3:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i8* bitcast (i64* @{{reduction_size[.].+[.]}}) 443 // CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to i64* 444 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP4]], align 8 445 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[DOTADDR]] to i16** 446 // CHECK1-NEXT: [[TMP7:%.*]] = load i16*, i16** [[TMP6]], align 8 447 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[DOTADDR1]] to i16** 448 // CHECK1-NEXT: [[TMP9:%.*]] = load i16*, i16** [[TMP8]], align 8 449 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr i16, i16* [[TMP7]], i64 [[TMP5]] 450 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i16* [[TMP7]], [[TMP10]] 451 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE4:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 452 // CHECK1: omp.arraycpy.body: 453 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i16* [ [[TMP9]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 454 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i16* [ [[TMP7]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 455 // CHECK1-NEXT: [[TMP11:%.*]] = load i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2 456 // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP11]] to i32 457 // CHECK1-NEXT: [[TMP12:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2 458 // CHECK1-NEXT: [[CONV2:%.*]] = sext i16 [[TMP12]] to i32 459 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]] 460 // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16 461 // CHECK1-NEXT: store i16 [[CONV3]], i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2 462 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 463 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i16, i16* 
[[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 464 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP10]] 465 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE4]], label [[OMP_ARRAYCPY_BODY]] 466 // CHECK1: omp.arraycpy.done4: 467 // CHECK1-NEXT: ret void 468 // 469 // 470 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined. 471 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[VLA:%.*]], i16* nonnull align 2 dereferenceable(2) [[D:%.*]], i8** nonnull align 8 dereferenceable(8) [[DOTTASK_RED_:%.*]], i8** nonnull align 8 dereferenceable(8) [[DOTTASK_RED_1:%.*]]) #[[ATTR8:[0-9]+]] { 472 // CHECK1-NEXT: entry: 473 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 474 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 475 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 476 // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 477 // CHECK1-NEXT: [[D_ADDR:%.*]] = alloca i16*, align 8 478 // CHECK1-NEXT: [[DOTTASK_RED__ADDR:%.*]] = alloca i8**, align 8 479 // CHECK1-NEXT: [[DOTTASK_RED__ADDR2:%.*]] = alloca i8**, align 8 480 // CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8 481 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 482 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 483 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 484 // CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 485 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 486 // CHECK1-NEXT: store i16* [[D]], i16** [[D_ADDR]], align 8 487 // CHECK1-NEXT: store i8** [[DOTTASK_RED_]], i8*** [[DOTTASK_RED__ADDR]], align 8 488 // CHECK1-NEXT: store i8** [[DOTTASK_RED_1]], i8*** [[DOTTASK_RED__ADDR2]], align 8 489 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8 490 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 491 // CHECK1-NEXT: [[TMP2:%.*]] = load i16*, i16** [[D_ADDR]], align 8 492 // CHECK1-NEXT: [[TMP3:%.*]] = load i8**, i8*** [[DOTTASK_RED__ADDR]], align 8 493 // CHECK1-NEXT: [[TMP4:%.*]] = load i8**, i8*** [[DOTTASK_RED__ADDR2]], align 8 494 // CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 495 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4 496 // CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]]) 497 // CHECK1-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 498 // CHECK1-NEXT: br i1 [[TMP8]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] 499 // CHECK1: omp_if.then: 500 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0 501 // CHECK1-NEXT: store i32* [[TMP0]], i32** [[TMP9]], align 8 502 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1 503 // CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP10]], align 8 504 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2 505 // CHECK1-NEXT: store i16* [[TMP2]], i16** [[TMP11]], align 8 506 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 3 507 // CHECK1-NEXT: store i8** [[TMP3]], i8*** [[TMP12]], align 8 508 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* 
[[AGG_CAPTURED]], i32 0, i32 4 509 // CHECK1-NEXT: store i8** [[TMP4]], i8*** [[TMP13]], align 8 510 // CHECK1-NEXT: call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]]) 511 // CHECK1-NEXT: [[TMP14:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 1, i64 96, i64 40, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*)) 512 // CHECK1-NEXT: [[TMP15:%.*]] = bitcast i8* [[TMP14]] to %struct.kmp_task_t_with_privates* 513 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP15]], i32 0, i32 0 514 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 0 515 // CHECK1-NEXT: [[TMP18:%.*]] = load i8*, i8** [[TMP17]], align 8 516 // CHECK1-NEXT: [[TMP19:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8* 517 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP18]], i8* align 8 [[TMP19]], i64 40, i1 false) 518 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP15]], i32 0, i32 1 519 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP20]], i32 0, i32 0 520 // CHECK1-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP3]], align 8 521 // CHECK1-NEXT: store i8* [[TMP22]], i8** [[TMP21]], align 8 522 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP20]], i32 0, i32 1 523 // CHECK1-NEXT: [[TMP24:%.*]] = load i8*, i8** [[TMP4]], align 8 524 // CHECK1-NEXT: store i8* [[TMP24]], i8** [[TMP23]], align 8 525 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 5 526 // CHECK1-NEXT: store i64 0, i64* [[TMP25]], align 8 527 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 6 528 // CHECK1-NEXT: store i64 4, i64* [[TMP26]], align 8 529 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 7 530 // CHECK1-NEXT: store i64 1, i64* [[TMP27]], align 8 531 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 9 532 // CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i8* 533 // CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP29]], i8 0, i64 8, i1 false) 534 // CHECK1-NEXT: [[TMP30:%.*]] = load i64, i64* [[TMP27]], align 8 535 // CHECK1-NEXT: call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i8* [[TMP14]], i32 1, i64* [[TMP25]], i64* [[TMP26]], i64 [[TMP30]], i32 1, i32 0, i64 0, i8* null) 536 // CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]]) 537 // CHECK1-NEXT: call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]]) 538 // CHECK1-NEXT: br label [[OMP_IF_END]] 539 // CHECK1: omp_if.end: 540 // CHECK1-NEXT: ret void 541 // 542 // 543 // CHECK1-LABEL: define {{[^@]+}}@.omp_task_privates_map. 
// CHECK1-SAME: (%struct..kmp_privates.t* noalias [[TMP0:%.*]], i8*** noalias [[TMP1:%.*]], i8*** noalias [[TMP2:%.*]]) #[[ATTR9:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8***, align 8
// CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca i8***, align 8
// CHECK1-NEXT: store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8*** [[TMP1]], i8**** [[DOTADDR1]], align 8
// CHECK1-NEXT: store i8*** [[TMP2]], i8**** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = load i8***, i8**** [[DOTADDR1]], align 8
// CHECK1-NEXT: store i8** [[TMP4]], i8*** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 1
// CHECK1-NEXT: [[TMP7:%.*]] = load i8***, i8**** [[DOTADDR2]], align 8
// CHECK1-NEXT: store i8** [[TMP6]], i8*** [[TMP7]], align 8
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK1-SAME: (i32 [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR5]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK1-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTLB__ADDR_I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTUB__ADDR_I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca i8**, align 8
// CHECK1-NEXT: [[I_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IV_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK1-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*,
i8** [[TMP6]], align 8 590 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon* 591 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1 592 // CHECK1-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8* 593 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8* 594 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5 595 // CHECK1-NEXT: [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8 596 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 6 597 // CHECK1-NEXT: [[TMP15:%.*]] = load i64, i64* [[TMP14]], align 8 598 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 7 599 // CHECK1-NEXT: [[TMP17:%.*]] = load i64, i64* [[TMP16]], align 8 600 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 8 601 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 8 602 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 9 603 // CHECK1-NEXT: [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8 604 // CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) 605 // CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) 606 // CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) 607 // CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) 608 // CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) 609 // CHECK1-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 610 // CHECK1-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 611 // CHECK1-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 612 // CHECK1-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i8***, i8***)* @.omp_task_privates_map. 
to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 613 // CHECK1-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 614 // CHECK1-NEXT: store i64 [[TMP13]], i64* [[DOTLB__ADDR_I]], align 8, !noalias !14 615 // CHECK1-NEXT: store i64 [[TMP15]], i64* [[DOTUB__ADDR_I]], align 8, !noalias !14 616 // CHECK1-NEXT: store i64 [[TMP17]], i64* [[DOTST__ADDR_I]], align 8, !noalias !14 617 // CHECK1-NEXT: store i32 [[TMP19]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !14 618 // CHECK1-NEXT: store i8* [[TMP21]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14 619 // CHECK1-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14 620 // CHECK1-NEXT: [[TMP22:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14 621 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP22]], i32 0, i32 1 622 // CHECK1-NEXT: [[TMP24:%.*]] = load i64, i64* [[TMP23]], align 8 623 // CHECK1-NEXT: [[TMP25:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 624 // CHECK1-NEXT: [[TMP26:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 625 // CHECK1-NEXT: [[TMP27:%.*]] = bitcast void (i8*, ...)* [[TMP25]] to void (i8*, i8***, i8***)* 626 // CHECK1-NEXT: call void [[TMP27]](i8* [[TMP26]], i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], i8*** [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR3]] 627 // CHECK1-NEXT: [[TMP28:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14 628 // CHECK1-NEXT: [[TMP29:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !14 629 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP22]], i32 0, i32 0 630 // CHECK1-NEXT: [[TMP31:%.*]] = load i32*, i32** [[TMP30]], align 8 631 // CHECK1-NEXT: [[TMP32:%.*]] = load i8*, i8** [[TMP28]], align 8 632 // CHECK1-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 633 // CHECK1-NEXT: [[TMP34:%.*]] = bitcast i32* [[TMP31]] to i8* 634 // CHECK1-NEXT: [[TMP35:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP33]], i8* [[TMP32]], i8* [[TMP34]]) 635 // CHECK1-NEXT: [[CONV_I:%.*]] = bitcast i8* [[TMP35]] to i32* 636 // CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP22]], i32 0, i32 2 637 // CHECK1-NEXT: [[TMP37:%.*]] = load i16*, i16** [[TMP36]], align 8 638 // CHECK1-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP24]], 2 639 // CHECK1-NEXT: [[TMP39:%.*]] = udiv exact i64 [[TMP38]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64) 640 // CHECK1-NEXT: [[TMP40:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]], i8* bitcast (i64* @{{reduction_size[.].+[.]}}) 641 // CHECK1-NEXT: [[TMP41:%.*]] = bitcast i8* [[TMP40]] to i64* 642 // CHECK1-NEXT: store i64 [[TMP39]], i64* [[TMP41]], align 8 643 // CHECK1-NEXT: [[TMP42:%.*]] = load i8*, i8** [[TMP29]], align 8 644 // CHECK1-NEXT: [[TMP43:%.*]] = bitcast i16* [[TMP37]] to i8* 645 // CHECK1-NEXT: [[TMP44:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP33]], i8* [[TMP42]], i8* [[TMP43]]) 646 // CHECK1-NEXT: [[CONV2_I:%.*]] = bitcast i8* [[TMP44]] to i16* 647 // CHECK1-NEXT: [[TMP45:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !14 648 // CHECK1-NEXT: [[CONV3_I:%.*]] = trunc i64 [[TMP45]] to i32 649 // CHECK1-NEXT: store i32 [[CONV3_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14 
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]]
// CHECK1: omp.inner.for.cond.i:
// CHECK1-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14
// CHECK1-NEXT: [[CONV4_I:%.*]] = sext i32 [[TMP46]] to i64
// CHECK1-NEXT: [[TMP47:%.*]] = load i64, i64* [[DOTUB__ADDR_I]], align 8, !noalias !14
// CHECK1-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV4_I]], [[TMP47]]
// CHECK1-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__9_EXIT:%.*]]
// CHECK1: omp.inner.for.body.i:
// CHECK1-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14
// CHECK1-NEXT: store i32 [[TMP48]], i32* [[I_I]], align 4, !noalias !14
// CHECK1-NEXT: [[TMP49:%.*]] = load i32, i32* [[CONV_I]], align 4
// CHECK1-NEXT: [[IDXPROM_I:%.*]] = sext i32 [[TMP49]] to i64
// CHECK1-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds i16, i16* [[CONV2_I]], i64 [[IDXPROM_I]]
// CHECK1-NEXT: [[TMP50:%.*]] = load i16, i16* [[ARRAYIDX_I]], align 2
// CHECK1-NEXT: [[CONV5_I:%.*]] = sext i16 [[TMP50]] to i32
// CHECK1-NEXT: [[TMP51:%.*]] = load i32, i32* [[CONV_I]], align 4
// CHECK1-NEXT: [[ADD6_I:%.*]] = add nsw i32 [[TMP51]], [[CONV5_I]]
// CHECK1-NEXT: store i32 [[ADD6_I]], i32* [[CONV_I]], align 4
// CHECK1-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14
// CHECK1-NEXT: [[ADD7_I:%.*]] = add nsw i32 [[TMP52]], 1
// CHECK1-NEXT: store i32 [[ADD7_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]]
// CHECK1: .omp_outlined..9.exit:
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2Ev
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: store i32 0, i32* [[A]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD2Ev
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1ERKS_
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load %struct.S*, %struct.S** [[DOTADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SC2ERKS_(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2ERKS_
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret void
//