1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // RUN: %clang_cc1 -no-opaque-pointers -no-enable-noundef-analysis -verify -triple x86_64-apple-darwin10 -fopenmp -x c++ -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
3 // RUN: %clang_cc1 -no-opaque-pointers -no-enable-noundef-analysis -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
4 // RUN: %clang_cc1 -no-opaque-pointers -no-enable-noundef-analysis -fopenmp -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
5
6 // RUN: %clang_cc1 -no-opaque-pointers -no-enable-noundef-analysis -verify -triple x86_64-apple-darwin10 -fopenmp-simd -x c++ -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
7 // RUN: %clang_cc1 -no-opaque-pointers -no-enable-noundef-analysis -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
8 // RUN: %clang_cc1 -no-opaque-pointers -no-enable-noundef-analysis -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3
9 // expected-no-diagnostics
10 #ifndef HEADER
11 #define HEADER
12
13
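// S is non-trivial (user-provided constructors, copy assignment, destructor,
// and a friend operator+), so the array reduction over 'c' below needs
// element-wise construction, combination through operator+ / operator=, and
// destruction in the generated reduction init/comb/fini callbacks.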
14 struct S {
15 int a;
16   S() : a(0) {}
17   S(const S&) {}
18   S& operator=(const S&) {return *this;}
19   ~S() {}
20   friend S operator+(const S&a, const S&b) {return a;}
21 };
22
23
24 int main(int argc, char **argv) {
25 int a;
26 float b;
27 S c[5];
28 short d[argc];
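// The outer taskgroup registers three reduction objects (a, b, argc) and the
// inner one registers two (c and the VLA d). Each registration fills an array
// of kmp_taskred_input_t descriptors and passes it to __kmpc_taskred_init, as
// the CHECK1 lines below verify; the taskloop simd body then reads its
// task-private copies of 'a' and 'd' via __kmpc_task_reduction_get_th_data.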
29 #pragma omp taskgroup task_reduction(+: a, b, argc)
30 {
31 #pragma omp taskgroup task_reduction(-:c, d)
32 #pragma omp parallel
33 #pragma omp taskloop simd in_reduction(+:a) in_reduction(-:d)
34 for (int i = 0; i < 5; ++i)
35 a += d[a];
36 }
37 return 0;
38 }
39
40
41
42 #endif
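// CHECK1 covers the -fopenmp runs: main registers the reductions with the
// runtime (__kmpc_taskgroup, __kmpc_taskred_init), forks the parallel region,
// and the task entry routine fetches the thread-specific reduction copies via
// __kmpc_task_reduction_get_th_data before running the loop. CHECK3 covers the
// -fopenmp-simd runs, where no runtime calls are emitted and the loop is
// lowered directly in main with !llvm.access.group metadata.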
43 // CHECK1-LABEL: define {{[^@]+}}@main
44 // CHECK1-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
45 // CHECK1-NEXT: entry:
46 // CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
47 // CHECK1-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
48 // CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
49 // CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
50 // CHECK1-NEXT: [[B:%.*]] = alloca float, align 4
51 // CHECK1-NEXT: [[C:%.*]] = alloca [5 x %struct.S], align 16
52 // CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
53 // CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
54 // CHECK1-NEXT: [[DOTRD_INPUT_:%.*]] = alloca [3 x %struct.kmp_taskred_input_t], align 8
55 // CHECK1-NEXT: [[DOTTASK_RED_:%.*]] = alloca i8*, align 8
56 // CHECK1-NEXT: [[DOTRD_INPUT_3:%.*]] = alloca [2 x %struct.kmp_taskred_input_t.0], align 8
57 // CHECK1-NEXT: [[DOTTASK_RED_6:%.*]] = alloca i8*, align 8
58 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
59 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
60 // CHECK1-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
61 // CHECK1-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
62 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[C]], i32 0, i32 0
63 // CHECK1-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 5
64 // CHECK1-NEXT: br label [[ARRAYCTOR_LOOP:%.*]]
65 // CHECK1: arrayctor.loop:
66 // CHECK1-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
67 // CHECK1-NEXT: call void @_ZN1SC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
68 // CHECK1-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i64 1
69 // CHECK1-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
70 // CHECK1-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
71 // CHECK1: arrayctor.cont:
72 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
73 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
74 // CHECK1-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
75 // CHECK1-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
76 // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP2]], align 16
77 // CHECK1-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
78 // CHECK1-NEXT: call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
79 // CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [3 x %struct.kmp_taskred_input_t], [3 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 0
80 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0
81 // CHECK1-NEXT: [[TMP5:%.*]] = bitcast i32* [[A]] to i8*
82 // CHECK1-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 8
83 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1
84 // CHECK1-NEXT: [[TMP7:%.*]] = bitcast i32* [[A]] to i8*
85 // CHECK1-NEXT: store i8* [[TMP7]], i8** [[TMP6]], align 8
86 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2
87 // CHECK1-NEXT: store i64 4, i64* [[TMP8]], align 8
88 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3
89 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP9]], align 8
90 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4
91 // CHECK1-NEXT: store i8* null, i8** [[TMP10]], align 8
92 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5
93 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb. to i8*), i8** [[TMP11]], align 8
94 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6
95 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i32* [[TMP12]] to i8*
96 // CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP13]], i8 0, i64 4, i1 false)
97 // CHECK1-NEXT: [[DOTRD_INPUT_GEP_1:%.*]] = getelementptr inbounds [3 x %struct.kmp_taskred_input_t], [3 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 1
98 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 0
99 // CHECK1-NEXT: [[TMP15:%.*]] = bitcast float* [[B]] to i8*
100 // CHECK1-NEXT: store i8* [[TMP15]], i8** [[TMP14]], align 8
101 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 1
102 // CHECK1-NEXT: [[TMP17:%.*]] = bitcast float* [[B]] to i8*
103 // CHECK1-NEXT: store i8* [[TMP17]], i8** [[TMP16]], align 8
104 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 2
105 // CHECK1-NEXT: store i64 4, i64* [[TMP18]], align 8
106 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 3
107 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..1 to i8*), i8** [[TMP19]], align 8
108 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 4
109 // CHECK1-NEXT: store i8* null, i8** [[TMP20]], align 8
110 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 5
111 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..2 to i8*), i8** [[TMP21]], align 8
112 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 6
113 // CHECK1-NEXT: [[TMP23:%.*]] = bitcast i32* [[TMP22]] to i8*
114 // CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP23]], i8 0, i64 4, i1 false)
115 // CHECK1-NEXT: [[DOTRD_INPUT_GEP_2:%.*]] = getelementptr inbounds [3 x %struct.kmp_taskred_input_t], [3 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 2
116 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 0
117 // CHECK1-NEXT: [[TMP25:%.*]] = bitcast i32* [[ARGC_ADDR]] to i8*
118 // CHECK1-NEXT: store i8* [[TMP25]], i8** [[TMP24]], align 8
119 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 1
120 // CHECK1-NEXT: [[TMP27:%.*]] = bitcast i32* [[ARGC_ADDR]] to i8*
121 // CHECK1-NEXT: store i8* [[TMP27]], i8** [[TMP26]], align 8
122 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 2
123 // CHECK1-NEXT: store i64 4, i64* [[TMP28]], align 8
124 // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 3
125 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..3 to i8*), i8** [[TMP29]], align 8
126 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 4
127 // CHECK1-NEXT: store i8* null, i8** [[TMP30]], align 8
128 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 5
129 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..4 to i8*), i8** [[TMP31]], align 8
130 // CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 6
131 // CHECK1-NEXT: [[TMP33:%.*]] = bitcast i32* [[TMP32]] to i8*
132 // CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP33]], i8 0, i64 4, i1 false)
133 // CHECK1-NEXT: [[TMP34:%.*]] = bitcast [3 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8*
134 // CHECK1-NEXT: [[TMP35:%.*]] = call i8* @__kmpc_taskred_init(i32 [[TMP0]], i32 3, i8* [[TMP34]])
135 // CHECK1-NEXT: store i8* [[TMP35]], i8** [[DOTTASK_RED_]], align 8
136 // CHECK1-NEXT: call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
137 // CHECK1-NEXT: [[DOTRD_INPUT_GEP_4:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t.0], [2 x %struct.kmp_taskred_input_t.0]* [[DOTRD_INPUT_3]], i64 0, i64 0
138 // CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0:%.*]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 0
139 // CHECK1-NEXT: [[TMP37:%.*]] = bitcast [5 x %struct.S]* [[C]] to i8*
140 // CHECK1-NEXT: store i8* [[TMP37]], i8** [[TMP36]], align 8
141 // CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 1
142 // CHECK1-NEXT: [[TMP39:%.*]] = bitcast [5 x %struct.S]* [[C]] to i8*
143 // CHECK1-NEXT: store i8* [[TMP39]], i8** [[TMP38]], align 8
144 // CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 2
145 // CHECK1-NEXT: store i64 20, i64* [[TMP40]], align 8
146 // CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 3
147 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..5 to i8*), i8** [[TMP41]], align 8
148 // CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 4
149 // CHECK1-NEXT: store i8* bitcast (void (i8*)* @.red_fini. to i8*), i8** [[TMP42]], align 8
150 // CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 5
151 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..6 to i8*), i8** [[TMP43]], align 8
152 // CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 6
153 // CHECK1-NEXT: [[TMP45:%.*]] = bitcast i32* [[TMP44]] to i8*
154 // CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP45]], i8 0, i64 4, i1 false)
155 // CHECK1-NEXT: [[DOTRD_INPUT_GEP_5:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t.0], [2 x %struct.kmp_taskred_input_t.0]* [[DOTRD_INPUT_3]], i64 0, i64 1
156 // CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 0
157 // CHECK1-NEXT: [[TMP47:%.*]] = bitcast i16* [[VLA]] to i8*
158 // CHECK1-NEXT: store i8* [[TMP47]], i8** [[TMP46]], align 8
159 // CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 1
160 // CHECK1-NEXT: [[TMP49:%.*]] = bitcast i16* [[VLA]] to i8*
161 // CHECK1-NEXT: store i8* [[TMP49]], i8** [[TMP48]], align 8
162 // CHECK1-NEXT: [[TMP50:%.*]] = mul nuw i64 [[TMP2]], 2
163 // CHECK1-NEXT: [[TMP51:%.*]] = udiv exact i64 [[TMP50]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
164 // CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 2
165 // CHECK1-NEXT: store i64 [[TMP50]], i64* [[TMP52]], align 8
166 // CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 3
167 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init..7 to i8*), i8** [[TMP53]], align 8
168 // CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 4
169 // CHECK1-NEXT: store i8* null, i8** [[TMP54]], align 8
170 // CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 5
171 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb..8 to i8*), i8** [[TMP55]], align 8
172 // CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 6
173 // CHECK1-NEXT: store i32 1, i32* [[TMP56]], align 8
174 // CHECK1-NEXT: [[TMP57:%.*]] = bitcast [2 x %struct.kmp_taskred_input_t.0]* [[DOTRD_INPUT_3]] to i8*
175 // CHECK1-NEXT: [[TMP58:%.*]] = call i8* @__kmpc_taskred_init(i32 [[TMP0]], i32 2, i8* [[TMP57]])
176 // CHECK1-NEXT: store i8* [[TMP58]], i8** [[DOTTASK_RED_6]], align 8
177 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i16*, i8**, i8**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[A]], i64 [[TMP2]], i16* [[VLA]], i8** [[DOTTASK_RED_]], i8** [[DOTTASK_RED_6]])
178 // CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
179 // CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
180 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
181 // CHECK1-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
182 // CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP59]])
183 // CHECK1-NEXT: [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[C]], i32 0, i32 0
184 // CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN7]], i64 5
185 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
186 // CHECK1: arraydestroy.body:
187 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP60]], [[ARRAYCTOR_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
188 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
189 // CHECK1-NEXT: call void @_ZN1SD1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3:[0-9]+]]
190 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN7]]
191 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE8:%.*]], label [[ARRAYDESTROY_BODY]]
192 // CHECK1: arraydestroy.done8:
193 // CHECK1-NEXT: [[TMP61:%.*]] = load i32, i32* [[RETVAL]], align 4
194 // CHECK1-NEXT: ret i32 [[TMP61]]
195 //
196 //
197 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1Ev
198 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
199 // CHECK1-NEXT: entry:
200 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
201 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
202 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
203 // CHECK1-NEXT: call void @_ZN1SC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
204 // CHECK1-NEXT: ret void
205 //
206 //
207 // CHECK1-LABEL: define {{[^@]+}}@.red_init.
208 // CHECK1-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
209 // CHECK1-NEXT: entry:
210 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
211 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
212 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
213 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
214 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to i32**
215 // CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP2]], align 8
216 // CHECK1-NEXT: store i32 0, i32* [[TMP3]], align 4
217 // CHECK1-NEXT: ret void
218 //
219 //
220 // CHECK1-LABEL: define {{[^@]+}}@.red_comb.
221 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
222 // CHECK1-NEXT: entry:
223 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
224 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
225 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
226 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
227 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to i32**
228 // CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP2]], align 8
229 // CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8** [[DOTADDR1]] to i32**
230 // CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
231 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP3]], align 4
232 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP5]], align 4
233 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]]
234 // CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP3]], align 4
235 // CHECK1-NEXT: ret void
236 //
237 //
238 // CHECK1-LABEL: define {{[^@]+}}@.red_init..1
239 // CHECK1-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5]] {
240 // CHECK1-NEXT: entry:
241 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
242 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
243 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
244 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
245 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to float**
246 // CHECK1-NEXT: [[TMP3:%.*]] = load float*, float** [[TMP2]], align 8
247 // CHECK1-NEXT: store float 0.000000e+00, float* [[TMP3]], align 4
248 // CHECK1-NEXT: ret void
249 //
250 //
251 // CHECK1-LABEL: define {{[^@]+}}@.red_comb..2
252 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
253 // CHECK1-NEXT: entry:
254 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
255 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
256 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
257 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
258 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to float**
259 // CHECK1-NEXT: [[TMP3:%.*]] = load float*, float** [[TMP2]], align 8
260 // CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8** [[DOTADDR1]] to float**
261 // CHECK1-NEXT: [[TMP5:%.*]] = load float*, float** [[TMP4]], align 8
262 // CHECK1-NEXT: [[TMP6:%.*]] = load float, float* [[TMP3]], align 4
263 // CHECK1-NEXT: [[TMP7:%.*]] = load float, float* [[TMP5]], align 4
264 // CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP6]], [[TMP7]]
265 // CHECK1-NEXT: store float [[ADD]], float* [[TMP3]], align 4
266 // CHECK1-NEXT: ret void
267 //
268 //
269 // CHECK1-LABEL: define {{[^@]+}}@.red_init..3
270 // CHECK1-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5]] {
271 // CHECK1-NEXT: entry:
272 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
273 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
274 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
275 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
276 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to i32**
277 // CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP2]], align 8
278 // CHECK1-NEXT: store i32 0, i32* [[TMP3]], align 4
279 // CHECK1-NEXT: ret void
280 //
281 //
282 // CHECK1-LABEL: define {{[^@]+}}@.red_comb..4
283 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
284 // CHECK1-NEXT: entry:
285 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
286 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
287 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
288 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
289 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to i32**
290 // CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP2]], align 8
291 // CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8** [[DOTADDR1]] to i32**
292 // CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
293 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP3]], align 4
294 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP5]], align 4
295 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]]
296 // CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP3]], align 4
297 // CHECK1-NEXT: ret void
298 //
299 //
300 // CHECK1-LABEL: define {{[^@]+}}@.red_init..5
301 // CHECK1-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5]] {
302 // CHECK1-NEXT: entry:
303 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
304 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
305 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
306 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
307 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to [5 x %struct.S]**
308 // CHECK1-NEXT: [[TMP3:%.*]] = load [5 x %struct.S]*, [5 x %struct.S]** [[TMP2]], align 8
309 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[TMP3]], i32 0, i32 0
310 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 5
311 // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN]], [[TMP4]]
312 // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
313 // CHECK1: omp.arrayinit.body:
314 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
315 // CHECK1-NEXT: call void @_ZN1SC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]])
316 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
317 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
318 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
319 // CHECK1: omp.arrayinit.done:
320 // CHECK1-NEXT: ret void
321 //
322 //
323 // CHECK1-LABEL: define {{[^@]+}}@.red_fini.
324 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR5]] {
325 // CHECK1-NEXT: entry:
326 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
327 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
328 // CHECK1-NEXT: [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
329 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to [5 x %struct.S]*
330 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[TMP2]], i32 0, i32 0
331 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 5
332 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
333 // CHECK1: arraydestroy.body:
334 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP3]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
335 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
336 // CHECK1-NEXT: call void @_ZN1SD1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]]
337 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
338 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
339 // CHECK1: arraydestroy.done1:
340 // CHECK1-NEXT: ret void
341 //
342 //
343 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SD1Ev
344 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
345 // CHECK1-NEXT: entry:
346 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
347 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
348 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
349 // CHECK1-NEXT: call void @_ZN1SD2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]]
350 // CHECK1-NEXT: ret void
351 //
352 //
353 // CHECK1-LABEL: define {{[^@]+}}@.red_comb..6
354 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
355 // CHECK1-NEXT: entry:
356 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
357 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
358 // CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 4
359 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
360 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
361 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to %struct.S**
362 // CHECK1-NEXT: [[TMP3:%.*]] = load %struct.S*, %struct.S** [[TMP2]], align 8
363 // CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8** [[DOTADDR1]] to %struct.S**
364 // CHECK1-NEXT: [[TMP5:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
365 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[TMP3]], i64 5
366 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[TMP3]], [[TMP6]]
367 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
368 // CHECK1: omp.arraycpy.body:
369 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP5]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
370 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP3]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
371 // CHECK1-NEXT: call void @_ZplRK1SS1_(%struct.S* sret([[STRUCT_S]]) align 4 [[REF_TMP]], %struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]])
372 // CHECK1-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SaSERKS_(%struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]])
373 // CHECK1-NEXT: call void @_ZN1SD1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR3]]
374 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
375 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
376 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP6]]
377 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
378 // CHECK1: omp.arraycpy.done2:
379 // CHECK1-NEXT: ret void
380 //
381 //
382 // CHECK1-LABEL: define {{[^@]+}}@_ZplRK1SS1_
383 // CHECK1-SAME: (%struct.S* noalias sret([[STRUCT_S:%.*]]) align 4 [[AGG_RESULT:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[A:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR7:[0-9]+]] {
384 // CHECK1-NEXT: entry:
385 // CHECK1-NEXT: [[RESULT_PTR:%.*]] = alloca i8*, align 8
386 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca %struct.S*, align 8
387 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 8
388 // CHECK1-NEXT: [[TMP0:%.*]] = bitcast %struct.S* [[AGG_RESULT]] to i8*
389 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[RESULT_PTR]], align 8
390 // CHECK1-NEXT: store %struct.S* [[A]], %struct.S** [[A_ADDR]], align 8
391 // CHECK1-NEXT: store %struct.S* [[B]], %struct.S** [[B_ADDR]], align 8
392 // CHECK1-NEXT: [[TMP1:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 8
393 // CHECK1-NEXT: call void @_ZN1SC1ERKS_(%struct.S* nonnull align 4 dereferenceable(4) [[AGG_RESULT]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP1]])
394 // CHECK1-NEXT: ret void
395 //
396 //
397 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SaSERKS_
398 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR7]] align 2 {
399 // CHECK1-NEXT: entry:
400 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
401 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca %struct.S*, align 8
402 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
403 // CHECK1-NEXT: store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
404 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
405 // CHECK1-NEXT: ret %struct.S* [[THIS1]]
406 //
407 //
408 // CHECK1-LABEL: define {{[^@]+}}@.red_init..7
409 // CHECK1-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5]] {
410 // CHECK1-NEXT: entry:
411 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
412 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
413 // CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
414 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
415 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
416 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[DOTADDR]] to i16**
417 // CHECK1-NEXT: [[TMP4:%.*]] = load i16*, i16** [[TMP3]], align 8
418 // CHECK1-NEXT: [[TMP5:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i8* bitcast (i64* @{{reduction_size[.].+[.]}})
419 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to i64*
420 // CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* [[TMP6]], align 8
421 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr i16, i16* [[TMP4]], i64 [[TMP7]]
422 // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i16* [[TMP4]], [[TMP8]]
423 // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
424 // CHECK1: omp.arrayinit.body:
425 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i16* [ [[TMP4]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
426 // CHECK1-NEXT: store i16 0, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
427 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
428 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP8]]
429 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
430 // CHECK1: omp.arrayinit.done:
431 // CHECK1-NEXT: ret void
432 //
433 //
434 // CHECK1-LABEL: define {{[^@]+}}@.red_comb..8
435 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
436 // CHECK1-NEXT: entry:
437 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
438 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
439 // CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
440 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
441 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
442 // CHECK1-NEXT: [[TMP3:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i8* bitcast (i64* @{{reduction_size[.].+[.]}})
443 // CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to i64*
444 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[TMP4]], align 8
445 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[DOTADDR]] to i16**
446 // CHECK1-NEXT: [[TMP7:%.*]] = load i16*, i16** [[TMP6]], align 8
447 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[DOTADDR1]] to i16**
448 // CHECK1-NEXT: [[TMP9:%.*]] = load i16*, i16** [[TMP8]], align 8
449 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr i16, i16* [[TMP7]], i64 [[TMP5]]
450 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i16* [[TMP7]], [[TMP10]]
451 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE4:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
452 // CHECK1: omp.arraycpy.body:
453 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i16* [ [[TMP9]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
454 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i16* [ [[TMP7]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
455 // CHECK1-NEXT: [[TMP11:%.*]] = load i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
456 // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP11]] to i32
457 // CHECK1-NEXT: [[TMP12:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2
458 // CHECK1-NEXT: [[CONV2:%.*]] = sext i16 [[TMP12]] to i32
459 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]]
460 // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
461 // CHECK1-NEXT: store i16 [[CONV3]], i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
462 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
463 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
464 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP10]]
465 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE4]], label [[OMP_ARRAYCPY_BODY]]
466 // CHECK1: omp.arraycpy.done4:
467 // CHECK1-NEXT: ret void
468 //
469 //
470 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
471 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[VLA:%.*]], i16* nonnull align 2 dereferenceable(2) [[D:%.*]], i8** nonnull align 8 dereferenceable(8) [[DOTTASK_RED_:%.*]], i8** nonnull align 8 dereferenceable(8) [[DOTTASK_RED_1:%.*]]) #[[ATTR8:[0-9]+]] {
472 // CHECK1-NEXT: entry:
473 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
474 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
475 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
476 // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
477 // CHECK1-NEXT: [[D_ADDR:%.*]] = alloca i16*, align 8
478 // CHECK1-NEXT: [[DOTTASK_RED__ADDR:%.*]] = alloca i8**, align 8
479 // CHECK1-NEXT: [[DOTTASK_RED__ADDR2:%.*]] = alloca i8**, align 8
480 // CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
481 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
482 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
483 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
484 // CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
485 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
486 // CHECK1-NEXT: store i16* [[D]], i16** [[D_ADDR]], align 8
487 // CHECK1-NEXT: store i8** [[DOTTASK_RED_]], i8*** [[DOTTASK_RED__ADDR]], align 8
488 // CHECK1-NEXT: store i8** [[DOTTASK_RED_1]], i8*** [[DOTTASK_RED__ADDR2]], align 8
489 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
490 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
491 // CHECK1-NEXT: [[TMP2:%.*]] = load i16*, i16** [[D_ADDR]], align 8
492 // CHECK1-NEXT: [[TMP3:%.*]] = load i8**, i8*** [[DOTTASK_RED__ADDR]], align 8
493 // CHECK1-NEXT: [[TMP4:%.*]] = load i8**, i8*** [[DOTTASK_RED__ADDR2]], align 8
494 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
495 // CHECK1-NEXT: store i32* [[TMP0]], i32** [[TMP5]], align 8
496 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
497 // CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP6]], align 8
498 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
499 // CHECK1-NEXT: store i16* [[TMP2]], i16** [[TMP7]], align 8
500 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 3
501 // CHECK1-NEXT: store i8** [[TMP3]], i8*** [[TMP8]], align 8
502 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 4
503 // CHECK1-NEXT: store i8** [[TMP4]], i8*** [[TMP9]], align 8
504 // CHECK1-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
505 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4
506 // CHECK1-NEXT: call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]])
507 // CHECK1-NEXT: [[TMP12:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i32 1, i64 96, i64 40, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
508 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP12]] to %struct.kmp_task_t_with_privates*
509 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP13]], i32 0, i32 0
510 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP14]], i32 0, i32 0
511 // CHECK1-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
512 // CHECK1-NEXT: [[TMP17:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
513 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP16]], i8* align 8 [[TMP17]], i64 40, i1 false)
514 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP13]], i32 0, i32 1
515 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP18]], i32 0, i32 0
516 // CHECK1-NEXT: [[TMP20:%.*]] = load i8*, i8** [[TMP3]], align 8
517 // CHECK1-NEXT: store i8* [[TMP20]], i8** [[TMP19]], align 8
518 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP18]], i32 0, i32 1
519 // CHECK1-NEXT: [[TMP22:%.*]] = load i8*, i8** [[TMP4]], align 8
520 // CHECK1-NEXT: store i8* [[TMP22]], i8** [[TMP21]], align 8
521 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP14]], i32 0, i32 5
522 // CHECK1-NEXT: store i64 0, i64* [[TMP23]], align 8
523 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP14]], i32 0, i32 6
524 // CHECK1-NEXT: store i64 4, i64* [[TMP24]], align 8
525 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP14]], i32 0, i32 7
526 // CHECK1-NEXT: store i64 1, i64* [[TMP25]], align 8
527 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP14]], i32 0, i32 9
528 // CHECK1-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i8*
529 // CHECK1-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP27]], i8 0, i64 8, i1 false)
530 // CHECK1-NEXT: [[TMP28:%.*]] = load i64, i64* [[TMP25]], align 8
531 // CHECK1-NEXT: call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]], i8* [[TMP12]], i32 1, i64* [[TMP23]], i64* [[TMP24]], i64 [[TMP28]], i32 1, i32 0, i64 0, i8* null)
532 // CHECK1-NEXT: call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP11]])
533 // CHECK1-NEXT: ret void
534 //
535 //
536 // CHECK1-LABEL: define {{[^@]+}}@.omp_task_privates_map.
537 // CHECK1-SAME: (%struct..kmp_privates.t* noalias [[TMP0:%.*]], i8*** noalias [[TMP1:%.*]], i8*** noalias [[TMP2:%.*]]) #[[ATTR9:[0-9]+]] {
538 // CHECK1-NEXT: entry:
539 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
540 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8***, align 8
541 // CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca i8***, align 8
542 // CHECK1-NEXT: store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
543 // CHECK1-NEXT: store i8*** [[TMP1]], i8**** [[DOTADDR1]], align 8
544 // CHECK1-NEXT: store i8*** [[TMP2]], i8**** [[DOTADDR2]], align 8
545 // CHECK1-NEXT: [[TMP3:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
546 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 0
547 // CHECK1-NEXT: [[TMP5:%.*]] = load i8***, i8**** [[DOTADDR1]], align 8
548 // CHECK1-NEXT: store i8** [[TMP4]], i8*** [[TMP5]], align 8
549 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 1
550 // CHECK1-NEXT: [[TMP7:%.*]] = load i8***, i8**** [[DOTADDR2]], align 8
551 // CHECK1-NEXT: store i8** [[TMP6]], i8*** [[TMP7]], align 8
552 // CHECK1-NEXT: ret void
553 //
554 //
555 // CHECK1-LABEL: define {{[^@]+}}@.omp_task_entry.
556 // CHECK1-SAME: (i32 [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR5]] {
557 // CHECK1-NEXT: entry:
558 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
559 // CHECK1-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
560 // CHECK1-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
561 // CHECK1-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
562 // CHECK1-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
563 // CHECK1-NEXT: [[DOTLB__ADDR_I:%.*]] = alloca i64, align 8
564 // CHECK1-NEXT: [[DOTUB__ADDR_I:%.*]] = alloca i64, align 8
565 // CHECK1-NEXT: [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
566 // CHECK1-NEXT: [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
567 // CHECK1-NEXT: [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
568 // CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
569 // CHECK1-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i8**, align 8
570 // CHECK1-NEXT: [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca i8**, align 8
571 // CHECK1-NEXT: [[I_I:%.*]] = alloca i32, align 4
572 // CHECK1-NEXT: [[DOTOMP_IV_I:%.*]] = alloca i32, align 4
573 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
574 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
575 // CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
576 // CHECK1-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
577 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
578 // CHECK1-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
579 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
580 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
581 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
582 // CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
583 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
584 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
585 // CHECK1-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
586 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
587 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5
588 // CHECK1-NEXT: [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
589 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 6
590 // CHECK1-NEXT: [[TMP15:%.*]] = load i64, i64* [[TMP14]], align 8
591 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 7
592 // CHECK1-NEXT: [[TMP17:%.*]] = load i64, i64* [[TMP16]], align 8
593 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 8
594 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 8
595 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 9
596 // CHECK1-NEXT: [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8
597 // CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
598 // CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
599 // CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]])
600 // CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]])
601 // CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]])
602 // CHECK1-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14
603 // CHECK1-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !14
604 // CHECK1-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
605 // CHECK1-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i8***, i8***)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
606 // CHECK1-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !14
607 // CHECK1-NEXT: store i64 [[TMP13]], i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
608 // CHECK1-NEXT: store i64 [[TMP15]], i64* [[DOTUB__ADDR_I]], align 8, !noalias !14
609 // CHECK1-NEXT: store i64 [[TMP17]], i64* [[DOTST__ADDR_I]], align 8, !noalias !14
610 // CHECK1-NEXT: store i32 [[TMP19]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !14
611 // CHECK1-NEXT: store i8* [[TMP21]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14
612 // CHECK1-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
613 // CHECK1-NEXT: [[TMP22:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
614 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP22]], i32 0, i32 1
615 // CHECK1-NEXT: [[TMP24:%.*]] = load i64, i64* [[TMP23]], align 8
616 // CHECK1-NEXT: [[TMP25:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
617 // CHECK1-NEXT: [[TMP26:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
618 // CHECK1-NEXT: [[TMP27:%.*]] = bitcast void (i8*, ...)* [[TMP25]] to void (i8*, i8***, i8***)*
619 // CHECK1-NEXT: call void [[TMP27]](i8* [[TMP26]], i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], i8*** [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR3]]
620 // CHECK1-NEXT: [[TMP28:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14
621 // CHECK1-NEXT: [[TMP29:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !14
622 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP22]], i32 0, i32 0
623 // CHECK1-NEXT: [[TMP31:%.*]] = load i32*, i32** [[TMP30]], align 8
624 // CHECK1-NEXT: [[TMP32:%.*]] = load i8*, i8** [[TMP28]], align 8
625 // CHECK1-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14
626 // CHECK1-NEXT: [[TMP34:%.*]] = bitcast i32* [[TMP31]] to i8*
627 // CHECK1-NEXT: [[TMP35:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP33]], i8* [[TMP32]], i8* [[TMP34]])
628 // CHECK1-NEXT: [[CONV_I:%.*]] = bitcast i8* [[TMP35]] to i32*
629 // CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP22]], i32 0, i32 2
630 // CHECK1-NEXT: [[TMP37:%.*]] = load i16*, i16** [[TMP36]], align 8
631 // CHECK1-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP24]], 2
632 // CHECK1-NEXT: [[TMP39:%.*]] = udiv exact i64 [[TMP38]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
633 // CHECK1-NEXT: [[TMP40:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]], i8* bitcast (i64* @{{reduction_size[.].+[.]}})
634 // CHECK1-NEXT: [[TMP41:%.*]] = bitcast i8* [[TMP40]] to i64*
635 // CHECK1-NEXT: store i64 [[TMP39]], i64* [[TMP41]], align 8
636 // CHECK1-NEXT: [[TMP42:%.*]] = load i8*, i8** [[TMP29]], align 8
637 // CHECK1-NEXT: [[TMP43:%.*]] = bitcast i16* [[TMP37]] to i8*
638 // CHECK1-NEXT: [[TMP44:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP33]], i8* [[TMP42]], i8* [[TMP43]])
639 // CHECK1-NEXT: [[CONV2_I:%.*]] = bitcast i8* [[TMP44]] to i16*
640 // CHECK1-NEXT: [[TMP45:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
641 // CHECK1-NEXT: [[CONV3_I:%.*]] = trunc i64 [[TMP45]] to i32
642 // CHECK1-NEXT: store i32 [[CONV3_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14
643 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]]
644 // CHECK1: omp.inner.for.cond.i:
645 // CHECK1-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15:![0-9]+]]
646 // CHECK1-NEXT: [[CONV4_I:%.*]] = sext i32 [[TMP46]] to i64
647 // CHECK1-NEXT: [[TMP47:%.*]] = load i64, i64* [[DOTUB__ADDR_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]]
648 // CHECK1-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV4_I]], [[TMP47]]
649 // CHECK1-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__9_EXIT:%.*]]
650 // CHECK1: omp.inner.for.body.i:
651 // CHECK1-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
652 // CHECK1-NEXT: store i32 [[TMP48]], i32* [[I_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
653 // CHECK1-NEXT: [[TMP49:%.*]] = load i32, i32* [[CONV_I]], align 4, !llvm.access.group [[ACC_GRP15]]
654 // CHECK1-NEXT: [[IDXPROM_I:%.*]] = sext i32 [[TMP49]] to i64
655 // CHECK1-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds i16, i16* [[CONV2_I]], i64 [[IDXPROM_I]]
656 // CHECK1-NEXT: [[TMP50:%.*]] = load i16, i16* [[ARRAYIDX_I]], align 2, !llvm.access.group [[ACC_GRP15]]
657 // CHECK1-NEXT: [[CONV5_I:%.*]] = sext i16 [[TMP50]] to i32
658 // CHECK1-NEXT: [[TMP51:%.*]] = load i32, i32* [[CONV_I]], align 4, !llvm.access.group [[ACC_GRP15]]
659 // CHECK1-NEXT: [[ADD6_I:%.*]] = add nsw i32 [[TMP51]], [[CONV5_I]]
660 // CHECK1-NEXT: store i32 [[ADD6_I]], i32* [[CONV_I]], align 4, !llvm.access.group [[ACC_GRP15]]
661 // CHECK1-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
662 // CHECK1-NEXT: [[ADD7_I:%.*]] = add nsw i32 [[TMP52]], 1
663 // CHECK1-NEXT: store i32 [[ADD7_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
664 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP16:![0-9]+]]
665 // CHECK1: .omp_outlined..9.exit:
666 // CHECK1-NEXT: ret i32 0
667 //
668 //
669 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2Ev
670 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: store i32 0, i32* [[A]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD2Ev
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1ERKS_
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load %struct.S*, %struct.S** [[DOTADDR]], align 8
// CHECK1-NEXT: call void @_ZN1SC2ERKS_(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2ERKS_
// CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@main
// CHECK3-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B:%.*]] = alloca float, align 4
// CHECK3-NEXT: [[C:%.*]] = alloca [5 x %struct.S], align 16
// CHECK3-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK3-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTTASK_RED_:%.*]] = alloca i8*, align 8
// CHECK3-NEXT: [[DOTTASK_RED_1:%.*]] = alloca i8*, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK3-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK3-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK3-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[C]], i32 0, i32 0
// CHECK3-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 5
// CHECK3-NEXT: br label [[ARRAYCTOR_LOOP:%.*]]
// CHECK3: arrayctor.loop:
// CHECK3-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
// CHECK3-NEXT: call void @_ZN1SC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
// CHECK3-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i64 1
// CHECK3-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
// CHECK3-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
// CHECK3: arrayctor.cont:
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK3-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP1]], align 16
// CHECK3-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK3-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 4, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP3]] to i32
// CHECK3-NEXT: store i32 [[CONV]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]]
// CHECK3-NEXT: [[CONV2:%.*]] = sext i32 [[TMP4]] to i64
// CHECK3-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP2]]
// CHECK3-NEXT: [[CMP:%.*]] = icmp ule i64 [[CONV2]], [[TMP5]]
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP6]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK3-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP7]] to i64
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[IDXPROM]]
// CHECK3-NEXT: [[TMP8:%.*]] = load i16, i16* [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP2]]
// CHECK3-NEXT: [[CONV3:%.*]] = sext i16 [[TMP8]] to i32
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], [[CONV3]]
// CHECK3-NEXT: store i32 [[ADD4]], i32* [[A]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK3-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: store i32 5, i32* [[I]], align 4
// CHECK3-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP11]])
// CHECK3-NEXT: [[ARRAY_BEGIN6:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[C]], i32 0, i32 0
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN6]], i64 5
// CHECK3-NEXT: br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK3: arraydestroy.body:
// CHECK3-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP12]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK3-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK3-NEXT: call void @_ZN1SD1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3:[0-9]+]]
// CHECK3-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN6]]
// CHECK3-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE7:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK3: arraydestroy.done7:
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK3-NEXT: ret i32 [[TMP13]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC1Ev
// CHECK3-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK3-NEXT: call void @_ZN1SC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD1Ev
// CHECK3-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK3-NEXT: call void @_ZN1SD2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]]
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC2Ev
// CHECK3-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK3-NEXT: store i32 0, i32* [[A]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD2Ev
// CHECK3-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK3-NEXT: ret void
//