// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -no-opaque-pointers -emit-llvm -o - -fopenmp \
// RUN: -triple i386-unknown-unknown -fopenmp-version=51 %s | \
// RUN: FileCheck %s --check-prefix=CHECK-32
// RUN: %clang_cc1 -no-opaque-pointers -emit-llvm -o - -fopenmp \
// RUN: -triple x86_64-unknown-linux-gnu -fopenmp-version=51 %s | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp \
// RUN: -triple x86_64-unknown-linux-gnu -fopenmp-version=51 \
// RUN: -emit-pch %s -o %t
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp \
// RUN: -triple x86_64-unknown-linux-gnu -fopenmp-version=51 \
// RUN: -include-pch %t -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

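// This test exercises code generation for the 'align' clause of the OpenMP
// 5.1 'allocate' directive on a 32-bit target (CHECK-32) and a 64-bit target
// (CHECK), and verifies the directive round-trips through a precompiled
// header (the -emit-pch / -include-pch RUN lines above).
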
typedef enum omp_allocator_handle_t {
  omp_null_allocator = 0,
  omp_default_mem_alloc = 1,
  omp_large_cap_mem_alloc = 2,
  omp_const_mem_alloc = 3,
  omp_high_bw_mem_alloc = 4,
  omp_low_lat_mem_alloc = 5,
  omp_cgroup_mem_alloc = 6,
  omp_pteam_mem_alloc = 7,
  omp_thread_mem_alloc = 8,
  KMP_ALLOCATOR_MAX_HANDLE = __UINTPTR_MAX__
} omp_allocator_handle_t;
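// These handle values mirror the predefined allocators declared in the OpenMP
// runtime's omp.h. Constant handles reach the runtime as integer constants
// cast via 'inttoptr' (see the __kmpc_aligned_alloc calls in the CHECK lines
// below); omp_null_allocator is passed as a plain null pointer.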

int main() {
  int foo0[5];
  int foo1[10];
  int foo2[20];
  int foo3[30];
  int foo4[40];
  int foo5[50];
  int foo6[60];
  int foo7[70];
  int foo8[80];
  omp_allocator_handle_t MyAlloc = omp_large_cap_mem_alloc;

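// Each 'allocate' directive below lowers to a call to
// __kmpc_aligned_alloc(gtid, alignment, size, allocator), with a matching
// __kmpc_free emitted at the end of the enclosing scope in reverse
// declaration order. Omitting the allocator clause passes a null handle, and
// a requested alignment below the variable's natural alignment (e.g.
// align(1) and align(2) on int arrays) is raised to the natural alignment,
// as the CHECK lines show.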
#pragma omp allocate(foo0) align(1)
#pragma omp allocate(foo1) allocator(omp_pteam_mem_alloc) align(2)
#pragma omp allocate(foo2) align(4) allocator(omp_cgroup_mem_alloc)
#pragma omp allocate(foo3) align(8) allocator(omp_low_lat_mem_alloc)
#pragma omp allocate(foo4) align(16) allocator(omp_high_bw_mem_alloc)
#pragma omp allocate(foo5) align(32) allocator(omp_const_mem_alloc)
#pragma omp allocate(foo6) align(64) allocator(omp_large_cap_mem_alloc)
#pragma omp allocate(foo7) align(32) allocator(omp_thread_mem_alloc)
#pragma omp allocate(foo8) align(16) allocator(omp_null_allocator)
  {
    double foo9[80];
    double foo10[90];
#pragma omp allocate(foo9) align(8) allocator(omp_thread_mem_alloc)
#pragma omp allocate(foo10) align(128)
  }
  {
    int bar1;
    int bar2[10];
    int bar3[20];
    int *bar4;
    float bar5;
    double bar6[30];
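// MyAlloc is not a compile-time constant, so the handle is loaded from its
// variable and converted with 'inttoptr' at every allocation and
// deallocation site, rather than folded into the call as a constant.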
#pragma omp allocate(bar1, bar2, bar3) align(2) allocator(MyAlloc)
#pragma omp allocate(bar4, bar5, bar6) align(16)
  }
}

// Verify align clause in template with non-type template parameter.
template <typename T, unsigned size, unsigned align>
T run() {
  T foo[size];
#pragma omp allocate(foo) align(align) allocator(omp_cgroup_mem_alloc)
  return foo[0];
}
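// The instantiation run<double, 1000, 16>() below should allocate 1000
// doubles (8000 bytes) with alignment 16 from omp_cgroup_mem_alloc, as
// checked in @_Z3runIdLj1000ELj16EET_v.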

int template_test() {
  double result;
  result = run<double, 1000, 16>();
  return 0;
}
#endif
// CHECK-32-LABEL: define {{[^@]+}}@main
// CHECK-32-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-32-NEXT:  entry:
// CHECK-32-NEXT:    [[MYALLOC:%.*]] = alloca i32, align 4
// CHECK-32-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK-32-NEXT:    [[DOTFOO0__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i32 4, i32 20, i8* null)
// CHECK-32-NEXT:    [[DOTFOO0__ADDR:%.*]] = bitcast i8* [[DOTFOO0__VOID_ADDR]] to [5 x i32]*
// CHECK-32-NEXT:    [[DOTFOO1__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i32 4, i32 40, i8* inttoptr (i32 7 to i8*))
// CHECK-32-NEXT:    [[DOTFOO1__ADDR:%.*]] = bitcast i8* [[DOTFOO1__VOID_ADDR]] to [10 x i32]*
// CHECK-32-NEXT:    [[DOTFOO2__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i32 4, i32 80, i8* inttoptr (i32 6 to i8*))
// CHECK-32-NEXT:    [[DOTFOO2__ADDR:%.*]] = bitcast i8* [[DOTFOO2__VOID_ADDR]] to [20 x i32]*
// CHECK-32-NEXT:    [[DOTFOO3__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i32 8, i32 120, i8* inttoptr (i32 5 to i8*))
// CHECK-32-NEXT:    [[DOTFOO3__ADDR:%.*]] = bitcast i8* [[DOTFOO3__VOID_ADDR]] to [30 x i32]*
// CHECK-32-NEXT:    [[DOTFOO4__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i32 16, i32 160, i8* inttoptr (i32 4 to i8*))
// CHECK-32-NEXT:    [[DOTFOO4__ADDR:%.*]] = bitcast i8* [[DOTFOO4__VOID_ADDR]] to [40 x i32]*
// CHECK-32-NEXT:    [[DOTFOO5__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i32 32, i32 200, i8* inttoptr (i32 3 to i8*))
// CHECK-32-NEXT:    [[DOTFOO5__ADDR:%.*]] = bitcast i8* [[DOTFOO5__VOID_ADDR]] to [50 x i32]*
// CHECK-32-NEXT:    [[DOTFOO6__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i32 64, i32 240, i8* inttoptr (i32 2 to i8*))
// CHECK-32-NEXT:    [[DOTFOO6__ADDR:%.*]] = bitcast i8* [[DOTFOO6__VOID_ADDR]] to [60 x i32]*
// CHECK-32-NEXT:    [[DOTFOO7__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i32 32, i32 280, i8* inttoptr (i32 8 to i8*))
// CHECK-32-NEXT:    [[DOTFOO7__ADDR:%.*]] = bitcast i8* [[DOTFOO7__VOID_ADDR]] to [70 x i32]*
// CHECK-32-NEXT:    [[DOTFOO8__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i32 16, i32 320, i8* null)
// CHECK-32-NEXT:    [[DOTFOO8__ADDR:%.*]] = bitcast i8* [[DOTFOO8__VOID_ADDR]] to [80 x i32]*
// CHECK-32-NEXT:    store i32 2, i32* [[MYALLOC]], align 4
// CHECK-32-NEXT:    [[DOTFOO9__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i32 8, i32 640, i8* inttoptr (i32 8 to i8*))
// CHECK-32-NEXT:    [[DOTFOO9__ADDR:%.*]] = bitcast i8* [[DOTFOO9__VOID_ADDR]] to [80 x double]*
// CHECK-32-NEXT:    [[DOTFOO10__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i32 128, i32 720, i8* null)
// CHECK-32-NEXT:    [[DOTFOO10__ADDR:%.*]] = bitcast i8* [[DOTFOO10__VOID_ADDR]] to [90 x double]*
// CHECK-32-NEXT:    [[TMP1:%.*]] = bitcast [90 x double]* [[DOTFOO10__ADDR]] to i8*
// CHECK-32-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP1]], i8* null)
// CHECK-32-NEXT:    [[TMP2:%.*]] = bitcast [80 x double]* [[DOTFOO9__ADDR]] to i8*
// CHECK-32-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP2]], i8* inttoptr (i32 8 to i8*))
// CHECK-32-NEXT:    [[TMP3:%.*]] = load i32, i32* [[MYALLOC]], align 4
// CHECK-32-NEXT:    [[CONV:%.*]] = inttoptr i32 [[TMP3]] to i8*
// CHECK-32-NEXT:    [[DOTBAR1__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i32 4, i32 4, i8* [[CONV]])
// CHECK-32-NEXT:    [[DOTBAR1__ADDR:%.*]] = bitcast i8* [[DOTBAR1__VOID_ADDR]] to i32*
// CHECK-32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[MYALLOC]], align 4
// CHECK-32-NEXT:    [[CONV1:%.*]] = inttoptr i32 [[TMP4]] to i8*
// CHECK-32-NEXT:    [[DOTBAR2__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i32 4, i32 40, i8* [[CONV1]])
// CHECK-32-NEXT:    [[DOTBAR2__ADDR:%.*]] = bitcast i8* [[DOTBAR2__VOID_ADDR]] to [10 x i32]*
// CHECK-32-NEXT:    [[TMP5:%.*]] = load i32, i32* [[MYALLOC]], align 4
// CHECK-32-NEXT:    [[CONV2:%.*]] = inttoptr i32 [[TMP5]] to i8*
// CHECK-32-NEXT:    [[DOTBAR3__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i32 4, i32 80, i8* [[CONV2]])
// CHECK-32-NEXT:    [[DOTBAR3__ADDR:%.*]] = bitcast i8* [[DOTBAR3__VOID_ADDR]] to [20 x i32]*
// CHECK-32-NEXT:    [[DOTBAR4__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i32 16, i32 4, i8* null)
// CHECK-32-NEXT:    [[DOTBAR4__ADDR:%.*]] = bitcast i8* [[DOTBAR4__VOID_ADDR]] to i32**
// CHECK-32-NEXT:    [[DOTBAR5__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i32 16, i32 4, i8* null)
// CHECK-32-NEXT:    [[DOTBAR5__ADDR:%.*]] = bitcast i8* [[DOTBAR5__VOID_ADDR]] to float*
// CHECK-32-NEXT:    [[DOTBAR6__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i32 16, i32 240, i8* null)
// CHECK-32-NEXT:    [[DOTBAR6__ADDR:%.*]] = bitcast i8* [[DOTBAR6__VOID_ADDR]] to [30 x double]*
// CHECK-32-NEXT:    [[TMP6:%.*]] = bitcast [30 x double]* [[DOTBAR6__ADDR]] to i8*
// CHECK-32-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP6]], i8* null)
// CHECK-32-NEXT:    [[TMP7:%.*]] = bitcast float* [[DOTBAR5__ADDR]] to i8*
// CHECK-32-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP7]], i8* null)
// CHECK-32-NEXT:    [[TMP8:%.*]] = bitcast i32** [[DOTBAR4__ADDR]] to i8*
// CHECK-32-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP8]], i8* null)
// CHECK-32-NEXT:    [[TMP9:%.*]] = bitcast [20 x i32]* [[DOTBAR3__ADDR]] to i8*
// CHECK-32-NEXT:    [[TMP10:%.*]] = load i32, i32* [[MYALLOC]], align 4
// CHECK-32-NEXT:    [[CONV3:%.*]] = inttoptr i32 [[TMP10]] to i8*
// CHECK-32-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP9]], i8* [[CONV3]])
// CHECK-32-NEXT:    [[TMP11:%.*]] = bitcast [10 x i32]* [[DOTBAR2__ADDR]] to i8*
// CHECK-32-NEXT:    [[TMP12:%.*]] = load i32, i32* [[MYALLOC]], align 4
// CHECK-32-NEXT:    [[CONV4:%.*]] = inttoptr i32 [[TMP12]] to i8*
// CHECK-32-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP11]], i8* [[CONV4]])
// CHECK-32-NEXT:    [[TMP13:%.*]] = bitcast i32* [[DOTBAR1__ADDR]] to i8*
// CHECK-32-NEXT:    [[TMP14:%.*]] = load i32, i32* [[MYALLOC]], align 4
// CHECK-32-NEXT:    [[CONV5:%.*]] = inttoptr i32 [[TMP14]] to i8*
// CHECK-32-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP13]], i8* [[CONV5]])
// CHECK-32-NEXT:    [[TMP15:%.*]] = bitcast [80 x i32]* [[DOTFOO8__ADDR]] to i8*
// CHECK-32-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP15]], i8* null)
// CHECK-32-NEXT:    [[TMP16:%.*]] = bitcast [70 x i32]* [[DOTFOO7__ADDR]] to i8*
// CHECK-32-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP16]], i8* inttoptr (i32 8 to i8*))
// CHECK-32-NEXT:    [[TMP17:%.*]] = bitcast [60 x i32]* [[DOTFOO6__ADDR]] to i8*
// CHECK-32-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP17]], i8* inttoptr (i32 2 to i8*))
// CHECK-32-NEXT:    [[TMP18:%.*]] = bitcast [50 x i32]* [[DOTFOO5__ADDR]] to i8*
// CHECK-32-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP18]], i8* inttoptr (i32 3 to i8*))
// CHECK-32-NEXT:    [[TMP19:%.*]] = bitcast [40 x i32]* [[DOTFOO4__ADDR]] to i8*
// CHECK-32-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP19]], i8* inttoptr (i32 4 to i8*))
// CHECK-32-NEXT:    [[TMP20:%.*]] = bitcast [30 x i32]* [[DOTFOO3__ADDR]] to i8*
// CHECK-32-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP20]], i8* inttoptr (i32 5 to i8*))
// CHECK-32-NEXT:    [[TMP21:%.*]] = bitcast [20 x i32]* [[DOTFOO2__ADDR]] to i8*
// CHECK-32-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP21]], i8* inttoptr (i32 6 to i8*))
// CHECK-32-NEXT:    [[TMP22:%.*]] = bitcast [10 x i32]* [[DOTFOO1__ADDR]] to i8*
// CHECK-32-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP22]], i8* inttoptr (i32 7 to i8*))
// CHECK-32-NEXT:    [[TMP23:%.*]] = bitcast [5 x i32]* [[DOTFOO0__ADDR]] to i8*
// CHECK-32-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP23]], i8* null)
// CHECK-32-NEXT:    ret i32 0
//
//
// CHECK-32-LABEL: define {{[^@]+}}@_Z13template_testv
// CHECK-32-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK-32-NEXT:  entry:
// CHECK-32-NEXT:    [[RESULT:%.*]] = alloca double, align 8
// CHECK-32-NEXT:    [[CALL:%.*]] = call noundef double @_Z3runIdLj1000ELj16EET_v()
// CHECK-32-NEXT:    store double [[CALL]], double* [[RESULT]], align 8
// CHECK-32-NEXT:    ret i32 0
//
//
// CHECK-32-LABEL: define {{[^@]+}}@_Z3runIdLj1000ELj16EET_v
// CHECK-32-SAME: () #[[ATTR2]] comdat {
// CHECK-32-NEXT:  entry:
// CHECK-32-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK-32-NEXT:    [[DOTFOO__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i32 16, i32 8000, i8* inttoptr (i32 6 to i8*))
// CHECK-32-NEXT:    [[DOTFOO__ADDR:%.*]] = bitcast i8* [[DOTFOO__VOID_ADDR]] to [1000 x double]*
// CHECK-32-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x double], [1000 x double]* [[DOTFOO__ADDR]], i32 0, i32 0
// CHECK-32-NEXT:    [[TMP1:%.*]] = load double, double* [[ARRAYIDX]], align 8
// CHECK-32-NEXT:    [[TMP2:%.*]] = bitcast [1000 x double]* [[DOTFOO__ADDR]] to i8*
// CHECK-32-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP2]], i8* inttoptr (i32 6 to i8*))
// CHECK-32-NEXT:    ret double [[TMP1]]
//
//
// CHECK-LABEL: define {{[^@]+}}@main
// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[MYALLOC:%.*]] = alloca i64, align 8
// CHECK-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK-NEXT:    [[DOTFOO0__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i64 4, i64 32, i8* null)
// CHECK-NEXT:    [[DOTFOO0__ADDR:%.*]] = bitcast i8* [[DOTFOO0__VOID_ADDR]] to [5 x i32]*
// CHECK-NEXT:    [[DOTFOO1__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i64 4, i64 48, i8* inttoptr (i64 7 to i8*))
// CHECK-NEXT:    [[DOTFOO1__ADDR:%.*]] = bitcast i8* [[DOTFOO1__VOID_ADDR]] to [10 x i32]*
// CHECK-NEXT:    [[DOTFOO2__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i64 4, i64 80, i8* inttoptr (i64 6 to i8*))
// CHECK-NEXT:    [[DOTFOO2__ADDR:%.*]] = bitcast i8* [[DOTFOO2__VOID_ADDR]] to [20 x i32]*
// CHECK-NEXT:    [[DOTFOO3__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i64 8, i64 128, i8* inttoptr (i64 5 to i8*))
// CHECK-NEXT:    [[DOTFOO3__ADDR:%.*]] = bitcast i8* [[DOTFOO3__VOID_ADDR]] to [30 x i32]*
// CHECK-NEXT:    [[DOTFOO4__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i64 16, i64 160, i8* inttoptr (i64 4 to i8*))
// CHECK-NEXT:    [[DOTFOO4__ADDR:%.*]] = bitcast i8* [[DOTFOO4__VOID_ADDR]] to [40 x i32]*
// CHECK-NEXT:    [[DOTFOO5__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i64 32, i64 208, i8* inttoptr (i64 3 to i8*))
// CHECK-NEXT:    [[DOTFOO5__ADDR:%.*]] = bitcast i8* [[DOTFOO5__VOID_ADDR]] to [50 x i32]*
// CHECK-NEXT:    [[DOTFOO6__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i64 64, i64 240, i8* inttoptr (i64 2 to i8*))
// CHECK-NEXT:    [[DOTFOO6__ADDR:%.*]] = bitcast i8* [[DOTFOO6__VOID_ADDR]] to [60 x i32]*
// CHECK-NEXT:    [[DOTFOO7__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i64 32, i64 288, i8* inttoptr (i64 8 to i8*))
// CHECK-NEXT:    [[DOTFOO7__ADDR:%.*]] = bitcast i8* [[DOTFOO7__VOID_ADDR]] to [70 x i32]*
// CHECK-NEXT:    [[DOTFOO8__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i64 16, i64 320, i8* null)
// CHECK-NEXT:    [[DOTFOO8__ADDR:%.*]] = bitcast i8* [[DOTFOO8__VOID_ADDR]] to [80 x i32]*
// CHECK-NEXT:    store i64 2, i64* [[MYALLOC]], align 8
// CHECK-NEXT:    [[DOTFOO9__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i64 8, i64 640, i8* inttoptr (i64 8 to i8*))
// CHECK-NEXT:    [[DOTFOO9__ADDR:%.*]] = bitcast i8* [[DOTFOO9__VOID_ADDR]] to [80 x double]*
// CHECK-NEXT:    [[DOTFOO10__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i64 128, i64 720, i8* null)
// CHECK-NEXT:    [[DOTFOO10__ADDR:%.*]] = bitcast i8* [[DOTFOO10__VOID_ADDR]] to [90 x double]*
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast [90 x double]* [[DOTFOO10__ADDR]] to i8*
// CHECK-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP1]], i8* null)
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast [80 x double]* [[DOTFOO9__ADDR]] to i8*
// CHECK-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP2]], i8* inttoptr (i64 8 to i8*))
// CHECK-NEXT:    [[TMP3:%.*]] = load i64, i64* [[MYALLOC]], align 8
// CHECK-NEXT:    [[CONV:%.*]] = inttoptr i64 [[TMP3]] to i8*
// CHECK-NEXT:    [[DOTBAR1__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i64 4, i64 4, i8* [[CONV]])
// CHECK-NEXT:    [[DOTBAR1__ADDR:%.*]] = bitcast i8* [[DOTBAR1__VOID_ADDR]] to i32*
// CHECK-NEXT:    [[TMP4:%.*]] = load i64, i64* [[MYALLOC]], align 8
// CHECK-NEXT:    [[CONV1:%.*]] = inttoptr i64 [[TMP4]] to i8*
// CHECK-NEXT:    [[DOTBAR2__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i64 4, i64 48, i8* [[CONV1]])
// CHECK-NEXT:    [[DOTBAR2__ADDR:%.*]] = bitcast i8* [[DOTBAR2__VOID_ADDR]] to [10 x i32]*
// CHECK-NEXT:    [[TMP5:%.*]] = load i64, i64* [[MYALLOC]], align 8
// CHECK-NEXT:    [[CONV2:%.*]] = inttoptr i64 [[TMP5]] to i8*
// CHECK-NEXT:    [[DOTBAR3__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i64 4, i64 80, i8* [[CONV2]])
// CHECK-NEXT:    [[DOTBAR3__ADDR:%.*]] = bitcast i8* [[DOTBAR3__VOID_ADDR]] to [20 x i32]*
// CHECK-NEXT:    [[DOTBAR4__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i64 16, i64 8, i8* null)
// CHECK-NEXT:    [[DOTBAR4__ADDR:%.*]] = bitcast i8* [[DOTBAR4__VOID_ADDR]] to i32**
// CHECK-NEXT:    [[DOTBAR5__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i64 16, i64 4, i8* null)
// CHECK-NEXT:    [[DOTBAR5__ADDR:%.*]] = bitcast i8* [[DOTBAR5__VOID_ADDR]] to float*
// CHECK-NEXT:    [[DOTBAR6__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i64 16, i64 240, i8* null)
// CHECK-NEXT:    [[DOTBAR6__ADDR:%.*]] = bitcast i8* [[DOTBAR6__VOID_ADDR]] to [30 x double]*
// CHECK-NEXT:    [[TMP6:%.*]] = bitcast [30 x double]* [[DOTBAR6__ADDR]] to i8*
// CHECK-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP6]], i8* null)
// CHECK-NEXT:    [[TMP7:%.*]] = bitcast float* [[DOTBAR5__ADDR]] to i8*
// CHECK-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP7]], i8* null)
// CHECK-NEXT:    [[TMP8:%.*]] = bitcast i32** [[DOTBAR4__ADDR]] to i8*
// CHECK-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP8]], i8* null)
// CHECK-NEXT:    [[TMP9:%.*]] = bitcast [20 x i32]* [[DOTBAR3__ADDR]] to i8*
// CHECK-NEXT:    [[TMP10:%.*]] = load i64, i64* [[MYALLOC]], align 8
// CHECK-NEXT:    [[CONV3:%.*]] = inttoptr i64 [[TMP10]] to i8*
// CHECK-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP9]], i8* [[CONV3]])
// CHECK-NEXT:    [[TMP11:%.*]] = bitcast [10 x i32]* [[DOTBAR2__ADDR]] to i8*
// CHECK-NEXT:    [[TMP12:%.*]] = load i64, i64* [[MYALLOC]], align 8
// CHECK-NEXT:    [[CONV4:%.*]] = inttoptr i64 [[TMP12]] to i8*
// CHECK-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP11]], i8* [[CONV4]])
// CHECK-NEXT:    [[TMP13:%.*]] = bitcast i32* [[DOTBAR1__ADDR]] to i8*
// CHECK-NEXT:    [[TMP14:%.*]] = load i64, i64* [[MYALLOC]], align 8
// CHECK-NEXT:    [[CONV5:%.*]] = inttoptr i64 [[TMP14]] to i8*
// CHECK-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP13]], i8* [[CONV5]])
// CHECK-NEXT:    [[TMP15:%.*]] = bitcast [80 x i32]* [[DOTFOO8__ADDR]] to i8*
// CHECK-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP15]], i8* null)
// CHECK-NEXT:    [[TMP16:%.*]] = bitcast [70 x i32]* [[DOTFOO7__ADDR]] to i8*
// CHECK-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP16]], i8* inttoptr (i64 8 to i8*))
// CHECK-NEXT:    [[TMP17:%.*]] = bitcast [60 x i32]* [[DOTFOO6__ADDR]] to i8*
// CHECK-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP17]], i8* inttoptr (i64 2 to i8*))
// CHECK-NEXT:    [[TMP18:%.*]] = bitcast [50 x i32]* [[DOTFOO5__ADDR]] to i8*
// CHECK-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP18]], i8* inttoptr (i64 3 to i8*))
// CHECK-NEXT:    [[TMP19:%.*]] = bitcast [40 x i32]* [[DOTFOO4__ADDR]] to i8*
// CHECK-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP19]], i8* inttoptr (i64 4 to i8*))
// CHECK-NEXT:    [[TMP20:%.*]] = bitcast [30 x i32]* [[DOTFOO3__ADDR]] to i8*
// CHECK-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP20]], i8* inttoptr (i64 5 to i8*))
// CHECK-NEXT:    [[TMP21:%.*]] = bitcast [20 x i32]* [[DOTFOO2__ADDR]] to i8*
// CHECK-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP21]], i8* inttoptr (i64 6 to i8*))
// CHECK-NEXT:    [[TMP22:%.*]] = bitcast [10 x i32]* [[DOTFOO1__ADDR]] to i8*
// CHECK-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP22]], i8* inttoptr (i64 7 to i8*))
// CHECK-NEXT:    [[TMP23:%.*]] = bitcast [5 x i32]* [[DOTFOO0__ADDR]] to i8*
// CHECK-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP23]], i8* null)
// CHECK-NEXT:    ret i32 0
//
//
// CHECK-LABEL: define {{[^@]+}}@_Z13template_testv
// CHECK-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RESULT:%.*]] = alloca double, align 8
// CHECK-NEXT:    [[CALL:%.*]] = call noundef double @_Z3runIdLj1000ELj16EET_v()
// CHECK-NEXT:    store double [[CALL]], double* [[RESULT]], align 8
// CHECK-NEXT:    ret i32 0
//
//
// CHECK-LABEL: define {{[^@]+}}@_Z3runIdLj1000ELj16EET_v
// CHECK-SAME: () #[[ATTR2]] comdat {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK-NEXT:    [[DOTFOO__VOID_ADDR:%.*]] = call i8* @__kmpc_aligned_alloc(i32 [[TMP0]], i64 16, i64 8000, i8* inttoptr (i64 6 to i8*))
// CHECK-NEXT:    [[DOTFOO__ADDR:%.*]] = bitcast i8* [[DOTFOO__VOID_ADDR]] to [1000 x double]*
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x double], [1000 x double]* [[DOTFOO__ADDR]], i64 0, i64 0
// CHECK-NEXT:    [[TMP1:%.*]] = load double, double* [[ARRAYIDX]], align 16
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast [1000 x double]* [[DOTFOO__ADDR]] to i8*
// CHECK-NEXT:    call void @__kmpc_free(i32 [[TMP0]], i8* [[TMP2]], i8* inttoptr (i64 6 to i8*))
// CHECK-NEXT:    ret double [[TMP1]]
//