// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test target codegen - host bc file has to be created first.
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK2
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -fopenmp-cuda-teams-reduction-recs-num=2048 -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK3
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

template<typename tx>
tx ftemplate(int n) {
  int a;
  short b;
  tx c;
  float d;
  double e;

  #pragma omp target
  #pragma omp teams reduction(+: e)
  {
    e += 5;
  }

  #pragma omp target
  #pragma omp teams reduction(^: c) reduction(*: d)
  {
    c ^= 2;
    d *= 33;
  }

  #pragma omp target
  #pragma omp teams reduction(|: a) reduction(max: b)
  #pragma omp parallel reduction(|: a) reduction(max: b)
  {
    a |= 1;
    b = 99 > b ? 99 : b;
  }

  return a+b+c+d+e;
}

int bar(int n){
  int a = 0;

  a += ftemplate<char>(n);

  return a;
}

#endif
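// The CHECK lines below are autogenerated. For CHECK1 (nvptx64) they cover
// three kernels: the generic-mode teams reductions for the target regions at
// lines 20 (double, +) and 26 (char, ^ and float, *), and the SPMD kernel for
// the combined teams/parallel reductions at line 33 (int, | and short, max).
// Each teams reduction funnels through @__kmpc_nvptx_teams_reduce_nowait_v2,
// which takes the set of generated helper functions checked individually below.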
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l20
// CHECK1-SAME: (i64 noundef [[E:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[E_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i64 [[E]], i64* [[E_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[E_ADDR]] to double*
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
// CHECK1-NEXT: [[TMP1:%.*]] = load double, double* [[CONV]], align 8
// CHECK1-NEXT: [[E1:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 8)
// CHECK1-NEXT: [[E_ON_STACK:%.*]] = bitcast i8* [[E1]] to double*
// CHECK1-NEXT: store double [[TMP1]], double* [[E_ON_STACK]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK1-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], double* [[E_ON_STACK]]) #[[ATTR4:[0-9]+]]
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[E1]], i64 8)
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[E_ADDR:%.*]] = alloca double*, align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store double* [[E]], double** [[E_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load double*, double** [[E_ADDR]], align 8
// CHECK1-NEXT: [[E1:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 8)
// CHECK1-NEXT: [[E_ON_STACK:%.*]] = bitcast i8* [[E1]] to double*
// CHECK1-NEXT: store double 0.000000e+00, double* [[E_ON_STACK]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load double, double* [[E_ON_STACK]], align 8
// CHECK1-NEXT: [[ADD:%.*]] = fadd double [[TMP1]], 5.000000e+00
// CHECK1-NEXT: store double [[ADD]], double* [[E_ON_STACK]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast double* [[E_ON_STACK]] to i8*
// CHECK1-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** @"_openmp_teams_reductions_buffer_$_$ptr", align 8
// CHECK1-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_nvptx_teams_reduce_nowait_v2(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i8* [[TMP7]], i32 1024, i8* [[TMP6]], void (i8*, i16, i16, i16)* @_omp_reduction_shuffle_and_reduce_func, void (i8*, i32)* @_omp_reduction_inter_warp_copy_func, void (i8*, i32, i8*)* @_omp_reduction_list_to_global_copy_func, void (i8*, i32, i8*)* @_omp_reduction_list_to_global_reduce_func, void (i8*, i32, i8*)* @_omp_reduction_global_to_list_copy_func, void (i8*, i32, i8*)* @_omp_reduction_global_to_list_reduce_func)
// CHECK1-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 1
// CHECK1-NEXT: br i1 [[TMP9]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK1: .omp.reduction.then:
// CHECK1-NEXT: [[TMP10:%.*]] = load double, double* [[TMP0]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = load double, double* [[E_ON_STACK]], align 8
// CHECK1-NEXT: [[ADD2:%.*]] = fadd double [[TMP10]], [[TMP11]]
// CHECK1-NEXT: store double [[ADD2]], double* [[TMP0]], align 8
// CHECK1-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP3]])
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK1: .omp.reduction.done:
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[E1]], i64 8)
// CHECK1-NEXT: ret void
//
//
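// Intra-warp step: each lane moves its partial result to a peer lane with
// @__kmpc_shuffle_int64 (the double travels as one 64-bit chunk) and, when the
// flags derived from the algorithm-version argument select this lane as an
// active reducer, combines the shuffled value via "_omp$reduction$reduction_func";
// otherwise the remote element may simply replace the local one.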
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca double, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i16 [[TMP1]], i16* [[DOTADDR1]], align 2
// CHECK1-NEXT: store i16 [[TMP2]], i16* [[DOTADDR2]], align 2
// CHECK1-NEXT: store i16 [[TMP3]], i16* [[DOTADDR3]], align 2
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = load i16, i16* [[DOTADDR1]], align 2
// CHECK1-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTADDR2]], align 2
// CHECK1-NEXT: [[TMP8:%.*]] = load i16, i16* [[DOTADDR3]], align 2
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to double**
// CHECK1-NEXT: [[TMP11:%.*]] = load double*, double** [[TMP10]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr double, double* [[TMP11]], i64 1
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast double* [[TMP13]] to i8*
// CHECK1-NEXT: [[TMP15:%.*]] = bitcast double* [[TMP11]] to i64*
// CHECK1-NEXT: [[TMP16:%.*]] = bitcast double* [[DOTOMP_REDUCTION_ELEMENT]] to i64*
// CHECK1-NEXT: [[TMP17:%.*]] = load i64, i64* [[TMP15]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK1-NEXT: [[TMP19:%.*]] = trunc i32 [[TMP18]] to i16
// CHECK1-NEXT: [[TMP20:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP17]], i16 [[TMP7]], i16 [[TMP19]])
// CHECK1-NEXT: store i64 [[TMP20]], i64* [[TMP16]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i64, i64* [[TMP15]], i64 1
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr i64, i64* [[TMP16]], i64 1
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast double* [[DOTOMP_REDUCTION_ELEMENT]] to i8*
// CHECK1-NEXT: store i8* [[TMP23]], i8** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = icmp eq i16 [[TMP8]], 0
// CHECK1-NEXT: [[TMP25:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK1-NEXT: [[TMP26:%.*]] = icmp ult i16 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: [[TMP27:%.*]] = and i1 [[TMP25]], [[TMP26]]
// CHECK1-NEXT: [[TMP28:%.*]] = icmp eq i16 [[TMP8]], 2
// CHECK1-NEXT: [[TMP29:%.*]] = and i16 [[TMP6]], 1
// CHECK1-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP29]], 0
// CHECK1-NEXT: [[TMP31:%.*]] = and i1 [[TMP28]], [[TMP30]]
// CHECK1-NEXT: [[TMP32:%.*]] = icmp sgt i16 [[TMP7]], 0
// CHECK1-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
// CHECK1-NEXT: [[TMP34:%.*]] = or i1 [[TMP24]], [[TMP27]]
// CHECK1-NEXT: [[TMP35:%.*]] = or i1 [[TMP34]], [[TMP33]]
// CHECK1-NEXT: br i1 [[TMP35]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK1: then:
// CHECK1-NEXT: [[TMP36:%.*]] = bitcast [1 x i8*]* [[TMP5]] to i8*
// CHECK1-NEXT: [[TMP37:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]] to i8*
// CHECK1-NEXT: call void @"_omp$reduction$reduction_func"(i8* [[TMP36]], i8* [[TMP37]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[IFCONT:%.*]]
// CHECK1: else:
// CHECK1-NEXT: br label [[IFCONT]]
// CHECK1: ifcont:
// CHECK1-NEXT: [[TMP38:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK1-NEXT: [[TMP39:%.*]] = icmp uge i16 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: [[TMP40:%.*]] = and i1 [[TMP38]], [[TMP39]]
// CHECK1-NEXT: br i1 [[TMP40]], label [[THEN4:%.*]], label [[ELSE5:%.*]]
// CHECK1: then4:
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to double**
// CHECK1-NEXT: [[TMP43:%.*]] = load double*, double** [[TMP42]], align 8
// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to double**
// CHECK1-NEXT: [[TMP46:%.*]] = load double*, double** [[TMP45]], align 8
// CHECK1-NEXT: [[TMP47:%.*]] = load double, double* [[TMP43]], align 8
// CHECK1-NEXT: store double [[TMP47]], double* [[TMP46]], align 8
// CHECK1-NEXT: br label [[IFCONT6:%.*]]
// CHECK1: else5:
// CHECK1-NEXT: br label [[IFCONT6]]
// CHECK1: ifcont6:
// CHECK1-NEXT: ret void
//
//
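// Cross-warp step: the master lane of each warp publishes its partial result
// through the 32-slot shared array @__openmp_nvptx_data_transfer_temporary_storage,
// and the active threads of the first warp read the slots back. The 8-byte
// double is moved as two 32-bit words, which is why the counter loop in
// [[DOTCNT_ADDR]] below runs twice, with a barrier around each round.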
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTCNT_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK1-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK1-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK1-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK1-NEXT: [[TMP6:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to [1 x i8*]*
// CHECK1-NEXT: store i32 0, i32* [[DOTCNT_ADDR]], align 4
// CHECK1-NEXT: br label [[PRECOND:%.*]]
// CHECK1: precond:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCNT_ADDR]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = icmp ult i32 [[TMP8]], 2
// CHECK1-NEXT: br i1 [[TMP9]], label [[BODY:%.*]], label [[EXIT:%.*]]
// CHECK1: body:
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2]])
// CHECK1-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK1-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK1: then:
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP7]], i64 0, i64 0
// CHECK1-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP10]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP11]] to i32*
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr i32, i32* [[TMP12]], i32 [[TMP8]]
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP13]], align 4
// CHECK1-NEXT: store volatile i32 [[TMP15]], i32 addrspace(3)* [[TMP14]], align 4
// CHECK1-NEXT: br label [[IFCONT:%.*]]
// CHECK1: else:
// CHECK1-NEXT: br label [[IFCONT]]
// CHECK1: ifcont:
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP16]]
// CHECK1-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK1: then2:
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP7]], i64 0, i64 0
// CHECK1-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to i32*
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i32, i32* [[TMP20]], i32 [[TMP8]]
// CHECK1-NEXT: [[TMP22:%.*]] = load volatile i32, i32 addrspace(3)* [[TMP17]], align 4
// CHECK1-NEXT: store i32 [[TMP22]], i32* [[TMP21]], align 4
// CHECK1-NEXT: br label [[IFCONT4:%.*]]
// CHECK1: else3:
// CHECK1-NEXT: br label [[IFCONT4]]
// CHECK1: ifcont4:
// CHECK1-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK1-NEXT: store i32 [[TMP23]], i32* [[DOTCNT_ADDR]], align 4
// CHECK1-NEXT: br label [[PRECOND]]
// CHECK1: exit:
// CHECK1-NEXT: ret void
//
//
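// The four helpers below move partial results between a team's local reduce
// list and the global scratchpad buffer (%struct._globalized_locals_ty, a
// [1024 x double] array indexed by team). The 1024 here matches the default
// teams-reduction buffer size; the CHECK3 run raises it to 2048 via
// -fopenmp-cuda-teams-reduction-recs-num.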
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_list_to_global_copy_func
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty*
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP4]], i64 0, i64 0
// CHECK1-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to double*
// CHECK1-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP6]], i32 0, i32 0
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1024 x double], [1024 x double]* [[E]], i32 0, i32 [[TMP7]]
// CHECK1-NEXT: [[TMP12:%.*]] = load double, double* [[TMP10]], align 8
// CHECK1-NEXT: store double [[TMP12]], double* [[TMP11]], align 128
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_list_to_global_reduce_func
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to %struct._globalized_locals_ty*
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1024 x double], [1024 x double]* [[E]], i32 0, i32 [[TMP5]]
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast double* [[TMP7]] to i8*
// CHECK1-NEXT: store i8* [[TMP8]], i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: call void @"_omp$reduction$reduction_func"(i8* [[TMP9]], i8* [[TMP10]]) #[[ATTR4]]
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_global_to_list_copy_func
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty*
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP4]], i64 0, i64 0
// CHECK1-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to double*
// CHECK1-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP6]], i32 0, i32 0
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1024 x double], [1024 x double]* [[E]], i32 0, i32 [[TMP7]]
// CHECK1-NEXT: [[TMP12:%.*]] = load double, double* [[TMP11]], align 128
// CHECK1-NEXT: store double [[TMP12]], double* [[TMP10]], align 8
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_global_to_list_reduce_func
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to %struct._globalized_locals_ty*
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1024 x double], [1024 x double]* [[E]], i32 0, i32 [[TMP5]]
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast double* [[TMP7]] to i8*
// CHECK1-NEXT: store i8* [[TMP8]], i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: call void @"_omp$reduction$reduction_func"(i8* [[TMP10]], i8* [[TMP9]]) #[[ATTR4]]
// CHECK1-NEXT: ret void
//
//
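// Kernel for the target region at line 26: two reduction variables, a char
// reduced with xor and a float reduced with fmul (initialized to the
// identities 0 and 1.0). The shuffle helper below moves both elements with
// @__kmpc_shuffle_int32, widening the i8 to i32 for the shuffle and
// truncating it back afterwards.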
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l26
// CHECK1-SAME: (i64 noundef [[C:%.*]], i64 noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i64 [[C]], i64* [[C_ADDR]], align 8
// CHECK1-NEXT: store i64 [[D]], i64* [[D_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[C_ADDR]] to i8*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[D_ADDR]] to float*
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK1: user_code.entry:
// CHECK1-NEXT: [[TMP1:%.*]] = load i8, i8* [[CONV]], align 1
// CHECK1-NEXT: [[C2:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 1)
// CHECK1-NEXT: store i8 [[TMP1]], i8* [[C2]], align 1
// CHECK1-NEXT: [[TMP2:%.*]] = load float, float* [[CONV1]], align 4
// CHECK1-NEXT: [[D3:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 4)
// CHECK1-NEXT: [[D_ON_STACK:%.*]] = bitcast i8* [[D3]] to float*
// CHECK1-NEXT: store float [[TMP2]], float* [[D_ON_STACK]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK1-NEXT: store i32 [[TMP3]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK1-NEXT: call void @__omp_outlined__1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i8* [[C2]], float* [[D_ON_STACK]]) #[[ATTR4]]
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[D3]], i64 4)
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[C2]], i64 1)
// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK1-NEXT: ret void
// CHECK1: worker.exit:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i8* noundef nonnull align 1 dereferenceable(1) [[C:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i8* [[C]], i8** [[C_ADDR]], align 8
// CHECK1-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i8*, i8** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK1-NEXT: [[C1:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 1)
// CHECK1-NEXT: [[D2:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 4)
// CHECK1-NEXT: [[D_ON_STACK:%.*]] = bitcast i8* [[D2]] to float*
// CHECK1-NEXT: store i8 0, i8* [[C1]], align 1
// CHECK1-NEXT: store float 1.000000e+00, float* [[D_ON_STACK]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i8, i8* [[C1]], align 1
// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP2]] to i32
// CHECK1-NEXT: [[XOR:%.*]] = xor i32 [[CONV]], 2
// CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[XOR]] to i8
// CHECK1-NEXT: store i8 [[CONV3]], i8* [[C1]], align 1
// CHECK1-NEXT: [[TMP3:%.*]] = load float, float* [[D_ON_STACK]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], 3.300000e+01
// CHECK1-NEXT: store float [[MUL]], float* [[D_ON_STACK]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: store i8* [[C1]], i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast float* [[D_ON_STACK]] to i8*
// CHECK1-NEXT: store i8* [[TMP8]], i8** [[TMP7]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** @"_openmp_teams_reductions_buffer_$_$ptr", align 8
// CHECK1-NEXT: [[TMP11:%.*]] = call i32 @__kmpc_nvptx_teams_reduce_nowait_v2(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i8* [[TMP10]], i32 1024, i8* [[TMP9]], void (i8*, i16, i16, i16)* @_omp_reduction_shuffle_and_reduce_func3, void (i8*, i32)* @_omp_reduction_inter_warp_copy_func4, void (i8*, i32, i8*)* @_omp_reduction_list_to_global_copy_func5, void (i8*, i32, i8*)* @_omp_reduction_list_to_global_reduce_func6, void (i8*, i32, i8*)* @_omp_reduction_global_to_list_copy_func7, void (i8*, i32, i8*)* @_omp_reduction_global_to_list_reduce_func8)
// CHECK1-NEXT: [[TMP12:%.*]] = icmp eq i32 [[TMP11]], 1
// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK1: .omp.reduction.then:
// CHECK1-NEXT: [[TMP13:%.*]] = load i8, i8* [[TMP0]], align 1
// CHECK1-NEXT: [[CONV4:%.*]] = sext i8 [[TMP13]] to i32
// CHECK1-NEXT: [[TMP14:%.*]] = load i8, i8* [[C1]], align 1
// CHECK1-NEXT: [[CONV5:%.*]] = sext i8 [[TMP14]] to i32
// CHECK1-NEXT: [[XOR6:%.*]] = xor i32 [[CONV4]], [[CONV5]]
// CHECK1-NEXT: [[CONV7:%.*]] = trunc i32 [[XOR6]] to i8
// CHECK1-NEXT: store i8 [[CONV7]], i8* [[TMP0]], align 1
// CHECK1-NEXT: [[TMP15:%.*]] = load float, float* [[TMP1]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = load float, float* [[D_ON_STACK]], align 4
// CHECK1-NEXT: [[MUL8:%.*]] = fmul float [[TMP15]], [[TMP16]]
// CHECK1-NEXT: store float [[MUL8]], float* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP5]])
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK1: .omp.reduction.done:
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[D2]], i64 4)
// CHECK1-NEXT: call void @__kmpc_free_shared(i8* [[C1]], i64 1)
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func3
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca float, align 4
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i16 [[TMP1]], i16* [[DOTADDR1]], align 2
// CHECK1-NEXT: store i16 [[TMP2]], i16* [[DOTADDR2]], align 2
// CHECK1-NEXT: store i16 [[TMP3]], i16* [[DOTADDR3]], align 2
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = load i16, i16* [[DOTADDR1]], align 2
// CHECK1-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTADDR2]], align 2
// CHECK1-NEXT: [[TMP8:%.*]] = load i16, i16* [[DOTADDR3]], align 2
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[TMP10]], i64 1
// CHECK1-NEXT: [[TMP13:%.*]] = load i8, i8* [[TMP10]], align 1
// CHECK1-NEXT: [[TMP14:%.*]] = sext i8 [[TMP13]] to i32
// CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK1-NEXT: [[TMP16:%.*]] = trunc i32 [[TMP15]] to i16
// CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP14]], i16 [[TMP7]], i16 [[TMP16]])
// CHECK1-NEXT: [[TMP18:%.*]] = trunc i32 [[TMP17]] to i8
// CHECK1-NEXT: store i8 [[TMP18]], i8* [[DOTOMP_REDUCTION_ELEMENT]], align 1
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr i8, i8* [[TMP10]], i64 1
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr i8, i8* [[DOTOMP_REDUCTION_ELEMENT]], i64 1
// CHECK1-NEXT: store i8* [[DOTOMP_REDUCTION_ELEMENT]], i8** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK1-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to float**
// CHECK1-NEXT: [[TMP23:%.*]] = load float*, float** [[TMP22]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr float, float* [[TMP23]], i64 1
// CHECK1-NEXT: [[TMP26:%.*]] = bitcast float* [[TMP25]] to i8*
// CHECK1-NEXT: [[TMP27:%.*]] = bitcast float* [[TMP23]] to i32*
// CHECK1-NEXT: [[TMP28:%.*]] = bitcast float* [[DOTOMP_REDUCTION_ELEMENT4]] to i32*
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP27]], align 4
// CHECK1-NEXT: [[TMP30:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK1-NEXT: [[TMP31:%.*]] = trunc i32 [[TMP30]] to i16
// CHECK1-NEXT: [[TMP32:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP29]], i16 [[TMP7]], i16 [[TMP31]])
// CHECK1-NEXT: store i32 [[TMP32]], i32* [[TMP28]], align 4
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr i32, i32* [[TMP27]], i64 1
// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr i32, i32* [[TMP28]], i64 1
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast float* [[DOTOMP_REDUCTION_ELEMENT4]] to i8*
// CHECK1-NEXT: store i8* [[TMP35]], i8** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP8]], 0
// CHECK1-NEXT: [[TMP37:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK1-NEXT: [[TMP38:%.*]] = icmp ult i16 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
// CHECK1-NEXT: [[TMP40:%.*]] = icmp eq i16 [[TMP8]], 2
// CHECK1-NEXT: [[TMP41:%.*]] = and i16 [[TMP6]], 1
// CHECK1-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP41]], 0
// CHECK1-NEXT: [[TMP43:%.*]] = and i1 [[TMP40]], [[TMP42]]
// CHECK1-NEXT: [[TMP44:%.*]] = icmp sgt i16 [[TMP7]], 0
// CHECK1-NEXT: [[TMP45:%.*]] = and i1 [[TMP43]], [[TMP44]]
// CHECK1-NEXT: [[TMP46:%.*]] = or i1 [[TMP36]], [[TMP39]]
// CHECK1-NEXT: [[TMP47:%.*]] = or i1 [[TMP46]], [[TMP45]]
// CHECK1-NEXT: br i1 [[TMP47]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK1: then:
// CHECK1-NEXT: [[TMP48:%.*]] = bitcast [2 x i8*]* [[TMP5]] to i8*
// CHECK1-NEXT: [[TMP49:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]] to i8*
// CHECK1-NEXT: call void @"_omp$reduction$reduction_func2"(i8* [[TMP48]], i8* [[TMP49]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[IFCONT:%.*]]
// CHECK1: else:
// CHECK1-NEXT: br label [[IFCONT]]
// CHECK1: ifcont:
// CHECK1-NEXT: [[TMP50:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK1-NEXT: [[TMP51:%.*]] = icmp uge i16 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: [[TMP52:%.*]] = and i1 [[TMP50]], [[TMP51]]
// CHECK1-NEXT: br i1 [[TMP52]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
// CHECK1: then5:
// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP54:%.*]] = load i8*, i8** [[TMP53]], align 8
// CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP56:%.*]] = load i8*, i8** [[TMP55]], align 8
// CHECK1-NEXT: [[TMP57:%.*]] = load i8, i8* [[TMP54]], align 1
// CHECK1-NEXT: store i8 [[TMP57]], i8* [[TMP56]], align 1
// CHECK1-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[TMP59:%.*]] = bitcast i8** [[TMP58]] to float**
// CHECK1-NEXT: [[TMP60:%.*]] = load float*, float** [[TMP59]], align 8
// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK1-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to float**
// CHECK1-NEXT: [[TMP63:%.*]] = load float*, float** [[TMP62]], align 8
// CHECK1-NEXT: [[TMP64:%.*]] = load float, float* [[TMP60]], align 4
// CHECK1-NEXT: store float [[TMP64]], float* [[TMP63]], align 4
// CHECK1-NEXT: br label [[IFCONT7:%.*]]
// CHECK1: else6:
// CHECK1-NEXT: br label [[IFCONT7]]
// CHECK1: ifcont7:
// CHECK1-NEXT: ret void
//
//
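// Inter-warp copy for the (char, float) pair: two barrier-delimited rounds,
// one per element. The char travels through the shared slot as a single byte
// (the i32 slot is bitcast to i8 addrspace(3)), the float as one 32-bit word.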
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func4
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK1-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK1-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK1-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK1-NEXT: [[TMP6:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to [2 x i8*]*
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
// CHECK1-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK1-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK1: then:
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i64 0, i64 0
// CHECK1-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i32 addrspace(3)* [[TMP10]] to i8 addrspace(3)*
// CHECK1-NEXT: [[TMP12:%.*]] = load i8, i8* [[TMP9]], align 1
// CHECK1-NEXT: store volatile i8 [[TMP12]], i8 addrspace(3)* [[TMP11]], align 1
// CHECK1-NEXT: br label [[IFCONT:%.*]]
// CHECK1: else:
// CHECK1-NEXT: br label [[IFCONT]]
// CHECK1: ifcont:
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP13]]
// CHECK1-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK1: then2:
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK1-NEXT: [[TMP15:%.*]] = bitcast i32 addrspace(3)* [[TMP14]] to i8 addrspace(3)*
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i64 0, i64 0
// CHECK1-NEXT: [[TMP17:%.*]] = load i8*, i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = load volatile i8, i8 addrspace(3)* [[TMP15]], align 1
// CHECK1-NEXT: store i8 [[TMP18]], i8* [[TMP17]], align 1
// CHECK1-NEXT: br label [[IFCONT4:%.*]]
// CHECK1: else3:
// CHECK1-NEXT: br label [[IFCONT4]]
// CHECK1: ifcont4:
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
// CHECK1-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK1-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
// CHECK1: then6:
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i64 0, i64 1
// CHECK1-NEXT: [[TMP20:%.*]] = load i8*, i8** [[TMP19]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i8* [[TMP20]] to i32*
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK1-NEXT: store volatile i32 [[TMP23]], i32 addrspace(3)* [[TMP22]], align 4
// CHECK1-NEXT: br label [[IFCONT8:%.*]]
// CHECK1: else7:
// CHECK1-NEXT: br label [[IFCONT8]]
// CHECK1: ifcont8:
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP24]]
// CHECK1-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
// CHECK1: then10:
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i64 0, i64 1
// CHECK1-NEXT: [[TMP27:%.*]] = load i8*, i8** [[TMP26]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = bitcast i8* [[TMP27]] to i32*
// CHECK1-NEXT: [[TMP29:%.*]] = load volatile i32, i32 addrspace(3)* [[TMP25]], align 4
// CHECK1-NEXT: store i32 [[TMP29]], i32* [[TMP28]], align 4
// CHECK1-NEXT: br label [[IFCONT12:%.*]]
// CHECK1: else11:
// CHECK1-NEXT: br label [[IFCONT12]]
// CHECK1: ifcont12:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_list_to_global_copy_func5
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to [2 x i8*]*
// CHECK1-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty.0*
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i64 0, i64 0
// CHECK1-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 8
// CHECK1-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0:%.*]], %struct._globalized_locals_ty.0* [[TMP6]], i32 0, i32 0
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[C]], i32 0, i32 [[TMP7]]
// CHECK1-NEXT: [[TMP11:%.*]] = load i8, i8* [[TMP9]], align 1
// CHECK1-NEXT: store i8 [[TMP11]], i8* [[TMP10]], align 128
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i64 0, i64 1
// CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to float*
// CHECK1-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0]], %struct._globalized_locals_ty.0* [[TMP6]], i32 0, i32 1
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1024 x float], [1024 x float]* [[D]], i32 0, i32 [[TMP7]]
// CHECK1-NEXT: [[TMP16:%.*]] = load float, float* [[TMP14]], align 4
// CHECK1-NEXT: store float [[TMP16]], float* [[TMP15]], align 128
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_list_to_global_reduce_func6
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to %struct._globalized_locals_ty.0*
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0:%.*]], %struct._globalized_locals_ty.0* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[C]], i32 0, i32 [[TMP5]]
// CHECK1-NEXT: store i8* [[TMP7]], i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0]], %struct._globalized_locals_ty.0* [[TMP4]], i32 0, i32 1
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1024 x float], [1024 x float]* [[D]], i32 0, i32 [[TMP5]]
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast float* [[TMP9]] to i8*
// CHECK1-NEXT: store i8* [[TMP10]], i8** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP12:%.*]] = load i8*, i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: call void @"_omp$reduction$reduction_func2"(i8* [[TMP11]], i8* [[TMP12]]) #[[ATTR4]]
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_global_to_list_copy_func7
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to [2 x i8*]*
// CHECK1-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty.0*
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i64 0, i64 0
// CHECK1-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 8
// CHECK1-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0:%.*]], %struct._globalized_locals_ty.0* [[TMP6]], i32 0, i32 0
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[C]], i32 0, i32 [[TMP7]]
// CHECK1-NEXT: [[TMP11:%.*]] = load i8, i8* [[TMP10]], align 128
// CHECK1-NEXT: store i8 [[TMP11]], i8* [[TMP9]], align 1
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i64 0, i64 1
// CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to float*
// CHECK1-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0]], %struct._globalized_locals_ty.0* [[TMP6]], i32 0, i32 1
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1024 x float], [1024 x float]* [[D]], i32 0, i32 [[TMP7]]
// CHECK1-NEXT: [[TMP16:%.*]] = load float, float* [[TMP15]], align 128
// CHECK1-NEXT: store float [[TMP16]], float* [[TMP14]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_global_to_list_reduce_func8
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to %struct._globalized_locals_ty.0*
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0:%.*]], %struct._globalized_locals_ty.0* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[C]], i32 0, i32 [[TMP5]]
// CHECK1-NEXT: store i8* [[TMP7]], i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0]], %struct._globalized_locals_ty.0* [[TMP4]], i32 0, i32 1
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1024 x float], [1024 x float]* [[D]], i32 0, i32 [[TMP5]]
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast float* [[TMP9]] to i8*
// CHECK1-NEXT: store i8* [[TMP10]], i8** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP12:%.*]] = load i8*, i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: call void @"_omp$reduction$reduction_func2"(i8* [[TMP12]], i8* [[TMP11]]) #[[ATTR4]]
// CHECK1-NEXT: ret void
//
//
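// Kernel for the combined construct at line 33: SPMD mode (mode argument 2 to
// @__kmpc_target_init, no generic-mode state machine), with the nested
// parallel region launched through @__kmpc_parallel_51. Both the parallel and
// the teams reductions cover an int (|) and a short (max, identity -32768).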
734 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l33
735 // CHECK1-SAME: (i64 noundef [[A:%.*]], i64 noundef [[B:%.*]]) #[[ATTR0]] {
736 // CHECK1-NEXT: entry:
737 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
738 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
739 // CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
740 // CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
741 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
742 // CHECK1-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
743 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
744 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[B_ADDR]] to i16*
745 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 2, i1 false, i1 true)
746 // CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
747 // CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
748 // CHECK1: user_code.entry:
749 // CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
750 // CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
751 // CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
752 // CHECK1-NEXT: call void @__omp_outlined__9(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32* [[CONV]], i16* [[CONV1]]) #[[ATTR4]]
753 // CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
754 // CHECK1-NEXT: ret void
755 // CHECK1: worker.exit:
756 // CHECK1-NEXT: ret void
757 //
758 //
759 // CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__9
760 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR2]] {
761 // CHECK1-NEXT: entry:
762 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
763 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
764 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
765 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i16*, align 8
766 // CHECK1-NEXT: [[A1:%.*]] = alloca i32, align 4
767 // CHECK1-NEXT: [[B2:%.*]] = alloca i16, align 2
768 // CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x i8*], align 8
769 // CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
770 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
771 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i16* [[B]], i16** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i16*, i16** [[B_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[A1]], align 4
// CHECK1-NEXT: store i16 -32768, i16* [[B2]], align 2
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i32* [[A1]] to i8*
// CHECK1-NEXT: store i8* [[TMP3]], i8** [[TMP2]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i16* [[B2]] to i8*
// CHECK1-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*, i16*)* @__omp_outlined__10 to i8*), i8* null, i8** [[TMP8]], i64 2)
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast i32* [[A1]] to i8*
// CHECK1-NEXT: store i8* [[TMP10]], i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast i16* [[B2]] to i8*
// CHECK1-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP14:%.*]] = load i8*, i8** @"_openmp_teams_reductions_buffer_$_$ptr", align 8
// CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_nvptx_teams_reduce_nowait_v2(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i8* [[TMP14]], i32 1024, i8* [[TMP13]], void (i8*, i16, i16, i16)* @_omp_reduction_shuffle_and_reduce_func15, void (i8*, i32)* @_omp_reduction_inter_warp_copy_func16, void (i8*, i32, i8*)* @_omp_reduction_list_to_global_copy_func17, void (i8*, i32, i8*)* @_omp_reduction_list_to_global_reduce_func18, void (i8*, i32, i8*)* @_omp_reduction_global_to_list_copy_func19, void (i8*, i32, i8*)* @_omp_reduction_global_to_list_reduce_func20)
// CHECK1-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 1
// CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK1: .omp.reduction.then:
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[A1]], align 4
// CHECK1-NEXT: [[OR:%.*]] = or i32 [[TMP17]], [[TMP18]]
// CHECK1-NEXT: store i32 [[OR]], i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP19]] to i32
// CHECK1-NEXT: [[TMP20:%.*]] = load i16, i16* [[B2]], align 2
// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP20]] to i32
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[CONV]], [[CONV3]]
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: [[TMP21:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP22:%.*]] = load i16, i16* [[B2]], align 2
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i16 [ [[TMP21]], [[COND_TRUE]] ], [ [[TMP22]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i16 [[COND]], i16* [[TMP1]], align 2
// CHECK1-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP7]])
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK1: .omp.reduction.done:
// CHECK1-NEXT: ret void
//
//
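// The outlined parallel region checked next privatizes both reduction
// variables (a = 0, b = SHRT_MIN), applies the local |= 1 and max-against-99
// updates, and then hands the pair to __kmpc_nvptx_parallel_reduce_nowait_v2
// as a two-entry reduce list (i32 2, i64 16 = 2 elements, 16 bytes).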
// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__10
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: [[A1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[B2:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: store i16* [[B]], i16** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i16*, i16** [[B_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[A1]], align 4
// CHECK1-NEXT: store i16 -32768, i16* [[B2]], align 2
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[A1]], align 4
// CHECK1-NEXT: [[OR:%.*]] = or i32 [[TMP2]], 1
// CHECK1-NEXT: store i32 [[OR]], i32* [[A1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i16, i16* [[B2]], align 2
// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 99, [[CONV]]
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i16, i16* [[B2]], align 2
// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP4]] to i32
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[CONV3]], [[COND_FALSE]] ]
// CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[COND]] to i16
// CHECK1-NEXT: store i16 [[CONV4]], i16* [[B2]], align 2
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i32* [[A1]] to i8*
// CHECK1-NEXT: store i8* [[TMP8]], i8** [[TMP7]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast i16* [[B2]] to i8*
// CHECK1-NEXT: store i8* [[TMP10]], i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP12:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 2, i64 16, i8* [[TMP11]], void (i8*, i16, i16, i16)* @_omp_reduction_shuffle_and_reduce_func12, void (i8*, i32)* @_omp_reduction_inter_warp_copy_func13)
// CHECK1-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP12]], 1
// CHECK1-NEXT: br i1 [[TMP13]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK1: .omp.reduction.then:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[A1]], align 4
// CHECK1-NEXT: [[OR5:%.*]] = or i32 [[TMP14]], [[TMP15]]
// CHECK1-NEXT: store i32 [[OR5]], i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK1-NEXT: [[CONV6:%.*]] = sext i16 [[TMP16]] to i32
// CHECK1-NEXT: [[TMP17:%.*]] = load i16, i16* [[B2]], align 2
// CHECK1-NEXT: [[CONV7:%.*]] = sext i16 [[TMP17]] to i32
// CHECK1-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[CONV6]], [[CONV7]]
// CHECK1-NEXT: br i1 [[CMP8]], label [[COND_TRUE9:%.*]], label [[COND_FALSE10:%.*]]
// CHECK1: cond.true9:
// CHECK1-NEXT: [[TMP18:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK1-NEXT: br label [[COND_END11:%.*]]
// CHECK1: cond.false10:
// CHECK1-NEXT: [[TMP19:%.*]] = load i16, i16* [[B2]], align 2
// CHECK1-NEXT: br label [[COND_END11]]
// CHECK1: cond.end11:
// CHECK1-NEXT: [[COND12:%.*]] = phi i16 [ [[TMP18]], [[COND_TRUE9]] ], [ [[TMP19]], [[COND_FALSE10]] ]
// CHECK1-NEXT: store i16 [[COND12]], i16* [[TMP1]], align 2
// CHECK1-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP6]])
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK1: .omp.reduction.done:
// CHECK1-NEXT: ret void
//
//
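// The shuffle-and-reduce helper below exchanges each reduce-list element with
// a remote lane via __kmpc_shuffle_int32 (the i16 value is widened to i32 for
// the shuffle and truncated back), then conditionally folds the remote list
// into the local one through "_omp$reduction$reduction_func11".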
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func12
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca i16, align 2
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i16 [[TMP1]], i16* [[DOTADDR1]], align 2
// CHECK1-NEXT: store i16 [[TMP2]], i16* [[DOTADDR2]], align 2
// CHECK1-NEXT: store i16 [[TMP3]], i16* [[DOTADDR3]], align 2
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = load i16, i16* [[DOTADDR1]], align 2
// CHECK1-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTADDR2]], align 2
// CHECK1-NEXT: [[TMP8:%.*]] = load i16, i16* [[DOTADDR3]], align 2
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32**
// CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[TMP10]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr i32, i32* [[TMP11]], i64 1
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast i32* [[TMP13]] to i8*
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK1-NEXT: [[TMP17:%.*]] = trunc i32 [[TMP16]] to i16
// CHECK1-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP15]], i16 [[TMP7]], i16 [[TMP17]])
// CHECK1-NEXT: store i32 [[TMP18]], i32* [[DOTOMP_REDUCTION_ELEMENT]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr i32, i32* [[TMP11]], i64 1
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr i32, i32* [[DOTOMP_REDUCTION_ELEMENT]], i64 1
// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i32* [[DOTOMP_REDUCTION_ELEMENT]] to i8*
// CHECK1-NEXT: store i8* [[TMP21]], i8** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i16**
// CHECK1-NEXT: [[TMP24:%.*]] = load i16*, i16** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr i16, i16* [[TMP24]], i64 1
// CHECK1-NEXT: [[TMP27:%.*]] = bitcast i16* [[TMP26]] to i8*
// CHECK1-NEXT: [[TMP28:%.*]] = load i16, i16* [[TMP24]], align 2
// CHECK1-NEXT: [[TMP29:%.*]] = sext i16 [[TMP28]] to i32
// CHECK1-NEXT: [[TMP30:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK1-NEXT: [[TMP31:%.*]] = trunc i32 [[TMP30]] to i16
// CHECK1-NEXT: [[TMP32:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP29]], i16 [[TMP7]], i16 [[TMP31]])
// CHECK1-NEXT: [[TMP33:%.*]] = trunc i32 [[TMP32]] to i16
// CHECK1-NEXT: store i16 [[TMP33]], i16* [[DOTOMP_REDUCTION_ELEMENT4]], align 2
// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr i16, i16* [[TMP24]], i64 1
// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr i16, i16* [[DOTOMP_REDUCTION_ELEMENT4]], i64 1
// CHECK1-NEXT: [[TMP36:%.*]] = bitcast i16* [[DOTOMP_REDUCTION_ELEMENT4]] to i8*
// CHECK1-NEXT: store i8* [[TMP36]], i8** [[TMP25]], align 8
// CHECK1-NEXT: [[TMP37:%.*]] = icmp eq i16 [[TMP8]], 0
// CHECK1-NEXT: [[TMP38:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK1-NEXT: [[TMP39:%.*]] = icmp ult i16 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: [[TMP40:%.*]] = and i1 [[TMP38]], [[TMP39]]
// CHECK1-NEXT: [[TMP41:%.*]] = icmp eq i16 [[TMP8]], 2
// CHECK1-NEXT: [[TMP42:%.*]] = and i16 [[TMP6]], 1
// CHECK1-NEXT: [[TMP43:%.*]] = icmp eq i16 [[TMP42]], 0
// CHECK1-NEXT: [[TMP44:%.*]] = and i1 [[TMP41]], [[TMP43]]
// CHECK1-NEXT: [[TMP45:%.*]] = icmp sgt i16 [[TMP7]], 0
// CHECK1-NEXT: [[TMP46:%.*]] = and i1 [[TMP44]], [[TMP45]]
// CHECK1-NEXT: [[TMP47:%.*]] = or i1 [[TMP37]], [[TMP40]]
// CHECK1-NEXT: [[TMP48:%.*]] = or i1 [[TMP47]], [[TMP46]]
// CHECK1-NEXT: br i1 [[TMP48]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK1: then:
// CHECK1-NEXT: [[TMP49:%.*]] = bitcast [2 x i8*]* [[TMP5]] to i8*
// CHECK1-NEXT: [[TMP50:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]] to i8*
// CHECK1-NEXT: call void @"_omp$reduction$reduction_func11"(i8* [[TMP49]], i8* [[TMP50]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[IFCONT:%.*]]
// CHECK1: else:
// CHECK1-NEXT: br label [[IFCONT]]
// CHECK1: ifcont:
// CHECK1-NEXT: [[TMP51:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK1-NEXT: [[TMP52:%.*]] = icmp uge i16 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: [[TMP53:%.*]] = and i1 [[TMP51]], [[TMP52]]
// CHECK1-NEXT: br i1 [[TMP53]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
// CHECK1: then5:
// CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32**
// CHECK1-NEXT: [[TMP56:%.*]] = load i32*, i32** [[TMP55]], align 8
// CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to i32**
// CHECK1-NEXT: [[TMP59:%.*]] = load i32*, i32** [[TMP58]], align 8
// CHECK1-NEXT: [[TMP60:%.*]] = load i32, i32* [[TMP56]], align 4
// CHECK1-NEXT: store i32 [[TMP60]], i32* [[TMP59]], align 4
// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i16**
// CHECK1-NEXT: [[TMP63:%.*]] = load i16*, i16** [[TMP62]], align 8
// CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK1-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i16**
// CHECK1-NEXT: [[TMP66:%.*]] = load i16*, i16** [[TMP65]], align 8
// CHECK1-NEXT: [[TMP67:%.*]] = load i16, i16* [[TMP63]], align 2
// CHECK1-NEXT: store i16 [[TMP67]], i16* [[TMP66]], align 2
// CHECK1-NEXT: br label [[IFCONT7:%.*]]
// CHECK1: else6:
// CHECK1-NEXT: br label [[IFCONT7]]
// CHECK1: ifcont7:
// CHECK1-NEXT: ret void
//
//
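// The inter-warp copy helper below moves one 32-bit chunk per list element
// through the shared staging array @__openmp_nvptx_data_transfer_temporary_storage:
// each warp master writes its value, a barrier separates the phases, and the
// active threads of the first warp read the staged values back. The teams
// variants (func15/func16) that follow repeat the same two patterns, feeding
// "_omp$reduction$reduction_func14" instead.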
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func13
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK1-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK1-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK1-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK1-NEXT: [[TMP6:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to [2 x i8*]*
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP2]])
// CHECK1-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK1-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK1: then:
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i64 0, i64 0
// CHECK1-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK1-NEXT: store volatile i32 [[TMP12]], i32 addrspace(3)* [[TMP11]], align 4
// CHECK1-NEXT: br label [[IFCONT:%.*]]
// CHECK1: else:
// CHECK1-NEXT: br label [[IFCONT]]
// CHECK1: ifcont:
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP13]]
// CHECK1-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK1: then2:
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i64 0, i64 0
// CHECK1-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
// CHECK1-NEXT: [[TMP18:%.*]] = load volatile i32, i32 addrspace(3)* [[TMP14]], align 4
// CHECK1-NEXT: store i32 [[TMP18]], i32* [[TMP17]], align 4
// CHECK1-NEXT: br label [[IFCONT4:%.*]]
// CHECK1: else3:
// CHECK1-NEXT: br label [[IFCONT4]]
// CHECK1: ifcont4:
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
// CHECK1-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK1-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
// CHECK1: then6:
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i64 0, i64 1
// CHECK1-NEXT: [[TMP20:%.*]] = load i8*, i8** [[TMP19]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i8* [[TMP20]] to i16*
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i32 addrspace(3)* [[TMP22]] to i16 addrspace(3)*
// CHECK1-NEXT: [[TMP24:%.*]] = load i16, i16* [[TMP21]], align 2
// CHECK1-NEXT: store volatile i16 [[TMP24]], i16 addrspace(3)* [[TMP23]], align 2
// CHECK1-NEXT: br label [[IFCONT8:%.*]]
// CHECK1: else7:
// CHECK1-NEXT: br label [[IFCONT8]]
// CHECK1: ifcont8:
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP25]]
// CHECK1-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
// CHECK1: then10:
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK1-NEXT: [[TMP27:%.*]] = bitcast i32 addrspace(3)* [[TMP26]] to i16 addrspace(3)*
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i64 0, i64 1
// CHECK1-NEXT: [[TMP29:%.*]] = load i8*, i8** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = bitcast i8* [[TMP29]] to i16*
// CHECK1-NEXT: [[TMP31:%.*]] = load volatile i16, i16 addrspace(3)* [[TMP27]], align 2
// CHECK1-NEXT: store i16 [[TMP31]], i16* [[TMP30]], align 2
// CHECK1-NEXT: br label [[IFCONT12:%.*]]
// CHECK1: else11:
// CHECK1-NEXT: br label [[IFCONT12]]
// CHECK1: ifcont12:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func15
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca i16, align 2
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i16 [[TMP1]], i16* [[DOTADDR1]], align 2
// CHECK1-NEXT: store i16 [[TMP2]], i16* [[DOTADDR2]], align 2
// CHECK1-NEXT: store i16 [[TMP3]], i16* [[DOTADDR3]], align 2
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = load i16, i16* [[DOTADDR1]], align 2
// CHECK1-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTADDR2]], align 2
// CHECK1-NEXT: [[TMP8:%.*]] = load i16, i16* [[DOTADDR3]], align 2
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32**
// CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[TMP10]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr i32, i32* [[TMP11]], i64 1
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast i32* [[TMP13]] to i8*
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK1-NEXT: [[TMP17:%.*]] = trunc i32 [[TMP16]] to i16
// CHECK1-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP15]], i16 [[TMP7]], i16 [[TMP17]])
// CHECK1-NEXT: store i32 [[TMP18]], i32* [[DOTOMP_REDUCTION_ELEMENT]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr i32, i32* [[TMP11]], i64 1
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr i32, i32* [[DOTOMP_REDUCTION_ELEMENT]], i64 1
// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i32* [[DOTOMP_REDUCTION_ELEMENT]] to i8*
// CHECK1-NEXT: store i8* [[TMP21]], i8** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i16**
// CHECK1-NEXT: [[TMP24:%.*]] = load i16*, i16** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr i16, i16* [[TMP24]], i64 1
// CHECK1-NEXT: [[TMP27:%.*]] = bitcast i16* [[TMP26]] to i8*
// CHECK1-NEXT: [[TMP28:%.*]] = load i16, i16* [[TMP24]], align 2
// CHECK1-NEXT: [[TMP29:%.*]] = sext i16 [[TMP28]] to i32
// CHECK1-NEXT: [[TMP30:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK1-NEXT: [[TMP31:%.*]] = trunc i32 [[TMP30]] to i16
// CHECK1-NEXT: [[TMP32:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP29]], i16 [[TMP7]], i16 [[TMP31]])
// CHECK1-NEXT: [[TMP33:%.*]] = trunc i32 [[TMP32]] to i16
// CHECK1-NEXT: store i16 [[TMP33]], i16* [[DOTOMP_REDUCTION_ELEMENT4]], align 2
// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr i16, i16* [[TMP24]], i64 1
// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr i16, i16* [[DOTOMP_REDUCTION_ELEMENT4]], i64 1
// CHECK1-NEXT: [[TMP36:%.*]] = bitcast i16* [[DOTOMP_REDUCTION_ELEMENT4]] to i8*
// CHECK1-NEXT: store i8* [[TMP36]], i8** [[TMP25]], align 8
// CHECK1-NEXT: [[TMP37:%.*]] = icmp eq i16 [[TMP8]], 0
// CHECK1-NEXT: [[TMP38:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK1-NEXT: [[TMP39:%.*]] = icmp ult i16 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: [[TMP40:%.*]] = and i1 [[TMP38]], [[TMP39]]
// CHECK1-NEXT: [[TMP41:%.*]] = icmp eq i16 [[TMP8]], 2
// CHECK1-NEXT: [[TMP42:%.*]] = and i16 [[TMP6]], 1
// CHECK1-NEXT: [[TMP43:%.*]] = icmp eq i16 [[TMP42]], 0
// CHECK1-NEXT: [[TMP44:%.*]] = and i1 [[TMP41]], [[TMP43]]
// CHECK1-NEXT: [[TMP45:%.*]] = icmp sgt i16 [[TMP7]], 0
// CHECK1-NEXT: [[TMP46:%.*]] = and i1 [[TMP44]], [[TMP45]]
// CHECK1-NEXT: [[TMP47:%.*]] = or i1 [[TMP37]], [[TMP40]]
// CHECK1-NEXT: [[TMP48:%.*]] = or i1 [[TMP47]], [[TMP46]]
// CHECK1-NEXT: br i1 [[TMP48]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK1: then:
// CHECK1-NEXT: [[TMP49:%.*]] = bitcast [2 x i8*]* [[TMP5]] to i8*
// CHECK1-NEXT: [[TMP50:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]] to i8*
// CHECK1-NEXT: call void @"_omp$reduction$reduction_func14"(i8* [[TMP49]], i8* [[TMP50]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[IFCONT:%.*]]
// CHECK1: else:
// CHECK1-NEXT: br label [[IFCONT]]
// CHECK1: ifcont:
// CHECK1-NEXT: [[TMP51:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK1-NEXT: [[TMP52:%.*]] = icmp uge i16 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: [[TMP53:%.*]] = and i1 [[TMP51]], [[TMP52]]
// CHECK1-NEXT: br i1 [[TMP53]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
// CHECK1: then5:
// CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32**
// CHECK1-NEXT: [[TMP56:%.*]] = load i32*, i32** [[TMP55]], align 8
// CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to i32**
// CHECK1-NEXT: [[TMP59:%.*]] = load i32*, i32** [[TMP58]], align 8
// CHECK1-NEXT: [[TMP60:%.*]] = load i32, i32* [[TMP56]], align 4
// CHECK1-NEXT: store i32 [[TMP60]], i32* [[TMP59]], align 4
// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i16**
// CHECK1-NEXT: [[TMP63:%.*]] = load i16*, i16** [[TMP62]], align 8
// CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK1-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i16**
// CHECK1-NEXT: [[TMP66:%.*]] = load i16*, i16** [[TMP65]], align 8
// CHECK1-NEXT: [[TMP67:%.*]] = load i16, i16* [[TMP63]], align 2
// CHECK1-NEXT: store i16 [[TMP67]], i16* [[TMP66]], align 2
// CHECK1-NEXT: br label [[IFCONT7:%.*]]
// CHECK1: else6:
// CHECK1-NEXT: br label [[IFCONT7]]
// CHECK1: ifcont7:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func16
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK1-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK1-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK1-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK1-NEXT: [[TMP6:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to [2 x i8*]*
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
// CHECK1-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK1-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK1: then:
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i64 0, i64 0
// CHECK1-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK1-NEXT: store volatile i32 [[TMP12]], i32 addrspace(3)* [[TMP11]], align 4
// CHECK1-NEXT: br label [[IFCONT:%.*]]
// CHECK1: else:
// CHECK1-NEXT: br label [[IFCONT]]
// CHECK1: ifcont:
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP13]]
// CHECK1-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK1: then2:
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i64 0, i64 0
// CHECK1-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
// CHECK1-NEXT: [[TMP18:%.*]] = load volatile i32, i32 addrspace(3)* [[TMP14]], align 4
// CHECK1-NEXT: store i32 [[TMP18]], i32* [[TMP17]], align 4
// CHECK1-NEXT: br label [[IFCONT4:%.*]]
// CHECK1: else3:
// CHECK1-NEXT: br label [[IFCONT4]]
// CHECK1: ifcont4:
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
// CHECK1-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK1-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
// CHECK1: then6:
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i64 0, i64 1
// CHECK1-NEXT: [[TMP20:%.*]] = load i8*, i8** [[TMP19]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i8* [[TMP20]] to i16*
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i32 addrspace(3)* [[TMP22]] to i16 addrspace(3)*
// CHECK1-NEXT: [[TMP24:%.*]] = load i16, i16* [[TMP21]], align 2
// CHECK1-NEXT: store volatile i16 [[TMP24]], i16 addrspace(3)* [[TMP23]], align 2
// CHECK1-NEXT: br label [[IFCONT8:%.*]]
// CHECK1: else7:
// CHECK1-NEXT: br label [[IFCONT8]]
// CHECK1: ifcont8:
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP25]]
// CHECK1-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
// CHECK1: then10:
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK1-NEXT: [[TMP27:%.*]] = bitcast i32 addrspace(3)* [[TMP26]] to i16 addrspace(3)*
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i64 0, i64 1
// CHECK1-NEXT: [[TMP29:%.*]] = load i8*, i8** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = bitcast i8* [[TMP29]] to i16*
// CHECK1-NEXT: [[TMP31:%.*]] = load volatile i16, i16 addrspace(3)* [[TMP27]], align 2
// CHECK1-NEXT: store i16 [[TMP31]], i16* [[TMP30]], align 2
// CHECK1-NEXT: br label [[IFCONT12:%.*]]
// CHECK1: else11:
// CHECK1-NEXT: br label [[IFCONT12]]
// CHECK1: ifcont12:
// CHECK1-NEXT: ret void
//
//
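// The list-to-global copy helper below stores each team's reduce-list entries
// into its slot of the globalized buffer: %struct._globalized_locals_ty.1
// holds [1024 x i32] and [1024 x i16] arrays, matching the i32 1024 buffer
// size passed to __kmpc_nvptx_teams_reduce_nowait_v2 above.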
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_list_to_global_copy_func17
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to [2 x i8*]*
// CHECK1-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty.1*
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i64 0, i64 0
// CHECK1-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1:%.*]], %struct._globalized_locals_ty.1* [[TMP6]], i32 0, i32 0
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1024 x i32], [1024 x i32]* [[A]], i32 0, i32 [[TMP7]]
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK1-NEXT: store i32 [[TMP12]], i32* [[TMP11]], align 128
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i64 0, i64 1
// CHECK1-NEXT: [[TMP14:%.*]] = load i8*, i8** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = bitcast i8* [[TMP14]] to i16*
// CHECK1-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1]], %struct._globalized_locals_ty.1* [[TMP6]], i32 0, i32 1
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1024 x i16], [1024 x i16]* [[B]], i32 0, i32 [[TMP7]]
// CHECK1-NEXT: [[TMP17:%.*]] = load i16, i16* [[TMP15]], align 2
// CHECK1-NEXT: store i16 [[TMP17]], i16* [[TMP16]], align 128
// CHECK1-NEXT: ret void
//
//
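// The companion reduce helper below builds a reduce list that points directly
// into the buffer slot and reuses "_omp$reduction$reduction_func14" to fold
// the incoming per-team list into it.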
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_list_to_global_reduce_func18
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to %struct._globalized_locals_ty.1*
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1:%.*]], %struct._globalized_locals_ty.1* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1024 x i32], [1024 x i32]* [[A]], i32 0, i32 [[TMP5]]
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i32* [[TMP7]] to i8*
// CHECK1-NEXT: store i8* [[TMP8]], i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1]], %struct._globalized_locals_ty.1* [[TMP4]], i32 0, i32 1
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1024 x i16], [1024 x i16]* [[B]], i32 0, i32 [[TMP5]]
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i16* [[TMP10]] to i8*
// CHECK1-NEXT: store i8* [[TMP11]], i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: call void @"_omp$reduction$reduction_func14"(i8* [[TMP12]], i8* [[TMP13]]) #[[ATTR4]]
// CHECK1-NEXT: ret void
//
//
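// The two global-to-list helpers below perform the reverse transfers, buffer
// slot back into the private reduce list; note the swapped operand order in
// the final call to "_omp$reduction$reduction_func14".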
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_global_to_list_copy_func19
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to [2 x i8*]*
// CHECK1-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty.1*
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i64 0, i64 0
// CHECK1-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1:%.*]], %struct._globalized_locals_ty.1* [[TMP6]], i32 0, i32 0
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1024 x i32], [1024 x i32]* [[A]], i32 0, i32 [[TMP7]]
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 128
// CHECK1-NEXT: store i32 [[TMP12]], i32* [[TMP10]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i64 0, i64 1
// CHECK1-NEXT: [[TMP14:%.*]] = load i8*, i8** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = bitcast i8* [[TMP14]] to i16*
// CHECK1-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1]], %struct._globalized_locals_ty.1* [[TMP6]], i32 0, i32 1
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1024 x i16], [1024 x i16]* [[B]], i32 0, i32 [[TMP7]]
// CHECK1-NEXT: [[TMP17:%.*]] = load i16, i16* [[TMP16]], align 128
// CHECK1-NEXT: store i16 [[TMP17]], i16* [[TMP15]], align 2
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_omp_reduction_global_to_list_reduce_func20
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to %struct._globalized_locals_ty.1*
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1:%.*]], %struct._globalized_locals_ty.1* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1024 x i32], [1024 x i32]* [[A]], i32 0, i32 [[TMP5]]
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i32* [[TMP7]] to i8*
// CHECK1-NEXT: store i8* [[TMP8]], i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1]], %struct._globalized_locals_ty.1* [[TMP4]], i32 0, i32 1
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1024 x i16], [1024 x i16]* [[B]], i32 0, i32 [[TMP5]]
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i16* [[TMP10]] to i8*
// CHECK1-NEXT: store i8* [[TMP11]], i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[DOTADDR2]], align 8
// CHECK1-NEXT: call void @"_omp$reduction$reduction_func14"(i8* [[TMP13]], i8* [[TMP12]]) #[[ATTR4]]
// CHECK1-NEXT: ret void
//
//
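// The CHECK2 prefix covers the 32-bit run (i386 host, nvptx device), starting
// again from the _l20 target region that reduces the double e; pointer slots
// and reduce lists are 4-byte aligned here, while the double itself keeps its
// 8-byte alignment.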
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l20
// CHECK2-SAME: (double* noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[E_ADDR:%.*]] = alloca double*, align 4
// CHECK2-NEXT: [[E1:%.*]] = alloca double, align 8
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store double* [[E]], double** [[E_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load double*, double** [[E_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: [[TMP3:%.*]] = load double, double* [[TMP0]], align 8
// CHECK2-NEXT: store double [[TMP3]], double* [[E1]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK2-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], double* [[E1]]) #[[ATTR4:[0-9]+]]
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[E_ADDR:%.*]] = alloca double*, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store double* [[E]], double** [[E_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load double*, double** [[E_ADDR]], align 4
// CHECK2-NEXT: [[E1:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 8)
// CHECK2-NEXT: [[E_ON_STACK:%.*]] = bitcast i8* [[E1]] to double*
// CHECK2-NEXT: store double 0.000000e+00, double* [[E_ON_STACK]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load double, double* [[E_ON_STACK]], align 8
// CHECK2-NEXT: [[ADD:%.*]] = fadd double [[TMP1]], 5.000000e+00
// CHECK2-NEXT: store double [[ADD]], double* [[E_ON_STACK]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast double* [[E_ON_STACK]] to i8*
// CHECK2-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** @"_openmp_teams_reductions_buffer_$_$ptr", align 4
// CHECK2-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_nvptx_teams_reduce_nowait_v2(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i8* [[TMP7]], i32 1024, i8* [[TMP6]], void (i8*, i16, i16, i16)* @_omp_reduction_shuffle_and_reduce_func, void (i8*, i32)* @_omp_reduction_inter_warp_copy_func, void (i8*, i32, i8*)* @_omp_reduction_list_to_global_copy_func, void (i8*, i32, i8*)* @_omp_reduction_list_to_global_reduce_func, void (i8*, i32, i8*)* @_omp_reduction_global_to_list_copy_func, void (i8*, i32, i8*)* @_omp_reduction_global_to_list_reduce_func)
// CHECK2-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 1
// CHECK2-NEXT: br i1 [[TMP9]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK2: .omp.reduction.then:
// CHECK2-NEXT: [[TMP10:%.*]] = load double, double* [[TMP0]], align 8
// CHECK2-NEXT: [[TMP11:%.*]] = load double, double* [[E_ON_STACK]], align 8
// CHECK2-NEXT: [[ADD2:%.*]] = fadd double [[TMP10]], [[TMP11]]
// CHECK2-NEXT: store double [[ADD2]], double* [[TMP0]], align 8
// CHECK2-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP3]])
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK2: .omp.reduction.done:
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[E1]], i32 8)
// CHECK2-NEXT: ret void
//
//
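// Because the reduced element is an 8-byte double, the shuffle helper below
// moves it as a single 64-bit unit via __kmpc_shuffle_int64 instead of the
// 32-bit shuffles used for the int/short pair above.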
// CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca double, align 8
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i16 [[TMP1]], i16* [[DOTADDR1]], align 2
// CHECK2-NEXT: store i16 [[TMP2]], i16* [[DOTADDR2]], align 2
// CHECK2-NEXT: store i16 [[TMP3]], i16* [[DOTADDR3]], align 2
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP6:%.*]] = load i16, i16* [[DOTADDR1]], align 2
// CHECK2-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTADDR2]], align 2
// CHECK2-NEXT: [[TMP8:%.*]] = load i16, i16* [[DOTADDR3]], align 2
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to double**
// CHECK2-NEXT: [[TMP11:%.*]] = load double*, double** [[TMP10]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[TMP13:%.*]] = getelementptr double, double* [[TMP11]], i32 1
// CHECK2-NEXT: [[TMP14:%.*]] = bitcast double* [[TMP13]] to i8*
// CHECK2-NEXT: [[TMP15:%.*]] = bitcast double* [[TMP11]] to i64*
// CHECK2-NEXT: [[TMP16:%.*]] = bitcast double* [[DOTOMP_REDUCTION_ELEMENT]] to i64*
// CHECK2-NEXT: [[TMP17:%.*]] = load i64, i64* [[TMP15]], align 8
// CHECK2-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK2-NEXT: [[TMP19:%.*]] = trunc i32 [[TMP18]] to i16
// CHECK2-NEXT: [[TMP20:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP17]], i16 [[TMP7]], i16 [[TMP19]])
// CHECK2-NEXT: store i64 [[TMP20]], i64* [[TMP16]], align 8
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr i64, i64* [[TMP15]], i32 1
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr i64, i64* [[TMP16]], i32 1
// CHECK2-NEXT: [[TMP23:%.*]] = bitcast double* [[DOTOMP_REDUCTION_ELEMENT]] to i8*
// CHECK2-NEXT: store i8* [[TMP23]], i8** [[TMP12]], align 4
// CHECK2-NEXT: [[TMP24:%.*]] = icmp eq i16 [[TMP8]], 0
// CHECK2-NEXT: [[TMP25:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK2-NEXT: [[TMP26:%.*]] = icmp ult i16 [[TMP6]], [[TMP7]]
// CHECK2-NEXT: [[TMP27:%.*]] = and i1 [[TMP25]], [[TMP26]]
// CHECK2-NEXT: [[TMP28:%.*]] = icmp eq i16 [[TMP8]], 2
// CHECK2-NEXT: [[TMP29:%.*]] = and i16 [[TMP6]], 1
// CHECK2-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP29]], 0
// CHECK2-NEXT: [[TMP31:%.*]] = and i1 [[TMP28]], [[TMP30]]
// CHECK2-NEXT: [[TMP32:%.*]] = icmp sgt i16 [[TMP7]], 0
// CHECK2-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
// CHECK2-NEXT: [[TMP34:%.*]] = or i1 [[TMP24]], [[TMP27]]
// CHECK2-NEXT: [[TMP35:%.*]] = or i1 [[TMP34]], [[TMP33]]
// CHECK2-NEXT: br i1 [[TMP35]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK2: then:
// CHECK2-NEXT: [[TMP36:%.*]] = bitcast [1 x i8*]* [[TMP5]] to i8*
// CHECK2-NEXT: [[TMP37:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]] to i8*
// CHECK2-NEXT: call void @"_omp$reduction$reduction_func"(i8* [[TMP36]], i8* [[TMP37]]) #[[ATTR4]]
// CHECK2-NEXT: br label [[IFCONT:%.*]]
// CHECK2: else:
// CHECK2-NEXT: br label [[IFCONT]]
// CHECK2: ifcont:
// CHECK2-NEXT: [[TMP38:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK2-NEXT: [[TMP39:%.*]] = icmp uge i16 [[TMP6]], [[TMP7]]
// CHECK2-NEXT: [[TMP40:%.*]] = and i1 [[TMP38]], [[TMP39]]
// CHECK2-NEXT: br i1 [[TMP40]], label [[THEN4:%.*]], label [[ELSE5:%.*]]
// CHECK2: then4:
// CHECK2-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to double**
// CHECK2-NEXT: [[TMP43:%.*]] = load double*, double** [[TMP42]], align 4
// CHECK2-NEXT: [[TMP44:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK2-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to double**
// CHECK2-NEXT: [[TMP46:%.*]] = load double*, double** [[TMP45]], align 4
// CHECK2-NEXT: [[TMP47:%.*]] = load double, double* [[TMP43]], align 8
// CHECK2-NEXT: store double [[TMP47]], double* [[TMP46]], align 8
// CHECK2-NEXT: br label [[IFCONT6:%.*]]
// CHECK2: else5:
// CHECK2-NEXT: br label [[IFCONT6]]
// CHECK2: ifcont6:
// CHECK2-NEXT: ret void
//
//
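// The inter-warp copy below loops twice (the precond/body/exit structure on
// [[DOTCNT_ADDR]]), transferring the 8-byte double through the 32-bit shared
// staging array one word per iteration.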
1514 // CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func
1515 // CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR3]] {
1516 // CHECK2-NEXT: entry:
1517 // CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
1518 // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1519 // CHECK2-NEXT: [[DOTCNT_ADDR:%.*]] = alloca i32, align 4
1520 // CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1521 // CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
1522 // CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
1523 // CHECK2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1524 // CHECK2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1525 // CHECK2-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
1526 // CHECK2-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1527 // CHECK2-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
1528 // CHECK2-NEXT: [[TMP6:%.*]] = load i8*, i8** [[DOTADDR]], align 4
1529 // CHECK2-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to [1 x i8*]*
1530 // CHECK2-NEXT: store i32 0, i32* [[DOTCNT_ADDR]], align 4
1531 // CHECK2-NEXT: br label [[PRECOND:%.*]]
1532 // CHECK2: precond:
1533 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCNT_ADDR]], align 4
1534 // CHECK2-NEXT: [[TMP9:%.*]] = icmp ult i32 [[TMP8]], 2
1535 // CHECK2-NEXT: br i1 [[TMP9]], label [[BODY:%.*]], label [[EXIT:%.*]]
1536 // CHECK2: body:
1537 // CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2]])
1538 // CHECK2-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
1539 // CHECK2-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
1540 // CHECK2: then:
1541 // CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP7]], i32 0, i32 0
1542 // CHECK2-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP10]], align 4
1543 // CHECK2-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP11]] to i32*
1544 // CHECK2-NEXT: [[TMP13:%.*]] = getelementptr i32, i32* [[TMP12]], i32 [[TMP8]]
1545 // CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
1546 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP13]], align 4
1547 // CHECK2-NEXT: store volatile i32 [[TMP15]], i32 addrspace(3)* [[TMP14]], align 4
1548 // CHECK2-NEXT: br label [[IFCONT:%.*]]
1549 // CHECK2: else:
1550 // CHECK2-NEXT: br label [[IFCONT]]
1551 // CHECK2: ifcont:
1552 // CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
1553 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTADDR1]], align 4
1554 // CHECK2-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP16]]
1555 // CHECK2-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
1556 // CHECK2: then2:
1557 // CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
1558 // CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP7]], i32 0, i32 0
1559 // CHECK2-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 4
1560 // CHECK2-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to i32*
1561 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr i32, i32* [[TMP20]], i32 [[TMP8]]
1562 // CHECK2-NEXT: [[TMP22:%.*]] = load volatile i32, i32 addrspace(3)* [[TMP17]], align 4
1563 // CHECK2-NEXT: store i32 [[TMP22]], i32* [[TMP21]], align 4
1564 // CHECK2-NEXT: br label [[IFCONT4:%.*]]
1565 // CHECK2: else3:
1566 // CHECK2-NEXT: br label [[IFCONT4]]
1567 // CHECK2: ifcont4:
1568 // CHECK2-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP8]], 1
1569 // CHECK2-NEXT: store i32 [[TMP23]], i32* [[DOTCNT_ADDR]], align 4
1570 // CHECK2-NEXT: br label [[PRECOND]]
1571 // CHECK2: exit:
1572 // CHECK2-NEXT: ret void
1573 //
1574 //
// CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_list_to_global_copy_func
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty*
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP4]], i32 0, i32 0
// CHECK2-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to double*
// CHECK2-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP6]], i32 0, i32 0
// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1024 x double], [1024 x double]* [[E]], i32 0, i32 [[TMP7]]
// CHECK2-NEXT: [[TMP12:%.*]] = load double, double* [[TMP10]], align 8
// CHECK2-NEXT: store double [[TMP12]], double* [[TMP11]], align 128
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_list_to_global_reduce_func
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to %struct._globalized_locals_ty*
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP4]], i32 0, i32 0
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1024 x double], [1024 x double]* [[E]], i32 0, i32 [[TMP5]]
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast double* [[TMP7]] to i8*
// CHECK2-NEXT: store i8* [[TMP8]], i8** [[TMP6]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: call void @"_omp$reduction$reduction_func"(i8* [[TMP9]], i8* [[TMP10]]) #[[ATTR4]]
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_global_to_list_copy_func
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty*
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP4]], i32 0, i32 0
// CHECK2-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to double*
// CHECK2-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP6]], i32 0, i32 0
// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1024 x double], [1024 x double]* [[E]], i32 0, i32 [[TMP7]]
// CHECK2-NEXT: [[TMP12:%.*]] = load double, double* [[TMP11]], align 128
// CHECK2-NEXT: store double [[TMP12]], double* [[TMP10]], align 8
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_global_to_list_reduce_func
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to %struct._globalized_locals_ty*
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP4]], i32 0, i32 0
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1024 x double], [1024 x double]* [[E]], i32 0, i32 [[TMP5]]
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast double* [[TMP7]] to i8*
// CHECK2-NEXT: store i8* [[TMP8]], i8** [[TMP6]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: call void @"_omp$reduction$reduction_func"(i8* [[TMP10]], i8* [[TMP9]]) #[[ATTR4]]
// CHECK2-NEXT: ret void
//
//
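// Checks for the second target region ('teams reduction(^: c) reduction(*: d)'):
// the kernel takes c and d by value, re-materializes them through
// __kmpc_alloc_shared so the reduction machinery can take their addresses, and
// frees the allocations in reverse order once the teams region finishes.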
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l26
// CHECK2-SAME: (i32 noundef [[C:%.*]], i32 noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 [[C]], i32* [[C_ADDR]], align 4
// CHECK2-NEXT: store i32 [[D]], i32* [[D_ADDR]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i32* [[C_ADDR]] to i8*
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i32* [[D_ADDR]] to float*
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
// CHECK2-NEXT: [[TMP1:%.*]] = load i8, i8* [[CONV]], align 1
// CHECK2-NEXT: [[C2:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 1)
// CHECK2-NEXT: store i8 [[TMP1]], i8* [[C2]], align 1
// CHECK2-NEXT: [[TMP2:%.*]] = load float, float* [[CONV1]], align 4
// CHECK2-NEXT: [[D3:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 4)
// CHECK2-NEXT: [[D_ON_STACK:%.*]] = bitcast i8* [[D3]] to float*
// CHECK2-NEXT: store float [[TMP2]], float* [[D_ON_STACK]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP3]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK2-NEXT: call void @__omp_outlined__1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i8* [[C2]], float* [[D_ON_STACK]]) #[[ATTR4]]
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[D3]], i32 4)
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[C2]], i32 1)
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i8* noundef nonnull align 1 dereferenceable(1) [[C:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[D_ADDR:%.*]] = alloca float*, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store i8* [[C]], i8** [[C_ADDR]], align 4
// CHECK2-NEXT: store float* [[D]], float** [[D_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i8*, i8** [[C_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load float*, float** [[D_ADDR]], align 4
// CHECK2-NEXT: [[C1:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 1)
// CHECK2-NEXT: [[D2:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 4)
// CHECK2-NEXT: [[D_ON_STACK:%.*]] = bitcast i8* [[D2]] to float*
// CHECK2-NEXT: store i8 0, i8* [[C1]], align 1
// CHECK2-NEXT: store float 1.000000e+00, float* [[D_ON_STACK]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i8, i8* [[C1]], align 1
// CHECK2-NEXT: [[CONV:%.*]] = sext i8 [[TMP2]] to i32
// CHECK2-NEXT: [[XOR:%.*]] = xor i32 [[CONV]], 2
// CHECK2-NEXT: [[CONV3:%.*]] = trunc i32 [[XOR]] to i8
// CHECK2-NEXT: store i8 [[CONV3]], i8* [[C1]], align 1
// CHECK2-NEXT: [[TMP3:%.*]] = load float, float* [[D_ON_STACK]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], 3.300000e+01
// CHECK2-NEXT: store float [[MUL]], float* [[D_ON_STACK]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK2-NEXT: store i8* [[C1]], i8** [[TMP6]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast float* [[D_ON_STACK]] to i8*
// CHECK2-NEXT: store i8* [[TMP8]], i8** [[TMP7]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** @"_openmp_teams_reductions_buffer_$_$ptr", align 4
// CHECK2-NEXT: [[TMP11:%.*]] = call i32 @__kmpc_nvptx_teams_reduce_nowait_v2(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i8* [[TMP10]], i32 1024, i8* [[TMP9]], void (i8*, i16, i16, i16)* @_omp_reduction_shuffle_and_reduce_func3, void (i8*, i32)* @_omp_reduction_inter_warp_copy_func4, void (i8*, i32, i8*)* @_omp_reduction_list_to_global_copy_func5, void (i8*, i32, i8*)* @_omp_reduction_list_to_global_reduce_func6, void (i8*, i32, i8*)* @_omp_reduction_global_to_list_copy_func7, void (i8*, i32, i8*)* @_omp_reduction_global_to_list_reduce_func8)
// CHECK2-NEXT: [[TMP12:%.*]] = icmp eq i32 [[TMP11]], 1
// CHECK2-NEXT: br i1 [[TMP12]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK2: .omp.reduction.then:
// CHECK2-NEXT: [[TMP13:%.*]] = load i8, i8* [[TMP0]], align 1
// CHECK2-NEXT: [[CONV4:%.*]] = sext i8 [[TMP13]] to i32
// CHECK2-NEXT: [[TMP14:%.*]] = load i8, i8* [[C1]], align 1
// CHECK2-NEXT: [[CONV5:%.*]] = sext i8 [[TMP14]] to i32
// CHECK2-NEXT: [[XOR6:%.*]] = xor i32 [[CONV4]], [[CONV5]]
// CHECK2-NEXT: [[CONV7:%.*]] = trunc i32 [[XOR6]] to i8
// CHECK2-NEXT: store i8 [[CONV7]], i8* [[TMP0]], align 1
// CHECK2-NEXT: [[TMP15:%.*]] = load float, float* [[TMP1]], align 4
// CHECK2-NEXT: [[TMP16:%.*]] = load float, float* [[D_ON_STACK]], align 4
// CHECK2-NEXT: [[MUL8:%.*]] = fmul float [[TMP15]], [[TMP16]]
// CHECK2-NEXT: store float [[MUL8]], float* [[TMP1]], align 4
// CHECK2-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP5]])
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK2: .omp.reduction.done:
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[D2]], i32 4)
// CHECK2-NEXT: call void @__kmpc_free_shared(i8* [[C1]], i32 1)
// CHECK2-NEXT: ret void
//
//
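// _omp_reduction_shuffle_and_reduce_func3 is the intra-warp callback for the
// c/d pair: each lane pulls its partner lane's elements over
// __kmpc_shuffle_int32 (the i8 'c' is sign-extended to i32 for the shuffle and
// truncated back; the float 'd' travels through an i32 bitcast) and reduces
// them into its own list when the algorithm-version/lane-id predicate selects
// it. A minimal sketch of the butterfly this supports, assuming the runtime
// invokes it with halving offsets (illustration only, not part of the test):
//
//   for (int16_t delta = warp_size / 2; delta > 0; delta /= 2)
//     mine = mine OP __kmpc_shuffle_int32(mine, delta, warp_size);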
// CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func3
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i8, align 1
// CHECK2-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca float, align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i16 [[TMP1]], i16* [[DOTADDR1]], align 2
// CHECK2-NEXT: store i16 [[TMP2]], i16* [[DOTADDR2]], align 2
// CHECK2-NEXT: store i16 [[TMP3]], i16* [[DOTADDR3]], align 2
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
// CHECK2-NEXT: [[TMP6:%.*]] = load i16, i16* [[DOTADDR1]], align 2
// CHECK2-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTADDR2]], align 2
// CHECK2-NEXT: [[TMP8:%.*]] = load i16, i16* [[DOTADDR3]], align 2
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[TMP10]], i32 1
// CHECK2-NEXT: [[TMP13:%.*]] = load i8, i8* [[TMP10]], align 1
// CHECK2-NEXT: [[TMP14:%.*]] = sext i8 [[TMP13]] to i32
// CHECK2-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK2-NEXT: [[TMP16:%.*]] = trunc i32 [[TMP15]] to i16
// CHECK2-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP14]], i16 [[TMP7]], i16 [[TMP16]])
// CHECK2-NEXT: [[TMP18:%.*]] = trunc i32 [[TMP17]] to i8
// CHECK2-NEXT: store i8 [[TMP18]], i8* [[DOTOMP_REDUCTION_ELEMENT]], align 1
// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr i8, i8* [[TMP10]], i32 1
// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr i8, i8* [[DOTOMP_REDUCTION_ELEMENT]], i32 1
// CHECK2-NEXT: store i8* [[DOTOMP_REDUCTION_ELEMENT]], i8** [[TMP11]], align 4
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1
// CHECK2-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to float**
// CHECK2-NEXT: [[TMP23:%.*]] = load float*, float** [[TMP22]], align 4
// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK2-NEXT: [[TMP25:%.*]] = getelementptr float, float* [[TMP23]], i32 1
// CHECK2-NEXT: [[TMP26:%.*]] = bitcast float* [[TMP25]] to i8*
// CHECK2-NEXT: [[TMP27:%.*]] = bitcast float* [[TMP23]] to i32*
// CHECK2-NEXT: [[TMP28:%.*]] = bitcast float* [[DOTOMP_REDUCTION_ELEMENT4]] to i32*
// CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP27]], align 4
// CHECK2-NEXT: [[TMP30:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK2-NEXT: [[TMP31:%.*]] = trunc i32 [[TMP30]] to i16
// CHECK2-NEXT: [[TMP32:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP29]], i16 [[TMP7]], i16 [[TMP31]])
// CHECK2-NEXT: store i32 [[TMP32]], i32* [[TMP28]], align 4
// CHECK2-NEXT: [[TMP33:%.*]] = getelementptr i32, i32* [[TMP27]], i32 1
// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr i32, i32* [[TMP28]], i32 1
// CHECK2-NEXT: [[TMP35:%.*]] = bitcast float* [[DOTOMP_REDUCTION_ELEMENT4]] to i8*
// CHECK2-NEXT: store i8* [[TMP35]], i8** [[TMP24]], align 4
// CHECK2-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP8]], 0
// CHECK2-NEXT: [[TMP37:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK2-NEXT: [[TMP38:%.*]] = icmp ult i16 [[TMP6]], [[TMP7]]
// CHECK2-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
// CHECK2-NEXT: [[TMP40:%.*]] = icmp eq i16 [[TMP8]], 2
// CHECK2-NEXT: [[TMP41:%.*]] = and i16 [[TMP6]], 1
// CHECK2-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP41]], 0
// CHECK2-NEXT: [[TMP43:%.*]] = and i1 [[TMP40]], [[TMP42]]
// CHECK2-NEXT: [[TMP44:%.*]] = icmp sgt i16 [[TMP7]], 0
// CHECK2-NEXT: [[TMP45:%.*]] = and i1 [[TMP43]], [[TMP44]]
// CHECK2-NEXT: [[TMP46:%.*]] = or i1 [[TMP36]], [[TMP39]]
// CHECK2-NEXT: [[TMP47:%.*]] = or i1 [[TMP46]], [[TMP45]]
// CHECK2-NEXT: br i1 [[TMP47]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK2: then:
// CHECK2-NEXT: [[TMP48:%.*]] = bitcast [2 x i8*]* [[TMP5]] to i8*
// CHECK2-NEXT: [[TMP49:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]] to i8*
// CHECK2-NEXT: call void @"_omp$reduction$reduction_func2"(i8* [[TMP48]], i8* [[TMP49]]) #[[ATTR4]]
// CHECK2-NEXT: br label [[IFCONT:%.*]]
// CHECK2: else:
// CHECK2-NEXT: br label [[IFCONT]]
// CHECK2: ifcont:
// CHECK2-NEXT: [[TMP50:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK2-NEXT: [[TMP51:%.*]] = icmp uge i16 [[TMP6]], [[TMP7]]
// CHECK2-NEXT: [[TMP52:%.*]] = and i1 [[TMP50]], [[TMP51]]
// CHECK2-NEXT: br i1 [[TMP52]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
// CHECK2: then5:
// CHECK2-NEXT: [[TMP53:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[TMP54:%.*]] = load i8*, i8** [[TMP53]], align 4
// CHECK2-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK2-NEXT: [[TMP56:%.*]] = load i8*, i8** [[TMP55]], align 4
// CHECK2-NEXT: [[TMP57:%.*]] = load i8, i8* [[TMP54]], align 1
// CHECK2-NEXT: store i8 [[TMP57]], i8* [[TMP56]], align 1
// CHECK2-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK2-NEXT: [[TMP59:%.*]] = bitcast i8** [[TMP58]] to float**
// CHECK2-NEXT: [[TMP60:%.*]] = load float*, float** [[TMP59]], align 4
// CHECK2-NEXT: [[TMP61:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1
// CHECK2-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to float**
// CHECK2-NEXT: [[TMP63:%.*]] = load float*, float** [[TMP62]], align 4
// CHECK2-NEXT: [[TMP64:%.*]] = load float, float* [[TMP60]], align 4
// CHECK2-NEXT: store float [[TMP64]], float* [[TMP63]], align 4
// CHECK2-NEXT: br label [[IFCONT7:%.*]]
// CHECK2: else6:
// CHECK2-NEXT: br label [[IFCONT7]]
// CHECK2: ifcont7:
// CHECK2-NEXT: ret void
//
//
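// _omp_reduction_inter_warp_copy_func4 funnels per-warp partials through the
// 32-slot shared-memory staging array
// @__openmp_nvptx_data_transfer_temporary_storage: lane 0 of each warp
// publishes one element, __kmpc_barrier separates the phases, and the first
// 'num_warps' threads read the slots back into the reduce list. The i8 'c'
// travels through an i32 slot via an addrspace(3) bitcast.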
// CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func4
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK2-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK2-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK2-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK2-NEXT: [[TMP6:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to [2 x i8*]*
// CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
// CHECK2-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK2-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK2: then:
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 0
// CHECK2-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i32 addrspace(3)* [[TMP10]] to i8 addrspace(3)*
// CHECK2-NEXT: [[TMP12:%.*]] = load i8, i8* [[TMP9]], align 1
// CHECK2-NEXT: store volatile i8 [[TMP12]], i8 addrspace(3)* [[TMP11]], align 1
// CHECK2-NEXT: br label [[IFCONT:%.*]]
// CHECK2: else:
// CHECK2-NEXT: br label [[IFCONT]]
// CHECK2: ifcont:
// CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP13]]
// CHECK2-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK2: then2:
// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK2-NEXT: [[TMP15:%.*]] = bitcast i32 addrspace(3)* [[TMP14]] to i8 addrspace(3)*
// CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 0
// CHECK2-NEXT: [[TMP17:%.*]] = load i8*, i8** [[TMP16]], align 4
// CHECK2-NEXT: [[TMP18:%.*]] = load volatile i8, i8 addrspace(3)* [[TMP15]], align 1
// CHECK2-NEXT: store i8 [[TMP18]], i8* [[TMP17]], align 1
// CHECK2-NEXT: br label [[IFCONT4:%.*]]
// CHECK2: else3:
// CHECK2-NEXT: br label [[IFCONT4]]
// CHECK2: ifcont4:
// CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
// CHECK2-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK2-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
// CHECK2: then6:
// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 1
// CHECK2-NEXT: [[TMP20:%.*]] = load i8*, i8** [[TMP19]], align 4
// CHECK2-NEXT: [[TMP21:%.*]] = bitcast i8* [[TMP20]] to i32*
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK2-NEXT: store volatile i32 [[TMP23]], i32 addrspace(3)* [[TMP22]], align 4
// CHECK2-NEXT: br label [[IFCONT8:%.*]]
// CHECK2: else7:
// CHECK2-NEXT: br label [[IFCONT8]]
// CHECK2: ifcont8:
// CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP24]]
// CHECK2-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
// CHECK2: then10:
// CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 1
// CHECK2-NEXT: [[TMP27:%.*]] = load i8*, i8** [[TMP26]], align 4
// CHECK2-NEXT: [[TMP28:%.*]] = bitcast i8* [[TMP27]] to i32*
// CHECK2-NEXT: [[TMP29:%.*]] = load volatile i32, i32 addrspace(3)* [[TMP25]], align 4
// CHECK2-NEXT: store i32 [[TMP29]], i32* [[TMP28]], align 4
// CHECK2-NEXT: br label [[IFCONT12:%.*]]
// CHECK2: else11:
// CHECK2-NEXT: br label [[IFCONT12]]
// CHECK2: ifcont12:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_list_to_global_copy_func5
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to [2 x i8*]*
// CHECK2-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty.0*
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i32 0, i32 0
// CHECK2-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 4
// CHECK2-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0:%.*]], %struct._globalized_locals_ty.0* [[TMP6]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[C]], i32 0, i32 [[TMP7]]
// CHECK2-NEXT: [[TMP11:%.*]] = load i8, i8* [[TMP9]], align 1
// CHECK2-NEXT: store i8 [[TMP11]], i8* [[TMP10]], align 128
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i32 0, i32 1
// CHECK2-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 4
// CHECK2-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to float*
// CHECK2-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0]], %struct._globalized_locals_ty.0* [[TMP6]], i32 0, i32 1
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1024 x float], [1024 x float]* [[D]], i32 0, i32 [[TMP7]]
// CHECK2-NEXT: [[TMP16:%.*]] = load float, float* [[TMP14]], align 4
// CHECK2-NEXT: store float [[TMP16]], float* [[TMP15]], align 128
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_list_to_global_reduce_func6
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to %struct._globalized_locals_ty.0*
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0:%.*]], %struct._globalized_locals_ty.0* [[TMP4]], i32 0, i32 0
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[C]], i32 0, i32 [[TMP5]]
// CHECK2-NEXT: store i8* [[TMP7]], i8** [[TMP6]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK2-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0]], %struct._globalized_locals_ty.0* [[TMP4]], i32 0, i32 1
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1024 x float], [1024 x float]* [[D]], i32 0, i32 [[TMP5]]
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast float* [[TMP9]] to i8*
// CHECK2-NEXT: store i8* [[TMP10]], i8** [[TMP8]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP12:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: call void @"_omp$reduction$reduction_func2"(i8* [[TMP11]], i8* [[TMP12]]) #[[ATTR4]]
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_global_to_list_copy_func7
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to [2 x i8*]*
// CHECK2-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty.0*
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i32 0, i32 0
// CHECK2-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 4
// CHECK2-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0:%.*]], %struct._globalized_locals_ty.0* [[TMP6]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[C]], i32 0, i32 [[TMP7]]
// CHECK2-NEXT: [[TMP11:%.*]] = load i8, i8* [[TMP10]], align 128
// CHECK2-NEXT: store i8 [[TMP11]], i8* [[TMP9]], align 1
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i32 0, i32 1
// CHECK2-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 4
// CHECK2-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to float*
// CHECK2-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0]], %struct._globalized_locals_ty.0* [[TMP6]], i32 0, i32 1
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1024 x float], [1024 x float]* [[D]], i32 0, i32 [[TMP7]]
// CHECK2-NEXT: [[TMP16:%.*]] = load float, float* [[TMP15]], align 128
// CHECK2-NEXT: store float [[TMP16]], float* [[TMP14]], align 4
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_global_to_list_reduce_func8
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to %struct._globalized_locals_ty.0*
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0:%.*]], %struct._globalized_locals_ty.0* [[TMP4]], i32 0, i32 0
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[C]], i32 0, i32 [[TMP5]]
// CHECK2-NEXT: store i8* [[TMP7]], i8** [[TMP6]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK2-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0]], %struct._globalized_locals_ty.0* [[TMP4]], i32 0, i32 1
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1024 x float], [1024 x float]* [[D]], i32 0, i32 [[TMP5]]
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast float* [[TMP9]] to i8*
// CHECK2-NEXT: store i8* [[TMP10]], i8** [[TMP8]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP12:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: call void @"_omp$reduction$reduction_func2"(i8* [[TMP12]], i8* [[TMP11]]) #[[ATTR4]]
// CHECK2-NEXT: ret void
//
//
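// Checks for the third target region ('teams reduction(|: a) reduction(max: b)'
// with a nested 'parallel' performing the same reductions). Per the
// __kmpc_target_init arguments asserted below (exec mode 2, no generic state
// machine), this kernel runs in SPMD mode, so a and b live in plain allocas
// instead of __kmpc_alloc_shared storage.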
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l33
// CHECK2-SAME: (i32 noundef [[A:%.*]], i32 noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK2-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16*
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 2, i1 false, i1 true)
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK2-NEXT: call void @__omp_outlined__9(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32* [[A_ADDR]], i16* [[CONV]]) #[[ATTR4]]
// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__9
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i16*, align 4
// CHECK2-NEXT: [[A1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[B2:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK2-NEXT: store i16* [[B]], i16** [[B_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i16*, i16** [[B_ADDR]], align 4
// CHECK2-NEXT: store i32 0, i32* [[A1]], align 4
// CHECK2-NEXT: store i16 -32768, i16* [[B2]], align 2
// CHECK2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i32* [[A1]] to i8*
// CHECK2-NEXT: store i8* [[TMP3]], i8** [[TMP2]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i16* [[B2]] to i8*
// CHECK2-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*, i16*)* @__omp_outlined__10 to i8*), i8* null, i8** [[TMP8]], i32 2)
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast i32* [[A1]] to i8*
// CHECK2-NEXT: store i8* [[TMP10]], i8** [[TMP9]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK2-NEXT: [[TMP12:%.*]] = bitcast i16* [[B2]] to i8*
// CHECK2-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP14:%.*]] = load i8*, i8** @"_openmp_teams_reductions_buffer_$_$ptr", align 4
// CHECK2-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_nvptx_teams_reduce_nowait_v2(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i8* [[TMP14]], i32 1024, i8* [[TMP13]], void (i8*, i16, i16, i16)* @_omp_reduction_shuffle_and_reduce_func15, void (i8*, i32)* @_omp_reduction_inter_warp_copy_func16, void (i8*, i32, i8*)* @_omp_reduction_list_to_global_copy_func17, void (i8*, i32, i8*)* @_omp_reduction_list_to_global_reduce_func18, void (i8*, i32, i8*)* @_omp_reduction_global_to_list_copy_func19, void (i8*, i32, i8*)* @_omp_reduction_global_to_list_reduce_func20)
// CHECK2-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 1
// CHECK2-NEXT: br i1 [[TMP16]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK2: .omp.reduction.then:
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[A1]], align 4
// CHECK2-NEXT: [[OR:%.*]] = or i32 [[TMP17]], [[TMP18]]
// CHECK2-NEXT: store i32 [[OR]], i32* [[TMP0]], align 4
// CHECK2-NEXT: [[TMP19:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK2-NEXT: [[CONV:%.*]] = sext i16 [[TMP19]] to i32
// CHECK2-NEXT: [[TMP20:%.*]] = load i16, i16* [[B2]], align 2
// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP20]] to i32
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[CONV]], [[CONV3]]
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: [[TMP21:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP22:%.*]] = load i16, i16* [[B2]], align 2
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i16 [ [[TMP21]], [[COND_TRUE]] ], [ [[TMP22]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i16 [[COND]], i16* [[TMP1]], align 2
// CHECK2-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP7]])
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK2: .omp.reduction.done:
// CHECK2-NEXT: ret void
//
//
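// __omp_outlined__10 is the body of the nested 'parallel': each thread applies
// the | and max updates to private copies of a and b, combines them across the
// team's threads via __kmpc_nvptx_parallel_reduce_nowait_v2 (2 list entries,
// 8 bytes total), and the thread selected by the return value folds the result
// into the teams-level copies before __kmpc_nvptx_end_reduce_nowait.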
// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__10
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i16*, align 4
// CHECK2-NEXT: [[A1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[B2:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK2-NEXT: store i16* [[B]], i16** [[B_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i16*, i16** [[B_ADDR]], align 4
// CHECK2-NEXT: store i32 0, i32* [[A1]], align 4
// CHECK2-NEXT: store i16 -32768, i16* [[B2]], align 2
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[A1]], align 4
// CHECK2-NEXT: [[OR:%.*]] = or i32 [[TMP2]], 1
// CHECK2-NEXT: store i32 [[OR]], i32* [[A1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i16, i16* [[B2]], align 2
// CHECK2-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 99, [[CONV]]
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP4:%.*]] = load i16, i16* [[B2]], align 2
// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP4]] to i32
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[CONV3]], [[COND_FALSE]] ]
// CHECK2-NEXT: [[CONV4:%.*]] = trunc i32 [[COND]] to i16
// CHECK2-NEXT: store i16 [[CONV4]], i16* [[B2]], align 2
// CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i32* [[A1]] to i8*
// CHECK2-NEXT: store i8* [[TMP8]], i8** [[TMP7]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast i16* [[B2]] to i8*
// CHECK2-NEXT: store i8* [[TMP10]], i8** [[TMP9]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP12:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 2, i32 8, i8* [[TMP11]], void (i8*, i16, i16, i16)* @_omp_reduction_shuffle_and_reduce_func12, void (i8*, i32)* @_omp_reduction_inter_warp_copy_func13)
// CHECK2-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP12]], 1
// CHECK2-NEXT: br i1 [[TMP13]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK2: .omp.reduction.then:
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[A1]], align 4
// CHECK2-NEXT: [[OR5:%.*]] = or i32 [[TMP14]], [[TMP15]]
// CHECK2-NEXT: store i32 [[OR5]], i32* [[TMP0]], align 4
// CHECK2-NEXT: [[TMP16:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK2-NEXT: [[CONV6:%.*]] = sext i16 [[TMP16]] to i32
// CHECK2-NEXT: [[TMP17:%.*]] = load i16, i16* [[B2]], align 2
// CHECK2-NEXT: [[CONV7:%.*]] = sext i16 [[TMP17]] to i32
// CHECK2-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[CONV6]], [[CONV7]]
// CHECK2-NEXT: br i1 [[CMP8]], label [[COND_TRUE9:%.*]], label [[COND_FALSE10:%.*]]
// CHECK2: cond.true9:
// CHECK2-NEXT: [[TMP18:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK2-NEXT: br label [[COND_END11:%.*]]
// CHECK2: cond.false10:
// CHECK2-NEXT: [[TMP19:%.*]] = load i16, i16* [[B2]], align 2
// CHECK2-NEXT: br label [[COND_END11]]
// CHECK2: cond.end11:
// CHECK2-NEXT: [[COND12:%.*]] = phi i16 [ [[TMP18]], [[COND_TRUE9]] ], [ [[TMP19]], [[COND_FALSE10]] ]
// CHECK2-NEXT: store i16 [[COND12]], i16* [[TMP1]], align 2
// CHECK2-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP6]])
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK2: .omp.reduction.done:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func12
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca i16, align 2
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i16 [[TMP1]], i16* [[DOTADDR1]], align 2
// CHECK2-NEXT: store i16 [[TMP2]], i16* [[DOTADDR2]], align 2
// CHECK2-NEXT: store i16 [[TMP3]], i16* [[DOTADDR3]], align 2
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
// CHECK2-NEXT: [[TMP6:%.*]] = load i16, i16* [[DOTADDR1]], align 2
// CHECK2-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTADDR2]], align 2
// CHECK2-NEXT: [[TMP8:%.*]] = load i16, i16* [[DOTADDR3]], align 2
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32**
// CHECK2-NEXT: [[TMP11:%.*]] = load i32*, i32** [[TMP10]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[TMP13:%.*]] = getelementptr i32, i32* [[TMP11]], i32 1
// CHECK2-NEXT: [[TMP14:%.*]] = bitcast i32* [[TMP13]] to i8*
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK2-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK2-NEXT: [[TMP17:%.*]] = trunc i32 [[TMP16]] to i16
// CHECK2-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP15]], i16 [[TMP7]], i16 [[TMP17]])
// CHECK2-NEXT: store i32 [[TMP18]], i32* [[DOTOMP_REDUCTION_ELEMENT]], align 4
// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr i32, i32* [[TMP11]], i32 1
// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr i32, i32* [[DOTOMP_REDUCTION_ELEMENT]], i32 1
// CHECK2-NEXT: [[TMP21:%.*]] = bitcast i32* [[DOTOMP_REDUCTION_ELEMENT]] to i8*
// CHECK2-NEXT: store i8* [[TMP21]], i8** [[TMP12]], align 4
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1
// CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i16**
// CHECK2-NEXT: [[TMP24:%.*]] = load i16*, i16** [[TMP23]], align 4
// CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr i16, i16* [[TMP24]], i32 1
// CHECK2-NEXT: [[TMP27:%.*]] = bitcast i16* [[TMP26]] to i8*
// CHECK2-NEXT: [[TMP28:%.*]] = load i16, i16* [[TMP24]], align 2
// CHECK2-NEXT: [[TMP29:%.*]] = sext i16 [[TMP28]] to i32
// CHECK2-NEXT: [[TMP30:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK2-NEXT: [[TMP31:%.*]] = trunc i32 [[TMP30]] to i16
// CHECK2-NEXT: [[TMP32:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP29]], i16 [[TMP7]], i16 [[TMP31]])
// CHECK2-NEXT: [[TMP33:%.*]] = trunc i32 [[TMP32]] to i16
// CHECK2-NEXT: store i16 [[TMP33]], i16* [[DOTOMP_REDUCTION_ELEMENT4]], align 2
// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr i16, i16* [[TMP24]], i32 1
// CHECK2-NEXT: [[TMP35:%.*]] = getelementptr i16, i16* [[DOTOMP_REDUCTION_ELEMENT4]], i32 1
// CHECK2-NEXT: [[TMP36:%.*]] = bitcast i16* [[DOTOMP_REDUCTION_ELEMENT4]] to i8*
// CHECK2-NEXT: store i8* [[TMP36]], i8** [[TMP25]], align 4
// CHECK2-NEXT: [[TMP37:%.*]] = icmp eq i16 [[TMP8]], 0
// CHECK2-NEXT: [[TMP38:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK2-NEXT: [[TMP39:%.*]] = icmp ult i16 [[TMP6]], [[TMP7]]
// CHECK2-NEXT: [[TMP40:%.*]] = and i1 [[TMP38]], [[TMP39]]
// CHECK2-NEXT: [[TMP41:%.*]] = icmp eq i16 [[TMP8]], 2
// CHECK2-NEXT: [[TMP42:%.*]] = and i16 [[TMP6]], 1
// CHECK2-NEXT: [[TMP43:%.*]] = icmp eq i16 [[TMP42]], 0
// CHECK2-NEXT: [[TMP44:%.*]] = and i1 [[TMP41]], [[TMP43]]
// CHECK2-NEXT: [[TMP45:%.*]] = icmp sgt i16 [[TMP7]], 0
// CHECK2-NEXT: [[TMP46:%.*]] = and i1 [[TMP44]], [[TMP45]]
// CHECK2-NEXT: [[TMP47:%.*]] = or i1 [[TMP37]], [[TMP40]]
// CHECK2-NEXT: [[TMP48:%.*]] = or i1 [[TMP47]], [[TMP46]]
// CHECK2-NEXT: br i1 [[TMP48]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK2: then:
// CHECK2-NEXT: [[TMP49:%.*]] = bitcast [2 x i8*]* [[TMP5]] to i8*
// CHECK2-NEXT: [[TMP50:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]] to i8*
// CHECK2-NEXT: call void @"_omp$reduction$reduction_func11"(i8* [[TMP49]], i8* [[TMP50]]) #[[ATTR4]]
// CHECK2-NEXT: br label [[IFCONT:%.*]]
// CHECK2: else:
// CHECK2-NEXT: br label [[IFCONT]]
// CHECK2: ifcont:
// CHECK2-NEXT: [[TMP51:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK2-NEXT: [[TMP52:%.*]] = icmp uge i16 [[TMP6]], [[TMP7]]
// CHECK2-NEXT: [[TMP53:%.*]] = and i1 [[TMP51]], [[TMP52]]
// CHECK2-NEXT: br i1 [[TMP53]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
// CHECK2: then5:
// CHECK2-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32**
// CHECK2-NEXT: [[TMP56:%.*]] = load i32*, i32** [[TMP55]], align 4
// CHECK2-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK2-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to i32**
// CHECK2-NEXT: [[TMP59:%.*]] = load i32*, i32** [[TMP58]], align 4
// CHECK2-NEXT: [[TMP60:%.*]] = load i32, i32* [[TMP56]], align 4
// CHECK2-NEXT: store i32 [[TMP60]], i32* [[TMP59]], align 4
// CHECK2-NEXT: [[TMP61:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK2-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i16**
// CHECK2-NEXT: [[TMP63:%.*]] = load i16*, i16** [[TMP62]], align 4
// CHECK2-NEXT: [[TMP64:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1
// CHECK2-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i16**
// CHECK2-NEXT: [[TMP66:%.*]] = load i16*, i16** [[TMP65]], align 4
// CHECK2-NEXT: [[TMP67:%.*]] = load i16, i16* [[TMP63]], align 2
// CHECK2-NEXT: store i16 [[TMP67]], i16* [[TMP66]], align 2
// CHECK2-NEXT: br label [[IFCONT7:%.*]]
// CHECK2: else6:
// CHECK2-NEXT: br label [[IFCONT7]]
// CHECK2: ifcont7:
// CHECK2-NEXT: ret void
//
//
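// _omp_reduction_inter_warp_copy_func13 mirrors func4 for the parallel-level
// a/b reduction: the i32 'a' uses a shared-memory transfer slot directly,
// while the i16 'b' is staged through the same i32 slot via an addrspace(3)
// bitcast to i16 (see the casts around the volatile loads and stores below).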
2316 // CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func13
2317 // CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR3]] {
2318 // CHECK2-NEXT: entry:
2319 // CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
2320 // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2321 // CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
2322 // CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
2323 // CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
2324 // CHECK2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
2325 // CHECK2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
2326 // CHECK2-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
2327 // CHECK2-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
2328 // CHECK2-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
2329 // CHECK2-NEXT: [[TMP6:%.*]] = load i8*, i8** [[DOTADDR]], align 4
2330 // CHECK2-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to [2 x i8*]*
2331 // CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP2]])
2332 // CHECK2-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
2333 // CHECK2-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
2334 // CHECK2: then:
2335 // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 0
2336 // CHECK2-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 4
2337 // CHECK2-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
2338 // CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
2339 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP10]], align 4
2340 // CHECK2-NEXT: store volatile i32 [[TMP12]], i32 addrspace(3)* [[TMP11]], align 4
2341 // CHECK2-NEXT: br label [[IFCONT:%.*]]
2342 // CHECK2: else:
2343 // CHECK2-NEXT: br label [[IFCONT]]
2344 // CHECK2: ifcont:
2345 // CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
2346 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTADDR1]], align 4
2347 // CHECK2-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP13]]
2348 // CHECK2-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
2349 // CHECK2: then2:
2350 // CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
2351 // CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 0
2352 // CHECK2-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 4
2353 // CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
2354 // CHECK2-NEXT: [[TMP18:%.*]] = load volatile i32, i32 addrspace(3)* [[TMP14]], align 4
2355 // CHECK2-NEXT: store i32 [[TMP18]], i32* [[TMP17]], align 4
2356 // CHECK2-NEXT: br label [[IFCONT4:%.*]]
2357 // CHECK2: else3:
2358 // CHECK2-NEXT: br label [[IFCONT4]]
2359 // CHECK2: ifcont4:
2360 // CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
2361 // CHECK2-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
2362 // CHECK2-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
2363 // CHECK2: then6:
2364 // CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 1
2365 // CHECK2-NEXT: [[TMP20:%.*]] = load i8*, i8** [[TMP19]], align 4
2366 // CHECK2-NEXT: [[TMP21:%.*]] = bitcast i8* [[TMP20]] to i16*
2367 // CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
2368 // CHECK2-NEXT: [[TMP23:%.*]] = bitcast i32 addrspace(3)* [[TMP22]] to i16 addrspace(3)*
2369 // CHECK2-NEXT: [[TMP24:%.*]] = load i16, i16* [[TMP21]], align 2
2370 // CHECK2-NEXT: store volatile i16 [[TMP24]], i16 addrspace(3)* [[TMP23]], align 2
2371 // CHECK2-NEXT: br label [[IFCONT8:%.*]]
2372 // CHECK2: else7:
2373 // CHECK2-NEXT: br label [[IFCONT8]]
2374 // CHECK2: ifcont8:
2375 // CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
2376 // CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTADDR1]], align 4
2377 // CHECK2-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP25]]
2378 // CHECK2-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
2379 // CHECK2: then10:
2380 // CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
2381 // CHECK2-NEXT: [[TMP27:%.*]] = bitcast i32 addrspace(3)* [[TMP26]] to i16 addrspace(3)*
2382 // CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 1
2383 // CHECK2-NEXT: [[TMP29:%.*]] = load i8*, i8** [[TMP28]], align 4
2384 // CHECK2-NEXT: [[TMP30:%.*]] = bitcast i8* [[TMP29]] to i16*
2385 // CHECK2-NEXT: [[TMP31:%.*]] = load volatile i16, i16 addrspace(3)* [[TMP27]], align 2
2386 // CHECK2-NEXT: store i16 [[TMP31]], i16* [[TMP30]], align 2
2387 // CHECK2-NEXT: br label [[IFCONT12:%.*]]
2388 // CHECK2: else11:
2389 // CHECK2-NEXT: br label [[IFCONT12]]
2390 // CHECK2: ifcont12:
2391 // CHECK2-NEXT: ret void
2392 //
2393 //
// CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func15
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca i16, align 2
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i16 [[TMP1]], i16* [[DOTADDR1]], align 2
// CHECK2-NEXT: store i16 [[TMP2]], i16* [[DOTADDR2]], align 2
// CHECK2-NEXT: store i16 [[TMP3]], i16* [[DOTADDR3]], align 2
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
// CHECK2-NEXT: [[TMP6:%.*]] = load i16, i16* [[DOTADDR1]], align 2
// CHECK2-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTADDR2]], align 2
// CHECK2-NEXT: [[TMP8:%.*]] = load i16, i16* [[DOTADDR3]], align 2
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32**
// CHECK2-NEXT: [[TMP11:%.*]] = load i32*, i32** [[TMP10]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[TMP13:%.*]] = getelementptr i32, i32* [[TMP11]], i32 1
// CHECK2-NEXT: [[TMP14:%.*]] = bitcast i32* [[TMP13]] to i8*
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK2-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK2-NEXT: [[TMP17:%.*]] = trunc i32 [[TMP16]] to i16
// CHECK2-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP15]], i16 [[TMP7]], i16 [[TMP17]])
// CHECK2-NEXT: store i32 [[TMP18]], i32* [[DOTOMP_REDUCTION_ELEMENT]], align 4
// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr i32, i32* [[TMP11]], i32 1
// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr i32, i32* [[DOTOMP_REDUCTION_ELEMENT]], i32 1
// CHECK2-NEXT: [[TMP21:%.*]] = bitcast i32* [[DOTOMP_REDUCTION_ELEMENT]] to i8*
// CHECK2-NEXT: store i8* [[TMP21]], i8** [[TMP12]], align 4
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1
// CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i16**
// CHECK2-NEXT: [[TMP24:%.*]] = load i16*, i16** [[TMP23]], align 4
// CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr i16, i16* [[TMP24]], i32 1
// CHECK2-NEXT: [[TMP27:%.*]] = bitcast i16* [[TMP26]] to i8*
// CHECK2-NEXT: [[TMP28:%.*]] = load i16, i16* [[TMP24]], align 2
// CHECK2-NEXT: [[TMP29:%.*]] = sext i16 [[TMP28]] to i32
// CHECK2-NEXT: [[TMP30:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK2-NEXT: [[TMP31:%.*]] = trunc i32 [[TMP30]] to i16
// CHECK2-NEXT: [[TMP32:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP29]], i16 [[TMP7]], i16 [[TMP31]])
// CHECK2-NEXT: [[TMP33:%.*]] = trunc i32 [[TMP32]] to i16
// CHECK2-NEXT: store i16 [[TMP33]], i16* [[DOTOMP_REDUCTION_ELEMENT4]], align 2
// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr i16, i16* [[TMP24]], i32 1
// CHECK2-NEXT: [[TMP35:%.*]] = getelementptr i16, i16* [[DOTOMP_REDUCTION_ELEMENT4]], i32 1
// CHECK2-NEXT: [[TMP36:%.*]] = bitcast i16* [[DOTOMP_REDUCTION_ELEMENT4]] to i8*
// CHECK2-NEXT: store i8* [[TMP36]], i8** [[TMP25]], align 4
// CHECK2-NEXT: [[TMP37:%.*]] = icmp eq i16 [[TMP8]], 0
// CHECK2-NEXT: [[TMP38:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK2-NEXT: [[TMP39:%.*]] = icmp ult i16 [[TMP6]], [[TMP7]]
// CHECK2-NEXT: [[TMP40:%.*]] = and i1 [[TMP38]], [[TMP39]]
// CHECK2-NEXT: [[TMP41:%.*]] = icmp eq i16 [[TMP8]], 2
// CHECK2-NEXT: [[TMP42:%.*]] = and i16 [[TMP6]], 1
// CHECK2-NEXT: [[TMP43:%.*]] = icmp eq i16 [[TMP42]], 0
// CHECK2-NEXT: [[TMP44:%.*]] = and i1 [[TMP41]], [[TMP43]]
// CHECK2-NEXT: [[TMP45:%.*]] = icmp sgt i16 [[TMP7]], 0
// CHECK2-NEXT: [[TMP46:%.*]] = and i1 [[TMP44]], [[TMP45]]
// CHECK2-NEXT: [[TMP47:%.*]] = or i1 [[TMP37]], [[TMP40]]
// CHECK2-NEXT: [[TMP48:%.*]] = or i1 [[TMP47]], [[TMP46]]
// CHECK2-NEXT: br i1 [[TMP48]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK2: then:
// CHECK2-NEXT: [[TMP49:%.*]] = bitcast [2 x i8*]* [[TMP5]] to i8*
// CHECK2-NEXT: [[TMP50:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]] to i8*
// CHECK2-NEXT: call void @"_omp$reduction$reduction_func14"(i8* [[TMP49]], i8* [[TMP50]]) #[[ATTR4]]
// CHECK2-NEXT: br label [[IFCONT:%.*]]
// CHECK2: else:
// CHECK2-NEXT: br label [[IFCONT]]
// CHECK2: ifcont:
// CHECK2-NEXT: [[TMP51:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK2-NEXT: [[TMP52:%.*]] = icmp uge i16 [[TMP6]], [[TMP7]]
// CHECK2-NEXT: [[TMP53:%.*]] = and i1 [[TMP51]], [[TMP52]]
// CHECK2-NEXT: br i1 [[TMP53]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
// CHECK2: then5:
// CHECK2-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32**
// CHECK2-NEXT: [[TMP56:%.*]] = load i32*, i32** [[TMP55]], align 4
// CHECK2-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK2-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to i32**
// CHECK2-NEXT: [[TMP59:%.*]] = load i32*, i32** [[TMP58]], align 4
// CHECK2-NEXT: [[TMP60:%.*]] = load i32, i32* [[TMP56]], align 4
// CHECK2-NEXT: store i32 [[TMP60]], i32* [[TMP59]], align 4
// CHECK2-NEXT: [[TMP61:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK2-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i16**
// CHECK2-NEXT: [[TMP63:%.*]] = load i16*, i16** [[TMP62]], align 4
// CHECK2-NEXT: [[TMP64:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1
// CHECK2-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i16**
// CHECK2-NEXT: [[TMP66:%.*]] = load i16*, i16** [[TMP65]], align 4
// CHECK2-NEXT: [[TMP67:%.*]] = load i16, i16* [[TMP63]], align 2
// CHECK2-NEXT: store i16 [[TMP67]], i16* [[TMP66]], align 2
// CHECK2-NEXT: br label [[IFCONT7:%.*]]
// CHECK2: else6:
// CHECK2-NEXT: br label [[IFCONT7]]
// CHECK2: ifcont7:
// CHECK2-NEXT: ret void
//
//
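// The inter-warp copy helper below stages one reduce-list element per
// barrier-delimited round through @__openmp_nvptx_data_transfer_temporary_storage
// (32 i32 slots, one per warp): each warp's lane 0 publishes its value, then
// the first N threads of the block (N being the i32 argument, apparently the
// number of active warps) read the slots back into their own reduce list. The
// i16 element reuses an i32 slot through an addrspace(3) bitcast.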
// CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func16
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK2-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK2-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK2-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK2-NEXT: [[TMP6:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to [2 x i8*]*
// CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
// CHECK2-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK2-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK2: then:
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 0
// CHECK2-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK2-NEXT: store volatile i32 [[TMP12]], i32 addrspace(3)* [[TMP11]], align 4
// CHECK2-NEXT: br label [[IFCONT:%.*]]
// CHECK2: else:
// CHECK2-NEXT: br label [[IFCONT]]
// CHECK2: ifcont:
// CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP13]]
// CHECK2-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK2: then2:
// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 0
// CHECK2-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 4
// CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
// CHECK2-NEXT: [[TMP18:%.*]] = load volatile i32, i32 addrspace(3)* [[TMP14]], align 4
// CHECK2-NEXT: store i32 [[TMP18]], i32* [[TMP17]], align 4
// CHECK2-NEXT: br label [[IFCONT4:%.*]]
// CHECK2: else3:
// CHECK2-NEXT: br label [[IFCONT4]]
// CHECK2: ifcont4:
// CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
// CHECK2-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK2-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
// CHECK2: then6:
// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 1
// CHECK2-NEXT: [[TMP20:%.*]] = load i8*, i8** [[TMP19]], align 4
// CHECK2-NEXT: [[TMP21:%.*]] = bitcast i8* [[TMP20]] to i16*
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK2-NEXT: [[TMP23:%.*]] = bitcast i32 addrspace(3)* [[TMP22]] to i16 addrspace(3)*
// CHECK2-NEXT: [[TMP24:%.*]] = load i16, i16* [[TMP21]], align 2
// CHECK2-NEXT: store volatile i16 [[TMP24]], i16 addrspace(3)* [[TMP23]], align 2
// CHECK2-NEXT: br label [[IFCONT8:%.*]]
// CHECK2: else7:
// CHECK2-NEXT: br label [[IFCONT8]]
// CHECK2: ifcont8:
// CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
// CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP25]]
// CHECK2-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
// CHECK2: then10:
// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK2-NEXT: [[TMP27:%.*]] = bitcast i32 addrspace(3)* [[TMP26]] to i16 addrspace(3)*
// CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 1
// CHECK2-NEXT: [[TMP29:%.*]] = load i8*, i8** [[TMP28]], align 4
// CHECK2-NEXT: [[TMP30:%.*]] = bitcast i8* [[TMP29]] to i16*
// CHECK2-NEXT: [[TMP31:%.*]] = load volatile i16, i16 addrspace(3)* [[TMP27]], align 2
// CHECK2-NEXT: store i16 [[TMP31]], i16* [[TMP30]], align 2
// CHECK2-NEXT: br label [[IFCONT12:%.*]]
// CHECK2: else11:
// CHECK2-NEXT: br label [[IFCONT12]]
// CHECK2: ifcont12:
// CHECK2-NEXT: ret void
//
//
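// The two list-to-global helpers below move a team's private partials into
// the globalized buffer: %struct._globalized_locals_ty.1 holds one
// [1024 x i32] and one [1024 x i16] array (1024 looks like the default
// teams-reduction record count for this run), indexed per team by the i32
// argument. The copy variant stores the raw values; the reduce variant builds
// a reduce list over the buffer slots and calls
// "_omp$reduction$reduction_func14" with the buffer side first, which reads
// as the accumulation destination.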
// CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_list_to_global_copy_func17
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to [2 x i8*]*
// CHECK2-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty.1*
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i32 0, i32 0
// CHECK2-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
// CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1:%.*]], %struct._globalized_locals_ty.1* [[TMP6]], i32 0, i32 0
// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1024 x i32], [1024 x i32]* [[A]], i32 0, i32 [[TMP7]]
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK2-NEXT: store i32 [[TMP12]], i32* [[TMP11]], align 128
// CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i32 0, i32 1
// CHECK2-NEXT: [[TMP14:%.*]] = load i8*, i8** [[TMP13]], align 4
// CHECK2-NEXT: [[TMP15:%.*]] = bitcast i8* [[TMP14]] to i16*
// CHECK2-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1]], %struct._globalized_locals_ty.1* [[TMP6]], i32 0, i32 1
// CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1024 x i16], [1024 x i16]* [[B]], i32 0, i32 [[TMP7]]
// CHECK2-NEXT: [[TMP17:%.*]] = load i16, i16* [[TMP15]], align 2
// CHECK2-NEXT: store i16 [[TMP17]], i16* [[TMP16]], align 128
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_list_to_global_reduce_func18
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to %struct._globalized_locals_ty.1*
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1:%.*]], %struct._globalized_locals_ty.1* [[TMP4]], i32 0, i32 0
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1024 x i32], [1024 x i32]* [[A]], i32 0, i32 [[TMP5]]
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i32* [[TMP7]] to i8*
// CHECK2-NEXT: store i8* [[TMP8]], i8** [[TMP6]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK2-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1]], %struct._globalized_locals_ty.1* [[TMP4]], i32 0, i32 1
// CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1024 x i16], [1024 x i16]* [[B]], i32 0, i32 [[TMP5]]
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i16* [[TMP10]] to i8*
// CHECK2-NEXT: store i8* [[TMP11]], i8** [[TMP9]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP13:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: call void @"_omp$reduction$reduction_func14"(i8* [[TMP12]], i8* [[TMP13]]) #[[ATTR4]]
// CHECK2-NEXT: ret void
//
//
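// The mirror-image global-to-list helpers follow: the copy variant reads the
// buffer slots back into the caller's reduce list, and the reduce variant
// passes its arguments to "_omp$reduction$reduction_func14" in the opposite
// order from the list-to-global case, so the accumulation lands in the
// caller's list rather than in the buffer.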
// CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_global_to_list_copy_func19
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to [2 x i8*]*
// CHECK2-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty.1*
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i32 0, i32 0
// CHECK2-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
// CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1:%.*]], %struct._globalized_locals_ty.1* [[TMP6]], i32 0, i32 0
// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1024 x i32], [1024 x i32]* [[A]], i32 0, i32 [[TMP7]]
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 128
// CHECK2-NEXT: store i32 [[TMP12]], i32* [[TMP10]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i32 0, i32 1
// CHECK2-NEXT: [[TMP14:%.*]] = load i8*, i8** [[TMP13]], align 4
// CHECK2-NEXT: [[TMP15:%.*]] = bitcast i8* [[TMP14]] to i16*
// CHECK2-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1]], %struct._globalized_locals_ty.1* [[TMP6]], i32 0, i32 1
// CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1024 x i16], [1024 x i16]* [[B]], i32 0, i32 [[TMP7]]
// CHECK2-NEXT: [[TMP17:%.*]] = load i16, i16* [[TMP16]], align 128
// CHECK2-NEXT: store i16 [[TMP17]], i16* [[TMP15]], align 2
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_omp_reduction_global_to_list_reduce_func20
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to %struct._globalized_locals_ty.1*
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1:%.*]], %struct._globalized_locals_ty.1* [[TMP4]], i32 0, i32 0
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1024 x i32], [1024 x i32]* [[A]], i32 0, i32 [[TMP5]]
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i32* [[TMP7]] to i8*
// CHECK2-NEXT: store i8* [[TMP8]], i8** [[TMP6]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK2-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1]], %struct._globalized_locals_ty.1* [[TMP4]], i32 0, i32 1
// CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1024 x i16], [1024 x i16]* [[B]], i32 0, i32 [[TMP5]]
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i16* [[TMP10]] to i8*
// CHECK2-NEXT: store i8* [[TMP11]], i8** [[TMP9]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP13:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK2-NEXT: call void @"_omp$reduction$reduction_func14"(i8* [[TMP13]], i8* [[TMP12]]) #[[ATTR4]]
// CHECK2-NEXT: ret void
//
//
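// The CHECK3 assertions that follow cover the same source compiled with a
// 2048-record teams-reduction buffer: note the "i32 2048" argument to
// @__kmpc_nvptx_teams_reduce_nowait_v2 and the [2048 x ...] arrays in the
// globalized-locals structs below, where the CHECK2 run used 1024.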
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l20
// CHECK3-SAME: (double* noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[E_ADDR:%.*]] = alloca double*, align 4
// CHECK3-NEXT: [[E1:%.*]] = alloca double, align 8
// CHECK3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store double* [[E]], double** [[E_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load double*, double** [[E_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
// CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK3: user_code.entry:
// CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-NEXT: [[TMP3:%.*]] = load double, double* [[TMP0]], align 8
// CHECK3-NEXT: store double [[TMP3]], double* [[E1]], align 8
// CHECK3-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK3-NEXT: call void @__omp_outlined__(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], double* [[E1]]) #[[ATTR4:[0-9]+]]
// CHECK3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK3-NEXT: ret void
// CHECK3: worker.exit:
// CHECK3-NEXT: ret void
//
//
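// In the outlined teams region below, "e" is globalized through
// __kmpc_alloc_shared/__kmpc_free_shared, the team-local partial is computed
// (fadd by 5.0), and a one-element reduce list is handed to
// @__kmpc_nvptx_teams_reduce_nowait_v2 together with the six generated
// helpers; a return value of 1 appears to mark the team that performs the
// final accumulation into the original variable.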
// CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[E_ADDR:%.*]] = alloca double*, align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store double* [[E]], double** [[E_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load double*, double** [[E_ADDR]], align 4
// CHECK3-NEXT: [[E1:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 8)
// CHECK3-NEXT: [[E_ON_STACK:%.*]] = bitcast i8* [[E1]] to double*
// CHECK3-NEXT: store double 0.000000e+00, double* [[E_ON_STACK]], align 8
// CHECK3-NEXT: [[TMP1:%.*]] = load double, double* [[E_ON_STACK]], align 8
// CHECK3-NEXT: [[ADD:%.*]] = fadd double [[TMP1]], 5.000000e+00
// CHECK3-NEXT: store double [[ADD]], double* [[E_ON_STACK]], align 8
// CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[TMP5:%.*]] = bitcast double* [[E_ON_STACK]] to i8*
// CHECK3-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT: [[TMP7:%.*]] = load i8*, i8** @"_openmp_teams_reductions_buffer_$_$ptr", align 4
// CHECK3-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_nvptx_teams_reduce_nowait_v2(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i8* [[TMP7]], i32 2048, i8* [[TMP6]], void (i8*, i16, i16, i16)* @_omp_reduction_shuffle_and_reduce_func, void (i8*, i32)* @_omp_reduction_inter_warp_copy_func, void (i8*, i32, i8*)* @_omp_reduction_list_to_global_copy_func, void (i8*, i32, i8*)* @_omp_reduction_list_to_global_reduce_func, void (i8*, i32, i8*)* @_omp_reduction_global_to_list_copy_func, void (i8*, i32, i8*)* @_omp_reduction_global_to_list_reduce_func)
// CHECK3-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 1
// CHECK3-NEXT: br i1 [[TMP9]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK3: .omp.reduction.then:
// CHECK3-NEXT: [[TMP10:%.*]] = load double, double* [[TMP0]], align 8
// CHECK3-NEXT: [[TMP11:%.*]] = load double, double* [[E_ON_STACK]], align 8
// CHECK3-NEXT: [[ADD2:%.*]] = fadd double [[TMP10]], [[TMP11]]
// CHECK3-NEXT: store double [[ADD2]], double* [[TMP0]], align 8
// CHECK3-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP3]])
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK3: .omp.reduction.done:
// CHECK3-NEXT: call void @__kmpc_free_shared(i8* [[E1]], i32 8)
// CHECK3-NEXT: ret void
//
//
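// For the double reduction, the shuffle helper below moves the whole element
// in one 64-bit round: the double is viewed as i64 and passed through
// @__kmpc_shuffle_int64 before being stored into the remote-element slot.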
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca double, align 8
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i16 [[TMP1]], i16* [[DOTADDR1]], align 2
// CHECK3-NEXT: store i16 [[TMP2]], i16* [[DOTADDR2]], align 2
// CHECK3-NEXT: store i16 [[TMP3]], i16* [[DOTADDR3]], align 2
// CHECK3-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK3-NEXT: [[TMP6:%.*]] = load i16, i16* [[DOTADDR1]], align 2
// CHECK3-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTADDR2]], align 2
// CHECK3-NEXT: [[TMP8:%.*]] = load i16, i16* [[DOTADDR3]], align 2
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK3-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to double**
// CHECK3-NEXT: [[TMP11:%.*]] = load double*, double** [[TMP10]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr double, double* [[TMP11]], i32 1
// CHECK3-NEXT: [[TMP14:%.*]] = bitcast double* [[TMP13]] to i8*
// CHECK3-NEXT: [[TMP15:%.*]] = bitcast double* [[TMP11]] to i64*
// CHECK3-NEXT: [[TMP16:%.*]] = bitcast double* [[DOTOMP_REDUCTION_ELEMENT]] to i64*
// CHECK3-NEXT: [[TMP17:%.*]] = load i64, i64* [[TMP15]], align 8
// CHECK3-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK3-NEXT: [[TMP19:%.*]] = trunc i32 [[TMP18]] to i16
// CHECK3-NEXT: [[TMP20:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP17]], i16 [[TMP7]], i16 [[TMP19]])
// CHECK3-NEXT: store i64 [[TMP20]], i64* [[TMP16]], align 8
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr i64, i64* [[TMP15]], i32 1
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr i64, i64* [[TMP16]], i32 1
// CHECK3-NEXT: [[TMP23:%.*]] = bitcast double* [[DOTOMP_REDUCTION_ELEMENT]] to i8*
// CHECK3-NEXT: store i8* [[TMP23]], i8** [[TMP12]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = icmp eq i16 [[TMP8]], 0
// CHECK3-NEXT: [[TMP25:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK3-NEXT: [[TMP26:%.*]] = icmp ult i16 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: [[TMP27:%.*]] = and i1 [[TMP25]], [[TMP26]]
// CHECK3-NEXT: [[TMP28:%.*]] = icmp eq i16 [[TMP8]], 2
// CHECK3-NEXT: [[TMP29:%.*]] = and i16 [[TMP6]], 1
// CHECK3-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP29]], 0
// CHECK3-NEXT: [[TMP31:%.*]] = and i1 [[TMP28]], [[TMP30]]
// CHECK3-NEXT: [[TMP32:%.*]] = icmp sgt i16 [[TMP7]], 0
// CHECK3-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
// CHECK3-NEXT: [[TMP34:%.*]] = or i1 [[TMP24]], [[TMP27]]
// CHECK3-NEXT: [[TMP35:%.*]] = or i1 [[TMP34]], [[TMP33]]
// CHECK3-NEXT: br i1 [[TMP35]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK3: then:
// CHECK3-NEXT: [[TMP36:%.*]] = bitcast [1 x i8*]* [[TMP5]] to i8*
// CHECK3-NEXT: [[TMP37:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]] to i8*
// CHECK3-NEXT: call void @"_omp$reduction$reduction_func"(i8* [[TMP36]], i8* [[TMP37]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[IFCONT:%.*]]
// CHECK3: else:
// CHECK3-NEXT: br label [[IFCONT]]
// CHECK3: ifcont:
// CHECK3-NEXT: [[TMP38:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK3-NEXT: [[TMP39:%.*]] = icmp uge i16 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: [[TMP40:%.*]] = and i1 [[TMP38]], [[TMP39]]
// CHECK3-NEXT: br i1 [[TMP40]], label [[THEN4:%.*]], label [[ELSE5:%.*]]
// CHECK3: then4:
// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to double**
// CHECK3-NEXT: [[TMP43:%.*]] = load double*, double** [[TMP42]], align 4
// CHECK3-NEXT: [[TMP44:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK3-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to double**
// CHECK3-NEXT: [[TMP46:%.*]] = load double*, double** [[TMP45]], align 4
// CHECK3-NEXT: [[TMP47:%.*]] = load double, double* [[TMP43]], align 8
// CHECK3-NEXT: store double [[TMP47]], double* [[TMP46]], align 8
// CHECK3-NEXT: br label [[IFCONT6:%.*]]
// CHECK3: else5:
// CHECK3-NEXT: br label [[IFCONT6]]
// CHECK3: ifcont6:
// CHECK3-NEXT: ret void
//
//
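// Because the double element is wider than the 32-bit shared-memory slots,
// the inter-warp copy below loops twice (the precond/body/exit structure with
// a trip count of 2), staging the value through
// @__openmp_nvptx_data_transfer_temporary_storage one i32 chunk per round.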
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCNT_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK3-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK3-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK3-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK3-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK3-NEXT: [[TMP6:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to [1 x i8*]*
// CHECK3-NEXT: store i32 0, i32* [[DOTCNT_ADDR]], align 4
// CHECK3-NEXT: br label [[PRECOND:%.*]]
// CHECK3: precond:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCNT_ADDR]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = icmp ult i32 [[TMP8]], 2
// CHECK3-NEXT: br i1 [[TMP9]], label [[BODY:%.*]], label [[EXIT:%.*]]
// CHECK3: body:
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2]])
// CHECK3-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK3-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK3: then:
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP7]], i32 0, i32 0
// CHECK3-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP10]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP11]] to i32*
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr i32, i32* [[TMP12]], i32 [[TMP8]]
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP13]], align 4
// CHECK3-NEXT: store volatile i32 [[TMP15]], i32 addrspace(3)* [[TMP14]], align 4
// CHECK3-NEXT: br label [[IFCONT:%.*]]
// CHECK3: else:
// CHECK3-NEXT: br label [[IFCONT]]
// CHECK3: ifcont:
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP16]]
// CHECK3-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK3: then2:
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP7]], i32 0, i32 0
// CHECK3-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to i32*
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr i32, i32* [[TMP20]], i32 [[TMP8]]
// CHECK3-NEXT: [[TMP22:%.*]] = load volatile i32, i32 addrspace(3)* [[TMP17]], align 4
// CHECK3-NEXT: store i32 [[TMP22]], i32* [[TMP21]], align 4
// CHECK3-NEXT: br label [[IFCONT4:%.*]]
// CHECK3: else3:
// CHECK3-NEXT: br label [[IFCONT4]]
// CHECK3: ifcont4:
// CHECK3-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK3-NEXT: store i32 [[TMP23]], i32* [[DOTCNT_ADDR]], align 4
// CHECK3-NEXT: br label [[PRECOND]]
// CHECK3: exit:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_list_to_global_copy_func
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to [1 x i8*]*
// CHECK3-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty*
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP4]], i32 0, i32 0
// CHECK3-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to double*
// CHECK3-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP6]], i32 0, i32 0
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2048 x double], [2048 x double]* [[E]], i32 0, i32 [[TMP7]]
// CHECK3-NEXT: [[TMP12:%.*]] = load double, double* [[TMP10]], align 8
// CHECK3-NEXT: store double [[TMP12]], double* [[TMP11]], align 128
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_list_to_global_reduce_func
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to %struct._globalized_locals_ty*
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP4]], i32 0, i32 0
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2048 x double], [2048 x double]* [[E]], i32 0, i32 [[TMP5]]
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast double* [[TMP7]] to i8*
// CHECK3-NEXT: store i8* [[TMP8]], i8** [[TMP6]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT: [[TMP10:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: call void @"_omp$reduction$reduction_func"(i8* [[TMP9]], i8* [[TMP10]]) #[[ATTR4]]
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_global_to_list_copy_func
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to [1 x i8*]*
// CHECK3-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty*
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP4]], i32 0, i32 0
// CHECK3-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to double*
// CHECK3-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP6]], i32 0, i32 0
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2048 x double], [2048 x double]* [[E]], i32 0, i32 [[TMP7]]
// CHECK3-NEXT: [[TMP12:%.*]] = load double, double* [[TMP11]], align 128
// CHECK3-NEXT: store double [[TMP12]], double* [[TMP10]], align 8
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_global_to_list_reduce_func
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to %struct._globalized_locals_ty*
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP4]], i32 0, i32 0
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2048 x double], [2048 x double]* [[E]], i32 0, i32 [[TMP5]]
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast double* [[TMP7]] to i8*
// CHECK3-NEXT: store i8* [[TMP8]], i8** [[TMP6]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT: [[TMP10:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: call void @"_omp$reduction$reduction_func"(i8* [[TMP10]], i8* [[TMP9]]) #[[ATTR4]]
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l26
// CHECK3-SAME: (i32 noundef [[C:%.*]], i32 noundef [[D:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[C]], i32* [[C_ADDR]], align 4
// CHECK3-NEXT: store i32 [[D]], i32* [[D_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[C_ADDR]] to i8*
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[D_ADDR]] to float*
// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
// CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK3: user_code.entry:
// CHECK3-NEXT: [[TMP1:%.*]] = load i8, i8* [[CONV]], align 1
// CHECK3-NEXT: [[C2:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 1)
// CHECK3-NEXT: store i8 [[TMP1]], i8* [[C2]], align 1
// CHECK3-NEXT: [[TMP2:%.*]] = load float, float* [[CONV1]], align 4
// CHECK3-NEXT: [[D3:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 4)
// CHECK3-NEXT: [[D_ON_STACK:%.*]] = bitcast i8* [[D3]] to float*
// CHECK3-NEXT: store float [[TMP2]], float* [[D_ON_STACK]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK3-NEXT: call void @__omp_outlined__1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i8* [[C2]], float* [[D_ON_STACK]]) #[[ATTR4]]
// CHECK3-NEXT: call void @__kmpc_free_shared(i8* [[D3]], i32 4)
// CHECK3-NEXT: call void @__kmpc_free_shared(i8* [[C2]], i32 1)
// CHECK3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK3-NEXT: ret void
// CHECK3: worker.exit:
// CHECK3-NEXT: ret void
//
//
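// The second teams region reduces two variables at once: the char "c" (xor)
// and the float "d" (mul) are globalized, updated, and collected into a
// two-element reduce list before the @__kmpc_nvptx_teams_reduce_nowait_v2
// call; the winning team then folds both partials into the original
// variables.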
// CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i8* noundef nonnull align 1 dereferenceable(1) [[C:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca float*, align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i8* [[C]], i8** [[C_ADDR]], align 4
// CHECK3-NEXT: store float* [[D]], float** [[D_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i8*, i8** [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load float*, float** [[D_ADDR]], align 4
// CHECK3-NEXT: [[C1:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 1)
// CHECK3-NEXT: [[D2:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i32 4)
// CHECK3-NEXT: [[D_ON_STACK:%.*]] = bitcast i8* [[D2]] to float*
// CHECK3-NEXT: store i8 0, i8* [[C1]], align 1
// CHECK3-NEXT: store float 1.000000e+00, float* [[D_ON_STACK]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i8, i8* [[C1]], align 1
// CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP2]] to i32
// CHECK3-NEXT: [[XOR:%.*]] = xor i32 [[CONV]], 2
// CHECK3-NEXT: [[CONV3:%.*]] = trunc i32 [[XOR]] to i8
// CHECK3-NEXT: store i8 [[CONV3]], i8* [[C1]], align 1
// CHECK3-NEXT: [[TMP3:%.*]] = load float, float* [[D_ON_STACK]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], 3.300000e+01
// CHECK3-NEXT: store float [[MUL]], float* [[D_ON_STACK]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK3-NEXT: store i8* [[C1]], i8** [[TMP6]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast float* [[D_ON_STACK]] to i8*
// CHECK3-NEXT: store i8* [[TMP8]], i8** [[TMP7]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT: [[TMP10:%.*]] = load i8*, i8** @"_openmp_teams_reductions_buffer_$_$ptr", align 4
// CHECK3-NEXT: [[TMP11:%.*]] = call i32 @__kmpc_nvptx_teams_reduce_nowait_v2(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i8* [[TMP10]], i32 2048, i8* [[TMP9]], void (i8*, i16, i16, i16)* @_omp_reduction_shuffle_and_reduce_func3, void (i8*, i32)* @_omp_reduction_inter_warp_copy_func4, void (i8*, i32, i8*)* @_omp_reduction_list_to_global_copy_func5, void (i8*, i32, i8*)* @_omp_reduction_list_to_global_reduce_func6, void (i8*, i32, i8*)* @_omp_reduction_global_to_list_copy_func7, void (i8*, i32, i8*)* @_omp_reduction_global_to_list_reduce_func8)
// CHECK3-NEXT: [[TMP12:%.*]] = icmp eq i32 [[TMP11]], 1
// CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK3: .omp.reduction.then:
// CHECK3-NEXT: [[TMP13:%.*]] = load i8, i8* [[TMP0]], align 1
// CHECK3-NEXT: [[CONV4:%.*]] = sext i8 [[TMP13]] to i32
// CHECK3-NEXT: [[TMP14:%.*]] = load i8, i8* [[C1]], align 1
// CHECK3-NEXT: [[CONV5:%.*]] = sext i8 [[TMP14]] to i32
// CHECK3-NEXT: [[XOR6:%.*]] = xor i32 [[CONV4]], [[CONV5]]
// CHECK3-NEXT: [[CONV7:%.*]] = trunc i32 [[XOR6]] to i8
// CHECK3-NEXT: store i8 [[CONV7]], i8* [[TMP0]], align 1
// CHECK3-NEXT: [[TMP15:%.*]] = load float, float* [[TMP1]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = load float, float* [[D_ON_STACK]], align 4
// CHECK3-NEXT: [[MUL8:%.*]] = fmul float [[TMP15]], [[TMP16]]
// CHECK3-NEXT: store float [[MUL8]], float* [[TMP1]], align 4
// CHECK3-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP5]])
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK3: .omp.reduction.done:
// CHECK3-NEXT: call void @__kmpc_free_shared(i8* [[D2]], i32 4)
// CHECK3-NEXT: call void @__kmpc_free_shared(i8* [[C1]], i32 1)
// CHECK3-NEXT: ret void
//
//
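// In the shuffle helper below the i8 element is sign-extended to i32 for
// @__kmpc_shuffle_int32 and truncated back, while the float travels through a
// bitcast to i32; both remote values land in the local reduce-element
// allocas.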
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func3
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca float, align 4
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i16 [[TMP1]], i16* [[DOTADDR1]], align 2
// CHECK3-NEXT: store i16 [[TMP2]], i16* [[DOTADDR2]], align 2
// CHECK3-NEXT: store i16 [[TMP3]], i16* [[DOTADDR3]], align 2
// CHECK3-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
// CHECK3-NEXT: [[TMP6:%.*]] = load i16, i16* [[DOTADDR1]], align 2
// CHECK3-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTADDR2]], align 2
// CHECK3-NEXT: [[TMP8:%.*]] = load i16, i16* [[DOTADDR3]], align 2
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK3-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[TMP10]], i32 1
// CHECK3-NEXT: [[TMP13:%.*]] = load i8, i8* [[TMP10]], align 1
// CHECK3-NEXT: [[TMP14:%.*]] = sext i8 [[TMP13]] to i32
// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK3-NEXT: [[TMP16:%.*]] = trunc i32 [[TMP15]] to i16
// CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP14]], i16 [[TMP7]], i16 [[TMP16]])
// CHECK3-NEXT: [[TMP18:%.*]] = trunc i32 [[TMP17]] to i8
// CHECK3-NEXT: store i8 [[TMP18]], i8* [[DOTOMP_REDUCTION_ELEMENT]], align 1
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr i8, i8* [[TMP10]], i32 1
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr i8, i8* [[DOTOMP_REDUCTION_ELEMENT]], i32 1
// CHECK3-NEXT: store i8* [[DOTOMP_REDUCTION_ELEMENT]], i8** [[TMP11]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1
// CHECK3-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to float**
// CHECK3-NEXT: [[TMP23:%.*]] = load float*, float** [[TMP22]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr float, float* [[TMP23]], i32 1
// CHECK3-NEXT: [[TMP26:%.*]] = bitcast float* [[TMP25]] to i8*
// CHECK3-NEXT: [[TMP27:%.*]] = bitcast float* [[TMP23]] to i32*
// CHECK3-NEXT: [[TMP28:%.*]] = bitcast float* [[DOTOMP_REDUCTION_ELEMENT4]] to i32*
// CHECK3-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP27]], align 4
// CHECK3-NEXT: [[TMP30:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK3-NEXT: [[TMP31:%.*]] = trunc i32 [[TMP30]] to i16
// CHECK3-NEXT: [[TMP32:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP29]], i16 [[TMP7]], i16 [[TMP31]])
// CHECK3-NEXT: store i32 [[TMP32]], i32* [[TMP28]], align 4
// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr i32, i32* [[TMP27]], i32 1
// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr i32, i32* [[TMP28]], i32 1
// CHECK3-NEXT: [[TMP35:%.*]] = bitcast float* [[DOTOMP_REDUCTION_ELEMENT4]] to i8*
// CHECK3-NEXT: store i8* [[TMP35]], i8** [[TMP24]], align 4
// CHECK3-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP8]], 0
// CHECK3-NEXT: [[TMP37:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK3-NEXT: [[TMP38:%.*]] = icmp ult i16 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
// CHECK3-NEXT: [[TMP40:%.*]] = icmp eq i16 [[TMP8]], 2
// CHECK3-NEXT: [[TMP41:%.*]] = and i16 [[TMP6]], 1
// CHECK3-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP41]], 0
// CHECK3-NEXT: [[TMP43:%.*]] = and i1 [[TMP40]], [[TMP42]]
// CHECK3-NEXT: [[TMP44:%.*]] = icmp sgt i16 [[TMP7]], 0
// CHECK3-NEXT: [[TMP45:%.*]] = and i1 [[TMP43]], [[TMP44]]
// CHECK3-NEXT: [[TMP46:%.*]] = or i1 [[TMP36]], [[TMP39]]
// CHECK3-NEXT: [[TMP47:%.*]] = or i1 [[TMP46]], [[TMP45]]
// CHECK3-NEXT: br i1 [[TMP47]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK3: then:
// CHECK3-NEXT: [[TMP48:%.*]] = bitcast [2 x i8*]* [[TMP5]] to i8*
// CHECK3-NEXT: [[TMP49:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]] to i8*
// CHECK3-NEXT: call void @"_omp$reduction$reduction_func2"(i8* [[TMP48]], i8* [[TMP49]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[IFCONT:%.*]]
// CHECK3: else:
// CHECK3-NEXT: br label [[IFCONT]]
// CHECK3: ifcont:
// CHECK3-NEXT: [[TMP50:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK3-NEXT: [[TMP51:%.*]] = icmp uge i16 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: [[TMP52:%.*]] = and i1 [[TMP50]], [[TMP51]]
// CHECK3-NEXT: br i1 [[TMP52]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
// CHECK3: then5:
// CHECK3-NEXT: [[TMP53:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[TMP54:%.*]] = load i8*, i8** [[TMP53]], align 4
// CHECK3-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK3-NEXT: [[TMP56:%.*]] = load i8*, i8** [[TMP55]], align 4
// CHECK3-NEXT: [[TMP57:%.*]] = load i8, i8* [[TMP54]], align 1
// CHECK3-NEXT: store i8 [[TMP57]], i8* [[TMP56]], align 1
// CHECK3-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK3-NEXT: [[TMP59:%.*]] = bitcast i8** [[TMP58]] to float**
// CHECK3-NEXT: [[TMP60:%.*]] = load float*, float** [[TMP59]], align 4
// CHECK3-NEXT: [[TMP61:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1
// CHECK3-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to float**
// CHECK3-NEXT: [[TMP63:%.*]] = load float*, float** [[TMP62]], align 4
// CHECK3-NEXT: [[TMP64:%.*]] = load float, float* [[TMP60]], align 4
// CHECK3-NEXT: store float [[TMP64]], float* [[TMP63]], align 4
// CHECK3-NEXT: br label [[IFCONT7:%.*]]
// CHECK3: else6:
// CHECK3-NEXT: br label [[IFCONT7]]
// CHECK3: ifcont7:
// CHECK3-NEXT: ret void
//
//
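// The inter-warp copy below stages the i8 element through an i8 view of a
// shared i32 slot and the float through the full i32 slot, again with one
// barrier-delimited round per reduce-list element.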
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func4
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK3-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK3-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK3-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK3-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK3-NEXT: [[TMP6:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to [2 x i8*]*
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
// CHECK3-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK3-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK3: then:
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 0
// CHECK3-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast i32 addrspace(3)* [[TMP10]] to i8 addrspace(3)*
// CHECK3-NEXT: [[TMP12:%.*]] = load i8, i8* [[TMP9]], align 1
// CHECK3-NEXT: store volatile i8 [[TMP12]], i8 addrspace(3)* [[TMP11]], align 1
// CHECK3-NEXT: br label [[IFCONT:%.*]]
// CHECK3: else:
// CHECK3-NEXT: br label [[IFCONT]]
// CHECK3: ifcont:
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP13]]
// CHECK3-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK3: then2:
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK3-NEXT: [[TMP15:%.*]] = bitcast i32 addrspace(3)* [[TMP14]] to i8 addrspace(3)*
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 0
// CHECK3-NEXT: [[TMP17:%.*]] = load i8*, i8** [[TMP16]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = load volatile i8, i8 addrspace(3)* [[TMP15]], align 1
// CHECK3-NEXT: store i8 [[TMP18]], i8* [[TMP17]], align 1
// CHECK3-NEXT: br label [[IFCONT4:%.*]]
// CHECK3: else3:
// CHECK3-NEXT: br label [[IFCONT4]]
// CHECK3: ifcont4:
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
// CHECK3-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK3-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
// CHECK3: then6:
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 1
// CHECK3-NEXT: [[TMP20:%.*]] = load i8*, i8** [[TMP19]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = bitcast i8* [[TMP20]] to i32*
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK3-NEXT: store volatile i32 [[TMP23]], i32 addrspace(3)* [[TMP22]], align 4
// CHECK3-NEXT: br label [[IFCONT8:%.*]]
// CHECK3: else7:
// CHECK3-NEXT: br label [[IFCONT8]]
// CHECK3: ifcont8:
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]])
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP24]]
// CHECK3-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
// CHECK3: then10:
// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 1
// CHECK3-NEXT: [[TMP27:%.*]] = load i8*, i8** [[TMP26]], align 4
// CHECK3-NEXT: [[TMP28:%.*]] = bitcast i8* [[TMP27]] to i32*
// CHECK3-NEXT: [[TMP29:%.*]] = load volatile i32, i32 addrspace(3)* [[TMP25]], align 4
// CHECK3-NEXT: store i32 [[TMP29]], i32* [[TMP28]], align 4
// CHECK3-NEXT: br label [[IFCONT12:%.*]]
// CHECK3: else11:
// CHECK3-NEXT: br label [[IFCONT12]]
// CHECK3: ifcont12:
// CHECK3-NEXT: ret void
//
//
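// The next four helpers stage the (char c, float d) reduce list through the
// teams-reduction scratch buffer. With
// -fopenmp-cuda-teams-reduction-recs-num=2048 the buffer is a struct of
// arrays, one 2048-entry, 128-byte-aligned column per reduction variable,
// indexed by the team's slot number. An illustrative C view of the assumed
// layout (the IR type is %struct._globalized_locals_ty.0):
//
//   struct reduction_buffer { char c[2048]; float d[2048]; };
//
// list_to_global_{copy,reduce} move/accumulate a team's values into its
// slot; global_to_list_{copy,reduce} read them back for the final combine
// via @"_omp$reduction$reduction_func2".
//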
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_list_to_global_copy_func5
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to [2 x i8*]*
// CHECK3-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty.0*
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i32 0, i32 0
// CHECK3-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 4
// CHECK3-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0:%.*]], %struct._globalized_locals_ty.0* [[TMP6]], i32 0, i32 0
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2048 x i8], [2048 x i8]* [[C]], i32 0, i32 [[TMP7]]
// CHECK3-NEXT: [[TMP11:%.*]] = load i8, i8* [[TMP9]], align 1
// CHECK3-NEXT: store i8 [[TMP11]], i8* [[TMP10]], align 128
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i32 0, i32 1
// CHECK3-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to float*
// CHECK3-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0]], %struct._globalized_locals_ty.0* [[TMP6]], i32 0, i32 1
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2048 x float], [2048 x float]* [[D]], i32 0, i32 [[TMP7]]
// CHECK3-NEXT: [[TMP16:%.*]] = load float, float* [[TMP14]], align 4
// CHECK3-NEXT: store float [[TMP16]], float* [[TMP15]], align 128
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_list_to_global_reduce_func6
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to %struct._globalized_locals_ty.0*
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0:%.*]], %struct._globalized_locals_ty.0* [[TMP4]], i32 0, i32 0
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2048 x i8], [2048 x i8]* [[C]], i32 0, i32 [[TMP5]]
// CHECK3-NEXT: store i8* [[TMP7]], i8** [[TMP6]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK3-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0]], %struct._globalized_locals_ty.0* [[TMP4]], i32 0, i32 1
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2048 x float], [2048 x float]* [[D]], i32 0, i32 [[TMP5]]
// CHECK3-NEXT: [[TMP10:%.*]] = bitcast float* [[TMP9]] to i8*
// CHECK3-NEXT: store i8* [[TMP10]], i8** [[TMP8]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT: [[TMP12:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: call void @"_omp$reduction$reduction_func2"(i8* [[TMP11]], i8* [[TMP12]]) #[[ATTR4]]
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_global_to_list_copy_func7
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to [2 x i8*]*
// CHECK3-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty.0*
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i32 0, i32 0
// CHECK3-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 4
// CHECK3-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0:%.*]], %struct._globalized_locals_ty.0* [[TMP6]], i32 0, i32 0
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2048 x i8], [2048 x i8]* [[C]], i32 0, i32 [[TMP7]]
// CHECK3-NEXT: [[TMP11:%.*]] = load i8, i8* [[TMP10]], align 128
// CHECK3-NEXT: store i8 [[TMP11]], i8* [[TMP9]], align 1
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i32 0, i32 1
// CHECK3-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to float*
// CHECK3-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0]], %struct._globalized_locals_ty.0* [[TMP6]], i32 0, i32 1
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2048 x float], [2048 x float]* [[D]], i32 0, i32 [[TMP7]]
// CHECK3-NEXT: [[TMP16:%.*]] = load float, float* [[TMP15]], align 128
// CHECK3-NEXT: store float [[TMP16]], float* [[TMP14]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_global_to_list_reduce_func8
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to %struct._globalized_locals_ty.0*
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0:%.*]], %struct._globalized_locals_ty.0* [[TMP4]], i32 0, i32 0
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2048 x i8], [2048 x i8]* [[C]], i32 0, i32 [[TMP5]]
// CHECK3-NEXT: store i8* [[TMP7]], i8** [[TMP6]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK3-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_0]], %struct._globalized_locals_ty.0* [[TMP4]], i32 0, i32 1
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2048 x float], [2048 x float]* [[D]], i32 0, i32 [[TMP5]]
// CHECK3-NEXT: [[TMP10:%.*]] = bitcast float* [[TMP9]] to i8*
// CHECK3-NEXT: store i8* [[TMP10]], i8** [[TMP8]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT: [[TMP12:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: call void @"_omp$reduction$reduction_func2"(i8* [[TMP12]], i8* [[TMP11]]) #[[ATTR4]]
// CHECK3-NEXT: ret void
//
//
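// The kernel below is the l33 target region, which layers a teams-level
// reduction(|: a) reduction(max: b) over an identical parallel-level
// reduction, so the runtime protocol runs twice:
// __kmpc_nvptx_parallel_reduce_nowait_v2 inside the outlined parallel body
// and __kmpc_nvptx_teams_reduce_nowait_v2 in the teams region below.
//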
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l33
// CHECK3-SAME: (i32 noundef [[A:%.*]], i32 noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[B_ADDR]] to i16*
// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 2, i1 false, i1 true)
// CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK3: user_code.entry:
// CHECK3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
// CHECK3-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK3-NEXT: call void @__omp_outlined__9(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTZERO_ADDR]], i32* [[A_ADDR]], i16* [[CONV]]) #[[ATTR4]]
// CHECK3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK3-NEXT: ret void
// CHECK3: worker.exit:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__9
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i16*, align 4
// CHECK3-NEXT: [[A1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B2:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK3-NEXT: store i16* [[B]], i16** [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i16*, i16** [[B_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[A1]], align 4
// CHECK3-NEXT: store i16 -32768, i16* [[B2]], align 2
// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i32* [[A1]] to i8*
// CHECK3-NEXT: store i8* [[TMP3]], i8** [[TMP2]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP5:%.*]] = bitcast i16* [[B2]] to i8*
// CHECK3-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK3-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*, i16*)* @__omp_outlined__10 to i8*), i8* null, i8** [[TMP8]], i32 2)
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[TMP10:%.*]] = bitcast i32* [[A1]] to i8*
// CHECK3-NEXT: store i8* [[TMP10]], i8** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK3-NEXT: [[TMP12:%.*]] = bitcast i16* [[B2]] to i8*
// CHECK3-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT: [[TMP14:%.*]] = load i8*, i8** @"_openmp_teams_reductions_buffer_$_$ptr", align 4
// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_nvptx_teams_reduce_nowait_v2(%struct.ident_t* @[[GLOB3]], i32 [[TMP7]], i8* [[TMP14]], i32 2048, i8* [[TMP13]], void (i8*, i16, i16, i16)* @_omp_reduction_shuffle_and_reduce_func15, void (i8*, i32)* @_omp_reduction_inter_warp_copy_func16, void (i8*, i32, i8*)* @_omp_reduction_list_to_global_copy_func17, void (i8*, i32, i8*)* @_omp_reduction_list_to_global_reduce_func18, void (i8*, i32, i8*)* @_omp_reduction_global_to_list_copy_func19, void (i8*, i32, i8*)* @_omp_reduction_global_to_list_reduce_func20)
// CHECK3-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 1
// CHECK3-NEXT: br i1 [[TMP16]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK3: .omp.reduction.then:
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[A1]], align 4
// CHECK3-NEXT: [[OR:%.*]] = or i32 [[TMP17]], [[TMP18]]
// CHECK3-NEXT: store i32 [[OR]], i32* [[TMP0]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP19]] to i32
// CHECK3-NEXT: [[TMP20:%.*]] = load i16, i16* [[B2]], align 2
// CHECK3-NEXT: [[CONV3:%.*]] = sext i16 [[TMP20]] to i32
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[CONV]], [[CONV3]]
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: [[TMP21:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP22:%.*]] = load i16, i16* [[B2]], align 2
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i16 [ [[TMP21]], [[COND_TRUE]] ], [ [[TMP22]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i16 [[COND]], i16* [[TMP1]], align 2
// CHECK3-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP7]])
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK3: .omp.reduction.done:
// CHECK3-NEXT: ret void
//
//
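// __omp_outlined__10 is the parallel-region body: each thread applies
// a |= 1 and b = max(b, 99) to private copies, then hands its (i32, i16)
// reduce list to __kmpc_nvptx_parallel_reduce_nowait_v2 along with the
// shuffle-and-reduce and inter-warp copy helpers (func12/func13); the
// winning thread folds its result into the teams-level copies.
//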
// CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__10
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i16*, align 4
// CHECK3-NEXT: [[A1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B2:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
// CHECK3-NEXT: store i16* [[B]], i16** [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i16*, i16** [[B_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[A1]], align 4
// CHECK3-NEXT: store i16 -32768, i16* [[B2]], align 2
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[A1]], align 4
// CHECK3-NEXT: [[OR:%.*]] = or i32 [[TMP2]], 1
// CHECK3-NEXT: store i32 [[OR]], i32* [[A1]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i16, i16* [[B2]], align 2
// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 99, [[CONV]]
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP4:%.*]] = load i16, i16* [[B2]], align 2
// CHECK3-NEXT: [[CONV3:%.*]] = sext i16 [[TMP4]] to i32
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[CONV3]], [[COND_FALSE]] ]
// CHECK3-NEXT: [[CONV4:%.*]] = trunc i32 [[COND]] to i16
// CHECK3-NEXT: store i16 [[CONV4]], i16* [[B2]], align 2
// CHECK3-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i32* [[A1]] to i8*
// CHECK3-NEXT: store i8* [[TMP8]], i8** [[TMP7]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK3-NEXT: [[TMP10:%.*]] = bitcast i16* [[B2]] to i8*
// CHECK3-NEXT: store i8* [[TMP10]], i8** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT: [[TMP12:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(%struct.ident_t* @[[GLOB3]], i32 [[TMP6]], i32 2, i32 8, i8* [[TMP11]], void (i8*, i16, i16, i16)* @_omp_reduction_shuffle_and_reduce_func12, void (i8*, i32)* @_omp_reduction_inter_warp_copy_func13)
// CHECK3-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP12]], 1
// CHECK3-NEXT: br i1 [[TMP13]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK3: .omp.reduction.then:
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[A1]], align 4
// CHECK3-NEXT: [[OR5:%.*]] = or i32 [[TMP14]], [[TMP15]]
// CHECK3-NEXT: store i32 [[OR5]], i32* [[TMP0]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK3-NEXT: [[CONV6:%.*]] = sext i16 [[TMP16]] to i32
// CHECK3-NEXT: [[TMP17:%.*]] = load i16, i16* [[B2]], align 2
// CHECK3-NEXT: [[CONV7:%.*]] = sext i16 [[TMP17]] to i32
// CHECK3-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[CONV6]], [[CONV7]]
// CHECK3-NEXT: br i1 [[CMP8]], label [[COND_TRUE9:%.*]], label [[COND_FALSE10:%.*]]
// CHECK3: cond.true9:
// CHECK3-NEXT: [[TMP18:%.*]] = load i16, i16* [[TMP1]], align 2
// CHECK3-NEXT: br label [[COND_END11:%.*]]
// CHECK3: cond.false10:
// CHECK3-NEXT: [[TMP19:%.*]] = load i16, i16* [[B2]], align 2
// CHECK3-NEXT: br label [[COND_END11]]
// CHECK3: cond.end11:
// CHECK3-NEXT: [[COND12:%.*]] = phi i16 [ [[TMP18]], [[COND_TRUE9]] ], [ [[TMP19]], [[COND_FALSE10]] ]
// CHECK3-NEXT: store i16 [[COND12]], i16* [[TMP1]], align 2
// CHECK3-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP6]])
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK3: .omp.reduction.done:
// CHECK3-NEXT: ret void
//
//
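// shuffle_and_reduce_func12 pulls a remote lane's (i32, i16) reduce list
// into local temporaries via __kmpc_shuffle_int32, then uses the algorithm
// version ([[TMP8]]) plus the lane/offset pair to decide whether this lane
// reduces. The predicate built in [[TMP37]]..[[TMP48]] corresponds roughly
// to this sketch (illustrative names):
//
//   reduce = (algo == 0)                             // full-warp reduce
//         || (algo == 1 && lane_id < offset)         // contiguous remainder
//         || (algo == 2 && (lane_id & 1) == 0
//                       && offset > 0);              // dispersed partial warp
//   if (reduce) reduction_func11(local_list, remote_list);
//   if (algo == 1 && lane_id >= offset)              // losing half adopts
//     copy(remote_list, local_list);                 // the remote values
//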
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func12
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca i16, align 2
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i16 [[TMP1]], i16* [[DOTADDR1]], align 2
// CHECK3-NEXT: store i16 [[TMP2]], i16* [[DOTADDR2]], align 2
// CHECK3-NEXT: store i16 [[TMP3]], i16* [[DOTADDR3]], align 2
// CHECK3-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
// CHECK3-NEXT: [[TMP6:%.*]] = load i16, i16* [[DOTADDR1]], align 2
// CHECK3-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTADDR2]], align 2
// CHECK3-NEXT: [[TMP8:%.*]] = load i16, i16* [[DOTADDR3]], align 2
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK3-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32**
// CHECK3-NEXT: [[TMP11:%.*]] = load i32*, i32** [[TMP10]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr i32, i32* [[TMP11]], i32 1
// CHECK3-NEXT: [[TMP14:%.*]] = bitcast i32* [[TMP13]] to i8*
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK3-NEXT: [[TMP17:%.*]] = trunc i32 [[TMP16]] to i16
// CHECK3-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP15]], i16 [[TMP7]], i16 [[TMP17]])
// CHECK3-NEXT: store i32 [[TMP18]], i32* [[DOTOMP_REDUCTION_ELEMENT]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr i32, i32* [[TMP11]], i32 1
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr i32, i32* [[DOTOMP_REDUCTION_ELEMENT]], i32 1
// CHECK3-NEXT: [[TMP21:%.*]] = bitcast i32* [[DOTOMP_REDUCTION_ELEMENT]] to i8*
// CHECK3-NEXT: store i8* [[TMP21]], i8** [[TMP12]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1
// CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i16**
// CHECK3-NEXT: [[TMP24:%.*]] = load i16*, i16** [[TMP23]], align 4
// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr i16, i16* [[TMP24]], i32 1
// CHECK3-NEXT: [[TMP27:%.*]] = bitcast i16* [[TMP26]] to i8*
// CHECK3-NEXT: [[TMP28:%.*]] = load i16, i16* [[TMP24]], align 2
// CHECK3-NEXT: [[TMP29:%.*]] = sext i16 [[TMP28]] to i32
// CHECK3-NEXT: [[TMP30:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK3-NEXT: [[TMP31:%.*]] = trunc i32 [[TMP30]] to i16
// CHECK3-NEXT: [[TMP32:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP29]], i16 [[TMP7]], i16 [[TMP31]])
// CHECK3-NEXT: [[TMP33:%.*]] = trunc i32 [[TMP32]] to i16
// CHECK3-NEXT: store i16 [[TMP33]], i16* [[DOTOMP_REDUCTION_ELEMENT4]], align 2
// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr i16, i16* [[TMP24]], i32 1
// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr i16, i16* [[DOTOMP_REDUCTION_ELEMENT4]], i32 1
// CHECK3-NEXT: [[TMP36:%.*]] = bitcast i16* [[DOTOMP_REDUCTION_ELEMENT4]] to i8*
// CHECK3-NEXT: store i8* [[TMP36]], i8** [[TMP25]], align 4
// CHECK3-NEXT: [[TMP37:%.*]] = icmp eq i16 [[TMP8]], 0
// CHECK3-NEXT: [[TMP38:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK3-NEXT: [[TMP39:%.*]] = icmp ult i16 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: [[TMP40:%.*]] = and i1 [[TMP38]], [[TMP39]]
// CHECK3-NEXT: [[TMP41:%.*]] = icmp eq i16 [[TMP8]], 2
// CHECK3-NEXT: [[TMP42:%.*]] = and i16 [[TMP6]], 1
// CHECK3-NEXT: [[TMP43:%.*]] = icmp eq i16 [[TMP42]], 0
// CHECK3-NEXT: [[TMP44:%.*]] = and i1 [[TMP41]], [[TMP43]]
// CHECK3-NEXT: [[TMP45:%.*]] = icmp sgt i16 [[TMP7]], 0
// CHECK3-NEXT: [[TMP46:%.*]] = and i1 [[TMP44]], [[TMP45]]
// CHECK3-NEXT: [[TMP47:%.*]] = or i1 [[TMP37]], [[TMP40]]
// CHECK3-NEXT: [[TMP48:%.*]] = or i1 [[TMP47]], [[TMP46]]
// CHECK3-NEXT: br i1 [[TMP48]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK3: then:
// CHECK3-NEXT: [[TMP49:%.*]] = bitcast [2 x i8*]* [[TMP5]] to i8*
// CHECK3-NEXT: [[TMP50:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]] to i8*
// CHECK3-NEXT: call void @"_omp$reduction$reduction_func11"(i8* [[TMP49]], i8* [[TMP50]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[IFCONT:%.*]]
// CHECK3: else:
// CHECK3-NEXT: br label [[IFCONT]]
// CHECK3: ifcont:
// CHECK3-NEXT: [[TMP51:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK3-NEXT: [[TMP52:%.*]] = icmp uge i16 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: [[TMP53:%.*]] = and i1 [[TMP51]], [[TMP52]]
// CHECK3-NEXT: br i1 [[TMP53]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
// CHECK3: then5:
// CHECK3-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32**
// CHECK3-NEXT: [[TMP56:%.*]] = load i32*, i32** [[TMP55]], align 4
// CHECK3-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK3-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to i32**
// CHECK3-NEXT: [[TMP59:%.*]] = load i32*, i32** [[TMP58]], align 4
// CHECK3-NEXT: [[TMP60:%.*]] = load i32, i32* [[TMP56]], align 4
// CHECK3-NEXT: store i32 [[TMP60]], i32* [[TMP59]], align 4
// CHECK3-NEXT: [[TMP61:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK3-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i16**
// CHECK3-NEXT: [[TMP63:%.*]] = load i16*, i16** [[TMP62]], align 4
// CHECK3-NEXT: [[TMP64:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1
// CHECK3-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i16**
// CHECK3-NEXT: [[TMP66:%.*]] = load i16*, i16** [[TMP65]], align 4
// CHECK3-NEXT: [[TMP67:%.*]] = load i16, i16* [[TMP63]], align 2
// CHECK3-NEXT: store i16 [[TMP67]], i16* [[TMP66]], align 2
// CHECK3-NEXT: br label [[IFCONT7:%.*]]
// CHECK3: else6:
// CHECK3-NEXT: br label [[IFCONT7]]
// CHECK3: ifcont7:
// CHECK3-NEXT: ret void
//
//
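// inter_warp_copy_func13 is the same transfer-medium protocol shown for
// func4 above, specialized for the (i32 a, i16 b) list: the i32 element
// goes through a 32-bit shared slot directly, while the i16 element is
// staged through the same slot reinterpreted as i16 addrspace(3) storage.
//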
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func13
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK3-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK3-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK3-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK3-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK3-NEXT: [[TMP6:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to [2 x i8*]*
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP2]])
// CHECK3-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK3-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK3: then:
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 0
// CHECK3-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK3-NEXT: store volatile i32 [[TMP12]], i32 addrspace(3)* [[TMP11]], align 4
// CHECK3-NEXT: br label [[IFCONT:%.*]]
// CHECK3: else:
// CHECK3-NEXT: br label [[IFCONT]]
// CHECK3: ifcont:
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP13]]
// CHECK3-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK3: then2:
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 0
// CHECK3-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
// CHECK3-NEXT: [[TMP18:%.*]] = load volatile i32, i32 addrspace(3)* [[TMP14]], align 4
// CHECK3-NEXT: store i32 [[TMP18]], i32* [[TMP17]], align 4
// CHECK3-NEXT: br label [[IFCONT4:%.*]]
// CHECK3: else3:
// CHECK3-NEXT: br label [[IFCONT4]]
// CHECK3: ifcont4:
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
// CHECK3-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK3-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
// CHECK3: then6:
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 1
// CHECK3-NEXT: [[TMP20:%.*]] = load i8*, i8** [[TMP19]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = bitcast i8* [[TMP20]] to i16*
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK3-NEXT: [[TMP23:%.*]] = bitcast i32 addrspace(3)* [[TMP22]] to i16 addrspace(3)*
// CHECK3-NEXT: [[TMP24:%.*]] = load i16, i16* [[TMP21]], align 2
// CHECK3-NEXT: store volatile i16 [[TMP24]], i16 addrspace(3)* [[TMP23]], align 2
// CHECK3-NEXT: br label [[IFCONT8:%.*]]
// CHECK3: else7:
// CHECK3-NEXT: br label [[IFCONT8]]
// CHECK3: ifcont8:
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
// CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP25]]
// CHECK3-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
// CHECK3: then10:
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK3-NEXT: [[TMP27:%.*]] = bitcast i32 addrspace(3)* [[TMP26]] to i16 addrspace(3)*
// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 1
// CHECK3-NEXT: [[TMP29:%.*]] = load i8*, i8** [[TMP28]], align 4
// CHECK3-NEXT: [[TMP30:%.*]] = bitcast i8* [[TMP29]] to i16*
// CHECK3-NEXT: [[TMP31:%.*]] = load volatile i16, i16 addrspace(3)* [[TMP27]], align 2
// CHECK3-NEXT: store i16 [[TMP31]], i16* [[TMP30]], align 2
// CHECK3-NEXT: br label [[IFCONT12:%.*]]
// CHECK3: else11:
// CHECK3-NEXT: br label [[IFCONT12]]
// CHECK3: ifcont12:
// CHECK3-NEXT: ret void
//
//
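// func15 and func16 below repeat the shuffle-and-reduce and inter-warp
// copy pair for the teams-reduction path; they are structurally identical
// to func12/func13 but chain into @"_omp$reduction$reduction_func14", the
// combiner generated for the teams-level clause.
//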
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func15
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca i16, align 2
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i16 [[TMP1]], i16* [[DOTADDR1]], align 2
// CHECK3-NEXT: store i16 [[TMP2]], i16* [[DOTADDR2]], align 2
// CHECK3-NEXT: store i16 [[TMP3]], i16* [[DOTADDR3]], align 2
// CHECK3-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
// CHECK3-NEXT: [[TMP6:%.*]] = load i16, i16* [[DOTADDR1]], align 2
// CHECK3-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTADDR2]], align 2
// CHECK3-NEXT: [[TMP8:%.*]] = load i16, i16* [[DOTADDR3]], align 2
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK3-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32**
// CHECK3-NEXT: [[TMP11:%.*]] = load i32*, i32** [[TMP10]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr i32, i32* [[TMP11]], i32 1
// CHECK3-NEXT: [[TMP14:%.*]] = bitcast i32* [[TMP13]] to i8*
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK3-NEXT: [[TMP17:%.*]] = trunc i32 [[TMP16]] to i16
// CHECK3-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP15]], i16 [[TMP7]], i16 [[TMP17]])
// CHECK3-NEXT: store i32 [[TMP18]], i32* [[DOTOMP_REDUCTION_ELEMENT]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr i32, i32* [[TMP11]], i32 1
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr i32, i32* [[DOTOMP_REDUCTION_ELEMENT]], i32 1
// CHECK3-NEXT: [[TMP21:%.*]] = bitcast i32* [[DOTOMP_REDUCTION_ELEMENT]] to i8*
// CHECK3-NEXT: store i8* [[TMP21]], i8** [[TMP12]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1
// CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i16**
// CHECK3-NEXT: [[TMP24:%.*]] = load i16*, i16** [[TMP23]], align 4
// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr i16, i16* [[TMP24]], i32 1
// CHECK3-NEXT: [[TMP27:%.*]] = bitcast i16* [[TMP26]] to i8*
// CHECK3-NEXT: [[TMP28:%.*]] = load i16, i16* [[TMP24]], align 2
// CHECK3-NEXT: [[TMP29:%.*]] = sext i16 [[TMP28]] to i32
// CHECK3-NEXT: [[TMP30:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK3-NEXT: [[TMP31:%.*]] = trunc i32 [[TMP30]] to i16
// CHECK3-NEXT: [[TMP32:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP29]], i16 [[TMP7]], i16 [[TMP31]])
// CHECK3-NEXT: [[TMP33:%.*]] = trunc i32 [[TMP32]] to i16
// CHECK3-NEXT: store i16 [[TMP33]], i16* [[DOTOMP_REDUCTION_ELEMENT4]], align 2
// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr i16, i16* [[TMP24]], i32 1
// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr i16, i16* [[DOTOMP_REDUCTION_ELEMENT4]], i32 1
// CHECK3-NEXT: [[TMP36:%.*]] = bitcast i16* [[DOTOMP_REDUCTION_ELEMENT4]] to i8*
// CHECK3-NEXT: store i8* [[TMP36]], i8** [[TMP25]], align 4
// CHECK3-NEXT: [[TMP37:%.*]] = icmp eq i16 [[TMP8]], 0
// CHECK3-NEXT: [[TMP38:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK3-NEXT: [[TMP39:%.*]] = icmp ult i16 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: [[TMP40:%.*]] = and i1 [[TMP38]], [[TMP39]]
// CHECK3-NEXT: [[TMP41:%.*]] = icmp eq i16 [[TMP8]], 2
// CHECK3-NEXT: [[TMP42:%.*]] = and i16 [[TMP6]], 1
// CHECK3-NEXT: [[TMP43:%.*]] = icmp eq i16 [[TMP42]], 0
// CHECK3-NEXT: [[TMP44:%.*]] = and i1 [[TMP41]], [[TMP43]]
// CHECK3-NEXT: [[TMP45:%.*]] = icmp sgt i16 [[TMP7]], 0
// CHECK3-NEXT: [[TMP46:%.*]] = and i1 [[TMP44]], [[TMP45]]
// CHECK3-NEXT: [[TMP47:%.*]] = or i1 [[TMP37]], [[TMP40]]
// CHECK3-NEXT: [[TMP48:%.*]] = or i1 [[TMP47]], [[TMP46]]
// CHECK3-NEXT: br i1 [[TMP48]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK3: then:
// CHECK3-NEXT: [[TMP49:%.*]] = bitcast [2 x i8*]* [[TMP5]] to i8*
// CHECK3-NEXT: [[TMP50:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]] to i8*
// CHECK3-NEXT: call void @"_omp$reduction$reduction_func14"(i8* [[TMP49]], i8* [[TMP50]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[IFCONT:%.*]]
// CHECK3: else:
// CHECK3-NEXT: br label [[IFCONT]]
// CHECK3: ifcont:
// CHECK3-NEXT: [[TMP51:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK3-NEXT: [[TMP52:%.*]] = icmp uge i16 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: [[TMP53:%.*]] = and i1 [[TMP51]], [[TMP52]]
// CHECK3-NEXT: br i1 [[TMP53]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
// CHECK3: then5:
// CHECK3-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32**
// CHECK3-NEXT: [[TMP56:%.*]] = load i32*, i32** [[TMP55]], align 4
// CHECK3-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK3-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to i32**
// CHECK3-NEXT: [[TMP59:%.*]] = load i32*, i32** [[TMP58]], align 4
// CHECK3-NEXT: [[TMP60:%.*]] = load i32, i32* [[TMP56]], align 4
// CHECK3-NEXT: store i32 [[TMP60]], i32* [[TMP59]], align 4
// CHECK3-NEXT: [[TMP61:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK3-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i16**
// CHECK3-NEXT: [[TMP63:%.*]] = load i16*, i16** [[TMP62]], align 4
// CHECK3-NEXT: [[TMP64:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1
// CHECK3-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i16**
// CHECK3-NEXT: [[TMP66:%.*]] = load i16*, i16** [[TMP65]], align 4
// CHECK3-NEXT: [[TMP67:%.*]] = load i16, i16* [[TMP63]], align 2
// CHECK3-NEXT: store i16 [[TMP67]], i16* [[TMP66]], align 2
// CHECK3-NEXT: br label [[IFCONT7:%.*]]
// CHECK3: else6:
// CHECK3-NEXT: br label [[IFCONT7]]
// CHECK3: ifcont7:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func16
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3]])
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK3-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK3-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK3-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK3-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK3-NEXT: [[TMP6:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to [2 x i8*]*
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
// CHECK3-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK3-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK3: then:
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 0
// CHECK3-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK3-NEXT: store volatile i32 [[TMP12]], i32 addrspace(3)* [[TMP11]], align 4
// CHECK3-NEXT: br label [[IFCONT:%.*]]
// CHECK3: else:
// CHECK3-NEXT: br label [[IFCONT]]
// CHECK3: ifcont:
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP13]]
// CHECK3-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK3: then2:
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 0
// CHECK3-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
// CHECK3-NEXT: [[TMP18:%.*]] = load volatile i32, i32 addrspace(3)* [[TMP14]], align 4
// CHECK3-NEXT: store i32 [[TMP18]], i32* [[TMP17]], align 4
// CHECK3-NEXT: br label [[IFCONT4:%.*]]
// CHECK3: else3:
// CHECK3-NEXT: br label [[IFCONT4]]
// CHECK3: ifcont4:
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
// CHECK3-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK3-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
// CHECK3: then6:
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 1
// CHECK3-NEXT: [[TMP20:%.*]] = load i8*, i8** [[TMP19]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = bitcast i8* [[TMP20]] to i16*
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK3-NEXT: [[TMP23:%.*]] = bitcast i32 addrspace(3)* [[TMP22]] to i16 addrspace(3)*
// CHECK3-NEXT: [[TMP24:%.*]] = load i16, i16* [[TMP21]], align 2
// CHECK3-NEXT: store volatile i16 [[TMP24]], i16 addrspace(3)* [[TMP23]], align 2
// CHECK3-NEXT: br label [[IFCONT8:%.*]]
// CHECK3: else7:
// CHECK3-NEXT: br label [[IFCONT8]]
// CHECK3: ifcont8:
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB4]], i32 [[TMP2]])
// CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP25]]
// CHECK3-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
// CHECK3: then10:
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK3-NEXT: [[TMP27:%.*]] = bitcast i32 addrspace(3)* [[TMP26]] to i16 addrspace(3)*
// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP7]], i32 0, i32 1
// CHECK3-NEXT: [[TMP29:%.*]] = load i8*, i8** [[TMP28]], align 4
// CHECK3-NEXT: [[TMP30:%.*]] = bitcast i8* [[TMP29]] to i16*
// CHECK3-NEXT: [[TMP31:%.*]] = load volatile i16, i16 addrspace(3)* [[TMP27]], align 2
// CHECK3-NEXT: store i16 [[TMP31]], i16* [[TMP30]], align 2
// CHECK3-NEXT: br label [[IFCONT12:%.*]]
// CHECK3: else11:
// CHECK3-NEXT: br label [[IFCONT12]]
// CHECK3: ifcont12:
// CHECK3-NEXT: ret void
//
//
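// The last four helpers stage the (i32 a, i16 b) list through the second
// globalized buffer (%struct._globalized_locals_ty.1), again one
// 2048-entry column per variable; the *_reduce variants build a pointer
// list into the team's slot and dispatch to
// @"_omp$reduction$reduction_func14".
//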
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_list_to_global_copy_func17
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to [2 x i8*]*
// CHECK3-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty.1*
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i32 0, i32 0
// CHECK3-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1:%.*]], %struct._globalized_locals_ty.1* [[TMP6]], i32 0, i32 0
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2048 x i32], [2048 x i32]* [[A]], i32 0, i32 [[TMP7]]
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP10]], align 4
// CHECK3-NEXT: store i32 [[TMP12]], i32* [[TMP11]], align 128
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i32 0, i32 1
// CHECK3-NEXT: [[TMP14:%.*]] = load i8*, i8** [[TMP13]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = bitcast i8* [[TMP14]] to i16*
// CHECK3-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1]], %struct._globalized_locals_ty.1* [[TMP6]], i32 0, i32 1
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2048 x i16], [2048 x i16]* [[B]], i32 0, i32 [[TMP7]]
// CHECK3-NEXT: [[TMP17:%.*]] = load i16, i16* [[TMP15]], align 2
// CHECK3-NEXT: store i16 [[TMP17]], i16* [[TMP16]], align 128
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_list_to_global_reduce_func18
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to %struct._globalized_locals_ty.1*
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1:%.*]], %struct._globalized_locals_ty.1* [[TMP4]], i32 0, i32 0
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2048 x i32], [2048 x i32]* [[A]], i32 0, i32 [[TMP5]]
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i32* [[TMP7]] to i8*
// CHECK3-NEXT: store i8* [[TMP8]], i8** [[TMP6]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK3-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1]], %struct._globalized_locals_ty.1* [[TMP4]], i32 0, i32 1
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2048 x i16], [2048 x i16]* [[B]], i32 0, i32 [[TMP5]]
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast i16* [[TMP10]] to i8*
// CHECK3-NEXT: store i8* [[TMP11]], i8** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT: [[TMP13:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: call void @"_omp$reduction$reduction_func14"(i8* [[TMP12]], i8* [[TMP13]]) #[[ATTR4]]
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_global_to_list_copy_func19
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to [2 x i8*]*
// CHECK3-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct._globalized_locals_ty.1*
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i32 0, i32 0
// CHECK3-NEXT: [[TMP9:%.*]] = load i8*, i8** [[TMP8]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to i32*
// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1:%.*]], %struct._globalized_locals_ty.1* [[TMP6]], i32 0, i32 0
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2048 x i32], [2048 x i32]* [[A]], i32 0, i32 [[TMP7]]
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 128
// CHECK3-NEXT: store i32 [[TMP12]], i32* [[TMP10]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP4]], i32 0, i32 1
// CHECK3-NEXT: [[TMP14:%.*]] = load i8*, i8** [[TMP13]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = bitcast i8* [[TMP14]] to i16*
// CHECK3-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1]], %struct._globalized_locals_ty.1* [[TMP6]], i32 0, i32 1
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2048 x i16], [2048 x i16]* [[B]], i32 0, i32 [[TMP7]]
// CHECK3-NEXT: [[TMP17:%.*]] = load i16, i16* [[TMP16]], align 128
// CHECK3-NEXT: store i16 [[TMP17]], i16* [[TMP15]], align 2
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_omp_reduction_global_to_list_reduce_func20
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]], i8* noundef [[TMP2:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTADDR2:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to %struct._globalized_locals_ty.1*
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1:%.*]], %struct._globalized_locals_ty.1* [[TMP4]], i32 0, i32 0
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2048 x i32], [2048 x i32]* [[A]], i32 0, i32 [[TMP5]]
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i32* [[TMP7]] to i8*
// CHECK3-NEXT: store i8* [[TMP8]], i8** [[TMP6]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK3-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY_1]], %struct._globalized_locals_ty.1* [[TMP4]], i32 0, i32 1
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2048 x i16], [2048 x i16]* [[B]], i32 0, i32 [[TMP5]]
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast i16* [[TMP10]] to i8*
// CHECK3-NEXT: store i8* [[TMP11]], i8** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT: [[TMP13:%.*]] = load i8*, i8** [[DOTADDR2]], align 4
// CHECK3-NEXT: call void @"_omp$reduction$reduction_func14"(i8* [[TMP13]], i8* [[TMP12]]) #[[ATTR4]]
// CHECK3-NEXT: ret void
//