1 // Test host codegen only.
2 // RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
3 // RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
4 // RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
5 // RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
6 // RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
7 // RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
8 
9 // expected-no-diagnostics
10 #ifndef HEADER
11 #define HEADER
12 
13 // CHECK: [[ANON_T:%.+]] = type { i32*, i32* }
14 // CHECK-DAG: [[SIZES_TEMPLATE:@.+]] = private {{.+}} constant [5 x i[[PTRSZ:32|64]]] [i{{32|64}} 4, i{{32|64}} 4, i{{32|64}} {{8|16}}, i{{32|64}} 0, i{{32|64}} 0]
15 // CHECK-DAG: [[TYPES_TEMPLATE:@.+]] = private {{.+}} constant [5 x i64] [i64 800, i64 800, i64 673, i64 844424930132752, i64 844424930132752]
16 // CHECK-DAG: [[SIZES:@.+]] = private {{.+}} constant [3 x i[[PTRSZ:32|64]]] [i{{32|64}} {{8|16}}, i{{32|64}} 0, i{{32|64}} 0]
17 // CHECK-DAG: [[TYPES:@.+]] = private {{.+}} constant [3 x i64] [i64 673, i64 281474976711440, i64 281474976711440]
18 // CHECK-DAG: [[TYPES3:@.+]] = private {{.+}} constant [3 x i64] [i64 545, i64 281474976711440, i64 800]
19 // CHECK-DAG: [[TYPES11:@.+]] = private {{.+}} constant [5 x i64] [i64 800, i64 800, i64 549, i64 844424930132752, i64 844424930132752]
20 // CHECK-DAG: [[TYPES13:@.+]] = private {{.+}} constant [2 x i64] [i64 545, i64 281474976711440]
21 // CHECK-DAG: [[TYPES15:@.+]] = private {{.+}} constant [2 x i64] [i64 673, i64 281474976711440]
22 
// Template helper: runs body(i) for i in [start, end) inside a
// '#pragma omp target teams distribute parallel for' region. The functor is
// taken by value, so the implicitly captured lambda object (and its pointer
// members) must be mapped to the device by the compiler.
template <typename F>
void omp_loop(int start, int end, F body) {
#pragma omp target teams distribute parallel for
  for (int i = start; i < end; ++i) {
    body(i);
  }
}
30 
// Same loop as omp_loop, but the functor is taken by reference and mapped
// explicitly with 'map(always, to: body)'. Also exercises a locally defined
// lambda reference ('body_ref') mapped onto '#pragma omp target' both
// explicitly and implicitly.
template <typename F>
void omp_loop_ref(int start, int end, F& body) {
#pragma omp target teams distribute parallel for map(always, to: body)
  for (int i = start; i < end; ++i) {
    body(i);
  }
  // NOTE(review): p is intentionally left uninitialized — only the mapping of
  // the lambda that captures it is being codegen-tested, not its runtime use.
  int *p;
  const auto &body_ref = [=](int i) {p[i]=0;};
  // Explicit 'to' map of the lambda reference...
  #pragma omp target map(to: body_ref)
  body_ref(10);
  // ...and implicit capture of the same lambda reference.
  #pragma omp target
  body_ref(10);
}
44 
// Class template whose static member receives a functor by const reference
// and maps it with 'map(to:f)' inside a target region; instantiated (via zoo)
// with a pointer-capturing lambda from main.
template <class FTy>
struct C {
  static void xoo(const FTy& f) {
    int x = 10;
    #pragma omp target map(to:f)
      f(x);
  }
};
53 
// Thin forwarding wrapper: passes the functor through to C<FTy>::xoo so the
// class-template path above gets instantiated with a deduced lambda type.
template <class FTy>
void zoo(const FTy &functor) {
  C<FTy>::xoo(functor);
}
58 
59 // CHECK: define {{.*}}[[MAIN:@.+]](
// Host-side driver: captures two heap pointers by value in a lambda, then
// uses it (a) directly in a target region, (b) through omp_loop (by value),
// and (c) through omp_loop_ref (by reference). The FileCheck lines below pin
// down how the lambda object and its pointer members are laid out in the
// offload base-pointer/pointer arrays for case (a); they must stay in sync
// with the code, so the statements here must not be reordered or renamed.
int main()
{
  int* p = new int[100];
  int* q = new int[100];
  auto body = [=](int i){
    p[i] = q[i];
  };
  zoo([=](int i){p[i] = 0;});

#pragma omp target teams distribute parallel for
  for (int i = 0; i < 100; ++i) {
    body(i);
  }

// CHECK: [[BASE_PTRS:%.+]] = alloca [3 x i8*]{{.+}}
// CHECK: [[PTRS:%.+]] = alloca [3 x i8*]{{.+}}

// First gep of pointers inside lambdas to store the values across function call needs to be ignored
// CHECK: {{%.+}} = getelementptr inbounds [[ANON_T]], [[ANON_T]]* %{{.+}}, i{{.+}} 0, i{{.+}} 0
// CHECK: {{%.+}} = getelementptr inbounds [[ANON_T]], [[ANON_T]]* %{{.+}}, i{{.+}} 0, i{{.+}} 1

// access of pointers inside lambdas
// CHECK: [[BASE_PTR1:%.+]] = getelementptr inbounds [[ANON_T]], [[ANON_T]]* %{{.+}}, i{{.+}} 0, i{{.+}} 0
// CHECK: [[PTR1:%.+]] = load i32*, i32** [[BASE_PTR1]]
// CHECK: [[BASE_PTR2:%.+]] = getelementptr inbounds [[ANON_T]], [[ANON_T]]* %{{.+}}, i{{.+}} 0, i{{.+}} 1
// CHECK: [[PTR2:%.+]] = load i32*, i32** [[BASE_PTR2]]

// storage of pointers in baseptrs and ptrs arrays
// CHECK: [[LOC_LAMBDA:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[BASE_PTRS]], i{{.+}} 0, i{{.+}} 0
// CHECK: [[CAST_LAMBDA:%.+]] = bitcast i8** [[LOC_LAMBDA]] to [[ANON_T]]**
// CHECK: store [[ANON_T]]* %{{.+}}, [[ANON_T]]** [[CAST_LAMBDA]]{{.+}}
// CHECK: [[LOC_LAMBDA:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[PTRS]], i{{.+}} 0, i{{.+}} 0
// CHECK: [[CAST_LAMBDA:%.+]] = bitcast i8** [[LOC_LAMBDA]] to [[ANON_T]]**
// CHECK: store [[ANON_T]]* %{{.+}}, [[ANON_T]]** [[CAST_LAMBDA]]{{.+}}

// CHECK: [[LOC_PTR1:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[BASE_PTRS]], i{{.+}} 0, i{{.+}} 1
// CHECK: [[CAST_PTR1:%.+]] = bitcast i8** [[LOC_PTR1]] to i32***
// CHECK: store i32** [[BASE_PTR1]], i32*** [[CAST_PTR1]]{{.+}}
// CHECK: [[LOC_PTR1:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[PTRS]], i{{.+}} 0, i{{.+}} 1
// CHECK: [[CAST_PTR1:%.+]] = bitcast i8** [[LOC_PTR1]] to i32**
// CHECK: store i32* [[PTR1]], i32** [[CAST_PTR1]]{{.+}}


// CHECK: [[LOC_PTR2:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[BASE_PTRS]], i{{.+}} 0, i{{.+}} 2
// CHECK: [[CAST_PTR2:%.+]] = bitcast i8** [[LOC_PTR2]] to i32***
// CHECK: store i32** [[BASE_PTR2]], i32*** [[CAST_PTR2]]{{.+}}
// CHECK: [[LOC_PTR2:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[PTRS]], i{{.+}} 0, i{{.+}} 2
// CHECK: [[CAST_PTR2:%.+]] = bitcast i8** [[LOC_PTR2]] to i32**
// CHECK: store i32* [[PTR2]], i32** [[CAST_PTR2]]{{.+}}


// actual target invocation
// CHECK: [[BASES_GEP:%.+]] = getelementptr {{.+}} [3 x {{.+}}*], [3 x {{.+}}*]* [[BASE_PTRS]], {{.+}} 0, {{.+}} 0
// CHECK: [[PTRS_GEP:%.+]] = getelementptr {{.+}} [3 x {{.+}}*], [3 x {{.+}}*]* [[PTRS]], {{.+}} 0, {{.+}} 0
// CHECK: {{%.+}} = call{{.+}} @__tgt_target_teams_mapper(%struct.ident_t* @{{.+}}, {{.+}}, {{.+}}, {{.+}}, i8** [[BASES_GEP]], i8** [[PTRS_GEP]], i[[PTRSZ]]* getelementptr inbounds ([3 x i{{.+}}], [3 x i{{.+}}]* [[SIZES]], i{{.+}} 0, i{{.+}} 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* [[TYPES]], i{{.+}} 0, i{{.+}} 0), i8** null, i8** null, {{.+}}, {{.+}})


  omp_loop(0,100,body);
  omp_loop_ref(0,100,body);
}
120 
121 // CHECK: [[BASE_PTRS:%.+]] = alloca [5 x i8*]{{.+}}
122 // CHECK: [[PTRS:%.+]] = alloca [5 x i8*]{{.+}}
123 
124 // access of pointers inside lambdas
125 // CHECK: [[BASE_PTR1:%.+]] = getelementptr inbounds [[ANON_T]], [[ANON_T]]* %{{.+}}, i{{.+}} 0, i{{.+}} 0
126 // CHECK: [[PTR1:%.+]] = load i32*, i32** [[BASE_PTR1]]
127 // CHECK: [[BASE_PTR2:%.+]] = getelementptr inbounds [[ANON_T]], [[ANON_T]]* %{{.+}}, i{{.+}} 0, i{{.+}} 1
128 // CHECK: [[PTR2:%.+]] = load i32*, i32** [[BASE_PTR2]]
129 
130 // storage of pointers in baseptrs and ptrs arrays
131 // CHECK: [[LOC_LAMBDA:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BASE_PTRS]], i{{.+}} 0, i{{.+}} 2
132 // CHECK: [[CAST_LAMBDA:%.+]] = bitcast i8** [[LOC_LAMBDA]] to [[ANON_T]]**
133 // CHECK: store [[ANON_T]]* %{{.+}}, [[ANON_T]]** [[CAST_LAMBDA]]{{.+}}
134 // CHECK: [[LOC_LAMBDA:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[PTRS]], i{{.+}} 0, i{{.+}} 2
135 // CHECK: [[CAST_LAMBDA:%.+]] = bitcast i8** [[LOC_LAMBDA]] to [[ANON_T]]**
136 // CHECK: store [[ANON_T]]* %{{.+}}, [[ANON_T]]** [[CAST_LAMBDA]]{{.+}}
137 
138 // CHECK: [[LOC_PTR1:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BASE_PTRS]], i{{.+}} 0, i{{.+}} 3
139 // CHECK: [[CAST_PTR1:%.+]] = bitcast i8** [[LOC_PTR1]] to i32***
140 // CHECK: store i32** [[BASE_PTR1]], i32*** [[CAST_PTR1]]{{.+}}
141 // CHECK: [[LOC_PTR1:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[PTRS]], i{{.+}} 0, i{{.+}} 3
142 // CHECK: [[CAST_PTR1:%.+]] = bitcast i8** [[LOC_PTR1]] to i32**
143 // CHECK: store i32* [[PTR1]], i32** [[CAST_PTR1]]{{.+}}
144 
145 
146 // CHECK: [[LOC_PTR2:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BASE_PTRS]], i{{.+}} 0, i{{.+}} 4
147 // CHECK: [[CAST_PTR2:%.+]] = bitcast i8** [[LOC_PTR2]] to i32***
148 // CHECK: store i32** [[BASE_PTR2]], i32*** [[CAST_PTR2]]{{.+}}
149 // CHECK: [[LOC_PTR2:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[PTRS]], i{{.+}} 0, i{{.+}} 4
150 // CHECK: [[CAST_PTR2:%.+]] = bitcast i8** [[LOC_PTR2]] to i32**
151 // CHECK: store i32* [[PTR2]], i32** [[CAST_PTR2]]{{.+}}
152 
153 
154 // actual target invocation
155 // CHECK: [[BASES_GEP:%.+]] = getelementptr {{.+}} [5 x {{.+}}*], [5 x {{.+}}*]* [[BASE_PTRS]], {{.+}} 0, {{.+}} 0
156 // CHECK: [[PTRS_GEP:%.+]] = getelementptr {{.+}} [5 x {{.+}}*], [5 x {{.+}}*]* [[PTRS]], {{.+}} 0, {{.+}} 0
157 // CHECK: {{%.+}} = call{{.+}} @__tgt_target_teams_mapper(%struct.ident_t* @{{.+}}, {{.+}}, {{.+}}, {{.+}}, i8** [[BASES_GEP]], i8** [[PTRS_GEP]], i[[PTRSZ]]* getelementptr inbounds ([5 x i{{.+}}], [5 x i{{.+}}]* [[SIZES_TEMPLATE]], i{{.+}} 0, i{{.+}} 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* [[TYPES_TEMPLATE]], i{{.+}} 0, i{{.+}} 0), i8** null, i8** null, {{.+}}, {{.+}})
158 
159 // CHECK: define internal void @{{.+}}omp_loop_ref{{.+}}(
160 // CHECK: [[BODY:%body.addr]] = alloca %class.anon*
161 // CHECK: [[TMP:%tmp]] = alloca %class.anon*
162 // CHECK: [[BODY_REF:%body_ref]] = alloca %class.anon.1*
163 // CHECK: [[REF_TMP:%ref.tmp]] = alloca %class.anon.1
164 // CHECK: [[TMP8:%tmp.+]] = alloca %class.anon.1*
165 // CHECK: [[L0:%.+]] = load %class.anon*, %class.anon** [[BODY]]
166 // CHECK: store %class.anon* [[L0]], %class.anon** [[TMP]]
167 // CHECK: [[L5:%.+]] = load %class.anon*, %class.anon** [[TMP]]
// CHECK-NOT: [[L6:%.+]] = load %class.anon*, %class.anon** [[TMP]]
// CHECK-NOT: [[L7:%.+]] = load %class.anon*, %class.anon** [[TMP]]
170 // CHECK: store %class.anon.1* [[REF_TMP]], %class.anon.1** [[BODY_REF]]
// CHECK: [[L47:%.+]] = load %class.anon.1*, %class.anon.1** [[BODY_REF]]
172 // CHECK: store %class.anon.1* [[L47]], %class.anon.1** [[TMP8]]
173 // CHECK: [[L48:%.+]] = load %class.anon.1*, %class.anon.1** [[TMP8]]
174 // CHECK-NOT: [[L49:%.+]] = load %class.anon.1*, %class.anon.1** [[TMP8]]
175 // CHECK-NOT: [[L50:%.+]] = load %class.anon.1*, %class.anon.1** [[TMP8]]
176 // CHECK: ret void
177 
178 // CHECK: define internal void @{{.+}}xoo{{.+}}(
179 // CHECK: [[FADDR:%f.addr]] = alloca %class.anon.0*
180 // CHECK: [[L0:%.+]] = load %class.anon.0*, %class.anon.0** [[FADDR]]
181 // CHECK: store %class.anon.0* [[L0]], %class.anon.0** [[TMP:%tmp]]
182 // CHECK: [[L1:%.+]] = load %class.anon.0*, %class.anon.0** [[TMP]]
183 // CHECK-NOT: %4 = load %class.anon.0*, %class.anon.0** [[TMP]]
184 // CHECK-NOT: %5 = load %class.anon.0*, %class.anon.0** [[TMP]]
185 // CHECK: [[L4:%.+]] = getelementptr inbounds %class.anon.0, %class.anon.0* [[L1]], i32 0, i32 0
186 // CHECK: [[L5:%.+]] = load i{{.*}}*, i{{.*}}** [[L4]]
187 // CHECK: ret void
188 
189 #endif
190