// Test host codegen only.
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32

// expected-no-diagnostics
#ifndef HEADER
#define HEADER

// CHECK: [[ANON_T:%.+]] = type { i32*, i32* }
// CHECK-DAG: [[SIZES_TEMPLATE:@.+]] = private {{.+}} constant [5 x i[[PTRSZ:32|64]]] [i{{32|64}} 4, i{{32|64}} 4, i{{32|64}} {{8|16}}, i{{32|64}} 0, i{{32|64}} 0]
// CHECK-DAG: [[TYPES_TEMPLATE:@.+]] = private {{.+}} constant [5 x i64] [i64 800, i64 800, i64 673, i64 844424930132752, i64 844424930132752]
// CHECK-DAG: [[SIZES:@.+]] = private {{.+}} constant [3 x i[[PTRSZ:32|64]]] [i{{32|64}} {{8|16}}, i{{32|64}} 0, i{{32|64}} 0]
// CHECK-DAG: [[TYPES:@.+]] = private {{.+}} constant [3 x i64] [i64 673, i64 281474976711440, i64 281474976711440]
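
// Note: the map-type constants above are not explained in this file. As an
// informal reading (an assumption based on the values, not part of the test),
// they appear to decompose into the usual offload mapping flag bits:
//   800 = 0x320 = IMPLICIT | LITERAL | TARGET_PARAM (by-value scalars),
//   673 = 0x2A1 = IMPLICIT | PRIVATE | TARGET_PARAM | TO (the lambda object),
//   281474976711440 / 844424930132752 = MEMBER_OF(1) / MEMBER_OF(3) in the
//   high 16 bits plus 0x310 = IMPLICIT | LITERAL | PTR_AND_OBJ (the pointers
//   captured by the lambda, attached to their parent lambda entry).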

template <typename F>
void omp_loop(int start, int end, F body) {
#pragma omp target teams distribute parallel for
  for (int i = start; i < end; ++i) {
    body(i);
  }
}

// CHECK: define {{.*}}[[MAIN:@.+]](
int main()
{
  int* p = new int[100];
  int* q = new int[100];
  auto body = [=](int i){
    p[i] = q[i];
  };

#pragma omp target teams distribute parallel for
  for (int i = 0; i < 100; ++i) {
    body(i);
  }

// CHECK: [[BASE_PTRS:%.+]] = alloca [3 x i8*]{{.+}}
// CHECK: [[PTRS:%.+]] = alloca [3 x i8*]{{.+}}

// The first GEPs into the lambda, which store the captured pointer values across the function call, need to be ignored.
// CHECK: {{%.+}} = getelementptr inbounds [[ANON_T]], [[ANON_T]]* %{{.+}}, i{{.+}} 0, i{{.+}} 0
// CHECK: {{%.+}} = getelementptr inbounds [[ANON_T]], [[ANON_T]]* %{{.+}}, i{{.+}} 0, i{{.+}} 1

// access of the pointers captured inside the lambda
// CHECK: [[BASE_PTR1:%.+]] = getelementptr inbounds [[ANON_T]], [[ANON_T]]* %{{.+}}, i{{.+}} 0, i{{.+}} 0
// CHECK: [[PTR1:%.+]] = load i32*, i32** [[BASE_PTR1]]
// CHECK: [[BASE_PTR2:%.+]] = getelementptr inbounds [[ANON_T]], [[ANON_T]]* %{{.+}}, i{{.+}} 0, i{{.+}} 1
// CHECK: [[PTR2:%.+]] = load i32*, i32** [[BASE_PTR2]]

// storage of the pointers in the baseptrs and ptrs arrays
// CHECK: [[LOC_LAMBDA:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[BASE_PTRS]], i{{.+}} 0, i{{.+}} 0
// CHECK: [[CAST_LAMBDA:%.+]] = bitcast i8** [[LOC_LAMBDA]] to [[ANON_T]]**
// CHECK: store [[ANON_T]]* %{{.+}}, [[ANON_T]]** [[CAST_LAMBDA]]{{.+}}
// CHECK: [[LOC_LAMBDA:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[PTRS]], i{{.+}} 0, i{{.+}} 0
// CHECK: [[CAST_LAMBDA:%.+]] = bitcast i8** [[LOC_LAMBDA]] to [[ANON_T]]**
// CHECK: store [[ANON_T]]* %{{.+}}, [[ANON_T]]** [[CAST_LAMBDA]]{{.+}}

// CHECK: [[LOC_PTR1:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[BASE_PTRS]], i{{.+}} 0, i{{.+}} 1
// CHECK: [[CAST_PTR1:%.+]] = bitcast i8** [[LOC_PTR1]] to i32***
// CHECK: store i32** [[BASE_PTR1]], i32*** [[CAST_PTR1]]{{.+}}
// CHECK: [[LOC_PTR1:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[PTRS]], i{{.+}} 0, i{{.+}} 1
// CHECK: [[CAST_PTR1:%.+]] = bitcast i8** [[LOC_PTR1]] to i32**
// CHECK: store i32* [[PTR1]], i32** [[CAST_PTR1]]{{.+}}

// CHECK: [[LOC_PTR2:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[BASE_PTRS]], i{{.+}} 0, i{{.+}} 2
// CHECK: [[CAST_PTR2:%.+]] = bitcast i8** [[LOC_PTR2]] to i32***
// CHECK: store i32** [[BASE_PTR2]], i32*** [[CAST_PTR2]]{{.+}}
// CHECK: [[LOC_PTR2:%.+]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[PTRS]], i{{.+}} 0, i{{.+}} 2
// CHECK: [[CAST_PTR2:%.+]] = bitcast i8** [[LOC_PTR2]] to i32**
// CHECK: store i32* [[PTR2]], i32** [[CAST_PTR2]]{{.+}}

// actual target invocation
// CHECK: [[BASES_GEP:%.+]] = getelementptr {{.+}} [3 x {{.+}}*], [3 x {{.+}}*]* [[BASE_PTRS]], {{.+}} 0, {{.+}} 0
// CHECK: [[PTRS_GEP:%.+]] = getelementptr {{.+}} [3 x {{.+}}*], [3 x {{.+}}*]* [[PTRS]], {{.+}} 0, {{.+}} 0
// CHECK: {{%.+}} = call{{.+}} @__tgt_target_teams_mapper(%struct.ident_t* @{{.+}}, {{.+}}, {{.+}}, {{.+}}, i8** [[BASES_GEP]], i8** [[PTRS_GEP]], i[[PTRSZ]]* getelementptr inbounds ([3 x i{{.+}}], [3 x i{{.+}}]* [[SIZES]], i{{.+}} 0, i{{.+}} 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* [[TYPES]], i{{.+}} 0, i{{.+}} 0), i8** null, i8** null, {{.+}}, {{.+}})

  omp_loop(0, 100, body);
}
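
// The checks below cover the omp_loop instantiation. Its mapping has five
// entries, presumably the two by-value scalars 'start' and 'end', the lambda
// object, and the two pointers it captures; only the lambda and pointer
// entries (indices 2-4) are checked individually here.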

// CHECK: [[BASE_PTRS:%.+]] = alloca [5 x i8*]{{.+}}
// CHECK: [[PTRS:%.+]] = alloca [5 x i8*]{{.+}}

// access of the pointers captured inside the lambda
// CHECK: [[BASE_PTR1:%.+]] = getelementptr inbounds [[ANON_T]], [[ANON_T]]* %{{.+}}, i{{.+}} 0, i{{.+}} 0
// CHECK: [[PTR1:%.+]] = load i32*, i32** [[BASE_PTR1]]
// CHECK: [[BASE_PTR2:%.+]] = getelementptr inbounds [[ANON_T]], [[ANON_T]]* %{{.+}}, i{{.+}} 0, i{{.+}} 1
// CHECK: [[PTR2:%.+]] = load i32*, i32** [[BASE_PTR2]]

// storage of the pointers in the baseptrs and ptrs arrays
// CHECK: [[LOC_LAMBDA:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BASE_PTRS]], i{{.+}} 0, i{{.+}} 2
// CHECK: [[CAST_LAMBDA:%.+]] = bitcast i8** [[LOC_LAMBDA]] to [[ANON_T]]**
// CHECK: store [[ANON_T]]* %{{.+}}, [[ANON_T]]** [[CAST_LAMBDA]]{{.+}}
// CHECK: [[LOC_LAMBDA:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[PTRS]], i{{.+}} 0, i{{.+}} 2
// CHECK: [[CAST_LAMBDA:%.+]] = bitcast i8** [[LOC_LAMBDA]] to [[ANON_T]]**
// CHECK: store [[ANON_T]]* %{{.+}}, [[ANON_T]]** [[CAST_LAMBDA]]{{.+}}

// CHECK: [[LOC_PTR1:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BASE_PTRS]], i{{.+}} 0, i{{.+}} 3
// CHECK: [[CAST_PTR1:%.+]] = bitcast i8** [[LOC_PTR1]] to i32***
// CHECK: store i32** [[BASE_PTR1]], i32*** [[CAST_PTR1]]{{.+}}
// CHECK: [[LOC_PTR1:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[PTRS]], i{{.+}} 0, i{{.+}} 3
// CHECK: [[CAST_PTR1:%.+]] = bitcast i8** [[LOC_PTR1]] to i32**
// CHECK: store i32* [[PTR1]], i32** [[CAST_PTR1]]{{.+}}

// CHECK: [[LOC_PTR2:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BASE_PTRS]], i{{.+}} 0, i{{.+}} 4
// CHECK: [[CAST_PTR2:%.+]] = bitcast i8** [[LOC_PTR2]] to i32***
// CHECK: store i32** [[BASE_PTR2]], i32*** [[CAST_PTR2]]{{.+}}
// CHECK: [[LOC_PTR2:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[PTRS]], i{{.+}} 0, i{{.+}} 4
// CHECK: [[CAST_PTR2:%.+]] = bitcast i8** [[LOC_PTR2]] to i32**
// CHECK: store i32* [[PTR2]], i32** [[CAST_PTR2]]{{.+}}

// actual target invocation
// CHECK: [[BASES_GEP:%.+]] = getelementptr {{.+}} [5 x {{.+}}*], [5 x {{.+}}*]* [[BASE_PTRS]], {{.+}} 0, {{.+}} 0
// CHECK: [[PTRS_GEP:%.+]] = getelementptr {{.+}} [5 x {{.+}}*], [5 x {{.+}}*]* [[PTRS]], {{.+}} 0, {{.+}} 0
// CHECK: {{%.+}} = call{{.+}} @__tgt_target_teams_mapper(%struct.ident_t* @{{.+}}, {{.+}}, {{.+}}, {{.+}}, i8** [[BASES_GEP]], i8** [[PTRS_GEP]], i[[PTRSZ]]* getelementptr inbounds ([5 x i{{.+}}], [5 x i{{.+}}]* [[SIZES_TEMPLATE]], i{{.+}} 0, i{{.+}} 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* [[TYPES_TEMPLATE]], i{{.+}} 0, i{{.+}} 0), i8** null, i8** null, {{.+}}, {{.+}})

#endif