// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -x c++ -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s

// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -x c++ -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

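// Stand-in declarations for the predefined OpenMP allocator handles; only
// omp_high_bw_mem_alloc is used by the allocate clause below.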
typedef void **omp_allocator_handle_t;
extern const omp_allocator_handle_t omp_default_mem_alloc;
extern const omp_allocator_handle_t omp_large_cap_mem_alloc;
extern const omp_allocator_handle_t omp_const_mem_alloc;
extern const omp_allocator_handle_t omp_high_bw_mem_alloc;
extern const omp_allocator_handle_t omp_low_lat_mem_alloc;
extern const omp_allocator_handle_t omp_cgroup_mem_alloc;
extern const omp_allocator_handle_t omp_pteam_mem_alloc;
extern const omp_allocator_handle_t omp_thread_mem_alloc;

// CHECK: [[PRIVATES:%.+]] = type { i8*, i8* }

struct S {
  int a;
  S() : a(0) {}
  S(const S&) {}
  S& operator=(const S&) {return *this;}
  ~S() {}
  friend S operator+(const S&a, const S&b) {return a;}
};


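// Nested taskgroups with task_reduction clauses; the innermost task picks up
// items from both enclosing taskgroups via in_reduction and uses an allocate
// clause for the VLA 'd'.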
int main(int argc, char **argv) {
  int a;
  float b;
  S c[5];
  short d[argc];
#pragma omp taskgroup task_reduction(+: a, b, argc)
  {
#pragma omp taskgroup task_reduction(-:c, d)
#pragma omp parallel
#pragma omp task in_reduction(+:a) in_reduction(-:d) allocate(omp_high_bw_mem_alloc: d)
    a += d[a];
  }
  return 0;
}

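// Each taskgroup calls __kmpc_task_reduction_init (3 reduction items for the
// outer taskgroup, 2 for the inner one), and both reduction descriptors are
// passed to the outlined parallel function.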
// CHECK-LABEL: @main
// CHECK:       void @__kmpc_taskgroup(%struct.ident_t* @0, i32 [[GTID:%.+]])
// CHECK:       [[TD1:%.+]] = call i8* @__kmpc_task_reduction_init(i32 [[GTID]], i32 3, i8* %
// CHECK-NEXT:  store i8* [[TD1]], i8** [[TD1_ADDR:%[^,]+]],
// CHECK-NEXT:  call void @__kmpc_taskgroup(%struct.ident_t* @0, i32 [[GTID]])
// CHECK:       [[TD2:%.+]] = call i8* @__kmpc_task_reduction_init(i32 [[GTID]], i32 2, i8* %
// CHECK-NEXT:  store i8* [[TD2]], i8** [[TD2_ADDR:%[^,]+]],
// CHECK-NEXT:  call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @0, i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i16*, i8**, i8**)* [[OMP_PARALLEL:@.+]] to void (i32*, i32*, ...)*), i32* %{{.+}}, i64 %{{.+}}, i16* %{{.+}}, i8** [[TD1_ADDR]], i8** [[TD2_ADDR]])
// CHECK-NEXT:  call void @__kmpc_end_taskgroup(%struct.ident_t* @0, i32 [[GTID]])
// CHECK-NEXT:  call void @__kmpc_end_taskgroup(%struct.ident_t* @0, i32 [[GTID]])

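// The outlined parallel function allocates the task and copies both reduction
// descriptors into the task's private data before enqueueing it with
// __kmpc_omp_task.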
// CHECK:       define internal void [[OMP_PARALLEL]](
// CHECK:       [[TASK_T:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @0, i32 [[GTID:%.+]], i32 1, i64 56, i64 40, i32 (i32, i8*)* bitcast (i32 (i32, [[T:%.+]]*)* [[OMP_TASK:@.+]] to i32 (i32, i8*)*))
// CHECK-NEXT:  [[TASK_T_WITH_PRIVS:%.+]] = bitcast i8* [[TASK_T]] to [[T]]*
// CHECK:       [[PRIVS:%.+]] = getelementptr inbounds [[T]], [[T]]* [[TASK_T_WITH_PRIVS]], i32 0, i32 1
// CHECK:       [[TD1_REF:%.+]] = getelementptr inbounds [[PRIVATES]], [[PRIVATES]]* [[PRIVS]], i32 0, i32 0
// CHECK-NEXT:  [[TD1_SHAR:%.+]] = getelementptr inbounds %
// CHECK-NEXT:  [[TD1_ADDR:%.+]] = load i8**, i8*** [[TD1_SHAR]],
// CHECK-NEXT:  [[TD1:%.+]] = load i8*, i8** [[TD1_ADDR]],
// CHECK-NEXT:  store i8* [[TD1]], i8** [[TD1_REF]],
// CHECK-NEXT:  [[TD2_REF:%.+]] = getelementptr inbounds [[PRIVATES]], [[PRIVATES]]* [[PRIVS]], i32 0, i32 1
// CHECK-NEXT:  [[TD2_SHAR:%.+]] = getelementptr inbounds %
// CHECK-NEXT:  [[TD2_ADDR:%.+]] = load i8**, i8*** [[TD2_SHAR]],
// CHECK-NEXT:  [[TD2:%.+]] = load i8*, i8** [[TD2_ADDR]],
// CHECK-NEXT:  store i8* [[TD2]], i8** [[TD2_REF]],
// CHECK-NEXT:  call i32 @__kmpc_omp_task(%struct.ident_t* @0, i32 [[GTID]], i8* [[TASK_T]])
// CHECK-NEXT:  ret void
// CHECK-NEXT:  }

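// The task entry function retrieves the reduction descriptors from its
// privates and resolves the thread-specific copies of 'a' and 'd' through
// __kmpc_task_reduction_get_th_data.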
// CHECK:       define internal {{.*}} [[OMP_TASK]](
// CHECK:       call void (i8*, ...) %{{[^(]+}}(i8* %{{.+}}, i8*** [[TD1_REF:%[^,]+]], i8*** [[TD2_REF:%[^,]+]])
// CHECK-NEXT:  [[TD1_ADDR:%.+]] = load i8**, i8*** [[TD1_REF]],
// CHECK-NEXT:  [[TD2_ADDR:%.+]] = load i8**, i8*** [[TD2_REF]],
// CHECK-NEXT:  [[A_REF:%.+]] = getelementptr inbounds %
// CHECK-NEXT:  [[A_ADDR:%.+]] = load i32*, i32** [[A_REF]],
// CHECK-NEXT:  [[TD1:%.+]] = load i8*, i8** [[TD1_ADDR]],
// CHECK-NEXT:  [[GTID:%.+]] = load i32, i32* %
// CHECK-NEXT:  [[A_PTR:%.+]] = bitcast i32* [[A_ADDR]] to i8*
// CHECK-NEXT:  call i8* @__kmpc_task_reduction_get_th_data(i32 [[GTID]], i8* [[TD1]], i8* [[A_PTR]])
// CHECK:       [[D_REF:%.+]] = getelementptr inbounds %
// CHECK-NEXT:  [[D_ADDR:%.+]] = load i16*, i16** [[D_REF]],
// CHECK:       call i8* @__kmpc_threadprivate_cached(
// CHECK:       [[TD2:%.+]] = load i8*, i8** [[TD2_ADDR]],
// CHECK-NEXT:  [[D_PTR:%.+]] = bitcast i16* [[D_ADDR]] to i8*
// CHECK-NEXT:  call i8* @__kmpc_task_reduction_get_th_data(i32 [[GTID]], i8* [[TD2]], i8* [[D_PTR]])
// CHECK:       add nsw i32
// CHECK:       store i32 %
// CHECK-NOT:   call i8* @__kmpc_threadprivate_cached(
#endif