1 // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -x c++ -emit-llvm %s -o - | FileCheck %s
2 // RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
3 // RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
4 
5 // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -x c++ -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
6 // RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
7 // RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
8 // SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
9 // expected-no-diagnostics
10 #ifndef HEADER
11 #define HEADER
12 
13 typedef void **omp_allocator_handle_t;
14 extern const omp_allocator_handle_t omp_default_mem_alloc;
15 extern const omp_allocator_handle_t omp_large_cap_mem_alloc;
16 extern const omp_allocator_handle_t omp_const_mem_alloc;
17 extern const omp_allocator_handle_t omp_high_bw_mem_alloc;
18 extern const omp_allocator_handle_t omp_low_lat_mem_alloc;
19 extern const omp_allocator_handle_t omp_cgroup_mem_alloc;
20 extern const omp_allocator_handle_t omp_pteam_mem_alloc;
21 extern const omp_allocator_handle_t omp_thread_mem_alloc;
22 
23 // CHECK: [[PRIVATES:%.+]] = type { i8*, i8* }
24 
// Minimal value-like class used as the element type of the reduction array
// `c` in main(). Its user-provided special members force the OpenMP codegen
// to emit non-trivial init/combine/fini callbacks for the task_reduction
// item (a trivially-copyable type would take a different, simpler path).
struct S {
  int a;
  S() : a(0) {}
  // NOTE(review): the copy ctor deliberately leaves `a` uninitialized — the
  // test only checks that the copy path is emitted, not that it is sensible.
  S(const S&) {}
  S& operator=(const S&) {return *this;}
  ~S() {}
  // Reduction combiner used by the `-`/`+` task_reduction clauses on S arrays.
  friend S operator+(const S&a, const S&b) {return a;}
};
33 
34 
// Driver exercising nested taskgroup task_reduction with a task in_reduction
// that also carries an allocate() clause. The CHECK lines below match the IR
// this exact code produces — do not reformat or rename anything here.
int main(int argc, char **argv) {
  int a;
  float b;
  S c[5];        // array of non-trivial class type (see S above)
  short d[argc]; // VLA: forces a runtime-sized reduction item descriptor
  // Outer taskgroup: 3 scalar reduction items -> __kmpc_task_reduction_init
  // is expected with `i32 3` (first CHECK for TD1).
#pragma omp taskgroup task_reduction(+: a, b, argc)
  {
    // Inner taskgroup: 2 items (c, d) -> second init call with `i32 2` (TD2).
    // The task picks up `a` from the outer and `d` from the inner taskgroup;
    // allocate() routes d's private copy through omp_high_bw_mem_alloc,
    // which is what the __kmpc_threadprivate_cached CHECK verifies.
#pragma omp taskgroup task_reduction(-:c, d)
#pragma omp parallel
#pragma omp task in_reduction(+:a) in_reduction(-:d) allocate(omp_high_bw_mem_alloc: d)
    a += d[a];
  }
  // Task outside any live taskgroup: the reduction lookup is expected to be
  // emitted with a null taskgroup descriptor (the `i8* null` CHECK at the end).
#pragma omp task in_reduction(+:a)
  ++a;
  return 0;
}
51 
52 // CHECK-LABEL: @main
53 // CHECK:       void @__kmpc_taskgroup(%struct.ident_t* @0, i32 [[GTID:%.+]])
54 // CHECK:       [[TD1:%.+]] = call i8* @__kmpc_task_reduction_init(i32 [[GTID]], i32 3, i8* %
55 // CHECK-NEXT:  store i8* [[TD1]], i8** [[TD1_ADDR:%[^,]+]],
56 // CHECK-NEXT:  call void @__kmpc_taskgroup(%struct.ident_t* @0, i32 [[GTID]])
57 // CHECK:       [[TD2:%.+]] = call i8* @__kmpc_task_reduction_init(i32 [[GTID]], i32 2, i8* %
58 // CHECK-NEXT:  store i8* [[TD2]], i8** [[TD2_ADDR:%[^,]+]],
59 // CHECK-NEXT:  call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @0, i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i16*, i8**, i8**)* [[OMP_PARALLEL:@.+]] to void (i32*, i32*, ...)*), i32* %{{.+}}, i64 %{{.+}}, i16* %{{.+}}, i8** [[TD1_ADDR]], i8** [[TD2_ADDR]])
60 // CHECK-NEXT:  call void @__kmpc_end_taskgroup(%struct.ident_t* @0, i32 [[GTID]])
61 // CHECK-NEXT:  call void @__kmpc_end_taskgroup(%struct.ident_t* @0, i32 [[GTID]])
62 
63 // CHECK:       define internal void [[OMP_PARALLEL]](
64 // CHECK:       [[TASK_T:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @0, i32 [[GTID:%.+]], i32 1, i64 56, i64 40, i32 (i32, i8*)* bitcast (i32 (i32, [[T:%.+]]*)* [[OMP_TASK:@.+]] to i32 (i32, i8*)*))
65 // CHECK-NEXT:  [[TASK_T_WITH_PRIVS:%.+]] = bitcast i8* [[TASK_T]] to [[T]]*
66 // CHECK:       [[PRIVS:%.+]] = getelementptr inbounds [[T]], [[T]]* [[TASK_T_WITH_PRIVS]], i32 0, i32 1
67 // CHECK:       [[TD1_REF:%.+]] = getelementptr inbounds [[PRIVATES]], [[PRIVATES]]* [[PRIVS]], i32 0, i32 0
68 // CHECK-NEXT:  [[TD1:%.+]] = load i8*, i8** %{{.+}},
69 // CHECK-NEXT:  store i8* [[TD1]], i8** [[TD1_REF]],
70 // CHECK-NEXT:  [[TD2_REF:%.+]] = getelementptr inbounds [[PRIVATES]], [[PRIVATES]]* [[PRIVS]], i32 0, i32 1
71 // CHECK-NEXT:  [[TD2:%.+]] = load i8*, i8** %{{.+}},
72 // CHECK-NEXT:  store i8* [[TD2]], i8** [[TD2_REF]],
73 // CHECK-NEXT:  call i32 @__kmpc_omp_task(%struct.ident_t* @0, i32 [[GTID]], i8* [[TASK_T]])
74 // CHECK-NEXT:  ret void
75 // CHECK-NEXT:  }
76 
77 // CHECK:       define internal {{.*}} [[OMP_TASK]](
78 // CHECK:       call void (i8*, ...) %{{[^(]+}}(i8* %{{.+}}, i8*** [[TD1_REF:%[^,]+]], i8*** [[TD2_REF:%[^,]+]])
79 // CHECK-NEXT:  [[TD1_ADDR:%.+]] = load i8**, i8*** [[TD1_REF]],
80 // CHECK-NEXT:  [[TD2_ADDR:%.+]] = load i8**, i8*** [[TD2_REF]],
81 // CHECK-NEXT:  [[A_REF:%.+]] = getelementptr inbounds %
82 // CHECK-NEXT:  [[A_ADDR:%.+]] = load i32*, i32** [[A_REF]],
83 // CHECK-NEXT:  [[TD1:%.+]] = load i8*, i8** [[TD1_ADDR]],
84 // CHECK-NEXT:  [[GTID:%.+]] = load i32, i32* %
85 // CHECK-NEXT:  [[A_PTR:%.+]] = bitcast i32* [[A_ADDR]] to i8*
86 // CHECK-NEXT:  call i8* @__kmpc_task_reduction_get_th_data(i32 [[GTID]], i8* [[TD1]], i8* [[A_PTR]])
87 // CHECK:       [[D_REF:%.+]] = getelementptr inbounds %
88 // CHECK-NEXT:  [[D_ADDR:%.+]] = load i16*, i16** [[D_REF]],
89 // CHECK:       call i8* @__kmpc_threadprivate_cached(
90 // CHECK:       [[TD2:%.+]] = load i8*, i8** [[TD2_ADDR]],
91 // CHECK-NEXT:  [[D_PTR:%.+]] = bitcast i16* [[D_ADDR]] to i8*
92 // CHECK-NEXT:  call i8* @__kmpc_task_reduction_get_th_data(i32 [[GTID]], i8* [[TD2]], i8* [[D_PTR]])
93 // CHECK:       add nsw i32
94 // CHECK:       store i32 %
95 // CHECK-NOT:   call i8* @__kmpc_threadprivate_cached(
96 
97 // CHECK: [[A_PTR:%.+]] = call i8* @__kmpc_task_reduction_get_th_data(i32 %{{.+}}, i8* null, i8* %{{.+}})
98 // CHECK-NEXT: [[A_ADDR:%.+]] = bitcast i8* [[A_PTR]] to i32*
99 // CHECK-NEXT: [[A:%.+]] = load i32, i32* [[A_ADDR]],
100 // CHECK-NEXT: [[NEW:%.+]] = add nsw i32 [[A]], 1
101 // CHECK-NEXT: store i32 [[NEW]], i32* [[A_ADDR]],
102 #endif
103