// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s

// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics

#ifndef HEADER
#define HEADER

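// This test exercises codegen for OpenMP doacross loops: a 1-D loop nest in
// main() and a 2-D loop nest in the templated TestStruct constructor. The
// checks verify that each dimension is described by a three-i64 record
// (KMP_DIM; field 1 receives the per-dimension bound, field 2 the stride,
// field 0 stays zero from the memset), that the loop is bracketed by
// __kmpc_doacross_init and __kmpc_doacross_fini, and that ordered
// depend(source) and depend(sink) lower to __kmpc_doacross_post and
// __kmpc_doacross_wait, respectively.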
// CHECK: [[KMP_DIM:%.+]] = type { i64, i64, i64 }
extern int n;
int a[10], b[10], c[10], d[10];
void foo();

// CHECK-LABEL: @main()
int main() {
  int i;
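// The checks below cover the doacross setup: a [1 x KMP_DIM] array is
// allocated and zero-initialized (24-byte memset), dimension 0 gets a bound
// derived from n in field 1 and a stride of 1 in field 2, and the array is
// passed to __kmpc_doacross_init with num_dims == 1 before the usual
// __kmpc_for_static_init_4 call.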
// CHECK: [[DIMS:%.+]] = alloca [1 x [[KMP_DIM]]],
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT:%.+]])
// CHECK: icmp
// CHECK-NEXT: br i1 %
// CHECK: [[CAST:%.+]] = bitcast [1 x [[KMP_DIM]]]* [[DIMS]] to i8*
// CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 [[CAST]], i8 0, i64 24, i1 false)
// CHECK: [[DIM:%.+]] = getelementptr inbounds [1 x [[KMP_DIM]]], [1 x [[KMP_DIM]]]* [[DIMS]], i64 0, i64 0
// CHECK: getelementptr inbounds [[KMP_DIM]], [[KMP_DIM]]* [[DIM]], i32 0, i32 1
// CHECK: store i64 %{{.+}}, i64* %
// CHECK: getelementptr inbounds [[KMP_DIM]], [[KMP_DIM]]* [[DIM]], i32 0, i32 2
// CHECK: store i64 1, i64* %
// CHECK: [[DIM:%.+]] = getelementptr inbounds [1 x [[KMP_DIM]]], [1 x [[KMP_DIM]]]* [[DIMS]], i64 0, i64 0
// CHECK: [[CAST:%.+]] = bitcast [[KMP_DIM]]* [[DIM]] to i8*
// CHECK: call void @__kmpc_doacross_init([[IDENT]], i32 [[GTID]], i32 1, i8* [[CAST]])
// CHECK: call void @__kmpc_for_static_init_4(
#pragma omp for ordered(1)
  for (i = 0; i < n; ++i) {
    a[i] = b[i] + 1;
    foo();
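// depend(source) below is lowered to __kmpc_doacross_post: the current
// iteration number is computed as (i - 0) / 1, sign-extended to i64, and
// stored into a one-element vector whose address is passed to the runtime.
// depend(sink : i - 2) is lowered the same way, except that the sink offset 2
// is subtracted first and the vector is handed to __kmpc_doacross_wait.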
// CHECK: invoke void [[FOO:.+]](
// CHECK: load i32, i32* [[I:%.+]],
// CHECK-NEXT: sub nsw i32 %{{.+}}, 0
// CHECK-NEXT: sdiv i32 %{{.+}}, 1
// CHECK-NEXT: sext i32 %{{.+}} to i64
// CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [1 x i64], [1 x i64]* [[CNT:%.+]], i64 0, i64 0
// CHECK-NEXT: store i64 %{{.+}}, i64* [[TMP]],
// CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [1 x i64], [1 x i64]* [[CNT]], i64 0, i64 0
// CHECK-NEXT: call void @__kmpc_doacross_post([[IDENT]], i32 [[GTID]], i64* [[TMP]])
#pragma omp ordered depend(source)
    c[i] = c[i] + 1;
    foo();
// CHECK: invoke void [[FOO]]
// CHECK: load i32, i32* [[I]],
// CHECK-NEXT: sub nsw i32 %{{.+}}, 2
// CHECK-NEXT: sub nsw i32 %{{.+}}, 0
// CHECK-NEXT: sdiv i32 %{{.+}}, 1
// CHECK-NEXT: sext i32 %{{.+}} to i64
// CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [1 x i64], [1 x i64]* [[CNT:%.+]], i64 0, i64 0
// CHECK-NEXT: store i64 %{{.+}}, i64* [[TMP]],
// CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [1 x i64], [1 x i64]* [[CNT]], i64 0, i64 0
// CHECK-NEXT: call void @__kmpc_doacross_wait([[IDENT]], i32 [[GTID]], i64* [[TMP]])
#pragma omp ordered depend(sink : i - 2)
    d[i] = a[i - 2];
  }
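  // __kmpc_doacross_fini is expected twice: once on the landing-pad cleanup
  // path (foo() may throw, since -fexceptions is enabled and it is called via
  // invoke) and once on the normal path after __kmpc_for_static_fini.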
  // CHECK: landingpad
  // CHECK: call void @__kmpc_doacross_fini([[IDENT]], i32 [[GTID]])
  // CHECK: br label %

  // CHECK: call void @__kmpc_for_static_fini(
  // CHECK: call void @__kmpc_doacross_fini([[IDENT]], i32 [[GTID]])
  // CHECK: ret i32 0
  return 0;
}

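// The 2-D case, instantiated via TestStruct<int> at the end of the file:
// ordered(2) associates both loops, so the dimension array holds two KMP_DIM
// records (48-byte memset). Dimension 0 describes the j loop and dimension 1
// the i loop; field 2 is stored as 1 for both, while the sink/source
// iteration numbers below divide by the actual source step (1 for j, 2 for i).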
// CHECK: define {{.+}}TestStruct
template <typename T>
struct TestStruct {
  static const int M = 10;
  static const int N = 20;
  T i;
  T a[N][M];
  T b[N][M];
  T foo(T, T);
  T bar(T, T, T);
  void baz(T, T);
  TestStruct() {
// CHECK: [[DIMS:%.+]] = alloca [2 x [[KMP_DIM]]],
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT:%.+]])
// CHECK: [[CAST:%.+]] = bitcast [2 x [[KMP_DIM]]]* [[DIMS]] to i8*
// CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 [[CAST]], i8 0, i64 48, i1 false)
// CHECK: [[DIM:%.+]] = getelementptr inbounds [2 x [[KMP_DIM]]], [2 x [[KMP_DIM]]]* [[DIMS]], i64 0, i64 0
// CHECK: getelementptr inbounds [[KMP_DIM]], [[KMP_DIM]]* [[DIM]], i32 0, i32 1
// CHECK: store i64 10, i64* %
// CHECK: getelementptr inbounds [[KMP_DIM]], [[KMP_DIM]]* [[DIM]], i32 0, i32 2
// CHECK: store i64 1, i64* %
// CHECK: [[DIM:%.+]] = getelementptr inbounds [2 x [[KMP_DIM]]], [2 x [[KMP_DIM]]]* [[DIMS]], i64 0, i64 1
// CHECK: getelementptr inbounds [[KMP_DIM]], [[KMP_DIM]]* [[DIM]], i32 0, i32 1
// CHECK: store i64 %{{.+}}, i64* %
// CHECK: getelementptr inbounds [[KMP_DIM]], [[KMP_DIM]]* [[DIM]], i32 0, i32 2
// CHECK: store i64 1, i64* %
// CHECK: [[DIM:%.+]] = getelementptr inbounds [2 x [[KMP_DIM]]], [2 x [[KMP_DIM]]]* [[DIMS]], i64 0, i64 0
// CHECK: [[CAST:%.+]] = bitcast [[KMP_DIM]]* [[DIM]] to i8*
// CHECK: call void @__kmpc_doacross_init([[IDENT]], i32 [[GTID]], i32 2, i8* [[CAST]])
// CHECK: call void @__kmpc_for_static_init_4(
#pragma omp for ordered(2)
    for (T j = 0; j < M; j++)
      for (i = 0; i < n; i += 2) {
        a[i][j] = foo(i, j);
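// Each depend(sink) vector produces its own __kmpc_doacross_wait call. For
// depend(sink : j, i - 2) the vector holds (j - 0)/1 and ((i - 2) - 0)/2; for
// depend(sink : j - 1, i) it holds ((j - 1) - 0)/1 and (i - 0)/2. The
// depend(source) form stores (j - 0)/1 and (i - 0)/2 and calls
// __kmpc_doacross_post instead.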
// CHECK: invoke {{.+TestStruct.+foo}}
// CHECK: load i32*, i32** %
// CHECK: load i32, i32* %
// CHECK: load i32, i32* %
// CHECK: load i32, i32* [[J:%.+]],
// CHECK-NEXT: sub nsw i32 %{{.+}}, 0
// CHECK-NEXT: sdiv i32 %{{.+}}, 1
// CHECK-NEXT: sext i32 %{{.+}} to i64
// CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [2 x i64], [2 x i64]* [[CNT:%.+]], i64 0, i64 0
// CHECK-NEXT: store i64 %{{.+}}, i64* [[TMP:%.+]],
// CHECK-NEXT: [[I:%.+]] = load i32*, i32** [[I_REF:%.+]],
// CHECK-NEXT: load i32, i32* [[I]],
// CHECK-NEXT: sub nsw i32 %{{.+}}, 2
// CHECK-NEXT: sub nsw i32 %{{.+}}, 0
// CHECK-NEXT: sdiv i32 %{{.+}}, 2
// CHECK-NEXT: sext i32 %{{.+}} to i64
// CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [2 x i64], [2 x i64]* [[CNT]], i64 0, i64 1
// CHECK-NEXT: store i64 %{{.+}}, i64* [[TMP]],
// CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [2 x i64], [2 x i64]* [[CNT]], i64 0, i64 0
// CHECK-NEXT: call void @__kmpc_doacross_wait([[IDENT]], i32 [[GTID]], i64* [[TMP]])
// CHECK-NEXT: load i32, i32* [[J:%.+]],
// CHECK-NEXT: sub nsw i32 %{{.+}}, 1
// CHECK-NEXT: sub nsw i32 %{{.+}}, 0
// CHECK-NEXT: sdiv i32 %{{.+}}, 1
// CHECK-NEXT: sext i32 %{{.+}} to i64
// CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [2 x i64], [2 x i64]* [[CNT:%.+]], i64 0, i64 0
// CHECK-NEXT: store i64 %{{.+}}, i64* [[TMP:%.+]],
// CHECK-NEXT: [[I:%.+]] = load i32*, i32** [[I_REF]],
// CHECK-NEXT: load i32, i32* [[I]],
// CHECK-NEXT: sub nsw i32 %{{.+}}, 0
// CHECK-NEXT: sdiv i32 %{{.+}}, 2
// CHECK-NEXT: sext i32 %{{.+}} to i64
// CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [2 x i64], [2 x i64]* [[CNT]], i64 0, i64 1
// CHECK-NEXT: store i64 %{{.+}}, i64* [[TMP]],
// CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [2 x i64], [2 x i64]* [[CNT]], i64 0, i64 0
// CHECK-NEXT: call void @__kmpc_doacross_wait([[IDENT]], i32 [[GTID]], i64* [[TMP]])
#pragma omp ordered depend(sink : j, i - 2) depend(sink : j - 1, i)
        b[i][j] = bar(a[i][j], b[i - 1][j], b[i][j - 1]);
// CHECK: invoke {{.+TestStruct.+bar}}
// CHECK: load i32*, i32** %
// CHECK: load i32, i32* %
// CHECK: load i32, i32* %
// CHECK: load i32, i32* [[J]],
// CHECK-NEXT: sub nsw i32 %{{.+}}, 0
// CHECK-NEXT: sdiv i32 %{{.+}}, 1
// CHECK-NEXT: sext i32 %{{.+}} to i64
// CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [2 x i64], [2 x i64]* [[CNT:%.+]], i64 0, i64 0
// CHECK-NEXT: store i64 %{{.+}}, i64* [[TMP:%.+]],
// CHECK-NEXT: [[I:%.+]] = load i32*, i32** [[I_REF]],
// CHECK-NEXT: load i32, i32* [[I]],
// CHECK-NEXT: sub nsw i32 %{{.+}}, 0
// CHECK-NEXT: sdiv i32 %{{.+}}, 2
// CHECK-NEXT: sext i32 %{{.+}} to i64
// CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [2 x i64], [2 x i64]* [[CNT]], i64 0, i64 1
// CHECK-NEXT: store i64 %{{.+}}, i64* [[TMP]],
// CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [2 x i64], [2 x i64]* [[CNT]], i64 0, i64 0
// CHECK-NEXT: call void @__kmpc_doacross_post([[IDENT]], i32 [[GTID]], i64* [[TMP]])
#pragma omp ordered depend(source)
        baz(a[i][j], b[i][j]);
      }
  }
  // CHECK: landingpad
  // CHECK: call void @__kmpc_doacross_fini([[IDENT]], i32 [[GTID]])
  // CHECK: br label %

  // CHECK: call void @__kmpc_for_static_fini(
  // CHECK: call void @__kmpc_doacross_fini([[IDENT]], i32 [[GTID]])
  // CHECK: ret
};

TestStruct<int> s;
#endif // HEADER