//===- Synchronization.cpp - OpenMP Device synchronization API --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implements all device-side synchronization: barriers, fences, atomics, and
// locks.
//
//===----------------------------------------------------------------------===//

#include "Synchronization.h"

#include "Debug.h"
#include "Interface.h"
#include "Mapping.h"
#include "State.h"
#include "Types.h"
#include "Utils.h"

#pragma omp declare target

using namespace _OMP;

namespace impl {

/// Atomics
///
///{
/// NOTE: This function needs to be implemented by every target.
uint32_t atomicInc(uint32_t *Address, uint32_t Val, int Ordering);

// An atomic load is emulated with an atomic add of zero so that the read
// participates in the requested ordering.
uint32_t atomicLoad(uint32_t *Address, int Ordering) {
  return __atomic_fetch_add(Address, 0U, Ordering);
}

void atomicStore(uint32_t *Address, uint32_t Val, int Ordering) {
  __atomic_store_n(Address, Val, Ordering);
}

uint32_t atomicAdd(uint32_t *Address, uint32_t Val, int Ordering) {
  return __atomic_fetch_add(Address, Val, Ordering);
}
uint32_t atomicMax(uint32_t *Address, uint32_t Val, int Ordering) {
  return __atomic_fetch_max(Address, Val, Ordering);
}

uint32_t atomicExchange(uint32_t *Address, uint32_t Val, int Ordering) {
  uint32_t R;
  __atomic_exchange(Address, &Val, &R, Ordering);
  return R;
}
uint32_t atomicCAS(uint32_t *Address, uint32_t Compare, uint32_t Val,
                   int Ordering) {
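  // __atomic_compare_exchange stores the value it observed at Address back
  // into Compare, so returning Compare yields the old value whether or not
  // the exchange succeeded.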
  (void)__atomic_compare_exchange(Address, &Compare, &Val, false, Ordering,
                                  Ordering);
  return Compare;
}

uint64_t atomicAdd(uint64_t *Address, uint64_t Val, int Ordering) {
  return __atomic_fetch_add(Address, Val, Ordering);
}
///}

/// AMDGCN Implementation
///
///{
#pragma omp begin declare variant match(device = {arch(amdgcn)})

uint32_t atomicInc(uint32_t *A, uint32_t V, int Ordering) {
  // builtin_amdgcn_atomic_inc32 should expand to this switch when
  // passed a runtime value, but does not do so yet. Workaround here.
  switch (Ordering) {
  default:
    __builtin_unreachable();
  case __ATOMIC_RELAXED:
    return __builtin_amdgcn_atomic_inc32(A, V, __ATOMIC_RELAXED, "");
  case __ATOMIC_ACQUIRE:
    return __builtin_amdgcn_atomic_inc32(A, V, __ATOMIC_ACQUIRE, "");
  case __ATOMIC_RELEASE:
    return __builtin_amdgcn_atomic_inc32(A, V, __ATOMIC_RELEASE, "");
  case __ATOMIC_ACQ_REL:
    return __builtin_amdgcn_atomic_inc32(A, V, __ATOMIC_ACQ_REL, "");
  case __ATOMIC_SEQ_CST:
    return __builtin_amdgcn_atomic_inc32(A, V, __ATOMIC_SEQ_CST, "");
  }
}

uint32_t SHARED(namedBarrierTracker);

void namedBarrierInit() {
  // There are no global constructors on the device and shared memory is not
  // zero-initialized, so initialize the tracker explicitly.
  atomic::store(&namedBarrierTracker, 0u, __ATOMIC_RELEASE);
}

void namedBarrier() {
  uint32_t NumThreads = omp_get_num_threads();
  // assert(NumThreads % 32 == 0);

  uint32_t WarpSize = mapping::getWarpSize();
  uint32_t NumWaves = NumThreads / WarpSize;

  fence::team(__ATOMIC_ACQUIRE);

  // Named barrier implementation for AMDGCN.
  // Uses two 16-bit unsigned counters: one for the number of waves that have
  // reached the barrier, and one for how many times the barrier has been
  // passed. These are packed into a single atomically accessed 32-bit
  // integer. Low bits hold the number of waves, assumed zero before this
  // call. High bits count the number of times the barrier has been passed.

  // precondition: NumWaves != 0;
  // invariant: NumWaves * WarpSize == NumThreads;
  // precondition: NumWaves < 0xffffu;
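  //
  // Illustration (not part of the algorithm): with NumWaves == 4 the tracker
  // counts 0, 1, 2 as the first three wave leaders arrive; the fourth leader
  // reads 3 from its fetch-add, then stores 0x00010000, which clears the wave
  // count and bumps the generation, releasing the spinning waves.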

  // Increment the low 16 bits once, using the lowest active thread.
  if (mapping::isLeaderInWarp()) {
    uint32_t load = atomic::add(&namedBarrierTracker, 1,
                                __ATOMIC_RELAXED); // commutative

    // Record the number of times the barrier has been passed
    uint32_t generation = load & 0xffff0000u;

    if ((load & 0x0000ffffu) == (NumWaves - 1)) {
      // Reached NumWaves in low bits so this is the last wave.
      // Set low bits to zero and increment high bits
      load += 0x00010000u; // Bump the generation count; wrap-around is safe
      load &= 0xffff0000u; // because the low bits are zeroed afterwards.

      // Reset the wave counter and release the waiting waves
      atomic::store(&namedBarrierTracker, load, __ATOMIC_RELAXED);
    } else {
      // More waves still to go, spin until the generation counter changes.
      do {
        __builtin_amdgcn_s_sleep(0);
        load = atomic::load(&namedBarrierTracker, __ATOMIC_RELAXED);
      } while ((load & 0xffff0000u) == generation);
    }
  }
  fence::team(__ATOMIC_RELEASE);
}

// Sema checking of __builtin_amdgcn_fence is aggressive. The intention is to
// patch clang so that it is usable within a template environment and so that
// a runtime value of the memory order is expanded to this switch within
// clang/llvm.
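//
// The scope strings below map the runtime's fence scopes onto AMDGCN sync
// scopes: "workgroup" fences the team, "agent" the whole device, and the
// empty string requests the default (system) scope.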
void fenceTeam(int Ordering) {
  switch (Ordering) {
  default:
    __builtin_unreachable();
  case __ATOMIC_ACQUIRE:
    return __builtin_amdgcn_fence(__ATOMIC_ACQUIRE, "workgroup");
  case __ATOMIC_RELEASE:
    return __builtin_amdgcn_fence(__ATOMIC_RELEASE, "workgroup");
  case __ATOMIC_ACQ_REL:
    return __builtin_amdgcn_fence(__ATOMIC_ACQ_REL, "workgroup");
  case __ATOMIC_SEQ_CST:
    return __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");
  }
}
void fenceKernel(int Ordering) {
  switch (Ordering) {
  default:
    __builtin_unreachable();
  case __ATOMIC_ACQUIRE:
    return __builtin_amdgcn_fence(__ATOMIC_ACQUIRE, "agent");
  case __ATOMIC_RELEASE:
    return __builtin_amdgcn_fence(__ATOMIC_RELEASE, "agent");
  case __ATOMIC_ACQ_REL:
    return __builtin_amdgcn_fence(__ATOMIC_ACQ_REL, "agent");
  case __ATOMIC_SEQ_CST:
    return __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "agent");
  }
}
void fenceSystem(int Ordering) {
  switch (Ordering) {
  default:
    __builtin_unreachable();
  case __ATOMIC_ACQUIRE:
    return __builtin_amdgcn_fence(__ATOMIC_ACQUIRE, "");
  case __ATOMIC_RELEASE:
    return __builtin_amdgcn_fence(__ATOMIC_RELEASE, "");
  case __ATOMIC_ACQ_REL:
    return __builtin_amdgcn_fence(__ATOMIC_ACQ_REL, "");
  case __ATOMIC_SEQ_CST:
    return __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "");
  }
}

void syncWarp(__kmpc_impl_lanemask_t) {
  // AMDGCN doesn't need to sync threads in a warp
}

void syncThreads() { __builtin_amdgcn_s_barrier(); }
void syncThreadsAligned() { syncThreads(); }

// TODO: Don't have wavefront lane locks. Possibly can't have them.
void unsetLock(omp_lock_t *) { __builtin_trap(); }
int testLock(omp_lock_t *) { __builtin_trap(); }
void initLock(omp_lock_t *) { __builtin_trap(); }
void destroyLock(omp_lock_t *) { __builtin_trap(); }
void setLock(omp_lock_t *) { __builtin_trap(); }

#pragma omp end declare variant
///}

/// NVPTX Implementation
///
///{
#pragma omp begin declare variant match(                                       \
    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})

uint32_t atomicInc(uint32_t *Address, uint32_t Val, int Ordering) {
  return __nvvm_atom_inc_gen_ui(Address, Val);
}

void namedBarrierInit() {}

void namedBarrier() {
  uint32_t NumThreads = omp_get_num_threads();
  ASSERT(NumThreads % 32 == 0);

  // The named barrier for active parallel threads of a team in an L1 parallel
  // region to synchronize with each other.
  constexpr int BarrierNo = 7;
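  // barrier.sync <id>, <count> blocks until <count> threads of the block have
  // arrived at named barrier <id>; barrier 0 is left for __syncthreads.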
  asm volatile("barrier.sync %0, %1;"
               :
               : "r"(BarrierNo), "r"(NumThreads)
               : "memory");
}

void fenceTeam(int) { __nvvm_membar_cta(); }

void fenceKernel(int) { __nvvm_membar_gl(); }

void fenceSystem(int) { __nvvm_membar_sys(); }

void syncWarp(__kmpc_impl_lanemask_t Mask) { __nvvm_bar_warp_sync(Mask); }

void syncThreads() {
  constexpr int BarrierNo = 8;
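  // barrier.sync without a thread count waits for all threads of the block;
  // a dedicated named barrier is used here, presumably to keep it distinct
  // from barrier 0 (__syncthreads) and barrier 7 (namedBarrier above).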
  asm volatile("barrier.sync %0;" : : "r"(BarrierNo) : "memory");
}

void syncThreadsAligned() { __syncthreads(); }

constexpr uint32_t OMP_SPIN = 1000;
constexpr uint32_t UNSET = 0;
constexpr uint32_t SET = 1;

// TODO: This seems to hide a bug in the declare variant handling. If it is
//       called before it is defined here, the overload won't happen.
//       Investigate later!
void unsetLock(omp_lock_t *Lock) {
  (void)atomicExchange((uint32_t *)Lock, UNSET, __ATOMIC_SEQ_CST);
}

int testLock(omp_lock_t *Lock) {
  return atomicAdd((uint32_t *)Lock, 0u, __ATOMIC_SEQ_CST);
}

void initLock(omp_lock_t *Lock) { unsetLock(Lock); }

void destroyLock(omp_lock_t *Lock) { unsetLock(Lock); }

void setLock(omp_lock_t *Lock) {
  // TODO: Not sure spinning is a good idea here...
  while (atomicCAS((uint32_t *)Lock, UNSET, SET, __ATOMIC_SEQ_CST) != UNSET) {
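    // Back off for roughly OMP_SPIN * block-id clock cycles before retrying
    // so that different blocks retry at staggered times; the ternary below
    // accounts for the 32-bit clock register wrapping around.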
    int32_t start = __nvvm_read_ptx_sreg_clock();
    int32_t now;
    for (;;) {
      now = __nvvm_read_ptx_sreg_clock();
      int32_t cycles = now > start ? now - start : now + (0xffffffff - start);
      if (cycles >= OMP_SPIN * mapping::getBlockId()) {
        break;
      }
    }
  } // wait for 0 to be the read value
}

#pragma omp end declare variant
///}

} // namespace impl

void synchronize::init(bool IsSPMD) {
  if (!IsSPMD)
    impl::namedBarrierInit();
}

void synchronize::warp(LaneMaskTy Mask) { impl::syncWarp(Mask); }

void synchronize::threads() { impl::syncThreads(); }

void synchronize::threadsAligned() { impl::syncThreadsAligned(); }

void fence::team(int Ordering) { impl::fenceTeam(Ordering); }

void fence::kernel(int Ordering) { impl::fenceKernel(Ordering); }

void fence::system(int Ordering) { impl::fenceSystem(Ordering); }

uint32_t atomic::load(uint32_t *Addr, int Ordering) {
  return impl::atomicLoad(Addr, Ordering);
}

void atomic::store(uint32_t *Addr, uint32_t V, int Ordering) {
  impl::atomicStore(Addr, V, Ordering);
}

uint32_t atomic::inc(uint32_t *Addr, uint32_t V, int Ordering) {
  return impl::atomicInc(Addr, V, Ordering);
}

uint32_t atomic::add(uint32_t *Addr, uint32_t V, int Ordering) {
  return impl::atomicAdd(Addr, V, Ordering);
}

uint64_t atomic::add(uint64_t *Addr, uint64_t V, int Ordering) {
  return impl::atomicAdd(Addr, V, Ordering);
}

extern "C" {
void __kmpc_ordered(IdentTy *Loc, int32_t TId) { FunctionTracingRAII(); }

void __kmpc_end_ordered(IdentTy *Loc, int32_t TId) { FunctionTracingRAII(); }

int32_t __kmpc_cancel_barrier(IdentTy *Loc, int32_t TId) {
  FunctionTracingRAII();
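  // Cancellation is not supported on the device, so this is a plain barrier
  // that always reports "not cancelled".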
  __kmpc_barrier(Loc, TId);
  return 0;
}

void __kmpc_barrier(IdentTy *Loc, int32_t TId) {
  FunctionTracingRAII();
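  // The main thread in generic mode executes the sequential part on its own,
  // so a barrier there only needs the memory flush semantics.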
  if (mapping::isMainThreadInGenericMode())
    return __kmpc_flush(Loc);

  if (mapping::isSPMDMode())
    return __kmpc_barrier_simple_spmd(Loc, TId);

  impl::namedBarrier();
}

__attribute__((noinline)) void __kmpc_barrier_simple_spmd(IdentTy *Loc,
                                                          int32_t TId) {
  FunctionTracingRAII();
  synchronize::threadsAligned();
}

__attribute__((noinline)) void __kmpc_barrier_simple_generic(IdentTy *Loc,
                                                             int32_t TId) {
  FunctionTracingRAII();
  synchronize::threads();
}

int32_t __kmpc_master(IdentTy *Loc, int32_t TId) {
  FunctionTracingRAII();
  // The master construct binds to the encountering team's thread 0.
  return omp_get_thread_num() == 0;
}

void __kmpc_end_master(IdentTy *Loc, int32_t TId) { FunctionTracingRAII(); }

int32_t __kmpc_single(IdentTy *Loc, int32_t TId) {
  FunctionTracingRAII();
  return __kmpc_master(Loc, TId);
}

void __kmpc_end_single(IdentTy *Loc, int32_t TId) {
  FunctionTracingRAII();
  // The barrier is explicitly called.
}

void __kmpc_flush(IdentTy *Loc) {
  FunctionTracingRAII();
  fence::kernel(__ATOMIC_SEQ_CST);
}

uint64_t __kmpc_warp_active_thread_mask(void) {
  FunctionTracingRAII();
  return mapping::activemask();
}

void __kmpc_syncwarp(uint64_t Mask) {
  FunctionTracingRAII();
  synchronize::warp(Mask);
}

void __kmpc_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
  FunctionTracingRAII();
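  // Critical regions are serialized through an OpenMP lock that lives in the
  // critical name storage.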
  omp_set_lock(reinterpret_cast<omp_lock_t *>(Name));
}

void __kmpc_end_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
  FunctionTracingRAII();
  omp_unset_lock(reinterpret_cast<omp_lock_t *>(Name));
}

void omp_init_lock(omp_lock_t *Lock) { impl::initLock(Lock); }

void omp_destroy_lock(omp_lock_t *Lock) { impl::destroyLock(Lock); }

void omp_set_lock(omp_lock_t *Lock) { impl::setLock(Lock); }

void omp_unset_lock(omp_lock_t *Lock) { impl::unsetLock(Lock); }

int omp_test_lock(omp_lock_t *Lock) { return impl::testLock(Lock); }
} // extern "C"

#pragma omp end declare target