//===- Synchronization.cpp - OpenMP Device synchronization API ---- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Include all synchronization.
//
//===----------------------------------------------------------------------===//

#include "Synchronization.h"

#include "Debug.h"
#include "Interface.h"
#include "Mapping.h"
#include "State.h"
#include "Types.h"
#include "Utils.h"

#pragma omp begin declare target device_type(nohost)

using namespace _OMP;

namespace impl {

/// Atomics
///
///{
/// NOTE: This function needs to be implemented by every target.
uint32_t atomicInc(uint32_t *Address, uint32_t Val, int Ordering);

uint32_t atomicLoad(uint32_t *Address, int Ordering) {
  // The atomic read is emulated as an atomic add of zero. Note that it is
  // performed sequentially consistent regardless of the requested Ordering.
  return __atomic_fetch_add(Address, 0U, __ATOMIC_SEQ_CST);
}

void atomicStore(uint32_t *Address, uint32_t Val, int Ordering) {
  __atomic_store_n(Address, Val, Ordering);
}

uint32_t atomicAdd(uint32_t *Address, uint32_t Val, int Ordering) {
  return __atomic_fetch_add(Address, Val, Ordering);
}
uint32_t atomicMax(uint32_t *Address, uint32_t Val, int Ordering) {
  return __atomic_fetch_max(Address, Val, Ordering);
}

uint32_t atomicExchange(uint32_t *Address, uint32_t Val, int Ordering) {
  uint32_t R;
  __atomic_exchange(Address, &Val, &R, Ordering);
  return R;
}
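// Note: atomicCAS returns the value observed in *Address before the operation;
// the result equals Compare exactly when the exchange succeeded, so callers
// can both test for success and obtain the old value.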
uint32_t atomicCAS(uint32_t *Address, uint32_t Compare, uint32_t Val,
                   int Ordering) {
  (void)__atomic_compare_exchange(Address, &Compare, &Val, false, Ordering,
                                  Ordering);
  return Compare;
}

uint64_t atomicAdd(uint64_t *Address, uint64_t Val, int Ordering) {
  return __atomic_fetch_add(Address, Val, Ordering);
}
///}

// Forward declarations, to be defined for AMDGCN and NVPTX below.
uint32_t atomicInc(uint32_t *A, uint32_t V, int Ordering);
void namedBarrierInit();
void namedBarrier();
void fenceTeam(int Ordering);
void fenceKernel(int Ordering);
void fenceSystem(int Ordering);
void syncWarp(__kmpc_impl_lanemask_t);
void syncThreads();
void syncThreadsAligned() { syncThreads(); }
void unsetLock(omp_lock_t *);
int testLock(omp_lock_t *);
void initLock(omp_lock_t *);
void destroyLock(omp_lock_t *);
void setLock(omp_lock_t *);

/// AMDGCN Implementation
///
///{
#pragma omp begin declare variant match(device = {arch(amdgcn)})

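// atomicInc wraps the GCN "atomic inc" operation: the stored value becomes
// (*A >= V) ? 0 : *A + 1 and the previous value is returned.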
uint32_t atomicInc(uint32_t *A, uint32_t V, int Ordering) {
  // __builtin_amdgcn_atomic_inc32 should expand to this switch when
  // passed a runtime value, but does not do so yet. Workaround here.
  switch (Ordering) {
  default:
    __builtin_unreachable();
  case __ATOMIC_RELAXED:
    return __builtin_amdgcn_atomic_inc32(A, V, __ATOMIC_RELAXED, "");
  case __ATOMIC_ACQUIRE:
    return __builtin_amdgcn_atomic_inc32(A, V, __ATOMIC_ACQUIRE, "");
  case __ATOMIC_RELEASE:
    return __builtin_amdgcn_atomic_inc32(A, V, __ATOMIC_RELEASE, "");
  case __ATOMIC_ACQ_REL:
    return __builtin_amdgcn_atomic_inc32(A, V, __ATOMIC_ACQ_REL, "");
  case __ATOMIC_SEQ_CST:
    return __builtin_amdgcn_atomic_inc32(A, V, __ATOMIC_SEQ_CST, "");
  }
}

uint32_t SHARED(namedBarrierTracker);

void namedBarrierInit() {
  // We don't have global constructors, and shared memory is not
  // zero-initialized, so set up the tracker explicitly.
  atomic::store(&namedBarrierTracker, 0u, __ATOMIC_RELEASE);
}

void namedBarrier() {
  uint32_t NumThreads = omp_get_num_threads();
  // assert(NumThreads % 32 == 0);

  uint32_t WarpSize = mapping::getWarpSize();
  uint32_t NumWaves = NumThreads / WarpSize;

  fence::team(__ATOMIC_ACQUIRE);

  // Named barrier implementation for amdgcn.
  // Uses two 16 bit unsigned counters. One for the number of waves to have
  // reached the barrier, and one to count how many times the barrier has been
  // passed. These are packed in a single atomically accessed 32 bit integer.
  // Low bits for the number of waves, assumed zero before this call.
  // High bits to count the number of times the barrier has been passed.

  // precondition: NumWaves != 0;
  // invariant: NumWaves * WarpSize == NumThreads;
  // precondition: NumWaves < 0xffffu;
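
  // Example with NumWaves == 4: the first three wave leaders increment the
  // tracker to 1, 2 and 3 and spin; the fourth leader reads 3 (== NumWaves-1),
  // bumps the generation and clears the wave count by storing 0x00010000,
  // which releases the three spinning waves.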

  // Increment the low 16 bits once, using the lowest active thread.
  if (mapping::isLeaderInWarp()) {
    uint32_t load = atomic::add(&namedBarrierTracker, 1,
                                __ATOMIC_RELAXED); // commutative

    // Record the number of times the barrier has been passed
    uint32_t generation = load & 0xffff0000u;

    if ((load & 0x0000ffffu) == (NumWaves - 1)) {
      // Reached NumWaves in low bits so this is the last wave.
      // Set low bits to zero and increment high bits
      load += 0x00010000u; // wrap is safe
      load &= 0xffff0000u; // because bits zeroed second

      // Reset the wave counter and release the waiting waves
      atomic::store(&namedBarrierTracker, load, __ATOMIC_RELAXED);
    } else {
      // more waves still to go, spin until generation counter changes
      do {
        __builtin_amdgcn_s_sleep(0);
        load = atomic::load(&namedBarrierTracker, __ATOMIC_RELAXED);
      } while ((load & 0xffff0000u) == generation);
    }
  }
  fence::team(__ATOMIC_RELEASE);
}

// Sema checking of amdgcn_fence is aggressive. The intention is to patch clang
// so that the builtin is usable within a template environment and so that a
// runtime value of the memory order is expanded to this switch within
// clang/llvm.
void fenceTeam(int Ordering) {
  switch (Ordering) {
  default:
    __builtin_unreachable();
  case __ATOMIC_ACQUIRE:
    return __builtin_amdgcn_fence(__ATOMIC_ACQUIRE, "workgroup");
  case __ATOMIC_RELEASE:
    return __builtin_amdgcn_fence(__ATOMIC_RELEASE, "workgroup");
  case __ATOMIC_ACQ_REL:
    return __builtin_amdgcn_fence(__ATOMIC_ACQ_REL, "workgroup");
  case __ATOMIC_SEQ_CST:
    return __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");
  }
}
void fenceKernel(int Ordering) {
  switch (Ordering) {
  default:
    __builtin_unreachable();
  case __ATOMIC_ACQUIRE:
    return __builtin_amdgcn_fence(__ATOMIC_ACQUIRE, "agent");
  case __ATOMIC_RELEASE:
    return __builtin_amdgcn_fence(__ATOMIC_RELEASE, "agent");
  case __ATOMIC_ACQ_REL:
    return __builtin_amdgcn_fence(__ATOMIC_ACQ_REL, "agent");
  case __ATOMIC_SEQ_CST:
    return __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "agent");
  }
}
void fenceSystem(int Ordering) {
  switch (Ordering) {
  default:
    __builtin_unreachable();
  case __ATOMIC_ACQUIRE:
    return __builtin_amdgcn_fence(__ATOMIC_ACQUIRE, "");
  case __ATOMIC_RELEASE:
    return __builtin_amdgcn_fence(__ATOMIC_RELEASE, "");
  case __ATOMIC_ACQ_REL:
    return __builtin_amdgcn_fence(__ATOMIC_ACQ_REL, "");
  case __ATOMIC_SEQ_CST:
    return __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "");
  }
}

void syncWarp(__kmpc_impl_lanemask_t) {
  // AMDGCN doesn't need to sync threads in a warp
}

void syncThreads() { __builtin_amdgcn_s_barrier(); }
void syncThreadsAligned() { syncThreads(); }

// TODO: Don't have wavefront lane locks. Possibly can't have them.
void unsetLock(omp_lock_t *) { __builtin_trap(); }
int testLock(omp_lock_t *) { __builtin_trap(); }
void initLock(omp_lock_t *) { __builtin_trap(); }
void destroyLock(omp_lock_t *) { __builtin_trap(); }
void setLock(omp_lock_t *) { __builtin_trap(); }

#pragma omp end declare variant
///}

/// NVPTX Implementation
///
///{
#pragma omp begin declare variant match(                                       \
    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})

uint32_t atomicInc(uint32_t *Address, uint32_t Val, int Ordering) {
  return __nvvm_atom_inc_gen_ui(Address, Val);
}

void namedBarrierInit() {}

void namedBarrier() {
  uint32_t NumThreads = omp_get_num_threads();
  ASSERT(NumThreads % 32 == 0);

  // Barrier #7 is the named barrier used by the active parallel threads of a
  // team in an L1 parallel region to synchronize with each other; barrier.sync
  // with a thread count synchronizes exactly NumThreads threads.
  constexpr int BarrierNo = 7;
  asm volatile("barrier.sync %0, %1;"
               :
               : "r"(BarrierNo), "r"(NumThreads)
               : "memory");
}

void fenceTeam(int) { __nvvm_membar_cta(); }

void fenceKernel(int) { __nvvm_membar_gl(); }

void fenceSystem(int) { __nvvm_membar_sys(); }

void syncWarp(__kmpc_impl_lanemask_t Mask) { __nvvm_bar_warp_sync(Mask); }

void syncThreads() {
  constexpr int BarrierNo = 8;
  asm volatile("barrier.sync %0;" : : "r"(BarrierNo) : "memory");
}

void syncThreadsAligned() { __syncthreads(); }

constexpr uint32_t OMP_SPIN = 1000;
constexpr uint32_t UNSET = 0;
constexpr uint32_t SET = 1;

// TODO: This seems to hide a bug in the declare variant handling. If it is
//       called before it is defined here the overload won't happen.
//       Investigate later!
void unsetLock(omp_lock_t *Lock) {
  (void)atomicExchange((uint32_t *)Lock, UNSET, __ATOMIC_SEQ_CST);
}

int testLock(omp_lock_t *Lock) {
  return atomicAdd((uint32_t *)Lock, 0u, __ATOMIC_SEQ_CST);
}

void initLock(omp_lock_t *Lock) { unsetLock(Lock); }

void destroyLock(omp_lock_t *Lock) { unsetLock(Lock); }

void setLock(omp_lock_t *Lock) {
  // TODO: Not sure spinning is a good idea here.
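  // Back off between CAS attempts for roughly OMP_SPIN * BlockId clock cycles
  // so that blocks retry at staggered times and contention on the lock word
  // stays low.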
  while (atomicCAS((uint32_t *)Lock, UNSET, SET, __ATOMIC_SEQ_CST) != UNSET) {
    int32_t start = __nvvm_read_ptx_sreg_clock();
    int32_t now;
    for (;;) {
      now = __nvvm_read_ptx_sreg_clock();
      int32_t cycles = now > start ? now - start : now + (0xffffffff - start);
      if (cycles >= OMP_SPIN * mapping::getBlockId()) {
        break;
      }
    }
  } // wait for UNSET to be the read value

#pragma omp end declare variant
///}

} // namespace impl

void synchronize::init(bool IsSPMD) {
  if (!IsSPMD)
    impl::namedBarrierInit();
}

void synchronize::warp(LaneMaskTy Mask) { impl::syncWarp(Mask); }

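// Note: threadsAligned() assumes all threads of the block reach the barrier
// from converged (aligned) control flow, which permits the cheaper
// __syncthreads lowering on NVPTX; threads() does not rely on that assumption.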
void synchronize::threads() { impl::syncThreads(); }

void synchronize::threadsAligned() { impl::syncThreadsAligned(); }

void fence::team(int Ordering) { impl::fenceTeam(Ordering); }

void fence::kernel(int Ordering) { impl::fenceKernel(Ordering); }

void fence::system(int Ordering) { impl::fenceSystem(Ordering); }

uint32_t atomic::load(uint32_t *Addr, int Ordering) {
  return impl::atomicLoad(Addr, Ordering);
}

void atomic::store(uint32_t *Addr, uint32_t V, int Ordering) {
  impl::atomicStore(Addr, V, Ordering);
}

uint32_t atomic::inc(uint32_t *Addr, uint32_t V, int Ordering) {
  return impl::atomicInc(Addr, V, Ordering);
}

uint32_t atomic::add(uint32_t *Addr, uint32_t V, int Ordering) {
  return impl::atomicAdd(Addr, V, Ordering);
}

uint64_t atomic::add(uint64_t *Addr, uint64_t V, int Ordering) {
  return impl::atomicAdd(Addr, V, Ordering);
}

extern "C" {
void __kmpc_ordered(IdentTy *Loc, int32_t TId) { FunctionTracingRAII(); }

void __kmpc_end_ordered(IdentTy *Loc, int32_t TId) { FunctionTracingRAII(); }

int32_t __kmpc_cancel_barrier(IdentTy *Loc, int32_t TId) {
  FunctionTracingRAII();
  __kmpc_barrier(Loc, TId);
  return 0;
}

void __kmpc_barrier(IdentTy *Loc, int32_t TId) {
  FunctionTracingRAII();
  // The main thread of a generic-mode kernel has no team mates outside of a
  // parallel region, so a flush is sufficient.
  if (mapping::isMainThreadInGenericMode())
    return __kmpc_flush(Loc);

  if (mapping::isSPMDMode())
    return __kmpc_barrier_simple_spmd(Loc, TId);

  impl::namedBarrier();
}

__attribute__((noinline)) void __kmpc_barrier_simple_spmd(IdentTy *Loc,
                                                          int32_t TId) {
  FunctionTracingRAII();
  synchronize::threadsAligned();
}

__attribute__((noinline)) void __kmpc_barrier_simple_generic(IdentTy *Loc,
                                                             int32_t TId) {
  FunctionTracingRAII();
  synchronize::threads();
}

int32_t __kmpc_master(IdentTy *Loc, int32_t TId) {
  FunctionTracingRAII();
  // The master construct is executed by the thread with thread number 0 in
  // the team, not by team number 0.
  return omp_get_thread_num() == 0;
}

void __kmpc_end_master(IdentTy *Loc, int32_t TId) { FunctionTracingRAII(); }

int32_t __kmpc_single(IdentTy *Loc, int32_t TId) {
  FunctionTracingRAII();
  return __kmpc_master(Loc, TId);
}

void __kmpc_end_single(IdentTy *Loc, int32_t TId) {
  FunctionTracingRAII();
  // The barrier is explicitly called.
}

void __kmpc_flush(IdentTy *Loc) {
  FunctionTracingRAII();
  fence::kernel(__ATOMIC_SEQ_CST);
}

uint64_t __kmpc_warp_active_thread_mask(void) {
  FunctionTracingRAII();
  return mapping::activemask();
}

void __kmpc_syncwarp(uint64_t Mask) {
  FunctionTracingRAII();
  synchronize::warp(Mask);
}

void __kmpc_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
  FunctionTracingRAII();
  omp_set_lock(reinterpret_cast<omp_lock_t *>(Name));
}

void __kmpc_end_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
  FunctionTracingRAII();
  omp_unset_lock(reinterpret_cast<omp_lock_t *>(Name));
}

void omp_init_lock(omp_lock_t *Lock) { impl::initLock(Lock); }

void omp_destroy_lock(omp_lock_t *Lock) { impl::destroyLock(Lock); }

void omp_set_lock(omp_lock_t *Lock) { impl::setLock(Lock); }

void omp_unset_lock(omp_lock_t *Lock) { impl::unsetLock(Lock); }

int omp_test_lock(omp_lock_t *Lock) { return impl::testLock(Lock); }
} // extern "C"

#pragma omp end declare target