//===- Synchronization.cpp - OpenMP Device synchronization API --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the synchronization API for the device runtime: atomics,
// barriers, fences, and locks.
//
//===----------------------------------------------------------------------===//

#include "Synchronization.h"

#include "Debug.h"
#include "Interface.h"
#include "Mapping.h"
#include "State.h"
#include "Types.h"
#include "Utils.h"

#pragma omp declare target

using namespace _OMP;

namespace impl {

/// Atomics
///
///{
/// NOTE: This function needs to be implemented by every target.
uint32_t atomicInc(uint32_t *Address, uint32_t Val, int Ordering);

uint32_t atomicLoad(uint32_t *Address, int Ordering) {
  // Read via an atomic add of zero; honor the requested ordering rather than
  // forcing sequential consistency.
  return __atomic_fetch_add(Address, 0U, Ordering);
}

void atomicStore(uint32_t *Address, uint32_t Val, int Ordering) {
  __atomic_store_n(Address, Val, Ordering);
}

uint32_t atomicAdd(uint32_t *Address, uint32_t Val, int Ordering) {
  return __atomic_fetch_add(Address, Val, Ordering);
}
uint32_t atomicMax(uint32_t *Address, uint32_t Val, int Ordering) {
  return __atomic_fetch_max(Address, Val, Ordering);
}

uint32_t atomicExchange(uint32_t *Address, uint32_t Val, int Ordering) {
  uint32_t R;
  __atomic_exchange(Address, &Val, &R, Ordering);
  return R;
}
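// Compare-and-swap: returns the value observed at Address, which equals
// Compare iff the exchange succeeded.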
uint32_t atomicCAS(uint32_t *Address, uint32_t Compare, uint32_t Val,
                   int Ordering) {
  (void)__atomic_compare_exchange(Address, &Compare, &Val, false, Ordering,
                                  Ordering);
  return Compare;
}

uint64_t atomicAdd(uint64_t *Address, uint64_t Val, int Ordering) {
  return __atomic_fetch_add(Address, Val, Ordering);
}
///}

/// AMDGCN Implementation
///
///{
#pragma omp begin declare variant match(device = {arch(amdgcn)})

uint32_t atomicInc(uint32_t *Address, uint32_t Val, int Ordering) {
  return __builtin_amdgcn_atomic_inc32(Address, Val, Ordering, "");
}

uint32_t SHARED(namedBarrierTracker);
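// Low 16 bits count the waves that have reached the barrier; high 16 bits
// count how many times the barrier has been passed (see namedBarrier below).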

void namedBarrierInit() {
  // There are no global constructors on the device, and shared memory is not
  // zero-initialized, so reset the tracker explicitly.
  atomic::store(&namedBarrierTracker, 0u, __ATOMIC_RELEASE);
}

void namedBarrier() {
  uint32_t NumThreads = omp_get_num_threads();
  // assert(NumThreads % 32 == 0);

  uint32_t WarpSize = mapping::getWarpSize();
  uint32_t NumWaves = NumThreads / WarpSize;

  fence::team(__ATOMIC_ACQUIRE);

  // Named barrier implementation for amdgcn.
  // Uses two 16-bit unsigned counters packed into a single atomically accessed
  // 32-bit integer: the low bits count the waves that have reached the barrier
  // (assumed zero before this call), the high bits count how many times the
  // barrier has been passed.

  // precondition: NumWaves != 0;
  // invariant: NumWaves * WarpSize == NumThreads;
  // precondition: NumWaves < 0xffffu;

  // Increment the low 16 bits once, using the lowest active thread.
  if (mapping::isLeaderInWarp()) {
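    // Only the lowest active lane updates the tracker and spins; the other
    // lanes of the wave are masked off and follow along in lockstep.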
    uint32_t load = atomic::add(&namedBarrierTracker, 1,
                                __ATOMIC_RELAXED); // commutative

    // Record the number of times the barrier has been passed
    uint32_t generation = load & 0xffff0000u;

    if ((load & 0x0000ffffu) == (NumWaves - 1)) {
      // Reached NumWaves in low bits so this is the last wave.
      // Set low bits to zero and increment high bits
      load += 0x00010000u; // wrap is safe
      load &= 0xffff0000u; // because bits zeroed second

      // Reset the wave counter and release the waiting waves
      atomic::store(&namedBarrierTracker, load, __ATOMIC_RELAXED);
    } else {
      // more waves still to go, spin until generation counter changes
      do {
        __builtin_amdgcn_s_sleep(0);
        load = atomic::load(&namedBarrierTracker, __ATOMIC_RELAXED);
      } while ((load & 0xffff0000u) == generation);
    }
  }
  fence::team(__ATOMIC_RELEASE);
}

void syncWarp(__kmpc_impl_lanemask_t) {
  // AMDGCN does not need to synchronize threads within a wavefront.
}

void syncThreads() { __builtin_amdgcn_s_barrier(); }
void syncThreadsAligned() { syncThreads(); }

void fenceTeam(int Ordering) { __builtin_amdgcn_fence(Ordering, "workgroup"); }

void fenceKernel(int Ordering) { __builtin_amdgcn_fence(Ordering, "agent"); }

void fenceSystem(int Ordering) { __builtin_amdgcn_fence(Ordering, ""); }
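// Note: the second argument of __builtin_amdgcn_fence is the sync-scope
// string; "workgroup" limits the fence to the team, "agent" to the device,
// and the empty string requests full system scope.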

#pragma omp end declare variant
///}

/// NVPTX Implementation
///
///{
#pragma omp begin declare variant match(                                       \
    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})

uint32_t atomicInc(uint32_t *Address, uint32_t Val, int Ordering) {
  return __nvvm_atom_inc_gen_ui(Address, Val);
}

void namedBarrierInit() {}

void namedBarrier() {
  uint32_t NumThreads = omp_get_num_threads();
  ASSERT(NumThreads % 32 == 0);
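  // PTX barriers operate at warp granularity, hence the requirement that
  // NumThreads is a multiple of the warp size.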

  // Use a named barrier so the active parallel threads of a team in an L1
  // parallel region can synchronize with each other.
  constexpr int BarrierNo = 7;
  asm volatile("barrier.sync %0, %1;"
               :
               : "r"(BarrierNo), "r"(NumThreads)
               : "memory");
}

void fenceTeam(int) { __nvvm_membar_cta(); }

void fenceKernel(int) { __nvvm_membar_gl(); }

void fenceSystem(int) { __nvvm_membar_sys(); }

void syncWarp(__kmpc_impl_lanemask_t Mask) { __nvvm_bar_warp_sync(Mask); }

void syncThreads() {
  constexpr int BarrierNo = 8;
  asm volatile("barrier.sync %0;" : : "r"(BarrierNo) : "memory");
}

void syncThreadsAligned() { __syncthreads(); }

constexpr uint32_t OMP_SPIN = 1000;
constexpr uint32_t UNSET = 0;
constexpr uint32_t SET = 1;
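// The device lock is a simple test-and-set spin lock over a 32-bit word using
// the constants above; setLock backs off for a number of clock cycles scaled
// by OMP_SPIN between acquisition attempts.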

// TODO: This seems to hide a bug in the declare variant handling: if it is
//       called before it is defined here, the overload won't happen.
//       Investigate later!
void unsetLock(omp_lock_t *Lock) {
  (void)atomicExchange((uint32_t *)Lock, UNSET, __ATOMIC_SEQ_CST);
}

int testLock(omp_lock_t *Lock) {
  return atomicAdd((uint32_t *)Lock, 0u, __ATOMIC_SEQ_CST);
}

void initLock(omp_lock_t *Lock) { unsetLock(Lock); }

void destroyLock(omp_lock_t *Lock) { unsetLock(Lock); }

void setLock(omp_lock_t *Lock) {
  // TODO: Not sure spinning is a good idea here...
  while (atomicCAS((uint32_t *)Lock, UNSET, SET, __ATOMIC_SEQ_CST) != UNSET) {
    int32_t start = __nvvm_read_ptx_sreg_clock();
    int32_t now;
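    // Busy-wait for a block-dependent number of clock cycles to stagger
    // retries across blocks; the ternary below accounts for the clock counter
    // wrapping around.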
    for (;;) {
      now = __nvvm_read_ptx_sreg_clock();
      int32_t cycles = now > start ? now - start : now + (0xffffffff - start);
      if (cycles >= OMP_SPIN * mapping::getBlockId()) {
        break;
      }
    }
  } // wait for 0 to be the read value
}

#pragma omp end declare variant
///}

} // namespace impl

void synchronize::init(bool IsSPMD) {
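  // Only the generic-mode (non-SPMD) barrier uses the named barrier tracker,
  // so it is the only configuration that needs initialization here.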
  if (!IsSPMD)
    impl::namedBarrierInit();
}

void synchronize::warp(LaneMaskTy Mask) { impl::syncWarp(Mask); }

void synchronize::threads() { impl::syncThreads(); }

void synchronize::threadsAligned() { impl::syncThreadsAligned(); }

void fence::team(int Ordering) { impl::fenceTeam(Ordering); }

void fence::kernel(int Ordering) { impl::fenceKernel(Ordering); }

void fence::system(int Ordering) { impl::fenceSystem(Ordering); }

uint32_t atomic::load(uint32_t *Addr, int Ordering) {
  return impl::atomicLoad(Addr, Ordering);
}

void atomic::store(uint32_t *Addr, uint32_t V, int Ordering) {
  impl::atomicStore(Addr, V, Ordering);
}

uint32_t atomic::inc(uint32_t *Addr, uint32_t V, int Ordering) {
  return impl::atomicInc(Addr, V, Ordering);
}

uint32_t atomic::add(uint32_t *Addr, uint32_t V, int Ordering) {
  return impl::atomicAdd(Addr, V, Ordering);
}

uint64_t atomic::add(uint64_t *Addr, uint64_t V, int Ordering) {
  return impl::atomicAdd(Addr, V, Ordering);
}

extern "C" {
void __kmpc_ordered(IdentTy *Loc, int32_t TId) {}

void __kmpc_end_ordered(IdentTy *Loc, int32_t TId) {}

int32_t __kmpc_cancel_barrier(IdentTy *Loc, int32_t TId) {
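  // Cancellation is not handled here; always report "not cancelled" after the
  // barrier.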
  __kmpc_barrier(Loc, TId);
  return 0;
}

void __kmpc_barrier(IdentTy *Loc, int32_t TId) {
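  // The main thread in generic mode has no team peers to wait for, so a flush
  // suffices. SPMD mode uses the aligned hardware barrier; generic mode uses
  // the named barrier for the active parallel threads.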
  if (mapping::isMainThreadInGenericMode())
    return __kmpc_flush(Loc);

  if (mapping::isSPMDMode())
    return __kmpc_barrier_simple_spmd(Loc, TId);

  impl::namedBarrier();
}

__attribute__((noinline)) void __kmpc_barrier_simple_spmd(IdentTy *Loc,
                                                          int32_t TId) {
  synchronize::threadsAligned();
}

int32_t __kmpc_master(IdentTy *Loc, int32_t TId) {
  return omp_get_thread_num() == 0;
}

void __kmpc_end_master(IdentTy *Loc, int32_t TId) {}

int32_t __kmpc_single(IdentTy *Loc, int32_t TId) {
  return __kmpc_master(Loc, TId);
}

void __kmpc_end_single(IdentTy *Loc, int32_t TId) {
  // The barrier is explicitly called.
}

void __kmpc_flush(IdentTy *Loc) { fence::kernel(__ATOMIC_SEQ_CST); }

uint64_t __kmpc_warp_active_thread_mask(void) { return mapping::activemask(); }

void __kmpc_syncwarp(uint64_t Mask) { synchronize::warp(Mask); }

void __kmpc_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
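  // Critical regions reuse the lock implementation; the critical name storage
  // is reinterpreted as an omp_lock_t.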
  omp_set_lock(reinterpret_cast<omp_lock_t *>(Name));
}

void __kmpc_end_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
  omp_unset_lock(reinterpret_cast<omp_lock_t *>(Name));
}

void omp_init_lock(omp_lock_t *Lock) { impl::initLock(Lock); }

void omp_destroy_lock(omp_lock_t *Lock) { impl::destroyLock(Lock); }

void omp_set_lock(omp_lock_t *Lock) { impl::setLock(Lock); }

void omp_unset_lock(omp_lock_t *Lock) { impl::unsetLock(Lock); }

int omp_test_lock(omp_lock_t *Lock) { return impl::testLock(Lock); }
} // extern "C"

#pragma omp end declare target