//===- Synchronization.cpp - OpenMP Device synchronization API --*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
// Implementation of all synchronization related runtime functions.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "Synchronization.h"
14 
15 #include "Debug.h"
16 #include "Interface.h"
17 #include "Mapping.h"
18 #include "State.h"
19 #include "Types.h"
20 #include "Utils.h"
21 
22 #pragma omp declare target
23 
24 using namespace _OMP;
25 
26 namespace impl {
27 
28 /// Atomics
29 ///
30 ///{
31 /// NOTE: This function needs to be implemented by every target.
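/// Semantics match the target builtins used below: the old value is returned,
/// and *Address becomes 0 if it was >= Val, otherwise *Address + 1.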
32 uint32_t atomicInc(uint32_t *Address, uint32_t Val, int Ordering);
33 
uint32_t atomicRead(uint32_t *Address, int Ordering) {
  // An atomic add of zero reads the value; honor the requested ordering
  // instead of hard-coding SEQ_CST.
  return __atomic_fetch_add(Address, 0U, Ordering);
}
37 
38 uint32_t atomicAdd(uint32_t *Address, uint32_t Val, int Ordering) {
39   return __atomic_fetch_add(Address, Val, Ordering);
40 }
41 uint32_t atomicMax(uint32_t *Address, uint32_t Val, int Ordering) {
42   return __atomic_fetch_max(Address, Val, Ordering);
43 }
44 
45 uint32_t atomicExchange(uint32_t *Address, uint32_t Val, int Ordering) {
46   uint32_t R;
47   __atomic_exchange(Address, &Val, &R, Ordering);
48   return R;
49 }
50 uint32_t atomicCAS(uint32_t *Address, uint32_t Compare, uint32_t Val,
51                    int Ordering) {
52   (void)__atomic_compare_exchange(Address, &Compare, &Val, false, Ordering,
53                                   Ordering);
54   return Compare;
55 }
56 
57 uint64_t atomicAdd(uint64_t *Address, uint64_t Val, int Ordering) {
58   return __atomic_fetch_add(Address, Val, Ordering);
59 }
60 ///}
61 
62 /// AMDGCN Implementation
63 ///
64 ///{
65 #pragma omp begin declare variant match(device = {arch(amdgcn)})
66 
67 uint32_t atomicInc(uint32_t *Address, uint32_t Val, int Ordering) {
68   return __builtin_amdgcn_atomic_inc32(Address, Val, Ordering, "");
69 }
70 
uint32_t SHARED(namedBarrierTracker);
72 
73 void namedBarrierInit() {
  // There are no global constructors on the device, and shared memory is not
  // zero-initialized, so initialize the tracker explicitly.
75   atomic::store(&namedBarrierTracker, 0u, __ATOMIC_RELEASE);
76 }
77 
78 void namedBarrier() {
79   uint32_t NumThreads = omp_get_num_threads();
  uint32_t WarpSize = mapping::getWarpSize();
  uint32_t NumWaves = NumThreads / WarpSize;
  ASSERT(NumThreads % WarpSize == 0);
84 
85   fence::team(__ATOMIC_ACQUIRE);
86 
  // Named barrier implementation for AMDGCN.
  // Uses two 16-bit unsigned counters packed into a single atomically accessed
  // 32-bit integer: the low bits count the waves that have reached the barrier
  // (assumed zero before this call), the high bits count how many times the
  // barrier has been passed.
93 
94   // precondition: NumWaves != 0;
95   // invariant: NumWaves * WarpSize == NumThreads;
96   // precondition: NumWaves < 0xffffu;
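  //
  // Example with NumWaves == 4: the first three leader lanes bump the counter
  // to 1, 2, 3 and spin; the fourth observes NumWaves - 1 in the low bits,
  // zeroes them, and bumps the generation count in the high bits, releasing
  // the spinners.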
97 
98   // Increment the low 16 bits once, using the lowest active thread.
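  // Lanes of a wave execute in lock step, so having the leader lane spin below
  // stalls its entire wave until the barrier is released.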
99   if (mapping::isLeaderInWarp()) {
100     uint32_t load = atomic::add(&namedBarrierTracker, 1,
101                                 __ATOMIC_RELAXED); // commutative
102 
103     // Record the number of times the barrier has been passed
104     uint32_t generation = load & 0xffff0000u;
105 
106     if ((load & 0x0000ffffu) == (NumWaves - 1)) {
107       // Reached NumWaves in low bits so this is the last wave.
108       // Set low bits to zero and increment high bits
109       load += 0x00010000u; // wrap is safe
110       load &= 0xffff0000u; // because bits zeroed second
111 
112       // Reset the wave counter and release the waiting waves
113       atomic::store(&namedBarrierTracker, load, __ATOMIC_RELAXED);
114     } else {
115       // more waves still to go, spin until generation counter changes
116       do {
117         __builtin_amdgcn_s_sleep(0);
        load = atomic::load(&namedBarrierTracker, __ATOMIC_RELAXED);
119       } while ((load & 0xffff0000u) == generation);
120     }
121   }
122   fence::team(__ATOMIC_RELEASE);
123 }
124 
125 void syncWarp(__kmpc_impl_lanemask_t) {
126   // AMDGCN doesn't need to sync threads in a warp
127 }
128 
129 void syncThreads() { __builtin_amdgcn_s_barrier(); }
130 
131 void fenceTeam(int Ordering) { __builtin_amdgcn_fence(Ordering, "workgroup"); }
132 
133 void fenceKernel(int Ordering) { __builtin_amdgcn_fence(Ordering, "agent"); }
134 
// An empty synchronization scope string requests the widest (system) scope.
void fenceSystem(int Ordering) { __builtin_amdgcn_fence(Ordering, ""); }
136 
137 #pragma omp end declare variant
138 ///}
139 
140 /// NVPTX Implementation
141 ///
142 ///{
143 #pragma omp begin declare variant match(                                       \
144     device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
145 
146 uint32_t atomicInc(uint32_t *Address, uint32_t Val, int Ordering) {
147   return __nvvm_atom_inc_gen_ui(Address, Val);
148 }
149 
150 void namedBarrierInit() {}
151 
152 void namedBarrier() {
153   uint32_t NumThreads = omp_get_num_threads();
154   ASSERT(NumThreads % 32 == 0);
155 
  // Use a named barrier so the active parallel threads of a team in an L1
  // parallel region can synchronize with each other.
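  // PTX provides 16 named barriers (ids 0-15); id 0 is used by the default
  // `bar.sync 0` (__syncthreads), so distinct ids are picked here and in
  // syncThreads below.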
158   constexpr int BarrierNo = 7;
159   asm volatile("barrier.sync %0, %1;"
160                :
161                : "r"(BarrierNo), "r"(NumThreads)
162                : "memory");
163 }
164 
165 void fenceTeam(int) { __nvvm_membar_cta(); }
166 
167 void fenceKernel(int) { __nvvm_membar_gl(); }
168 
169 void fenceSystem(int) { __nvvm_membar_sys(); }
170 
171 void syncWarp(__kmpc_impl_lanemask_t Mask) { __nvvm_bar_warp_sync(Mask); }
172 
173 void syncThreads() {
174   constexpr int BarrierNo = 8;
175   asm volatile("barrier.sync %0;" : : "r"(BarrierNo) : "memory");
176 }
177 
178 constexpr uint32_t OMP_SPIN = 1000;
179 constexpr uint32_t UNSET = 0;
180 constexpr uint32_t SET = 1;
181 
// TODO: This seems to hide a bug in the declare variant handling. If it is
//       called before it is defined here, the overload won't happen.
//       Investigate later!
185 void unsetLock(omp_lock_t *Lock) {
186   (void)atomicExchange((uint32_t *)Lock, UNSET, __ATOMIC_SEQ_CST);
187 }
188 
// An atomic add of zero is simply an atomic read of the lock word.
int testLock(omp_lock_t *Lock) {
  return atomicAdd((uint32_t *)Lock, 0u, __ATOMIC_SEQ_CST);
}
192 
193 void initLock(omp_lock_t *Lock) { unsetLock(Lock); }
194 
void destroyLock(omp_lock_t *Lock) { unsetLock(Lock); }
196 
197 void setLock(omp_lock_t *Lock) {
  // TODO: Not sure spinning is a good idea here.
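  // Back off between attempts for a period proportional to the block id so
  // that different blocks retry at different times, reducing contention on
  // the lock word.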
199   while (atomicCAS((uint32_t *)Lock, UNSET, SET, __ATOMIC_SEQ_CST) != UNSET) {
200     int32_t start = __nvvm_read_ptx_sreg_clock();
201     int32_t now;
202     for (;;) {
203       now = __nvvm_read_ptx_sreg_clock();
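      // The clock special register is 32 bits wide and wraps around; account
      // for a wrapped reading when computing the elapsed cycles.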
204       int32_t cycles = now > start ? now - start : now + (0xffffffff - start);
205       if (cycles >= OMP_SPIN * mapping::getBlockId()) {
206         break;
207       }
208     }
  } // Keep trying until UNSET is the value observed by the CAS.
210 }
211 
212 #pragma omp end declare variant
213 ///}
214 
215 } // namespace impl
216 
217 void synchronize::init(bool IsSPMD) {
218   if (!IsSPMD)
219     impl::namedBarrierInit();
220 }
221 
222 void synchronize::warp(LaneMaskTy Mask) { impl::syncWarp(Mask); }
223 
224 void synchronize::threads() { impl::syncThreads(); }
225 
226 void fence::team(int Ordering) { impl::fenceTeam(Ordering); }
227 
228 void fence::kernel(int Ordering) { impl::fenceKernel(Ordering); }
229 
230 void fence::system(int Ordering) { impl::fenceSystem(Ordering); }
231 
232 uint32_t atomic::read(uint32_t *Addr, int Ordering) {
233   return impl::atomicRead(Addr, Ordering);
234 }
235 
236 uint32_t atomic::inc(uint32_t *Addr, uint32_t V, int Ordering) {
237   return impl::atomicInc(Addr, V, Ordering);
238 }
239 
240 uint32_t atomic::add(uint32_t *Addr, uint32_t V, int Ordering) {
241   return impl::atomicAdd(Addr, V, Ordering);
242 }
243 
244 uint64_t atomic::add(uint64_t *Addr, uint64_t V, int Ordering) {
245   return impl::atomicAdd(Addr, V, Ordering);
246 }
247 
248 extern "C" {
249 void __kmpc_ordered(IdentTy *Loc, int32_t TId) {}
250 
251 void __kmpc_end_ordered(IdentTy *Loc, int32_t TId) {}
252 
253 int32_t __kmpc_cancel_barrier(IdentTy *Loc, int32_t TId) {
254   __kmpc_barrier(Loc, TId);
255   return 0;
256 }
257 
258 void __kmpc_barrier(IdentTy *Loc, int32_t TId) {
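  // The main thread of a generic mode kernel has no team-mates executing
  // alongside it, so a memory flush is all the barrier needs to do.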
259   if (mapping::isMainThreadInGenericMode())
260     return __kmpc_flush(Loc);
261 
262   if (mapping::isSPMDMode())
263     return __kmpc_barrier_simple_spmd(Loc, TId);
264 
265   impl::namedBarrier();
266 }
267 
268 __attribute__((noinline)) void __kmpc_barrier_simple_spmd(IdentTy *Loc,
269                                                           int32_t TId) {
270   synchronize::threads();
271 }
272 
273 int32_t __kmpc_master(IdentTy *Loc, int32_t TId) {
  return omp_get_thread_num() == 0;
275 }
276 
277 void __kmpc_end_master(IdentTy *Loc, int32_t TId) {}
278 
279 int32_t __kmpc_single(IdentTy *Loc, int32_t TId) {
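  // The "single" construct requires only that exactly one thread executes the
  // region; reusing the master check is a simple way to pick that thread.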
280   return __kmpc_master(Loc, TId);
281 }
282 
283 void __kmpc_end_single(IdentTy *Loc, int32_t TId) {
284   // The barrier is explicitly called.
285 }
286 
287 void __kmpc_flush(IdentTy *Loc) { fence::kernel(__ATOMIC_SEQ_CST); }
288 
289 uint64_t __kmpc_warp_active_thread_mask(void) { return mapping::activemask(); }
290 
291 void __kmpc_syncwarp(uint64_t Mask) { synchronize::warp(Mask); }
292 
293 void __kmpc_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
294   omp_set_lock(reinterpret_cast<omp_lock_t *>(Name));
295 }
296 
297 void __kmpc_end_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
298   omp_unset_lock(reinterpret_cast<omp_lock_t *>(Name));
299 }
300 
301 void omp_init_lock(omp_lock_t *Lock) { impl::initLock(Lock); }
302 
void omp_destroy_lock(omp_lock_t *Lock) { impl::destroyLock(Lock); }
304 
305 void omp_set_lock(omp_lock_t *Lock) { impl::setLock(Lock); }
306 
307 void omp_unset_lock(omp_lock_t *Lock) { impl::unsetLock(Lock); }
308 
309 int omp_test_lock(omp_lock_t *Lock) { return impl::testLock(Lock); }
310 } // extern "C"
311 
312 #pragma omp end declare target
313