//===- Synchronization.cpp - OpenMP Device synchronization API ---- c++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Include all synchronization.
//
//===----------------------------------------------------------------------===//

#include "Synchronization.h"

#include "Debug.h"
#include "Interface.h"
#include "Mapping.h"
#include "State.h"
#include "Types.h"
#include "Utils.h"

#pragma omp declare target

using namespace _OMP;

namespace impl {

/// Atomics
///
///{
/// NOTE: This function needs to be implemented by every target.
uint32_t atomicInc(uint32_t *Address, uint32_t Val, int Ordering);

uint32_t atomicLoad(uint32_t *Address, int Ordering) {
  // Emulate an atomic load via an atomic add of zero, honoring the requested
  // ordering rather than hard-coding sequential consistency.
  return __atomic_fetch_add(Address, 0U, Ordering);
}

void atomicStore(uint32_t *Address, uint32_t Val, int Ordering) {
  __atomic_store_n(Address, Val, Ordering);
}

uint32_t atomicAdd(uint32_t *Address, uint32_t Val, int Ordering) {
  return __atomic_fetch_add(Address, Val, Ordering);
}

uint32_t atomicMax(uint32_t *Address, uint32_t Val, int Ordering) {
  return __atomic_fetch_max(Address, Val, Ordering);
}

uint32_t atomicExchange(uint32_t *Address, uint32_t Val, int Ordering) {
  uint32_t R;
  __atomic_exchange(Address, &Val, &R, Ordering);
  return R;
}

uint32_t atomicCAS(uint32_t *Address, uint32_t Compare, uint32_t Val,
                   int Ordering) {
  // Returns the value observed at *Address; equal to Compare iff the
  // exchange succeeded.
  (void)__atomic_compare_exchange(Address, &Compare, &Val, false, Ordering,
                                  Ordering);
  return Compare;
}

uint64_t atomicAdd(uint64_t *Address, uint64_t Val, int Ordering) {
  return __atomic_fetch_add(Address, Val, Ordering);
}
///}
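// Illustrative sketch (not part of the runtime): the target-independent
// helpers above are sufficient to build higher-level primitives. For
// example, a minimal test-and-set spin lock, assuming 0 means "unset" and
// 1 means "set", could be written purely in terms of atomicCAS and
// atomicStore:
//
//   void sketchLock(uint32_t *L) {
//     while (atomicCAS(L, /*Compare=*/0u, /*Val=*/1u, __ATOMIC_ACQUIRE) != 0u)
//       ; // spin until we observe and claim the unset state
//   }
//   void sketchUnlock(uint32_t *L) { atomicStore(L, 0u, __ATOMIC_RELEASE); }
//
// The NVPTX lock implementation further below follows this pattern, with
// backoff added to reduce contention.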
/// AMDGCN Implementation
///
///{
#pragma omp begin declare variant match(device = {arch(amdgcn)})

uint32_t atomicInc(uint32_t *Address, uint32_t Val, int Ordering) {
  return __builtin_amdgcn_atomic_inc32(Address, Val, Ordering, "");
}

uint32_t SHARED(namedBarrierTracker);

void namedBarrierInit() {
  // There are no global ctors on the device and shared memory is not
  // zero-initialized, so initialize the tracker explicitly.
  atomic::store(&namedBarrierTracker, 0u, __ATOMIC_RELEASE);
}

void namedBarrier() {
  uint32_t NumThreads = omp_get_num_threads();
  // assert(NumThreads % 32 == 0);

  uint32_t WarpSize = mapping::getWarpSize();
  uint32_t NumWaves = NumThreads / WarpSize;

  fence::team(__ATOMIC_ACQUIRE);

  // Named barrier implementation for AMDGCN.
  // Uses two 16-bit unsigned counters: one for the number of waves that have
  // reached the barrier, and one for the number of times the barrier has been
  // passed. These are packed in a single atomically accessed 32-bit integer.
  // Low bits hold the number of waves, assumed zero before this call.
  // High bits count the number of times the barrier has been passed.

  // precondition: NumWaves != 0;
  // invariant: NumWaves * WarpSize == NumThreads;
  // precondition: NumWaves < 0xffffu;

  // Increment the low 16 bits once, using the lowest active thread.
  if (mapping::isLeaderInWarp()) {
    uint32_t load = atomic::add(&namedBarrierTracker, 1,
                                __ATOMIC_RELAXED); // commutative

    // Record the number of times the barrier has been passed.
    uint32_t generation = load & 0xffff0000u;

    if ((load & 0x0000ffffu) == (NumWaves - 1)) {
      // Reached NumWaves in the low bits, so this is the last wave.
      // Set the low bits to zero and increment the high bits.
      load += 0x00010000u; // wrap is safe
      load &= 0xffff0000u; // because bits zeroed second

      // Reset the wave counter and release the waiting waves.
      atomic::store(&namedBarrierTracker, load, __ATOMIC_RELAXED);
    } else {
      // More waves still to go, spin until the generation counter changes.
      do {
        __builtin_amdgcn_s_sleep(0);
        load = atomic::load(&namedBarrierTracker, __ATOMIC_RELAXED);
      } while ((load & 0xffff0000u) == generation);
    }
  }
  fence::team(__ATOMIC_RELEASE);
}

void syncWarp(__kmpc_impl_lanemask_t) {
  // AMDGCN doesn't need to sync threads in a warp.
}

void syncThreads() { __builtin_amdgcn_s_barrier(); }

void fenceTeam(int Ordering) { __builtin_amdgcn_fence(Ordering, "workgroup"); }

void fenceKernel(int Ordering) { __builtin_amdgcn_fence(Ordering, "agent"); }

void fenceSystem(int Ordering) { __builtin_amdgcn_fence(Ordering, ""); }

#pragma omp end declare variant
///}
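// Worked example (illustrative, not executed): assume NumWaves == 4 and a
// current tracker value of 0x00020000, i.e. generation 2 with no waves
// arrived. The first three wave leaders observe low halves 0, 1, and 2 after
// their increment and spin on the generation in the high half. The fourth
// leader observes low half 3 == NumWaves - 1, so it bumps the generation and
// clears the arrival count in one step:
//
//   load = 0x00020003;
//   load += 0x00010000u; // -> 0x00030003
//   load &= 0xffff0000u; // -> 0x00030000, stored back to the tracker
//
// The spinning leaders see the high half change from 0x00020000 to
// 0x00030000 and fall through the barrier.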
/// NVPTX Implementation
///
///{
#pragma omp begin declare variant match(                                      \
    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})

uint32_t atomicInc(uint32_t *Address, uint32_t Val, int Ordering) {
  return __nvvm_atom_inc_gen_ui(Address, Val);
}

void namedBarrierInit() {}

void namedBarrier() {
  uint32_t NumThreads = omp_get_num_threads();
  ASSERT(NumThreads % 32 == 0);

  // The named barrier for active parallel threads of a team in an L1 parallel
  // region to synchronize with each other.
  constexpr int BarrierNo = 7;
  asm volatile("barrier.sync %0, %1;"
               :
               : "r"(BarrierNo), "r"(NumThreads)
               : "memory");
}

void fenceTeam(int) { __nvvm_membar_cta(); }

void fenceKernel(int) { __nvvm_membar_gl(); }

void fenceSystem(int) { __nvvm_membar_sys(); }

void syncWarp(__kmpc_impl_lanemask_t Mask) { __nvvm_bar_warp_sync(Mask); }

void syncThreads() {
  constexpr int BarrierNo = 8;
  asm volatile("barrier.sync %0;" : : "r"(BarrierNo) : "memory");
}

constexpr uint32_t OMP_SPIN = 1000;
constexpr uint32_t UNSET = 0;
constexpr uint32_t SET = 1;

// TODO: This seems to hide a bug in the declare variant handling. If it is
// called before it is defined here, the overload won't happen. Investigate
// later!
void unsetLock(omp_lock_t *Lock) {
  (void)atomicExchange((uint32_t *)Lock, UNSET, __ATOMIC_SEQ_CST);
}

int testLock(omp_lock_t *Lock) {
  // Read the lock state via an atomic add of zero.
  return atomicAdd((uint32_t *)Lock, 0u, __ATOMIC_SEQ_CST);
}

void initLock(omp_lock_t *Lock) { unsetLock(Lock); }

void destroyLock(omp_lock_t *Lock) { unsetLock(Lock); }

void setLock(omp_lock_t *Lock) {
  // TODO: not sure spinning is a good idea here...
  while (atomicCAS((uint32_t *)Lock, UNSET, SET, __ATOMIC_SEQ_CST) != UNSET) {
    // Back off before retrying the CAS; wait for a block-dependent number of
    // cycles, handling wrap-around of the clock register.
    int32_t start = __nvvm_read_ptx_sreg_clock();
    int32_t now;
    for (;;) {
      now = __nvvm_read_ptx_sreg_clock();
      int32_t cycles = now > start ? now - start : now + (0xffffffff - start);
      if (cycles >= OMP_SPIN * mapping::getBlockId()) {
        break;
      }
    }
  } // wait for 0 to be the read value
}

#pragma omp end declare variant
///}

} // namespace impl

void synchronize::init(bool IsSPMD) {
  if (!IsSPMD)
    impl::namedBarrierInit();
}

void synchronize::warp(LaneMaskTy Mask) { impl::syncWarp(Mask); }

void synchronize::threads() { impl::syncThreads(); }

void fence::team(int Ordering) { impl::fenceTeam(Ordering); }

void fence::kernel(int Ordering) { impl::fenceKernel(Ordering); }

void fence::system(int Ordering) { impl::fenceSystem(Ordering); }

uint32_t atomic::load(uint32_t *Addr, int Ordering) {
  return impl::atomicLoad(Addr, Ordering);
}

void atomic::store(uint32_t *Addr, uint32_t V, int Ordering) {
  impl::atomicStore(Addr, V, Ordering);
}

uint32_t atomic::inc(uint32_t *Addr, uint32_t V, int Ordering) {
  return impl::atomicInc(Addr, V, Ordering);
}

uint32_t atomic::add(uint32_t *Addr, uint32_t V, int Ordering) {
  return impl::atomicAdd(Addr, V, Ordering);
}

uint64_t atomic::add(uint64_t *Addr, uint64_t V, int Ordering) {
  return impl::atomicAdd(Addr, V, Ordering);
}

extern "C" {
void __kmpc_ordered(IdentTy *Loc, int32_t TId) {}

void __kmpc_end_ordered(IdentTy *Loc, int32_t TId) {}

int32_t __kmpc_cancel_barrier(IdentTy *Loc, int32_t TId) {
  __kmpc_barrier(Loc, TId);
  return 0;
}

void __kmpc_barrier(IdentTy *Loc, int32_t TId) {
  if (mapping::isMainThreadInGenericMode())
    return __kmpc_flush(Loc);

  if (mapping::isSPMDMode())
    return __kmpc_barrier_simple_spmd(Loc, TId);

  impl::namedBarrier();
}

__attribute__((noinline)) void __kmpc_barrier_simple_spmd(IdentTy *Loc,
                                                          int32_t TId) {
  synchronize::threads();
}

int32_t __kmpc_master(IdentTy *Loc, int32_t TId) {
  // The master region is executed by thread 0 of the team.
  return omp_get_thread_num() == 0;
}

void __kmpc_end_master(IdentTy *Loc, int32_t TId) {}

int32_t __kmpc_single(IdentTy *Loc, int32_t TId) {
  return __kmpc_master(Loc, TId);
}

void __kmpc_end_single(IdentTy *Loc, int32_t TId) {
  // The barrier is explicitly called.
}

void __kmpc_flush(IdentTy *Loc) { fence::kernel(__ATOMIC_SEQ_CST); }

uint64_t __kmpc_warp_active_thread_mask(void) { return mapping::activemask(); }

void __kmpc_syncwarp(uint64_t Mask) { synchronize::warp(Mask); }

void __kmpc_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
  omp_set_lock(reinterpret_cast<omp_lock_t *>(Name));
}

void __kmpc_end_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
  omp_unset_lock(reinterpret_cast<omp_lock_t *>(Name));
}

void omp_init_lock(omp_lock_t *Lock) { impl::initLock(Lock); }

void omp_destroy_lock(omp_lock_t *Lock) { impl::destroyLock(Lock); }

void omp_set_lock(omp_lock_t *Lock) { impl::setLock(Lock); }

void omp_unset_lock(omp_lock_t *Lock) { impl::unsetLock(Lock); }

int omp_test_lock(omp_lock_t *Lock) { return impl::testLock(Lock); }
} // extern "C"

#pragma omp end declare target
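// Illustrative lowering sketch (an assumption, simplified): on the device, a
//   #pragma omp critical
// region reaches the entry points above roughly as follows. The
// CriticalNameTy storage is zero-initialized, which matches the UNSET lock
// state, so no explicit omp_init_lock call is needed:
//
//   static CriticalNameTy CriticalLock; // hypothetical compiler-emitted name
//   __kmpc_critical(Loc, TId, &CriticalLock);     // omp_set_lock -> setLock
//   /* ... guarded region ... */
//   __kmpc_end_critical(Loc, TId, &CriticalLock); // omp_unset_lock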