| /oneTBB/src/tbb/ |
| semaphore.cpp |
     63  __TBB_ASSERT( (uintptr_t)__TBB_init_binsem==(uintptr_t)&init_binsem_using_event, nullptr);  in init_concmon_module()
     65  __TBB_ASSERT( (uintptr_t)__TBB_init_binsem!=(uintptr_t)&init_binsem_using_event, nullptr);  in init_concmon_module()
     66  … __TBB_ASSERT( (uintptr_t)__TBB_acquire_binsem!=(uintptr_t)&acquire_binsem_using_event, nullptr);  in init_concmon_module()
     67  … __TBB_ASSERT( (uintptr_t)__TBB_release_binsem!=(uintptr_t)&release_binsem_using_event, nullptr);  in init_concmon_module()
     75  if( (uintptr_t)__TBB_init_binsem!=(uintptr_t)&init_binsem_using_event )  in binary_semaphore()
     80  if( (uintptr_t)__TBB_init_binsem==(uintptr_t)&init_binsem_using_event )  in ~binary_semaphore()
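
The semaphore.cpp hits show run-time selection between two binary-semaphore implementations, verified by comparing function-pointer addresses as integers. A minimal sketch of that idiom (the names here are illustrative, not TBB's):

```cpp
#include <cassert>
#include <cstdint>

static void init_fast() {}
static void init_fallback() {}

// Starts out pointing at the fallback; an init routine may rebind it.
static void (*init_hook)() = init_fallback;

void init_module(bool fast_path_available) {
    if (fast_path_available)
        init_hook = init_fast;
    // Casting both sides to uintptr_t turns the check into a plain integral
    // comparison, mirroring the assertions in init_concmon_module().
    assert(fast_path_available
           ? (std::uintptr_t)init_hook != (std::uintptr_t)&init_fallback
           : (std::uintptr_t)init_hook == (std::uintptr_t)&init_fallback);
}
```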
|
| address_waiter.cpp |
     31  address_context(void* address, std::uintptr_t context) :  in address_context()
     36  std::uintptr_t my_context{0};
     65  std::uintptr_t tag = std::uintptr_t(address);  in get_address_waiter()
     69  void wait_on_address(void* address, d1::delegate_base& predicate, std::uintptr_t context) {  in wait_on_address()
     74  void notify_by_address(void* address, std::uintptr_t target_context) {  in notify_by_address()
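
wait_on_address() keys parked threads by the waited-on address itself, converted to std::uintptr_t (line 65). A sketch of the bucketing such a tag enables; the bucket count and the shift are assumptions of this sketch, not the oneTBB values:

```cpp
#include <cstddef>
#include <cstdint>
#include <mutex>

struct WaiterBucket { std::mutex m; /* parked waiters would live here */ };

static constexpr std::size_t bucket_count = 64;  // assumed power of two
static WaiterBucket buckets[bucket_count];

WaiterBucket& bucket_for(void* address) {
    std::uintptr_t tag = std::uintptr_t(address);
    // Drop the low bits, which vary little between nearby objects,
    // before folding the tag into a bucket index.
    return buckets[(tag >> 4) & (bucket_count - 1)];
}
```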
|
| allocator.cpp |
    219  std::uintptr_t base = reinterpret_cast<std::uintptr_t>(std::malloc(space));  in std_cache_aligned_allocate()
    223  std::uintptr_t result = (base + nfs_size) & ~(nfs_size - 1);  in std_cache_aligned_allocate()
    225  …__TBB_ASSERT((result - base) >= sizeof(std::uintptr_t), "Cannot store a base pointer to the header…  in std_cache_aligned_allocate()
    229  (reinterpret_cast<std::uintptr_t*>(result))[-1] = base;  in std_cache_aligned_allocate()
    241  …__TBB_ASSERT(reinterpret_cast<std::uintptr_t>(p) >= 0x4096, "attempt to free block not obtained fr…  in std_cache_aligned_deallocate()
    243  std::uintptr_t base = (reinterpret_cast<std::uintptr_t*>(p))[-1];  in std_cache_aligned_deallocate()
    244  …__TBB_ASSERT(((base + nfs_size) & ~(nfs_size - 1)) == reinterpret_cast<std::uintptr_t>(p), "Incorr…  in std_cache_aligned_deallocate()
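
These lines implement the classic over-allocate-and-stash pattern: round the raw pointer up to the alignment boundary and store the original malloc() result one word below the returned pointer, so deallocation can recover it. A self-contained sketch with simplified error handling; it relies on malloc()'s natural alignment to leave at least a word of headroom, which is what the line-225 assert verifies:

```cpp
#include <cstdint>
#include <cstdlib>

void* cache_aligned_allocate(std::size_t size, std::size_t alignment /* power of two */) {
    std::size_t space = size + alignment;  // room for header + padding
    std::uintptr_t base = reinterpret_cast<std::uintptr_t>(std::malloc(space));
    if (!base) return nullptr;
    // Adding a full 'alignment' before masking guarantees headroom in front
    // of 'result'; malloc's own alignment makes it at least one word.
    std::uintptr_t result = (base + alignment) & ~(alignment - 1);
    reinterpret_cast<std::uintptr_t*>(result)[-1] = base;  // stash the header
    return reinterpret_cast<void*>(result);
}

void cache_aligned_deallocate(void* p) {
    if (!p) return;
    // Recover the original malloc() result from the hidden header word.
    std::free(reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t*>(p)[-1]));
}
```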
|
| arena.h |
    140  static const std::uintptr_t SET = 1;
    141  static const std::uintptr_t UNSET = 0;
    142  std::atomic<std::uintptr_t> my_state{UNSET};
    145  std::uintptr_t state = my_state.load(std::memory_order_acquire);  in test_and_set()
    166  std::uintptr_t busy = std::uintptr_t(&busy);  in try_clear_if()
    167  std::uintptr_t state = my_state.load(std::memory_order_acquire);  in try_clear_if()
    267  std::uintptr_t my_guard;
    374  std::uintptr_t calculate_stealing_threshold();
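
Line 166 is the most interesting hit: the address of a stack local is used as a transient "busy" marker, a value guaranteed to differ from SET, UNSET, and any other thread's marker. A simplified sketch of how such a three-state flag could work; the CAS protocol below is an assumption of this sketch, not a transcription of arena.h:

```cpp
#include <atomic>
#include <cstdint>

class concurrent_flag {
    static const std::uintptr_t SET = 1;
    static const std::uintptr_t UNSET = 0;
    std::atomic<std::uintptr_t> my_state{UNSET};
public:
    bool test_and_set() {
        std::uintptr_t expected = UNSET;
        return my_state.compare_exchange_strong(expected, SET);
    }
    template <typename Pred>
    bool try_clear_if(Pred condition) {
        std::uintptr_t busy = std::uintptr_t(&busy);  // unique non-0/1 value
        std::uintptr_t state = my_state.load(std::memory_order_acquire);
        if (state != SET) return false;
        // Claim the flag with the busy marker, evaluate, then clear or restore.
        if (!my_state.compare_exchange_strong(state, busy)) return false;
        if (condition()) {
            my_state.store(UNSET, std::memory_order_release);
            return true;
        }
        my_state.store(SET, std::memory_order_release);
        return false;
    }
};
```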
|
| scheduler_common.h |
    310  extern std::atomic<std::uintptr_t> the_context_state_propagation_epoch;
    338  static const std::uintptr_t venom = tbb::detail::select_size_t_constant<0xDEADBEEFU, 0xDDEEAADDDEAD…
    340  inline void poison_value(std::uintptr_t& val) { val = venom; }  in poison_value()
    342  inline void poison_value(std::atomic<std::uintptr_t>& val) { val.store(venom, std::memory_order_rel…  in poison_value()
    345  inline bool is_alive(std::uintptr_t v) { return v != venom; }  in is_alive()
    477  std::uintptr_t m_stealing_threshold{};  in alignas()
    536  void set_stealing_threshold(std::uintptr_t stealing_threshold) {  in alignas()
    568  inline std::uintptr_t calculate_stealing_threshold(std::uintptr_t base, std::size_t stack_size) {  in calculate_stealing_threshold()
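
poison_value()/is_alive() implement debug-mode poisoning: fields of dead structures are overwritten with a distinctive pointer-sized constant so stale reads trip asserts. A sketch of the idiom; the 64-bit constant below is illustrative only, since the real one is truncated at line 338 above:

```cpp
#include <atomic>
#include <cstdint>

// Pick a pointer-width-appropriate poison pattern (illustrative values).
constexpr std::uintptr_t venom =
    sizeof(std::uintptr_t) == 8 ? 0xDEADBEEFDEADBEEFULL : 0xDEADBEEFU;

inline void poison_value(std::uintptr_t& val) { val = venom; }
inline void poison_value(std::atomic<std::uintptr_t>& val) {
    val.store(venom, std::memory_order_relaxed);
}
// A poisoned field reads back as venom; anything else is presumed alive.
inline bool is_alive(std::uintptr_t v) { return v != venom; }
```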
|
| co_context.h |
    195  std::uintptr_t addr = std::uintptr_t(arg);  in coroutine_thread_func()
    313  …std::uintptr_t stack_ptr = (std::uintptr_t)mmap(nullptr, protected_stack_size, PROT_NONE, MAP_PRIV…  in create_coroutine()
    335  std::uintptr_t addr = std::uintptr_t(arg);  in create_coroutine()
    356  … munmap((void*)((std::uintptr_t)c.my_stack - REG_PAGE_SIZE), c.my_stack_size + 2 * REG_PAGE_SIZE);  in destroy_coroutine()
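
create_coroutine() maps the whole stack region PROT_NONE and makes only the interior usable, leaving an inaccessible guard page at each end; the munmap at line 356 undoes exactly that layout. A Linux-flavored sketch with simplified error handling:

```cpp
#include <cstdint>
#include <sys/mman.h>

void* allocate_guarded_stack(std::size_t stack_size, std::size_t page_size) {
    std::size_t total = stack_size + 2 * page_size;
    // Reserve everything inaccessible first.
    void* region = mmap(nullptr, total, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (region == MAP_FAILED) return nullptr;
    // Enable access on the interior only; first and last pages stay PROT_NONE
    // so stack overflow/underflow faults immediately.
    void* stack = (void*)((std::uintptr_t)region + page_size);
    if (mprotect(stack, stack_size, PROT_READ | PROT_WRITE) != 0) {
        munmap(region, total);
        return nullptr;
    }
    // Release later with munmap((void*)((uintptr_t)stack - page_size), total),
    // matching the arithmetic in destroy_coroutine() above.
    return stack;
}
```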
|
| /oneTBB/src/tbbmalloc/ |
| backref.cpp |
     65  static const size_t bytes = sizeof(uintptr_t)>4? 256*1024 : 8*1024;
    177  for (BackRefBlock *bl = newBl; (uintptr_t)bl < (uintptr_t)newBl + blockSpaceSize;  in requestNewSpace()
    281  ((uintptr_t)blockToUse->freeList>=(uintptr_t)blockToUse  in newBackRef()
    282  && (uintptr_t)blockToUse->freeList <  in newBackRef()
    289  MALLOC_ASSERT((uintptr_t)blockToUse->bumpPtr  in newBackRef()
    310  uintptr_t offset =  in newBackRef()
    311  ((uintptr_t)toUse - ((uintptr_t)blockToUse + sizeof(BackRefBlock)))/sizeof(void*);  in newBackRef()
    328  MALLOC_ASSERT(((uintptr_t)&backRefEntry >(uintptr_t)currBlock &&  in removeBackRef()
    329  (uintptr_t)&backRefEntry <(uintptr_t)currBlock + slabSize), ASSERT_TEXT);  in removeBackRef()
    335  uintptr_t backRefEntryValue = (uintptr_t)backRefEntry.load(std::memory_order_relaxed);  in removeBackRef()
    [all …]
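
Lines 310-311 recover a slot index from raw addresses: subtract the block header, then divide by the entry size. Unpacked as a sketch, with the block type simplified to a stub:

```cpp
#include <cstdint>

struct BackRefBlock { /* header fields elided */ void* bumpPtr; };

// Index of 'toUse' within the array of void*-sized slots that follows
// the BackRefBlock header.
std::uintptr_t slotOffset(void* toUse, BackRefBlock* block) {
    return ((std::uintptr_t)toUse -
            ((std::uintptr_t)block + sizeof(BackRefBlock))) / sizeof(void*);
}
```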
|
| large_objects.h |
    154  std::atomic<uintptr_t> oldest;
    157  uintptr_t lastCleanedAge;
    169  uintptr_t lastGet;
    206  void forgetOutdatedState(uintptr_t currTime);
    223  void updateAgeThreshold( uintptr_t currTime ) {  in updateAgeThreshold()
    230  void setLastGet( uintptr_t newLastGet ) {  in setLastGet()
    263  bool regularCleanup(ExtMemoryPool *extMemPool, uintptr_t currAge, bool doThreshDecr);
    322  std::atomic<uintptr_t> cacheCurrTime;
    344  bool isCleanupNeededOnRange(uintptr_t range, uintptr_t currTime);
    347  bool doCleanup(uintptr_t currTime, bool doThreshDecr);
    [all …]
|
| backend.cpp |
     52  void Backend::UsedAddressRange::registerAlloc(uintptr_t left, uintptr_t right)  in registerAlloc()
     64  void Backend::UsedAddressRange::registerFree(uintptr_t left, uintptr_t right)  in registerFree()
    123  usedAddrRange.registerAlloc((uintptr_t)res, (uintptr_t)res+size);  in allocRawMem()
    146  usedAddrRange.registerFree((uintptr_t)object, (uintptr_t)object + size);  in freeRawMem()
    421  uintptr_t rightNew = (uintptr_t)newB + size;  in getFromBin()
    422  uintptr_t rightCurr = (uintptr_t)curr + szBlock;  in getFromBin()
    586  uintptr_t fBlockEnd = (uintptr_t)fBlock + fBlock->sizeTmp;  in splitBlock()
    597  size_t leftSize = (uintptr_t)newBlock - (uintptr_t)fBlock;  in splitBlock()
    980  const size_t userOffset = (uintptr_t)ptr - (uintptr_t)oldRegion;  in remap()
   1120  MALLOC_ASSERT((uintptr_t)memRegion < (uintptr_t)resBlock, ASSERT_TEXT);  in doCoalesc()
    [all …]
|
| backend.h |
    205  static const uintptr_t ADDRESS_UPPER_BOUND = UINTPTR_MAX;
    207  std::atomic<uintptr_t> leftBound,
    213  void registerAlloc(uintptr_t left, uintptr_t right);
    214  void registerFree(uintptr_t left, uintptr_t right);
    219  const uintptr_t p = (uintptr_t)ptr;  in inRange()
    228  void registerAlloc(uintptr_t, uintptr_t) {}  in registerAlloc() argument
    229  void registerFree(uintptr_t, uintptr_t) {}  in registerFree() argument
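
UsedAddressRange keeps atomic low/high watermarks over every address the backend has handed out, so inRange() can cheaply reject pointers that were never produced here (see the registerAlloc/registerFree calls in backend.cpp above). A sketch of the idea; the widening protocol and relaxed memory orders are assumptions of this sketch:

```cpp
#include <atomic>
#include <cstdint>

class UsedAddressRange {
    static const std::uintptr_t ADDRESS_UPPER_BOUND = UINTPTR_MAX;
    std::atomic<std::uintptr_t> leftBound{ADDRESS_UPPER_BOUND};
    std::atomic<std::uintptr_t> rightBound{0};
public:
    void registerAlloc(std::uintptr_t left, std::uintptr_t right) {
        // Monotonically widen each bound; concurrent races only widen further.
        std::uintptr_t lo = leftBound.load(std::memory_order_relaxed);
        while (left < lo && !leftBound.compare_exchange_weak(lo, left)) {}
        std::uintptr_t hi = rightBound.load(std::memory_order_relaxed);
        while (right > hi && !rightBound.compare_exchange_weak(hi, right)) {}
    }
    bool inRange(void* ptr) const {
        const std::uintptr_t p = (std::uintptr_t)ptr;
        return leftBound.load(std::memory_order_relaxed) <= p &&
               p <= rightBound.load(std::memory_order_relaxed);
    }
};
```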
|
| large_objects.cpp |
     81  uintptr_t currTime;
     97  uintptr_t lclTime;
    103  uintptr_t cleanTime;
    107  uintptr_t lastGetOpTime, lastGet;
    152  uintptr_t currTime;
    163  uintptr_t currTime;
    253  uintptr_t age = head->age;  in getFromPutList()
    377  uintptr_t endTime = startTime + timeRange;  in operator ()()
    635  uintptr_t nextAge = 0;  in cleanToThreshold()
    795  inline bool LargeObjectCache::isCleanupNeededOnRange(uintptr_t range, uintptr_t currTime)  in isCleanupNeededOnRange()
    [all …]
|
| shared_utils.h |
     38  static inline T alignDown(T arg, uintptr_t alignment) {  in alignDown()
     39  return T( (uintptr_t)arg & ~(alignment-1));  in alignDown()
     42  static inline T alignUp (T arg, uintptr_t alignment) {  in alignUp()
     43  return T(((uintptr_t)arg+(alignment-1)) & ~(alignment-1));  in alignUp()
     47  static inline T alignUpGeneric(T arg, uintptr_t alignment) {  in alignUpGeneric()
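
alignDown()/alignUp() are the standard mask tricks; they are valid only for power-of-two alignments, which is presumably why alignUpGeneric() exists alongside them. The two helpers in full, with a usage check:

```cpp
#include <cassert>
#include <cstdint>

template <typename T>
static inline T alignDown(T arg, std::uintptr_t alignment) {
    return T((std::uintptr_t)arg & ~(alignment - 1));      // clear low bits
}
template <typename T>
static inline T alignUp(T arg, std::uintptr_t alignment) {
    return T(((std::uintptr_t)arg + (alignment - 1)) & ~(alignment - 1));
}

int main() {
    assert(alignUp<std::uintptr_t>(100, 64) == 128);
    assert(alignDown<std::uintptr_t>(100, 64) == 64);
    assert(alignUp<std::uintptr_t>(128, 64) == 128);  // already aligned: unchanged
}
```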
|
| Customize.h |
     50  inline intptr_t BitScanRev(uintptr_t x) {  in BitScanRev()
     55  static inline bool isAligned(T* arg, uintptr_t alignment) {  in isAligned()
     59  static inline bool isPowerOfTwo(uintptr_t arg) {  in isPowerOfTwo()
     62  static inline bool isPowerOfTwoAtLeast(uintptr_t arg, uintptr_t power2) {  in isPowerOfTwoAtLeast()
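
The predicates above rest on the x & (x-1) trick, which clears the lowest set bit and therefore yields zero exactly for powers of two. A behavioral sketch; isPowerOfTwoAtLeast() in particular may be implemented differently in Customize.h:

```cpp
#include <cstdint>

static inline bool isPowerOfTwo(std::uintptr_t arg) {
    // Zero is excluded explicitly; otherwise 0 & (0-1) == 0 would pass.
    return arg && !(arg & (arg - 1));
}
template <typename T>
static inline bool isAligned(T* arg, std::uintptr_t alignment) {
    return 0 == ((std::uintptr_t)arg & (alignment - 1));
}
static inline bool isPowerOfTwoAtLeast(std::uintptr_t arg, std::uintptr_t power2) {
    return arg >= power2 && isPowerOfTwo(arg);
}
```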
|
| tbbmalloc_internal.h |
    115  const uintptr_t slabSize = 16*1024;
    179  static const unsigned SZ = (NUM-1)/(CHAR_BIT*sizeof(uintptr_t))+1;
    180  static const unsigned WORD_LEN = CHAR_BIT*sizeof(uintptr_t);
    182  std::atomic<uintptr_t> mask[SZ];
    203  … uintptr_t actualMask = mask[idx].load(std::memory_order_relaxed) & (((uintptr_t)1<<pos) - 1);  in getMinTrue()
    305  typedef MainIndexSelect<4 < sizeof(uintptr_t)>::main_type main_t;
    361  uintptr_t age; // age of block while in cache
    704  const uintptr_t threadStackSz = 2*1024*1024;
    707  … uintptr_t xi = (uintptr_t)autoObjPtr.load(std::memory_order_relaxed), yi = (uintptr_t)&dummy;
    708  uintptr_t diffPtr = xi > yi ? xi - yi : yi - xi;
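
Lines 704-708 are a portability heuristic: to guess whether a pointer refers to the current thread's stack, measure its distance from a local variable and compare against an assumed maximal stack size. Unpacked:

```cpp
#include <cstdint>

const std::uintptr_t threadStackSz = 2 * 1024 * 1024;  // assumed 2 MB cap

bool probablyOnThisStack(const void* ptr) {
    int dummy;  // lives on this thread's stack by construction
    std::uintptr_t xi = (std::uintptr_t)ptr, yi = (std::uintptr_t)&dummy;
    // Absolute distance, ordered explicitly: uintptr_t cannot go negative.
    std::uintptr_t diffPtr = xi > yi ? xi - yi : yi - xi;
    return diffPtr < threadStackSz;
}
```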
|
| MapMemory.h |
     61  hint = hint ? (void*)((uintptr_t)hint - bytes) : hint;  in mmapTHP()
     86  uintptr_t offset = 0;  in mmapTHP()
     90  offset = HUGE_PAGE_SIZE - ((uintptr_t)result & (HUGE_PAGE_SIZE - 1));  in mmapTHP()
     94  result = (void*)((uintptr_t)result + offset);  in mmapTHP()
     98  munmap((void*)((uintptr_t)result + bytes), HUGE_PAGE_SIZE - offset);  in mmapTHP()
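
mmapTHP() over-maps by one huge page, advances the result to the next huge-page boundary, and unmaps the slack at both ends so the kernel can back the remainder with transparent huge pages. A sketch of that trimming (Linux-only; the 2 MB huge-page size is the common x86-64 value, assumed here):

```cpp
#include <cstdint>
#include <sys/mman.h>

const std::uintptr_t HUGE_PAGE_SIZE = 2 * 1024 * 1024;

void* mmapHugeAligned(std::size_t bytes) {
    void* result = mmap(nullptr, bytes + HUGE_PAGE_SIZE, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (result == MAP_FAILED) return nullptr;
    std::uintptr_t offset = 0;
    if ((std::uintptr_t)result & (HUGE_PAGE_SIZE - 1)) {
        // Distance from 'result' up to the next huge-page boundary.
        offset = HUGE_PAGE_SIZE - ((std::uintptr_t)result & (HUGE_PAGE_SIZE - 1));
        munmap(result, offset);                              // leading slack
        result = (void*)((std::uintptr_t)result + offset);
    }
    // Exactly HUGE_PAGE_SIZE - offset bytes remain past result + bytes.
    munmap((void*)((std::uintptr_t)result + bytes), HUGE_PAGE_SIZE - offset);
    return result;
}
```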
|
| frontend.cpp |
    398  MALLOC_ASSERT((uintptr_t)object - (uintptr_t)this >= sizeof(Block), msg);  in checkFreePrecond()
   1838  return slabSize - ((uintptr_t)bumpPtr - (uintptr_t)this);  in availableSize()
   1911  MALLOC_ASSERT((uintptr_t)ptr>=(uintptr_t)this+sizeof(StartupBlock)  in free()
   1922  } else if ((uintptr_t)ptr + StartupBlock::msize(ptr) == (uintptr_t)bumpPtr) {  in free()
   1925  MALLOC_ASSERT((uintptr_t)newBump>(uintptr_t)this+sizeof(StartupBlock),  in free()
   2127  if ( (uintptr_t)bumpPtr < (uintptr_t)this+sizeof(Block) ) {  in allocateFromBumpPtr()
   2170  blSize - ((uintptr_t)object - (uintptr_t)findObjectToFree(object));  in findObjectSize()
   2408  copySize = lmb->unalignedSize-((uintptr_t)ptr-(uintptr_t)lmb);  in reallocAligned()
   2458  return 0 == ((uintptr_t)this + slabSize - (uintptr_t)object) % objectSize;  in isProperlyPlaced()
   2466  uint16_t offset = (uintptr_t)this + slabSize - (uintptr_t)address;  in findAllocatedObject()
    [all …]
|
| /oneTBB/include/oneapi/tbb/detail/ |
| _machine.h |
    113  inline uintptr_t clz(unsigned int x) { return static_cast<uintptr_t>(__builtin_clz(x)); }  in clz()
    114  inline uintptr_t clz(unsigned long int x) { return static_cast<uintptr_t>(__builtin_clzl(x)); }  in clz()
    115  …inline uintptr_t clz(unsigned long long int x) { return static_cast<uintptr_t>(__builtin_clzll(x))…  in clz()
    120  static inline uintptr_t bit_scan_reverse(uintptr_t i) {  in bit_scan_reverse()
    129  constexpr std::uintptr_t number_of_bits() {  in number_of_bits()
    134  static inline uintptr_t machine_log2(uintptr_t x) {  in machine_log2()
    141  uintptr_t j, i = x;  in machine_log2()
    168  if( uintptr_t tmp = x >> 16 ) { x = tmp; result += 16; }  in machine_log2()
    169  if( uintptr_t tmp = x >> 8 ) { x = tmp; result += 8; }  in machine_log2()
    170  if( uintptr_t tmp = x >> 4 ) { x = tmp; result += 4; }  in machine_log2()
    [all …]
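
Lines 168-170 are the middle of a shift-and-accumulate log2 fallback for targets without a clz builtin: probe progressively smaller shifts and accumulate the bit position. The full ladder, reconstructed as a sketch (the real _machine.h may structure it differently):

```cpp
#include <cstdint>

static inline std::uintptr_t machine_log2(std::uintptr_t x) {
    std::uintptr_t result = 0;
#if UINTPTR_MAX > 0xFFFFFFFFu        // 64-bit pointers: one extra probe
    if (std::uintptr_t tmp = x >> 32) { x = tmp; result += 32; }
#endif
    if (std::uintptr_t tmp = x >> 16) { x = tmp; result += 16; }
    if (std::uintptr_t tmp = x >> 8)  { x = tmp; result += 8; }
    if (std::uintptr_t tmp = x >> 4)  { x = tmp; result += 4; }
    if (std::uintptr_t tmp = x >> 2)  { x = tmp; result += 2; }
    if (x >> 1)                       {          result += 1; }
    return result;  // index of the highest set bit; machine_log2(1) == 0
}
```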
|
| _aggregator.h |
     38  std::atomic<uintptr_t> status;
     69  const uintptr_t status = op->status.load(std::memory_order_relaxed);
     96  spin_wait_while_eq(op->status, uintptr_t(0));
    115  spin_wait_until_eq(handler_busy, uintptr_t(0));  in start_handle_operations()
    138  std::atomic<uintptr_t> handler_busy;
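
The aggregator hands submitted operations to a single handler thread; the submitter then spins on a per-operation status word, as at line 96. A sketch of the waiting side; the real spin_wait_while_eq backs off adaptively rather than just yielding, and the status encoding here is an assumption:

```cpp
#include <atomic>
#include <cstdint>
#include <thread>

struct operation {
    std::atomic<std::uintptr_t> status{0};  // 0 while pending, nonzero when done
};

void spin_wait_while_eq(std::atomic<std::uintptr_t>& a, std::uintptr_t v) {
    while (a.load(std::memory_order_acquire) == v) std::this_thread::yield();
}

void wait_until_handled(operation* op) {
    // The handler stores a nonzero value with release semantics when finished,
    // mirroring the wait at line 96 above.
    spin_wait_while_eq(op->status, std::uintptr_t(0));
}
```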
|
| _waitable_atomic.h |
     26  …D_FUNC wait_on_address(void* address, d1::delegate_base& wakeup_condition, std::uintptr_t context);
     27  TBB_EXPORT void __TBB_EXPORTED_FUNC notify_by_address(void* address, std::uintptr_t context);
     35  void adaptive_wait_on_address(void* address, Predicate wakeup_condition, std::uintptr_t context) {  in adaptive_wait_on_address()
     60  void wait(T old, std::uintptr_t context, std::memory_order order) {  in wait()
|
| /oneTBB/include/oneapi/tbb/ |
| collaborative_call_once.h |
     35  constexpr std::uintptr_t collaborative_once_max_references = max_nfs_size;
     36  constexpr std::uintptr_t collaborative_once_references_mask = collaborative_once_max_references-1;
     87  std::uintptr_t to_bits() {  in alignas()
     88  return reinterpret_cast<std::uintptr_t>(this);  in alignas()
     91  static collaborative_once_runner* from_bits(std::uintptr_t bits) {  in alignas()
    132  enum state : std::uintptr_t {
    139  std::atomic<std::uintptr_t> m_state{ state::uninitialized };
    144  void set_completion_state(std::uintptr_t runner_bits, std::uintptr_t desired) {  in set_completion_state()
    145  std::uintptr_t expected = runner_bits;  in set_completion_state()
    157  std::uintptr_t expected = m_state.load(std::memory_order_acquire);  in do_collaborative_call_once()
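
Because the runner object is aligned to max_nfs_size, the low bits of its address are always zero, so m_state can pack the runner pointer together with a reference count of collaborating waiters (the mask at line 36). A sketch of that packing; the 64-byte alignment and the helper names are assumptions of this sketch:

```cpp
#include <cstdint>

constexpr std::uintptr_t max_references = 64;            // = assumed alignment
constexpr std::uintptr_t references_mask = max_references - 1;

struct alignas(64) once_runner {};  // alignment frees the low 6 address bits

std::uintptr_t pack(once_runner* r, std::uintptr_t refs) {
    // High bits: the runner's address; low bits: the waiter count.
    return reinterpret_cast<std::uintptr_t>(r) | (refs & references_mask);
}
once_runner* runner_from_bits(std::uintptr_t bits) {
    return reinterpret_cast<once_runner*>(bits & ~references_mask);
}
std::uintptr_t refs_from_bits(std::uintptr_t bits) {
    return bits & references_mask;
}
```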
|
| cache_aligned_allocator.h |
    125  std::uintptr_t base = reinterpret_cast<std::uintptr_t>(m_upstream->allocate(space));  in do_allocate()
    129  std::uintptr_t result = (base + cache_line_alignment) & ~(cache_line_alignment - 1);  in do_allocate()
    130  …__TBB_ASSERT((result - base) >= sizeof(std::uintptr_t), "Can`t store a base pointer to the header"…  in do_allocate()
    134  (reinterpret_cast<std::uintptr_t*>(result))[-1] = base;  in do_allocate()
    141  std::uintptr_t base = (reinterpret_cast<std::uintptr_t*>(ptr))[-1];  in do_deallocate()
    169  return bytes < sizeof(std::uintptr_t) ? sizeof(std::uintptr_t) : bytes;  in correct_size()
|
| queuing_rw_mutex.h |
     70  …my_next.store(reinterpret_cast<uintptr_t>(reinterpret_cast<void*>(-1)), std::memory_order_relaxed);  in initialize()
     71  …my_prev.store(reinterpret_cast<uintptr_t>(reinterpret_cast<void*>(-1)), std::memory_order_relaxed);  in initialize()
    118  std::atomic<uintptr_t> my_prev;
    119  std::atomic<uintptr_t> my_next;
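
initialize() stores the all-ones value, a bit pattern no real node can have, into both queue links: a third state distinct from null and from any valid pointer. The idiom in isolation, as a sketch:

```cpp
#include <atomic>
#include <cstdint>

struct queue_node {
    std::atomic<std::uintptr_t> my_prev{0};
    std::atomic<std::uintptr_t> my_next{0};
    void initialize() {
        // (void*)-1 round-tripped through uintptr_t gives an address-shaped
        // sentinel that cannot collide with nullptr or any allocated node.
        const std::uintptr_t invalid =
            reinterpret_cast<std::uintptr_t>(reinterpret_cast<void*>(-1));
        my_prev.store(invalid, std::memory_order_relaxed);
        my_next.store(invalid, std::memory_order_relaxed);
    }
};
```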
|
| /oneTBB/test/tbb/ |
| test_task_group.cpp |
    115  SharedGroupBodyImpl ( std::uintptr_t numThreads, std::uintptr_t sharingMode = 0 )  in SharedGroupBodyImpl()
    171  SharedGroupBody ( std::uintptr_t numThreads, std::uintptr_t sharingMode = 0 )  in SharedGroupBody()
    273  const std::uintptr_t N = 20;
    274  const std::uintptr_t F = 6765;
    291  std::uintptr_t* m_pRes;
    295  FibTaskBase( std::uintptr_t* y, std::uintptr_t n ) : m_pRes(y), m_Num(n) {}  in FibTaskBase()
    313  std::uintptr_t x = ~0u;  in impl()
    326  std::uintptr_t x = ~0u,  in impl()
    338  std::uintptr_t RunFibTask(std::uintptr_t n) {  in RunFibTask()
    339  std::uintptr_t res = ~0u;  in RunFibTask()
    [all …]
|
| test_scheduler_mix.cpp |
    101  static const std::uintptr_t READER_MASK = maxThreads;  // 7F..
    102  static const std::uintptr_t LOCKED = Alignment - 1;  // FF..
    103  static const std::uintptr_t LOCKED_MASK = LOCKED;  // FF..
    104  static const std::uintptr_t LOCK_PENDING = READER_MASK + 1;  // 80..
    106  std::atomic<std::uintptr_t> mState;
    112  std::uintptr_t state() {  in state()
    180  auto p = reinterpret_cast<std::uintptr_t>(ptr);  in trySet()
    183  std::uintptr_t expected = 0;  in trySet()
    371  static constexpr std::uintptr_t SHUTDOWN_FLAG = 1;
    372  static constexpr std::uintptr_t REFERENCE_FLAG = 1 << 1;
    [all …]
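
trySet() at lines 180-183 claims an empty slot with a single CAS: the word holds either zero or a pointer stored as std::uintptr_t. A sketch (flag constants like SHUTDOWN_FLAG can share the same word only because pointer alignment keeps the low bits clear, an assumption this sketch inherits):

```cpp
#include <atomic>
#include <cstdint>

class pointer_slot {
    std::atomic<std::uintptr_t> mState{0};
public:
    bool trySet(void* ptr) {
        auto p = reinterpret_cast<std::uintptr_t>(ptr);
        std::uintptr_t expected = 0;
        // Succeeds only if the slot was empty; otherwise leaves it untouched.
        return mState.compare_exchange_strong(expected, p,
                                              std::memory_order_acq_rel,
                                              std::memory_order_relaxed);
    }
    void* get() const {
        return reinterpret_cast<void*>(mState.load(std::memory_order_acquire));
    }
};
```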
|
| /oneTBB/test/tbbmalloc/ |
| test_malloc_init_shutdown.cpp |
    130  return p1>p2 ? ((uintptr_t)p1-(uintptr_t)p2)<n : ((uintptr_t)p2-(uintptr_t)p1)<n;  in intersectingObjects()
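
The one-liner above, unpacked: two n-byte objects overlap exactly when their start addresses are less than n apart, and the subtraction is ordered explicitly because std::uintptr_t arithmetic is unsigned and cannot go negative:

```cpp
#include <cstdint>

bool intersectingObjects(const void* p1, const void* p2, std::size_t n) {
    // Subtract the smaller address from the larger to get the distance.
    return p1 > p2 ? ((std::uintptr_t)p1 - (std::uintptr_t)p2) < n
                   : ((std::uintptr_t)p2 - (std::uintptr_t)p1) < n;
}
```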
|