/*
    Copyright (c) 2005-2023 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include <string.h>   /* for memset */
#include <errno.h>
#include "tbbmalloc_internal.h"

namespace rml {
namespace internal {

/*********** Code to acquire memory from the OS or other executive ****************/

/*
  A syscall/malloc can set a non-zero errno in case of failure,
  but the allocator might later still find memory to fulfill the request,
  and we do not want a successful scalable_malloc call to change errno.
  To support this, restore the old errno in (get|free)RawMemory, and set errno
  in the frontend just before returning to user code.
  Please note: every syscall/libc call used inside scalable_malloc that
  sets errno must be protected this way, not just memory allocation per se.
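  (Illustrative case, not an exhaustive list: an unsuccessful attempt to map
  huge pages may set errno even though the subsequent regular-page mapping
  satisfies the request; the caller must still observe its pre-call errno.)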
*/

#if USE_DEFAULT_MEMORY_MAPPING
#include "MapMemory.h"
#else
/* assume MapMemory and UnmapMemory are customized */
#endif

void* getRawMemory (size_t size, PageType pageType) {
    return MapMemory(size, pageType);
}

int freeRawMemory (void *object, size_t size) {
    return UnmapMemory(object, size);
}

#if CHECK_ALLOCATION_RANGE

void Backend::UsedAddressRange::registerAlloc(uintptr_t left, uintptr_t right)
{
    MallocMutex::scoped_lock lock(mutex);
    if (left < leftBound.load(std::memory_order_relaxed))
        leftBound.store(left, std::memory_order_relaxed);
    if (right > rightBound.load(std::memory_order_relaxed))
        rightBound.store(right, std::memory_order_relaxed);
    MALLOC_ASSERT(leftBound.load(std::memory_order_relaxed), ASSERT_TEXT);
    MALLOC_ASSERT(leftBound.load(std::memory_order_relaxed) < rightBound.load(std::memory_order_relaxed), ASSERT_TEXT);
    MALLOC_ASSERT(leftBound.load(std::memory_order_relaxed) <= left &&
                  right <= rightBound.load(std::memory_order_relaxed), ASSERT_TEXT);
}

void Backend::UsedAddressRange::registerFree(uintptr_t left, uintptr_t right)
{
    MallocMutex::scoped_lock lock(mutex);
    if (leftBound.load(std::memory_order_relaxed) == left) {
        if (rightBound.load(std::memory_order_relaxed) == right) {
            leftBound.store(ADDRESS_UPPER_BOUND, std::memory_order_relaxed);
            rightBound.store(0, std::memory_order_relaxed);
        } else
            leftBound.store(right, std::memory_order_relaxed);
    } else if (rightBound.load(std::memory_order_relaxed) == right)
        rightBound.store(left, std::memory_order_relaxed);
    MALLOC_ASSERT((!rightBound.load(std::memory_order_relaxed) && leftBound.load(std::memory_order_relaxed) == ADDRESS_UPPER_BOUND)
                  || leftBound.load(std::memory_order_relaxed) < rightBound.load(std::memory_order_relaxed), ASSERT_TEXT);
}
#endif // CHECK_ALLOCATION_RANGE

// Initialized in frontend inside defaultMemPool
extern HugePagesStatus hugePages;

void *Backend::allocRawMem(size_t &size)
{
    void *res = nullptr;
    size_t allocSize = 0;

    if (extMemPool->userPool()) {
        if (extMemPool->fixedPool && bootsrapMemDone == bootsrapMemStatus.load(std::memory_order_acquire))
            return nullptr;
        MALLOC_ASSERT(bootsrapMemStatus != bootsrapMemNotDone,
                      "Backend::allocRawMem() called prematurely?");
        // TODO: support for raw mem not aligned at sizeof(uintptr_t)
        // memory from fixed pool is asked once and only once
        allocSize = alignUpGeneric(size, extMemPool->granularity);
        res = (*extMemPool->rawAlloc)(extMemPool->poolId, allocSize);
    } else {
        // Align allocation on page size
        size_t pageSize = hugePages.isEnabled ? hugePages.getGranularity() : extMemPool->granularity;
        MALLOC_ASSERT(pageSize, "Page size cannot be zero.");
        allocSize = alignUpGeneric(size, pageSize);

        // If the user requested huge pages and they are available, try to use preallocated ones first.
        // If there are none, check transparent huge page support and use them instead.
        if (hugePages.isEnabled) {
            if (hugePages.isHPAvailable) {
                res = getRawMemory(allocSize, PREALLOCATED_HUGE_PAGE);
            }
            if (!res && hugePages.isTHPAvailable) {
                res = getRawMemory(allocSize, TRANSPARENT_HUGE_PAGE);
            }
        }

        if (!res) {
            res = getRawMemory(allocSize, REGULAR);
        }
    }

    if (res) {
        MALLOC_ASSERT(allocSize > 0, "Invalid size of an allocated region.");
        size = allocSize;
        if (!extMemPool->userPool())
            usedAddrRange.registerAlloc((uintptr_t)res, (uintptr_t)res+size);
#if MALLOC_DEBUG
        volatile size_t curTotalSize = totalMemSize; // to read the global value once
        MALLOC_ASSERT(curTotalSize+size > curTotalSize, "Overflow allocation size.");
#endif
        totalMemSize.fetch_add(size);
    }

    return res;
}

bool Backend::freeRawMem(void *object, size_t size)
{
    bool fail;
#if MALLOC_DEBUG
    volatile size_t curTotalSize = totalMemSize; // to read the global value once
    MALLOC_ASSERT(curTotalSize-size < curTotalSize, "Negative allocation size.");
#endif
    totalMemSize.fetch_sub(size);
    if (extMemPool->userPool()) {
        MALLOC_ASSERT(!extMemPool->fixedPool, "No free for fixed-size pools.");
        fail = (*extMemPool->rawFree)(extMemPool->poolId, object, size);
    } else {
        usedAddrRange.registerFree((uintptr_t)object, (uintptr_t)object + size);
        fail = freeRawMemory(object, size);
    }
    // TODO: use the result in all freeRawMem() callers
    return !fail;
}

/********* End memory acquisition code ********************************/

// Protected object size. After successful locking, tryLock() returns the size
// of the locked block; releasing the lock requires setting the block size.
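// Values up to MAX_LOCKED_VAL (LOCKED, COAL_BLOCK) are lock states,
// LAST_REGION_BLOCK marks the terminating block of a region, and anything
// above MAX_SPEC_VAL is a real free-block size. tryLock() returns the block
// size when it manages to install the requested lock state, or the observed
// lock marker (<= MAX_LOCKED_VAL) when the block is already locked.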
class GuardedSize : tbb::detail::no_copy {
    std::atomic<uintptr_t> value;
public:
    enum State {
        LOCKED,
        COAL_BLOCK,        // block is coalescing now
        MAX_LOCKED_VAL = COAL_BLOCK,
        LAST_REGION_BLOCK, // used to mark the last block in a region
        // values after this are "normal" block sizes
        MAX_SPEC_VAL = LAST_REGION_BLOCK
    };

    void initLocked() { value.store(LOCKED, std::memory_order_release); } // TBB_REVAMP_TODO: was relaxed
    void makeCoalscing() {
        MALLOC_ASSERT(value.load(std::memory_order_relaxed) == LOCKED, ASSERT_TEXT);
        value.store(COAL_BLOCK, std::memory_order_release); // TBB_REVAMP_TODO: was relaxed
    }
    size_t tryLock(State state) {
        MALLOC_ASSERT(state <= MAX_LOCKED_VAL, ASSERT_TEXT);
        size_t sz = value.load(std::memory_order_acquire);
        for (;;) {
            if (sz <= MAX_LOCKED_VAL) {
                break;
            }
            if (value.compare_exchange_strong(sz, state)) {
                break;
            }
        }
        return sz;
    }
    void unlock(size_t size) {
        MALLOC_ASSERT(value.load(std::memory_order_relaxed) <= MAX_LOCKED_VAL, "The lock is not locked");
        MALLOC_ASSERT(size > MAX_LOCKED_VAL, ASSERT_TEXT);
        value.store(size, std::memory_order_release);
    }
    bool isLastRegionBlock() const { return value.load(std::memory_order_relaxed) == LAST_REGION_BLOCK; }
    friend void Backend::IndexedBins::verify();
};

struct MemRegion {
    MemRegion *next,    // keep all regions of a pool linked, to release them all on
              *prev;    // pool destruction; a 2-linked list also allows releasing
                        // individual regions.
    size_t    allocSz,  // obtained from the pool callback
              blockSz;  // initial and maximal inner block size
    MemRegionType type;
};

// this data must stay unmodified while the block is in use, so keep it separate
class BlockMutexes {
protected:
    GuardedSize myL,   // lock for me
                leftL; // lock for the left neighbor
};

class FreeBlock : BlockMutexes {
public:
    static const size_t minBlockSize;
    friend void Backend::IndexedBins::verify();

    FreeBlock *prev,       // in 2-linked list related to bin
              *next,
              *nextToFree; // used to form a queue during coalescing
    // valid only when the block is in processing, i.e. it is not free and not
    // used outside of the backend
    size_t sizeTmp;
    int    myBin;       // bin that owns the block
    bool   slabAligned;
    bool   blockInBin;  // this block is in myBin already

    FreeBlock *rightNeig(size_t sz) const {
        MALLOC_ASSERT(sz, ASSERT_TEXT);
        return (FreeBlock*)((uintptr_t)this + sz);
    }
    FreeBlock *leftNeig(size_t sz) const {
        MALLOC_ASSERT(sz, ASSERT_TEXT);
        return (FreeBlock*)((uintptr_t)this - sz);
    }

    void initHeader() { myL.initLocked(); leftL.initLocked(); }
    void setMeFree(size_t size) { myL.unlock(size); }
    size_t trySetMeUsed(GuardedSize::State s) { return myL.tryLock(s); }
    bool isLastRegionBlock() const { return myL.isLastRegionBlock(); }

    void setLeftFree(size_t sz) { leftL.unlock(sz); }
    size_t trySetLeftUsed(GuardedSize::State s) { return leftL.tryLock(s); }

    size_t tryLockBlock() {
        size_t rSz, sz = trySetMeUsed(GuardedSize::LOCKED);

        if (sz <= GuardedSize::MAX_LOCKED_VAL)
            return false;
        rSz = rightNeig(sz)->trySetLeftUsed(GuardedSize::LOCKED);
        if (rSz <= GuardedSize::MAX_LOCKED_VAL) {
            setMeFree(sz);
            return false;
        }
        MALLOC_ASSERT(rSz == sz, ASSERT_TEXT);
        return sz;
    }
    void markCoalescing(size_t blockSz) {
        myL.makeCoalscing();
        rightNeig(blockSz)->leftL.makeCoalscing();
        sizeTmp = blockSz;
        nextToFree = nullptr;
    }
    void markUsed() {
        myL.initLocked();
        rightNeig(sizeTmp)->leftL.initLocked();
        nextToFree = nullptr;
    }
    static void markBlocks(FreeBlock *fBlock, int num, size_t size) {
        for (int i=1; i<num; i++) {
            fBlock = (FreeBlock*)((uintptr_t)fBlock + size);
            fBlock->initHeader();
        }
    }
};

// Last block in any region. Its "size" field is GuardedSize::LAST_REGION_BLOCK.
Its "size" field is GuardedSize::LAST_REGION_BLOCK, 27651c0b2f7Stbbdev // This kind of blocks used to find region header 27751c0b2f7Stbbdev // and have a possibility to return region back to OS 27851c0b2f7Stbbdev struct LastFreeBlock : public FreeBlock { 27951c0b2f7Stbbdev MemRegion *memRegion; 28051c0b2f7Stbbdev }; 28151c0b2f7Stbbdev 28251c0b2f7Stbbdev const size_t FreeBlock::minBlockSize = sizeof(FreeBlock); 28351c0b2f7Stbbdev 28451c0b2f7Stbbdev inline bool BackendSync::waitTillBlockReleased(intptr_t startModifiedCnt) 28551c0b2f7Stbbdev { 28651c0b2f7Stbbdev AtomicBackoff backoff; 28751c0b2f7Stbbdev #if __TBB_MALLOC_BACKEND_STAT 28851c0b2f7Stbbdev class ITT_Guard { 28951c0b2f7Stbbdev void *ptr; 29051c0b2f7Stbbdev public: 29151c0b2f7Stbbdev ITT_Guard(void *p) : ptr(p) { 29251c0b2f7Stbbdev MALLOC_ITT_SYNC_PREPARE(ptr); 29351c0b2f7Stbbdev } 29451c0b2f7Stbbdev ~ITT_Guard() { 29551c0b2f7Stbbdev MALLOC_ITT_SYNC_ACQUIRED(ptr); 29651c0b2f7Stbbdev } 29751c0b2f7Stbbdev }; 29851c0b2f7Stbbdev ITT_Guard ittGuard(&inFlyBlocks); 29951c0b2f7Stbbdev #endif 30051c0b2f7Stbbdev for (intptr_t myBinsInFlyBlocks = inFlyBlocks.load(std::memory_order_acquire), 30151c0b2f7Stbbdev myCoalescQInFlyBlocks = backend->blocksInCoalescing(); ; backoff.pause()) { 30257f524caSIlya Isaev MALLOC_ASSERT(myBinsInFlyBlocks>=0 && myCoalescQInFlyBlocks>=0, nullptr); 30351c0b2f7Stbbdev intptr_t currBinsInFlyBlocks = inFlyBlocks.load(std::memory_order_acquire), 30451c0b2f7Stbbdev currCoalescQInFlyBlocks = backend->blocksInCoalescing(); 30551c0b2f7Stbbdev WhiteboxTestingYield(); 30651c0b2f7Stbbdev // Stop waiting iff: 30751c0b2f7Stbbdev 30851c0b2f7Stbbdev // 1) blocks were removed from processing, not added 30951c0b2f7Stbbdev if (myBinsInFlyBlocks > currBinsInFlyBlocks 31051c0b2f7Stbbdev // 2) released during delayed coalescing queue 31151c0b2f7Stbbdev || myCoalescQInFlyBlocks > currCoalescQInFlyBlocks) 31251c0b2f7Stbbdev break; 31351c0b2f7Stbbdev // 3) if there are blocks in coalescing, and no progress in its processing, 31451c0b2f7Stbbdev // try to scan coalescing queue and stop waiting, if changes were made 31551c0b2f7Stbbdev // (if there are no changes and in-fly blocks exist, we continue 31651c0b2f7Stbbdev // waiting to not increase load on coalescQ) 31751c0b2f7Stbbdev if (currCoalescQInFlyBlocks > 0 && backend->scanCoalescQ(/*forceCoalescQDrop=*/false)) 31851c0b2f7Stbbdev break; 31951c0b2f7Stbbdev // 4) when there are no blocks 32051c0b2f7Stbbdev if (!currBinsInFlyBlocks && !currCoalescQInFlyBlocks) 32151c0b2f7Stbbdev // re-scan make sense only if bins were modified since scanned 32251c0b2f7Stbbdev return startModifiedCnt != getNumOfMods(); 32351c0b2f7Stbbdev myBinsInFlyBlocks = currBinsInFlyBlocks; 32451c0b2f7Stbbdev myCoalescQInFlyBlocks = currCoalescQInFlyBlocks; 32551c0b2f7Stbbdev } 32651c0b2f7Stbbdev return true; 32751c0b2f7Stbbdev } 32851c0b2f7Stbbdev 32951c0b2f7Stbbdev void CoalRequestQ::putBlock(FreeBlock *fBlock) 33051c0b2f7Stbbdev { 33151c0b2f7Stbbdev MALLOC_ASSERT(fBlock->sizeTmp >= FreeBlock::minBlockSize, ASSERT_TEXT); 33251c0b2f7Stbbdev fBlock->markUsed(); 33351c0b2f7Stbbdev // the block is in the queue, do not forget that it's here 33451c0b2f7Stbbdev inFlyBlocks++; 33551c0b2f7Stbbdev 33651c0b2f7Stbbdev FreeBlock *myBlToFree = blocksToFree.load(std::memory_order_acquire); 33751c0b2f7Stbbdev for (;;) { 33851c0b2f7Stbbdev fBlock->nextToFree = myBlToFree; 33951c0b2f7Stbbdev if (blocksToFree.compare_exchange_strong(myBlToFree, fBlock)) { 34051c0b2f7Stbbdev return; 34151c0b2f7Stbbdev } 34251c0b2f7Stbbdev } 
FreeBlock *CoalRequestQ::getAll()
{
    for (;;) {
        FreeBlock *myBlToFree = blocksToFree.load(std::memory_order_acquire);

        if (!myBlToFree) {
            return nullptr;
        } else {
            if (blocksToFree.compare_exchange_strong(myBlToFree, nullptr)) {
                return myBlToFree;
            } else {
                continue;
            }
        }
    }
}

inline void CoalRequestQ::blockWasProcessed()
{
    bkndSync->binsModified();
    int prev = inFlyBlocks.fetch_sub(1);
    tbb::detail::suppress_unused_warning(prev);
    MALLOC_ASSERT(prev > 0, ASSERT_TEXT);
}

// Try to get a block from a bin.
// If the remaining free space would stay in the same bin,
// split the block without removing it.
// If the free space should go to other bin(s), remove the block.
// alignedBin is true if all blocks in the bin have a slab-aligned right side.
FreeBlock *Backend::IndexedBins::getFromBin(int binIdx, BackendSync *sync, size_t size,
                                            bool needAlignedRes, bool alignedBin, bool wait, int *binLocked)
{
    Bin *b = &freeBins[binIdx];
try_next:
    FreeBlock *fBlock = nullptr;
    if (!b->empty()) {
        bool locked;
        MallocMutex::scoped_lock scopedLock(b->tLock, wait, &locked);

        if (!locked) {
            if (binLocked) (*binLocked)++;
            return nullptr;
        }

        for (FreeBlock *curr = b->head.load(std::memory_order_relaxed); curr; curr = curr->next) {
            size_t szBlock = curr->tryLockBlock();
            if (!szBlock) {
                // the block is locked; re-take the bin lock, as there is no place to spin
                // while the block is coalescing
                goto try_next;
            }

            // GENERAL CASE
            if (alignedBin || !needAlignedRes) {
                size_t splitSz = szBlock - size;
                // If we get a block as a split result, it must have room for the control structures.
                if (szBlock >= size && (splitSz >= FreeBlock::minBlockSize || !splitSz))
                    fBlock = curr;
            } else {
                // SPECIAL CASE: to get an aligned block from an unaligned bin we have to cut the middle of a block
                // and return the remaining left and right parts. Possible only in the fixed pool scenario;
                // the assert for this is set inside the splitBlock() function.

                void *newB = alignUp(curr, slabSize);
                uintptr_t rightNew = (uintptr_t)newB + size;
                uintptr_t rightCurr = (uintptr_t)curr + szBlock;
                // Check that the block size is sufficient,
                // and that the left and right split results are either big enough or non-existent
                if (rightNew <= rightCurr
                    && (newB == curr || ((uintptr_t)newB - (uintptr_t)curr) >= FreeBlock::minBlockSize)
                    && (rightNew == rightCurr || (rightCurr - rightNew) >= FreeBlock::minBlockSize))
                    fBlock = curr;
            }

            if (fBlock) {
                // consume must be called before the result of removing from a bin is visible externally.
                sync->blockConsumed();
                // TODO: think about cases when the block stays in the same bin
                b->removeBlock(fBlock);
                if (freeBins[binIdx].empty())
                    bitMask.set(binIdx, false);
                fBlock->sizeTmp = szBlock;
                break;
            } else { // the block size is not valid, search for the next block in the bin
                curr->setMeFree(szBlock);
                curr->rightNeig(szBlock)->setLeftFree(szBlock);
            }
        }
    }
    return fBlock;
}

bool Backend::IndexedBins::tryReleaseRegions(int binIdx, Backend *backend)
{
    Bin *b = &freeBins[binIdx];
    FreeBlock *fBlockList = nullptr;

    // get all blocks from the bin and re-do coalescing on them
    // to release single-block regions
try_next:
    if (!b->empty()) {
        MallocMutex::scoped_lock binLock(b->tLock);
        for (FreeBlock *curr = b->head.load(std::memory_order_relaxed); curr; ) {
            size_t szBlock = curr->tryLockBlock();
            if (!szBlock)
                goto try_next;

            FreeBlock *next = curr->next;

            b->removeBlock(curr);
            curr->sizeTmp = szBlock;
            curr->nextToFree = fBlockList;
            fBlockList = curr;
            curr = next;
        }
    }
    return backend->coalescAndPutList(fBlockList, /*forceCoalescQDrop=*/true,
                                      /*reportBlocksProcessed=*/false);
}

void Backend::Bin::removeBlock(FreeBlock *fBlock)
{
    MALLOC_ASSERT(fBlock->next || fBlock->prev || fBlock == head.load(std::memory_order_relaxed),
                  "Detected that a block is not in the bin.");
    if (head.load(std::memory_order_relaxed) == fBlock)
        head.store(fBlock->next, std::memory_order_relaxed);
    if (tail == fBlock)
        tail = fBlock->prev;
    if (fBlock->prev)
        fBlock->prev->next = fBlock->next;
    if (fBlock->next)
        fBlock->next->prev = fBlock->prev;
}

void Backend::IndexedBins::addBlock(int binIdx, FreeBlock *fBlock, size_t /* blockSz */, bool addToTail)
{
    Bin *b = &freeBins[binIdx];
    fBlock->myBin = binIdx;
    fBlock->next = fBlock->prev = nullptr;
    {
        MallocMutex::scoped_lock scopedLock(b->tLock);
        if (addToTail) {
            fBlock->prev = b->tail;
            b->tail = fBlock;
            if (fBlock->prev)
                fBlock->prev->next = fBlock;
            if (!b->head.load(std::memory_order_relaxed))
                b->head.store(fBlock, std::memory_order_relaxed);
        } else {
            fBlock->next = b->head.load(std::memory_order_relaxed);
            b->head.store(fBlock, std::memory_order_relaxed);
            if (fBlock->next)
                fBlock->next->prev = fBlock;
            if (!b->tail)
                b->tail = fBlock;
        }
    }
    bitMask.set(binIdx, true);
}

bool Backend::IndexedBins::tryAddBlock(int binIdx, FreeBlock *fBlock, bool addToTail)
{
    bool locked;
    Bin *b = &freeBins[binIdx];
    fBlock->myBin = binIdx;
    if (addToTail) {
        fBlock->next = nullptr;
        {
            MallocMutex::scoped_lock scopedLock(b->tLock, /*wait=*/false, &locked);
            if (!locked)
                return false;
            fBlock->prev = b->tail;
            b->tail = fBlock;
            if (fBlock->prev)
                fBlock->prev->next = fBlock;
            if (!b->head.load(std::memory_order_relaxed))
                b->head.store(fBlock, std::memory_order_relaxed);
        }
    } else {
        fBlock->prev = nullptr;
        {
            MallocMutex::scoped_lock scopedLock(b->tLock, /*wait=*/false, &locked);
            if (!locked)
                return false;
            fBlock->next = b->head.load(std::memory_order_relaxed);
            b->head.store(fBlock, std::memory_order_relaxed);
            if (fBlock->next)
                fBlock->next->prev = fBlock;
            if (!b->tail)
                b->tail = fBlock;
        }
    }
    bitMask.set(binIdx, true);
    return true;
}

void Backend::IndexedBins::reset()
{
    for (unsigned i=0; i<Backend::freeBinsNum; i++)
        freeBins[i].reset();
    bitMask.reset();
}

void Backend::IndexedBins::lockRemoveBlock(int binIdx, FreeBlock *fBlock)
{
    MallocMutex::scoped_lock scopedLock(freeBins[binIdx].tLock);
    freeBins[binIdx].removeBlock(fBlock);
    if (freeBins[binIdx].empty())
        bitMask.set(binIdx, false);
}

bool ExtMemoryPool::regionsAreReleaseable() const
{
    return !keepAllMemory && !delayRegsReleasing;
}

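// Split a free block of fBlock->sizeTmp bytes so that num*size bytes can be handed
// to the requester; any leftover space on the left and/or right is returned to the
// backend via coalescAndPut(). For slab-aligned requests the aligned part is carved
// out and the remainder is freed.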
FreeBlock *Backend::splitBlock(FreeBlock *fBlock, int num, size_t size, bool blockIsAligned, bool needAlignedBlock)
{
    const size_t totalSize = num * size;

    // SPECIAL CASE: for an unaligned block we have to cut the middle of the block
    // and return the remaining left and right parts. Possible only in a fixed pool scenario.
    if (needAlignedBlock && !blockIsAligned) {
        MALLOC_ASSERT(extMemPool->fixedPool,
                      "Aligned block request from unaligned bin possible only in fixed pool scenario.");

        // Space to use is in the middle
        FreeBlock *newBlock = alignUp(fBlock, slabSize);
        FreeBlock *rightPart = (FreeBlock*)((uintptr_t)newBlock + totalSize);
        uintptr_t fBlockEnd = (uintptr_t)fBlock + fBlock->sizeTmp;

        // Return the free right part
        if ((uintptr_t)rightPart != fBlockEnd) {
            rightPart->initHeader(); // to prevent coalescing rightPart with fBlock
            size_t rightSize = fBlockEnd - (uintptr_t)rightPart;
            coalescAndPut(rightPart, rightSize, toAlignedBin(rightPart, rightSize));
        }
        // And free the left part
        if (newBlock != fBlock) {
            newBlock->initHeader(); // to prevent coalescing fBlock with newB
            size_t leftSize = (uintptr_t)newBlock - (uintptr_t)fBlock;
            coalescAndPut(fBlock, leftSize, toAlignedBin(fBlock, leftSize));
        }
        fBlock = newBlock;
    } else if (size_t splitSize = fBlock->sizeTmp - totalSize) { // need to split the block
        // GENERAL CASE: cut the left or right part of the block
        FreeBlock *splitBlock = nullptr;
        if (needAlignedBlock) {
            // For slab-aligned blocks cut the right side of the block
            // and return it to the requester; the original block goes back to the backend
            splitBlock = fBlock;
            fBlock = (FreeBlock*)((uintptr_t)splitBlock + splitSize);
            fBlock->initHeader();
        } else {
            // For large object blocks cut the original block and put the free right part to the backend
            splitBlock = (FreeBlock*)((uintptr_t)fBlock + totalSize);
            splitBlock->initHeader();
        }
        // Mark the free block like its parent only when the requested type (needAlignedBlock)
        // and the type of the block returned from bins/OS (blockIsAligned) are equal;
        // otherwise recompute the alignment (XOR operation used)
        bool markAligned = (blockIsAligned ^ needAlignedBlock) ? toAlignedBin(splitBlock, splitSize) : blockIsAligned;
        coalescAndPut(splitBlock, splitSize, markAligned);
    }
    MALLOC_ASSERT(!needAlignedBlock || isAligned(fBlock, slabSize), "Expect to get an aligned block, if one was requested.");
    FreeBlock::markBlocks(fBlock, num, size);
    return fBlock;
}

size_t Backend::getMaxBinnedSize() const
{
    return hugePages.isEnabled && !inUserPool() ?
        maxBinned_HugePage : maxBinned_SmallPage;
}

inline bool Backend::MaxRequestComparator::operator()(size_t oldMaxReq, size_t requestSize) const
{
    return requestSize > oldMaxReq && requestSize < backend->getMaxBinnedSize();
}

// last chance to get memory
FreeBlock *Backend::releaseMemInCaches(intptr_t startModifiedCnt,
                                       int *lockedBinsThreshold, int numOfLockedBins)
{
    // something was released from the caches
    if (extMemPool->hardCachesCleanup()
        // ..or we can use blocks that are in processing now
        || bkndSync.waitTillBlockReleased(startModifiedCnt))
        return (FreeBlock*)VALID_BLOCK_IN_BIN;
    // the OS can't give us more memory, but we have some in locked bins
    if (*lockedBinsThreshold && numOfLockedBins) {
        *lockedBinsThreshold = 0;
        return (FreeBlock*)VALID_BLOCK_IN_BIN;
    }
    return nullptr; // nothing found, give up
}

FreeBlock *Backend::askMemFromOS(size_t blockSize, intptr_t startModifiedCnt,
                                 int *lockedBinsThreshold, int numOfLockedBins,
                                 bool *splittableRet, bool needSlabRegion)
{
    FreeBlock *block;
    // The block sizes can be divided into 3 groups:
    // 1. "quite small": a popular object size, we are in bootstrap or something
    //    like that; request several regions.
    // 2. "quite large": we want to have several such blocks in the region,
    //    but do not want several pre-allocated regions.
    // 3. "huge": exact fit, we allocate only one block and do not allow
    //    any other allocations to be placed in the region.
    // Dividing the block sizes into these groups, we try to balance between
    // too small regions (which lead to fragmentation) and too large ones (which
    // lead to excessive address space consumption). If a region is "too
    // large", allocate only one, to prevent fragmentation. It supposedly
    // doesn't hurt performance, because the object requested by the user is large.
    // Bounds for the groups are:
    const size_t maxBinned = getMaxBinnedSize();
    const size_t quiteSmall = maxBinned / 8;
    const size_t quiteLarge = maxBinned;

    if (blockSize >= quiteLarge) {
        // Do not interact with other threads via semaphores, as for an exact fit
        // we can't share regions with them; memory requesting is individual.
        block = addNewRegion(blockSize, MEMREG_ONE_BLOCK, /*addToBin=*/false);
        if (!block)
            return releaseMemInCaches(startModifiedCnt, lockedBinsThreshold, numOfLockedBins);
        *splittableRet = false;
    } else {
        const size_t regSz_sizeBased = alignUp(4*maxRequestedSize, 1024*1024);
        // Another thread is modifying the backend while we can't get the block.
        // Wait while it leaves and re-do the scan
        // before trying other ways to extend the backend.
        if (bkndSync.waitTillBlockReleased(startModifiedCnt)
            // the semaphore protects adding more memory from the OS
            || memExtendingSema.wait())
            return (FreeBlock*)VALID_BLOCK_IN_BIN;

        if (startModifiedCnt != bkndSync.getNumOfMods()) {
            memExtendingSema.signal();
            return (FreeBlock*)VALID_BLOCK_IN_BIN;
        }

        if (blockSize < quiteSmall) {
            // For this size of blocks, add NUM_OF_REG "advance" regions to the bin,
            // and return one as the result.
            // TODO: add to the bin first, because other threads can use them right away.
            // This must be done carefully, because blocks in bins can be released
            // in releaseCachesToLimit().
            const unsigned NUM_OF_REG = 3;
            MemRegionType regType = needSlabRegion ? MEMREG_SLAB_BLOCKS : MEMREG_LARGE_BLOCKS;
            block = addNewRegion(regSz_sizeBased, regType, /*addToBin=*/false);
            if (block)
                for (unsigned idx=0; idx<NUM_OF_REG; idx++)
                    if (! addNewRegion(regSz_sizeBased, regType, /*addToBin=*/true))
                        break;
        } else {
            block = addNewRegion(regSz_sizeBased, MEMREG_LARGE_BLOCKS, /*addToBin=*/false);
        }
        memExtendingSema.signal();

        // no regions found, try to clean the caches
        if (!block || block == (FreeBlock*)VALID_BLOCK_IN_BIN)
            return releaseMemInCaches(startModifiedCnt, lockedBinsThreshold, numOfLockedBins);
        // Since a region can hold more than one block, it can be split.
        *splittableRet = true;
    }
    // after asking for memory from the OS, release caches if we are above the memory limits
    releaseCachesToLimit();

    return block;
}

void Backend::releaseCachesToLimit()
{
    if (!memSoftLimit.load(std::memory_order_relaxed)
        || totalMemSize.load(std::memory_order_relaxed) <= memSoftLimit.load(std::memory_order_relaxed)) {
        return;
    }
    size_t locTotalMemSize, locMemSoftLimit;

    scanCoalescQ(/*forceCoalescQDrop=*/false);
    if (extMemPool->softCachesCleanup() &&
        (locTotalMemSize = totalMemSize.load(std::memory_order_acquire)) <=
        (locMemSoftLimit = memSoftLimit.load(std::memory_order_acquire)))
        return;
    // clean the global large-object cache; if this is not enough, clean local caches,
    // doing this in several tries, because backend fragmentation can prevent
    // a region from being released
    for (int cleanLocal = 0; cleanLocal < 2; cleanLocal++)
        while (cleanLocal ?
               extMemPool->allLocalCaches.cleanup(/*cleanOnlyUnused=*/true) :
               extMemPool->loc.decreasingCleanup())
            if ((locTotalMemSize = totalMemSize.load(std::memory_order_acquire)) <=
                (locMemSoftLimit = memSoftLimit.load(std::memory_order_acquire)))
                return;
    // last chance to match memSoftLimit
    extMemPool->hardCachesCleanup();
}

int Backend::IndexedBins::getMinNonemptyBin(unsigned startBin) const
{
    int p = bitMask.getMinTrue(startBin);
    return p == -1 ? Backend::freeBinsNum : p;
}

FreeBlock *Backend::IndexedBins::findBlock(int nativeBin, BackendSync *sync, size_t size,
                                           bool needAlignedBlock, bool alignedBin, int *numOfLockedBins)
{
    for (int i=getMinNonemptyBin(nativeBin); i<(int)freeBinsNum; i=getMinNonemptyBin(i+1))
        if (FreeBlock *block = getFromBin(i, sync, size, needAlignedBlock, alignedBin, /*wait=*/false, numOfLockedBins))
            return block;

    return nullptr;
}

void Backend::requestBootstrapMem()
{
    if (bootsrapMemDone == bootsrapMemStatus.load(std::memory_order_acquire))
        return;
    MallocMutex::scoped_lock lock( bootsrapMemStatusMutex );
    if (bootsrapMemDone == bootsrapMemStatus)
        return;
    MALLOC_ASSERT(bootsrapMemNotDone == bootsrapMemStatus, ASSERT_TEXT);
    bootsrapMemStatus = bootsrapMemInitializing;
    // request a rather big region in advance during bootstrap;
    // it is fine to get nullptr here, as later we re-do the request with a more modest size
    addNewRegion(2*1024*1024, MEMREG_SLAB_BLOCKS, /*addToBin=*/true);
    bootsrapMemStatus = bootsrapMemDone;
}

// Try to allocate a block of num*size bytes in the available bins.
// needAlignedBlock is true if the result must be slab-aligned.
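// Overall strategy: scan suitable bins first; if nothing fits, drain the delayed
// coalescing queue and the soft caches and retry; only when the bins provably did
// not change is more memory requested from the OS via askMemFromOS().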
FreeBlock *Backend::genericGetBlock(int num, size_t size, bool needAlignedBlock)
{
    FreeBlock *block = nullptr;
    const size_t totalReqSize = num*size;
    // no splitting after requesting a new region; ask for the exact size
    const int nativeBin = sizeToBin(totalReqSize);

    requestBootstrapMem();
    // If we found 2 or fewer locked bins, it's time to ask the OS for more memory.
    // But nothing can be asked from a fixed pool. And we prefer to wait rather than
    // ask for more memory, if the block is quite large.
    int lockedBinsThreshold = extMemPool->fixedPool || size>=maxBinned_SmallPage ? 0 : 2;

    // Track the maximal requested size, limited by getMaxBinnedSize()
    AtomicUpdate(maxRequestedSize, totalReqSize, MaxRequestComparator(this));
    scanCoalescQ(/*forceCoalescQDrop=*/false);

    bool splittable = true;
    for (;;) {
        const intptr_t startModifiedCnt = bkndSync.getNumOfMods();
        int numOfLockedBins;

        do {
            numOfLockedBins = 0;
            if (needAlignedBlock) {
                block = freeSlabAlignedBins.findBlock(nativeBin, &bkndSync, num*size, needAlignedBlock,
                                                      /*alignedBin=*/true, &numOfLockedBins);
                if (!block && extMemPool->fixedPool)
                    block = freeLargeBlockBins.findBlock(nativeBin, &bkndSync, num*size, needAlignedBlock,
                                                         /*alignedBin=*/false, &numOfLockedBins);
            } else {
                block = freeLargeBlockBins.findBlock(nativeBin, &bkndSync, num*size, needAlignedBlock,
                                                     /*alignedBin=*/false, &numOfLockedBins);
                if (!block && extMemPool->fixedPool)
                    block = freeSlabAlignedBins.findBlock(nativeBin, &bkndSync, num*size, needAlignedBlock,
                                                          /*alignedBin=*/true, &numOfLockedBins);
            }
        } while (!block && numOfLockedBins>lockedBinsThreshold);

        if (block)
            break;

        bool retScanCoalescQ = scanCoalescQ(/*forceCoalescQDrop=*/true);
        bool retSoftCachesCleanup = extMemPool->softCachesCleanup();
        if (!(retScanCoalescQ || retSoftCachesCleanup)) {
            // bins are not updated,
            // the only remaining possibility is to ask for more memory
            block = askMemFromOS(totalReqSize, startModifiedCnt, &lockedBinsThreshold,
                                 numOfLockedBins, &splittable, needAlignedBlock);
            if (!block)
                return nullptr;
            if (block != (FreeBlock*)VALID_BLOCK_IN_BIN) {
                // the size can be increased in askMemFromOS, that's why >=
                MALLOC_ASSERT(block->sizeTmp >= size, ASSERT_TEXT);
                break;
            }
            // a valid block is somewhere in the bins, let's find it
            block = nullptr;
        }
    }
    MALLOC_ASSERT(block, ASSERT_TEXT);
    if (splittable) {
        // At this point we have to be sure that the slabAligned attribute describes the right block state
        block = splitBlock(block, num, size, block->slabAligned, needAlignedBlock);
    }
    // matches blockConsumed() from startUseBlock()
    bkndSync.blockReleased();

    return block;
}

LargeMemoryBlock *Backend::getLargeBlock(size_t size)
{
    LargeMemoryBlock *lmb =
        (LargeMemoryBlock*)genericGetBlock(1, size, /*needAlignedRes=*/false);
    if (lmb) {
        lmb->unalignedSize = size;
        if (extMemPool->userPool())
            extMemPool->lmbList.add(lmb);
    }
    return lmb;
}

BlockI *Backend::getSlabBlock(int num) {
    BlockI *b = (BlockI*)genericGetBlock(num, slabSize, /*slabAligned=*/true);
    MALLOC_ASSERT(isAligned(b, slabSize), ASSERT_TEXT);
    return b;
}

void Backend::putSlabBlock(BlockI *block) {
    genericPutBlock((FreeBlock *)block, slabSize, /*slabAligned=*/true);
}

void *Backend::getBackRefSpace(size_t size, bool *rawMemUsed)
{
    // This block is released only at shutdown, so it can prevent
    // an entire region from being released when it comes from the backend;
    // thus prefer using getRawMemory.
    if (void *ret = getRawMemory(size, REGULAR)) {
        *rawMemUsed = true;
        return ret;
    }
    void *ret = genericGetBlock(1, size, /*needAlignedRes=*/false);
    if (ret) *rawMemUsed = false;
    return ret;
}

void Backend::putBackRefSpace(void *b, size_t size, bool rawMemUsed)
{
    if (rawMemUsed)
        freeRawMemory(b, size);
    // ignore non-raw memory, as it is released when its region is released
}

void Backend::removeBlockFromBin(FreeBlock *fBlock)
{
    if (fBlock->myBin != Backend::NO_BIN) {
        if (fBlock->slabAligned)
            freeSlabAlignedBins.lockRemoveBlock(fBlock->myBin, fBlock);
        else
            freeLargeBlockBins.lockRemoveBlock(fBlock->myBin, fBlock);
    }
}

void Backend::genericPutBlock(FreeBlock *fBlock, size_t blockSz, bool slabAligned)
{
    bkndSync.blockConsumed();
    coalescAndPut(fBlock, blockSz, slabAligned);
    bkndSync.blockReleased();
}

void AllLargeBlocksList::add(LargeMemoryBlock *lmb)
{
    MallocMutex::scoped_lock scoped_cs(largeObjLock);
    lmb->gPrev = nullptr;
    lmb->gNext = loHead;
    if (lmb->gNext)
        lmb->gNext->gPrev = lmb;
    loHead = lmb;
}

void AllLargeBlocksList::remove(LargeMemoryBlock *lmb)
{
    MallocMutex::scoped_lock scoped_cs(largeObjLock);
    if (loHead == lmb)
        loHead = lmb->gNext;
    if (lmb->gNext)
        lmb->gNext->gPrev = lmb->gPrev;
    if (lmb->gPrev)
        lmb->gPrev->gNext = lmb->gNext;
}

void Backend::putLargeBlock(LargeMemoryBlock *lmb)
{
    if (extMemPool->userPool())
        extMemPool->lmbList.remove(lmb);
    genericPutBlock((FreeBlock *)lmb, lmb->unalignedSize, false);
}

void Backend::returnLargeObject(LargeMemoryBlock *lmb)
{
    removeBackRef(lmb->backRefIdx);
    putLargeBlock(lmb);
    STAT_increment(getThreadId(), ThreadCommonCounters, freeLargeObj);
}

#if BACKEND_HAS_MREMAP
void *Backend::remap(void *ptr, size_t oldSize, size_t newSize, size_t alignment)
{
    // no remap for user pools and for objects so small that they live in bins
    if (inUserPool() || min(oldSize, newSize)<maxBinned_SmallPage
        // during remap we can't guarantee an alignment stricter than the current one or
        // stricter than page alignment
        || !isAligned(ptr, alignment) || alignment>extMemPool->granularity)
        return nullptr;
    const LargeMemoryBlock* lmbOld = ((LargeObjectHdr *)ptr - 1)->memoryBlock;
    const size_t oldUnalignedSize = lmbOld->unalignedSize;
    FreeBlock *oldFBlock = (FreeBlock *)lmbOld;
    FreeBlock *right = oldFBlock->rightNeig(oldUnalignedSize);
    // in every region only one block can have LAST_REGION_BLOCK on its right,
    // so no synchronization is needed
    if (!right->isLastRegionBlock())
        return nullptr;

    MemRegion *oldRegion = static_cast<LastFreeBlock*>(right)->memRegion;
    MALLOC_ASSERT( oldRegion < ptr, ASSERT_TEXT );
    const size_t oldRegionSize = oldRegion->allocSz;
    if (oldRegion->type != MEMREG_ONE_BLOCK)
        return nullptr; // we are not alone in the region
    const size_t userOffset = (uintptr_t)ptr - (uintptr_t)oldRegion;
    const size_t alignedSize = LargeObjectCache::alignToBin(newSize + userOffset);
    const size_t requestSize =
        alignUp(sizeof(MemRegion) + alignedSize + sizeof(LastFreeBlock), extMemPool->granularity);
    if (requestSize < alignedSize) // has it wrapped around?
        return nullptr;
    regionList.remove(oldRegion);

    // The deallocation should be registered in the address range before mremap to
    // prevent a race condition with an allocation on another thread
    // (the OS can reuse the memory and registerAlloc would be missed on the other thread)
    usedAddrRange.registerFree((uintptr_t)oldRegion, (uintptr_t)oldRegion + oldRegionSize);

    void *ret = mremap(oldRegion, oldRegion->allocSz, requestSize, MREMAP_MAYMOVE);
    if (MAP_FAILED == ret) { // can't remap, revert and leave
        regionList.add(oldRegion);
        usedAddrRange.registerAlloc((uintptr_t)oldRegion, (uintptr_t)oldRegion + oldRegionSize);
        return nullptr;
    }
    MemRegion *region = (MemRegion*)ret;
    MALLOC_ASSERT(region->type == MEMREG_ONE_BLOCK, ASSERT_TEXT);
    region->allocSz = requestSize;
    region->blockSz = alignedSize;

    FreeBlock *fBlock = (FreeBlock *)alignUp((uintptr_t)region + sizeof(MemRegion),
                                             largeObjectAlignment);

    regionList.add(region);
    startUseBlock(region, fBlock, /*addToBin=*/false);
    MALLOC_ASSERT(fBlock->sizeTmp == region->blockSz, ASSERT_TEXT);
    // matches blockConsumed() in startUseBlock().
    // TODO: get rid of the useless blockConsumed()/blockReleased() pair
    bkndSync.blockReleased();

    // the object must start at the same offset from the region's start
    void *object = (void*)((uintptr_t)region + userOffset);
    MALLOC_ASSERT(isAligned(object, alignment), ASSERT_TEXT);
    LargeObjectHdr *header = (LargeObjectHdr*)object - 1;
    setBackRef(header->backRefIdx, header);

    LargeMemoryBlock *lmb = (LargeMemoryBlock*)fBlock;
    lmb->unalignedSize = region->blockSz;
    lmb->objectSize = newSize;
    lmb->backRefIdx = header->backRefIdx;
    header->memoryBlock = lmb;
    MALLOC_ASSERT((uintptr_t)lmb + lmb->unalignedSize >=
                  (uintptr_t)object + lmb->objectSize, "An object must fit to the block.");

    usedAddrRange.registerAlloc((uintptr_t)region, (uintptr_t)region + requestSize);
    totalMemSize.fetch_add(region->allocSz - oldRegionSize);

    return object;
}
#endif /* BACKEND_HAS_MREMAP */

void Backend::releaseRegion(MemRegion *memRegion)
{
    regionList.remove(memRegion);
    freeRawMem(memRegion, memRegion->allocSz);
}

// Coalesce fBlock with its free neighbors.
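// If a neighbor is currently being coalesced (or its far guard is locked), the block
// is handed to coalescQ for deferred processing and nullptr is returned; otherwise the
// (possibly merged) block is returned, and *mRegion is set when the merged block
// reaches the end of its region (the LAST_REGION_BLOCK marker).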
FreeBlock *Backend::doCoalesc(FreeBlock *fBlock, MemRegion **mRegion)
{
    FreeBlock *resBlock = fBlock;
    size_t resSize = fBlock->sizeTmp;
    MemRegion *memRegion = nullptr;

    fBlock->markCoalescing(resSize);
    resBlock->blockInBin = false;

    // coalescing with the left neighbor
    size_t leftSz = fBlock->trySetLeftUsed(GuardedSize::COAL_BLOCK);
    if (leftSz != GuardedSize::LOCKED) {
        if (leftSz == GuardedSize::COAL_BLOCK) {
            coalescQ.putBlock(fBlock);
            return nullptr;
        } else {
            FreeBlock *left = fBlock->leftNeig(leftSz);
            size_t lSz = left->trySetMeUsed(GuardedSize::COAL_BLOCK);
            if (lSz <= GuardedSize::MAX_LOCKED_VAL) {
                fBlock->setLeftFree(leftSz); // rollback
                coalescQ.putBlock(fBlock);
                return nullptr;
            } else {
                MALLOC_ASSERT(lSz == leftSz, "Invalid header");
                left->blockInBin = true;
                resBlock = left;
                resSize += leftSz;
                resBlock->sizeTmp = resSize;
            }
        }
    }
    // coalescing with the right neighbor
    FreeBlock *right = fBlock->rightNeig(fBlock->sizeTmp);
    size_t rightSz = right->trySetMeUsed(GuardedSize::COAL_BLOCK);
    if (rightSz != GuardedSize::LOCKED) {
        // LastFreeBlock is on the right side
        if (GuardedSize::LAST_REGION_BLOCK == rightSz) {
            right->setMeFree(GuardedSize::LAST_REGION_BLOCK);
            memRegion = static_cast<LastFreeBlock*>(right)->memRegion;
        } else if (GuardedSize::COAL_BLOCK == rightSz) {
            if (resBlock->blockInBin) {
                resBlock->blockInBin = false;
                removeBlockFromBin(resBlock);
            }
            coalescQ.putBlock(resBlock);
            return nullptr;
        } else {
            size_t rSz = right->rightNeig(rightSz)->
                trySetLeftUsed(GuardedSize::COAL_BLOCK);
            if (rSz <= GuardedSize::MAX_LOCKED_VAL) {
                right->setMeFree(rightSz); // rollback
                if (resBlock->blockInBin) {
                    resBlock->blockInBin = false;
                    removeBlockFromBin(resBlock);
                }
                coalescQ.putBlock(resBlock);
                return nullptr;
            } else {
                MALLOC_ASSERT(rSz == rightSz, "Invalid header");
                removeBlockFromBin(right);
                resSize += rightSz;

                // Is LastFreeBlock on the right side of right?
109051c0b2f7Stbbdev FreeBlock *nextRight = right->rightNeig(rightSz); 109151c0b2f7Stbbdev size_t nextRightSz = nextRight-> 109251c0b2f7Stbbdev trySetMeUsed(GuardedSize::COAL_BLOCK); 109351c0b2f7Stbbdev if (nextRightSz > GuardedSize::MAX_LOCKED_VAL) { 109451c0b2f7Stbbdev if (nextRightSz == GuardedSize::LAST_REGION_BLOCK) 109551c0b2f7Stbbdev memRegion = static_cast<LastFreeBlock*>(nextRight)->memRegion; 109651c0b2f7Stbbdev 109751c0b2f7Stbbdev nextRight->setMeFree(nextRightSz); 109851c0b2f7Stbbdev } 109951c0b2f7Stbbdev } 110051c0b2f7Stbbdev } 110151c0b2f7Stbbdev } 110251c0b2f7Stbbdev if (memRegion) { 110351c0b2f7Stbbdev MALLOC_ASSERT((uintptr_t)memRegion + memRegion->allocSz >= 110451c0b2f7Stbbdev (uintptr_t)right + sizeof(LastFreeBlock), ASSERT_TEXT); 110551c0b2f7Stbbdev MALLOC_ASSERT((uintptr_t)memRegion < (uintptr_t)resBlock, ASSERT_TEXT); 110651c0b2f7Stbbdev *mRegion = memRegion; 110751c0b2f7Stbbdev } else 110857f524caSIlya Isaev *mRegion = nullptr; 110951c0b2f7Stbbdev resBlock->sizeTmp = resSize; 111051c0b2f7Stbbdev return resBlock; 111151c0b2f7Stbbdev } 111251c0b2f7Stbbdev 111351c0b2f7Stbbdev bool Backend::coalescAndPutList(FreeBlock *list, bool forceCoalescQDrop, bool reportBlocksProcessed) 111451c0b2f7Stbbdev { 111551c0b2f7Stbbdev bool regionReleased = false; 111651c0b2f7Stbbdev 111751c0b2f7Stbbdev for (FreeBlock *helper; list; 111851c0b2f7Stbbdev list = helper, 111951c0b2f7Stbbdev // matches block enqueue in CoalRequestQ::putBlock() 112051c0b2f7Stbbdev reportBlocksProcessed? coalescQ.blockWasProcessed() : (void)0) { 112151c0b2f7Stbbdev MemRegion *memRegion; 112251c0b2f7Stbbdev bool addToTail = false; 112351c0b2f7Stbbdev 112451c0b2f7Stbbdev helper = list->nextToFree; 112551c0b2f7Stbbdev FreeBlock *toRet = doCoalesc(list, &memRegion); 112651c0b2f7Stbbdev if (!toRet) 112751c0b2f7Stbbdev continue; 112851c0b2f7Stbbdev 112951c0b2f7Stbbdev if (memRegion && memRegion->blockSz == toRet->sizeTmp 113051c0b2f7Stbbdev && !extMemPool->fixedPool) { 113151c0b2f7Stbbdev if (extMemPool->regionsAreReleaseable()) { 113251c0b2f7Stbbdev // release the region, because there is no used blocks in it 113351c0b2f7Stbbdev if (toRet->blockInBin) 113451c0b2f7Stbbdev removeBlockFromBin(toRet); 113551c0b2f7Stbbdev releaseRegion(memRegion); 113651c0b2f7Stbbdev regionReleased = true; 113751c0b2f7Stbbdev continue; 113851c0b2f7Stbbdev } else // add block from empty region to end of bin, 113951c0b2f7Stbbdev addToTail = true; // preserving for exact fit 114051c0b2f7Stbbdev } 114151c0b2f7Stbbdev size_t currSz = toRet->sizeTmp; 114251c0b2f7Stbbdev int bin = sizeToBin(currSz); 114351c0b2f7Stbbdev bool toAligned = extMemPool->fixedPool ? toAlignedBin(toRet, currSz) : toRet->slabAligned; 114451c0b2f7Stbbdev bool needAddToBin = true; 114551c0b2f7Stbbdev 114651c0b2f7Stbbdev if (toRet->blockInBin) { 114751c0b2f7Stbbdev // Does it stay in same bin? 
114851c0b2f7Stbbdev if (toRet->myBin == bin && toRet->slabAligned == toAligned)
114951c0b2f7Stbbdev needAddToBin = false;
115051c0b2f7Stbbdev else {
115151c0b2f7Stbbdev toRet->blockInBin = false;
115251c0b2f7Stbbdev removeBlockFromBin(toRet);
115351c0b2f7Stbbdev }
115451c0b2f7Stbbdev }
115551c0b2f7Stbbdev 
115651c0b2f7Stbbdev // Does not stay in the same bin, or is bin-less; add it
115751c0b2f7Stbbdev if (needAddToBin) {
115857f524caSIlya Isaev toRet->prev = toRet->next = toRet->nextToFree = nullptr;
115951c0b2f7Stbbdev toRet->myBin = NO_BIN;
116051c0b2f7Stbbdev toRet->slabAligned = toAligned;
116151c0b2f7Stbbdev 
116251c0b2f7Stbbdev // If the block is too small to fit in any bin, keep it bin-less.
116351c0b2f7Stbbdev // It's not a leak because the block can later be coalesced.
116451c0b2f7Stbbdev if (currSz >= minBinnedSize) {
116551c0b2f7Stbbdev toRet->sizeTmp = currSz;
116651c0b2f7Stbbdev IndexedBins *target = toRet->slabAligned ? &freeSlabAlignedBins : &freeLargeBlockBins;
116751c0b2f7Stbbdev if (forceCoalescQDrop) {
116851c0b2f7Stbbdev target->addBlock(bin, toRet, toRet->sizeTmp, addToTail);
116951c0b2f7Stbbdev } else if (!target->tryAddBlock(bin, toRet, addToTail)) {
117051c0b2f7Stbbdev coalescQ.putBlock(toRet);
117151c0b2f7Stbbdev continue;
117251c0b2f7Stbbdev }
117351c0b2f7Stbbdev }
117451c0b2f7Stbbdev toRet->sizeTmp = 0;
117551c0b2f7Stbbdev }
117651c0b2f7Stbbdev // Free the (possibly coalesced) block.
117751c0b2f7Stbbdev // Adding to a bin must be done before this point,
117851c0b2f7Stbbdev // because once a block is free it can be coalesced, and
117951c0b2f7Stbbdev // using its pointer becomes unsafe.
118051c0b2f7Stbbdev // Remember that coalescing is not done under any global lock.
118151c0b2f7Stbbdev toRet->setMeFree(currSz);
118251c0b2f7Stbbdev toRet->rightNeig(currSz)->setLeftFree(currSz);
118351c0b2f7Stbbdev }
118451c0b2f7Stbbdev return regionReleased;
118551c0b2f7Stbbdev }
118651c0b2f7Stbbdev 
118751c0b2f7Stbbdev // Coalesce fBlock and add it back to a bin;
118851c0b2f7Stbbdev // processing delayed coalescing requests.
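// Illustrative call pattern (a sketch added for clarity, not quoted from actual callers):
// code that hands a block back to the backend is expected to do something like
//     coalescAndPut(fBlock, blockSz, /*slabAligned=*/true);
// and must not dereference fBlock afterwards, because the block may be merged
// with its neighbors or its whole region may be released via releaseRegion().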
118951c0b2f7Stbbdev void Backend::coalescAndPut(FreeBlock *fBlock, size_t blockSz, bool slabAligned)
119051c0b2f7Stbbdev {
119151c0b2f7Stbbdev fBlock->sizeTmp = blockSz;
119257f524caSIlya Isaev fBlock->nextToFree = nullptr;
119351c0b2f7Stbbdev fBlock->slabAligned = slabAligned;
119451c0b2f7Stbbdev 
119551c0b2f7Stbbdev coalescAndPutList(fBlock, /*forceCoalescQDrop=*/false, /*reportBlocksProcessed=*/false);
119651c0b2f7Stbbdev }
119751c0b2f7Stbbdev 
119851c0b2f7Stbbdev bool Backend::scanCoalescQ(bool forceCoalescQDrop)
119951c0b2f7Stbbdev {
120051c0b2f7Stbbdev FreeBlock *currCoalescList = coalescQ.getAll();
120151c0b2f7Stbbdev 
120251c0b2f7Stbbdev if (currCoalescList)
120351c0b2f7Stbbdev // reportBlocksProcessed=true informs that the blocks leave coalescQ,
120451c0b2f7Stbbdev // matches blockConsumed() from CoalRequestQ::putBlock()
120551c0b2f7Stbbdev coalescAndPutList(currCoalescList, forceCoalescQDrop,
120651c0b2f7Stbbdev /*reportBlocksProcessed=*/true);
120751c0b2f7Stbbdev // returns the status of coalescQ.getAll() as an indication of possible changes in the backend
120851c0b2f7Stbbdev // TODO: coalescAndPutList() may report whether some new free blocks became available or not
120951c0b2f7Stbbdev return currCoalescList;
121051c0b2f7Stbbdev }
121151c0b2f7Stbbdev 
121251c0b2f7Stbbdev FreeBlock *Backend::findBlockInRegion(MemRegion *region, size_t exactBlockSize)
121351c0b2f7Stbbdev {
121451c0b2f7Stbbdev FreeBlock *fBlock;
121551c0b2f7Stbbdev size_t blockSz;
121651c0b2f7Stbbdev uintptr_t fBlockEnd,
121751c0b2f7Stbbdev lastFreeBlock = (uintptr_t)region + region->allocSz - sizeof(LastFreeBlock);
121851c0b2f7Stbbdev 
121951c0b2f7Stbbdev static_assert(sizeof(LastFreeBlock) % sizeof(uintptr_t) == 0,
122051c0b2f7Stbbdev "Atomic applied on LastFreeBlock, and we put it at the end of region, that"
122151c0b2f7Stbbdev " is uintptr_t-aligned, so no unaligned atomic operations are possible.");
122251c0b2f7Stbbdev // right bound is slab-aligned, keep LastFreeBlock after it
122351c0b2f7Stbbdev if (region->type == MEMREG_SLAB_BLOCKS) {
122451c0b2f7Stbbdev fBlock = (FreeBlock *)alignUp((uintptr_t)region + sizeof(MemRegion), sizeof(uintptr_t));
122551c0b2f7Stbbdev fBlockEnd = alignDown(lastFreeBlock, slabSize);
122651c0b2f7Stbbdev } else {
122751c0b2f7Stbbdev fBlock = (FreeBlock *)alignUp((uintptr_t)region + sizeof(MemRegion), largeObjectAlignment);
122851c0b2f7Stbbdev fBlockEnd = (uintptr_t)fBlock + exactBlockSize;
122951c0b2f7Stbbdev MALLOC_ASSERT(fBlockEnd <= lastFreeBlock, ASSERT_TEXT);
123051c0b2f7Stbbdev }
123151c0b2f7Stbbdev if (fBlockEnd <= (uintptr_t)fBlock)
123257f524caSIlya Isaev return nullptr; // allocSz is too small
123351c0b2f7Stbbdev blockSz = fBlockEnd - (uintptr_t)fBlock;
123451c0b2f7Stbbdev // TODO: extend getSlabBlock to support degradation, i.e. getting fewer blocks
123551c0b2f7Stbbdev // than requested, and then relax this check
123651c0b2f7Stbbdev // (for now all-or-nothing is implemented, and the check reflects that)
123751c0b2f7Stbbdev if (blockSz < numOfSlabAllocOnMiss*slabSize)
123857f524caSIlya Isaev return nullptr;
123951c0b2f7Stbbdev 
124051c0b2f7Stbbdev region->blockSz = blockSz;
124151c0b2f7Stbbdev return fBlock;
124251c0b2f7Stbbdev }
124351c0b2f7Stbbdev 
124451c0b2f7Stbbdev // startUseBlock may add the free block to a bin; the block can be used and
124551c0b2f7Stbbdev // even released after this, so the region must already be added to regionList
124651c0b2f7Stbbdev void Backend::startUseBlock(MemRegion *region, FreeBlock *fBlock, bool addToBin)
124751c0b2f7Stbbdev {
124851c0b2f7Stbbdev size_t blockSz = region->blockSz;
124951c0b2f7Stbbdev fBlock->initHeader();
125051c0b2f7Stbbdev fBlock->setMeFree(blockSz);
125151c0b2f7Stbbdev 
125251c0b2f7Stbbdev LastFreeBlock *lastBl = static_cast<LastFreeBlock*>(fBlock->rightNeig(blockSz));
125351c0b2f7Stbbdev // to avoid unaligned atomics during LastFreeBlock access
125457f524caSIlya Isaev MALLOC_ASSERT(isAligned(lastBl, sizeof(uintptr_t)), nullptr);
125551c0b2f7Stbbdev lastBl->initHeader();
125651c0b2f7Stbbdev lastBl->setMeFree(GuardedSize::LAST_REGION_BLOCK);
125751c0b2f7Stbbdev lastBl->setLeftFree(blockSz);
125851c0b2f7Stbbdev lastBl->myBin = NO_BIN;
125951c0b2f7Stbbdev lastBl->memRegion = region;
126051c0b2f7Stbbdev 
126151c0b2f7Stbbdev if (addToBin) {
126251c0b2f7Stbbdev unsigned targetBin = sizeToBin(blockSz);
126351c0b2f7Stbbdev // when adding regions in advance, register the bin for the largest block in the region
126451c0b2f7Stbbdev advRegBins.registerBin(targetBin);
126551c0b2f7Stbbdev if (region->type == MEMREG_SLAB_BLOCKS) {
126651c0b2f7Stbbdev fBlock->slabAligned = true;
126751c0b2f7Stbbdev freeSlabAlignedBins.addBlock(targetBin, fBlock, blockSz, /*addToTail=*/false);
126851c0b2f7Stbbdev } else {
126951c0b2f7Stbbdev fBlock->slabAligned = false;
127051c0b2f7Stbbdev freeLargeBlockBins.addBlock(targetBin, fBlock, blockSz, /*addToTail=*/false);
127151c0b2f7Stbbdev }
127251c0b2f7Stbbdev } else {
127351c0b2f7Stbbdev // to match blockReleased() in genericGetBlock
127451c0b2f7Stbbdev bkndSync.blockConsumed();
127551c0b2f7Stbbdev // Record our alignment for correct splitBlock operation
127651c0b2f7Stbbdev fBlock->slabAligned = region->type == MEMREG_SLAB_BLOCKS ?
true : false; 127751c0b2f7Stbbdev fBlock->sizeTmp = fBlock->tryLockBlock(); 127851c0b2f7Stbbdev MALLOC_ASSERT(fBlock->sizeTmp >= FreeBlock::minBlockSize, "Locking must be successful"); 127951c0b2f7Stbbdev } 128051c0b2f7Stbbdev } 128151c0b2f7Stbbdev 128251c0b2f7Stbbdev void MemRegionList::add(MemRegion *r) 128351c0b2f7Stbbdev { 128457f524caSIlya Isaev r->prev = nullptr; 128551c0b2f7Stbbdev MallocMutex::scoped_lock lock(regionListLock); 128651c0b2f7Stbbdev r->next = head; 128751c0b2f7Stbbdev head = r; 128851c0b2f7Stbbdev if (head->next) 128951c0b2f7Stbbdev head->next->prev = head; 129051c0b2f7Stbbdev } 129151c0b2f7Stbbdev 129251c0b2f7Stbbdev void MemRegionList::remove(MemRegion *r) 129351c0b2f7Stbbdev { 129451c0b2f7Stbbdev MallocMutex::scoped_lock lock(regionListLock); 129551c0b2f7Stbbdev if (head == r) 129651c0b2f7Stbbdev head = head->next; 129751c0b2f7Stbbdev if (r->next) 129851c0b2f7Stbbdev r->next->prev = r->prev; 129951c0b2f7Stbbdev if (r->prev) 130051c0b2f7Stbbdev r->prev->next = r->next; 130151c0b2f7Stbbdev } 130251c0b2f7Stbbdev 130351c0b2f7Stbbdev #if __TBB_MALLOC_BACKEND_STAT 130451c0b2f7Stbbdev int MemRegionList::reportStat(FILE *f) 130551c0b2f7Stbbdev { 130651c0b2f7Stbbdev int regNum = 0; 130751c0b2f7Stbbdev MallocMutex::scoped_lock lock(regionListLock); 130851c0b2f7Stbbdev for (MemRegion *curr = head; curr; curr = curr->next) { 130951c0b2f7Stbbdev fprintf(f, "%p: max block %lu B, ", curr, curr->blockSz); 131051c0b2f7Stbbdev regNum++; 131151c0b2f7Stbbdev } 131251c0b2f7Stbbdev return regNum; 131351c0b2f7Stbbdev } 131451c0b2f7Stbbdev #endif 131551c0b2f7Stbbdev 131651c0b2f7Stbbdev FreeBlock *Backend::addNewRegion(size_t size, MemRegionType memRegType, bool addToBin) 131751c0b2f7Stbbdev { 131851c0b2f7Stbbdev static_assert(sizeof(BlockMutexes) <= sizeof(BlockI), "Header must be not overwritten in used blocks"); 131951c0b2f7Stbbdev MALLOC_ASSERT(FreeBlock::minBlockSize > GuardedSize::MAX_SPEC_VAL, 132051c0b2f7Stbbdev "Block length must not conflict with special values of GuardedSize"); 132151c0b2f7Stbbdev // If the region is not "for slabs" we should reserve some space for 132251c0b2f7Stbbdev // a region header, the worst case alignment and the last block mark. 132351c0b2f7Stbbdev const size_t requestSize = memRegType == MEMREG_SLAB_BLOCKS ? 
size : 132451c0b2f7Stbbdev size + sizeof(MemRegion) + largeObjectAlignment 132551c0b2f7Stbbdev + FreeBlock::minBlockSize + sizeof(LastFreeBlock); 132651c0b2f7Stbbdev 132751c0b2f7Stbbdev size_t rawSize = requestSize; 132851c0b2f7Stbbdev MemRegion *region = (MemRegion*)allocRawMem(rawSize); 132951c0b2f7Stbbdev if (!region) { 133051c0b2f7Stbbdev MALLOC_ASSERT(rawSize==requestSize, "getRawMem has not allocated memory but changed the allocated size."); 133157f524caSIlya Isaev return nullptr; 133251c0b2f7Stbbdev } 133351c0b2f7Stbbdev if (rawSize < sizeof(MemRegion)) { 133451c0b2f7Stbbdev if (!extMemPool->fixedPool) 133551c0b2f7Stbbdev freeRawMem(region, rawSize); 133657f524caSIlya Isaev return nullptr; 133751c0b2f7Stbbdev } 133851c0b2f7Stbbdev 133951c0b2f7Stbbdev region->type = memRegType; 134051c0b2f7Stbbdev region->allocSz = rawSize; 134151c0b2f7Stbbdev FreeBlock *fBlock = findBlockInRegion(region, size); 134251c0b2f7Stbbdev if (!fBlock) { 134351c0b2f7Stbbdev if (!extMemPool->fixedPool) 134451c0b2f7Stbbdev freeRawMem(region, rawSize); 134557f524caSIlya Isaev return nullptr; 134651c0b2f7Stbbdev } 134751c0b2f7Stbbdev regionList.add(region); 134851c0b2f7Stbbdev startUseBlock(region, fBlock, addToBin); 134951c0b2f7Stbbdev bkndSync.binsModified(); 135051c0b2f7Stbbdev return addToBin? (FreeBlock*)VALID_BLOCK_IN_BIN : fBlock; 135151c0b2f7Stbbdev } 135251c0b2f7Stbbdev 135351c0b2f7Stbbdev void Backend::init(ExtMemoryPool *extMemoryPool) 135451c0b2f7Stbbdev { 135551c0b2f7Stbbdev extMemPool = extMemoryPool; 135651c0b2f7Stbbdev usedAddrRange.init(); 135751c0b2f7Stbbdev coalescQ.init(&bkndSync); 135851c0b2f7Stbbdev bkndSync.init(this); 135951c0b2f7Stbbdev } 136051c0b2f7Stbbdev 136151c0b2f7Stbbdev void Backend::reset() 136251c0b2f7Stbbdev { 136351c0b2f7Stbbdev MALLOC_ASSERT(extMemPool->userPool(), "Only user pool can be reset."); 136451c0b2f7Stbbdev // no active threads are allowed in backend while reset() called 136551c0b2f7Stbbdev verify(); 136651c0b2f7Stbbdev 136751c0b2f7Stbbdev freeLargeBlockBins.reset(); 136851c0b2f7Stbbdev freeSlabAlignedBins.reset(); 136951c0b2f7Stbbdev advRegBins.reset(); 137051c0b2f7Stbbdev 137151c0b2f7Stbbdev for (MemRegion *curr = regionList.head; curr; curr = curr->next) { 137251c0b2f7Stbbdev FreeBlock *fBlock = findBlockInRegion(curr, curr->blockSz); 137351c0b2f7Stbbdev MALLOC_ASSERT(fBlock, "A memory region unexpectedly got smaller"); 137451c0b2f7Stbbdev startUseBlock(curr, fBlock, /*addToBin=*/true); 137551c0b2f7Stbbdev } 137651c0b2f7Stbbdev } 137751c0b2f7Stbbdev 137851c0b2f7Stbbdev bool Backend::destroy() 137951c0b2f7Stbbdev { 138051c0b2f7Stbbdev bool noError = true; 138151c0b2f7Stbbdev // no active threads are allowed in backend while destroy() called 138251c0b2f7Stbbdev verify(); 138351c0b2f7Stbbdev if (!inUserPool()) { 138451c0b2f7Stbbdev freeLargeBlockBins.reset(); 138551c0b2f7Stbbdev freeSlabAlignedBins.reset(); 138651c0b2f7Stbbdev } 138751c0b2f7Stbbdev while (regionList.head) { 138851c0b2f7Stbbdev MemRegion *helper = regionList.head->next; 138951c0b2f7Stbbdev noError &= freeRawMem(regionList.head, regionList.head->allocSz); 139051c0b2f7Stbbdev regionList.head = helper; 139151c0b2f7Stbbdev } 139251c0b2f7Stbbdev return noError; 139351c0b2f7Stbbdev } 139451c0b2f7Stbbdev 139551c0b2f7Stbbdev bool Backend::clean() 139651c0b2f7Stbbdev { 139751c0b2f7Stbbdev scanCoalescQ(/*forceCoalescQDrop=*/false); 139851c0b2f7Stbbdev 139951c0b2f7Stbbdev bool res = false; 140051c0b2f7Stbbdev // We can have several blocks occupying a whole region, 140151c0b2f7Stbbdev // because such regions 
are added in advance (see askMemFromOS() and reset()), 140251c0b2f7Stbbdev // and never used. Release them all. 140351c0b2f7Stbbdev for (int i = advRegBins.getMinUsedBin(0); i != -1; i = advRegBins.getMinUsedBin(i+1)) { 140451c0b2f7Stbbdev if (i == freeSlabAlignedBins.getMinNonemptyBin(i)) 140551c0b2f7Stbbdev res |= freeSlabAlignedBins.tryReleaseRegions(i, this); 140651c0b2f7Stbbdev if (i == freeLargeBlockBins.getMinNonemptyBin(i)) 140751c0b2f7Stbbdev res |= freeLargeBlockBins.tryReleaseRegions(i, this); 140851c0b2f7Stbbdev } 140951c0b2f7Stbbdev 141051c0b2f7Stbbdev return res; 141151c0b2f7Stbbdev } 141251c0b2f7Stbbdev 141351c0b2f7Stbbdev void Backend::IndexedBins::verify() 141451c0b2f7Stbbdev { 1415478de5b1Stbbdev #if MALLOC_DEBUG 14162110128eSsarathnandu for (int i=0; i<(int)freeBinsNum; i++) { 1417478de5b1Stbbdev for (FreeBlock *fb = freeBins[i].head.load(std::memory_order_relaxed); fb; fb=fb->next) { 141851c0b2f7Stbbdev uintptr_t mySz = fb->myL.value; 141951c0b2f7Stbbdev MALLOC_ASSERT(mySz>GuardedSize::MAX_SPEC_VAL, ASSERT_TEXT); 142051c0b2f7Stbbdev FreeBlock *right = (FreeBlock*)((uintptr_t)fb + mySz); 142151c0b2f7Stbbdev suppress_unused_warning(right); 142251c0b2f7Stbbdev MALLOC_ASSERT(right->myL.value<=GuardedSize::MAX_SPEC_VAL, ASSERT_TEXT); 142351c0b2f7Stbbdev MALLOC_ASSERT(right->leftL.value==mySz, ASSERT_TEXT); 142451c0b2f7Stbbdev MALLOC_ASSERT(fb->leftL.value<=GuardedSize::MAX_SPEC_VAL, ASSERT_TEXT); 142551c0b2f7Stbbdev } 142651c0b2f7Stbbdev } 1427478de5b1Stbbdev #endif 142851c0b2f7Stbbdev } 142951c0b2f7Stbbdev 143051c0b2f7Stbbdev // For correct operation, it must be called when no other threads 143151c0b2f7Stbbdev // is changing backend. 143251c0b2f7Stbbdev void Backend::verify() 143351c0b2f7Stbbdev { 143451c0b2f7Stbbdev #if MALLOC_DEBUG 143551c0b2f7Stbbdev scanCoalescQ(/*forceCoalescQDrop=*/false); 1436478de5b1Stbbdev #endif // MALLOC_DEBUG 143751c0b2f7Stbbdev 143851c0b2f7Stbbdev freeLargeBlockBins.verify(); 143951c0b2f7Stbbdev freeSlabAlignedBins.verify(); 144051c0b2f7Stbbdev } 144151c0b2f7Stbbdev 144251c0b2f7Stbbdev #if __TBB_MALLOC_BACKEND_STAT 144351c0b2f7Stbbdev size_t Backend::Bin::countFreeBlocks() 144451c0b2f7Stbbdev { 144551c0b2f7Stbbdev size_t cnt = 0; 144651c0b2f7Stbbdev { 144751c0b2f7Stbbdev MallocMutex::scoped_lock lock(tLock); 144851c0b2f7Stbbdev for (FreeBlock *fb = head; fb; fb = fb->next) 144951c0b2f7Stbbdev cnt++; 145051c0b2f7Stbbdev } 145151c0b2f7Stbbdev return cnt; 145251c0b2f7Stbbdev } 145351c0b2f7Stbbdev 145451c0b2f7Stbbdev size_t Backend::Bin::reportFreeBlocks(FILE *f) 145551c0b2f7Stbbdev { 145651c0b2f7Stbbdev size_t totalSz = 0; 145751c0b2f7Stbbdev MallocMutex::scoped_lock lock(tLock); 145851c0b2f7Stbbdev for (FreeBlock *fb = head; fb; fb = fb->next) { 145951c0b2f7Stbbdev size_t sz = fb->tryLockBlock(); 146051c0b2f7Stbbdev fb->setMeFree(sz); 146151c0b2f7Stbbdev fprintf(f, " [%p;%p]", fb, (void*)((uintptr_t)fb+sz)); 146251c0b2f7Stbbdev totalSz += sz; 146351c0b2f7Stbbdev } 146451c0b2f7Stbbdev return totalSz; 146551c0b2f7Stbbdev } 146651c0b2f7Stbbdev 146751c0b2f7Stbbdev void Backend::IndexedBins::reportStat(FILE *f) 146851c0b2f7Stbbdev { 146951c0b2f7Stbbdev size_t totalSize = 0; 147051c0b2f7Stbbdev 147151c0b2f7Stbbdev for (int i=0; i<Backend::freeBinsNum; i++) 147251c0b2f7Stbbdev if (size_t cnt = freeBins[i].countFreeBlocks()) { 147351c0b2f7Stbbdev totalSize += freeBins[i].reportFreeBlocks(f); 147451c0b2f7Stbbdev fprintf(f, " %d:%lu, ", i, cnt); 147551c0b2f7Stbbdev } 147651c0b2f7Stbbdev fprintf(f, "\ttotal size %lu KB", totalSize/1024); 147751c0b2f7Stbbdev 
} 147851c0b2f7Stbbdev 147951c0b2f7Stbbdev void Backend::reportStat(FILE *f) 148051c0b2f7Stbbdev { 148151c0b2f7Stbbdev scanCoalescQ(/*forceCoalescQDrop=*/false); 148251c0b2f7Stbbdev 148351c0b2f7Stbbdev fprintf(f, "\n regions:\n"); 148451c0b2f7Stbbdev int regNum = regionList.reportStat(f); 148551c0b2f7Stbbdev fprintf(f, "\n%d regions, %lu KB in all regions\n free bins:\nlarge bins: ", 148651c0b2f7Stbbdev regNum, totalMemSize/1024); 148751c0b2f7Stbbdev freeLargeBlockBins.reportStat(f); 148851c0b2f7Stbbdev fprintf(f, "\naligned bins: "); 148951c0b2f7Stbbdev freeSlabAlignedBins.reportStat(f); 149051c0b2f7Stbbdev fprintf(f, "\n"); 149151c0b2f7Stbbdev } 149251c0b2f7Stbbdev #endif // __TBB_MALLOC_BACKEND_STAT 149351c0b2f7Stbbdev 149451c0b2f7Stbbdev } } // namespaces 1495
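
/*
   Illustrative summary (a sketch added for readability, not taken from the
   original sources): the region layout that addNewRegion(), findBlockInRegion(),
   and startUseBlock() above establish for a freshly mapped region.

     |<-------------------------- allocSz raw bytes ------------------------->|
     +-----------+---------+--------------------------------+---------------+--+
     | MemRegion | padding | FreeBlock of region->blockSz B | LastFreeBlock |  |
     +-----------+---------+--------------------------------+---------------+--+
                           ^ aligned to largeObjectAlignment, or to
                             sizeof(uintptr_t) for MEMREG_SLAB_BLOCKS regions

   The LastFreeBlock sentinel immediately follows the free block; its memRegion
   back-pointer is what lets coalescAndPutList() detect that an entire region
   became free and hand it to releaseRegion(), which unlinks the region from
   regionList and returns the raw memory via freeRawMem().
*/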