/*
    Copyright (c) 2005-2021 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include "tbbmalloc_internal.h"
#include <new>        /* for placement new */

namespace rml {
namespace internal {


/********* backreferences ***********************/
/* Each slab block and each large memory object header keeps a BackRefIdx that
 * identifies an entry in some BackRefBlock; that entry points back to the block or header.
 */
struct BackRefBlock : public BlockI {
    BackRefBlock *nextForUse;     // the next in the chain of blocks with free items
    FreeObject   *bumpPtr;        // bump pointer moves from the end to the beginning of the block
    FreeObject   *freeList;
    // list of all blocks that were allocated from raw mem (i.e., not from backend)
    BackRefBlock *nextRawMemBlock;
    int           allocatedCount; // the number of objects allocated
    BackRefIdx::master_t myNum;   // the index in the master
    MallocMutex   blockMutex;
    // true if this block has been added to the listForUse chain,
    // modifications protected by masterMutex
    bool          addedToForUse;

    BackRefBlock(const BackRefBlock *blockToUse, intptr_t num) :
        nextForUse(NULL), bumpPtr((FreeObject*)((uintptr_t)blockToUse + slabSize - sizeof(void*))),
        freeList(NULL), nextRawMemBlock(NULL), allocatedCount(0), myNum(num),
        addedToForUse(false) {
        memset(&blockMutex, 0, sizeof(MallocMutex));

        MALLOC_ASSERT(!(num >> CHAR_BIT*sizeof(BackRefIdx::master_t)),
                      "index in BackRefMaster must fit into BackRefIdx::master_t");
    }
    // clean all but header
    void zeroSet() { memset(this+1, 0, BackRefBlock::bytes-sizeof(BackRefBlock)); }
    static const int bytes = slabSize;
};

// max number of backreference pointers in slab block
static const int BR_MAX_CNT = (BackRefBlock::bytes-sizeof(BackRefBlock))/sizeof(void*);

struct BackRefMaster {
/* On 64-bit systems a slab block can hold up to ~2K back pointers to slab blocks
 * or large objects, so it can address at least 32MB. The master array of 256KB
 * holds 32K pointers to such blocks, addressing ~1 TB.
 * On 32-bit systems there are ~4K back pointers in a slab block, so ~64MB can be addressed.
 * The master array of 8KB holds 2K pointers to leaves, so ~128 GB can be addressed.
 */
    static const size_t bytes = sizeof(uintptr_t)>4? 256*1024 : 8*1024;
    static const int dataSz;
/* space is reserved for master table and 4 leaves
   taking into account VirtualAlloc allocation granularity */
    static const int leaves = 4;
    static const size_t masterSize = BackRefMaster::bytes+leaves*BackRefBlock::bytes;
    // The size of a memory request for a batch of additional leaf blocks;
    // selected to match VirtualAlloc granularity
    static const size_t blockSpaceSize = 64*1024;

    Backend       *backend;
    BackRefBlock  *active;          // if defined, use it for allocations
    BackRefBlock  *listForUse;      // the chain of data blocks with free items
    BackRefBlock  *allRawMemBlocks;
    std::atomic<intptr_t> lastUsed; // index of the last used block
    bool           rawMemUsed;
    MallocMutex    requestNewSpaceMutex;
    BackRefBlock  *backRefBl[1];    // the real size of the array is dataSz

    BackRefBlock *findFreeBlock();
    void addToForUseList(BackRefBlock *bl);
    void initEmptyBackRefBlock(BackRefBlock *newBl);
    bool requestNewSpace();
};

const int BackRefMaster::dataSz
    = 1+(BackRefMaster::bytes-sizeof(BackRefMaster))/sizeof(BackRefBlock*);

static MallocMutex masterMutex;
static std::atomic<BackRefMaster*> backRefMaster;

bool initBackRefMaster(Backend *backend)
{
    bool rawMemUsed;
    BackRefMaster *master =
        (BackRefMaster*)backend->getBackRefSpace(BackRefMaster::masterSize,
                                                 &rawMemUsed);
    if (! master)
        return false;
    master->backend = backend;
    master->listForUse = master->allRawMemBlocks = NULL;
    master->rawMemUsed = rawMemUsed;
    master->lastUsed = -1;
    memset(&master->requestNewSpaceMutex, 0, sizeof(MallocMutex));
    for (int i=0; i<BackRefMaster::leaves; i++) {
        BackRefBlock *bl = (BackRefBlock*)((uintptr_t)master + BackRefMaster::bytes + i*BackRefBlock::bytes);
        bl->zeroSet();
        master->initEmptyBackRefBlock(bl);
        if (i)
            master->addToForUseList(bl);
        else // active leaf is not needed in listForUse
            master->active = bl;
    }
    // backRefMaster is read in getBackRef, so publish it in a consistent state
    backRefMaster.store(master, std::memory_order_release);
    return true;
}

void destroyBackRefMaster(Backend *backend)
{
    if (backRefMaster.load(std::memory_order_acquire)) { // Is initBackRefMaster() called?
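        // Note on teardown order (added for clarity): chunks obtained from raw memory and
        // chained through nextRawMemBlock are returned first; the initial leaves were carved
        // out of the master region itself and are released together with it afterwards.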
        for (BackRefBlock *curr = backRefMaster.load(std::memory_order_relaxed)->allRawMemBlocks; curr; ) {
            BackRefBlock *next = curr->nextRawMemBlock;
            // allRawMemBlocks list is only for raw mem blocks
            backend->putBackRefSpace(curr, BackRefMaster::blockSpaceSize,
                                     /*rawMemUsed=*/true);
            curr = next;
        }
        backend->putBackRefSpace(backRefMaster.load(std::memory_order_relaxed), BackRefMaster::masterSize,
                                 backRefMaster.load(std::memory_order_relaxed)->rawMemUsed);
    }
}

void BackRefMaster::addToForUseList(BackRefBlock *bl)
{
    bl->nextForUse = listForUse;
    listForUse = bl;
    bl->addedToForUse = true;
}

void BackRefMaster::initEmptyBackRefBlock(BackRefBlock *newBl)
{
    intptr_t nextLU = lastUsed+1;
    new (newBl) BackRefBlock(newBl, nextLU);
    MALLOC_ASSERT(nextLU < dataSz, NULL);
    backRefBl[nextLU] = newBl;
    // lastUsed is read in getBackRef, and access to backRefBl[lastUsed]
    // is possible only after checking backref against current lastUsed
    lastUsed.store(nextLU, std::memory_order_release);
}

bool BackRefMaster::requestNewSpace()
{
    bool isRawMemUsed;
    static_assert(!(blockSpaceSize % BackRefBlock::bytes),
                  "Must request space for whole number of blocks.");

    if (backRefMaster.load(std::memory_order_relaxed)->dataSz <= lastUsed + 1) // no space in master
        return false;

    // only one thread at a time may add blocks
    MallocMutex::scoped_lock newSpaceLock(requestNewSpaceMutex);

    if (listForUse) // double check: another thread may have added free blocks while we waited for the lock
        return true;
    BackRefBlock *newBl = (BackRefBlock*)backend->getBackRefSpace(blockSpaceSize, &isRawMemUsed);
    if (!newBl) return false;

    // touch the pages for the first time without taking masterMutex ...
    for (BackRefBlock *bl = newBl; (uintptr_t)bl < (uintptr_t)newBl + blockSpaceSize;
         bl = (BackRefBlock*)((uintptr_t)bl + BackRefBlock::bytes)) {
        bl->zeroSet();
    }

    MallocMutex::scoped_lock lock(masterMutex); // ... and share under lock

    const size_t numOfUnusedIdxs = backRefMaster.load(std::memory_order_relaxed)->dataSz - lastUsed - 1;
    if (numOfUnusedIdxs <= 0) { // no space in master under lock, roll back
        backend->putBackRefSpace(newBl, blockSpaceSize, isRawMemUsed);
        return false;
    }
    // It's possible that only part of newBl is used, due to lack of indices in master.
    // This is OK as such underutilization is possible only once for the backreferences table.
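    // Illustrative arithmetic (assuming the default 16 KB slab size): a 64 KB batch holds
    // blockSpaceSize / BackRefBlock::bytes == 4 leaves; if, say, only two master indices
    // remain unused, blocksToUse becomes 2 and the tail of the batch stays untouched.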
    int blocksToUse = min(numOfUnusedIdxs, blockSpaceSize / BackRefBlock::bytes);

    // use the first block in the batch to maintain the list of "raw" memory
    // to be released at shutdown
    if (isRawMemUsed) {
        newBl->nextRawMemBlock = backRefMaster.load(std::memory_order_relaxed)->allRawMemBlocks;
        backRefMaster.load(std::memory_order_relaxed)->allRawMemBlocks = newBl;
    }
    for (BackRefBlock *bl = newBl; blocksToUse>0; bl = (BackRefBlock*)((uintptr_t)bl + BackRefBlock::bytes), blocksToUse--) {
        initEmptyBackRefBlock(bl);
        if (active->allocatedCount == BR_MAX_CNT) {
            active = bl; // active leaf is not needed in listForUse
        } else {
            addToForUseList(bl);
        }
    }
    return true;
}

BackRefBlock *BackRefMaster::findFreeBlock()
{
    if (active->allocatedCount < BR_MAX_CNT)
        return active;

    if (listForUse) { // use released list
        MallocMutex::scoped_lock lock(masterMutex);

        if (active->allocatedCount == BR_MAX_CNT && listForUse) {
            active = listForUse;
            listForUse = listForUse->nextForUse;
            MALLOC_ASSERT(active->addedToForUse, ASSERT_TEXT);
            active->addedToForUse = false;
        }
    } else // allocate new data node
        if (!requestNewSpace())
            return NULL;
    return active;
}

void *getBackRef(BackRefIdx backRefIdx)
{
    // !backRefMaster means no initialization done, so it can't be valid memory
    // see initEmptyBackRefBlock for fences around lastUsed
    if (!(backRefMaster.load(std::memory_order_acquire))
          || backRefIdx.getMaster() > (backRefMaster.load(std::memory_order_relaxed)->lastUsed.load(std::memory_order_acquire))
          || backRefIdx.getOffset() >= BR_MAX_CNT)
        return NULL;
    return *(void**)((uintptr_t)backRefMaster.load(std::memory_order_relaxed)->backRefBl[backRefIdx.getMaster()]
                     + sizeof(BackRefBlock)+backRefIdx.getOffset()*sizeof(void*));
}

void setBackRef(BackRefIdx backRefIdx, void *newPtr)
{
    MALLOC_ASSERT(backRefIdx.getMaster()<=backRefMaster.load(std::memory_order_relaxed)->lastUsed.load(std::memory_order_relaxed)
                  && backRefIdx.getOffset()<BR_MAX_CNT, ASSERT_TEXT);
    *(void**)((uintptr_t)backRefMaster.load(std::memory_order_relaxed)->backRefBl[backRefIdx.getMaster()]
              + sizeof(BackRefBlock) + backRefIdx.getOffset()*sizeof(void*)) = newPtr;
}

BackRefIdx BackRefIdx::newBackRef(bool largeObj)
{
    BackRefBlock *blockToUse;
    void **toUse;
    BackRefIdx res;
    bool lastBlockFirstUsed = false;

    do {
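        // Each iteration picks a candidate block and, under its blockMutex, takes one slot:
        // first from the free list of previously removed entries, otherwise from the bump
        // pointer; if the block fills up concurrently, the loop retries with another block.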
        MALLOC_ASSERT(backRefMaster.load(std::memory_order_relaxed), ASSERT_TEXT);
        blockToUse = backRefMaster.load(std::memory_order_relaxed)->findFreeBlock();
        if (!blockToUse)
            return BackRefIdx();
        toUse = NULL;
        { // the block is locked to find a reference
            MallocMutex::scoped_lock lock(blockToUse->blockMutex);

            if (blockToUse->freeList) {
                toUse = (void**)blockToUse->freeList;
                blockToUse->freeList = blockToUse->freeList->next;
                MALLOC_ASSERT(!blockToUse->freeList ||
                              ((uintptr_t)blockToUse->freeList>=(uintptr_t)blockToUse
                               && (uintptr_t)blockToUse->freeList <
                               (uintptr_t)blockToUse + slabSize), ASSERT_TEXT);
            } else if (blockToUse->allocatedCount < BR_MAX_CNT) {
                toUse = (void**)blockToUse->bumpPtr;
                blockToUse->bumpPtr =
                    (FreeObject*)((uintptr_t)blockToUse->bumpPtr - sizeof(void*));
                if (blockToUse->allocatedCount == BR_MAX_CNT-1) {
                    MALLOC_ASSERT((uintptr_t)blockToUse->bumpPtr
                                  < (uintptr_t)blockToUse+sizeof(BackRefBlock),
                                  ASSERT_TEXT);
                    blockToUse->bumpPtr = NULL;
                }
            }
            if (toUse) {
                if (!blockToUse->allocatedCount && !backRefMaster.load(std::memory_order_relaxed)->listForUse)
                    lastBlockFirstUsed = true;
                blockToUse->allocatedCount++;
            }
        } // end of lock scope
    } while (!toUse);
    // The first thread that uses the last block requests new space in advance;
    // possible failures are ignored.
    if (lastBlockFirstUsed)
        backRefMaster.load(std::memory_order_relaxed)->requestNewSpace();

    res.master = blockToUse->myNum;
    uintptr_t offset =
        ((uintptr_t)toUse - ((uintptr_t)blockToUse + sizeof(BackRefBlock)))/sizeof(void*);
    // Is offset too big?
    MALLOC_ASSERT(!(offset >> 15), ASSERT_TEXT);
    res.offset = offset;
    if (largeObj) res.largeObj = largeObj;

    return res;
}

void removeBackRef(BackRefIdx backRefIdx)
{
    MALLOC_ASSERT(!backRefIdx.isInvalid(), ASSERT_TEXT);
    MALLOC_ASSERT(backRefIdx.getMaster()<=backRefMaster.load(std::memory_order_relaxed)->lastUsed.load(std::memory_order_relaxed)
                  && backRefIdx.getOffset()<BR_MAX_CNT, ASSERT_TEXT);
    BackRefBlock *currBlock = backRefMaster.load(std::memory_order_relaxed)->backRefBl[backRefIdx.getMaster()];
    FreeObject *freeObj = (FreeObject*)((uintptr_t)currBlock + sizeof(BackRefBlock)
                                        + backRefIdx.getOffset()*sizeof(void*));
    MALLOC_ASSERT(((uintptr_t)freeObj>(uintptr_t)currBlock &&
                   (uintptr_t)freeObj<(uintptr_t)currBlock + slabSize), ASSERT_TEXT);
    {
        MallocMutex::scoped_lock lock(currBlock->blockMutex);

        freeObj->next = currBlock->freeList;
        MALLOC_ASSERT(!freeObj->next ||
                      ((uintptr_t)freeObj->next > (uintptr_t)currBlock
                       && (uintptr_t)freeObj->next <
                       (uintptr_t)currBlock + slabSize), ASSERT_TEXT);
        currBlock->freeList = freeObj;
        currBlock->allocatedCount--;
    }
    // TODO: do we need double-check here?
    if (!currBlock->addedToForUse && currBlock!=backRefMaster.load(std::memory_order_relaxed)->active) {
        MallocMutex::scoped_lock lock(masterMutex);

        if (!currBlock->addedToForUse && currBlock!=backRefMaster.load(std::memory_order_relaxed)->active)
            backRefMaster.load(std::memory_order_relaxed)->addToForUseList(currBlock);
    }
}

/********* End of backreferences ***********************/

} // namespace internal
} // namespace rml

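// Usage sketch (illustrative only; the real call sites live elsewhere in tbbmalloc, and
// "theBlock" below is a hypothetical slab block or large-object header). A block obtains
// an index once, publishes its address through it, and drops the index when the block is
// returned to the backend:
//
//     BackRefIdx idx = BackRefIdx::newBackRef(/*largeObj=*/false);
//     if (!idx.isInvalid()) {
//         setBackRef(idx, theBlock);                       // publish the owning block
//         MALLOC_ASSERT(getBackRef(idx) == theBlock, ASSERT_TEXT);
//     }
//     // ... later, when the block is no longer in use ...
//     removeBackRef(idx);                                  // slot goes back to the free list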