/*
    Copyright (c) 2005-2021 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include "tbbmalloc_internal.h"
#include <new>        /* for placement new */

namespace rml {
namespace internal {


/********* backreferences ***********************/
/* Each slab block and each large memory object header contains a BackRefIdx
 * that identifies an entry in some BackRefBlock; that entry points back to
 * the slab block or header it was issued for.
 */
struct BackRefBlock : public BlockI {
    BackRefBlock *nextForUse;     // the next in the chain of blocks with free items
    FreeObject   *bumpPtr;        // bump pointer moves from the end to the beginning of the block
    FreeObject   *freeList;
    // list of all blocks that were allocated from raw mem (i.e., not from backend)
    BackRefBlock *nextRawMemBlock;
    std::atomic<int> allocatedCount; // the number of objects allocated
    BackRefIdx::main_t myNum;   // the index in the main array (BackRefMain::backRefBl)
    MallocMutex   blockMutex;
    // true if this block has been added to the listForUse chain,
    // modifications protected by mainMutex
    std::atomic<bool> addedToForUse;

    BackRefBlock(const BackRefBlock *blockToUse, intptr_t num) :
        nextForUse(NULL), bumpPtr((FreeObject*)((uintptr_t)blockToUse + slabSize - sizeof(void*))),
        freeList(NULL), nextRawMemBlock(NULL), allocatedCount(0), myNum(num),
        addedToForUse(false) {
        memset(&blockMutex, 0, sizeof(MallocMutex));

        MALLOC_ASSERT(!(num >> CHAR_BIT*sizeof(BackRefIdx::main_t)),
                      "index in BackRefMain must fit into BackRefIdx::main");
    }
    // clean all but header
    void zeroSet() { memset(this+1, 0, BackRefBlock::bytes-sizeof(BackRefBlock)); }
    static const int bytes = slabSize;
};

// max number of backreference pointers in a slab block
static const int BR_MAX_CNT = (BackRefBlock::bytes-sizeof(BackRefBlock))/sizeof(void*);
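
// Illustrative sketch (editor's note, not part of the allocator): a BackRefIdx
// resolves to a pointer-sized entry stored right after the BackRefBlock header,
// mirroring the address arithmetic used by getBackRef/setBackRef below:
//
//     BackRefBlock *leaf = backRefMain.load()->backRefBl[idx.getMain()];
//     std::atomic<void*> *entry = (std::atomic<void*>*)((uintptr_t)leaf
//             + sizeof(BackRefBlock) + idx.getOffset() * sizeof(std::atomic<void*>));
//     void *objectOrHeader = entry->load(std::memory_order_relaxed);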

struct BackRefMain {
/* On 64-bit systems a slab block can hold up to ~2K back pointers to slab blocks
 * or large objects, so it can address at least 32MB. The main array of 256KB
 * holds 32K pointers to such blocks, addressing ~1 TB.
 * On 32-bit systems there are ~4K back pointers in a slab block, so ~64MB can be addressed.
 * The main array of 8KB holds 2K pointers to leaves, so ~128 GB can be addressed.
 * (A worked example of these figures follows the struct.)
 */
    static const size_t bytes = sizeof(uintptr_t)>4? 256*1024 : 8*1024;
    static const int dataSz;
/* space is reserved for the main table and 4 leaves,
   taking into account VirtualAlloc allocation granularity */
    static const int leaves = 4;
    static const size_t mainSize = BackRefMain::bytes+leaves*BackRefBlock::bytes;
    // The size of a memory request for a few more leaf blocks;
    // selected to match VirtualAlloc granularity
    static const size_t blockSpaceSize = 64*1024;

    Backend       *backend;
    std::atomic<BackRefBlock*> active;         // if defined, use it for allocations
    std::atomic<BackRefBlock*> listForUse;     // the chain of data blocks with free items
    BackRefBlock  *allRawMemBlocks;
    std::atomic <intptr_t> lastUsed; // index of the last used block
    bool           rawMemUsed;
    MallocMutex    requestNewSpaceMutex;
    BackRefBlock  *backRefBl[1];   // the real size of the array is dataSz

    BackRefBlock *findFreeBlock();
    void          addToForUseList(BackRefBlock *bl);
    void          initEmptyBackRefBlock(BackRefBlock *newBl);
    bool          requestNewSpace();
};
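
// Worked example of the capacity figures documented above (editor's sketch,
// assuming the usual 16KB slab size, i.e. BackRefBlock::bytes == 16*1024):
//   64-bit: ~16K/8 = ~2K entries per leaf; a 256KB main array holds 256K/8 = 32K
//           leaf pointers, so ~32K * 2K = ~64M entries; with each entry covering
//           a 16KB slab block that is roughly 1 TB of addressable memory.
//   32-bit: ~16K/4 = ~4K entries per leaf; an 8KB main array holds 8K/4 = 2K
//           leaf pointers, so ~2K * 4K = ~8M entries, i.e. roughly 128 GB.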

const int BackRefMain::dataSz
    = 1+(BackRefMain::bytes-sizeof(BackRefMain))/sizeof(BackRefBlock*);

static MallocMutex mainMutex;
static std::atomic<BackRefMain*> backRefMain;

bool initBackRefMain(Backend *backend)
{
    bool rawMemUsed;
    BackRefMain *main =
        (BackRefMain*)backend->getBackRefSpace(BackRefMain::mainSize,
                                               &rawMemUsed);
    if (!main)
        return false;
    main->backend = backend;
    main->listForUse.store(nullptr, std::memory_order_relaxed);
    main->allRawMemBlocks = nullptr;
    main->rawMemUsed = rawMemUsed;
    main->lastUsed = -1;
    memset(&main->requestNewSpaceMutex, 0, sizeof(MallocMutex));
    for (int i=0; i<BackRefMain::leaves; i++) {
        BackRefBlock *bl = (BackRefBlock*)((uintptr_t)main + BackRefMain::bytes + i*BackRefBlock::bytes);
        bl->zeroSet();
        main->initEmptyBackRefBlock(bl);
        if (i)
            main->addToForUseList(bl);
        else // active leaf is not needed in listForUse
            main->active.store(bl, std::memory_order_relaxed);
    }
    // backRefMain is read in getBackRef, so publish it in a consistent state
    backRefMain.store(main, std::memory_order_release);
    return true;
}
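
// Layout of the single getBackRefSpace(BackRefMain::mainSize) chunk set up above
// (editor's sketch of what the code does, not a separate data structure):
//
//     | BackRefMain table (bytes) | leaf 0 | leaf 1 | leaf 2 | leaf 3 |
//                                   active  \_______listForUse_______/
//
// Leaf 0 becomes the active block; the remaining leaves wait on listForUse
// until the active one runs out of entries.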

#if __TBB_SOURCE_DIRECTLY_INCLUDED
void destroyBackRefMain(Backend *backend)
{
    if (backRefMain.load(std::memory_order_acquire)) { // Was initBackRefMain() called?
        for (BackRefBlock *curr = backRefMain.load(std::memory_order_relaxed)->allRawMemBlocks; curr; ) {
            BackRefBlock *next = curr->nextRawMemBlock;
            // allRawMemBlocks list is only for raw mem blocks
            backend->putBackRefSpace(curr, BackRefMain::blockSpaceSize,
                                     /*rawMemUsed=*/true);
            curr = next;
        }
        backend->putBackRefSpace(backRefMain.load(std::memory_order_relaxed), BackRefMain::mainSize,
                                 backRefMain.load(std::memory_order_relaxed)->rawMemUsed);
    }
}
#endif

void BackRefMain::addToForUseList(BackRefBlock *bl)
{
    bl->nextForUse = listForUse.load(std::memory_order_relaxed);
    listForUse.store(bl, std::memory_order_relaxed);
    bl->addedToForUse.store(true, std::memory_order_relaxed);
}

void BackRefMain::initEmptyBackRefBlock(BackRefBlock *newBl)
{
    intptr_t nextLU = lastUsed+1;
    new (newBl) BackRefBlock(newBl, nextLU);
    MALLOC_ASSERT(nextLU < dataSz, NULL);
    backRefBl[nextLU] = newBl;
    // lastUsed is read in getBackRef, and access to backRefBl[lastUsed]
    // is possible only after checking backref against current lastUsed
    lastUsed.store(nextLU, std::memory_order_release);
}

bool BackRefMain::requestNewSpace()
{
    bool isRawMemUsed;
    static_assert(!(blockSpaceSize % BackRefBlock::bytes),
                  "Must request space for a whole number of blocks.");

    if (backRefMain.load(std::memory_order_relaxed)->dataSz <= lastUsed + 1) // no space in the main array
        return false;

    // only one thread at a time may add blocks
    MallocMutex::scoped_lock newSpaceLock(requestNewSpaceMutex);

    if (listForUse.load(std::memory_order_relaxed)) // double check: another thread may have already added blocks
        return true;
    BackRefBlock *newBl = (BackRefBlock*)backend->getBackRefSpace(blockSpaceSize, &isRawMemUsed);
    if (!newBl) return false;

    // touch the pages for the 1st time without taking mainMutex ...
    for (BackRefBlock *bl = newBl; (uintptr_t)bl < (uintptr_t)newBl + blockSpaceSize;
         bl = (BackRefBlock*)((uintptr_t)bl + BackRefBlock::bytes)) {
        bl->zeroSet();
    }

    MallocMutex::scoped_lock lock(mainMutex); // ... and share under lock

    const size_t numOfUnusedIdxs = backRefMain.load(std::memory_order_relaxed)->dataSz - lastUsed - 1;
    if (numOfUnusedIdxs == 0) { // no space left in the main array under lock, roll back
        backend->putBackRefSpace(newBl, blockSpaceSize, isRawMemUsed);
        return false;
    }
    // It's possible that only part of newBl is used, due to lack of indices in the main array.
    // This is OK as such underutilization is possible only once for the backreferences table.
    int blocksToUse = min(numOfUnusedIdxs, blockSpaceSize / BackRefBlock::bytes);

    // use the first block in the batch to maintain the list of "raw" memory
    // to be released at shutdown
    if (isRawMemUsed) {
        newBl->nextRawMemBlock = backRefMain.load(std::memory_order_relaxed)->allRawMemBlocks;
        backRefMain.load(std::memory_order_relaxed)->allRawMemBlocks = newBl;
    }
    for (BackRefBlock *bl = newBl; blocksToUse>0; bl = (BackRefBlock*)((uintptr_t)bl + BackRefBlock::bytes), blocksToUse--) {
        initEmptyBackRefBlock(bl);
        if (active.load(std::memory_order_relaxed)->allocatedCount.load(std::memory_order_relaxed) == BR_MAX_CNT) {
            active.store(bl, std::memory_order_release); // active leaf is not needed in listForUse
        } else {
            addToForUseList(bl);
        }
    }
    return true;
}

BackRefBlock *BackRefMain::findFreeBlock()
{
    BackRefBlock* active_block = active.load(std::memory_order_acquire);
    MALLOC_ASSERT(active_block, ASSERT_TEXT);

    if (active_block->allocatedCount.load(std::memory_order_relaxed) < BR_MAX_CNT)
        return active_block;

    if (listForUse.load(std::memory_order_relaxed)) { // use released list
        MallocMutex::scoped_lock lock(mainMutex);

        if (active_block->allocatedCount.load(std::memory_order_relaxed) == BR_MAX_CNT) {
            active_block = listForUse.load(std::memory_order_relaxed);
            if (active_block) {
                active.store(active_block, std::memory_order_release);
                listForUse.store(active_block->nextForUse, std::memory_order_relaxed);
                MALLOC_ASSERT(active_block->addedToForUse.load(std::memory_order_relaxed), ASSERT_TEXT);
                active_block->addedToForUse.store(false, std::memory_order_relaxed);
            }
        }
    } else // allocate new data node
        if (!requestNewSpace())
            return NULL;
    return active.load(std::memory_order_acquire); // reread because of requestNewSpace
}
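
// Search order in findFreeBlock above (editor's summary): reuse the current
// active leaf while it has room; otherwise, under mainMutex, promote the head
// of listForUse to active; otherwise ask requestNewSpace() for more leaves.
// A NULL result means the backreference table cannot grow any further.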

void *getBackRef(BackRefIdx backRefIdx)
{
    // !backRefMain means initialization is not done yet, so the index cannot refer to valid memory
    // see initEmptyBackRefBlock for fences around lastUsed
    if (!(backRefMain.load(std::memory_order_acquire))
        || backRefIdx.getMain() > (backRefMain.load(std::memory_order_relaxed)->lastUsed.load(std::memory_order_acquire))
        || backRefIdx.getOffset() >= BR_MAX_CNT)
    {
        return NULL;
    }
    std::atomic<void*>& backRefEntry = *(std::atomic<void*>*)(
            (uintptr_t)backRefMain.load(std::memory_order_relaxed)->backRefBl[backRefIdx.getMain()]
            + sizeof(BackRefBlock) + backRefIdx.getOffset() * sizeof(std::atomic<void*>)
        );
    return backRefEntry.load(std::memory_order_relaxed);
}
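
// Illustrative (hypothetical) caller of getBackRef: other parts of the allocator
// can validate a candidate header by checking that its stored BackRefIdx maps
// back to the header itself. `SomeHeader` and its `backRefIdx` field are
// placeholders for this sketch, not types defined in this file:
//
//     bool ownsHeader(SomeHeader *hdr) {
//         return getBackRef(hdr->backRefIdx) == hdr;
//     }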

void setBackRef(BackRefIdx backRefIdx, void *newPtr)
{
    MALLOC_ASSERT(backRefIdx.getMain()<=backRefMain.load(std::memory_order_relaxed)->lastUsed.load(std::memory_order_relaxed)
                  && backRefIdx.getOffset()<BR_MAX_CNT, ASSERT_TEXT);
    ((std::atomic<void*>*)((uintptr_t)backRefMain.load(std::memory_order_relaxed)->backRefBl[backRefIdx.getMain()]
        + sizeof(BackRefBlock) + backRefIdx.getOffset() * sizeof(void*)))->store(newPtr, std::memory_order_relaxed);
}

BackRefIdx BackRefIdx::newBackRef(bool largeObj)
{
    BackRefBlock *blockToUse;
    void **toUse;
    BackRefIdx res;
    bool lastBlockFirstUsed = false;

    do {
        MALLOC_ASSERT(backRefMain.load(std::memory_order_relaxed), ASSERT_TEXT);
        blockToUse = backRefMain.load(std::memory_order_relaxed)->findFreeBlock();
        if (!blockToUse)
            return BackRefIdx();
        toUse = NULL;
        { // the block is locked to find a reference
            MallocMutex::scoped_lock lock(blockToUse->blockMutex);

            if (blockToUse->freeList) {
                toUse = (void**)blockToUse->freeList;
                blockToUse->freeList = blockToUse->freeList->next;
                MALLOC_ASSERT(!blockToUse->freeList ||
                              ((uintptr_t)blockToUse->freeList>=(uintptr_t)blockToUse
                               && (uintptr_t)blockToUse->freeList <
                               (uintptr_t)blockToUse + slabSize), ASSERT_TEXT);
            } else if (blockToUse->allocatedCount.load(std::memory_order_relaxed) < BR_MAX_CNT) {
                toUse = (void**)blockToUse->bumpPtr;
                blockToUse->bumpPtr =
                    (FreeObject*)((uintptr_t)blockToUse->bumpPtr - sizeof(void*));
                if (blockToUse->allocatedCount.load(std::memory_order_relaxed) == BR_MAX_CNT-1) {
                    MALLOC_ASSERT((uintptr_t)blockToUse->bumpPtr
                                  < (uintptr_t)blockToUse+sizeof(BackRefBlock),
                                  ASSERT_TEXT);
                    blockToUse->bumpPtr = NULL;
                }
            }
            if (toUse) {
                if (!blockToUse->allocatedCount.load(std::memory_order_relaxed) &&
                    !backRefMain.load(std::memory_order_relaxed)->listForUse.load(std::memory_order_relaxed)) {
                    lastBlockFirstUsed = true;
                }
                blockToUse->allocatedCount.store(blockToUse->allocatedCount.load(std::memory_order_relaxed) + 1, std::memory_order_relaxed);
            }
        } // end of lock scope
    } while (!toUse);
    // The first thread that uses the last block requests new space in advance;
    // possible failures are ignored.
    if (lastBlockFirstUsed)
        backRefMain.load(std::memory_order_relaxed)->requestNewSpace();

    res.main = blockToUse->myNum;
    uintptr_t offset =
        ((uintptr_t)toUse - ((uintptr_t)blockToUse + sizeof(BackRefBlock)))/sizeof(void*);
    // Is offset too big?
    MALLOC_ASSERT(!(offset >> 15), ASSERT_TEXT);
    res.offset = offset;
    if (largeObj) res.largeObj = largeObj;

    return res;
}
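
// Typical lifecycle of an index returned by newBackRef (editor's sketch;
// `object` stands for a slab block or large object header owned by the caller):
//
//     BackRefIdx idx = BackRefIdx::newBackRef(/*largeObj=*/ true);
//     if (!idx.isInvalid()) {
//         setBackRef(idx, object);  // publish the back pointer
//         // ... while the object lives, getBackRef(idx) == object ...
//         removeBackRef(idx);       // return the entry to its block's free list
//     }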

void removeBackRef(BackRefIdx backRefIdx)
{
    MALLOC_ASSERT(!backRefIdx.isInvalid(), ASSERT_TEXT);
    MALLOC_ASSERT(backRefIdx.getMain()<=backRefMain.load(std::memory_order_relaxed)->lastUsed.load(std::memory_order_relaxed)
                  && backRefIdx.getOffset()<BR_MAX_CNT, ASSERT_TEXT);
    BackRefBlock *currBlock = backRefMain.load(std::memory_order_relaxed)->backRefBl[backRefIdx.getMain()];
    std::atomic<void*>& backRefEntry = *(std::atomic<void*>*)((uintptr_t)currBlock + sizeof(BackRefBlock)
                                        + backRefIdx.getOffset()*sizeof(std::atomic<void*>));
    MALLOC_ASSERT(((uintptr_t)&backRefEntry >(uintptr_t)currBlock &&
                   (uintptr_t)&backRefEntry <(uintptr_t)currBlock + slabSize), ASSERT_TEXT);
    {
        MallocMutex::scoped_lock lock(currBlock->blockMutex);

        backRefEntry.store(currBlock->freeList, std::memory_order_relaxed);
#if MALLOC_DEBUG
        uintptr_t backRefEntryValue = (uintptr_t)backRefEntry.load(std::memory_order_relaxed);
        MALLOC_ASSERT(!backRefEntryValue ||
                      (backRefEntryValue > (uintptr_t)currBlock
                       && backRefEntryValue < (uintptr_t)currBlock + slabSize), ASSERT_TEXT);
#endif
        currBlock->freeList = (FreeObject*)&backRefEntry;
        currBlock->allocatedCount.store(currBlock->allocatedCount.load(std::memory_order_relaxed)-1, std::memory_order_relaxed);
    }
    // TODO: do we need double-check here?
    if (!currBlock->addedToForUse.load(std::memory_order_relaxed) &&
        currBlock!=backRefMain.load(std::memory_order_relaxed)->active.load(std::memory_order_relaxed)) {
        MallocMutex::scoped_lock lock(mainMutex);

        if (!currBlock->addedToForUse.load(std::memory_order_relaxed) &&
            currBlock!=backRefMain.load(std::memory_order_relaxed)->active.load(std::memory_order_relaxed))
            backRefMain.load(std::memory_order_relaxed)->addToForUseList(currBlock);
    }
}
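
// Note on removeBackRef above (editor's note): the pointer-sized entry being
// released is itself reused as the free-list link, so vacant entries form a
// singly linked list threaded through the leaf and need no extra storage.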

/********* End of backreferences ***********************/

} // namespace internal
} // namespace rml
