xref: /oneTBB/src/tbbmalloc/backref.cpp (revision 51c0b2f7)
/*
    Copyright (c) 2005-2020 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include "tbbmalloc_internal.h"
#include <new>        /* for placement new */

namespace rml {
namespace internal {


/********* backreferences ***********************/
/* Each slab block and each large memory object header contains a BackRefIdx
 * that identifies an entry in some BackRefBlock; that entry points back to the
 * block or header itself.
 */
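/* A sketch of the lookup, for orientation only (getBackRef() below is the
 * authoritative code):
 *   BackRefBlock *leaf = backRefMaster.load()->backRefBl[idx.getMaster()];
 *   void *object = ((void**)((uintptr_t)leaf + sizeof(BackRefBlock)))[idx.getOffset()];
 * i.e., the master part of the index selects a leaf block, and the offset
 * indexes the array of pointers that follows the leaf header.
 */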
struct BackRefBlock : public BlockI {
    BackRefBlock *nextForUse;     // the next in the chain of blocks with free items
    FreeObject   *bumpPtr;        // bump pointer moves from the end to the beginning of the block
    FreeObject   *freeList;
    // list of all blocks that were allocated from raw mem (i.e., not from backend)
    BackRefBlock *nextRawMemBlock;
    int           allocatedCount; // the number of objects allocated
    BackRefIdx::master_t myNum;   // the index in the master
    MallocMutex   blockMutex;
    // true if this block has been added to the listForUse chain,
    // modifications protected by masterMutex
    bool          addedToForUse;

    BackRefBlock(const BackRefBlock *blockToUse, intptr_t num) :
        nextForUse(NULL), bumpPtr((FreeObject*)((uintptr_t)blockToUse + slabSize - sizeof(void*))),
        freeList(NULL), nextRawMemBlock(NULL), allocatedCount(0), myNum(num),
        addedToForUse(false) {
        memset(&blockMutex, 0, sizeof(MallocMutex));

        MALLOC_ASSERT(!(num >> CHAR_BIT*sizeof(BackRefIdx::master_t)),
                      "index in BackRefMaster must fit into BackRefIdx::master");
    }
    // clean all but header
    void zeroSet() { memset(this+1, 0, BackRefBlock::bytes-sizeof(BackRefBlock)); }
    static const int bytes = slabSize;
};

// max number of backreference pointers in slab block
static const int BR_MAX_CNT = (BackRefBlock::bytes-sizeof(BackRefBlock))/sizeof(void*);

struct BackRefMaster {
/* On 64-bit systems a slab block can hold up to ~2K back pointers to slab blocks
 * or large objects, so it can address at least 32MB. The master array of 256KB
 * holds 32K pointers to such blocks, addressing ~1 TB.
 * On 32-bit systems there are ~4K back pointers in a slab block, so ~64MB can be addressed.
 * The master array of 8KB holds 2K pointers to leaves, so ~128 GB can be addressed.
 */
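/* A worked example of the sizing above, assuming the default 16 KB slabSize:
 * 64-bit: ~2K entries per leaf ((16K - header)/8); 2K * 16 KB ~= 32 MB per leaf;
 *         256 KB / 8 = 32K leaf pointers -> ~1 TB.
 * 32-bit: ~4K entries per leaf -> ~64 MB; 8 KB / 4 = 2K leaf pointers -> ~128 GB.
 */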
    static const size_t bytes = sizeof(uintptr_t)>4? 256*1024 : 8*1024;
    static const int dataSz;
/* space is reserved for master table and 4 leaves
   taking into account VirtualAlloc allocation granularity */
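/* Resulting layout of the masterSize allocation (a sketch; initBackRefMaster()
 * below does the actual placement):
 *   [ BackRefMaster header + backRefBl[] : BackRefMaster::bytes ][ leaf 0 ] ... [ leaf 3 ]
 */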
    static const int leaves = 4;
    static const size_t masterSize = BackRefMaster::bytes+leaves*BackRefBlock::bytes;
    // The size of memory request for a few more leaf blocks;
    // selected to match VirtualAlloc granularity
    static const size_t blockSpaceSize = 64*1024;

    Backend       *backend;
    BackRefBlock  *active;         // if defined, use it for allocations
    BackRefBlock  *listForUse;     // the chain of data blocks with free items
    BackRefBlock  *allRawMemBlocks;
    std::atomic<intptr_t> lastUsed; // index of the last used block
    bool           rawMemUsed;
    MallocMutex    requestNewSpaceMutex;
    BackRefBlock  *backRefBl[1];   // the real size of the array is dataSz

    BackRefBlock *findFreeBlock();
    void          addToForUseList(BackRefBlock *bl);
    void          initEmptyBackRefBlock(BackRefBlock *newBl);
    bool          requestNewSpace();
};

const int BackRefMaster::dataSz
    = 1+(BackRefMaster::bytes-sizeof(BackRefMaster))/sizeof(BackRefBlock*);
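// The leading 1 accounts for the backRefBl[1] element already included in
// sizeof(BackRefMaster); the remaining bytes are divided into pointer slots.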

static MallocMutex masterMutex;
static std::atomic<BackRefMaster*> backRefMaster;

bool initBackRefMaster(Backend *backend)
{
    bool rawMemUsed;
    BackRefMaster *master =
        (BackRefMaster*)backend->getBackRefSpace(BackRefMaster::masterSize,
                                                 &rawMemUsed);
    if (! master)
        return false;
    master->backend = backend;
    master->listForUse = master->allRawMemBlocks = NULL;
    master->rawMemUsed = rawMemUsed;
    master->lastUsed = -1;
    memset(&master->requestNewSpaceMutex, 0, sizeof(MallocMutex));
    for (int i=0; i<BackRefMaster::leaves; i++) {
        BackRefBlock *bl = (BackRefBlock*)((uintptr_t)master + BackRefMaster::bytes + i*BackRefBlock::bytes);
        bl->zeroSet();
        master->initEmptyBackRefBlock(bl);
        if (i)
            master->addToForUseList(bl);
        else // active leaf is not needed in listForUse
            master->active = bl;
    }
    // backRefMaster is read in getBackRef, so publish it in consistent state
    backRefMaster.store(master, std::memory_order_release);
    return true;
}

void destroyBackRefMaster(Backend *backend)
{
    if (backRefMaster.load(std::memory_order_acquire)) { // Is initBackRefMaster() called?
        for (BackRefBlock *curr = backRefMaster.load(std::memory_order_relaxed)->allRawMemBlocks; curr; ) {
            BackRefBlock *next = curr->nextRawMemBlock;
            // allRawMemBlocks list is only for raw mem blocks
            backend->putBackRefSpace(curr, BackRefMaster::blockSpaceSize,
                                     /*rawMemUsed=*/true);
            curr = next;
        }
        backend->putBackRefSpace(backRefMaster.load(std::memory_order_relaxed), BackRefMaster::masterSize,
                                 backRefMaster.load(std::memory_order_relaxed)->rawMemUsed);
    }
}

void BackRefMaster::addToForUseList(BackRefBlock *bl)
{
    bl->nextForUse = listForUse;
    listForUse = bl;
    bl->addedToForUse = true;
}

void BackRefMaster::initEmptyBackRefBlock(BackRefBlock *newBl)
{
    intptr_t nextLU = lastUsed+1;
    new (newBl) BackRefBlock(newBl, nextLU);
    MALLOC_ASSERT(nextLU < dataSz, NULL);
    backRefBl[nextLU] = newBl;
    // lastUsed is read in getBackRef, and access to backRefBl[lastUsed]
    // is possible only after checking backref against current lastUsed
    lastUsed.store(nextLU, std::memory_order_release);
}

bool BackRefMaster::requestNewSpace()
{
    bool isRawMemUsed;
    static_assert(!(blockSpaceSize % BackRefBlock::bytes),
                         "Must request space for whole number of blocks.");

    if (backRefMaster.load(std::memory_order_relaxed)->dataSz <= lastUsed + 1) // no space in master
        return false;

    // only one thread at a time may add blocks
    MallocMutex::scoped_lock newSpaceLock(requestNewSpaceMutex);

    if (listForUse) // double check under the lock: another thread may have already added blocks
        return true;
    BackRefBlock *newBl = (BackRefBlock*)backend->getBackRefSpace(blockSpaceSize, &isRawMemUsed);
    if (!newBl) return false;

    // touch a page for the 1st time without taking masterMutex ...
    for (BackRefBlock *bl = newBl; (uintptr_t)bl < (uintptr_t)newBl + blockSpaceSize;
         bl = (BackRefBlock*)((uintptr_t)bl + BackRefBlock::bytes)) {
        bl->zeroSet();
    }

    MallocMutex::scoped_lock lock(masterMutex); // ... and share under lock

    const size_t numOfUnusedIdxs = backRefMaster.load(std::memory_order_relaxed)->dataSz - lastUsed - 1;
    if (numOfUnusedIdxs <= 0) { // no space in master under lock, roll back
        backend->putBackRefSpace(newBl, blockSpaceSize, isRawMemUsed);
        return false;
    }
    // It's possible that only part of newBl is used, due to lack of indices in master.
    // This is OK as such underutilization is possible only once for the backreferences table.
    int blocksToUse = min(numOfUnusedIdxs, blockSpaceSize / BackRefBlock::bytes);

    // use the first block in the batch to maintain the list of "raw" memory
    // to be released at shutdown
    if (isRawMemUsed) {
        newBl->nextRawMemBlock = backRefMaster.load(std::memory_order_relaxed)->allRawMemBlocks;
        backRefMaster.load(std::memory_order_relaxed)->allRawMemBlocks = newBl;
    }
    for (BackRefBlock *bl = newBl; blocksToUse>0; bl = (BackRefBlock*)((uintptr_t)bl + BackRefBlock::bytes), blocksToUse--) {
        initEmptyBackRefBlock(bl);
        if (active->allocatedCount == BR_MAX_CNT) {
            active = bl; // active leaf is not needed in listForUse
        } else {
            addToForUseList(bl);
        }
    }
    return true;
}

BackRefBlock *BackRefMaster::findFreeBlock()
{
    if (active->allocatedCount < BR_MAX_CNT)
        return active;

    if (listForUse) {                                   // use released list
        MallocMutex::scoped_lock lock(masterMutex);

        if (active->allocatedCount == BR_MAX_CNT && listForUse) {
            active = listForUse;
            listForUse = listForUse->nextForUse;
            MALLOC_ASSERT(active->addedToForUse, ASSERT_TEXT);
            active->addedToForUse = false;
        }
    } else // allocate new data node
        if (!requestNewSpace())
            return NULL;
    return active;
}

void *getBackRef(BackRefIdx backRefIdx)
{
    // !backRefMaster means no initialization done, so it can't be valid memory
    // see initEmptyBackRefBlock for the fences around lastUsed
    if (!(backRefMaster.load(std::memory_order_acquire))
        || backRefIdx.getMaster() > (backRefMaster.load(std::memory_order_relaxed)->lastUsed.load(std::memory_order_acquire))
        || backRefIdx.getOffset() >= BR_MAX_CNT)
        return NULL;
    return *(void**)((uintptr_t)backRefMaster.load(std::memory_order_relaxed)->backRefBl[backRefIdx.getMaster()]
                     + sizeof(BackRefBlock)+backRefIdx.getOffset()*sizeof(void*));
}

void setBackRef(BackRefIdx backRefIdx, void *newPtr)
{
    MALLOC_ASSERT(backRefIdx.getMaster()<=backRefMaster.load(std::memory_order_relaxed)->lastUsed.load(std::memory_order_relaxed)
                  && backRefIdx.getOffset()<BR_MAX_CNT, ASSERT_TEXT);
    *(void**)((uintptr_t)backRefMaster.load(std::memory_order_relaxed)->backRefBl[backRefIdx.getMaster()]
              + sizeof(BackRefBlock) + backRefIdx.getOffset()*sizeof(void*)) = newPtr;
}

BackRefIdx BackRefIdx::newBackRef(bool largeObj)
{
    BackRefBlock *blockToUse;
    void **toUse;
    BackRefIdx res;
    bool lastBlockFirstUsed = false;

    do {
        MALLOC_ASSERT(backRefMaster.load(std::memory_order_relaxed), ASSERT_TEXT);
        blockToUse = backRefMaster.load(std::memory_order_relaxed)->findFreeBlock();
        if (!blockToUse)
            return BackRefIdx();
        toUse = NULL;
        { // the block is locked to find a reference
            MallocMutex::scoped_lock lock(blockToUse->blockMutex);

            if (blockToUse->freeList) {
                toUse = (void**)blockToUse->freeList;
                blockToUse->freeList = blockToUse->freeList->next;
                MALLOC_ASSERT(!blockToUse->freeList ||
                              ((uintptr_t)blockToUse->freeList>=(uintptr_t)blockToUse
                               && (uintptr_t)blockToUse->freeList <
                               (uintptr_t)blockToUse + slabSize), ASSERT_TEXT);
            } else if (blockToUse->allocatedCount < BR_MAX_CNT) {
                toUse = (void**)blockToUse->bumpPtr;
                blockToUse->bumpPtr =
                    (FreeObject*)((uintptr_t)blockToUse->bumpPtr - sizeof(void*));
                if (blockToUse->allocatedCount == BR_MAX_CNT-1) {
                    MALLOC_ASSERT((uintptr_t)blockToUse->bumpPtr
                                  < (uintptr_t)blockToUse+sizeof(BackRefBlock),
                                  ASSERT_TEXT);
                    blockToUse->bumpPtr = NULL;
                }
            }
            if (toUse) {
                if (!blockToUse->allocatedCount && !backRefMaster.load(std::memory_order_relaxed)->listForUse)
                    lastBlockFirstUsed = true;
                blockToUse->allocatedCount++;
            }
        } // end of lock scope
    } while (!toUse);
    // The first thread that uses the last block requests new space in advance;
    // possible failures are ignored.
    if (lastBlockFirstUsed)
        backRefMaster.load(std::memory_order_relaxed)->requestNewSpace();

    res.master = blockToUse->myNum;
    uintptr_t offset =
        ((uintptr_t)toUse - ((uintptr_t)blockToUse + sizeof(BackRefBlock)))/sizeof(void*);
    // Is offset too big?
    MALLOC_ASSERT(!(offset >> 15), ASSERT_TEXT);
    res.offset = offset;
    if (largeObj) res.largeObj = largeObj;

    return res;
}

void removeBackRef(BackRefIdx backRefIdx)
{
    MALLOC_ASSERT(!backRefIdx.isInvalid(), ASSERT_TEXT);
    MALLOC_ASSERT(backRefIdx.getMaster()<=backRefMaster.load(std::memory_order_relaxed)->lastUsed.load(std::memory_order_relaxed)
                  && backRefIdx.getOffset()<BR_MAX_CNT, ASSERT_TEXT);
    BackRefBlock *currBlock = backRefMaster.load(std::memory_order_relaxed)->backRefBl[backRefIdx.getMaster()];
    FreeObject *freeObj = (FreeObject*)((uintptr_t)currBlock + sizeof(BackRefBlock)
                                        + backRefIdx.getOffset()*sizeof(void*));
    MALLOC_ASSERT(((uintptr_t)freeObj>(uintptr_t)currBlock &&
                   (uintptr_t)freeObj<(uintptr_t)currBlock + slabSize), ASSERT_TEXT);
    {
        MallocMutex::scoped_lock lock(currBlock->blockMutex);

        freeObj->next = currBlock->freeList;
        MALLOC_ASSERT(!freeObj->next ||
                      ((uintptr_t)freeObj->next > (uintptr_t)currBlock
                       && (uintptr_t)freeObj->next <
                       (uintptr_t)currBlock + slabSize), ASSERT_TEXT);
        currBlock->freeList = freeObj;
        currBlock->allocatedCount--;
    }
    // TODO: do we need double-check here?
    if (!currBlock->addedToForUse && currBlock!=backRefMaster.load(std::memory_order_relaxed)->active) {
        MallocMutex::scoped_lock lock(masterMutex);

        if (!currBlock->addedToForUse && currBlock!=backRefMaster.load(std::memory_order_relaxed)->active)
            backRefMaster.load(std::memory_order_relaxed)->addToForUseList(currBlock);
    }
}
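
// A sketch of how the entry points above are typically used together (the real
// callers are elsewhere in tbbmalloc, e.g. slab block and large object headers;
// shown here only for orientation):
//   BackRefIdx idx = BackRefIdx::newBackRef(/*largeObj=*/false); // reserve an entry
//   setBackRef(idx, block);        // make the entry point back to the block
//   void *owner = getBackRef(idx); // resolve the index later
//   removeBackRef(idx);            // release the entry when the block is retired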

/********* End of backreferences ***********************/

} // namespace internal
} // namespace rml