/*
    Copyright (c) 2005-2023 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include "tbbmalloc_internal.h"
#include <new> /* for placement new */

namespace rml {
namespace internal {

/********* backreferences ***********************/
/* Each slab block and each large memory object header contains a BackRefIdx
 * that locates an entry in some BackRefBlock; that entry points back to this
 * block or header.
 */
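/* Resolving an index is a two-level lookup (a sketch; see getBackRef below):
 *   leaf  = backRefMain->backRefBl[idx.getMain()];
 *   entry = (void**)((char*)leaf + sizeof(BackRefBlock)) + idx.getOffset();
 *   *entry points back at the slab block or large-object header.
 */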
struct BackRefBlock : public BlockI {
    BackRefBlock *nextForUse;        // the next in the chain of blocks with free items
    FreeObject   *bumpPtr;           // bump pointer moves from the end to the beginning of the block
    FreeObject   *freeList;
    // list of all blocks that were allocated from raw mem (i.e., not from backend)
    BackRefBlock *nextRawMemBlock;
    std::atomic<int> allocatedCount; // the number of objects allocated
    BackRefIdx::main_t myNum;        // the index in the main table
    MallocMutex   blockMutex;
    // true if this block has been added to the listForUse chain,
    // modifications protected by mainMutex
    std::atomic<bool> addedToForUse;

    BackRefBlock(const BackRefBlock *blockToUse, intptr_t num) :
        nextForUse(nullptr), bumpPtr((FreeObject*)((uintptr_t)blockToUse + slabSize - sizeof(void*))),
        freeList(nullptr), nextRawMemBlock(nullptr), allocatedCount(0), myNum(num),
        addedToForUse(false) {
        memset(static_cast<void*>(&blockMutex), 0, sizeof(MallocMutex));

        MALLOC_ASSERT(!(num >> CHAR_BIT*sizeof(BackRefIdx::main_t)),
                      "index in BackRefMain must fit into BackRefIdx::main");
    }
    // clean all but header
    void zeroSet() { memset(static_cast<void*>(this+1), 0, BackRefBlock::bytes-sizeof(BackRefBlock)); }
    static const int bytes = slabSize;
};

// max number of backreference pointers in slab block
static const int BR_MAX_CNT = (BackRefBlock::bytes-sizeof(BackRefBlock))/sizeof(void*);

struct BackRefMain {
/* On 64-bit systems a slab block can hold up to ~2K back pointers to slab blocks
 * or large objects, so it can address at least 32MB. The main array of 256KB
 * holds 32K pointers to such blocks, addressing ~1 TB.
 * On 32-bit systems there are ~4K back pointers in a slab block, so ~64MB can be addressed.
 * The main array of 8KB holds 2K pointers to leaves, so ~128 GB can be addressed.
 */
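/* A worked example of the arithmetic above, assuming the usual 16KB slabSize:
 * 64-bit: a leaf holds (16KB - header)/8B ~= 2K entries; the 256KB main table
 *         holds 32K leaf pointers; 32K * 2K * 16KB per slab ~= 1TB.
 * 32-bit: (16KB - header)/4B ~= 4K entries per leaf, i.e. ~64MB of slabs each;
 *         8KB/4B = 2K leaves, so 2K * 4K * 16KB ~= 128GB.
 */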
    static const size_t bytes = sizeof(uintptr_t)>4? 256*1024 : 8*1024;
    static const int dataSz;
    /* space is reserved for the main table and 4 leaves,
       taking into account VirtualAlloc allocation granularity */
    static const int leaves = 4;
    static const size_t mainSize = BackRefMain::bytes+leaves*BackRefBlock::bytes;
    // The size of memory request for a few more leaf blocks;
    // selected to match VirtualAlloc granularity
    static const size_t blockSpaceSize = 64*1024;
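    // e.g., assuming the usual 16KB slabSize, one such request provides
    // 64KB/16KB = 4 more leaf blocks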

    Backend      *backend;
    std::atomic<BackRefBlock*> active;     // if defined, use it for allocations
    std::atomic<BackRefBlock*> listForUse; // the chain of data blocks with free items
    BackRefBlock *allRawMemBlocks;
    std::atomic<intptr_t> lastUsed;        // index of the last used block
    bool          rawMemUsed;
    MallocMutex   requestNewSpaceMutex;
    BackRefBlock *backRefBl[1];            // the real size of the array is dataSz

    BackRefBlock *findFreeBlock();
    void addToForUseList(BackRefBlock *bl);
    void initEmptyBackRefBlock(BackRefBlock *newBl);
    bool requestNewSpace();
};

const int BackRefMain::dataSz
    = 1+(BackRefMain::bytes-sizeof(BackRefMain))/sizeof(BackRefBlock*);

static MallocMutex mainMutex;
static std::atomic<BackRefMain*> backRefMain;

bool initBackRefMain(Backend *backend)
{
    bool rawMemUsed;
    BackRefMain *main =
        (BackRefMain*)backend->getBackRefSpace(BackRefMain::mainSize,
                                               &rawMemUsed);
    if (! main)
        return false;
    main->backend = backend;
    main->listForUse.store(nullptr, std::memory_order_relaxed);
    main->allRawMemBlocks = nullptr;
    main->rawMemUsed = rawMemUsed;
    main->lastUsed = -1;
    memset(static_cast<void*>(&main->requestNewSpaceMutex), 0, sizeof(MallocMutex));
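    // Carve up the reserved space: the main table occupies the first
    // BackRefMain::bytes, immediately followed by `leaves` initial leaf blocks.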
    for (int i=0; i<BackRefMain::leaves; i++) {
        BackRefBlock *bl = (BackRefBlock*)((uintptr_t)main + BackRefMain::bytes + i*BackRefBlock::bytes);
        bl->zeroSet();
        main->initEmptyBackRefBlock(bl);
        if (i)
            main->addToForUseList(bl);
        else // active leaf is not needed in listForUse
            main->active.store(bl, std::memory_order_relaxed);
    }
    // backRefMain is read in getBackRef, so publish it in a consistent state
    backRefMain.store(main, std::memory_order_release);
    return true;
}

#if __TBB_SOURCE_DIRECTLY_INCLUDED
void destroyBackRefMain(Backend *backend)
{
    if (backRefMain.load(std::memory_order_acquire)) { // Is initBackRefMain() called?
        for (BackRefBlock *curr = backRefMain.load(std::memory_order_relaxed)->allRawMemBlocks; curr; ) {
            BackRefBlock *next = curr->nextRawMemBlock;
            // allRawMemBlocks list is only for raw mem blocks
            backend->putBackRefSpace(curr, BackRefMain::blockSpaceSize,
                                     /*rawMemUsed=*/true);
            curr = next;
        }
        backend->putBackRefSpace(backRefMain.load(std::memory_order_relaxed), BackRefMain::mainSize,
                                 backRefMain.load(std::memory_order_relaxed)->rawMemUsed);
    }
}
#endif
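
// Note: callers must serialize calls to addToForUseList; once backRefMain is
// published it is always invoked under mainMutex (see addedToForUse above).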
void BackRefMain::addToForUseList(BackRefBlock *bl)
{
    bl->nextForUse = listForUse.load(std::memory_order_relaxed);
    listForUse.store(bl, std::memory_order_relaxed);
    bl->addedToForUse.store(true, std::memory_order_relaxed);
}

void BackRefMain::initEmptyBackRefBlock(BackRefBlock *newBl)
{
    intptr_t nextLU = lastUsed+1;
    new (newBl) BackRefBlock(newBl, nextLU);
    MALLOC_ASSERT(nextLU < dataSz, nullptr);
    backRefBl[nextLU] = newBl;
    // lastUsed is read in getBackRef, and access to backRefBl[lastUsed]
    // is possible only after checking backref against current lastUsed
    lastUsed.store(nextLU, std::memory_order_release);
}

bool BackRefMain::requestNewSpace()
{
    bool isRawMemUsed;
    static_assert(!(blockSpaceSize % BackRefBlock::bytes),
                  "Must request space for whole number of blocks.");

    if (BackRefMain::dataSz <= lastUsed + 1) // no space in main
        return false;

    // only one thread at a time may add blocks
    MallocMutex::scoped_lock newSpaceLock(requestNewSpaceMutex);

    if (listForUse.load(std::memory_order_relaxed)) // double check under lock: another thread may have added blocks already
        return true;
    BackRefBlock *newBl = (BackRefBlock*)backend->getBackRefSpace(blockSpaceSize, &isRawMemUsed);
    if (!newBl) return false;

    // touch the pages for the 1st time without taking mainMutex ...
    for (BackRefBlock *bl = newBl; (uintptr_t)bl < (uintptr_t)newBl + blockSpaceSize;
         bl = (BackRefBlock*)((uintptr_t)bl + BackRefBlock::bytes)) {
        bl->zeroSet();
    }

    MallocMutex::scoped_lock lock(mainMutex); // ... and share under lock

    const size_t numOfUnusedIdxs = BackRefMain::dataSz - lastUsed - 1;
    if (numOfUnusedIdxs == 0) { // no space in main under lock, roll back
        backend->putBackRefSpace(newBl, blockSpaceSize, isRawMemUsed);
        return false;
    }
    // It's possible that only part of newBl is used, due to lack of indices in main.
    // This is OK, as such underutilization is possible only once for the backreferences table.
    int blocksToUse = min(numOfUnusedIdxs, blockSpaceSize / BackRefBlock::bytes);

    // use the first block in the batch to maintain the list of "raw" memory
    // to be released at shutdown
    if (isRawMemUsed) {
        newBl->nextRawMemBlock = backRefMain.load(std::memory_order_relaxed)->allRawMemBlocks;
        backRefMain.load(std::memory_order_relaxed)->allRawMemBlocks = newBl;
    }
    for (BackRefBlock *bl = newBl; blocksToUse>0; bl = (BackRefBlock*)((uintptr_t)bl + BackRefBlock::bytes), blocksToUse--) {
        initEmptyBackRefBlock(bl);
        if (active.load(std::memory_order_relaxed)->allocatedCount.load(std::memory_order_relaxed) == BR_MAX_CNT) {
            active.store(bl, std::memory_order_release); // active leaf is not needed in listForUse
        } else {
            addToForUseList(bl);
        }
    }
    return true;
}

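// Fast path: keep allocating from the current active leaf while it has room.
// Slow path: under mainMutex, pop the next leaf from listForUse, or fall back
// to requestNewSpace() when the released list is empty.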
BackRefBlock *BackRefMain::findFreeBlock()
{
    BackRefBlock* active_block = active.load(std::memory_order_acquire);
    MALLOC_ASSERT(active_block, ASSERT_TEXT);

    if (active_block->allocatedCount.load(std::memory_order_relaxed) < BR_MAX_CNT)
        return active_block;

    if (listForUse.load(std::memory_order_relaxed)) { // use released list
        MallocMutex::scoped_lock lock(mainMutex);

        if (active_block->allocatedCount.load(std::memory_order_relaxed) == BR_MAX_CNT) {
            active_block = listForUse.load(std::memory_order_relaxed);
            if (active_block) {
                active.store(active_block, std::memory_order_release);
                listForUse.store(active_block->nextForUse, std::memory_order_relaxed);
                MALLOC_ASSERT(active_block->addedToForUse.load(std::memory_order_relaxed), ASSERT_TEXT);
                active_block->addedToForUse.store(false, std::memory_order_relaxed);
            }
        }
    } else // allocate new data node
        if (!requestNewSpace())
            return nullptr;
    return active.load(std::memory_order_acquire); // reread because of requestNewSpace
}

void *getBackRef(BackRefIdx backRefIdx)
{
    // !backRefMain means no initialization done, so it can't be valid memory
    // see initEmptyBackRefBlock for fences around lastUsed
    if (!(backRefMain.load(std::memory_order_acquire))
          || backRefIdx.getMain() > (backRefMain.load(std::memory_order_relaxed)->lastUsed.load(std::memory_order_acquire))
          || backRefIdx.getOffset() >= BR_MAX_CNT)
    {
        return nullptr;
    }
    std::atomic<void*>& backRefEntry = *(std::atomic<void*>*)(
        (uintptr_t)backRefMain.load(std::memory_order_relaxed)->backRefBl[backRefIdx.getMain()]
        + sizeof(BackRefBlock) + backRefIdx.getOffset() * sizeof(std::atomic<void*>)
    );
    return backRefEntry.load(std::memory_order_relaxed);
}
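
// Unlike setBackRef/removeBackRef, which assert validity, getBackRef
// range-checks and returns nullptr, so callers can probe an index of
// unknown provenance without crashing.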
void setBackRef(BackRefIdx backRefIdx, void *newPtr)
{
    MALLOC_ASSERT(backRefIdx.getMain()<=backRefMain.load(std::memory_order_relaxed)->lastUsed.load(std::memory_order_relaxed)
                  && backRefIdx.getOffset()<BR_MAX_CNT, ASSERT_TEXT);
    ((std::atomic<void*>*)((uintptr_t)backRefMain.load(std::memory_order_relaxed)->backRefBl[backRefIdx.getMain()]
        + sizeof(BackRefBlock) + backRefIdx.getOffset() * sizeof(void*)))->store(newPtr, std::memory_order_relaxed);
}

BackRefIdx BackRefIdx::newBackRef(bool largeObj)
{
    BackRefBlock *blockToUse;
    void **toUse;
    BackRefIdx res;
    bool lastBlockFirstUsed = false;

    do {
        MALLOC_ASSERT(backRefMain.load(std::memory_order_relaxed), ASSERT_TEXT);
        blockToUse = backRefMain.load(std::memory_order_relaxed)->findFreeBlock();
        if (!blockToUse)
            return BackRefIdx();
        toUse = nullptr;
        { // the block is locked to find a reference
            MallocMutex::scoped_lock lock(blockToUse->blockMutex);

            if (blockToUse->freeList) {
                toUse = (void**)blockToUse->freeList;
                blockToUse->freeList = blockToUse->freeList->next;
                MALLOC_ASSERT(!blockToUse->freeList ||
                              ((uintptr_t)blockToUse->freeList>=(uintptr_t)blockToUse
                               && (uintptr_t)blockToUse->freeList <
                               (uintptr_t)blockToUse + slabSize), ASSERT_TEXT);
            } else if (blockToUse->allocatedCount.load(std::memory_order_relaxed) < BR_MAX_CNT) {
                toUse = (void**)blockToUse->bumpPtr;
                blockToUse->bumpPtr =
                    (FreeObject*)((uintptr_t)blockToUse->bumpPtr - sizeof(void*));
                if (blockToUse->allocatedCount.load(std::memory_order_relaxed) == BR_MAX_CNT-1) {
                    MALLOC_ASSERT((uintptr_t)blockToUse->bumpPtr
                                  < (uintptr_t)blockToUse+sizeof(BackRefBlock),
                                  ASSERT_TEXT);
                    blockToUse->bumpPtr = nullptr;
                }
            }
            if (toUse) {
                if (!blockToUse->allocatedCount.load(std::memory_order_relaxed) &&
                    !backRefMain.load(std::memory_order_relaxed)->listForUse.load(std::memory_order_relaxed)) {
                    lastBlockFirstUsed = true;
                }
                blockToUse->allocatedCount.store(blockToUse->allocatedCount.load(std::memory_order_relaxed) + 1, std::memory_order_relaxed);
            }
        } // end of lock scope
    } while (!toUse);
    // The first thread that uses the last block requests new space in advance;
    // possible failures are ignored.
    if (lastBlockFirstUsed)
        backRefMain.load(std::memory_order_relaxed)->requestNewSpace();

    res.main = blockToUse->myNum;
    uintptr_t offset =
        ((uintptr_t)toUse - ((uintptr_t)blockToUse + sizeof(BackRefBlock)))/sizeof(void*);
    // the offset must fit into the 15-bit offset field of BackRefIdx
    MALLOC_ASSERT(!(offset >> 15), ASSERT_TEXT);
    res.offset = offset;
    if (largeObj) res.largeObj = largeObj;

    return res;
}

void removeBackRef(BackRefIdx backRefIdx)
{
    MALLOC_ASSERT(!backRefIdx.isInvalid(), ASSERT_TEXT);
    MALLOC_ASSERT(backRefIdx.getMain()<=backRefMain.load(std::memory_order_relaxed)->lastUsed.load(std::memory_order_relaxed)
                  && backRefIdx.getOffset()<BR_MAX_CNT, ASSERT_TEXT);
    BackRefBlock *currBlock = backRefMain.load(std::memory_order_relaxed)->backRefBl[backRefIdx.getMain()];
    std::atomic<void*>& backRefEntry = *(std::atomic<void*>*)((uintptr_t)currBlock + sizeof(BackRefBlock)
                                        + backRefIdx.getOffset()*sizeof(std::atomic<void*>));
    MALLOC_ASSERT(((uintptr_t)&backRefEntry >(uintptr_t)currBlock &&
                   (uintptr_t)&backRefEntry <(uintptr_t)currBlock + slabSize), ASSERT_TEXT);
    {
        MallocMutex::scoped_lock lock(currBlock->blockMutex);

        backRefEntry.store(currBlock->freeList, std::memory_order_relaxed);
#if MALLOC_DEBUG
        uintptr_t backRefEntryValue = (uintptr_t)backRefEntry.load(std::memory_order_relaxed);
        MALLOC_ASSERT(!backRefEntryValue ||
                      (backRefEntryValue > (uintptr_t)currBlock
                       && backRefEntryValue < (uintptr_t)currBlock + slabSize), ASSERT_TEXT);
#endif
        currBlock->freeList = (FreeObject*)&backRefEntry;
        currBlock->allocatedCount.store(currBlock->allocatedCount.load(std::memory_order_relaxed)-1, std::memory_order_relaxed);
    }
    // TODO: do we need double-check here?
    if (!currBlock->addedToForUse.load(std::memory_order_relaxed) &&
        currBlock!=backRefMain.load(std::memory_order_relaxed)->active.load(std::memory_order_relaxed)) {
        MallocMutex::scoped_lock lock(mainMutex);

        if (!currBlock->addedToForUse.load(std::memory_order_relaxed) &&
            currBlock!=backRefMain.load(std::memory_order_relaxed)->active.load(std::memory_order_relaxed))
            backRefMain.load(std::memory_order_relaxed)->addToForUseList(currBlock);
    }
}
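
// A minimal usage sketch (illustrative only; `myBlock` is a hypothetical
// slab block or large-object header):
//     BackRefIdx idx = BackRefIdx::newBackRef(/*largeObj=*/false);
//     setBackRef(idx, myBlock);                   // publish the back pointer
//     MALLOC_ASSERT(getBackRef(idx) == myBlock, ASSERT_TEXT);
//     removeBackRef(idx);                         // recycle the entry on free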

/********* End of backreferences ***********************/

} // namespace internal
} // namespace rml