/*
    Copyright (c) 2005-2023 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_tbbmalloc_internal_H
    #error tbbmalloc_internal.h must be included at this point
#endif

#ifndef __TBB_backend_H
#define __TBB_backend_H

// Included from namespace rml::internal

// Global state of blocks currently being processed
class BackendSync {
    // Class instances should reside in zero-initialized memory!
    // The number of blocks currently removed from a bin and not yet returned
    std::atomic<intptr_t> inFlyBlocks;        // back to another bin
    std::atomic<intptr_t> binsModifications;  // incremented on every bin modification
    Backend *backend;
public:
    void init(Backend *b) { backend = b; }
    void blockConsumed() { inFlyBlocks++; }
    void binsModified() { binsModifications++; }
    void blockReleased() {
#if __TBB_MALLOC_BACKEND_STAT
        MALLOC_ITT_SYNC_RELEASING(&inFlyBlocks);
#endif
        binsModifications++;
        intptr_t prev = inFlyBlocks.fetch_sub(1);
        MALLOC_ASSERT(prev > 0, ASSERT_TEXT);
        suppress_unused_warning(prev);
    }
    intptr_t getNumOfMods() const { return binsModifications.load(std::memory_order_acquire); }
    // returns true if the blocks search needs to be redone
    inline bool waitTillBlockReleased(intptr_t startModifiedCnt);
};
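// Illustrative usage sketch (not code from the allocator): a thread that takes
// a free block from a bin brackets the work with blockConsumed()/blockReleased(),
// while a thread that found no suitable block can wait for in-flight blocks to
// come back instead of immediately asking the OS for more memory.
//
//   // consumer side
//   sync.blockConsumed();                     // block taken out of a bin
//   /* ... split or coalesce the block ... */
//   sync.blockReleased();                     // block put back into a bin
//
//   // searcher side
//   intptr_t startCnt = sync.getNumOfMods();
//   /* ... scan the bins, nothing suitable found ... */
//   if (sync.waitTillBlockReleased(startCnt))
//       /* bins were modified meanwhile: re-do the blocks search */;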

class CoalRequestQ { // queue of free blocks for which coalescing was delayed
private:
    std::atomic<FreeBlock*> blocksToFree;
    BackendSync *bkndSync;
    // counts blocks that are in blocksToFree as well as those that have left it
    // but are still being actively coalesced
    std::atomic<intptr_t> inFlyBlocks;
public:
    void init(BackendSync *bSync) { bkndSync = bSync; }
    FreeBlock *getAll(); // return the current list of blocks and make the queue empty
    void putBlock(FreeBlock *fBlock);
    inline void blockWasProcessed();
    intptr_t blocksInFly() const { return inFlyBlocks.load(std::memory_order_acquire); }
};

class MemExtendingSema {
    std::atomic<intptr_t>    active;
public:
    bool wait() {
        bool rescanBins = false;
        // up to 3 threads can add more memory from the OS simultaneously,
        // the rest of the threads have to wait
        intptr_t prevCnt = active.load(std::memory_order_acquire);
        for (;;) {
            if (prevCnt < 3) {
                if (active.compare_exchange_strong(prevCnt, prevCnt + 1)) {
                    break;
                }
            } else {
                SpinWaitWhileEq(active, prevCnt);
                rescanBins = true;
                break;
            }
        }
        return rescanBins;
    }
    void signal() { active.fetch_sub(1); }
};
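// Illustrative usage sketch (not code from the allocator): memory extension is
// bracketed by wait()/signal(); when wait() returns true, the thread had to spin
// behind another extender, so the caller re-scans the bins first, because the
// other thread may already have added the memory it needs.
//
//   if (memExtendingSema.wait())
//       /* another thread just grew the pool: re-scan the bins before */
//       /* requesting more memory from the OS */;
//   /* ... allocate a region from the OS and put it into the bins ... */
//   memExtendingSema.signal();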

enum MemRegionType {
    // The region holds only slabs
    MEMREG_SLAB_BLOCKS = 0,
    // The region can hold several large object blocks
    MEMREG_LARGE_BLOCKS,
    // The region holds only one block with a requested size
    MEMREG_ONE_BLOCK
};

class MemRegionList {
    MallocMutex regionListLock;
public:
    MemRegion  *head;
    void add(MemRegion *r);
    void remove(MemRegion *r);
    int reportStat(FILE *f);
};

class Backend {
private:
/* Blocks in the range [minBinnedSize; getMaxBinnedSize()] are kept in bins,
   and one region can contain several blocks. Larger blocks are allocated directly
   and one region always contains one block.
*/
    enum {
        minBinnedSize = 8*1024UL,
        /*   If huge pages are available, maxBinned_HugePage is used.
             If not, maxBinned_SmallPage is the threshold.
             TODO: use the pool's granularity for setting the upper bound. */
        maxBinned_SmallPage = 1024*1024UL,
        // TODO: support other page sizes
        maxBinned_HugePage = 4*1024*1024UL
    };
    enum {
        VALID_BLOCK_IN_BIN = 1 // valid block added to a bin, not returned as a result
    };
public:
    // The backend bins step is the same as CacheStep for the large object cache
    static const size_t   freeBinsStep = LargeObjectCache::LargeBSProps::CacheStep;
    static const unsigned freeBinsNum = (maxBinned_HugePage-minBinnedSize)/freeBinsStep + 1;

    // if a previous access missed the per-thread slabs pool,
    // allocate numOfSlabAllocOnMiss blocks in advance
    static const int numOfSlabAllocOnMiss = 2;

    enum {
        NO_BIN = -1,
        // special bin for blocks >= maxBinned_HugePage; blocks go to this bin
        // when the pool is created with the keepAllMemory policy
        // TODO: currently this bin is scanned using "1st fit"; as it accumulates
        // blocks of different sizes, "best fit" would be preferable in terms of fragmentation
        HUGE_BIN = freeBinsNum-1
    };

    // Bin keeps a doubly-linked list of free blocks. It must be doubly-linked
    // because during coalescing a block is removed from the middle of the list.
    struct Bin {
        std::atomic<FreeBlock*> head;
        FreeBlock*              tail;
        MallocMutex             tLock;

        void removeBlock(FreeBlock *fBlock);
        void reset() {
            head.store(nullptr, std::memory_order_relaxed);
            tail = nullptr;
        }
        bool empty() const { return !head.load(std::memory_order_relaxed); }

        size_t countFreeBlocks();
        size_t reportFreeBlocks(FILE *f);
        void reportStat(FILE *f);
    };

    typedef BitMaskMin<Backend::freeBinsNum> BitMaskBins;

    // array of bins supplemented with a bitmask for fast lookup of non-empty bins
    class IndexedBins {
        BitMaskBins bitMask;
        Bin         freeBins[Backend::freeBinsNum];
        FreeBlock *getFromBin(int binIdx, BackendSync *sync, size_t size,
                bool needAlignedBlock, bool alignedBin, bool wait, int *resLocked);
    public:
        FreeBlock *findBlock(int nativeBin, BackendSync *sync, size_t size,
                bool needAlignedBlock, bool alignedBin, int *numOfLockedBins);
        bool tryReleaseRegions(int binIdx, Backend *backend);
        void lockRemoveBlock(int binIdx, FreeBlock *fBlock);
        void addBlock(int binIdx, FreeBlock *fBlock, size_t blockSz, bool addToTail);
        bool tryAddBlock(int binIdx, FreeBlock *fBlock, bool addToTail);
        int  getMinNonemptyBin(unsigned startBin) const;
        void verify();
        void reset();
        void reportStat(FILE *f);
    };
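    // Illustrative lookup flow (a sketch of the intent, not the actual
    // implementation): a request that maps to bin `start` first consults the
    // bitmask via getMinNonemptyBin(start) to skip empty bins, then findBlock()
    // tries the corresponding Bin under its lock; bins that are busy can be
    // skipped and reported through *numOfLockedBins, so the caller can decide
    // whether to retry the search or to extend the pool.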

private:
    class AdvRegionsBins {
        BitMaskBins bins;
    public:
        void registerBin(int regBin) { bins.set(regBin, 1); }
        int getMinUsedBin(int start) const { return bins.getMinTrue(start); }
        void reset() { bins.reset(); }
    };
    // auxiliary class for atomically tracking the maximum request size
    class MaxRequestComparator {
        const Backend *backend;
    public:
        MaxRequestComparator(const Backend *be) : backend(be) {}
        inline bool operator()(size_t oldMaxReq, size_t requestSize) const;
    };

#if CHECK_ALLOCATION_RANGE
    // Keep the min and max of all addresses requested from the OS;
    // use them for checking memory possibly allocated by replaced allocators
    // and for debugging purposes. Valid only for the default memory pool.
    class UsedAddressRange {
        static const uintptr_t ADDRESS_UPPER_BOUND = UINTPTR_MAX;

        std::atomic<uintptr_t> leftBound,
                               rightBound;
        MallocMutex mutex;
    public:
        // rightBound is zero-initialized
        void init() { leftBound.store(ADDRESS_UPPER_BOUND, std::memory_order_relaxed); }
        void registerAlloc(uintptr_t left, uintptr_t right);
        void registerFree(uintptr_t left, uintptr_t right);
        // as only the left and right bounds are kept, we can return true
        // for a pointer not allocated by us if more than a single region
        // was requested from the OS
        bool inRange(void *ptr) const {
            const uintptr_t p = (uintptr_t)ptr;
            return leftBound.load(std::memory_order_relaxed)<=p &&
                   p<=rightBound.load(std::memory_order_relaxed);
        }
    };
#else
    class UsedAddressRange {
    public:
        void init() { }
        void registerAlloc(uintptr_t, uintptr_t) {}
        void registerFree(uintptr_t, uintptr_t) {}
        bool inRange(void *) const { return true; }
    };
#endif

    ExtMemoryPool   *extMemPool;
    // used to release every region on pool destruction
    MemRegionList    regionList;

    CoalRequestQ     coalescQ; // queue of coalescing requests
    BackendSync      bkndSync;
    // semaphore limiting the number of threads adding more memory from the OS
    MemExtendingSema memExtendingSema;
    //size_t           totalMemSize,
    //                 memSoftLimit;
    std::atomic<size_t> totalMemSize;
    std::atomic<size_t> memSoftLimit;
    UsedAddressRange usedAddrRange;
    // to keep the 1st allocation larger than requested, keep the bootstrapping status
    enum {
        bootsrapMemNotDone = 0,
        bootsrapMemInitializing,
        bootsrapMemDone
    };
    std::atomic<intptr_t> bootsrapMemStatus;
    MallocMutex      bootsrapMemStatusMutex;

    // Using the maximal observed requested size allows decreasing
    // memory consumption for small requests and decreasing fragmentation
    // for workloads where small and large allocation requests are mixed.
    // TODO: decrease it as well, not only increase it
    std::atomic<size_t> maxRequestedSize;

    // bins registered for advance regions
    AdvRegionsBins advRegBins;
    // Storage for split FreeBlocks
    IndexedBins freeLargeBlockBins,
                freeSlabAlignedBins;

    std::atomic<intptr_t> backendCleanCnt;
    // Our friends
    friend class BackendSync;

    /******************************** Backend methods ******************************/

    /*--------------------------- Coalescing functions ----------------------------*/
    void coalescAndPut(FreeBlock *fBlock, size_t blockSz, bool slabAligned);
    bool coalescAndPutList(FreeBlock *head, bool forceCoalescQDrop, bool reportBlocksProcessed);

    // Main coalescing operation
    FreeBlock *doCoalesc(FreeBlock *fBlock, MemRegion **memRegion);

    // Queue for conflicted blocks during coalescing
    bool scanCoalescQ(bool forceCoalescQDrop);
    intptr_t blocksInCoalescing() const { return coalescQ.blocksInFly(); }

    /*--------------------- FreeBlock backend accessors ---------------------------*/
    FreeBlock *genericGetBlock(int num, size_t size, bool slabAligned);
    void genericPutBlock(FreeBlock *fBlock, size_t blockSz, bool slabAligned);

    // Split the block and return remaining parts to backend if possible
    FreeBlock *splitBlock(FreeBlock *fBlock, int num, size_t size, bool isAligned, bool needAlignedBlock);

    void removeBlockFromBin(FreeBlock *fBlock);

    // TODO: combine with returnLargeObject
    void putLargeBlock(LargeMemoryBlock *lmb);

    /*------------------- Starting point for OS allocation ------------------------*/
    void requestBootstrapMem();
    FreeBlock *askMemFromOS(size_t totalReqSize, intptr_t startModifiedCnt,
                            int *lockedBinsThreshold, int numOfLockedBins,
                            bool *splittable, bool needSlabRegion);

    /*---------------------- Memory regions allocation ----------------------------*/
    FreeBlock *addNewRegion(size_t size, MemRegionType type, bool addToBin);
    void releaseRegion(MemRegion *region);

    // TODO: combine in one initMemoryRegion function
    FreeBlock *findBlockInRegion(MemRegion *region, size_t exactBlockSize);
    void startUseBlock(MemRegion *region, FreeBlock *fBlock, bool addToBin);

    /*------------------------- Raw memory accessors ------------------------------*/
    void *allocRawMem(size_t &size);
    bool freeRawMem(void *object, size_t size);

    /*------------------------------ Cleanup functions ----------------------------*/
    // Clean all memory from all caches (extMemPool hard cleanup)
    FreeBlock *releaseMemInCaches(intptr_t startModifiedCnt, int *lockedBinsThreshold, int numOfLockedBins);
    // Soft heap limit (regular cleanup, then maybe hard cleanup)
    void releaseCachesToLimit();

    /*---------------------------------- Utility ----------------------------------*/
    // TODO: move inside IndexedBins class
    static int sizeToBin(size_t size) {
        if (size >= maxBinned_HugePage)
            return HUGE_BIN;
        else if (size < minBinnedSize)
            return NO_BIN;

        int bin = (size - minBinnedSize)/freeBinsStep;

        MALLOC_ASSERT(bin < HUGE_BIN, "Invalid size.");
        return bin;
    }
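    // Illustrative mapping (a sketch; freeBinsStep is not restated here, it
    // equals LargeObjectCache::LargeBSProps::CacheStep as defined above):
    //   size <  minBinnedSize (8 KiB)      -> NO_BIN   (not kept in the bins)
    //   size >= maxBinned_HugePage (4 MiB) -> HUGE_BIN (single "1st fit" bin)
    //   otherwise                          -> bin (size - minBinnedSize)/freeBinsStep,
    //                                         i.e. one bin per freeBinsStep bytes
    // A block can go to the slab-aligned bins only if its end ((char*)block + size)
    // falls on a slabSize boundary and the block can hold at least one slab: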
    static bool toAlignedBin(FreeBlock *block, size_t size) {
        return isAligned((char*)block + size, slabSize) && size >= slabSize;
    }

public:
    /*--------------------- Init, reset, destroy, verify  -------------------------*/
    void init(ExtMemoryPool *extMemoryPool);
    bool destroy();

    void verify();
    void reset();
    bool clean(); // clean on caches cleanup

    /*------------------------- Slab block request --------------------------------*/
    BlockI *getSlabBlock(int num);
    void putSlabBlock(BlockI *block);

    /*-------------------------- Large object request -----------------------------*/
    LargeMemoryBlock *getLargeBlock(size_t size);
    // TODO: make consistent with getLargeBlock
    void returnLargeObject(LargeMemoryBlock *lmb);

    /*-------------------------- Backreference memory request ----------------------*/
    void *getBackRefSpace(size_t size, bool *rawMemUsed);
    void putBackRefSpace(void *b, size_t size, bool rawMemUsed);

    /*----------------------------- Remap object ----------------------------------*/
    void *remap(void *ptr, size_t oldSize, size_t newSize, size_t alignment);

    /*---------------------------- Validation -------------------------------------*/
    bool inUserPool() const;
    bool ptrCanBeValid(void *ptr) const { return usedAddrRange.inRange(ptr); }

    /*-------------------------- Configuration API --------------------------------*/
    // Soft heap limit
    void setRecommendedMaxSize(size_t softLimit) {
        memSoftLimit = softLimit;
        releaseCachesToLimit();
    }

    /*------------------------------- Info ----------------------------------------*/
    size_t getMaxBinnedSize() const;

    /*-------------------------- Testing, statistics ------------------------------*/
#if __TBB_MALLOC_WHITEBOX_TEST
    size_t getTotalMemSize() const { return totalMemSize.load(std::memory_order_relaxed); }
#endif
#if __TBB_MALLOC_BACKEND_STAT
    void reportStat(FILE *f);
private:
    static size_t binToSize(int bin) {
        MALLOC_ASSERT(bin <= HUGE_BIN, "Invalid bin.");

        return bin*freeBinsStep + minBinnedSize;
    }
#endif
};
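
// Illustrative call sequence over the public Backend API (a sketch only, assuming
// an already initialized ExtMemoryPool named extMemoryPool; not code from the
// allocator):
//
//   Backend backend;                           // zero-initialized storage assumed (see BackendSync)
//   backend.init(&extMemoryPool);
//   BlockI *slab = backend.getSlabBlock(1);    // slab-aligned block for small objects
//   LargeMemoryBlock *lmb = backend.getLargeBlock(2*1024*1024);
//   backend.returnLargeObject(lmb);
//   backend.putSlabBlock(slab);
//   backend.destroy();                         // releases every region of the pool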

#endif // __TBB_backend_H