/*
    Copyright (c) 2005-2023 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_tbbmalloc_internal_H
#define __TBB_tbbmalloc_internal_H

#include "TypeDefinitions.h" /* Also includes customization layer Customize.h */

#if USE_PTHREAD
    // Some pthreads documentation says that <pthread.h> must be the first header.
    #include <pthread.h>
    typedef pthread_key_t tls_key_t;
#elif USE_WINTHREAD
    #include <windows.h>
    typedef DWORD tls_key_t;
#else
    #error Must define USE_PTHREAD or USE_WINTHREAD
#endif

#include <atomic>

// TODO: *BSD also has it
#define BACKEND_HAS_MREMAP __linux__
#define CHECK_ALLOCATION_RANGE MALLOC_DEBUG || MALLOC_ZONE_OVERLOAD_ENABLED || MALLOC_UNIXLIKE_OVERLOAD_ENABLED

#include "oneapi/tbb/detail/_config.h" // for __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN
#include "oneapi/tbb/detail/_template_helpers.h"
#if __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN
  #define _EXCEPTION_PTR_H /* prevents exception_ptr.h inclusion */
  #define _GLIBCXX_NESTED_EXCEPTION_H /* prevents nested_exception.h inclusion */
#endif

#include <stdio.h>
#include <stdlib.h>
#include <limits.h> // for CHAR_BIT
#include <string.h> // for memset
#if MALLOC_CHECK_RECURSION
#include <new>        /* for placement new */
#endif
#include "oneapi/tbb/scalable_allocator.h"
#include "tbbmalloc_internal_api.h"

/********* Various compile-time options        **************/

#if !__TBB_DEFINE_MIC && __TBB_MIC_NATIVE
 #error Intel(R) Many Integrated Core Compiler does not define __MIC__ anymore.
#endif

#define MALLOC_TRACE 0

#if MALLOC_TRACE
#define TRACEF(x) printf x
#else
#define TRACEF(x) ((void)0)
#endif /* MALLOC_TRACE */

#define ASSERT_TEXT nullptr

#define COLLECT_STATISTICS ( MALLOC_DEBUG && MALLOCENV_COLLECT_STATISTICS )
#ifndef USE_INTERNAL_TID
#define USE_INTERNAL_TID COLLECT_STATISTICS || MALLOC_TRACE
#endif

#include "Statistics.h"

// call yield for whitebox testing, skip in real library
#ifndef WhiteboxTestingYield
#define WhiteboxTestingYield() ((void)0)
#endif


/********* End compile-time options        **************/

namespace rml {

namespace internal {

#if __TBB_MALLOC_LOCACHE_STAT
extern intptr_t mallocCalls, cacheHits;
extern intptr_t memAllocKB, memHitKB;
#endif

//! Utility template function to prevent "unused" warnings by various compilers.
template<typename T>
void suppress_unused_warning( const T& ) {}

/********** Various global default constants ********/

/*
 * Default huge page size
 */
static const size_t HUGE_PAGE_SIZE = 2 * 1024 * 1024;

/********** End of global default constants *********/

/********** Various numeric parameters controlling allocations ********/

/*
 * slabSize - the size of a block for allocation of small objects,
 * it must be larger than maxSegregatedObjectSize.
 */
const uintptr_t slabSize = 16*1024;

/*
 * Large block cache cleanup frequency.
 * It should be a power of 2 for fast checking.
 */
const unsigned cacheCleanupFreq = 256;

/*
 * Alignment of large (>= minLargeObjectSize) objects.
 */
const size_t largeObjectAlignment = estimatedCacheLineSize;

/*
 * The number of bins in the TLS that lead to blocks we can allocate from.
 */
const uint32_t numBlockBinLimit = 31;

/********** End of numeric parameters controlling allocations *********/

class BlockI;
class Block;
struct LargeMemoryBlock;
struct ExtMemoryPool;
struct MemRegion;
class FreeBlock;
class TLSData;
class Backend;
class MemoryPool;
struct CacheBinOperation;
extern const uint32_t minLargeObjectSize;

enum DecreaseOrIncrease {
    decrease, increase
};

class TLSKey {
    tls_key_t TLS_pointer_key;
public:
    bool init();
    bool destroy();
    TLSData* getThreadMallocTLS() const;
    void setThreadMallocTLS( TLSData * newvalue );
    TLSData* createTLS(MemoryPool *memPool, Backend *backend);
};
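
// Usage sketch (illustrative only, not part of the build): TLSKey hides the
// platform TLS primitive (pthread_key_t or a Win32 TLS slot) behind a single
// interface; `pool` and `backend` below stand for any valid MemoryPool/Backend:
//
//   TLSKey key;
//   if (key.init()) {
//       TLSData *tls = key.getThreadMallocTLS();      // nullptr on first use
//       if (!tls) tls = key.createTLS(pool, backend); // allocate and register
//   }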

template<typename Arg, typename Compare>
inline void AtomicUpdate(std::atomic<Arg>& location, Arg newVal, const Compare &cmp)
{
    static_assert(sizeof(Arg) == sizeof(intptr_t), "Type of argument must match AtomicCompareExchange type.");
    Arg old = location.load(std::memory_order_acquire);
    for (; cmp(old, newVal); ) {
        if (location.compare_exchange_strong(old, newVal))
            break;
        // TODO: do we need backoff after unsuccessful CAS?
        //old = val;
    }
}
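
// Usage sketch (illustrative only; the names below are hypothetical):
// AtomicUpdate can maintain a monotonic high-water mark without a lock. The
// CAS loop retries only while cmp(current, newVal) still holds, so a
// concurrent update to an even larger value terminates the loop early:
//
//   std::atomic<intptr_t> highWater{0};
//   AtomicUpdate<intptr_t>(highWater, observedSize,
//       [](intptr_t cur, intptr_t cand) { return cur < cand; });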

// TODO: make BitMaskBasic more general
// TODO: check that BitMaskBasic is not used for synchronization
// (currently it fits BitMaskMin well, but is less suitable for BitMaskMax)
template<unsigned NUM>
class BitMaskBasic {
    static const unsigned SZ = (NUM-1)/(CHAR_BIT*sizeof(uintptr_t))+1;
    static const unsigned WORD_LEN = CHAR_BIT*sizeof(uintptr_t);

    std::atomic<uintptr_t> mask[SZ];

protected:
    // Bits are stored MSB-first: index idx maps to bit (WORD_LEN - idx%WORD_LEN - 1)
    // of word idx/WORD_LEN, so BitScanRev finds the smallest set index first.
    void set(size_t idx, bool val) {
        MALLOC_ASSERT(idx<NUM, ASSERT_TEXT);

        size_t i = idx / WORD_LEN;
        int pos = WORD_LEN - idx % WORD_LEN - 1;
        if (val) {
            mask[i].fetch_or(1ULL << pos);
        } else {
            mask[i].fetch_and(~(1ULL << pos));
        }
    }
    int getMinTrue(unsigned startIdx) const {
        unsigned idx = startIdx / WORD_LEN;
        int pos;

        if (startIdx % WORD_LEN) {
            // only interested in part of a word, clear bits before startIdx
            pos = WORD_LEN - startIdx % WORD_LEN;
            uintptr_t actualMask = mask[idx].load(std::memory_order_relaxed) & (((uintptr_t)1<<pos) - 1);
            idx++;
            if (-1 != (pos = BitScanRev(actualMask)))
                return idx*WORD_LEN - pos - 1;
        }

        while (idx<SZ)
            if (-1 != (pos = BitScanRev(mask[idx++].load(std::memory_order_relaxed))))
                return idx*WORD_LEN - pos - 1;
        return -1;
    }
public:
    void reset() { for (unsigned i=0; i<SZ; i++) mask[i].store(0, std::memory_order_relaxed); }
};

template<unsigned NUM>
class BitMaskMin : public BitMaskBasic<NUM> {
public:
    void set(size_t idx, bool val) { BitMaskBasic<NUM>::set(idx, val); }
    int getMinTrue(unsigned startIdx) const {
        return BitMaskBasic<NUM>::getMinTrue(startIdx);
    }
};

template<unsigned NUM>
class BitMaskMax : public BitMaskBasic<NUM> {
public:
    void set(size_t idx, bool val) {
        BitMaskBasic<NUM>::set(NUM - 1 - idx, val);
    }
    int getMaxTrue(unsigned startIdx) const {
        int p = BitMaskBasic<NUM>::getMinTrue(NUM-startIdx-1);
        return -1==p? -1 : (int)NUM - 1 - p;
    }
};
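
// Usage sketch (illustrative only): BitMaskMin<N> answers "smallest set index
// >= startIdx", while BitMaskMax<N> stores indices mirrored so the same scan
// answers "largest set index <= startIdx":
//
//   BitMaskMax<32> mask;
//   mask.reset();                 // the mask relies on explicit zeroing
//   mask.set(5, true);
//   mask.set(20, true);
//   int hi = mask.getMaxTrue(31); // == 20, the highest set index <= 31
//   int lo = mask.getMaxTrue(19); // == 5, since 20 is above the start index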


// The part of thread-specific data that can be modified by other threads.
// Such modifications must be protected by AllLocalCaches::listLock.
struct TLSRemote {
    TLSRemote *next,
              *prev;
};

// The list of all thread-local data; supporting cleanup of thread caches
class AllLocalCaches {
    TLSRemote  *head;
    MallocMutex listLock; // protects operations in the list
public:
    void registerThread(TLSRemote *tls);
    void unregisterThread(TLSRemote *tls);
    bool cleanup(bool cleanOnlyUnused);
    void markUnused();
    void reset() { head = nullptr; }
};

class LifoList {
public:
    inline LifoList();
    inline void push(Block *block);
    inline Block *pop();
    inline Block *grab();

private:
    std::atomic<Block*> top;
    MallocMutex lock;
};

/*
 * When a block that is not completely free is returned for reuse by other
 * threads, this is where it goes.
 *
 * LifoList assumes zero initialization, so its constructors are omitted below
 * to avoid linking with C++ libraries on Linux.
 */

class OrphanedBlocks {
    LifoList bins[numBlockBinLimit];
public:
    Block *get(TLSData *tls, unsigned int size);
    void put(intptr_t binTag, Block *block);
    void reset();
    bool cleanup(Backend* backend);
};

/* Large objects entities */
#include "large_objects.h"

// select index size for BackRefMain based on word size: default is uint32_t,
// uint16_t for 32-bit platforms
template<bool>
struct MainIndexSelect {
    typedef uint32_t main_type;
};

template<>
struct MainIndexSelect<false> {
    typedef uint16_t main_type;
};

class BackRefIdx { // composite index to backreference array
public:
    typedef MainIndexSelect<4 < sizeof(uintptr_t)>::main_type main_t;
private:
    static const main_t invalid = ~main_t(0);
    main_t main;      // index in BackRefMain
    uint16_t largeObj:1;  // is this object "large"?
    uint16_t offset  :15; // offset from beginning of BackRefBlock
public:
    BackRefIdx() : main(invalid), largeObj(0), offset(0) {}
    bool isInvalid() const { return main == invalid; }
    bool isLargeObject() const { return largeObj; }
    main_t getMain() const { return main; }
    uint16_t getOffset() const { return offset; }

#if __TBB_USE_THREAD_SANITIZER
    friend
    __attribute__((no_sanitize("thread")))
    BackRefIdx dereference(const BackRefIdx* ptr) {
        BackRefIdx idx;
        idx.main = ptr->main;
        idx.largeObj = ptr->largeObj;
        idx.offset = ptr->offset;
        return idx;
    }
#else
    friend
    BackRefIdx dereference(const BackRefIdx* ptr) {
        return *ptr;
    }
#endif

    // only newBackRef can modify BackRefIdx
    static BackRefIdx newBackRef(bool largeObj);
};
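
// Layout note (a sketch derived from the declarations above): on 64-bit
// platforms MainIndexSelect picks uint32_t, so a BackRefIdx packs a 32-bit
// main index plus a 1-bit large-object flag and a 15-bit offset; on 32-bit
// platforms the main index shrinks to uint16_t, keeping the whole composite
// index at 4 bytes.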

// Block header is used during block coalescing
// and must be preserved in used blocks.
class BlockI {
#if __clang__ && !__INTEL_COMPILER
    #pragma clang diagnostic push
    #pragma clang diagnostic ignored "-Wunused-private-field"
#endif
    intptr_t     blockState[2];
#if __clang__ && !__INTEL_COMPILER
    #pragma clang diagnostic pop // "-Wunused-private-field"
#endif
};

struct LargeMemoryBlock : public BlockI {
    MemoryPool       *pool;          // owner pool
    LargeMemoryBlock *next,          // pointers in the list of cached blocks
                     *prev,
    // doubly linked list of the pool's large objects;
    // used to destroy backrefs on pool destroy (backrefs are global)
    // and to release objects during pool reset
                     *gPrev,
                     *gNext;
    uintptr_t         age;           // age of block while in cache
    size_t            objectSize;    // the size requested by a client
    size_t            unalignedSize; // the size requested from backend
    BackRefIdx        backRefIdx;    // cached here; the used copy is in LargeObjectHdr
};

// Classes and methods for backend.cpp
#include "backend.h"

// A TBB allocator mode that can be controlled by the user
// via API/environment variable. Must be placed in zero-initialized memory.
// External synchronization assumed.
// TODO: TBB_VERSION support
class AllocControlledMode {
    intptr_t val;
    bool     setDone;

public:
    intptr_t get() const {
        MALLOC_ASSERT(setDone, ASSERT_TEXT);
        return val;
    }

    // Note: set() can be called before init()
    void set(intptr_t newVal) {
        val = newVal;
        setDone = true;
    }

    bool ready() const {
        return setDone;
    }

    // envName - environment variable to get controlled mode
    void initReadEnv(const char *envName, intptr_t defaultVal) {
        if (!setDone) {
            // unreferenced formal parameter warning
            tbb::detail::suppress_unused_warning(envName);
#if !__TBB_WIN8UI_SUPPORT
        // TODO: use strtol to get the actual value of the environment variable
            const char *envVal = getenv(envName);
            if (envVal && !strcmp(envVal, "1"))
                val = 1;
            else
#endif
                val = defaultVal;
            setDone = true;
        }
    }
};
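
// Usage sketch (illustrative only; the variable and environment names below
// are hypothetical): an AllocControlledMode instance in zero-initialized
// storage is armed lazily from the environment, then queried:
//
//   static AllocControlledMode exampleMode;              // zero-initialized
//   exampleMode.initReadEnv("TBB_MALLOC_EXAMPLE_MODE", /*defaultVal=*/0);
//   if (exampleMode.ready() && exampleMode.get())
//       /* the mode was switched on via ...EXAMPLE_MODE=1 */;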

// Page type to be used inside MapMemory.
// Regular (4KB aligned), Huge and Transparent Huge Pages (2MB aligned).
enum PageType {
    REGULAR = 0,
    PREALLOCATED_HUGE_PAGE,
    TRANSPARENT_HUGE_PAGE
};

// init() and printStatus() are called only under the global initialization lock.
// A race is possible between registerAllocation() and registerReleasing();
// the harm is that at most one huge page release is missed (because failure
// to get a huge page is registered only the first time), which is negligible.
// setMode can also be called concurrently.
// Object must reside in zero-initialized memory.
// TODO: can we check for huge page presence during every 10th mmap() call
// in case a huge page is released by another process?
class HugePagesStatus {
private:
    AllocControlledMode requestedMode; // changed only by user
                                       // to keep enabled and requestedMode consistent
    MallocMutex setModeLock;
    size_t      pageSize;
    std::atomic<intptr_t> needActualStatusPrint;

    static void doPrintStatus(bool state, const char *stateName) {
        // Under macOS* fprintf/snprintf acquires an internal lock, so if the
        // 1st allocation is done under that lock, we get a deadlock.
        // Do not use fprintf etc. during initialization.
        fputs("TBBmalloc: huge pages\t", stderr);
        if (!state)
            fputs("not ", stderr);
        fputs(stateName, stderr);
        fputs("\n", stderr);
    }

    void parseSystemMemInfo() {
        bool hpAvailable  = false;
        bool thpAvailable = false;
        long long hugePageSize = -1;

#if __unix__
        // Check huge pages existence
        long long meminfoHugePagesTotal = 0;

        parseFileItem meminfoItems[] = {
            // Parse system huge page size
            { "Hugepagesize: %lld kB", hugePageSize },
            // Check if there are preallocated huge pages on the system
            // https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
            { "HugePages_Total: %lld", meminfoHugePagesTotal } };

        parseFile</*BUFF_SIZE=*/100>("/proc/meminfo", meminfoItems);

        // Double-check other system information regarding preallocated
        // huge pages if there is no information in /proc/meminfo
        long long vmHugePagesTotal = 0;

        parseFileItem vmItem[] = { { "%lld", vmHugePagesTotal } };

        // We parse a counter; it can't be huge
        parseFile</*BUFF_SIZE=*/100>("/proc/sys/vm/nr_hugepages", vmItem);

        if (hugePageSize > -1 && (meminfoHugePagesTotal > 0 || vmHugePagesTotal > 0)) {
            MALLOC_ASSERT(hugePageSize != 0, "Huge Page size can't be zero if we found preallocated.");

            // Any non-zero value clearly states that there are preallocated
            // huge pages on the system
            hpAvailable = true;
        }

        // Check if there is transparent huge page support on the system
        long long thpPresent = 'n';
        parseFileItem thpItem[] = { { "[alwa%cs] madvise never\n", thpPresent } };
        parseFile</*BUFF_SIZE=*/100>("/sys/kernel/mm/transparent_hugepage/enabled", thpItem);

        if (hugePageSize > -1 && thpPresent == 'y') {
            MALLOC_ASSERT(hugePageSize != 0, "Huge Page size can't be zero if we found THP support.");
            thpAvailable = true;
        }
#endif
        MALLOC_ASSERT(!pageSize, "Huge page size can't be set twice. Double initialization.");

        // Initialize object variables
        pageSize       = hugePageSize * 1024; // was read in kB from meminfo
        isHPAvailable  = hpAvailable;
        isTHPAvailable = thpAvailable;
    }

public:

    // System information
    bool isHPAvailable;
    bool isTHPAvailable;

    // User defined value
    bool isEnabled;

    void init() {
        parseSystemMemInfo();
        MallocMutex::scoped_lock lock(setModeLock);
        requestedMode.initReadEnv("TBB_MALLOC_USE_HUGE_PAGES", 0);
        isEnabled = (isHPAvailable || isTHPAvailable) && requestedMode.get();
    }

    // Could be set from user code at any place.
    // If init() has not been called by that point, isEnabled will be false.
    void setMode(intptr_t newVal) {
        MallocMutex::scoped_lock lock(setModeLock);
        requestedMode.set(newVal);
        isEnabled = (isHPAvailable || isTHPAvailable) && newVal;
    }

    void reset() {
        needActualStatusPrint.store(0, std::memory_order_relaxed);
        pageSize = 0;
        isEnabled = isHPAvailable = isTHPAvailable = false;
    }

    // If the memory mapping size is a multiple of the huge page size, some OS
    // kernels can use huge pages transparently. Use this when huge pages are requested.
    size_t getGranularity() const {
        if (requestedMode.ready())
            return requestedMode.get() ? pageSize : 0;
        else
            return HUGE_PAGE_SIZE; // the mode is not yet known; assume typical 2MB huge pages
    }
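
    // Example (illustrative only; `hp` stands for any HugePagesStatus
    // instance): mapping in multiples of getGranularity() lets the kernel
    // back the region with huge pages when they are enabled:
    //
    //   size_t gran = hp.getGranularity();
    //   if (gran) // huge page sizes are powers of two in practice (e.g. 2MB)
    //       mapSize = (mapSize + gran - 1) & ~(gran - 1); // round up to gran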

    void printStatus() {
        doPrintStatus(requestedMode.get(), "requested");
        if (requestedMode.get()) { // report actual status iff requested
            if (pageSize)
                needActualStatusPrint.store(1, std::memory_order_release);
            else
                doPrintStatus(/*state=*/false, "available");
        }
    }
};

class AllLargeBlocksList {
    MallocMutex       largeObjLock;
    LargeMemoryBlock *loHead;
public:
    void add(LargeMemoryBlock *lmb);
    void remove(LargeMemoryBlock *lmb);
    template<bool poolDestroy> void releaseAll(Backend *backend);
};

struct ExtMemoryPool {
    Backend           backend;
    LargeObjectCache  loc;
    AllLocalCaches    allLocalCaches;
    OrphanedBlocks    orphanedBlocks;

    intptr_t          poolId;
    // To find all large objects. Used during user pool destruction,
    // to release all backreferences in large blocks (slab blocks do not have them).
    AllLargeBlocksList lmbList;
    // Callbacks to be used instead of MapMemory/UnmapMemory.
    rawAllocType      rawAlloc;
    rawFreeType       rawFree;
    size_t            granularity;
    bool              keepAllMemory,
                      delayRegsReleasing,
    // TODO: implement fixedPool by calling rawFree on destruction
                      fixedPool;
    TLSKey            tlsPointerKey;  // per-pool TLS key

    std::atomic<int> softCachesCleanupInProgress;
    std::atomic<int> hardCachesCleanupInProgress;

    bool init(intptr_t poolId, rawAllocType rawAlloc, rawFreeType rawFree,
              size_t granularity, bool keepAllMemory, bool fixedPool);
    bool initTLS();

    // i.e., not the system default pool for scalable_malloc/scalable_free
    bool userPool() const { return rawAlloc; }

    // true if something has been released
    bool softCachesCleanup();
    bool releaseAllLocalCaches();
    bool hardCachesCleanup(bool wait);
    void *remap(void *ptr, size_t oldSize, size_t newSize, size_t alignment);
    bool reset() {
        loc.reset();
        allLocalCaches.reset();
        orphanedBlocks.reset();
        bool ret = tlsPointerKey.destroy();
        backend.reset();
        return ret;
    }
    bool destroy() {
        MALLOC_ASSERT(isPoolValid(),
                      "Possible double pool_destroy or heap corruption");
        if (!userPool()) {
            loc.reset();
            allLocalCaches.reset();
        }
        // pthread_key_dtors must be disabled before memory unmapping
        // TODO: race-free solution
        bool ret = tlsPointerKey.destroy();
        if (rawFree || !userPool())
            ret &= backend.destroy();
        // pool is not valid after this point
        granularity = 0;
        return ret;
    }
    void delayRegionsReleasing(bool mode) { delayRegsReleasing = mode; }
    inline bool regionsAreReleaseable() const;

    LargeMemoryBlock *mallocLargeObject(MemoryPool *pool, size_t allocationSize);
    void freeLargeObject(LargeMemoryBlock *lmb);
    void freeLargeObjectList(LargeMemoryBlock *head);
#if MALLOC_DEBUG
    // use granularity as a marker for pool validity
    bool isPoolValid() const { return granularity; }
#endif
};

inline bool Backend::inUserPool() const { return extMemPool->userPool(); }

struct LargeObjectHdr {
    LargeMemoryBlock *memoryBlock;
    /* Backreference points to LargeObjectHdr.
       Duplicated in LargeMemoryBlock to reuse in subsequent allocations. */
    BackRefIdx       backRefIdx;
};

struct FreeObject {
    FreeObject  *next;
};


/******* A helper class to support overriding malloc with scalable_malloc *******/
#if MALLOC_CHECK_RECURSION

class RecursiveMallocCallProtector {
    // pointer to automatic data of the holding thread
    static std::atomic<void*> autoObjPtr;
    static MallocMutex rmc_mutex;
    static std::atomic<pthread_t> owner_thread;
/* Under FreeBSD 8.0, the 1st call to any pthread function, including
   pthread_self, triggers pthread initialization, which itself calls malloc.
   As the 1st use of RecursiveMallocCallProtector may happen before pthread is
   initialized, pthread calls can't be made in the 1st instance of
   RecursiveMallocCallProtector. RecursiveMallocCallProtector is first used in
   checkInitialization(), so there is a guarantee that by the 2nd use pthread
   is initialized. No such situation has been observed with other supported OSes.
 */
#if __FreeBSD__
    static bool        canUsePthread;
#else
    static const bool  canUsePthread = true;
#endif
/*
  The variable is modified in checkInitialization,
  so it can be read without memory barriers.
 */
    static bool mallocRecursionDetected;

    MallocMutex::scoped_lock* lock_acquired;
    char scoped_lock_space[sizeof(MallocMutex::scoped_lock)+1];

public:
    RecursiveMallocCallProtector() : lock_acquired(nullptr) {
        lock_acquired = new (scoped_lock_space) MallocMutex::scoped_lock( rmc_mutex );
        if (canUsePthread)
            owner_thread.store(pthread_self(), std::memory_order_relaxed);
        autoObjPtr.store(&scoped_lock_space, std::memory_order_relaxed);
    }

    RecursiveMallocCallProtector(RecursiveMallocCallProtector&) = delete;
    RecursiveMallocCallProtector& operator=(RecursiveMallocCallProtector) = delete;

    ~RecursiveMallocCallProtector() {
        if (lock_acquired) {
            autoObjPtr.store(nullptr, std::memory_order_relaxed);
            lock_acquired->~scoped_lock();
        }
    }
    static bool sameThreadActive() {
        if (!autoObjPtr.load(std::memory_order_relaxed)) // fast path
            return false;
        // Some thread has an active recursive call protector; check if the current one.
        // Exact pthread_self based test
        if (canUsePthread) {
            if (pthread_equal( owner_thread.load(std::memory_order_relaxed), pthread_self() )) {
                mallocRecursionDetected = true;
                return true;
            } else
                return false;
        }
        // inexact stack size based test
        const uintptr_t threadStackSz = 2*1024*1024;
        int dummy;

        uintptr_t xi = (uintptr_t)autoObjPtr.load(std::memory_order_relaxed), yi = (uintptr_t)&dummy;
        uintptr_t diffPtr = xi > yi ? xi - yi : yi - xi;

        return diffPtr < threadStackSz;
    }

/* The function is called on the 1st scalable_malloc call to check whether
   malloc calls back into scalable_malloc (a nested call must set
   mallocRecursionDetected). */
    static void detectNaiveOverload() {
        if (!malloc_proxy) {
#if __FreeBSD__
/* If !canUsePthread, we couldn't call pthread_self() before, but now pthread
   is initialized, so we can. */
            if (!canUsePthread) {
                canUsePthread = true;
                owner_thread.store(pthread_self(), std::memory_order_relaxed);
            }
#endif
            free(malloc(1));
        }
    }
};
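
// Usage sketch (illustrative only): code that libc malloc may re-enter guards
// itself with the RAII protector, and the overloaded entry points consult
// sameThreadActive() to break the recursion:
//
//   {
//       RecursiveMallocCallProtector scoped;
//       // ... initialization work that may call libc malloc ...
//   }   // protector released here
//
//   // inside the malloc overload:
//   if (RecursiveMallocCallProtector::sameThreadActive())
//       /* serve the request from the internal allocator instead */;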

#else

class RecursiveMallocCallProtector {
public:
    RecursiveMallocCallProtector() {}
    ~RecursiveMallocCallProtector() {}
};

#endif  /* MALLOC_CHECK_RECURSION */

unsigned int getThreadId();

bool initBackRefMain(Backend *backend);
void destroyBackRefMain(Backend *backend);
void removeBackRef(BackRefIdx backRefIdx);
void setBackRef(BackRefIdx backRefIdx, void *newPtr);
void *getBackRef(BackRefIdx backRefIdx);

} // namespace internal
} // namespace rml

#endif // __TBB_tbbmalloc_internal_H