151c0b2f7Stbbdev /*
2*c21e688aSSergey Zheltov Copyright (c) 2005-2022 Intel Corporation
351c0b2f7Stbbdev
451c0b2f7Stbbdev Licensed under the Apache License, Version 2.0 (the "License");
551c0b2f7Stbbdev you may not use this file except in compliance with the License.
651c0b2f7Stbbdev You may obtain a copy of the License at
751c0b2f7Stbbdev
851c0b2f7Stbbdev http://www.apache.org/licenses/LICENSE-2.0
951c0b2f7Stbbdev
1051c0b2f7Stbbdev Unless required by applicable law or agreed to in writing, software
1151c0b2f7Stbbdev distributed under the License is distributed on an "AS IS" BASIS,
1251c0b2f7Stbbdev WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1351c0b2f7Stbbdev See the License for the specific language governing permissions and
1451c0b2f7Stbbdev limitations under the License.
1551c0b2f7Stbbdev */
1651c0b2f7Stbbdev
1751c0b2f7Stbbdev //! \file test_malloc_pools.cpp
1851c0b2f7Stbbdev //! \brief Test for [memory_allocation] functionality
1951c0b2f7Stbbdev
2051c0b2f7Stbbdev #define __TBB_NO_IMPLICIT_LINKAGE 1
2151c0b2f7Stbbdev
2251c0b2f7Stbbdev #include "common/test.h"
2351c0b2f7Stbbdev
2451c0b2f7Stbbdev #define HARNESS_TBBMALLOC_THREAD_SHUTDOWN 1
2551c0b2f7Stbbdev
2651c0b2f7Stbbdev #include "common/utils.h"
2751c0b2f7Stbbdev #include "common/utils_assert.h"
2851c0b2f7Stbbdev #include "common/spin_barrier.h"
2951c0b2f7Stbbdev #include "common/tls_limit.h"
3051c0b2f7Stbbdev
3151c0b2f7Stbbdev #include "tbb/scalable_allocator.h"
3251c0b2f7Stbbdev
3351c0b2f7Stbbdev #include <atomic>
3451c0b2f7Stbbdev
//! Round arg up to the nearest multiple of alignment.
//! alignment must be a power of two.
template<typename T>
static inline T alignUp (T arg, uintptr_t alignment) {
    const uintptr_t raw = (uintptr_t)arg;
    const uintptr_t mask = alignment - 1;
    return T((raw + mask) & ~mask);
}
3951c0b2f7Stbbdev
4051c0b2f7Stbbdev struct PoolSpace: utils::NoCopy {
4151c0b2f7Stbbdev size_t pos;
4251c0b2f7Stbbdev int regions;
4351c0b2f7Stbbdev size_t bufSize;
4451c0b2f7Stbbdev char *space;
4551c0b2f7Stbbdev
4651c0b2f7Stbbdev static const size_t BUF_SIZE = 8*1024*1024;
4751c0b2f7Stbbdev
PoolSpacePoolSpace4851c0b2f7Stbbdev PoolSpace(size_t bufSz = BUF_SIZE) :
4951c0b2f7Stbbdev pos(0), regions(0),
5051c0b2f7Stbbdev bufSize(bufSz), space(new char[bufSize]) {
5151c0b2f7Stbbdev memset(space, 0, bufSize);
5251c0b2f7Stbbdev }
~PoolSpacePoolSpace5351c0b2f7Stbbdev ~PoolSpace() {
5451c0b2f7Stbbdev delete []space;
5551c0b2f7Stbbdev }
5651c0b2f7Stbbdev };
5751c0b2f7Stbbdev
// Backing-store array indexed by pool_id; allocated by each test before pool creation.
static PoolSpace *poolSpace;
5951c0b2f7Stbbdev
// Bookkeeping header stored immediately before each region returned by getMallocMem.
struct MallocPoolHeader {
    void *rawPtr;    // pointer originally returned by malloc (needed for free)
    size_t userSize; // size the pool requested; validated in putMallocMem
};
6451c0b2f7Stbbdev
// Count of regions handed out by getMallocMem and not yet returned via putMallocMem.
static std::atomic<int> liveRegions;
6651c0b2f7Stbbdev
getMallocMem(intptr_t,size_t & bytes)6751c0b2f7Stbbdev static void *getMallocMem(intptr_t /*pool_id*/, size_t &bytes)
6851c0b2f7Stbbdev {
6951c0b2f7Stbbdev void *rawPtr = malloc(bytes+sizeof(MallocPoolHeader)+1);
7051c0b2f7Stbbdev if (!rawPtr)
7157f524caSIlya Isaev return nullptr;
7251c0b2f7Stbbdev // +1 to check working with unaligned space
7351c0b2f7Stbbdev void *ret = (void *)((uintptr_t)rawPtr+sizeof(MallocPoolHeader)+1);
7451c0b2f7Stbbdev
7551c0b2f7Stbbdev MallocPoolHeader *hdr = (MallocPoolHeader*)ret-1;
7651c0b2f7Stbbdev hdr->rawPtr = rawPtr;
7751c0b2f7Stbbdev hdr->userSize = bytes;
7851c0b2f7Stbbdev
7951c0b2f7Stbbdev liveRegions++;
8051c0b2f7Stbbdev
8151c0b2f7Stbbdev return ret;
8251c0b2f7Stbbdev }
8351c0b2f7Stbbdev
putMallocMem(intptr_t,void * ptr,size_t bytes)8451c0b2f7Stbbdev static int putMallocMem(intptr_t /*pool_id*/, void *ptr, size_t bytes)
8551c0b2f7Stbbdev {
8651c0b2f7Stbbdev MallocPoolHeader *hdr = (MallocPoolHeader*)ptr-1;
8751c0b2f7Stbbdev ASSERT(bytes == hdr->userSize, "Invalid size in pool callback.");
8851c0b2f7Stbbdev free(hdr->rawPtr);
8951c0b2f7Stbbdev
9051c0b2f7Stbbdev liveRegions--;
9151c0b2f7Stbbdev
9251c0b2f7Stbbdev return 0;
9351c0b2f7Stbbdev }
9451c0b2f7Stbbdev
TestPoolReset()9551c0b2f7Stbbdev void TestPoolReset()
9651c0b2f7Stbbdev {
9751c0b2f7Stbbdev rml::MemPoolPolicy pol(getMallocMem, putMallocMem);
9851c0b2f7Stbbdev rml::MemoryPool *pool;
9951c0b2f7Stbbdev
10051c0b2f7Stbbdev pool_create_v1(0, &pol, &pool);
10151c0b2f7Stbbdev for (int i=0; i<100; i++) {
10251c0b2f7Stbbdev REQUIRE(pool_malloc(pool, 8));
10351c0b2f7Stbbdev REQUIRE(pool_malloc(pool, 50*1024));
10451c0b2f7Stbbdev }
10551c0b2f7Stbbdev int regionsBeforeReset = liveRegions.load(std::memory_order_acquire);
10651c0b2f7Stbbdev bool ok = pool_reset(pool);
10751c0b2f7Stbbdev REQUIRE(ok);
10851c0b2f7Stbbdev for (int i=0; i<100; i++) {
10951c0b2f7Stbbdev REQUIRE(pool_malloc(pool, 8));
11051c0b2f7Stbbdev REQUIRE(pool_malloc(pool, 50*1024));
11151c0b2f7Stbbdev }
11251c0b2f7Stbbdev REQUIRE_MESSAGE(regionsBeforeReset == liveRegions.load(std::memory_order_relaxed),
11351c0b2f7Stbbdev "Expected no new regions allocation.");
11451c0b2f7Stbbdev ok = pool_destroy(pool);
11551c0b2f7Stbbdev REQUIRE(ok);
11651c0b2f7Stbbdev REQUIRE_MESSAGE(!liveRegions.load(std::memory_order_relaxed), "Expected all regions were released.");
11751c0b2f7Stbbdev }
11851c0b2f7Stbbdev
// Body run by each thread of TestSharedPool: all threads allocate from one
// shared pool, then each thread frees objects allocated by a different thread
// (cross-thread free), while afterTerm objects are left for the main thread.
class SharedPoolRun: utils::NoAssign {
    static long threadNum;
    static utils::SpinBarrier startB,     // synchronizes the start of allocation
               mallocDone;                // all allocation completes before cross-thread freeing
    static rml::MemoryPool *pool;
    static void **crossThread,            // objects freed by a thread other than the allocator
               **afterTerm;               // objects freed by the caller after workers exit
public:
    static const int OBJ_CNT = 100;       // objects of each kind allocated per thread

    // Prepare shared state before NativeParallelFor spawns num threads.
    static void init(int num, rml::MemoryPool *pl, void **crThread, void **aTerm) {
        threadNum = num;
        pool = pl;
        crossThread = crThread;
        afterTerm = aTerm;
        startB.initialize(threadNum);
        mallocDone.initialize(threadNum);
    }

    void operator()( int id ) const {
        const int ITERS = 1000;
        void *local[ITERS];

        startB.wait();
        // Thread id owns slots [id*OBJ_CNT, (id+1)*OBJ_CNT) of both arrays.
        for (int i=id*OBJ_CNT; i<(id+1)*OBJ_CNT; i++) {
            afterTerm[i] = pool_malloc(pool, i%2? 8*1024 : 9*1024);
            memset(afterTerm[i], i, i%2? 8*1024 : 9*1024);
            crossThread[i] = pool_malloc(pool, i%2? 9*1024 : 8*1024);
            memset(crossThread[i], i, i%2? 9*1024 : 8*1024);
        }

        // Purely thread-local allocations, freed below by the same thread.
        for (int i=1; i<ITERS; i+=2) {
            local[i-1] = pool_malloc(pool, 6*1024);
            memset(local[i-1], i, 6*1024);
            local[i] = pool_malloc(pool, 16*1024);
            memset(local[i], i, 16*1024);
        }
        mallocDone.wait();
        // Free the crossThread objects of the "mirror" thread, exercising
        // frees of memory allocated by another thread.
        int myVictim = threadNum-id-1;
        for (int i=myVictim*OBJ_CNT; i<(myVictim+1)*OBJ_CNT; i++)
            pool_free(pool, crossThread[i]);
        for (int i=0; i<ITERS; i++)
            pool_free(pool, local[i]);
    }
};
16451c0b2f7Stbbdev
// Definitions of SharedPoolRun static state shared by all worker threads.
long SharedPoolRun::threadNum;
utils::SpinBarrier SharedPoolRun::startB,
                   SharedPoolRun::mallocDone;
rml::MemoryPool *SharedPoolRun::pool;
void **SharedPoolRun::crossThread,
     **SharedPoolRun::afterTerm;
17151c0b2f7Stbbdev
17251c0b2f7Stbbdev // single pool shared by different threads
TestSharedPool()17351c0b2f7Stbbdev void TestSharedPool()
17451c0b2f7Stbbdev {
17551c0b2f7Stbbdev rml::MemPoolPolicy pol(getMallocMem, putMallocMem);
17651c0b2f7Stbbdev rml::MemoryPool *pool;
17751c0b2f7Stbbdev
17851c0b2f7Stbbdev pool_create_v1(0, &pol, &pool);
17951c0b2f7Stbbdev void **crossThread = new void*[utils::MaxThread * SharedPoolRun::OBJ_CNT];
18051c0b2f7Stbbdev void **afterTerm = new void*[utils::MaxThread * SharedPoolRun::OBJ_CNT];
18151c0b2f7Stbbdev
18255f9b178SIvan Kochin for (int p=utils::MinThread; p<=utils::MaxThread; p++) {
18351c0b2f7Stbbdev SharedPoolRun::init(p, pool, crossThread, afterTerm);
18451c0b2f7Stbbdev SharedPoolRun thr;
18551c0b2f7Stbbdev
18651c0b2f7Stbbdev void *hugeObj = pool_malloc(pool, 10*1024*1024);
18751c0b2f7Stbbdev REQUIRE(hugeObj);
18851c0b2f7Stbbdev
18951c0b2f7Stbbdev utils::NativeParallelFor( p, thr );
19051c0b2f7Stbbdev
19151c0b2f7Stbbdev pool_free(pool, hugeObj);
19255f9b178SIvan Kochin for (int i=0; i<p*SharedPoolRun::OBJ_CNT; i++)
19351c0b2f7Stbbdev pool_free(pool, afterTerm[i]);
19451c0b2f7Stbbdev }
19551c0b2f7Stbbdev delete []afterTerm;
19651c0b2f7Stbbdev delete []crossThread;
19751c0b2f7Stbbdev
19851c0b2f7Stbbdev bool ok = pool_destroy(pool);
19951c0b2f7Stbbdev REQUIRE(ok);
20051c0b2f7Stbbdev REQUIRE_MESSAGE(!liveRegions.load(std::memory_order_relaxed), "Expected all regions were released.");
20151c0b2f7Stbbdev }
20251c0b2f7Stbbdev
CrossThreadGetMem(intptr_t pool_id,size_t & bytes)20351c0b2f7Stbbdev void *CrossThreadGetMem(intptr_t pool_id, size_t &bytes)
20451c0b2f7Stbbdev {
20551c0b2f7Stbbdev if (poolSpace[pool_id].pos + bytes > poolSpace[pool_id].bufSize)
20657f524caSIlya Isaev return nullptr;
20751c0b2f7Stbbdev
20851c0b2f7Stbbdev void *ret = poolSpace[pool_id].space + poolSpace[pool_id].pos;
20951c0b2f7Stbbdev poolSpace[pool_id].pos += bytes;
21051c0b2f7Stbbdev poolSpace[pool_id].regions++;
21151c0b2f7Stbbdev
21251c0b2f7Stbbdev return ret;
21351c0b2f7Stbbdev }
21451c0b2f7Stbbdev
CrossThreadPutMem(intptr_t pool_id,void *,size_t)21551c0b2f7Stbbdev int CrossThreadPutMem(intptr_t pool_id, void* /*raw_ptr*/, size_t /*raw_bytes*/)
21651c0b2f7Stbbdev {
21751c0b2f7Stbbdev poolSpace[pool_id].regions--;
21851c0b2f7Stbbdev return 0;
21951c0b2f7Stbbdev }
22051c0b2f7Stbbdev
// Body of TestCrossThreadPools: each thread creates its own pool, then
// verifies and destroys a pool created by a *different* thread.
class CrossThreadRun: utils::NoAssign {
    static long number_of_threads;
    static utils::SpinBarrier barrier; // separates pool creation from cross-thread use
    static rml::MemoryPool **pool;     // pool[i] is created by thread i
    static char **obj;                 // obj[i] is allocated from pool[i] by thread i
public:
    static void initBarrier(unsigned thrds) { barrier.initialize(thrds); }
    // Allocate per-thread slots; poolSpace[i] backs pool[i].
    static void init(long num) {
        number_of_threads = num;
        pool = new rml::MemoryPool*[number_of_threads];
        poolSpace = new PoolSpace[number_of_threads];
        obj = new char*[number_of_threads];
    }
    // Verify no regions leaked, then release the shared arrays.
    static void destroy() {
        for (long i=0; i<number_of_threads; i++)
            REQUIRE_MESSAGE(!poolSpace[i].regions, "Memory leak detected");
        delete []pool;
        delete []poolSpace;
        delete []obj;
    }
    CrossThreadRun() {}
    void operator()( int id ) const {
        rml::MemPoolPolicy pol(CrossThreadGetMem, CrossThreadPutMem);
        const int objLen = 10*id;

        pool_create_v1(id, &pol, &pool[id]);
        obj[id] = (char*)pool_malloc(pool[id], objLen);
        REQUIRE(obj[id]);
        memset(obj[id], id, objLen);

        {
            const size_t lrgSz = 2*16*1024;
            void *ptrLarge = pool_malloc(pool[id], lrgSz);
            REQUIRE(ptrLarge);
            memset(ptrLarge, 1, lrgSz);
            // consume all small objects
            while (pool_malloc(pool[id], 5 * 1024));
            // releasing of large object will not give a chance to allocate more
            // since only fixed pool can look at other bins aligned/notAligned
            pool_free(pool[id], ptrLarge);
            CHECK(!pool_malloc(pool[id], 5*1024));
        }

        barrier.wait();
        // Take over the pool created by the mirror thread: check its object's
        // contents, free it, and destroy that pool from this thread.
        int myPool = number_of_threads-id-1;
        for (int i=0; i<10*myPool; i++)
            REQUIRE(myPool==obj[myPool][i]);
        pool_free(pool[myPool], obj[myPool]);
        bool ok = pool_destroy(pool[myPool]);
        REQUIRE(ok);
    }
};
27351c0b2f7Stbbdev
// Definitions of CrossThreadRun static state.
long CrossThreadRun::number_of_threads;
utils::SpinBarrier CrossThreadRun::barrier;
rml::MemoryPool **CrossThreadRun::pool;
char **CrossThreadRun::obj;
27851c0b2f7Stbbdev
27951c0b2f7Stbbdev // pools created, used and destroyed by different threads
TestCrossThreadPools()28051c0b2f7Stbbdev void TestCrossThreadPools()
28151c0b2f7Stbbdev {
28255f9b178SIvan Kochin for (int p=utils::MinThread; p<=utils::MaxThread; p++) {
28351c0b2f7Stbbdev CrossThreadRun::initBarrier(p);
28451c0b2f7Stbbdev CrossThreadRun::init(p);
28551c0b2f7Stbbdev utils::NativeParallelFor( p, CrossThreadRun() );
28655f9b178SIvan Kochin for (int i=0; i<p; i++)
28751c0b2f7Stbbdev REQUIRE_MESSAGE(!poolSpace[i].regions, "Region leak detected");
28851c0b2f7Stbbdev CrossThreadRun::destroy();
28951c0b2f7Stbbdev }
29051c0b2f7Stbbdev }
29151c0b2f7Stbbdev
29251c0b2f7Stbbdev // buffer is too small to pool be created, but must not leak resources
TestTooSmallBuffer()29351c0b2f7Stbbdev void TestTooSmallBuffer()
29451c0b2f7Stbbdev {
29551c0b2f7Stbbdev poolSpace = new PoolSpace(8*1024);
29651c0b2f7Stbbdev
29751c0b2f7Stbbdev rml::MemPoolPolicy pol(CrossThreadGetMem, CrossThreadPutMem);
29851c0b2f7Stbbdev rml::MemoryPool *pool;
29951c0b2f7Stbbdev pool_create_v1(0, &pol, &pool);
30051c0b2f7Stbbdev bool ok = pool_destroy(pool);
30151c0b2f7Stbbdev REQUIRE(ok);
30251c0b2f7Stbbdev REQUIRE_MESSAGE(!poolSpace[0].regions, "No leaks.");
30351c0b2f7Stbbdev
30451c0b2f7Stbbdev delete poolSpace;
30551c0b2f7Stbbdev }
30651c0b2f7Stbbdev
30751c0b2f7Stbbdev class FixedPoolHeadBase : utils::NoAssign {
30851c0b2f7Stbbdev size_t size;
30951c0b2f7Stbbdev std::atomic<bool> used;
31051c0b2f7Stbbdev char* data;
31151c0b2f7Stbbdev public:
FixedPoolHeadBase(size_t s)31251c0b2f7Stbbdev FixedPoolHeadBase(size_t s) : size(s), used(false) {
31351c0b2f7Stbbdev data = new char[size];
31451c0b2f7Stbbdev }
useData(size_t & bytes)31551c0b2f7Stbbdev void *useData(size_t &bytes) {
31651c0b2f7Stbbdev bool wasUsed = used.exchange(true);
31751c0b2f7Stbbdev REQUIRE_MESSAGE(!wasUsed, "The buffer must not be used twice.");
31851c0b2f7Stbbdev bytes = size;
31951c0b2f7Stbbdev return data;
32051c0b2f7Stbbdev }
~FixedPoolHeadBase()32151c0b2f7Stbbdev ~FixedPoolHeadBase() {
32251c0b2f7Stbbdev delete []data;
32351c0b2f7Stbbdev }
32451c0b2f7Stbbdev };
32551c0b2f7Stbbdev
// Compile-time-sized convenience wrapper over FixedPoolHeadBase.
template<size_t SIZE>
class FixedPoolHead : FixedPoolHeadBase {
public:
    FixedPoolHead() : FixedPoolHeadBase(SIZE) { }
};
33151c0b2f7Stbbdev
fixedBufGetMem(intptr_t pool_id,size_t & bytes)33251c0b2f7Stbbdev static void *fixedBufGetMem(intptr_t pool_id, size_t &bytes)
33351c0b2f7Stbbdev {
33451c0b2f7Stbbdev return ((FixedPoolHeadBase*)pool_id)->useData(bytes);
33551c0b2f7Stbbdev }
33651c0b2f7Stbbdev
33751c0b2f7Stbbdev class FixedPoolUse: utils::NoAssign {
33851c0b2f7Stbbdev static utils::SpinBarrier startB;
33951c0b2f7Stbbdev rml::MemoryPool *pool;
34051c0b2f7Stbbdev size_t reqSize;
34151c0b2f7Stbbdev int iters;
34251c0b2f7Stbbdev public:
FixedPoolUse(unsigned threads,rml::MemoryPool * p,size_t sz,int it)34351c0b2f7Stbbdev FixedPoolUse(unsigned threads, rml::MemoryPool *p, size_t sz, int it) :
34451c0b2f7Stbbdev pool(p), reqSize(sz), iters(it) {
34551c0b2f7Stbbdev startB.initialize(threads);
34651c0b2f7Stbbdev }
operator ()(int) const34751c0b2f7Stbbdev void operator()( int /*id*/ ) const {
34851c0b2f7Stbbdev startB.wait();
34951c0b2f7Stbbdev for (int i=0; i<iters; i++) {
35051c0b2f7Stbbdev void *o = pool_malloc(pool, reqSize);
35151c0b2f7Stbbdev ASSERT(o, "Invalid object");
35251c0b2f7Stbbdev pool_free(pool, o);
35351c0b2f7Stbbdev }
35451c0b2f7Stbbdev }
35551c0b2f7Stbbdev };
35651c0b2f7Stbbdev
// Shared start barrier for all FixedPoolUse workers.
utils::SpinBarrier FixedPoolUse::startB;
35851c0b2f7Stbbdev
35951c0b2f7Stbbdev class FixedPoolNomem: utils::NoAssign {
36051c0b2f7Stbbdev utils::SpinBarrier *startB;
36151c0b2f7Stbbdev rml::MemoryPool *pool;
36251c0b2f7Stbbdev public:
FixedPoolNomem(utils::SpinBarrier * b,rml::MemoryPool * p)36351c0b2f7Stbbdev FixedPoolNomem(utils::SpinBarrier *b, rml::MemoryPool *p) :
36451c0b2f7Stbbdev startB(b), pool(p) {}
operator ()(int id) const36551c0b2f7Stbbdev void operator()(int id) const {
36651c0b2f7Stbbdev startB->wait();
36751c0b2f7Stbbdev void *o = pool_malloc(pool, id%2? 64 : 128*1024);
36851c0b2f7Stbbdev ASSERT(!o, "All memory must be consumed.");
36951c0b2f7Stbbdev }
37051c0b2f7Stbbdev };
37151c0b2f7Stbbdev
37251c0b2f7Stbbdev class FixedPoolSomeMem: utils::NoAssign {
37351c0b2f7Stbbdev utils::SpinBarrier *barrier;
37451c0b2f7Stbbdev rml::MemoryPool *pool;
37551c0b2f7Stbbdev public:
FixedPoolSomeMem(utils::SpinBarrier * b,rml::MemoryPool * p)37651c0b2f7Stbbdev FixedPoolSomeMem(utils::SpinBarrier *b, rml::MemoryPool *p) :
37751c0b2f7Stbbdev barrier(b), pool(p) {}
operator ()(int id) const37851c0b2f7Stbbdev void operator()(int id) const {
37951c0b2f7Stbbdev barrier->wait();
38051c0b2f7Stbbdev utils::Sleep(2*id);
38151c0b2f7Stbbdev void *o = pool_malloc(pool, id%2? 64 : 128*1024);
38251c0b2f7Stbbdev barrier->wait();
38351c0b2f7Stbbdev pool_free(pool, o);
38451c0b2f7Stbbdev }
38551c0b2f7Stbbdev };
38651c0b2f7Stbbdev
haveEnoughSpace(rml::MemoryPool * pool,size_t sz)38751c0b2f7Stbbdev bool haveEnoughSpace(rml::MemoryPool *pool, size_t sz)
38851c0b2f7Stbbdev {
38951c0b2f7Stbbdev if (void *p = pool_malloc(pool, sz)) {
39051c0b2f7Stbbdev pool_free(pool, p);
39151c0b2f7Stbbdev return true;
39251c0b2f7Stbbdev }
39351c0b2f7Stbbdev return false;
39451c0b2f7Stbbdev }
39551c0b2f7Stbbdev
// Exercise a fixed-size (single-buffer) pool: allocation up to capacity,
// multi-threaded reuse, and exact usable-capacity discovery via binary search.
void TestFixedBufferPool()
{
    const int ITERS = 7;
    const size_t MAX_OBJECT = 7*1024*1024;
    void *ptrs[ITERS];
    // Fixed pool: no put callback, memory is only reclaimed at destroy.
    rml::MemPoolPolicy pol(fixedBufGetMem, nullptr, 0, /*fixedSizePool=*/true,
                           /*keepMemTillDestroy=*/false);
    rml::MemoryPool *pool;
    {
        FixedPoolHead<MAX_OBJECT + 1024*1024> head;

        pool_create_v1((intptr_t)&head, &pol, &pool);
        {
            utils::NativeParallelFor( 1, FixedPoolUse(1, pool, MAX_OBJECT, 2) );

            // The buffer can be consumed in ITERS equal chunks...
            for (int i=0; i<ITERS; i++) {
                ptrs[i] = pool_malloc(pool, MAX_OBJECT/ITERS);
                REQUIRE(ptrs[i]);
            }
            for (int i=0; i<ITERS; i++)
                pool_free(pool, ptrs[i]);

            // ...and is fully usable again after freeing them.
            utils::NativeParallelFor( 1, FixedPoolUse(1, pool, MAX_OBJECT, 1) );
        }
        // each thread asks for an MAX_OBJECT/p/2 object,
        // /2 is to cover fragmentation
        for (int p=utils::MinThread; p<=utils::MaxThread; p++) {
            utils::NativeParallelFor( p, FixedPoolUse(p, pool, MAX_OBJECT/p/2, 10000) );
        }
        {
            const int p = 128;
            utils::NativeParallelFor( p, FixedPoolUse(p, pool, MAX_OBJECT/p/2, 1) );
        }
        {
            size_t maxSz;
            const int p = 256;
            utils::SpinBarrier barrier(p);

            // Find maximal useful object size. Start with MAX_OBJECT/2,
            // as the pool might be fragmented by BootStrapBlocks consumed during
            // FixedPoolRun.
            // Binary search invariant: an object of size l fits, size r does not.
            size_t l, r;
            REQUIRE(haveEnoughSpace(pool, MAX_OBJECT/2));
            for (l = MAX_OBJECT/2, r = MAX_OBJECT + 1024*1024; l < r-1; ) {
                size_t mid = (l+r)/2;
                if (haveEnoughSpace(pool, mid))
                    l = mid;
                else
                    r = mid;
            }
            maxSz = l;
            REQUIRE_MESSAGE(!haveEnoughSpace(pool, maxSz+1), "Expect to find boundary value.");
            // consume all available memory
            void *largeObj = pool_malloc(pool, maxSz);
            REQUIRE(largeObj);
            void *o = pool_malloc(pool, 64);
            if (o) // pool fragmented, skip FixedPoolNomem
                pool_free(pool, o);
            else
                utils::NativeParallelFor( p, FixedPoolNomem(&barrier, pool) );
            pool_free(pool, largeObj);
            // keep some space unoccupied
            largeObj = pool_malloc(pool, maxSz-512*1024);
            REQUIRE(largeObj);
            utils::NativeParallelFor( p, FixedPoolSomeMem(&barrier, pool) );
            pool_free(pool, largeObj);
        }
        bool ok = pool_destroy(pool);
        REQUIRE(ok);
    }
    // check that fresh untouched pool can successfully fulfil requests from 128 threads
    {
        FixedPoolHead<MAX_OBJECT + 1024*1024> head;
        pool_create_v1((intptr_t)&head, &pol, &pool);
        int p=128;
        utils::NativeParallelFor( p, FixedPoolUse(p, pool, MAX_OBJECT/p/2, 1) );
        bool ok = pool_destroy(pool);
        REQUIRE(ok);
    }
}
47651c0b2f7Stbbdev
// Granularity currently being verified by getGranMem/putGranMem.
static size_t currGranularity;
47851c0b2f7Stbbdev
getGranMem(intptr_t,size_t & bytes)47951c0b2f7Stbbdev static void *getGranMem(intptr_t /*pool_id*/, size_t &bytes)
48051c0b2f7Stbbdev {
48151c0b2f7Stbbdev REQUIRE_MESSAGE(!(bytes%currGranularity), "Region size mismatch granularity.");
48251c0b2f7Stbbdev return malloc(bytes);
48351c0b2f7Stbbdev }
48451c0b2f7Stbbdev
putGranMem(intptr_t,void * ptr,size_t bytes)48551c0b2f7Stbbdev static int putGranMem(intptr_t /*pool_id*/, void *ptr, size_t bytes)
48651c0b2f7Stbbdev {
48751c0b2f7Stbbdev REQUIRE_MESSAGE(!(bytes%currGranularity), "Region size mismatch granularity.");
48851c0b2f7Stbbdev free(ptr);
48951c0b2f7Stbbdev return 0;
49051c0b2f7Stbbdev }
49151c0b2f7Stbbdev
TestPoolGranularity()49251c0b2f7Stbbdev void TestPoolGranularity()
49351c0b2f7Stbbdev {
49451c0b2f7Stbbdev rml::MemPoolPolicy pol(getGranMem, putGranMem);
49551c0b2f7Stbbdev const size_t grans[] = {4*1024, 2*1024*1024, 6*1024*1024, 10*1024*1024};
49651c0b2f7Stbbdev
49751c0b2f7Stbbdev for (unsigned i=0; i<sizeof(grans)/sizeof(grans[0]); i++) {
49851c0b2f7Stbbdev pol.granularity = currGranularity = grans[i];
49951c0b2f7Stbbdev rml::MemoryPool *pool;
50051c0b2f7Stbbdev
50151c0b2f7Stbbdev pool_create_v1(0, &pol, &pool);
50251c0b2f7Stbbdev for (int sz=500*1024; sz<16*1024*1024; sz+=101*1024) {
50351c0b2f7Stbbdev void *p = pool_malloc(pool, sz);
50451c0b2f7Stbbdev REQUIRE_MESSAGE(p, "Can't allocate memory in pool.");
50551c0b2f7Stbbdev pool_free(pool, p);
50651c0b2f7Stbbdev }
50751c0b2f7Stbbdev bool ok = pool_destroy(pool);
50851c0b2f7Stbbdev REQUIRE(ok);
50951c0b2f7Stbbdev }
51051c0b2f7Stbbdev }
51151c0b2f7Stbbdev
// Callback counters: total put calls, total get calls, successful get calls.
static size_t putMemAll, getMemAll, getMemSuccessful;
51351c0b2f7Stbbdev
getMemMalloc(intptr_t,size_t & bytes)51451c0b2f7Stbbdev static void *getMemMalloc(intptr_t /*pool_id*/, size_t &bytes)
51551c0b2f7Stbbdev {
51651c0b2f7Stbbdev getMemAll++;
51751c0b2f7Stbbdev void *p = malloc(bytes);
51851c0b2f7Stbbdev if (p)
51951c0b2f7Stbbdev getMemSuccessful++;
52051c0b2f7Stbbdev return p;
52151c0b2f7Stbbdev }
52251c0b2f7Stbbdev
putMemFree(intptr_t,void * ptr,size_t)52351c0b2f7Stbbdev static int putMemFree(intptr_t /*pool_id*/, void *ptr, size_t /*bytes*/)
52451c0b2f7Stbbdev {
52551c0b2f7Stbbdev putMemAll++;
52651c0b2f7Stbbdev free(ptr);
52751c0b2f7Stbbdev return 0;
52851c0b2f7Stbbdev }
52951c0b2f7Stbbdev
// Compare the default policy (regions returned to callbacks on free) against
// the keepAllMemory policy (regions cached until pool_destroy), using the
// get/put callback counters to observe the pool's behavior.
void TestPoolKeepTillDestroy()
{
    const int ITERS = 50*1024;
    void *ptrs[2*ITERS+1];
    rml::MemPoolPolicy pol(getMemMalloc, putMemFree);
    rml::MemoryPool *pool;

    // 1st create default pool that returns memory back to callback,
    // then use keepMemTillDestroy policy
    for (int keep=0; keep<2; keep++) {
        getMemAll = putMemAll = 0;
        if (keep)
            pol.keepAllMemory = 1;
        pool_create_v1(0, &pol, &pool);
        for (int i=0; i<2*ITERS; i+=2) {
            ptrs[i] = pool_malloc(pool, 7*1024);
            ptrs[i+1] = pool_malloc(pool, 10*1024);
        }
        ptrs[2*ITERS] = pool_malloc(pool, 8*1024*1024);
        // Nothing freed yet, so the put callback must not have fired.
        REQUIRE(!putMemAll);
        for (int i=0; i<2*ITERS; i++)
            pool_free(pool, ptrs[i]);
        pool_free(pool, ptrs[2*ITERS]);
        size_t totalPutMemCalls = putMemAll;
        if (keep)
            REQUIRE(!putMemAll); // keepAllMemory: frees must not release regions
        else {
            REQUIRE(putMemAll);
            putMemAll = 0;
        }
        size_t getCallsBefore = getMemAll;
        // With keepAllMemory, this large request must be served from cached regions.
        void *p = pool_malloc(pool, 8*1024*1024);
        REQUIRE(p);
        if (keep)
            REQUIRE_MESSAGE(getCallsBefore == getMemAll, "Must not lead to new getMem call");
        size_t putCallsBefore = putMemAll;
        bool ok = pool_reset(pool);
        REQUIRE(ok);
        // reset must keep regions for reuse, not hand them back.
        REQUIRE_MESSAGE(putCallsBefore == putMemAll, "Pool is not releasing memory during reset.");
        ok = pool_destroy(pool);
        REQUIRE(ok);
        // destroy must balance every get with a put.
        REQUIRE(putMemAll);
        totalPutMemCalls += putMemAll;
        REQUIRE_MESSAGE(getMemAll == totalPutMemCalls, "Memory leak detected.");
    }

}
57751c0b2f7Stbbdev
// Check that the first size bytes of buf all compare equal to val.
// Returns as soon as a mismatch is found; the original scanned the whole
// buffer unconditionally even after detecting inequality.
static bool memEqual(char *buf, size_t size, int val)
{
    for (size_t k=0; k<size; k++)
        if (buf[k] != val)
            return false;
    return true;
}
58651c0b2f7Stbbdev
TestEntries()58751c0b2f7Stbbdev void TestEntries()
58851c0b2f7Stbbdev {
58951c0b2f7Stbbdev const int SZ = 4;
59051c0b2f7Stbbdev const int ALGN = 4;
59151c0b2f7Stbbdev size_t size[SZ] = {8, 8000, 9000, 100*1024};
59251c0b2f7Stbbdev size_t algn[ALGN] = {8, 64, 4*1024, 8*1024*1024};
59351c0b2f7Stbbdev
59451c0b2f7Stbbdev rml::MemPoolPolicy pol(getGranMem, putGranMem);
59551c0b2f7Stbbdev currGranularity = 1; // not check granularity in the test
59651c0b2f7Stbbdev rml::MemoryPool *pool;
59751c0b2f7Stbbdev
59851c0b2f7Stbbdev pool_create_v1(0, &pol, &pool);
59951c0b2f7Stbbdev for (int i=0; i<SZ; i++)
60051c0b2f7Stbbdev for (int j=0; j<ALGN; j++) {
60151c0b2f7Stbbdev char *p = (char*)pool_aligned_malloc(pool, size[i], algn[j]);
60251c0b2f7Stbbdev REQUIRE((p && 0==((uintptr_t)p & (algn[j]-1))));
60351c0b2f7Stbbdev memset(p, j, size[i]);
60451c0b2f7Stbbdev
60551c0b2f7Stbbdev size_t curr_algn = algn[rand() % ALGN];
60651c0b2f7Stbbdev size_t curr_sz = size[rand() % SZ];
60751c0b2f7Stbbdev char *p1 = (char*)pool_aligned_realloc(pool, p, curr_sz, curr_algn);
60851c0b2f7Stbbdev REQUIRE((p1 && 0==((uintptr_t)p1 & (curr_algn-1))));
60951c0b2f7Stbbdev REQUIRE(memEqual(p1, utils::min(size[i], curr_sz), j));
61051c0b2f7Stbbdev
61151c0b2f7Stbbdev memset(p1, j+1, curr_sz);
61251c0b2f7Stbbdev size_t curr_sz1 = size[rand() % SZ];
61351c0b2f7Stbbdev char *p2 = (char*)pool_realloc(pool, p1, curr_sz1);
61451c0b2f7Stbbdev REQUIRE(p2);
61551c0b2f7Stbbdev REQUIRE(memEqual(p2, utils::min(curr_sz1, curr_sz), j+1));
61651c0b2f7Stbbdev
61751c0b2f7Stbbdev pool_free(pool, p2);
61851c0b2f7Stbbdev }
61951c0b2f7Stbbdev
62051c0b2f7Stbbdev bool ok = pool_destroy(pool);
62151c0b2f7Stbbdev REQUIRE(ok);
62251c0b2f7Stbbdev
62357f524caSIlya Isaev bool fail = rml::pool_destroy(nullptr);
62451c0b2f7Stbbdev REQUIRE(!fail);
62557f524caSIlya Isaev fail = rml::pool_reset(nullptr);
62651c0b2f7Stbbdev REQUIRE(!fail);
62751c0b2f7Stbbdev }
62851c0b2f7Stbbdev
// Create a pool with malloc-backed callbacks and prove it is usable by
// allocating `size` bytes. Returns nullptr (with callback counters checked)
// when pool creation or the backing allocation fails.
rml::MemoryPool *CreateUsablePool(size_t size)
{
    rml::MemoryPool *pool;
    rml::MemPoolPolicy okPolicy(getMemMalloc, putMemFree);

    putMemAll = getMemAll = getMemSuccessful = 0;
    rml::MemPoolError res = pool_create_v1(0, &okPolicy, &pool);
    if (res != rml::POOL_OK) {
        REQUIRE_MESSAGE((!getMemAll && !putMemAll), "No callbacks after fail.");
        return nullptr;
    }
    void *o = pool_malloc(pool, size);
    if (!getMemSuccessful) {
        // no memory from callback, valid reason to leave
        REQUIRE_MESSAGE(!o, "The pool must be unusable.");
        return nullptr;
    }
    REQUIRE_MESSAGE(o, "Created pool must be useful.");
    // NOTE(review): the exact counts 1 and 5 appear to encode implementation
    // details of the allocator's bootstrap path — confirm against tbbmalloc.
    REQUIRE_MESSAGE((getMemSuccessful == 1 || getMemSuccessful == 5 || getMemAll > getMemSuccessful),
            "Multiple requests are allowed when unsuccessful request occurred or cannot search in bootstrap memory. ");
    REQUIRE(!putMemAll);
    pool_free(pool, o);

    return pool;
}
65451c0b2f7Stbbdev
// Repeatedly create as many usable pools as possible, then destroy them all.
// If pool creation leaks an internal resource (e.g. a TLS key), the maximum
// number of creatable pools keeps shrinking between iterations; require the
// maximum to stay stable for CREATED_STABLE consecutive runs.
void CheckPoolLeaks(size_t poolsAlwaysAvailable)
{
    const size_t MAX_POOLS = 16*1000;
    const int ITERS = 20, CREATED_STABLE = 3;
    rml::MemoryPool *pools[MAX_POOLS];
    size_t created, maxCreated = MAX_POOLS;
    int maxNotChangedCnt = 0;

    // expecting that for ITERS runs, max number of pools that can be created
    // can be stabilized and still stable CREATED_STABLE times
    for (int j=0; j<ITERS && maxNotChangedCnt<CREATED_STABLE; j++) {
        for (created=0; created<maxCreated; created++) {
            rml::MemoryPool *p = CreateUsablePool(1024);
            if (!p)
                break;
            pools[created] = p;
        }
        REQUIRE_MESSAGE(created>=poolsAlwaysAvailable,
                "Expect that the reasonable number of pools can be always created.");
        for (size_t i=0; i<created; i++) {
            bool ok = pool_destroy(pools[i]);
            REQUIRE(ok);
        }
        // Shrink the ceiling to what was actually achievable this round.
        if (created < maxCreated) {
            maxCreated = created;
            maxNotChangedCnt = 0;
        } else
            maxNotChangedCnt++;
    }
    REQUIRE_MESSAGE(maxNotChangedCnt == CREATED_STABLE, "The number of created pools must be stabilized.");
}
68651c0b2f7Stbbdev
TestPoolCreation()68751c0b2f7Stbbdev void TestPoolCreation()
68851c0b2f7Stbbdev {
68951c0b2f7Stbbdev putMemAll = getMemAll = getMemSuccessful = 0;
69051c0b2f7Stbbdev
69157f524caSIlya Isaev rml::MemPoolPolicy nullPolicy(nullptr, putMemFree),
69257f524caSIlya Isaev emptyFreePolicy(getMemMalloc, nullptr),
69351c0b2f7Stbbdev okPolicy(getMemMalloc, putMemFree);
69451c0b2f7Stbbdev rml::MemoryPool *pool;
69551c0b2f7Stbbdev
69651c0b2f7Stbbdev rml::MemPoolError res = pool_create_v1(0, &nullPolicy, &pool);
69751c0b2f7Stbbdev REQUIRE_MESSAGE(res==rml::INVALID_POLICY, "pool with empty pAlloc can't be created");
69851c0b2f7Stbbdev res = pool_create_v1(0, &emptyFreePolicy, &pool);
69951c0b2f7Stbbdev REQUIRE_MESSAGE(res==rml::INVALID_POLICY, "pool with empty pFree can't be created");
70051c0b2f7Stbbdev REQUIRE_MESSAGE((!putMemAll && !getMemAll), "no callback calls are expected");
70151c0b2f7Stbbdev res = pool_create_v1(0, &okPolicy, &pool);
70251c0b2f7Stbbdev REQUIRE(res==rml::POOL_OK);
70351c0b2f7Stbbdev bool ok = pool_destroy(pool);
70451c0b2f7Stbbdev REQUIRE(ok);
70551c0b2f7Stbbdev REQUIRE_MESSAGE(putMemAll == getMemSuccessful, "no leaks after pool_destroy");
70651c0b2f7Stbbdev
70751c0b2f7Stbbdev // 32 is a guess for a number of pools that is acceptable everywere
70851c0b2f7Stbbdev CheckPoolLeaks(32);
70951c0b2f7Stbbdev // try to consume all but 16 TLS keys
71051c0b2f7Stbbdev LimitTLSKeysTo limitTLSTo(16);
71151c0b2f7Stbbdev // ...and check that we can create at least 16 pools
71251c0b2f7Stbbdev CheckPoolLeaks(16);
71351c0b2f7Stbbdev }
71451c0b2f7Stbbdev
71551c0b2f7Stbbdev struct AllocatedObject {
71651c0b2f7Stbbdev rml::MemoryPool *pool;
71751c0b2f7Stbbdev };
71851c0b2f7Stbbdev
71951c0b2f7Stbbdev const size_t BUF_SIZE = 1024*1024;
72051c0b2f7Stbbdev
72151c0b2f7Stbbdev class PoolIdentityCheck : utils::NoAssign {
72251c0b2f7Stbbdev rml::MemoryPool** const pools;
72351c0b2f7Stbbdev AllocatedObject** const objs;
72451c0b2f7Stbbdev public:
PoolIdentityCheck(rml::MemoryPool ** p,AllocatedObject ** o)72551c0b2f7Stbbdev PoolIdentityCheck(rml::MemoryPool** p, AllocatedObject** o) : pools(p), objs(o) {}
operator ()(int id) const72651c0b2f7Stbbdev void operator()(int id) const {
72751c0b2f7Stbbdev objs[id] = (AllocatedObject*)pool_malloc(pools[id], BUF_SIZE/2);
72851c0b2f7Stbbdev REQUIRE(objs[id]);
72951c0b2f7Stbbdev rml::MemoryPool *act_pool = rml::pool_identify(objs[id]);
73051c0b2f7Stbbdev REQUIRE(act_pool == pools[id]);
73151c0b2f7Stbbdev
73251c0b2f7Stbbdev for (size_t total=0; total<2*BUF_SIZE; total+=256) {
73351c0b2f7Stbbdev AllocatedObject *o = (AllocatedObject*)pool_malloc(pools[id], 256);
73451c0b2f7Stbbdev REQUIRE(o);
73551c0b2f7Stbbdev act_pool = rml::pool_identify(o);
73651c0b2f7Stbbdev REQUIRE(act_pool == pools[id]);
73751c0b2f7Stbbdev pool_free(act_pool, o);
73851c0b2f7Stbbdev }
73951c0b2f7Stbbdev if( id&1 ) { // make every second returned object "small"
74051c0b2f7Stbbdev pool_free(act_pool, objs[id]);
74151c0b2f7Stbbdev objs[id] = (AllocatedObject*)pool_malloc(pools[id], 16);
74251c0b2f7Stbbdev REQUIRE(objs[id]);
74351c0b2f7Stbbdev }
74451c0b2f7Stbbdev objs[id]->pool = act_pool;
74551c0b2f7Stbbdev }
74651c0b2f7Stbbdev };
74751c0b2f7Stbbdev
TestPoolDetection()74851c0b2f7Stbbdev void TestPoolDetection()
74951c0b2f7Stbbdev {
75051c0b2f7Stbbdev const int POOLS = 4;
75157f524caSIlya Isaev rml::MemPoolPolicy pol(fixedBufGetMem, nullptr, 0, /*fixedSizePool=*/true,
75251c0b2f7Stbbdev /*keepMemTillDestroy=*/false);
75351c0b2f7Stbbdev rml::MemoryPool *pools[POOLS];
75451c0b2f7Stbbdev FixedPoolHead<BUF_SIZE*POOLS> head[POOLS];
75551c0b2f7Stbbdev AllocatedObject *objs[POOLS];
75651c0b2f7Stbbdev
75751c0b2f7Stbbdev for (int i=0; i<POOLS; i++)
75851c0b2f7Stbbdev pool_create_v1((intptr_t)(head+i), &pol, &pools[i]);
75951c0b2f7Stbbdev // if object somehow released to different pools, subsequent allocation
76051c0b2f7Stbbdev // from affected pools became impossible
76151c0b2f7Stbbdev for (int k=0; k<10; k++) {
76251c0b2f7Stbbdev PoolIdentityCheck check(pools, objs);
76351c0b2f7Stbbdev if( k&1 )
76451c0b2f7Stbbdev utils::NativeParallelFor( POOLS, check);
76551c0b2f7Stbbdev else
76651c0b2f7Stbbdev for (int i=0; i<POOLS; i++) check(i);
76751c0b2f7Stbbdev
76851c0b2f7Stbbdev for (int i=0; i<POOLS; i++) {
76951c0b2f7Stbbdev rml::MemoryPool *p = rml::pool_identify(objs[i]);
77051c0b2f7Stbbdev REQUIRE(p == objs[i]->pool);
77151c0b2f7Stbbdev pool_free(p, objs[i]);
77251c0b2f7Stbbdev }
77351c0b2f7Stbbdev }
77451c0b2f7Stbbdev for (int i=0; i<POOLS; i++) {
77551c0b2f7Stbbdev bool ok = pool_destroy(pools[i]);
77651c0b2f7Stbbdev REQUIRE(ok);
77751c0b2f7Stbbdev }
77851c0b2f7Stbbdev }
77951c0b2f7Stbbdev
TestLazyBootstrap()78051c0b2f7Stbbdev void TestLazyBootstrap()
78151c0b2f7Stbbdev {
78251c0b2f7Stbbdev rml::MemPoolPolicy pol(getMemMalloc, putMemFree);
78351c0b2f7Stbbdev const size_t sizes[] = {8, 9*1024, 0};
78451c0b2f7Stbbdev
78551c0b2f7Stbbdev for (int i=0; sizes[i]; i++) {
78651c0b2f7Stbbdev rml::MemoryPool *pool = CreateUsablePool(sizes[i]);
78751c0b2f7Stbbdev bool ok = pool_destroy(pool);
78851c0b2f7Stbbdev REQUIRE(ok);
78951c0b2f7Stbbdev REQUIRE_MESSAGE(getMemSuccessful == putMemAll, "No leak.");
79051c0b2f7Stbbdev }
79151c0b2f7Stbbdev }
79251c0b2f7Stbbdev
79351c0b2f7Stbbdev class NoLeakOnDestroyRun: utils::NoAssign {
79451c0b2f7Stbbdev rml::MemoryPool *pool;
79551c0b2f7Stbbdev utils::SpinBarrier *barrier;
79651c0b2f7Stbbdev public:
NoLeakOnDestroyRun(rml::MemoryPool * p,utils::SpinBarrier * b)79751c0b2f7Stbbdev NoLeakOnDestroyRun(rml::MemoryPool *p, utils::SpinBarrier *b) : pool(p), barrier(b) {}
operator ()(int id) const79851c0b2f7Stbbdev void operator()(int id) const {
79951c0b2f7Stbbdev void *p = pool_malloc(pool, id%2? 8 : 9000);
80051c0b2f7Stbbdev REQUIRE((p && liveRegions.load(std::memory_order_relaxed)));
80151c0b2f7Stbbdev barrier->wait();
80251c0b2f7Stbbdev if (!id) {
80351c0b2f7Stbbdev bool ok = pool_destroy(pool);
80451c0b2f7Stbbdev REQUIRE(ok);
80551c0b2f7Stbbdev REQUIRE_MESSAGE(!liveRegions.load(std::memory_order_relaxed), "Expected all regions were released.");
80651c0b2f7Stbbdev }
80751c0b2f7Stbbdev // other threads must wait till pool destruction,
80851c0b2f7Stbbdev // to not call thread destruction cleanup before this
80951c0b2f7Stbbdev barrier->wait();
81051c0b2f7Stbbdev }
81151c0b2f7Stbbdev };
81251c0b2f7Stbbdev
TestNoLeakOnDestroy()81351c0b2f7Stbbdev void TestNoLeakOnDestroy()
81451c0b2f7Stbbdev {
81551c0b2f7Stbbdev liveRegions.store(0, std::memory_order_release);
81655f9b178SIvan Kochin for (int p=utils::MinThread; p<=utils::MaxThread; p++) {
81751c0b2f7Stbbdev rml::MemPoolPolicy pol(getMallocMem, putMallocMem);
81851c0b2f7Stbbdev utils::SpinBarrier barrier(p);
81951c0b2f7Stbbdev rml::MemoryPool *pool;
82051c0b2f7Stbbdev
82151c0b2f7Stbbdev pool_create_v1(0, &pol, &pool);
82251c0b2f7Stbbdev utils::NativeParallelFor(p, NoLeakOnDestroyRun(pool, &barrier));
82351c0b2f7Stbbdev }
82451c0b2f7Stbbdev }
82551c0b2f7Stbbdev
putMallocMemError(intptr_t,void * ptr,size_t bytes)82651c0b2f7Stbbdev static int putMallocMemError(intptr_t /*pool_id*/, void *ptr, size_t bytes)
82751c0b2f7Stbbdev {
82851c0b2f7Stbbdev MallocPoolHeader *hdr = (MallocPoolHeader*)ptr-1;
82951c0b2f7Stbbdev REQUIRE_MESSAGE(bytes == hdr->userSize, "Invalid size in pool callback.");
83051c0b2f7Stbbdev free(hdr->rawPtr);
83151c0b2f7Stbbdev
83251c0b2f7Stbbdev liveRegions--;
83351c0b2f7Stbbdev
83451c0b2f7Stbbdev return -1;
83551c0b2f7Stbbdev }
83651c0b2f7Stbbdev
TestDestroyFailed()83751c0b2f7Stbbdev void TestDestroyFailed()
83851c0b2f7Stbbdev {
83951c0b2f7Stbbdev rml::MemPoolPolicy pol(getMallocMem, putMallocMemError);
84051c0b2f7Stbbdev rml::MemoryPool *pool;
84151c0b2f7Stbbdev pool_create_v1(0, &pol, &pool);
84251c0b2f7Stbbdev void *ptr = pool_malloc(pool, 16);
84351c0b2f7Stbbdev REQUIRE(ptr);
84451c0b2f7Stbbdev bool fail = pool_destroy(pool);
84551c0b2f7Stbbdev REQUIRE_MESSAGE(fail==false, "putMemPolicyError callback returns error, "
84651c0b2f7Stbbdev "expect pool_destroy() failure");
84751c0b2f7Stbbdev }
84851c0b2f7Stbbdev
TestPoolMSize()84951c0b2f7Stbbdev void TestPoolMSize() {
85051c0b2f7Stbbdev rml::MemoryPool *pool = CreateUsablePool(1024);
85151c0b2f7Stbbdev
85251c0b2f7Stbbdev const int SZ = 10;
85351c0b2f7Stbbdev // Original allocation requests, random numbers from small to large
85451c0b2f7Stbbdev size_t requestedSz[SZ] = {8, 16, 500, 1000, 2000, 4000, 8000, 1024*1024, 4242+4242, 8484+8484};
85551c0b2f7Stbbdev
85651c0b2f7Stbbdev // Unlike large objects, small objects do not store its original size along with the object itself
85751c0b2f7Stbbdev // On Power architecture TLS bins are divided differently.
85851c0b2f7Stbbdev size_t allocatedSz[SZ] =
85951c0b2f7Stbbdev #if __powerpc64__ || __ppc64__ || __bgp__
86051c0b2f7Stbbdev {8, 16, 512, 1024, 2688, 5376, 8064, 1024*1024, 4242+4242, 8484+8484};
86151c0b2f7Stbbdev #else
86251c0b2f7Stbbdev {8, 16, 512, 1024, 2688, 4032, 8128, 1024*1024, 4242+4242, 8484+8484};
86351c0b2f7Stbbdev #endif
86451c0b2f7Stbbdev for (int i = 0; i < SZ; i++) {
86551c0b2f7Stbbdev void* obj = pool_malloc(pool, requestedSz[i]);
86651c0b2f7Stbbdev size_t objSize = pool_msize(pool, obj);
86751c0b2f7Stbbdev REQUIRE_MESSAGE(objSize == allocatedSz[i], "pool_msize returned the wrong value");
86851c0b2f7Stbbdev pool_free(pool, obj);
86951c0b2f7Stbbdev }
87051c0b2f7Stbbdev bool destroyed = pool_destroy(pool);
87151c0b2f7Stbbdev REQUIRE(destroyed);
87251c0b2f7Stbbdev }
87351c0b2f7Stbbdev
87451c0b2f7Stbbdev //! \brief \ref error_guessing
87551c0b2f7Stbbdev TEST_CASE("Too small buffer") {
87651c0b2f7Stbbdev TestTooSmallBuffer();
87751c0b2f7Stbbdev }
87851c0b2f7Stbbdev
87951c0b2f7Stbbdev //! \brief \ref error_guessing
88051c0b2f7Stbbdev TEST_CASE("Pool reset") {
88151c0b2f7Stbbdev TestPoolReset();
88251c0b2f7Stbbdev }
88351c0b2f7Stbbdev TEST_CASE("Shared pool") {
88451c0b2f7Stbbdev TestSharedPool();
88551c0b2f7Stbbdev }
88651c0b2f7Stbbdev
88751c0b2f7Stbbdev //! \brief \ref error_guessing
88851c0b2f7Stbbdev TEST_CASE("Cross thread pools") {
88951c0b2f7Stbbdev TestCrossThreadPools();
89051c0b2f7Stbbdev }
89151c0b2f7Stbbdev
89251c0b2f7Stbbdev //! \brief \ref interface
89351c0b2f7Stbbdev TEST_CASE("Fixed buffer pool") {
89451c0b2f7Stbbdev TestFixedBufferPool();
89551c0b2f7Stbbdev }
89651c0b2f7Stbbdev
89751c0b2f7Stbbdev //! \brief \ref interface
89851c0b2f7Stbbdev TEST_CASE("Pool granularity") {
89951c0b2f7Stbbdev TestPoolGranularity();
90051c0b2f7Stbbdev }
90151c0b2f7Stbbdev
90251c0b2f7Stbbdev //! \brief \ref error_guessing
90351c0b2f7Stbbdev TEST_CASE("Keep pool till destroy") {
90451c0b2f7Stbbdev TestPoolKeepTillDestroy();
90551c0b2f7Stbbdev }
90651c0b2f7Stbbdev
90751c0b2f7Stbbdev //! \brief \ref error_guessing
90851c0b2f7Stbbdev TEST_CASE("Entries") {
90951c0b2f7Stbbdev TestEntries();
91051c0b2f7Stbbdev }
91151c0b2f7Stbbdev
91251c0b2f7Stbbdev //! \brief \ref interface
91351c0b2f7Stbbdev TEST_CASE("Pool creation") {
91451c0b2f7Stbbdev TestPoolCreation();
91551c0b2f7Stbbdev }
91651c0b2f7Stbbdev
91751c0b2f7Stbbdev //! \brief \ref error_guessing
91851c0b2f7Stbbdev TEST_CASE("Pool detection") {
91951c0b2f7Stbbdev TestPoolDetection();
92051c0b2f7Stbbdev }
92151c0b2f7Stbbdev
92251c0b2f7Stbbdev //! \brief \ref error_guessing
92351c0b2f7Stbbdev TEST_CASE("Lazy bootstrap") {
92451c0b2f7Stbbdev TestLazyBootstrap();
92551c0b2f7Stbbdev }
92651c0b2f7Stbbdev
92751c0b2f7Stbbdev //! \brief \ref error_guessing
92851c0b2f7Stbbdev TEST_CASE("No leak on destroy") {
92951c0b2f7Stbbdev TestNoLeakOnDestroy();
93051c0b2f7Stbbdev }
93151c0b2f7Stbbdev
93251c0b2f7Stbbdev //! \brief \ref error_guessing
93351c0b2f7Stbbdev TEST_CASE("Destroy failed") {
93451c0b2f7Stbbdev TestDestroyFailed();
93551c0b2f7Stbbdev }
93651c0b2f7Stbbdev
93751c0b2f7Stbbdev //! \brief \ref interface
93851c0b2f7Stbbdev TEST_CASE("Pool msize") {
93951c0b2f7Stbbdev TestPoolMSize();
94051c0b2f7Stbbdev }
941