1*51c0b2f7Stbbdev /*
2*51c0b2f7Stbbdev     Copyright (c) 2005-2020 Intel Corporation
3*51c0b2f7Stbbdev 
4*51c0b2f7Stbbdev     Licensed under the Apache License, Version 2.0 (the "License");
5*51c0b2f7Stbbdev     you may not use this file except in compliance with the License.
6*51c0b2f7Stbbdev     You may obtain a copy of the License at
7*51c0b2f7Stbbdev 
8*51c0b2f7Stbbdev         http://www.apache.org/licenses/LICENSE-2.0
9*51c0b2f7Stbbdev 
10*51c0b2f7Stbbdev     Unless required by applicable law or agreed to in writing, software
11*51c0b2f7Stbbdev     distributed under the License is distributed on an "AS IS" BASIS,
12*51c0b2f7Stbbdev     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13*51c0b2f7Stbbdev     See the License for the specific language governing permissions and
14*51c0b2f7Stbbdev     limitations under the License.
15*51c0b2f7Stbbdev */
16*51c0b2f7Stbbdev 
17*51c0b2f7Stbbdev //! \file test_malloc_pools.cpp
18*51c0b2f7Stbbdev //! \brief Test for [memory_allocation] functionality
19*51c0b2f7Stbbdev 
20*51c0b2f7Stbbdev #define __TBB_NO_IMPLICIT_LINKAGE 1
21*51c0b2f7Stbbdev 
22*51c0b2f7Stbbdev #include "common/test.h"
23*51c0b2f7Stbbdev 
24*51c0b2f7Stbbdev #define HARNESS_TBBMALLOC_THREAD_SHUTDOWN 1
25*51c0b2f7Stbbdev 
26*51c0b2f7Stbbdev #include "common/utils.h"
27*51c0b2f7Stbbdev #include "common/utils_assert.h"
28*51c0b2f7Stbbdev #include "common/spin_barrier.h"
29*51c0b2f7Stbbdev #include "common/tls_limit.h"
30*51c0b2f7Stbbdev 
31*51c0b2f7Stbbdev #include "tbb/scalable_allocator.h"
32*51c0b2f7Stbbdev 
33*51c0b2f7Stbbdev #include <atomic>
34*51c0b2f7Stbbdev 
//! Round arg up to the nearest multiple of alignment.
//! NOTE: the mask trick requires alignment to be a power of two.
template<typename T>
static inline T alignUp  (T arg, uintptr_t alignment) {
    const uintptr_t mask = alignment - 1;
    return T(((uintptr_t)arg + mask) & ~mask);
}
39*51c0b2f7Stbbdev 
// A heap-allocated buffer that backs one memory pool in the tests that
// supply their own region memory (see CrossThreadGetMem/CrossThreadPutMem).
struct PoolSpace: utils::NoCopy {
    size_t pos;      // bump-allocation offset within space
    int    regions;  // regions handed out and not yet returned
    size_t bufSize;  // total size of space, in bytes
    char  *space;    // the backing buffer itself

    static const size_t BUF_SIZE = 8*1024*1024;

    PoolSpace(size_t bufSz = BUF_SIZE) :
        pos(0), regions(0),
        bufSize(bufSz), space(new char[bufSize]) {
        memset(space, 0, bufSize);
    }
    ~PoolSpace() {
        delete []space;
    }
};
57*51c0b2f7Stbbdev 
// Per-pool backing buffers, indexed by pool_id; allocated in
// CrossThreadRun::init and TestTooSmallBuffer.
static PoolSpace *poolSpace;
59*51c0b2f7Stbbdev 
// Bookkeeping header placed just before each region that getMallocMem
// hands to the pool, so putMallocMem can recover the raw malloc pointer.
struct MallocPoolHeader {
    void  *rawPtr;    // pointer originally returned by malloc()
    size_t userSize;  // size the pool requested in getMallocMem
};
64*51c0b2f7Stbbdev 
// Regions currently handed out by getMallocMem and not yet released
// via putMallocMem.
static std::atomic<int> liveRegions;
66*51c0b2f7Stbbdev 
67*51c0b2f7Stbbdev static void *getMallocMem(intptr_t /*pool_id*/, size_t &bytes)
68*51c0b2f7Stbbdev {
69*51c0b2f7Stbbdev     void *rawPtr = malloc(bytes+sizeof(MallocPoolHeader)+1);
70*51c0b2f7Stbbdev     if (!rawPtr)
71*51c0b2f7Stbbdev         return NULL;
72*51c0b2f7Stbbdev     // +1 to check working with unaligned space
73*51c0b2f7Stbbdev     void *ret = (void *)((uintptr_t)rawPtr+sizeof(MallocPoolHeader)+1);
74*51c0b2f7Stbbdev 
75*51c0b2f7Stbbdev     MallocPoolHeader *hdr = (MallocPoolHeader*)ret-1;
76*51c0b2f7Stbbdev     hdr->rawPtr = rawPtr;
77*51c0b2f7Stbbdev     hdr->userSize = bytes;
78*51c0b2f7Stbbdev 
79*51c0b2f7Stbbdev     liveRegions++;
80*51c0b2f7Stbbdev 
81*51c0b2f7Stbbdev     return ret;
82*51c0b2f7Stbbdev }
83*51c0b2f7Stbbdev 
84*51c0b2f7Stbbdev static int putMallocMem(intptr_t /*pool_id*/, void *ptr, size_t bytes)
85*51c0b2f7Stbbdev {
86*51c0b2f7Stbbdev     MallocPoolHeader *hdr = (MallocPoolHeader*)ptr-1;
87*51c0b2f7Stbbdev     ASSERT(bytes == hdr->userSize, "Invalid size in pool callback.");
88*51c0b2f7Stbbdev     free(hdr->rawPtr);
89*51c0b2f7Stbbdev 
90*51c0b2f7Stbbdev     liveRegions--;
91*51c0b2f7Stbbdev 
92*51c0b2f7Stbbdev     return 0;
93*51c0b2f7Stbbdev }
94*51c0b2f7Stbbdev 
95*51c0b2f7Stbbdev void TestPoolReset()
96*51c0b2f7Stbbdev {
97*51c0b2f7Stbbdev     rml::MemPoolPolicy pol(getMallocMem, putMallocMem);
98*51c0b2f7Stbbdev     rml::MemoryPool *pool;
99*51c0b2f7Stbbdev 
100*51c0b2f7Stbbdev     pool_create_v1(0, &pol, &pool);
101*51c0b2f7Stbbdev     for (int i=0; i<100; i++) {
102*51c0b2f7Stbbdev         REQUIRE(pool_malloc(pool, 8));
103*51c0b2f7Stbbdev         REQUIRE(pool_malloc(pool, 50*1024));
104*51c0b2f7Stbbdev     }
105*51c0b2f7Stbbdev     int regionsBeforeReset = liveRegions.load(std::memory_order_acquire);
106*51c0b2f7Stbbdev     bool ok = pool_reset(pool);
107*51c0b2f7Stbbdev     REQUIRE(ok);
108*51c0b2f7Stbbdev     for (int i=0; i<100; i++) {
109*51c0b2f7Stbbdev         REQUIRE(pool_malloc(pool, 8));
110*51c0b2f7Stbbdev         REQUIRE(pool_malloc(pool, 50*1024));
111*51c0b2f7Stbbdev     }
112*51c0b2f7Stbbdev     REQUIRE_MESSAGE(regionsBeforeReset == liveRegions.load(std::memory_order_relaxed),
113*51c0b2f7Stbbdev            "Expected no new regions allocation.");
114*51c0b2f7Stbbdev     ok = pool_destroy(pool);
115*51c0b2f7Stbbdev     REQUIRE(ok);
116*51c0b2f7Stbbdev     REQUIRE_MESSAGE(!liveRegions.load(std::memory_order_relaxed), "Expected all regions were released.");
117*51c0b2f7Stbbdev }
118*51c0b2f7Stbbdev 
// Functor for TestSharedPool: all threads allocate from one shared pool and
// then free objects that were allocated by a DIFFERENT thread, exercising
// the allocator's cross-thread deallocation paths.
class SharedPoolRun: utils::NoAssign {
    static long                 threadNum;
    static utils::SpinBarrier startB,        // sync point before allocation starts
                                mallocDone;  // sync point after all threads allocated
    static rml::MemoryPool     *pool;        // single pool shared by all threads
    static void               **crossThread, // freed by the "mirror" thread below
                              **afterTerm;   // freed by the caller after the run
public:
    static const int OBJ_CNT = 100;  // objects per thread in each shared array

    // Prepare the shared static state for a run with num threads.
    static void init(int num, rml::MemoryPool *pl, void **crThread, void **aTerm) {
        threadNum = num;
        pool = pl;
        crossThread = crThread;
        afterTerm = aTerm;
        startB.initialize(threadNum);
        mallocDone.initialize(threadNum);
    }

    void operator()( int id ) const {
        const int ITERS = 1000;
        void *local[ITERS];

        startB.wait();
        // Fill this thread's slice of the shared arrays with 8KB/9KB objects.
        for (int i=id*OBJ_CNT; i<(id+1)*OBJ_CNT; i++) {
            afterTerm[i] = pool_malloc(pool, i%2? 8*1024 : 9*1024);
            memset(afterTerm[i], i, i%2? 8*1024 : 9*1024);
            crossThread[i] = pool_malloc(pool, i%2? 9*1024 : 8*1024);
            memset(crossThread[i], i, i%2? 9*1024 : 8*1024);
        }

        // Purely thread-local objects, freed by this same thread below.
        for (int i=1; i<ITERS; i+=2) {
            local[i-1] = pool_malloc(pool, 6*1024);
            memset(local[i-1], i, 6*1024);
            local[i] = pool_malloc(pool, 16*1024);
            memset(local[i], i, 16*1024);
        }
        mallocDone.wait();
        // Free the crossThread slice that the mirror thread allocated.
        int myVictim = threadNum-id-1;
        for (int i=myVictim*OBJ_CNT; i<(myVictim+1)*OBJ_CNT; i++)
            pool_free(pool, crossThread[i]);
        for (int i=0; i<ITERS; i++)
            pool_free(pool, local[i]);
    }
};
164*51c0b2f7Stbbdev 
// Definitions of SharedPoolRun's static state.
long                 SharedPoolRun::threadNum;
utils::SpinBarrier SharedPoolRun::startB,
                     SharedPoolRun::mallocDone;
rml::MemoryPool     *SharedPoolRun::pool;
void               **SharedPoolRun::crossThread,
                   **SharedPoolRun::afterTerm;
171*51c0b2f7Stbbdev 
// single pool shared by different threads
void TestSharedPool()
{
    rml::MemPoolPolicy pol(getMallocMem, putMallocMem);
    rml::MemoryPool *pool;

    pool_create_v1(0, &pol, &pool);
    // Arrays sized for the maximal thread count; each run uses a prefix.
    void **crossThread = new void*[utils::MaxThread * SharedPoolRun::OBJ_CNT];
    void **afterTerm = new void*[utils::MaxThread * SharedPoolRun::OBJ_CNT];

    for (size_t p=utils::MinThread; p<=utils::MaxThread; p++) {
        SharedPoolRun::init(p, pool, crossThread, afterTerm);
        SharedPoolRun thr;

        // Large object kept alive across the whole parallel phase.
        void *hugeObj = pool_malloc(pool, 10*1024*1024);
        REQUIRE(hugeObj);

        utils::NativeParallelFor( p, thr );

        pool_free(pool, hugeObj);
        // afterTerm objects are deliberately freed only after the worker
        // threads that allocated them have terminated.
        for (size_t i=0; i<p*SharedPoolRun::OBJ_CNT; i++)
            pool_free(pool, afterTerm[i]);
    }
    delete []afterTerm;
    delete []crossThread;

    bool ok = pool_destroy(pool);
    REQUIRE(ok);
    REQUIRE_MESSAGE(!liveRegions.load(std::memory_order_relaxed), "Expected all regions were released.");
}
202*51c0b2f7Stbbdev 
203*51c0b2f7Stbbdev void *CrossThreadGetMem(intptr_t pool_id, size_t &bytes)
204*51c0b2f7Stbbdev {
205*51c0b2f7Stbbdev     if (poolSpace[pool_id].pos + bytes > poolSpace[pool_id].bufSize)
206*51c0b2f7Stbbdev         return NULL;
207*51c0b2f7Stbbdev 
208*51c0b2f7Stbbdev     void *ret = poolSpace[pool_id].space + poolSpace[pool_id].pos;
209*51c0b2f7Stbbdev     poolSpace[pool_id].pos += bytes;
210*51c0b2f7Stbbdev     poolSpace[pool_id].regions++;
211*51c0b2f7Stbbdev 
212*51c0b2f7Stbbdev     return ret;
213*51c0b2f7Stbbdev }
214*51c0b2f7Stbbdev 
215*51c0b2f7Stbbdev int CrossThreadPutMem(intptr_t pool_id, void* /*raw_ptr*/, size_t /*raw_bytes*/)
216*51c0b2f7Stbbdev {
217*51c0b2f7Stbbdev     poolSpace[pool_id].regions--;
218*51c0b2f7Stbbdev     return 0;
219*51c0b2f7Stbbdev }
220*51c0b2f7Stbbdev 
// Functor for TestCrossThreadPools: each thread creates its own pool and
// allocates from it, then frees from and destroys the pool created by a
// DIFFERENT thread, checking that a pool may be used and destroyed by a
// thread other than its creator.
class CrossThreadRun: utils::NoAssign {
    static long number_of_threads;
    static utils::SpinBarrier barrier;
    static rml::MemoryPool **pool;  // pool[i] is created by thread i
    static char **obj;              // obj[i] is allocated from pool[i] by thread i
public:
    static void initBarrier(unsigned thrds) { barrier.initialize(thrds); }
    // Allocate the shared per-thread arrays for a run with num threads.
    static void init(long num) {
        number_of_threads = num;
        pool = new rml::MemoryPool*[number_of_threads];
        poolSpace = new PoolSpace[number_of_threads];
        obj = new char*[number_of_threads];
    }
    // Verify no regions are left alive, then release the shared arrays.
    static void destroy() {
        for (long i=0; i<number_of_threads; i++)
            REQUIRE_MESSAGE(!poolSpace[i].regions, "Memory leak detected");
        delete []pool;
        delete []poolSpace;
        delete []obj;
    }
    CrossThreadRun() {}
    void operator()( int id ) const {
        rml::MemPoolPolicy pol(CrossThreadGetMem, CrossThreadPutMem);
        const int objLen = 10*id;

        pool_create_v1(id, &pol, &pool[id]);
        obj[id] = (char*)pool_malloc(pool[id], objLen);
        REQUIRE(obj[id]);
        memset(obj[id], id, objLen);

        {
            const size_t lrgSz = 2*16*1024;
            void *ptrLarge = pool_malloc(pool[id], lrgSz);
            REQUIRE(ptrLarge);
            memset(ptrLarge, 1, lrgSz);
            // consume all small objects
            while (pool_malloc(pool[id], 5 * 1024));
            // releasing of large object will not give a chance to allocate more
            // since only fixed pool can look at other bins aligned/notAligned
            pool_free(pool[id], ptrLarge);
            CHECK(!pool_malloc(pool[id], 5*1024));
        }

        barrier.wait();
        // Work on the "mirror" thread's pool: verify its object contents,
        // free the object, then destroy a pool this thread did not create.
        int myPool = number_of_threads-id-1;
        for (int i=0; i<10*myPool; i++)
            REQUIRE(myPool==obj[myPool][i]);
        pool_free(pool[myPool], obj[myPool]);
        bool ok = pool_destroy(pool[myPool]);
        REQUIRE(ok);
    }
};
273*51c0b2f7Stbbdev 
// Definitions of CrossThreadRun's static state.
long CrossThreadRun::number_of_threads;
utils::SpinBarrier CrossThreadRun::barrier;
rml::MemoryPool **CrossThreadRun::pool;
char **CrossThreadRun::obj;
278*51c0b2f7Stbbdev 
279*51c0b2f7Stbbdev // pools created, used and destroyed by different threads
280*51c0b2f7Stbbdev void TestCrossThreadPools()
281*51c0b2f7Stbbdev {
282*51c0b2f7Stbbdev     for (size_t p=utils::MinThread; p<=utils::MaxThread; p++) {
283*51c0b2f7Stbbdev         CrossThreadRun::initBarrier(p);
284*51c0b2f7Stbbdev         CrossThreadRun::init(p);
285*51c0b2f7Stbbdev         utils::NativeParallelFor( p, CrossThreadRun() );
286*51c0b2f7Stbbdev         for (size_t i=0; i<p; i++)
287*51c0b2f7Stbbdev             REQUIRE_MESSAGE(!poolSpace[i].regions, "Region leak detected");
288*51c0b2f7Stbbdev         CrossThreadRun::destroy();
289*51c0b2f7Stbbdev     }
290*51c0b2f7Stbbdev }
291*51c0b2f7Stbbdev 
292*51c0b2f7Stbbdev // buffer is too small to pool be created, but must not leak resources
293*51c0b2f7Stbbdev void TestTooSmallBuffer()
294*51c0b2f7Stbbdev {
295*51c0b2f7Stbbdev     poolSpace = new PoolSpace(8*1024);
296*51c0b2f7Stbbdev 
297*51c0b2f7Stbbdev     rml::MemPoolPolicy pol(CrossThreadGetMem, CrossThreadPutMem);
298*51c0b2f7Stbbdev     rml::MemoryPool *pool;
299*51c0b2f7Stbbdev     pool_create_v1(0, &pol, &pool);
300*51c0b2f7Stbbdev     bool ok = pool_destroy(pool);
301*51c0b2f7Stbbdev     REQUIRE(ok);
302*51c0b2f7Stbbdev     REQUIRE_MESSAGE(!poolSpace[0].regions, "No leaks.");
303*51c0b2f7Stbbdev 
304*51c0b2f7Stbbdev     delete poolSpace;
305*51c0b2f7Stbbdev }
306*51c0b2f7Stbbdev 
// Owner of the single backing buffer used by the fixed-size pool tests.
// The buffer may be handed out at most once (a fixed pool never grows).
class FixedPoolHeadBase : utils::NoAssign {
    size_t size;             // buffer size in bytes
    std::atomic<bool> used;  // set once the buffer has been handed out
    char* data;
public:
    FixedPoolHeadBase(size_t s) : size(s), used(false) {
        data = new char[size];
    }
    // Hand the whole buffer to the pool; must be called at most once.
    void *useData(size_t &bytes) {
        bool wasUsed = used.exchange(true);
        REQUIRE_MESSAGE(!wasUsed, "The buffer must not be used twice.");
        bytes = size;
        return data;
    }
    ~FixedPoolHeadBase() {
        delete []data;
    }
};
325*51c0b2f7Stbbdev 
// Convenience wrapper fixing the buffer size at compile time.
template<size_t SIZE>
class FixedPoolHead : FixedPoolHeadBase {
public:
    FixedPoolHead() : FixedPoolHeadBase(SIZE) { }
};
331*51c0b2f7Stbbdev 
332*51c0b2f7Stbbdev static void *fixedBufGetMem(intptr_t pool_id, size_t &bytes)
333*51c0b2f7Stbbdev {
334*51c0b2f7Stbbdev     return ((FixedPoolHeadBase*)pool_id)->useData(bytes);
335*51c0b2f7Stbbdev }
336*51c0b2f7Stbbdev 
// Functor: each thread repeatedly allocates and immediately frees an object
// of reqSize bytes from the shared fixed pool.
class FixedPoolUse: utils::NoAssign {
    static utils::SpinBarrier startB;
    rml::MemoryPool *pool;
    size_t reqSize;  // size of each allocation request
    int iters;       // number of malloc/free cycles per thread
public:
    FixedPoolUse(unsigned threads, rml::MemoryPool *p, size_t sz, int it) :
        pool(p), reqSize(sz), iters(it) {
        startB.initialize(threads);
    }
    void operator()( int /*id*/ ) const {
        startB.wait();
        for (int i=0; i<iters; i++) {
            void *o = pool_malloc(pool, reqSize);
            ASSERT(o, "Invalid object");
            pool_free(pool, o);
        }
    }
};
356*51c0b2f7Stbbdev 
// Definition of FixedPoolUse's start barrier.
utils::SpinBarrier FixedPoolUse::startB;
358*51c0b2f7Stbbdev 
// Functor: run when the fixed pool is known to be exhausted, so every
// allocation attempt (small or large) must fail.
class FixedPoolNomem: utils::NoAssign {
    utils::SpinBarrier *startB;
    rml::MemoryPool *pool;
public:
    FixedPoolNomem(utils::SpinBarrier *b, rml::MemoryPool *p) :
        startB(b), pool(p) {}
    void operator()(int id) const {
        startB->wait();
        void *o = pool_malloc(pool, id%2? 64 : 128*1024);
        ASSERT(!o, "All memory must be consumed.");
    }
};
371*51c0b2f7Stbbdev 
// Functor: allocate from a mostly-full fixed pool; the request may fail.
// NOTE(review): o may be NULL below — assumes pool_free tolerates NULL;
// confirm against the pool API.
class FixedPoolSomeMem: utils::NoAssign {
    utils::SpinBarrier *barrier;
    rml::MemoryPool *pool;
public:
    FixedPoolSomeMem(utils::SpinBarrier *b, rml::MemoryPool *p) :
        barrier(b), pool(p) {}
    void operator()(int id) const {
        barrier->wait();
        utils::Sleep(2*id);  // stagger threads to vary the interleaving
        void *o = pool_malloc(pool, id%2? 64 : 128*1024);
        barrier->wait();
        pool_free(pool, o);
    }
};
386*51c0b2f7Stbbdev 
387*51c0b2f7Stbbdev bool haveEnoughSpace(rml::MemoryPool *pool, size_t sz)
388*51c0b2f7Stbbdev {
389*51c0b2f7Stbbdev     if (void *p = pool_malloc(pool, sz)) {
390*51c0b2f7Stbbdev         pool_free(pool, p);
391*51c0b2f7Stbbdev         return true;
392*51c0b2f7Stbbdev     }
393*51c0b2f7Stbbdev     return false;
394*51c0b2f7Stbbdev }
395*51c0b2f7Stbbdev 
// Exercise a fixed-buffer pool: no putMem callback, the single region is
// obtained once via fixedBufGetMem and kept until pool destruction.
void TestFixedBufferPool()
{
    const int ITERS = 7;
    const size_t MAX_OBJECT = 7*1024*1024;
    void *ptrs[ITERS];
    rml::MemPoolPolicy pol(fixedBufGetMem, NULL, 0, /*fixedSizePool=*/true,
                           /*keepMemTillDestroy=*/false);
    rml::MemoryPool *pool;
    {
        FixedPoolHead<MAX_OBJECT + 1024*1024> head;

        pool_create_v1((intptr_t)&head, &pol, &pool);
        {
            // Single-threaded sanity checks: the pool must serve MAX_OBJECT
            // both as one object and as ITERS fragments.
            utils::NativeParallelFor( 1, FixedPoolUse(1, pool, MAX_OBJECT, 2) );

            for (int i=0; i<ITERS; i++) {
                ptrs[i] = pool_malloc(pool, MAX_OBJECT/ITERS);
                REQUIRE(ptrs[i]);
            }
            for (int i=0; i<ITERS; i++)
                pool_free(pool, ptrs[i]);

            utils::NativeParallelFor( 1, FixedPoolUse(1, pool, MAX_OBJECT, 1) );
        }
        // each thread asks for an MAX_OBJECT/p/2 object,
        // /2 is to cover fragmentation
        for (size_t p=utils::MinThread; p<=utils::MaxThread; p++) {
            utils::NativeParallelFor( p, FixedPoolUse(p, pool, MAX_OBJECT/p/2, 10000) );
        }
        {
            const int p = 128;
            utils::NativeParallelFor( p, FixedPoolUse(p, pool, MAX_OBJECT/p/2, 1) );
        }
        {
            size_t maxSz;
            const int p = 256;
            utils::SpinBarrier barrier(p);

            // Find maximal useful object size. Start with MAX_OBJECT/2,
            // as the pool might be fragmented by BootStrapBlocks consumed during
            // FixedPoolRun.
            size_t l, r;
            REQUIRE(haveEnoughSpace(pool, MAX_OBJECT/2));
            // Binary search: invariant is haveEnoughSpace(l) && !haveEnoughSpace(r).
            for (l = MAX_OBJECT/2, r = MAX_OBJECT + 1024*1024; l < r-1; ) {
                size_t mid = (l+r)/2;
                if (haveEnoughSpace(pool, mid))
                    l = mid;
                else
                    r = mid;
            }
            maxSz = l;
            REQUIRE_MESSAGE(!haveEnoughSpace(pool, maxSz+1), "Expect to find boundary value.");
            // consume all available memory
            void *largeObj = pool_malloc(pool, maxSz);
            REQUIRE(largeObj);
            void *o = pool_malloc(pool, 64);
            if (o) // pool fragmented, skip FixedPoolNomem
                pool_free(pool, o);
            else
                utils::NativeParallelFor( p, FixedPoolNomem(&barrier, pool) );
            pool_free(pool, largeObj);
            // keep some space unoccupied
            largeObj = pool_malloc(pool, maxSz-512*1024);
            REQUIRE(largeObj);
            utils::NativeParallelFor( p, FixedPoolSomeMem(&barrier, pool) );
            pool_free(pool, largeObj);
        }
        bool ok = pool_destroy(pool);
        REQUIRE(ok);
    }
    // check that fresh untouched pool can successfully fulfil requests from 128 threads
    {
        FixedPoolHead<MAX_OBJECT + 1024*1024> head;
        pool_create_v1((intptr_t)&head, &pol, &pool);
        int p=128;
        utils::NativeParallelFor( p, FixedPoolUse(p, pool, MAX_OBJECT/p/2, 1) );
        bool ok = pool_destroy(pool);
        REQUIRE(ok);
    }
}
476*51c0b2f7Stbbdev 
// Granularity the getGranMem/putGranMem callbacks check region sizes against.
static size_t currGranularity;
478*51c0b2f7Stbbdev 
479*51c0b2f7Stbbdev static void *getGranMem(intptr_t /*pool_id*/, size_t &bytes)
480*51c0b2f7Stbbdev {
481*51c0b2f7Stbbdev     REQUIRE_MESSAGE(!(bytes%currGranularity), "Region size mismatch granularity.");
482*51c0b2f7Stbbdev     return malloc(bytes);
483*51c0b2f7Stbbdev }
484*51c0b2f7Stbbdev 
485*51c0b2f7Stbbdev static int putGranMem(intptr_t /*pool_id*/, void *ptr, size_t bytes)
486*51c0b2f7Stbbdev {
487*51c0b2f7Stbbdev     REQUIRE_MESSAGE(!(bytes%currGranularity), "Region size mismatch granularity.");
488*51c0b2f7Stbbdev     free(ptr);
489*51c0b2f7Stbbdev     return 0;
490*51c0b2f7Stbbdev }
491*51c0b2f7Stbbdev 
// Check that pools honor the configured region granularity for a range of
// granularity values; the callbacks above assert the multiples.
void TestPoolGranularity()
{
    rml::MemPoolPolicy pol(getGranMem, putGranMem);
    const size_t grans[] = {4*1024, 2*1024*1024, 6*1024*1024, 10*1024*1024};

    for (unsigned i=0; i<sizeof(grans)/sizeof(grans[0]); i++) {
        pol.granularity = currGranularity = grans[i];
        rml::MemoryPool *pool;

        pool_create_v1(0, &pol, &pool);
        // Allocation sizes sweep 500KB..16MB in 101KB steps.
        for (int sz=500*1024; sz<16*1024*1024; sz+=101*1024) {
            void *p = pool_malloc(pool, sz);
            REQUIRE_MESSAGE(p, "Can't allocate memory in pool.");
            pool_free(pool, p);
        }
        bool ok = pool_destroy(pool);
        REQUIRE(ok);
    }
}
511*51c0b2f7Stbbdev 
// Counters for the getMemMalloc/putMemFree callbacks: total put calls,
// total get calls, and get calls for which malloc actually succeeded.
static size_t putMemAll, getMemAll, getMemSuccessful;
513*51c0b2f7Stbbdev 
514*51c0b2f7Stbbdev static void *getMemMalloc(intptr_t /*pool_id*/, size_t &bytes)
515*51c0b2f7Stbbdev {
516*51c0b2f7Stbbdev     getMemAll++;
517*51c0b2f7Stbbdev     void *p = malloc(bytes);
518*51c0b2f7Stbbdev     if (p)
519*51c0b2f7Stbbdev         getMemSuccessful++;
520*51c0b2f7Stbbdev     return p;
521*51c0b2f7Stbbdev }
522*51c0b2f7Stbbdev 
523*51c0b2f7Stbbdev static int putMemFree(intptr_t /*pool_id*/, void *ptr, size_t /*bytes*/)
524*51c0b2f7Stbbdev {
525*51c0b2f7Stbbdev     putMemAll++;
526*51c0b2f7Stbbdev     free(ptr);
527*51c0b2f7Stbbdev     return 0;
528*51c0b2f7Stbbdev }
529*51c0b2f7Stbbdev 
// Verify the keepAllMemory policy: with it set, regions are retained across
// frees and pool_reset and only returned via the callback at pool_destroy;
// without it, frees may return regions immediately.
void TestPoolKeepTillDestroy()
{
    const int ITERS = 50*1024;
    void *ptrs[2*ITERS+1];
    rml::MemPoolPolicy pol(getMemMalloc, putMemFree);
    rml::MemoryPool *pool;

    // 1st create default pool that returns memory back to callback,
    // then use keepMemTillDestroy policy
    for (int keep=0; keep<2; keep++) {
        getMemAll = putMemAll = 0;
        if (keep)
            pol.keepAllMemory = 1;
        pool_create_v1(0, &pol, &pool);
        // Mixed-size allocations plus one large object.
        for (int i=0; i<2*ITERS; i+=2) {
            ptrs[i] = pool_malloc(pool, 7*1024);
            ptrs[i+1] = pool_malloc(pool, 10*1024);
        }
        ptrs[2*ITERS] = pool_malloc(pool, 8*1024*1024);
        REQUIRE(!putMemAll);  // nothing freed yet, so no regions returned
        for (int i=0; i<2*ITERS; i++)
            pool_free(pool, ptrs[i]);
        pool_free(pool, ptrs[2*ITERS]);
        size_t totalPutMemCalls = putMemAll;
        if (keep)
            REQUIRE(!putMemAll);  // keepAllMemory: frees return nothing
        else {
            REQUIRE(putMemAll);
            putMemAll = 0;
        }
        size_t getCallsBefore = getMemAll;
        void *p = pool_malloc(pool, 8*1024*1024);
        REQUIRE(p);
        if (keep)
            REQUIRE_MESSAGE(getCallsBefore == getMemAll, "Must not lead to new getMem call");
        size_t putCallsBefore = putMemAll;
        bool ok = pool_reset(pool);
        REQUIRE(ok);
        REQUIRE_MESSAGE(putCallsBefore == putMemAll, "Pool is not releasing memory during reset.");
        ok = pool_destroy(pool);
        REQUIRE(ok);
        REQUIRE(putMemAll);  // destroy must return regions via the callback
        totalPutMemCalls += putMemAll;
        // Every successful getMem must be balanced by a putMem overall.
        REQUIRE_MESSAGE(getMemAll == totalPutMemCalls, "Memory leak detected.");
    }

}
577*51c0b2f7Stbbdev 
// Return true iff every byte of buf[0..size) equals val.
// Fix: bail out at the first mismatch instead of scanning the whole buffer
// after the result is already known (the old loop always visited all
// `size` bytes). An empty range (size == 0) is trivially equal.
static bool memEqual(char *buf, size_t size, int val)
{
    for (size_t k=0; k<size; k++)
        if (buf[k] != val)
            return false;
    return true;
}
586*51c0b2f7Stbbdev 
// Exercise the pool entry-point surface: pool_aligned_malloc,
// pool_aligned_realloc, pool_realloc, and the NULL-pool error paths,
// checking alignment of results and preservation of data across reallocs.
void TestEntries()
{
    const int SZ = 4;
    const int ALGN = 4;
    size_t size[SZ] = {8, 8000, 9000, 100*1024};
    size_t algn[ALGN] = {8, 64, 4*1024, 8*1024*1024};

    rml::MemPoolPolicy pol(getGranMem, putGranMem);
    currGranularity = 1; // not check granularity in the test
    rml::MemoryPool *pool;

    pool_create_v1(0, &pol, &pool);
    for (int i=0; i<SZ; i++)
        for (int j=0; j<ALGN; j++) {
            char *p = (char*)pool_aligned_malloc(pool, size[i], algn[j]);
            REQUIRE((p && 0==((uintptr_t)p & (algn[j]-1))));
            memset(p, j, size[i]);

            // Aligned realloc to a random size/alignment must keep the
            // common prefix of the data intact.
            size_t curr_algn = algn[rand() % ALGN];
            size_t curr_sz = size[rand() % SZ];
            char *p1 = (char*)pool_aligned_realloc(pool, p, curr_sz, curr_algn);
            REQUIRE((p1 && 0==((uintptr_t)p1 & (curr_algn-1))));
            REQUIRE(memEqual(p1, utils::min(size[i], curr_sz), j));

            // Plain realloc likewise preserves the common prefix.
            memset(p1, j+1, curr_sz);
            size_t curr_sz1 = size[rand() % SZ];
            char *p2 = (char*)pool_realloc(pool, p1, curr_sz1);
            REQUIRE(p2);
            REQUIRE(memEqual(p2, utils::min(curr_sz1, curr_sz), j+1));

            pool_free(pool, p2);
        }

    bool ok = pool_destroy(pool);
    REQUIRE(ok);

    // Destroy/reset of a NULL pool must fail gracefully, not crash.
    bool fail = rml::pool_destroy(NULL);
    REQUIRE(!fail);
    fail = rml::pool_reset(NULL);
    REQUIRE(!fail);
}
628*51c0b2f7Stbbdev 
// Create a malloc-backed pool and verify it can serve an object of `size`
// bytes. Returns NULL when pool creation fails or the backing callback
// could not supply memory, asserting in both cases that no callback
// imbalance occurred. Resets the getMem/putMem counters on entry.
rml::MemoryPool *CreateUsablePool(size_t size)
{
    rml::MemoryPool *pool;
    rml::MemPoolPolicy okPolicy(getMemMalloc, putMemFree);

    putMemAll = getMemAll = getMemSuccessful = 0;
    rml::MemPoolError res = pool_create_v1(0, &okPolicy, &pool);
    if (res != rml::POOL_OK) {
        REQUIRE_MESSAGE((!getMemAll && !putMemAll), "No callbacks after fail.");
        return NULL;
    }
    void *o = pool_malloc(pool, size);
    if (!getMemSuccessful) {
        // no memory from callback, valid reason to leave
        REQUIRE_MESSAGE(!o, "The pool must be unusable.");
        return NULL;
    }
    REQUIRE_MESSAGE(o, "Created pool must be useful.");
    REQUIRE_MESSAGE((getMemSuccessful == 1 || getMemSuccessful == 5 || getMemAll > getMemSuccessful),
           "Multiple requests are allowed when unsuccessful request occurred or cannot search in bootstrap memory. ");
    REQUIRE(!putMemAll);  // nothing should have been returned yet
    pool_free(pool, o);

    return pool;
}
654*51c0b2f7Stbbdev 
655*51c0b2f7Stbbdev void CheckPoolLeaks(size_t poolsAlwaysAvailable)
656*51c0b2f7Stbbdev {
657*51c0b2f7Stbbdev     const size_t MAX_POOLS = 16*1000;
658*51c0b2f7Stbbdev     const int ITERS = 20, CREATED_STABLE = 3;
659*51c0b2f7Stbbdev     rml::MemoryPool *pools[MAX_POOLS];
660*51c0b2f7Stbbdev     size_t created, maxCreated = MAX_POOLS;
661*51c0b2f7Stbbdev     int maxNotChangedCnt = 0;
662*51c0b2f7Stbbdev 
663*51c0b2f7Stbbdev     // expecting that for ITERS runs, max number of pools that can be created
664*51c0b2f7Stbbdev     // can be stabilized and still stable CREATED_STABLE times
665*51c0b2f7Stbbdev     for (int j=0; j<ITERS && maxNotChangedCnt<CREATED_STABLE; j++) {
666*51c0b2f7Stbbdev         for (created=0; created<maxCreated; created++) {
667*51c0b2f7Stbbdev             rml::MemoryPool *p = CreateUsablePool(1024);
668*51c0b2f7Stbbdev             if (!p)
669*51c0b2f7Stbbdev                 break;
670*51c0b2f7Stbbdev             pools[created] = p;
671*51c0b2f7Stbbdev         }
672*51c0b2f7Stbbdev         REQUIRE_MESSAGE(created>=poolsAlwaysAvailable,
673*51c0b2f7Stbbdev                "Expect that the reasonable number of pools can be always created.");
674*51c0b2f7Stbbdev         for (size_t i=0; i<created; i++) {
675*51c0b2f7Stbbdev             bool ok = pool_destroy(pools[i]);
676*51c0b2f7Stbbdev             REQUIRE(ok);
677*51c0b2f7Stbbdev         }
678*51c0b2f7Stbbdev         if (created < maxCreated) {
679*51c0b2f7Stbbdev             maxCreated = created;
680*51c0b2f7Stbbdev             maxNotChangedCnt = 0;
681*51c0b2f7Stbbdev         } else
682*51c0b2f7Stbbdev             maxNotChangedCnt++;
683*51c0b2f7Stbbdev     }
684*51c0b2f7Stbbdev     REQUIRE_MESSAGE(maxNotChangedCnt == CREATED_STABLE, "The number of created pools must be stabilized.");
685*51c0b2f7Stbbdev }
686*51c0b2f7Stbbdev 
687*51c0b2f7Stbbdev void TestPoolCreation()
688*51c0b2f7Stbbdev {
689*51c0b2f7Stbbdev     putMemAll = getMemAll = getMemSuccessful = 0;
690*51c0b2f7Stbbdev 
691*51c0b2f7Stbbdev     rml::MemPoolPolicy nullPolicy(NULL, putMemFree),
692*51c0b2f7Stbbdev         emptyFreePolicy(getMemMalloc, NULL),
693*51c0b2f7Stbbdev         okPolicy(getMemMalloc, putMemFree);
694*51c0b2f7Stbbdev     rml::MemoryPool *pool;
695*51c0b2f7Stbbdev 
696*51c0b2f7Stbbdev     rml::MemPoolError res = pool_create_v1(0, &nullPolicy, &pool);
697*51c0b2f7Stbbdev     REQUIRE_MESSAGE(res==rml::INVALID_POLICY, "pool with empty pAlloc can't be created");
698*51c0b2f7Stbbdev     res = pool_create_v1(0, &emptyFreePolicy, &pool);
699*51c0b2f7Stbbdev     REQUIRE_MESSAGE(res==rml::INVALID_POLICY, "pool with empty pFree can't be created");
700*51c0b2f7Stbbdev     REQUIRE_MESSAGE((!putMemAll && !getMemAll), "no callback calls are expected");
701*51c0b2f7Stbbdev     res = pool_create_v1(0, &okPolicy, &pool);
702*51c0b2f7Stbbdev     REQUIRE(res==rml::POOL_OK);
703*51c0b2f7Stbbdev     bool ok = pool_destroy(pool);
704*51c0b2f7Stbbdev     REQUIRE(ok);
705*51c0b2f7Stbbdev     REQUIRE_MESSAGE(putMemAll == getMemSuccessful, "no leaks after pool_destroy");
706*51c0b2f7Stbbdev 
707*51c0b2f7Stbbdev     // 32 is a guess for a number of pools that is acceptable everywere
708*51c0b2f7Stbbdev     CheckPoolLeaks(32);
709*51c0b2f7Stbbdev     // try to consume all but 16 TLS keys
710*51c0b2f7Stbbdev     LimitTLSKeysTo limitTLSTo(16);
711*51c0b2f7Stbbdev     // ...and check that we can create at least 16 pools
712*51c0b2f7Stbbdev     CheckPoolLeaks(16);
713*51c0b2f7Stbbdev }
714*51c0b2f7Stbbdev 
715*51c0b2f7Stbbdev struct AllocatedObject {
716*51c0b2f7Stbbdev     rml::MemoryPool *pool;
717*51c0b2f7Stbbdev };
718*51c0b2f7Stbbdev 
719*51c0b2f7Stbbdev const size_t BUF_SIZE = 1024*1024;
720*51c0b2f7Stbbdev 
721*51c0b2f7Stbbdev class PoolIdentityCheck : utils::NoAssign {
722*51c0b2f7Stbbdev     rml::MemoryPool** const pools;
723*51c0b2f7Stbbdev     AllocatedObject** const objs;
724*51c0b2f7Stbbdev public:
725*51c0b2f7Stbbdev     PoolIdentityCheck(rml::MemoryPool** p, AllocatedObject** o) : pools(p), objs(o) {}
726*51c0b2f7Stbbdev     void operator()(int id) const {
727*51c0b2f7Stbbdev         objs[id] = (AllocatedObject*)pool_malloc(pools[id], BUF_SIZE/2);
728*51c0b2f7Stbbdev         REQUIRE(objs[id]);
729*51c0b2f7Stbbdev         rml::MemoryPool *act_pool = rml::pool_identify(objs[id]);
730*51c0b2f7Stbbdev         REQUIRE(act_pool == pools[id]);
731*51c0b2f7Stbbdev 
732*51c0b2f7Stbbdev         for (size_t total=0; total<2*BUF_SIZE; total+=256) {
733*51c0b2f7Stbbdev             AllocatedObject *o = (AllocatedObject*)pool_malloc(pools[id], 256);
734*51c0b2f7Stbbdev             REQUIRE(o);
735*51c0b2f7Stbbdev             act_pool = rml::pool_identify(o);
736*51c0b2f7Stbbdev             REQUIRE(act_pool == pools[id]);
737*51c0b2f7Stbbdev             pool_free(act_pool, o);
738*51c0b2f7Stbbdev         }
739*51c0b2f7Stbbdev         if( id&1 ) { // make every second returned object "small"
740*51c0b2f7Stbbdev             pool_free(act_pool, objs[id]);
741*51c0b2f7Stbbdev             objs[id] = (AllocatedObject*)pool_malloc(pools[id], 16);
742*51c0b2f7Stbbdev             REQUIRE(objs[id]);
743*51c0b2f7Stbbdev         }
744*51c0b2f7Stbbdev         objs[id]->pool = act_pool;
745*51c0b2f7Stbbdev     }
746*51c0b2f7Stbbdev };
747*51c0b2f7Stbbdev 
748*51c0b2f7Stbbdev void TestPoolDetection()
749*51c0b2f7Stbbdev {
750*51c0b2f7Stbbdev     const int POOLS = 4;
751*51c0b2f7Stbbdev     rml::MemPoolPolicy pol(fixedBufGetMem, NULL, 0, /*fixedSizePool=*/true,
752*51c0b2f7Stbbdev                            /*keepMemTillDestroy=*/false);
753*51c0b2f7Stbbdev     rml::MemoryPool *pools[POOLS];
754*51c0b2f7Stbbdev     FixedPoolHead<BUF_SIZE*POOLS> head[POOLS];
755*51c0b2f7Stbbdev     AllocatedObject *objs[POOLS];
756*51c0b2f7Stbbdev 
757*51c0b2f7Stbbdev     for (int i=0; i<POOLS; i++)
758*51c0b2f7Stbbdev         pool_create_v1((intptr_t)(head+i), &pol, &pools[i]);
759*51c0b2f7Stbbdev     // if object somehow released to different pools, subsequent allocation
760*51c0b2f7Stbbdev     // from affected pools became impossible
761*51c0b2f7Stbbdev     for (int k=0; k<10; k++) {
762*51c0b2f7Stbbdev         PoolIdentityCheck check(pools, objs);
763*51c0b2f7Stbbdev         if( k&1 )
764*51c0b2f7Stbbdev             utils::NativeParallelFor( POOLS, check);
765*51c0b2f7Stbbdev         else
766*51c0b2f7Stbbdev             for (int i=0; i<POOLS; i++) check(i);
767*51c0b2f7Stbbdev 
768*51c0b2f7Stbbdev         for (int i=0; i<POOLS; i++) {
769*51c0b2f7Stbbdev             rml::MemoryPool *p = rml::pool_identify(objs[i]);
770*51c0b2f7Stbbdev             REQUIRE(p == objs[i]->pool);
771*51c0b2f7Stbbdev             pool_free(p, objs[i]);
772*51c0b2f7Stbbdev         }
773*51c0b2f7Stbbdev     }
774*51c0b2f7Stbbdev     for (int i=0; i<POOLS; i++) {
775*51c0b2f7Stbbdev         bool ok = pool_destroy(pools[i]);
776*51c0b2f7Stbbdev         REQUIRE(ok);
777*51c0b2f7Stbbdev     }
778*51c0b2f7Stbbdev }
779*51c0b2f7Stbbdev 
780*51c0b2f7Stbbdev void TestLazyBootstrap()
781*51c0b2f7Stbbdev {
782*51c0b2f7Stbbdev     rml::MemPoolPolicy pol(getMemMalloc, putMemFree);
783*51c0b2f7Stbbdev     const size_t sizes[] = {8, 9*1024, 0};
784*51c0b2f7Stbbdev 
785*51c0b2f7Stbbdev     for (int i=0; sizes[i]; i++) {
786*51c0b2f7Stbbdev         rml::MemoryPool *pool = CreateUsablePool(sizes[i]);
787*51c0b2f7Stbbdev         bool ok = pool_destroy(pool);
788*51c0b2f7Stbbdev         REQUIRE(ok);
789*51c0b2f7Stbbdev         REQUIRE_MESSAGE(getMemSuccessful == putMemAll, "No leak.");
790*51c0b2f7Stbbdev     }
791*51c0b2f7Stbbdev }
792*51c0b2f7Stbbdev 
793*51c0b2f7Stbbdev class NoLeakOnDestroyRun: utils::NoAssign {
794*51c0b2f7Stbbdev     rml::MemoryPool      *pool;
795*51c0b2f7Stbbdev     utils::SpinBarrier *barrier;
796*51c0b2f7Stbbdev public:
797*51c0b2f7Stbbdev     NoLeakOnDestroyRun(rml::MemoryPool *p, utils::SpinBarrier *b) : pool(p), barrier(b) {}
798*51c0b2f7Stbbdev     void operator()(int id) const {
799*51c0b2f7Stbbdev         void *p = pool_malloc(pool, id%2? 8 : 9000);
800*51c0b2f7Stbbdev         REQUIRE((p && liveRegions.load(std::memory_order_relaxed)));
801*51c0b2f7Stbbdev         barrier->wait();
802*51c0b2f7Stbbdev         if (!id) {
803*51c0b2f7Stbbdev             bool ok = pool_destroy(pool);
804*51c0b2f7Stbbdev             REQUIRE(ok);
805*51c0b2f7Stbbdev             REQUIRE_MESSAGE(!liveRegions.load(std::memory_order_relaxed), "Expected all regions were released.");
806*51c0b2f7Stbbdev         }
807*51c0b2f7Stbbdev         // other threads must wait till pool destruction,
808*51c0b2f7Stbbdev         // to not call thread destruction cleanup before this
809*51c0b2f7Stbbdev         barrier->wait();
810*51c0b2f7Stbbdev     }
811*51c0b2f7Stbbdev };
812*51c0b2f7Stbbdev 
813*51c0b2f7Stbbdev void TestNoLeakOnDestroy()
814*51c0b2f7Stbbdev {
815*51c0b2f7Stbbdev     liveRegions.store(0, std::memory_order_release);
816*51c0b2f7Stbbdev     for (size_t p=utils::MinThread; p<=utils::MaxThread; p++) {
817*51c0b2f7Stbbdev         rml::MemPoolPolicy pol(getMallocMem, putMallocMem);
818*51c0b2f7Stbbdev         utils::SpinBarrier barrier(p);
819*51c0b2f7Stbbdev         rml::MemoryPool *pool;
820*51c0b2f7Stbbdev 
821*51c0b2f7Stbbdev         pool_create_v1(0, &pol, &pool);
822*51c0b2f7Stbbdev         utils::NativeParallelFor(p, NoLeakOnDestroyRun(pool, &barrier));
823*51c0b2f7Stbbdev     }
824*51c0b2f7Stbbdev }
825*51c0b2f7Stbbdev 
826*51c0b2f7Stbbdev static int putMallocMemError(intptr_t /*pool_id*/, void *ptr, size_t bytes)
827*51c0b2f7Stbbdev {
828*51c0b2f7Stbbdev     MallocPoolHeader *hdr = (MallocPoolHeader*)ptr-1;
829*51c0b2f7Stbbdev     REQUIRE_MESSAGE(bytes == hdr->userSize, "Invalid size in pool callback.");
830*51c0b2f7Stbbdev     free(hdr->rawPtr);
831*51c0b2f7Stbbdev 
832*51c0b2f7Stbbdev     liveRegions--;
833*51c0b2f7Stbbdev 
834*51c0b2f7Stbbdev     return -1;
835*51c0b2f7Stbbdev }
836*51c0b2f7Stbbdev 
837*51c0b2f7Stbbdev void TestDestroyFailed()
838*51c0b2f7Stbbdev {
839*51c0b2f7Stbbdev     rml::MemPoolPolicy pol(getMallocMem, putMallocMemError);
840*51c0b2f7Stbbdev     rml::MemoryPool *pool;
841*51c0b2f7Stbbdev     pool_create_v1(0, &pol, &pool);
842*51c0b2f7Stbbdev     void *ptr = pool_malloc(pool, 16);
843*51c0b2f7Stbbdev     REQUIRE(ptr);
844*51c0b2f7Stbbdev     bool fail = pool_destroy(pool);
845*51c0b2f7Stbbdev     REQUIRE_MESSAGE(fail==false, "putMemPolicyError callback returns error, "
846*51c0b2f7Stbbdev            "expect pool_destroy() failure");
847*51c0b2f7Stbbdev }
848*51c0b2f7Stbbdev 
849*51c0b2f7Stbbdev void TestPoolMSize() {
850*51c0b2f7Stbbdev     rml::MemoryPool *pool = CreateUsablePool(1024);
851*51c0b2f7Stbbdev 
852*51c0b2f7Stbbdev     const int SZ = 10;
853*51c0b2f7Stbbdev     // Original allocation requests, random numbers from small to large
854*51c0b2f7Stbbdev     size_t requestedSz[SZ] = {8, 16, 500, 1000, 2000, 4000, 8000, 1024*1024, 4242+4242, 8484+8484};
855*51c0b2f7Stbbdev 
856*51c0b2f7Stbbdev     // Unlike large objects, small objects do not store its original size along with the object itself
857*51c0b2f7Stbbdev     // On Power architecture TLS bins are divided differently.
858*51c0b2f7Stbbdev     size_t allocatedSz[SZ] =
859*51c0b2f7Stbbdev #if __powerpc64__ || __ppc64__ || __bgp__
860*51c0b2f7Stbbdev         {8, 16, 512, 1024, 2688, 5376, 8064, 1024*1024, 4242+4242, 8484+8484};
861*51c0b2f7Stbbdev #else
862*51c0b2f7Stbbdev         {8, 16, 512, 1024, 2688, 4032, 8128, 1024*1024, 4242+4242, 8484+8484};
863*51c0b2f7Stbbdev #endif
864*51c0b2f7Stbbdev     for (int i = 0; i < SZ; i++) {
865*51c0b2f7Stbbdev         void* obj = pool_malloc(pool, requestedSz[i]);
866*51c0b2f7Stbbdev         size_t objSize = pool_msize(pool, obj);
867*51c0b2f7Stbbdev         REQUIRE_MESSAGE(objSize == allocatedSz[i], "pool_msize returned the wrong value");
868*51c0b2f7Stbbdev         pool_free(pool, obj);
869*51c0b2f7Stbbdev     }
870*51c0b2f7Stbbdev     bool destroyed = pool_destroy(pool);
871*51c0b2f7Stbbdev     REQUIRE(destroyed);
872*51c0b2f7Stbbdev }
873*51c0b2f7Stbbdev 
874*51c0b2f7Stbbdev //! \brief \ref error_guessing
875*51c0b2f7Stbbdev TEST_CASE("Too small buffer") {
876*51c0b2f7Stbbdev     TestTooSmallBuffer();
877*51c0b2f7Stbbdev }
878*51c0b2f7Stbbdev 
879*51c0b2f7Stbbdev //! \brief \ref error_guessing
880*51c0b2f7Stbbdev TEST_CASE("Pool reset") {
881*51c0b2f7Stbbdev     TestPoolReset();
882*51c0b2f7Stbbdev }
883*51c0b2f7Stbbdev TEST_CASE("Shared pool") {
884*51c0b2f7Stbbdev     TestSharedPool();
885*51c0b2f7Stbbdev }
886*51c0b2f7Stbbdev 
887*51c0b2f7Stbbdev //! \brief \ref error_guessing
888*51c0b2f7Stbbdev TEST_CASE("Cross thread pools") {
889*51c0b2f7Stbbdev     TestCrossThreadPools();
890*51c0b2f7Stbbdev }
891*51c0b2f7Stbbdev 
892*51c0b2f7Stbbdev //! \brief \ref interface
893*51c0b2f7Stbbdev TEST_CASE("Fixed buffer pool") {
894*51c0b2f7Stbbdev     TestFixedBufferPool();
895*51c0b2f7Stbbdev }
896*51c0b2f7Stbbdev 
897*51c0b2f7Stbbdev //! \brief \ref interface
898*51c0b2f7Stbbdev TEST_CASE("Pool granularity") {
899*51c0b2f7Stbbdev     TestPoolGranularity();
900*51c0b2f7Stbbdev }
901*51c0b2f7Stbbdev 
902*51c0b2f7Stbbdev //! \brief \ref error_guessing
903*51c0b2f7Stbbdev TEST_CASE("Keep pool till destroy") {
904*51c0b2f7Stbbdev     TestPoolKeepTillDestroy();
905*51c0b2f7Stbbdev }
906*51c0b2f7Stbbdev 
907*51c0b2f7Stbbdev //! \brief \ref error_guessing
908*51c0b2f7Stbbdev TEST_CASE("Entries") {
909*51c0b2f7Stbbdev     TestEntries();
910*51c0b2f7Stbbdev }
911*51c0b2f7Stbbdev 
912*51c0b2f7Stbbdev //! \brief \ref interface
913*51c0b2f7Stbbdev TEST_CASE("Pool creation") {
914*51c0b2f7Stbbdev     TestPoolCreation();
915*51c0b2f7Stbbdev }
916*51c0b2f7Stbbdev 
917*51c0b2f7Stbbdev //! \brief \ref error_guessing
918*51c0b2f7Stbbdev TEST_CASE("Pool detection") {
919*51c0b2f7Stbbdev     TestPoolDetection();
920*51c0b2f7Stbbdev }
921*51c0b2f7Stbbdev 
922*51c0b2f7Stbbdev //! \brief \ref error_guessing
923*51c0b2f7Stbbdev TEST_CASE("Lazy bootstrap") {
924*51c0b2f7Stbbdev     TestLazyBootstrap();
925*51c0b2f7Stbbdev }
926*51c0b2f7Stbbdev 
927*51c0b2f7Stbbdev //! \brief \ref error_guessing
928*51c0b2f7Stbbdev TEST_CASE("No leak on destroy") {
929*51c0b2f7Stbbdev     TestNoLeakOnDestroy();
930*51c0b2f7Stbbdev }
931*51c0b2f7Stbbdev 
932*51c0b2f7Stbbdev //! \brief \ref error_guessing
933*51c0b2f7Stbbdev TEST_CASE("Destroy failed") {
934*51c0b2f7Stbbdev     TestDestroyFailed();
935*51c0b2f7Stbbdev }
936*51c0b2f7Stbbdev 
937*51c0b2f7Stbbdev //! \brief \ref interface
938*51c0b2f7Stbbdev TEST_CASE("Pool msize") {
939*51c0b2f7Stbbdev     TestPoolMSize();
940*51c0b2f7Stbbdev }
941