/*
    Copyright (c) 2005-2022 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

//! \file test_malloc_pools.cpp
//! \brief Test for [memory_allocation] functionality

#define __TBB_NO_IMPLICIT_LINKAGE 1

#include "common/test.h"

#define HARNESS_TBBMALLOC_THREAD_SHUTDOWN 1

#include "common/utils.h"
#include "common/utils_assert.h"
#include "common/spin_barrier.h"
#include "common/tls_limit.h"

#include "tbb/scalable_allocator.h"

#include <atomic>
#include <cstring> // memset
#include <cstdlib> // malloc, free

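// Round arg up to the next multiple of alignment (which must be a power of two).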
template<typename T>
static inline T alignUp (T arg, uintptr_t alignment) {
    return T(((uintptr_t)arg+(alignment-1)) & ~(alignment-1));
}

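// A preallocated buffer that backs a pool: regions are carved from `space`
// sequentially, and `regions` counts those currently handed out.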
struct PoolSpace: utils::NoCopy {
    size_t pos;
    int regions;
    size_t bufSize;
    char *space;

    static const size_t BUF_SIZE = 8*1024*1024;

    PoolSpace(size_t bufSz = BUF_SIZE) :
        pos(0), regions(0),
        bufSize(bufSz), space(new char[bufSize]) {
        memset(space, 0, bufSize);
    }
    ~PoolSpace() {
        delete []space;
    }
};

static PoolSpace *poolSpace;

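// Header stored immediately before each region returned by getMallocMem; it
// keeps the raw malloc() pointer and the requested size for validation in putMallocMem.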
struct MallocPoolHeader {
    void *rawPtr;
    size_t userSize;
};

static std::atomic<int> liveRegions;

static void *getMallocMem(intptr_t /*pool_id*/, size_t &bytes)
{
    void *rawPtr = malloc(bytes+sizeof(MallocPoolHeader)+1);
    if (!rawPtr)
        return nullptr;
    // +1 to check that the pool works with unaligned space
    void *ret = (void *)((uintptr_t)rawPtr+sizeof(MallocPoolHeader)+1);

    MallocPoolHeader *hdr = (MallocPoolHeader*)ret-1;
    hdr->rawPtr = rawPtr;
    hdr->userSize = bytes;

    liveRegions++;

    return ret;
}

static int putMallocMem(intptr_t /*pool_id*/, void *ptr, size_t bytes)
{
    MallocPoolHeader *hdr = (MallocPoolHeader*)ptr-1;
    ASSERT(bytes == hdr->userSize, "Invalid size in pool callback.");
    free(hdr->rawPtr);

    liveRegions--;

    return 0;
}

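// pool_reset() must let the pool reuse the regions it already owns: repeating
// the same allocation pattern after a reset must not request new regions, and
// pool_destroy() must return every region to the callback.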
void TestPoolReset()
{
    rml::MemPoolPolicy pol(getMallocMem, putMallocMem);
    rml::MemoryPool *pool;

    pool_create_v1(0, &pol, &pool);
    for (int i=0; i<100; i++) {
        REQUIRE(pool_malloc(pool, 8));
        REQUIRE(pool_malloc(pool, 50*1024));
    }
    int regionsBeforeReset = liveRegions.load(std::memory_order_acquire);
    bool ok = pool_reset(pool);
    REQUIRE(ok);
    for (int i=0; i<100; i++) {
        REQUIRE(pool_malloc(pool, 8));
        REQUIRE(pool_malloc(pool, 50*1024));
    }
    REQUIRE_MESSAGE(regionsBeforeReset == liveRegions.load(std::memory_order_relaxed),
                    "Expected no new region allocations.");
    ok = pool_destroy(pool);
    REQUIRE(ok);
    REQUIRE_MESSAGE(!liveRegions.load(std::memory_order_relaxed), "Expected all regions to be released.");
}

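// Worker for TestSharedPool: all threads allocate from one shared pool; each
// thread then frees the cross-thread objects of a "victim" thread, while the
// afterTerm objects are freed by the main thread after the workers exit.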
class SharedPoolRun: utils::NoAssign {
    static long threadNum;
    static utils::SpinBarrier startB,
                              mallocDone;
    static rml::MemoryPool *pool;
    static void **crossThread,
                **afterTerm;
public:
    static const int OBJ_CNT = 100;

    static void init(int num, rml::MemoryPool *pl, void **crThread, void **aTerm) {
        threadNum = num;
        pool = pl;
        crossThread = crThread;
        afterTerm = aTerm;
        startB.initialize(threadNum);
        mallocDone.initialize(threadNum);
    }

    void operator()( int id ) const {
        const int ITERS = 1000;
        void *local[ITERS];

        startB.wait();
        for (int i=id*OBJ_CNT; i<(id+1)*OBJ_CNT; i++) {
            afterTerm[i] = pool_malloc(pool, i%2? 8*1024 : 9*1024);
            memset(afterTerm[i], i, i%2? 8*1024 : 9*1024);
            crossThread[i] = pool_malloc(pool, i%2? 9*1024 : 8*1024);
            memset(crossThread[i], i, i%2? 9*1024 : 8*1024);
        }

        for (int i=1; i<ITERS; i+=2) {
            local[i-1] = pool_malloc(pool, 6*1024);
            memset(local[i-1], i, 6*1024);
            local[i] = pool_malloc(pool, 16*1024);
            memset(local[i], i, 16*1024);
        }
        mallocDone.wait();
        int myVictim = threadNum-id-1;
        for (int i=myVictim*OBJ_CNT; i<(myVictim+1)*OBJ_CNT; i++)
            pool_free(pool, crossThread[i]);
        for (int i=0; i<ITERS; i++)
            pool_free(pool, local[i]);
    }
};

long SharedPoolRun::threadNum;
utils::SpinBarrier SharedPoolRun::startB,
                   SharedPoolRun::mallocDone;
rml::MemoryPool *SharedPoolRun::pool;
void **SharedPoolRun::crossThread,
     **SharedPoolRun::afterTerm;

// single pool shared by different threads
void TestSharedPool()
{
    rml::MemPoolPolicy pol(getMallocMem, putMallocMem);
    rml::MemoryPool *pool;

    pool_create_v1(0, &pol, &pool);
    void **crossThread = new void*[utils::MaxThread * SharedPoolRun::OBJ_CNT];
    void **afterTerm = new void*[utils::MaxThread * SharedPoolRun::OBJ_CNT];

    for (int p=utils::MinThread; p<=utils::MaxThread; p++) {
        SharedPoolRun::init(p, pool, crossThread, afterTerm);
        SharedPoolRun thr;

        void *hugeObj = pool_malloc(pool, 10*1024*1024);
        REQUIRE(hugeObj);

        utils::NativeParallelFor( p, thr );

        pool_free(pool, hugeObj);
        for (int i=0; i<p*SharedPoolRun::OBJ_CNT; i++)
            pool_free(pool, afterTerm[i]);
    }
    delete []afterTerm;
    delete []crossThread;

    bool ok = pool_destroy(pool);
    REQUIRE(ok);
    REQUIRE_MESSAGE(!liveRegions.load(std::memory_order_relaxed), "Expected all regions to be released.");
}

void *CrossThreadGetMem(intptr_t pool_id, size_t &bytes)
{
    if (poolSpace[pool_id].pos + bytes > poolSpace[pool_id].bufSize)
        return nullptr;

    void *ret = poolSpace[pool_id].space + poolSpace[pool_id].pos;
    poolSpace[pool_id].pos += bytes;
    poolSpace[pool_id].regions++;

    return ret;
}

int CrossThreadPutMem(intptr_t pool_id, void* /*raw_ptr*/, size_t /*raw_bytes*/)
{
    poolSpace[pool_id].regions--;
    return 0;
}

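// Worker for TestCrossThreadPools: each thread creates its own pool and
// allocates from it; after the barrier, each thread checks, frees, and
// destroys the pool created by another thread.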
class CrossThreadRun: utils::NoAssign {
    static long number_of_threads;
    static utils::SpinBarrier barrier;
    static rml::MemoryPool **pool;
    static char **obj;
public:
    static void initBarrier(unsigned thrds) { barrier.initialize(thrds); }
    static void init(long num) {
        number_of_threads = num;
        pool = new rml::MemoryPool*[number_of_threads];
        poolSpace = new PoolSpace[number_of_threads];
        obj = new char*[number_of_threads];
    }
    static void destroy() {
        for (long i=0; i<number_of_threads; i++)
            REQUIRE_MESSAGE(!poolSpace[i].regions, "Memory leak detected");
        delete []pool;
        delete []poolSpace;
        delete []obj;
    }
    CrossThreadRun() {}
    void operator()( int id ) const {
        rml::MemPoolPolicy pol(CrossThreadGetMem, CrossThreadPutMem);
        const int objLen = 10*id;

        pool_create_v1(id, &pol, &pool[id]);
        obj[id] = (char*)pool_malloc(pool[id], objLen);
        REQUIRE(obj[id]);
        memset(obj[id], id, objLen);

        {
            const size_t lrgSz = 2*16*1024;
            void *ptrLarge = pool_malloc(pool[id], lrgSz);
            REQUIRE(ptrLarge);
            memset(ptrLarge, 1, lrgSz);
            // consume all small objects
            while (pool_malloc(pool[id], 5 * 1024));
            // Releasing the large object does not make further small allocations
            // possible, since only a fixed-size pool may search other bins
            // (aligned/notAligned).
            pool_free(pool[id], ptrLarge);
            CHECK(!pool_malloc(pool[id], 5*1024));
        }

        barrier.wait();
        int myPool = number_of_threads-id-1;
        for (int i=0; i<10*myPool; i++)
            REQUIRE(myPool==obj[myPool][i]);
        pool_free(pool[myPool], obj[myPool]);
        bool ok = pool_destroy(pool[myPool]);
        REQUIRE(ok);
    }
};

long CrossThreadRun::number_of_threads;
utils::SpinBarrier CrossThreadRun::barrier;
rml::MemoryPool **CrossThreadRun::pool;
char **CrossThreadRun::obj;

// pools created, used and destroyed by different threads
void TestCrossThreadPools()
{
    for (int p=utils::MinThread; p<=utils::MaxThread; p++) {
        CrossThreadRun::initBarrier(p);
        CrossThreadRun::init(p);
        utils::NativeParallelFor( p, CrossThreadRun() );
        for (int i=0; i<p; i++)
            REQUIRE_MESSAGE(!poolSpace[i].regions, "Region leak detected");
        CrossThreadRun::destroy();
    }
}

// The buffer is too small for a pool to be created, but creation must not leak resources.
void TestTooSmallBuffer()
{
    poolSpace = new PoolSpace(8*1024);

    rml::MemPoolPolicy pol(CrossThreadGetMem, CrossThreadPutMem);
    rml::MemoryPool *pool;
    pool_create_v1(0, &pol, &pool);
    bool ok = pool_destroy(pool);
    REQUIRE(ok);
    REQUIRE_MESSAGE(!poolSpace[0].regions, "Expected no leaked regions.");

    delete poolSpace;
}

class FixedPoolHeadBase : utils::NoAssign {
    size_t size;
    std::atomic<bool> used;
    char* data;
public:
    FixedPoolHeadBase(size_t s) : size(s), used(false) {
        data = new char[size];
    }
    void *useData(size_t &bytes) {
        bool wasUsed = used.exchange(true);
        REQUIRE_MESSAGE(!wasUsed, "The buffer must not be used twice.");
        bytes = size;
        return data;
    }
    ~FixedPoolHeadBase() {
        delete []data;
    }
};

template<size_t SIZE>
class FixedPoolHead : FixedPoolHeadBase {
public:
    FixedPoolHead() : FixedPoolHeadBase(SIZE) { }
};

static void *fixedBufGetMem(intptr_t pool_id, size_t &bytes)
{
    return ((FixedPoolHeadBase*)pool_id)->useData(bytes);
}

class FixedPoolUse: utils::NoAssign {
    static utils::SpinBarrier startB;
    rml::MemoryPool *pool;
    size_t reqSize;
    int iters;
public:
    FixedPoolUse(unsigned threads, rml::MemoryPool *p, size_t sz, int it) :
        pool(p), reqSize(sz), iters(it) {
        startB.initialize(threads);
    }
    void operator()( int /*id*/ ) const {
        startB.wait();
        for (int i=0; i<iters; i++) {
            void *o = pool_malloc(pool, reqSize);
            ASSERT(o, "Invalid object");
            pool_free(pool, o);
        }
    }
};

utils::SpinBarrier FixedPoolUse::startB;

class FixedPoolNomem: utils::NoAssign {
    utils::SpinBarrier *startB;
    rml::MemoryPool *pool;
public:
    FixedPoolNomem(utils::SpinBarrier *b, rml::MemoryPool *p) :
        startB(b), pool(p) {}
    void operator()(int id) const {
        startB->wait();
        void *o = pool_malloc(pool, id%2? 64 : 128*1024);
        ASSERT(!o, "All memory must be consumed.");
    }
};

class FixedPoolSomeMem: utils::NoAssign {
    utils::SpinBarrier *barrier;
    rml::MemoryPool *pool;
public:
    FixedPoolSomeMem(utils::SpinBarrier *b, rml::MemoryPool *p) :
        barrier(b), pool(p) {}
    void operator()(int id) const {
        barrier->wait();
        utils::Sleep(2*id);
        void *o = pool_malloc(pool, id%2? 64 : 128*1024);
        barrier->wait();
        pool_free(pool, o);
    }
};

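// Returns true if the pool can currently satisfy a request of sz bytes
// (probe-allocate and immediately free).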
bool haveEnoughSpace(rml::MemoryPool *pool, size_t sz)
{
    if (void *p = pool_malloc(pool, sz)) {
        pool_free(pool, p);
        return true;
    }
    return false;
}

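// Exercise a fixed-size pool backed by a single user buffer: concurrent use,
// probing the maximal allocatable size via binary search, out-of-memory
// behavior, and a fresh pool serving many threads.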
void TestFixedBufferPool()
{
    const int ITERS = 7;
    const size_t MAX_OBJECT = 7*1024*1024;
    void *ptrs[ITERS];
    rml::MemPoolPolicy pol(fixedBufGetMem, nullptr, 0, /*fixedSizePool=*/true,
                           /*keepMemTillDestroy=*/false);
    rml::MemoryPool *pool;
    {
        FixedPoolHead<MAX_OBJECT + 1024*1024> head;

        pool_create_v1((intptr_t)&head, &pol, &pool);
        {
            utils::NativeParallelFor( 1, FixedPoolUse(1, pool, MAX_OBJECT, 2) );

            for (int i=0; i<ITERS; i++) {
                ptrs[i] = pool_malloc(pool, MAX_OBJECT/ITERS);
                REQUIRE(ptrs[i]);
            }
            for (int i=0; i<ITERS; i++)
                pool_free(pool, ptrs[i]);

            utils::NativeParallelFor( 1, FixedPoolUse(1, pool, MAX_OBJECT, 1) );
        }
        // Each thread asks for a MAX_OBJECT/p/2 object;
        // the /2 leaves headroom for fragmentation.
        for (int p=utils::MinThread; p<=utils::MaxThread; p++) {
            utils::NativeParallelFor( p, FixedPoolUse(p, pool, MAX_OBJECT/p/2, 10000) );
        }
        {
            const int p = 128;
            utils::NativeParallelFor( p, FixedPoolUse(p, pool, MAX_OBJECT/p/2, 1) );
        }
        {
            size_t maxSz;
            const int p = 256;
            utils::SpinBarrier barrier(p);

            // Find the maximal usable object size. Start with MAX_OBJECT/2,
            // as the pool might be fragmented by BootStrapBlocks consumed during
            // the FixedPoolUse runs.
            size_t l, r;
            REQUIRE(haveEnoughSpace(pool, MAX_OBJECT/2));
            for (l = MAX_OBJECT/2, r = MAX_OBJECT + 1024*1024; l < r-1; ) {
                size_t mid = (l+r)/2;
                if (haveEnoughSpace(pool, mid))
                    l = mid;
                else
                    r = mid;
            }
            maxSz = l;
            REQUIRE_MESSAGE(!haveEnoughSpace(pool, maxSz+1), "Expect to find the boundary value.");
            // consume all available memory
            void *largeObj = pool_malloc(pool, maxSz);
            REQUIRE(largeObj);
            void *o = pool_malloc(pool, 64);
            if (o) // the pool is fragmented, skip FixedPoolNomem
                pool_free(pool, o);
            else
                utils::NativeParallelFor( p, FixedPoolNomem(&barrier, pool) );
            pool_free(pool, largeObj);
            // keep some space unoccupied
            largeObj = pool_malloc(pool, maxSz-512*1024);
            REQUIRE(largeObj);
            utils::NativeParallelFor( p, FixedPoolSomeMem(&barrier, pool) );
            pool_free(pool, largeObj);
        }
        bool ok = pool_destroy(pool);
        REQUIRE(ok);
    }
    // check that a fresh untouched pool can successfully fulfil requests from 128 threads
    {
        FixedPoolHead<MAX_OBJECT + 1024*1024> head;
        pool_create_v1((intptr_t)&head, &pol, &pool);
        int p=128;
        utils::NativeParallelFor( p, FixedPoolUse(p, pool, MAX_OBJECT/p/2, 1) );
        bool ok = pool_destroy(pool);
        REQUIRE(ok);
    }
}

static size_t currGranularity;

static void *getGranMem(intptr_t /*pool_id*/, size_t &bytes)
{
    REQUIRE_MESSAGE(!(bytes%currGranularity), "Region size does not match granularity.");
    return malloc(bytes);
}

static int putGranMem(intptr_t /*pool_id*/, void *ptr, size_t bytes)
{
    REQUIRE_MESSAGE(!(bytes%currGranularity), "Region size does not match granularity.");
    free(ptr);
    return 0;
}

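// Every region the pool requests or returns must be a multiple of the
// configured pol.granularity, checked for granularities from 4 KB to 10 MB.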
void TestPoolGranularity()
{
    rml::MemPoolPolicy pol(getGranMem, putGranMem);
    const size_t grans[] = {4*1024, 2*1024*1024, 6*1024*1024, 10*1024*1024};

    for (unsigned i=0; i<sizeof(grans)/sizeof(grans[0]); i++) {
        pol.granularity = currGranularity = grans[i];
        rml::MemoryPool *pool;

        pool_create_v1(0, &pol, &pool);
        for (int sz=500*1024; sz<16*1024*1024; sz+=101*1024) {
            void *p = pool_malloc(pool, sz);
            REQUIRE_MESSAGE(p, "Can't allocate memory in pool.");
            pool_free(pool, p);
        }
        bool ok = pool_destroy(pool);
        REQUIRE(ok);
    }
}

static size_t putMemAll, getMemAll, getMemSuccessful;

static void *getMemMalloc(intptr_t /*pool_id*/, size_t &bytes)
{
    getMemAll++;
    void *p = malloc(bytes);
    if (p)
        getMemSuccessful++;
    return p;
}

static int putMemFree(intptr_t /*pool_id*/, void *ptr, size_t /*bytes*/)
{
    putMemAll++;
    free(ptr);
    return 0;
}

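// With keepAllMemory=1 the pool must keep its regions until pool_destroy();
// with the default policy, freeing all objects eventually returns regions to
// the callback. In both cases getMem and putMem calls must balance out.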
void TestPoolKeepTillDestroy()
{
    const int ITERS = 50*1024;
    void *ptrs[2*ITERS+1];
    rml::MemPoolPolicy pol(getMemMalloc, putMemFree);
    rml::MemoryPool *pool;

    // First create a default pool that returns memory back to the callback,
    // then repeat with the keepMemTillDestroy policy.
    for (int keep=0; keep<2; keep++) {
        getMemAll = putMemAll = 0;
        if (keep)
            pol.keepAllMemory = 1;
        pool_create_v1(0, &pol, &pool);
        for (int i=0; i<2*ITERS; i+=2) {
            ptrs[i] = pool_malloc(pool, 7*1024);
            ptrs[i+1] = pool_malloc(pool, 10*1024);
        }
        ptrs[2*ITERS] = pool_malloc(pool, 8*1024*1024);
        REQUIRE(!putMemAll);
        for (int i=0; i<2*ITERS; i++)
            pool_free(pool, ptrs[i]);
        pool_free(pool, ptrs[2*ITERS]);
        size_t totalPutMemCalls = putMemAll;
        if (keep)
            REQUIRE(!putMemAll);
        else {
            REQUIRE(putMemAll);
            putMemAll = 0;
        }
        size_t getCallsBefore = getMemAll;
        void *p = pool_malloc(pool, 8*1024*1024);
        REQUIRE(p);
        if (keep)
            REQUIRE_MESSAGE(getCallsBefore == getMemAll, "Must not lead to a new getMem call");
        size_t putCallsBefore = putMemAll;
        bool ok = pool_reset(pool);
        REQUIRE(ok);
        REQUIRE_MESSAGE(putCallsBefore == putMemAll, "The pool must not release memory during reset.");
        ok = pool_destroy(pool);
        REQUIRE(ok);
        REQUIRE(putMemAll);
        totalPutMemCalls += putMemAll;
        REQUIRE_MESSAGE(getMemAll == totalPutMemCalls, "Memory leak detected.");
    }
}

static bool memEqual(char *buf, size_t size, int val)
{
    bool memEq = true;
    for (size_t k=0; k<size; k++)
        if (buf[k] != val)
            memEq = false;
    return memEq;
}

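// Cover the remaining entry points: pool_aligned_malloc, pool_aligned_realloc,
// and pool_realloc over a matrix of sizes and alignments, verifying alignment
// and content preservation; also check pool_destroy/pool_reset on nullptr.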
void TestEntries()
{
    const int SZ = 4;
    const int ALGN = 4;
    size_t size[SZ] = {8, 8000, 9000, 100*1024};
    size_t algn[ALGN] = {8, 64, 4*1024, 8*1024*1024};

    rml::MemPoolPolicy pol(getGranMem, putGranMem);
    currGranularity = 1; // granularity is not checked in this test
    rml::MemoryPool *pool;

    pool_create_v1(0, &pol, &pool);
    for (int i=0; i<SZ; i++)
        for (int j=0; j<ALGN; j++) {
            char *p = (char*)pool_aligned_malloc(pool, size[i], algn[j]);
            REQUIRE((p && 0==((uintptr_t)p & (algn[j]-1))));
            memset(p, j, size[i]);

            size_t curr_algn = algn[rand() % ALGN];
            size_t curr_sz = size[rand() % SZ];
            char *p1 = (char*)pool_aligned_realloc(pool, p, curr_sz, curr_algn);
            REQUIRE((p1 && 0==((uintptr_t)p1 & (curr_algn-1))));
            REQUIRE(memEqual(p1, utils::min(size[i], curr_sz), j));

            memset(p1, j+1, curr_sz);
            size_t curr_sz1 = size[rand() % SZ];
            char *p2 = (char*)pool_realloc(pool, p1, curr_sz1);
            REQUIRE(p2);
            REQUIRE(memEqual(p2, utils::min(curr_sz1, curr_sz), j+1));

            pool_free(pool, p2);
        }

    bool ok = pool_destroy(pool);
    REQUIRE(ok);

    bool fail = rml::pool_destroy(nullptr);
    REQUIRE(!fail);
    fail = rml::pool_reset(nullptr);
    REQUIRE(!fail);
}

rml::MemoryPool *CreateUsablePool(size_t size)
{
    rml::MemoryPool *pool;
    rml::MemPoolPolicy okPolicy(getMemMalloc, putMemFree);

    putMemAll = getMemAll = getMemSuccessful = 0;
    rml::MemPoolError res = pool_create_v1(0, &okPolicy, &pool);
    if (res != rml::POOL_OK) {
        REQUIRE_MESSAGE((!getMemAll && !putMemAll), "No callbacks after fail.");
        return nullptr;
    }
    void *o = pool_malloc(pool, size);
    if (!getMemSuccessful) {
        // no memory from the callback, a valid reason to leave
        REQUIRE_MESSAGE(!o, "The pool must be unusable.");
        return nullptr;
    }
    REQUIRE_MESSAGE(o, "The created pool must be usable.");
    REQUIRE_MESSAGE((getMemSuccessful == 1 || getMemSuccessful == 5 || getMemAll > getMemSuccessful),
                    "Multiple requests are allowed only when an unsuccessful request occurred "
                    "or bootstrap memory cannot be searched.");
    REQUIRE(!putMemAll);
    pool_free(pool, o);

    return pool;
}

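// Repeatedly create as many usable pools as possible and destroy them all;
// the maximal count must stabilize across iterations, otherwise creation or
// destruction leaks some per-pool resource (e.g. a TLS key).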
void CheckPoolLeaks(size_t poolsAlwaysAvailable)
{
    const size_t MAX_POOLS = 16*1000;
    const int ITERS = 20, CREATED_STABLE = 3;
    rml::MemoryPool *pools[MAX_POOLS];
    size_t created, maxCreated = MAX_POOLS;
    int maxNotChangedCnt = 0;

    // Expect that within ITERS runs the maximal number of pools that can be
    // created stabilizes, i.e. stays unchanged CREATED_STABLE times in a row.
    for (int j=0; j<ITERS && maxNotChangedCnt<CREATED_STABLE; j++) {
        for (created=0; created<maxCreated; created++) {
            rml::MemoryPool *p = CreateUsablePool(1024);
            if (!p)
                break;
            pools[created] = p;
        }
        REQUIRE_MESSAGE(created>=poolsAlwaysAvailable,
                        "Expect that a reasonable number of pools can always be created.");
        for (size_t i=0; i<created; i++) {
            bool ok = pool_destroy(pools[i]);
            REQUIRE(ok);
        }
        if (created < maxCreated) {
            maxCreated = created;
            maxNotChangedCnt = 0;
        } else
            maxNotChangedCnt++;
    }
    REQUIRE_MESSAGE(maxNotChangedCnt == CREATED_STABLE, "The number of created pools must stabilize.");
}

void TestPoolCreation()
{
    putMemAll = getMemAll = getMemSuccessful = 0;

    rml::MemPoolPolicy nullPolicy(nullptr, putMemFree),
                       emptyFreePolicy(getMemMalloc, nullptr),
                       okPolicy(getMemMalloc, putMemFree);
    rml::MemoryPool *pool;

    rml::MemPoolError res = pool_create_v1(0, &nullPolicy, &pool);
    REQUIRE_MESSAGE(res==rml::INVALID_POLICY, "A pool with an empty pAlloc must not be created");
    res = pool_create_v1(0, &emptyFreePolicy, &pool);
    REQUIRE_MESSAGE(res==rml::INVALID_POLICY, "A pool with an empty pFree must not be created");
    REQUIRE_MESSAGE((!putMemAll && !getMemAll), "No callback calls are expected");
    res = pool_create_v1(0, &okPolicy, &pool);
    REQUIRE(res==rml::POOL_OK);
    bool ok = pool_destroy(pool);
    REQUIRE(ok);
    REQUIRE_MESSAGE(putMemAll == getMemSuccessful, "No leaks expected after pool_destroy");

    // 32 is a guess for the number of pools that is acceptable everywhere
    CheckPoolLeaks(32);
    // try to consume all but 16 TLS keys
    LimitTLSKeysTo limitTLSTo(16);
    // ...and check that we can create at least 16 pools
    CheckPoolLeaks(16);
}

struct AllocatedObject {
    rml::MemoryPool *pool;
};

const size_t BUF_SIZE = 1024*1024;

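// Worker for TestPoolDetection: allocates from its own pool and checks that
// rml::pool_identify() maps each object (large and small) back to that pool.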
class PoolIdentityCheck : utils::NoAssign {
    rml::MemoryPool** const pools;
    AllocatedObject** const objs;
public:
    PoolIdentityCheck(rml::MemoryPool** p, AllocatedObject** o) : pools(p), objs(o) {}
    void operator()(int id) const {
        objs[id] = (AllocatedObject*)pool_malloc(pools[id], BUF_SIZE/2);
        REQUIRE(objs[id]);
        rml::MemoryPool *act_pool = rml::pool_identify(objs[id]);
        REQUIRE(act_pool == pools[id]);

        for (size_t total=0; total<2*BUF_SIZE; total+=256) {
            AllocatedObject *o = (AllocatedObject*)pool_malloc(pools[id], 256);
            REQUIRE(o);
            act_pool = rml::pool_identify(o);
            REQUIRE(act_pool == pools[id]);
            pool_free(act_pool, o);
        }
        if( id&1 ) { // make every second returned object "small"
            pool_free(act_pool, objs[id]);
            objs[id] = (AllocatedObject*)pool_malloc(pools[id], 16);
            REQUIRE(objs[id]);
        }
        objs[id]->pool = act_pool;
    }
};

void TestPoolDetection()
{
    const int POOLS = 4;
    rml::MemPoolPolicy pol(fixedBufGetMem, nullptr, 0, /*fixedSizePool=*/true,
                           /*keepMemTillDestroy=*/false);
    rml::MemoryPool *pools[POOLS];
    FixedPoolHead<BUF_SIZE*POOLS> head[POOLS];
    AllocatedObject *objs[POOLS];

    for (int i=0; i<POOLS; i++)
        pool_create_v1((intptr_t)(head+i), &pol, &pools[i]);
    // If an object were somehow released into a different pool, subsequent
    // allocations from the affected pools would become impossible.
    for (int k=0; k<10; k++) {
        PoolIdentityCheck check(pools, objs);
        if( k&1 )
            utils::NativeParallelFor( POOLS, check);
        else
            for (int i=0; i<POOLS; i++) check(i);

        for (int i=0; i<POOLS; i++) {
            rml::MemoryPool *p = rml::pool_identify(objs[i]);
            REQUIRE(p == objs[i]->pool);
            pool_free(p, objs[i]);
        }
    }
    for (int i=0; i<POOLS; i++) {
        bool ok = pool_destroy(pools[i]);
        REQUIRE(ok);
    }
}

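// Pool bootstrap is lazy: memory is requested from the callback only on the
// first allocation, and both small and large first allocations must leave no
// leak after pool_destroy().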
void TestLazyBootstrap()
{
    rml::MemPoolPolicy pol(getMemMalloc, putMemFree);
    const size_t sizes[] = {8, 9*1024, 0};

    for (int i=0; sizes[i]; i++) {
        rml::MemoryPool *pool = CreateUsablePool(sizes[i]);
        bool ok = pool_destroy(pool);
        REQUIRE(ok);
        REQUIRE_MESSAGE(getMemSuccessful == putMemAll, "Expected no leaks.");
    }
}

class NoLeakOnDestroyRun: utils::NoAssign {
    rml::MemoryPool *pool;
    utils::SpinBarrier *barrier;
public:
    NoLeakOnDestroyRun(rml::MemoryPool *p, utils::SpinBarrier *b) : pool(p), barrier(b) {}
    void operator()(int id) const {
        void *p = pool_malloc(pool, id%2? 8 : 9000);
        REQUIRE((p && liveRegions.load(std::memory_order_relaxed)));
        barrier->wait();
        if (!id) {
            bool ok = pool_destroy(pool);
            REQUIRE(ok);
            REQUIRE_MESSAGE(!liveRegions.load(std::memory_order_relaxed), "Expected all regions to be released.");
        }
        // The other threads must wait until the pool is destroyed, so that
        // their per-thread cleanup does not run before the destruction.
        barrier->wait();
    }
};

void TestNoLeakOnDestroy()
{
    liveRegions.store(0, std::memory_order_release);
    for (int p=utils::MinThread; p<=utils::MaxThread; p++) {
        rml::MemPoolPolicy pol(getMallocMem, putMallocMem);
        utils::SpinBarrier barrier(p);
        rml::MemoryPool *pool;

        pool_create_v1(0, &pol, &pool);
        utils::NativeParallelFor(p, NoLeakOnDestroyRun(pool, &barrier));
    }
}

static int putMallocMemError(intptr_t /*pool_id*/, void *ptr, size_t bytes)
{
    MallocPoolHeader *hdr = (MallocPoolHeader*)ptr-1;
    REQUIRE_MESSAGE(bytes == hdr->userSize, "Invalid size in pool callback.");
    free(hdr->rawPtr);

    liveRegions--;

    return -1;
}

void TestDestroyFailed()
{
    rml::MemPoolPolicy pol(getMallocMem, putMallocMemError);
    rml::MemoryPool *pool;
    pool_create_v1(0, &pol, &pool);
    void *ptr = pool_malloc(pool, 16);
    REQUIRE(ptr);
    bool fail = pool_destroy(pool);
    REQUIRE_MESSAGE(fail==false, "The putMallocMemError callback returns an error, "
                    "so pool_destroy() is expected to fail");
}

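// pool_msize() must return the allocated size of an object: small requests are
// rounded up to the size of the serving bin, large requests keep their size.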
void TestPoolMSize() {
    rml::MemoryPool *pool = CreateUsablePool(1024);

    const int SZ = 10;
    // Original allocation requests: random numbers from small to large
    size_t requestedSz[SZ] = {8, 16, 500, 1000, 2000, 4000, 8000, 1024*1024, 4242+4242, 8484+8484};

    // Unlike large objects, small objects do not store their original size
    // along with the object itself.
    // On the Power architecture, TLS bins are divided differently.
    size_t allocatedSz[SZ] =
#if __powerpc64__ || __ppc64__ || __bgp__
        {8, 16, 512, 1024, 2688, 5376, 8064, 1024*1024, 4242+4242, 8484+8484};
#else
        {8, 16, 512, 1024, 2688, 4032, 8128, 1024*1024, 4242+4242, 8484+8484};
#endif
    for (int i = 0; i < SZ; i++) {
        void* obj = pool_malloc(pool, requestedSz[i]);
        size_t objSize = pool_msize(pool, obj);
        REQUIRE_MESSAGE(objSize == allocatedSz[i], "pool_msize returned the wrong value");
        pool_free(pool, obj);
    }
    bool destroyed = pool_destroy(pool);
    REQUIRE(destroyed);
}

//! \brief \ref error_guessing
TEST_CASE("Too small buffer") {
    TestTooSmallBuffer();
}

//! \brief \ref error_guessing
TEST_CASE("Pool reset") {
    TestPoolReset();
}

//! \brief \ref error_guessing
TEST_CASE("Shared pool") {
    TestSharedPool();
}

//! \brief \ref error_guessing
TEST_CASE("Cross thread pools") {
    TestCrossThreadPools();
}

//! \brief \ref interface
TEST_CASE("Fixed buffer pool") {
    TestFixedBufferPool();
}

//! \brief \ref interface
TEST_CASE("Pool granularity") {
    TestPoolGranularity();
}

//! \brief \ref error_guessing
TEST_CASE("Keep pool till destroy") {
    TestPoolKeepTillDestroy();
}

//! \brief \ref error_guessing
TEST_CASE("Entries") {
    TestEntries();
}

//! \brief \ref interface
TEST_CASE("Pool creation") {
    TestPoolCreation();
}

//! \brief \ref error_guessing
TEST_CASE("Pool detection") {
    TestPoolDetection();
}

//! \brief \ref error_guessing
TEST_CASE("Lazy bootstrap") {
    TestLazyBootstrap();
}

//! \brief \ref error_guessing
TEST_CASE("No leak on destroy") {
    TestNoLeakOnDestroy();
}

//! \brief \ref error_guessing
TEST_CASE("Destroy failed") {
    TestDestroyFailed();
}

//! \brief \ref interface
TEST_CASE("Pool msize") {
    TestPoolMSize();
}