1 /*
2 * Active memory defragmentation
3 * Try to find key / value allocations that need to be re-allocated in order
4 * to reduce external fragmentation.
5 * We do that by scanning the keyspace and for each pointer we have, we can try to
6 * ask the allocator if moving it to a new address will help reduce fragmentation.
7 *
8 * Copyright (c) 2017, Oran Agra
9 * Copyright (c) 2017, Redis Labs, Inc
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions are met:
14 *
15 * * Redistributions of source code must retain the above copyright notice,
16 * this list of conditions and the following disclaimer.
17 * * Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * * Neither the name of Redis nor the names of its contributors may be used
21 * to endorse or promote products derived from this software without
22 * specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "server.h"
38 #include <time.h>
39 #include <assert.h>
40 #include <stddef.h>
41
42 #ifdef HAVE_DEFRAG
43
44 /* this method was added to jemalloc in order to help us understand which
45 * pointers are worthwhile moving and which aren't */
46 int je_get_defrag_hint(void* ptr, int *bin_util, int *run_util);
47
/* forward declarations */
49 void defragDictBucketCallback(void *privdata, dictEntry **bucketref);
50 dictEntry* replaceSateliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sds newkey, uint64_t hash, long *defragged);
51
/* Defrag helper for generic allocations.
 *
 * Returns NULL in case the allocation wasn't moved.
 * When it returns a non-null value, the old pointer was already released
 * and should NOT be accessed. */
void* activeDefragAlloc(void *ptr) {
    int bin_util, run_util;
    size_t size;
    void *newptr;
    /* Ask jemalloc whether moving this pointer could reduce fragmentation
     * (see je_get_defrag_hint above). Zero means it is not a candidate. */
    if(!je_get_defrag_hint(ptr, &bin_util, &run_util)) {
        server.stat_active_defrag_misses++;
        return NULL;
    }
    /* if this run is more utilized than the average utilization in this bin
     * (or it is full), skip it. This will eventually move all the allocations
     * from relatively empty runs into relatively full runs. */
    if (run_util > bin_util || run_util == 1<<16) {
        server.stat_active_defrag_misses++;
        return NULL;
    }
    /* move this allocation to a new allocation.
     * make sure not to use the thread cache. so that we don't get back the same
     * pointers we try to free */
    size = zmalloc_size(ptr);
    newptr = zmalloc_no_tcache(size);
    memcpy(newptr, ptr, size);
    zfree_no_tcache(ptr);
    return newptr;
}
81
82 /*Defrag helper for sds strings
83 *
84 * returns NULL in case the allocatoin wasn't moved.
85 * when it returns a non-null value, the old pointer was already released
86 * and should NOT be accessed. */
activeDefragSds(sds sdsptr)87 sds activeDefragSds(sds sdsptr) {
88 void* ptr = sdsAllocPtr(sdsptr);
89 void* newptr = activeDefragAlloc(ptr);
90 if (newptr) {
91 size_t offset = sdsptr - (char*)ptr;
92 sdsptr = (char*)newptr + offset;
93 return sdsptr;
94 }
95 return NULL;
96 }
97
/* Defrag helper for robj and/or string objects
 *
 * Returns NULL in case the allocation wasn't moved.
 * When it returns a non-null value, the old pointer was already released
 * and should NOT be accessed.
 * 'defragged' is incremented once for each pointer that was moved. */
robj *activeDefragStringOb(robj* ob, long *defragged) {
    robj *ret = NULL;
    /* refcount != 1 means other references exist; moving the object would
     * leave them dangling, so shared objects are skipped. */
    if (ob->refcount!=1)
        return NULL;

    /* try to defrag robj (only if not an EMBSTR type (handled below). */
    if (ob->type!=OBJ_STRING || ob->encoding!=OBJ_ENCODING_EMBSTR) {
        if ((ret = activeDefragAlloc(ob))) {
            ob = ret;
            (*defragged)++;
        }
    }

    /* try to defrag string object */
    if (ob->type == OBJ_STRING) {
        if(ob->encoding==OBJ_ENCODING_RAW) {
            sds newsds = activeDefragSds((sds)ob->ptr);
            if (newsds) {
                ob->ptr = newsds;
                (*defragged)++;
            }
        } else if (ob->encoding==OBJ_ENCODING_EMBSTR) {
            /* The sds is embedded in the object allocation, calculate the
             * offset and update the pointer in the new allocation. */
            long ofs = (intptr_t)ob->ptr - (intptr_t)ob;
            if ((ret = activeDefragAlloc(ob))) {
                ret->ptr = (void*)((intptr_t)ret + ofs);
                (*defragged)++;
            }
        } else if (ob->encoding!=OBJ_ENCODING_INT) {
            serverPanic("Unknown string encoding");
        }
    }
    return ret;
}
138
/* Defrag helper for dictEntries to be used during dict iteration (called on
 * each step). Returns a stat of how many pointers were moved. */
long dictIterDefragEntry(dictIterator *iter) {
    /* This function is a little bit dirty since it messes with the internals
     * of the dict and it's iterator, but the benefit is that it is very easy
     * to use, and require no other changes in the dict. */
    long defragged = 0;
    dictht *ht;
    /* Handle the next entry (if there is one), and update the pointer in the
     * current entry. */
    if (iter->nextEntry) {
        dictEntry *newde = activeDefragAlloc(iter->nextEntry);
        if (newde) {
            defragged++;
            iter->nextEntry = newde;
            /* The current entry chains to the one we just moved. */
            iter->entry->next = newde;
        }
    }
    /* handle the case of the first entry in the hash bucket: only then is
     * the bucket slot itself the (sole) incoming pointer we can fix up. */
    ht = &iter->d->ht[iter->table];
    if (ht->table[iter->index] == iter->entry) {
        dictEntry *newde = activeDefragAlloc(iter->entry);
        if (newde) {
            iter->entry = newde;
            ht->table[iter->index] = newde;
            defragged++;
        }
    }
    return defragged;
}
169
170 /* Defrag helper for dict main allocations (dict struct, and hash tables).
171 * receives a pointer to the dict* and implicitly updates it when the dict
172 * struct itself was moved. Returns a stat of how many pointers were moved. */
dictDefragTables(dict * d)173 long dictDefragTables(dict* d) {
174 dictEntry **newtable;
175 long defragged = 0;
176 /* handle the first hash table */
177 newtable = activeDefragAlloc(d->ht[0].table);
178 if (newtable)
179 defragged++, d->ht[0].table = newtable;
180 /* handle the second hash table */
181 if (d->ht[1].table) {
182 newtable = activeDefragAlloc(d->ht[1].table);
183 if (newtable)
184 defragged++, d->ht[1].table = newtable;
185 }
186 return defragged;
187 }
188
/* Internal function used by zslDefrag: replace every reference to 'oldnode'
 * with 'newnode'. 'update' holds, per level, the node whose forward pointer
 * may reference 'oldnode' (collected by zslDefrag's search). */
void zslUpdateNode(zskiplist *zsl, zskiplistNode *oldnode, zskiplistNode *newnode, zskiplistNode **update) {
    int i;
    for (i = 0; i < zsl->level; i++) {
        if (update[i]->level[i].forward == oldnode)
            update[i]->level[i].forward = newnode;
    }
    /* The header node is never the one being moved. */
    serverAssert(zsl->header!=oldnode);
    if (newnode->level[0].forward) {
        /* Fix the backward link of the successor node. */
        serverAssert(newnode->level[0].forward->backward==oldnode);
        newnode->level[0].forward->backward = newnode;
    } else {
        /* No successor at level 0: the moved node was the tail. */
        serverAssert(zsl->tail==oldnode);
        zsl->tail = newnode;
    }
}
205
/* Defrag helper for sorted set.
 * Update the robj pointer, defrag the skiplist struct and return the new score
 * reference. We may not access oldele pointer (not even the pointer stored in
 * the skiplist), as it was already freed. Newele may be null, in which case we
 * only need to defrag the skiplist, but not update the obj pointer.
 * When return value is non-NULL, it is the score reference that must be updated
 * in the dict record. */
double *zslDefrag(zskiplist *zsl, double score, sds oldele, sds newele) {
    zskiplistNode *update[ZSKIPLIST_MAXLEVEL], *x, *newx;
    int i;
    /* Compare string contents through the live pointer only; the bytes at
     * oldele must never be read once it moved. */
    sds ele = newele? newele: oldele;

    /* find the skiplist node referring to the object that was moved,
     * and all pointers that need to be updated if we'll end up moving the skiplist node. */
    x = zsl->header;
    for (i = zsl->level-1; i >= 0; i--) {
        while (x->level[i].forward &&
            x->level[i].forward->ele != oldele && /* make sure not to access the
                                                     ->obj pointer if it matches
                                                     oldele */
            (x->level[i].forward->score < score ||
                (x->level[i].forward->score == score &&
                sdscmp(x->level[i].forward->ele,ele) < 0)))
            x = x->level[i].forward;
        update[i] = x;
    }

    /* update the robj pointer inside the skip list record. */
    x = x->level[0].forward;
    serverAssert(x && score == x->score && x->ele==oldele);
    if (newele)
        x->ele = newele;

    /* try to defrag the skiplist record itself */
    newx = activeDefragAlloc(x);
    if (newx) {
        zslUpdateNode(zsl, x, newx, update);
        return &newx->score;
    }
    return NULL;
}
247
/* Defrag helper for sorted set.
 * Defrag a single dict entry key name, and corresponding skiplist struct.
 * Returns a stat of how many pointers were moved. */
long activeDefragZsetEntry(zset *zs, dictEntry *de) {
    sds newsds;
    double* newscore;
    long defragged = 0;
    sds sdsele = dictGetKey(de);
    /* The dict key and the skiplist element share the same sds; if it moved,
     * zslDefrag (below) also rewrites the skiplist's ele pointer. */
    if ((newsds = activeDefragSds(sdsele)))
        defragged++, de->key = newsds;
    /* Also defrag the skiplist node; a non-NULL result is the new score
     * reference that must replace the dict value. */
    newscore = zslDefrag(zs->zsl, *(double*)dictGetVal(de), sdsele, newsds);
    if (newscore) {
        dictSetVal(zs->dict, de, newscore);
        defragged++;
    }
    return defragged;
}
264
/* val_type values for activeDefragSdsDict / activeDefragList /
 * activeDefragSdsListAndDict, describing what the values hold. */
#define DEFRAG_SDS_DICT_NO_VAL 0        /* only keys are defragged */
#define DEFRAG_SDS_DICT_VAL_IS_SDS 1    /* values are sds strings */
#define DEFRAG_SDS_DICT_VAL_IS_STROB 2  /* values are string robj */
#define DEFRAG_SDS_DICT_VAL_VOID_PTR 3  /* values are opaque allocations */
269
/* Defrag a dict with sds key and optional value (either ptr, sds or robj string).
 * Returns a stat of how many pointers were moved. */
long activeDefragSdsDict(dict* d, int val_type) {
    dictIterator *di;
    dictEntry *de;
    long defragged = 0;
    di = dictGetIterator(d);
    while((de = dictNext(di)) != NULL) {
        sds sdsele = dictGetKey(de), newsds;
        if ((newsds = activeDefragSds(sdsele)))
            de->key = newsds, defragged++;
        /* defrag the value */
        if (val_type == DEFRAG_SDS_DICT_VAL_IS_SDS) {
            sdsele = dictGetVal(de);
            if ((newsds = activeDefragSds(sdsele)))
                de->v.val = newsds, defragged++;
        } else if (val_type == DEFRAG_SDS_DICT_VAL_IS_STROB) {
            robj *newele, *ele = dictGetVal(de);
            /* activeDefragStringOb increments 'defragged' internally. */
            if ((newele = activeDefragStringOb(ele, &defragged)))
                de->v.val = newele;
        } else if (val_type == DEFRAG_SDS_DICT_VAL_VOID_PTR) {
            void *newptr, *ptr = dictGetVal(de);
            if ((newptr = activeDefragAlloc(ptr)))
                de->v.val = newptr, defragged++;
        }
        /* also defrag the dictEntry itself (and the next in the chain). */
        defragged += dictIterDefragEntry(di);
    }
    dictReleaseIterator(di);
    return defragged;
}
299
/* Defrag a list of ptr, sds or robj string values.
 * Returns a stat of how many pointers were moved. */
long activeDefragList(list *l, int val_type) {
    long defragged = 0;
    listNode *ln, *newln;
    for (ln = l->head; ln; ln = ln->next) {
        /* Defrag the list node itself and relink its neighbors
         * (or head/tail when the node is first/last). */
        if ((newln = activeDefragAlloc(ln))) {
            if (newln->prev)
                newln->prev->next = newln;
            else
                l->head = newln;
            if (newln->next)
                newln->next->prev = newln;
            else
                l->tail = newln;
            ln = newln;
            defragged++;
        }
        /* Defrag the node's value according to val_type. */
        if (val_type == DEFRAG_SDS_DICT_VAL_IS_SDS) {
            sds newsds, sdsele = ln->value;
            if ((newsds = activeDefragSds(sdsele)))
                ln->value = newsds, defragged++;
        } else if (val_type == DEFRAG_SDS_DICT_VAL_IS_STROB) {
            robj *newele, *ele = ln->value;
            /* activeDefragStringOb increments 'defragged' internally. */
            if ((newele = activeDefragStringOb(ele, &defragged)))
                ln->value = newele;
        } else if (val_type == DEFRAG_SDS_DICT_VAL_VOID_PTR) {
            void *newptr, *ptr = ln->value;
            if ((newptr = activeDefragAlloc(ptr)))
                ln->value = newptr, defragged++;
        }
    }
    return defragged;
}
333
334 /* Defrag a list of sds values and a dict with the same sds keys */
activeDefragSdsListAndDict(list * l,dict * d,int dict_val_type)335 long activeDefragSdsListAndDict(list *l, dict *d, int dict_val_type) {
336 long defragged = 0;
337 sds newsds, sdsele;
338 listNode *ln, *newln;
339 dictIterator *di;
340 dictEntry *de;
341 /* Defrag the list and it's sds values */
342 for (ln = l->head; ln; ln = ln->next) {
343 if ((newln = activeDefragAlloc(ln))) {
344 if (newln->prev)
345 newln->prev->next = newln;
346 else
347 l->head = newln;
348 if (newln->next)
349 newln->next->prev = newln;
350 else
351 l->tail = newln;
352 ln = newln;
353 defragged++;
354 }
355 sdsele = ln->value;
356 if ((newsds = activeDefragSds(sdsele))) {
357 /* When defragging an sds value, we need to update the dict key */
358 uint64_t hash = dictGetHash(d, sdsele);
359 replaceSateliteDictKeyPtrAndOrDefragDictEntry(d, sdsele, newsds, hash, &defragged);
360 ln->value = newsds;
361 defragged++;
362 }
363 }
364
365 /* Defrag the dict values (keys were already handled) */
366 di = dictGetIterator(d);
367 while((de = dictNext(di)) != NULL) {
368 if (dict_val_type == DEFRAG_SDS_DICT_VAL_IS_SDS) {
369 sds newsds, sdsele = dictGetVal(de);
370 if ((newsds = activeDefragSds(sdsele)))
371 de->v.val = newsds, defragged++;
372 } else if (dict_val_type == DEFRAG_SDS_DICT_VAL_IS_STROB) {
373 robj *newele, *ele = dictGetVal(de);
374 if ((newele = activeDefragStringOb(ele, &defragged)))
375 de->v.val = newele, defragged++;
376 } else if (dict_val_type == DEFRAG_SDS_DICT_VAL_VOID_PTR) {
377 void *newptr, *ptr = ln->value;
378 if ((newptr = activeDefragAlloc(ptr)))
379 ln->value = newptr, defragged++;
380 }
381 defragged += dictIterDefragEntry(di);
382 }
383 dictReleaseIterator(di);
384
385 return defragged;
386 }
387
/* Utility function that replaces an old key pointer in the dictionary with a
 * new pointer. Additionally, we try to defrag the dictEntry in that dict.
 * Oldkey may be a dead pointer and should not be accessed (we get a
 * pre-calculated hash value). Newkey may be null if the key pointer wasn't
 * moved. Return value is the dictEntry if found, or NULL if not found.
 * NOTE: this is very ugly code, but it lets us avoid the complication of
 * doing a scan on another dict. */
dictEntry* replaceSateliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sds newkey, uint64_t hash, long *defragged) {
    /* Look up by pointer identity plus the pre-computed hash, since the
     * bytes at oldkey may already be freed. */
    dictEntry **deref = dictFindEntryRefByPtrAndHash(d, oldkey, hash);
    if (deref) {
        dictEntry *de = *deref;
        dictEntry *newde = activeDefragAlloc(de);
        if (newde) {
            de = *deref = newde;
            (*defragged)++;
        }
        if (newkey)
            de->key = newkey;
        return de;
    }
    return NULL;
}
410
/* Defrag every node of a quicklist and each node's ziplist payload,
 * relinking neighbors (and head/tail) whenever a node moves.
 * Returns a stat of how many pointers were moved. */
long activeDefragQuickListNodes(quicklist *ql) {
    quicklistNode *node = ql->head, *newnode;
    long defragged = 0;
    unsigned char *newzl;
    while (node) {
        if ((newnode = activeDefragAlloc(node))) {
            if (newnode->prev)
                newnode->prev->next = newnode;
            else
                ql->head = newnode;
            if (newnode->next)
                newnode->next->prev = newnode;
            else
                ql->tail = newnode;
            node = newnode;
            defragged++;
        }
        /* the node's ziplist is a separate allocation */
        if ((newzl = activeDefragAlloc(node->zl)))
            defragged++, node->zl = newzl;
        node = node->next;
    }
    return defragged;
}
434
435 /* when the value has lots of elements, we want to handle it later and not as
436 * oart of the main dictionary scan. this is needed in order to prevent latency
437 * spikes when handling large items */
defragLater(redisDb * db,dictEntry * kde)438 void defragLater(redisDb *db, dictEntry *kde) {
439 sds key = sdsdup(dictGetKey(kde));
440 listAddNodeTail(db->defrag_later, key);
441 }
442
/* Deferred defrag of a quicklist-encoded list value.
 * Returns a stat of how many pointers were moved. */
long scanLaterList(robj *ob) {
    if (ob->type != OBJ_LIST || ob->encoding != OBJ_ENCODING_QUICKLIST)
        return 0;
    quicklist *ql = ob->ptr;
    server.stat_active_defrag_scanned += ql->len;
    return activeDefragQuickListNodes(ql);
}
450
/* Context passed to scanLaterZsetCallback through dictScan's privdata:
 * the zset being scanned and the running count of moved pointers. */
typedef struct {
    zset *zs;
    long defragged;
} scanLaterZsetData;
455
scanLaterZsetCallback(void * privdata,const dictEntry * _de)456 void scanLaterZsetCallback(void *privdata, const dictEntry *_de) {
457 dictEntry *de = (dictEntry*)_de;
458 scanLaterZsetData *data = privdata;
459 data->defragged += activeDefragZsetEntry(data->zs, de);
460 server.stat_active_defrag_scanned++;
461 }
462
/* Deferred defrag of a skiplist-encoded zset, one dictScan step at a time.
 * '*cursor' carries the scan position between calls.
 * Returns a stat of how many pointers were moved. */
long scanLaterZset(robj *ob, unsigned long *cursor) {
    if (ob->type != OBJ_ZSET || ob->encoding != OBJ_ENCODING_SKIPLIST)
        return 0;
    zset *zs = (zset*)ob->ptr;
    scanLaterZsetData ctx = {zs, 0};
    *cursor = dictScan(zs->dict, *cursor, scanLaterZsetCallback,
                       defragDictBucketCallback, &ctx);
    return ctx.defragged;
}
472
scanLaterSetCallback(void * privdata,const dictEntry * _de)473 void scanLaterSetCallback(void *privdata, const dictEntry *_de) {
474 dictEntry *de = (dictEntry*)_de;
475 long *defragged = privdata;
476 sds sdsele = dictGetKey(de), newsds;
477 if ((newsds = activeDefragSds(sdsele)))
478 (*defragged)++, de->key = newsds;
479 server.stat_active_defrag_scanned++;
480 }
481
/* Deferred defrag of a hashtable-encoded set, one dictScan step at a time.
 * '*cursor' carries the scan position between calls.
 * Returns a stat of how many pointers were moved. */
long scanLaterSet(robj *ob, unsigned long *cursor) {
    long defragged = 0;
    if (ob->type != OBJ_SET || ob->encoding != OBJ_ENCODING_HT)
        return 0;
    *cursor = dictScan((dict*)ob->ptr, *cursor, scanLaterSetCallback,
                       defragDictBucketCallback, &defragged);
    return defragged;
}
490
scanLaterHashCallback(void * privdata,const dictEntry * _de)491 void scanLaterHashCallback(void *privdata, const dictEntry *_de) {
492 dictEntry *de = (dictEntry*)_de;
493 long *defragged = privdata;
494 sds sdsele = dictGetKey(de), newsds;
495 if ((newsds = activeDefragSds(sdsele)))
496 (*defragged)++, de->key = newsds;
497 sdsele = dictGetVal(de);
498 if ((newsds = activeDefragSds(sdsele)))
499 (*defragged)++, de->v.val = newsds;
500 server.stat_active_defrag_scanned++;
501 }
502
/* Deferred defrag of a hashtable-encoded hash, one dictScan step at a time.
 * '*cursor' carries the scan position between calls.
 * Returns a stat of how many pointers were moved. */
long scanLaterHash(robj *ob, unsigned long *cursor) {
    long defragged = 0;
    if (ob->type != OBJ_HASH || ob->encoding != OBJ_ENCODING_HT)
        return 0;
    *cursor = dictScan((dict*)ob->ptr, *cursor, scanLaterHashCallback,
                       defragDictBucketCallback, &defragged);
    return defragged;
}
511
/* Defrag a quicklist-encoded list value: the quicklist struct inline, and
 * the nodes either inline (small list) or deferred via defragLater.
 * Returns a stat of how many pointers were moved. */
long defragQuicklist(redisDb *db, dictEntry *kde) {
    robj *ob = dictGetVal(kde);
    long defragged = 0;
    quicklist *ql = ob->ptr, *newql;
    serverAssert(ob->type == OBJ_LIST && ob->encoding == OBJ_ENCODING_QUICKLIST);
    if ((newql = activeDefragAlloc(ql)))
        defragged++, ob->ptr = ql = newql;
    /* big lists are deferred to avoid latency spikes */
    if (ql->len > server.active_defrag_max_scan_fields)
        defragLater(db, kde);
    else
        defragged += activeDefragQuickListNodes(ql);
    return defragged;
}
525
/* Defrag a skiplist-encoded zset: the zset struct, the skiplist struct and
 * header, the entries (inline or deferred), and finally the dict struct
 * and tables. Returns a stat of how many pointers were moved. */
long defragZsetSkiplist(redisDb *db, dictEntry *kde) {
    robj *ob = dictGetVal(kde);
    long defragged = 0;
    zset *zs = (zset*)ob->ptr;
    zset *newzs;
    zskiplist *newzsl;
    dict *newdict;
    dictEntry *de;
    struct zskiplistNode *newheader;
    serverAssert(ob->type == OBJ_ZSET && ob->encoding == OBJ_ENCODING_SKIPLIST);
    if ((newzs = activeDefragAlloc(zs)))
        defragged++, ob->ptr = zs = newzs;
    if ((newzsl = activeDefragAlloc(zs->zsl)))
        defragged++, zs->zsl = newzsl;
    if ((newheader = activeDefragAlloc(zs->zsl->header)))
        defragged++, zs->zsl->header = newheader;
    /* big zsets are deferred to avoid latency spikes */
    if (dictSize(zs->dict) > server.active_defrag_max_scan_fields)
        defragLater(db, kde);
    else {
        dictIterator *di = dictGetIterator(zs->dict);
        while((de = dictNext(di)) != NULL) {
            defragged += activeDefragZsetEntry(zs, de);
        }
        dictReleaseIterator(di);
    }
    /* handle the dict struct */
    if ((newdict = activeDefragAlloc(zs->dict)))
        defragged++, zs->dict = newdict;
    /* defrag the dict tables */
    defragged += dictDefragTables(zs->dict);
    return defragged;
}
558
/* Defrag a hashtable-encoded hash: fields/values (inline or deferred),
 * then the dict struct and its tables.
 * Returns a stat of how many pointers were moved. */
long defragHash(redisDb *db, dictEntry *kde) {
    long defragged = 0;
    robj *ob = dictGetVal(kde);
    dict *d, *newd;
    serverAssert(ob->type == OBJ_HASH && ob->encoding == OBJ_ENCODING_HT);
    d = ob->ptr;
    /* small hashes are handled inline, big ones deferred to avoid
     * latency spikes */
    if (dictSize(d) <= server.active_defrag_max_scan_fields)
        defragged += activeDefragSdsDict(d, DEFRAG_SDS_DICT_VAL_IS_SDS);
    else
        defragLater(db, kde);
    /* handle the dict struct itself */
    if ((newd = activeDefragAlloc(ob->ptr)))
        ob->ptr = newd, defragged++;
    /* and its bucket arrays */
    defragged += dictDefragTables(ob->ptr);
    return defragged;
}
576
/* Defrag a hashtable-encoded set: members (inline or deferred), then the
 * dict struct and its tables.
 * Returns a stat of how many pointers were moved. */
long defragSet(redisDb *db, dictEntry *kde) {
    long defragged = 0;
    robj *ob = dictGetVal(kde);
    dict *d, *newd;
    serverAssert(ob->type == OBJ_SET && ob->encoding == OBJ_ENCODING_HT);
    d = ob->ptr;
    /* small sets are handled inline, big ones deferred to avoid
     * latency spikes */
    if (dictSize(d) <= server.active_defrag_max_scan_fields)
        defragged += activeDefragSdsDict(d, DEFRAG_SDS_DICT_NO_VAL);
    else
        defragLater(db, kde);
    /* handle the dict struct itself */
    if ((newd = activeDefragAlloc(ob->ptr)))
        ob->ptr = newd, defragged++;
    /* and its bucket arrays */
    defragged += dictDefragTables(ob->ptr);
    return defragged;
}
594
595 /* Defrag callback for radix tree iterator, called for each node,
596 * used in order to defrag the nodes allocations. */
defragRaxNode(raxNode ** noderef)597 int defragRaxNode(raxNode **noderef) {
598 raxNode *newnode = activeDefragAlloc(*noderef);
599 if (newnode) {
600 *noderef = newnode;
601 return 1;
602 }
603 return 0;
604 }
605
/* Deferred defrag of a stream's rax nodes and listpack payloads.
 * Returns 0 if no more work needs to be done, and 1 if time is up and more
 * work is needed. (Note: the function name carries a historical typo,
 * kept since it is part of this file's interface.) */
int scanLaterStraemListpacks(robj *ob, unsigned long *cursor, long long endtime, long long *defragged) {
    /* Resume point: the last stream ID processed before time ran out.
     * Only meaningful while *cursor is non-zero. Being static, only one
     * deferred stream scan can be in progress at a time. */
    static unsigned char last[sizeof(streamID)];
    raxIterator ri;
    long iterations = 0;
    if (ob->type != OBJ_STREAM || ob->encoding != OBJ_ENCODING_STREAM) {
        /* object was deleted or replaced since it was queued */
        *cursor = 0;
        return 0;
    }

    stream *s = ob->ptr;
    raxStart(&ri,s->rax);
    if (*cursor == 0) {
        /* if cursor is 0, we start new iteration */
        defragRaxNode(&s->rax->head);
        /* assign the iterator node callback before the seek, so that the
         * initial nodes that are processed till the first item are covered */
        ri.node_cb = defragRaxNode;
        raxSeek(&ri,"^",NULL,0);
    } else {
        /* if cursor is non-zero, we seek to the static 'last' */
        if (!raxSeek(&ri,">", last, sizeof(last))) {
            *cursor = 0;
            return 0;
        }
        /* assign the iterator node callback after the seek, so that the
         * initial nodes that are processed till now aren't covered */
        ri.node_cb = defragRaxNode;
    }

    (*cursor)++;
    while (raxNext(&ri)) {
        /* defrag the listpack pointed to by this rax entry */
        void *newdata = activeDefragAlloc(ri.data);
        if (newdata)
            raxSetData(ri.node, ri.data=newdata), (*defragged)++;
        /* check the clock only every 16 entries to limit its overhead */
        if (++iterations > 16) {
            if (ustime() > endtime) {
                serverAssert(ri.key_len==sizeof(last));
                memcpy(last,ri.key,ri.key_len);
                raxStop(&ri);
                return 1;
            }
            iterations = 0;
        }
    }
    raxStop(&ri);
    *cursor = 0;
    return 0;
}
655
/* Optional callback used to defrag each rax element (not including the
 * element pointer itself). May return a new pointer for the element. */
typedef void *(raxDefragFunction)(raxIterator *ri, void *privdata, long *defragged);
658
659 /* defrag radix tree including:
660 * 1) rax struct
661 * 2) rax nodes
662 * 3) rax entry data (only if defrag_data is specified)
663 * 4) call a callback per element, and allow the callback to return a new pointer for the element */
defragRadixTree(rax ** raxref,int defrag_data,raxDefragFunction * element_cb,void * element_cb_data)664 long defragRadixTree(rax **raxref, int defrag_data, raxDefragFunction *element_cb, void *element_cb_data) {
665 long defragged = 0;
666 raxIterator ri;
667 rax* rax;
668 if ((rax = activeDefragAlloc(*raxref)))
669 defragged++, *raxref = rax;
670 rax = *raxref;
671 raxStart(&ri,rax);
672 ri.node_cb = defragRaxNode;
673 defragRaxNode(&rax->head);
674 raxSeek(&ri,"^",NULL,0);
675 while (raxNext(&ri)) {
676 void *newdata = NULL;
677 if (element_cb)
678 newdata = element_cb(&ri, element_cb_data, &defragged);
679 if (defrag_data && !newdata)
680 newdata = activeDefragAlloc(ri.data);
681 if (newdata)
682 raxSetData(ri.node, ri.data=newdata), defragged++;
683 }
684 raxStop(&ri);
685 return defragged;
686 }
687
/* Context for defragStreamConsumerPendingEntry: the consumer group (whose
 * global PEL references the same NACKs) and the consumer being scanned. */
typedef struct {
    streamCG *cg;
    streamConsumer *c;
} PendingEntryContext;
692
/* rax element callback: defrag one NACK in a consumer's PEL, keeping the
 * consumer pointer and the group's global PEL in sync. */
void* defragStreamConsumerPendingEntry(raxIterator *ri, void *privdata, long *defragged) {
    UNUSED(defragged);
    PendingEntryContext *ctx = privdata;
    streamNACK *nack = ri->data, *newnack;
    nack->consumer = ctx->c; /* update nack pointer to consumer */
    newnack = activeDefragAlloc(nack);
    if (newnack) {
        /* update consumer group pointer to the nack: the group's PEL holds
         * the same NACK under the same key, so re-insert overwrites it. */
        void *prev;
        raxInsert(ctx->cg->pel, ri->key, ri->key_len, newnack, &prev);
        serverAssert(prev==nack);
        /* note: we don't increment 'defragged' that's done by the caller */
    }
    return newnack;
}
708
/* rax element callback: defrag one stream consumer, its name, and its PEL. */
void* defragStreamConsumer(raxIterator *ri, void *privdata, long *defragged) {
    streamConsumer *c = ri->data;
    streamCG *cg = privdata;
    void *newc = activeDefragAlloc(c);
    if (newc) {
        /* note: we don't increment 'defragged' that's done by the caller */
        /* from here on, work on the moved copy */
        c = newc;
    }
    sds newsds = activeDefragSds(c->name);
    if (newsds)
        (*defragged)++, c->name = newsds;
    if (c->pel) {
        /* defrag this consumer's PEL; the NACKs are shared with the group's
         * global PEL, which the callback keeps in sync. */
        PendingEntryContext pel_ctx = {cg, c};
        *defragged += defragRadixTree(&c->pel, 0, defragStreamConsumerPendingEntry, &pel_ctx);
    }
    return newc; /* returns NULL if c was not defragged */
}
726
/* rax element callback: defrag one consumer group's consumers rax and its
 * global PEL. Always returns NULL (the group struct itself is handled by
 * the caller's rax iteration). */
void* defragStreamConsumerGroup(raxIterator *ri, void *privdata, long *defragged) {
    streamCG *group = ri->data;
    UNUSED(privdata);
    if (group->consumers)
        *defragged += defragRadixTree(&group->consumers, 0, defragStreamConsumer, group);
    if (group->pel)
        *defragged += defragRadixTree(&group->pel, 0, NULL, NULL);
    return NULL;
}
736
/* Defrag a stream value: the stream struct, its rax (inline or deferred),
 * and the consumer groups. Returns a stat of how many pointers were moved. */
long defragStream(redisDb *db, dictEntry *kde) {
    long defragged = 0;
    robj *ob = dictGetVal(kde);
    serverAssert(ob->type == OBJ_STREAM && ob->encoding == OBJ_ENCODING_STREAM);
    stream *s = ob->ptr, *news;

    /* handle the main struct */
    if ((news = activeDefragAlloc(s)))
        defragged++, ob->ptr = s = news;

    if (raxSize(s->rax) > server.active_defrag_max_scan_fields) {
        /* too big to walk inline: defrag only the rax struct now and
         * defer the node/listpack walk to scanLaterStraemListpacks. */
        rax *newrax = activeDefragAlloc(s->rax);
        if (newrax)
            defragged++, s->rax = newrax;
        defragLater(db, kde);
    } else
        defragged += defragRadixTree(&s->rax, 1, NULL, NULL);

    if (s->cgroups)
        defragged += defragRadixTree(&s->cgroups, 1, defragStreamConsumerGroup, NULL);
    return defragged;
}
759
/* for each key we scan in the main dict, this function will attempt to defrag
 * all the various pointers it has. Returns a stat of how many pointers were
 * moved. */
long defragKey(redisDb *db, dictEntry *de) {
    sds keysds = dictGetKey(de);
    robj *newob, *ob;
    unsigned char *newzl;
    long defragged = 0;
    sds newsds;

    /* Try to defrag the key name. */
    newsds = activeDefragSds(keysds);
    if (newsds)
        defragged++, de->key = newsds;
    if (dictSize(db->expires)) {
        /* Dirty code:
         * I can't search in db->expires for that key after i already released
         * the pointer it holds it won't be able to do the string compare.
         * So the lookup is done by pointer + pre-computed hash. Both dicts
         * share the same key sds, so the expires key is updated too. */
        uint64_t hash = dictGetHash(db->dict, de->key);
        replaceSateliteDictKeyPtrAndOrDefragDictEntry(db->expires, keysds, newsds, hash, &defragged);
    }

    /* Try to defrag robj and / or string value. */
    ob = dictGetVal(de);
    if ((newob = activeDefragStringOb(ob, &defragged))) {
        de->v.val = newob;
        ob = newob;
    }

    /* Dispatch by value type/encoding; compact encodings are a single
     * allocation, complex ones get dedicated helpers. */
    if (ob->type == OBJ_STRING) {
        /* Already handled in activeDefragStringOb. */
    } else if (ob->type == OBJ_LIST) {
        if (ob->encoding == OBJ_ENCODING_QUICKLIST) {
            defragged += defragQuicklist(db, de);
        } else if (ob->encoding == OBJ_ENCODING_ZIPLIST) {
            if ((newzl = activeDefragAlloc(ob->ptr)))
                defragged++, ob->ptr = newzl;
        } else {
            serverPanic("Unknown list encoding");
        }
    } else if (ob->type == OBJ_SET) {
        if (ob->encoding == OBJ_ENCODING_HT) {
            defragged += defragSet(db, de);
        } else if (ob->encoding == OBJ_ENCODING_INTSET) {
            intset *newis, *is = ob->ptr;
            if ((newis = activeDefragAlloc(is)))
                defragged++, ob->ptr = newis;
        } else {
            serverPanic("Unknown set encoding");
        }
    } else if (ob->type == OBJ_ZSET) {
        if (ob->encoding == OBJ_ENCODING_ZIPLIST) {
            if ((newzl = activeDefragAlloc(ob->ptr)))
                defragged++, ob->ptr = newzl;
        } else if (ob->encoding == OBJ_ENCODING_SKIPLIST) {
            defragged += defragZsetSkiplist(db, de);
        } else {
            serverPanic("Unknown sorted set encoding");
        }
    } else if (ob->type == OBJ_HASH) {
        if (ob->encoding == OBJ_ENCODING_ZIPLIST) {
            if ((newzl = activeDefragAlloc(ob->ptr)))
                defragged++, ob->ptr = newzl;
        } else if (ob->encoding == OBJ_ENCODING_HT) {
            defragged += defragHash(db, de);
        } else {
            serverPanic("Unknown hash encoding");
        }
    } else if (ob->type == OBJ_STREAM) {
        defragged += defragStream(db, de);
    } else if (ob->type == OBJ_MODULE) {
        /* Currently defragmenting modules private data types
         * is not supported. */
    } else {
        serverPanic("Unknown object type");
    }
    return defragged;
}
838
839 /* Defrag scan callback for the main db dictionary. */
defragScanCallback(void * privdata,const dictEntry * de)840 void defragScanCallback(void *privdata, const dictEntry *de) {
841 long defragged = defragKey((redisDb*)privdata, (dictEntry*)de);
842 server.stat_active_defrag_hits += defragged;
843 if(defragged)
844 server.stat_active_defrag_key_hits++;
845 else
846 server.stat_active_defrag_key_misses++;
847 server.stat_active_defrag_scanned++;
848 }
849
/* Defrag scan callback for each hash table bucket,
 * used in order to defrag the dictEntry allocations. */
void defragDictBucketCallback(void *privdata, dictEntry **bucketref) {
    UNUSED(privdata); /* NOTE: this function is also used by both activeDefragCycle and scanLaterHash, etc. don't use privdata */
    /* Walk the bucket's chain; 'bucketref' is always the pointer that
     * references the current entry, so it can be rewritten in place. */
    while(*bucketref) {
        dictEntry *de = *bucketref, *newde;
        if ((newde = activeDefragAlloc(de))) {
            *bucketref = newde;
        }
        bucketref = &(*bucketref)->next;
    }
}
862
/* Utility function to get the fragmentation ratio from jemalloc.
 * It is critical to do that by comparing only heap maps that belong to
 * jemalloc, and skip ones the jemalloc keeps as spare. Since we use this
 * fragmentation ratio in order to decide if a defrag action should be taken
 * or not, a false detection can cause the defragmenter to waste a lot of CPU
 * without the possibility of getting any results.
 * If out_frag_bytes is non-NULL it receives the absolute overhead in bytes.
 * Returns the fragmentation as a percentage over the allocated bytes. */
float getAllocatorFragmentation(size_t *out_frag_bytes) {
    size_t resident, active, allocated;
    zmalloc_get_allocator_info(&allocated, &active, &resident);
    /* overhead of active pages over actually-allocated bytes, as percent */
    float frag_pct = ((float)active / allocated)*100 - 100;
    size_t frag_bytes = active - allocated;
    /* same, measured against the process RSS (logged for diagnostics) */
    float rss_pct = ((float)resident / allocated)*100 - 100;
    size_t rss_bytes = resident - allocated;
    if(out_frag_bytes)
        *out_frag_bytes = frag_bytes;
    serverLog(LL_DEBUG,
        "allocated=%zu, active=%zu, resident=%zu, frag=%.0f%% (%.0f%% rss), frag_bytes=%zu (%zu rss)",
        allocated, active, resident, frag_pct, rss_pct, frag_bytes, rss_bytes);
    return frag_pct;
}
883
884 /* We may need to defrag other globals, one small allcation can hold a full allocator run.
885 * so although small, it is still important to defrag these */
defragOtherGlobals()886 long defragOtherGlobals() {
887 long defragged = 0;
888
889 /* there are many more pointers to defrag (e.g. client argv, output / aof buffers, etc.
890 * but we assume most of these are short lived, we only need to defrag allocations
891 * that remain static for a long time */
892 defragged += activeDefragSdsDict(server.lua_scripts, DEFRAG_SDS_DICT_VAL_IS_STROB);
893 defragged += activeDefragSdsListAndDict(server.repl_scriptcache_fifo, server.repl_scriptcache_dict, DEFRAG_SDS_DICT_NO_VAL);
894 return defragged;
895 }
896
897 /* returns 0 more work may or may not be needed (see non-zero cursor),
898 * and 1 if time is up and more work is needed. */
defragLaterItem(dictEntry * de,unsigned long * cursor,long long endtime)899 int defragLaterItem(dictEntry *de, unsigned long *cursor, long long endtime) {
900 if (de) {
901 robj *ob = dictGetVal(de);
902 if (ob->type == OBJ_LIST) {
903 server.stat_active_defrag_hits += scanLaterList(ob);
904 *cursor = 0; /* list has no scan, we must finish it in one go */
905 } else if (ob->type == OBJ_SET) {
906 server.stat_active_defrag_hits += scanLaterSet(ob, cursor);
907 } else if (ob->type == OBJ_ZSET) {
908 server.stat_active_defrag_hits += scanLaterZset(ob, cursor);
909 } else if (ob->type == OBJ_HASH) {
910 server.stat_active_defrag_hits += scanLaterHash(ob, cursor);
911 } else if (ob->type == OBJ_STREAM) {
912 return scanLaterStraemListpacks(ob, cursor, endtime, &server.stat_active_defrag_hits);
913 } else {
914 *cursor = 0; /* object type may have changed since we schedule it for later */
915 }
916 } else {
917 *cursor = 0; /* object may have been deleted already */
918 }
919 return 0;
920 }
921
/* Returns 0 if no more work needs to be done, and 1 if time is up and more
 * work is needed. */
int defragLaterStep(redisDb *db, long long endtime) {
    /* Static scan state: lets a single big key be processed incrementally
     * across multiple calls of this function. */
    static sds current_key = NULL;
    static unsigned long cursor = 0;
    unsigned int iterations = 0;
    unsigned long long prev_defragged = server.stat_active_defrag_hits;
    unsigned long long prev_scanned = server.stat_active_defrag_scanned;
    long long key_defragged;

    do {
        /* if we're not continuing a scan from the last call or loop, start a new one */
        if (!cursor) {
            listNode *head = listFirst(db->defrag_later);

            /* Move on to next key: the finished key is always the head of
             * the defrag_later list, so pop it. */
            if (current_key) {
                serverAssert(current_key == head->value);
                sdsfree(head->value);
                listDelNode(db->defrag_later, head);
                cursor = 0;
                current_key = NULL;
            }

            /* stop if we reached the last one. */
            head = listFirst(db->defrag_later);
            if (!head)
                return 0;

            /* start a new key */
            current_key = head->value;
            cursor = 0;
        }

        /* each time we enter this function we need to fetch the key from the dict again (if it still exists) */
        dictEntry *de = dictFind(db->dict, current_key);
        /* Snapshot the hit counter so we can tell below whether this key
         * produced any reallocations at all. */
        key_defragged = server.stat_active_defrag_hits;
        do {
            int quit = 0;
            if (defragLaterItem(de, &cursor, endtime))
                quit = 1; /* time is up, we didn't finish all the work */

            /* Don't start a new BIG key in this loop, this is because the
             * next key can be a list, and scanLaterList must be done in one cycle */
            if (!cursor)
                quit = 1;

            /* Once in 16 scan iterations, 512 pointer reallocations, or 64 fields
             * (if we have a lot of pointers in one hash bucket, or rehashing),
             * check if we reached the time limit. */
            if (quit || (++iterations > 16 ||
                            server.stat_active_defrag_hits - prev_defragged > 512 ||
                            server.stat_active_defrag_scanned - prev_scanned > 64)) {
                if (quit || ustime() > endtime) {
                    /* Record whether this key moved anything before bailing out. */
                    if(key_defragged != server.stat_active_defrag_hits)
                        server.stat_active_defrag_key_hits++;
                    else
                        server.stat_active_defrag_key_misses++;
                    return 1;
                }
                iterations = 0;
                prev_defragged = server.stat_active_defrag_hits;
                prev_scanned = server.stat_active_defrag_scanned;
            }
        } while(cursor);
        /* Key fully processed: count it as a hit if at least one allocation moved. */
        if(key_defragged != server.stat_active_defrag_hits)
            server.stat_active_defrag_key_hits++;
        else
            server.stat_active_defrag_key_misses++;
    } while(1);
}
992
/* Linearly map x from the range [x1,x2] onto the range [y1,y2]. */
#define INTERPOLATE(x, x1, x2, y1, y2) ( (y1) + ((x)-(x1)) * ((y2)-(y1)) / ((x2)-(x1)) )
/* Clamp y into [min,max]. */
#define LIMIT(y, min, max) ((y)<(min)? min: ((y)>(max)? max: (y)))
995
996 /* decide if defrag is needed, and at what CPU effort to invest in it */
computeDefragCycles()997 void computeDefragCycles() {
998 size_t frag_bytes;
999 float frag_pct = getAllocatorFragmentation(&frag_bytes);
1000 /* If we're not already running, and below the threshold, exit. */
1001 if (!server.active_defrag_running) {
1002 if(frag_pct < server.active_defrag_threshold_lower || frag_bytes < server.active_defrag_ignore_bytes)
1003 return;
1004 }
1005
1006 /* Calculate the adaptive aggressiveness of the defrag */
1007 int cpu_pct = INTERPOLATE(frag_pct,
1008 server.active_defrag_threshold_lower,
1009 server.active_defrag_threshold_upper,
1010 server.active_defrag_cycle_min,
1011 server.active_defrag_cycle_max);
1012 cpu_pct = LIMIT(cpu_pct,
1013 server.active_defrag_cycle_min,
1014 server.active_defrag_cycle_max);
1015 /* We allow increasing the aggressiveness during a scan, but don't
1016 * reduce it. */
1017 if (!server.active_defrag_running ||
1018 cpu_pct > server.active_defrag_running)
1019 {
1020 server.active_defrag_running = cpu_pct;
1021 serverLog(LL_VERBOSE,
1022 "Starting active defrag, frag=%.0f%%, frag_bytes=%zu, cpu=%d%%",
1023 frag_pct, frag_bytes, cpu_pct);
1024 }
1025 }
1026
/* Perform incremental defragmentation work from the serverCron.
 * This works in a similar way to activeExpireCycle, in the sense that
 * we do incremental work across calls. */
void activeDefragCycle(void) {
    /* Static scan state, preserved across cron invocations so a full keyspace
     * scan can span many cycles. */
    static int current_db = -1;
    static unsigned long cursor = 0;
    static redisDb *db = NULL;
    static long long start_scan, start_stat;
    unsigned int iterations = 0;
    unsigned long long prev_defragged = server.stat_active_defrag_hits;
    unsigned long long prev_scanned = server.stat_active_defrag_scanned;
    long long start, timelimit, endtime;
    mstime_t latency;
    int quit = 0;

    if (server.aof_child_pid!=-1 || server.rdb_child_pid!=-1)
        return; /* Defragging memory while there's a fork will just do damage. */

    /* Once a second, check if the fragmentation justifies starting a scan
     * or making it more aggressive. */
    run_with_period(1000) {
        computeDefragCycles();
    }
    if (!server.active_defrag_running)
        return;

    /* See activeExpireCycle for how timelimit is handled. */
    start = ustime();
    timelimit = 1000000*server.active_defrag_running/server.hz/100;
    if (timelimit <= 0) timelimit = 1;
    endtime = start + timelimit;
    latencyStartMonitor(latency);

    do {
        /* if we're not continuing a scan from the last call or loop, start a new one */
        if (!cursor) {
            /* finish any leftovers from previous db before moving to the next one */
            if (db && defragLaterStep(db, endtime)) {
                quit = 1; /* time is up, we didn't finish all the work */
                break; /* this will exit the function and we'll continue on the next cycle */
            }

            /* Move on to next database, and stop if we reached the last one. */
            if (++current_db >= server.dbnum) {
                /* defrag other items not part of the db / keys */
                defragOtherGlobals();

                long long now = ustime();
                size_t frag_bytes;
                float frag_pct = getAllocatorFragmentation(&frag_bytes);
                serverLog(LL_VERBOSE,
                    "Active defrag done in %dms, reallocated=%d, frag=%.0f%%, frag_bytes=%zu",
                    (int)((now - start_scan)/1000), (int)(server.stat_active_defrag_hits - start_stat), frag_pct, frag_bytes);

                /* Full keyspace pass finished: reset the scan state. */
                start_scan = now;
                current_db = -1;
                cursor = 0;
                db = NULL;
                server.active_defrag_running = 0;

                computeDefragCycles(); /* if another scan is needed, start it right away */
                if (server.active_defrag_running != 0 && ustime() < endtime)
                    continue;
                break;
            }
            else if (current_db==0) {
                /* Start a scan from the first database. */
                start_scan = ustime();
                start_stat = server.stat_active_defrag_hits;
            }

            db = &server.db[current_db];
            cursor = 0;
        }

        do {
            /* before scanning the next bucket, see if we have big keys left from the previous bucket to scan */
            if (defragLaterStep(db, endtime)) {
                quit = 1; /* time is up, we didn't finish all the work */
                break; /* this will exit the function and we'll continue on the next cycle */
            }

            cursor = dictScan(db->dict, cursor, defragScanCallback, defragDictBucketCallback, db);

            /* Once in 16 scan iterations, 512 pointer reallocations, or 64 keys
             * (if we have a lot of pointers in one hash bucket or rehashing),
             * check if we reached the time limit.
             * But regardless, don't start a new db in this loop, this is because after
             * the last db we call defragOtherGlobals, which must be done in one cycle */
            if (!cursor || (++iterations > 16 ||
                            server.stat_active_defrag_hits - prev_defragged > 512 ||
                            server.stat_active_defrag_scanned - prev_scanned > 64)) {
                if (!cursor || ustime() > endtime) {
                    quit = 1;
                    break;
                }
                iterations = 0;
                prev_defragged = server.stat_active_defrag_hits;
                prev_scanned = server.stat_active_defrag_scanned;
            }
        } while(cursor && !quit);
    } while(!quit);

    latencyEndMonitor(latency);
    latencyAddSampleIfNeeded("active-defrag-cycle",latency);
}
1133
1134 #else /* HAVE_DEFRAG */
1135
/* Stub used when the build lacks defrag support (see the HAVE_DEFRAG
 * conditional above); active defrag is a no-op in that case. */
void activeDefragCycle(void) {
    /* Not implemented yet. */
}
1139
1140 #endif
1141