// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "rocksdb/cache.h"

#include <algorithm>  // std::sort
#include <forward_list>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

#include "cache/clock_cache.h"
#include "cache/lru_cache.h"
#include "cache/simple_deleter.h"
#include "test_util/testharness.h"
#include "util/coding.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

// Conversions between numeric keys/values and the types expected by Cache.
static std::string EncodeKey(int k) {
  std::string result;
  PutFixed32(&result, k);
  return result;
}
static int DecodeKey(const Slice& k) {
  assert(k.size() == 4);
  return DecodeFixed32(k.data());
}
static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
static int DecodeValue(void* v) {
  return static_cast<int>(reinterpret_cast<uintptr_t>(v));
}
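// For example, EncodeKey(100) produces a 4-byte fixed-width string and
// DecodeKey() of the resulting Slice yields 100 again; values make the same
// round trip through a uintptr_t stored directly in the void* slot.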

const std::string kLRU = "lru";
const std::string kClock = "clock";

class CacheTest : public testing::TestWithParam<std::string> {
 public:
  static CacheTest* current_;

  class Deleter : public Cache::Deleter {
   public:
    void operator()(const Slice& key, void* v) override {
      current_->deleted_keys_.push_back(DecodeKey(key));
      current_->deleted_values_.push_back(DecodeValue(v));
    }
  };

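  // A cache built with num_shard_bits is split into 2^num_shard_bits shards,
  // so cache_ below has 16 shards and cache2_ has 4.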
  static const int kCacheSize = 1000;
  static const int kNumShardBits = 4;

  static const int kCacheSize2 = 100;
  static const int kNumShardBits2 = 2;

  std::vector<int> deleted_keys_;
  std::vector<int> deleted_values_;
  Deleter deleter_;
  std::shared_ptr<Cache> cache_;
  std::shared_ptr<Cache> cache2_;

  CacheTest()
      : cache_(NewCache(kCacheSize, kNumShardBits, false)),
        cache2_(NewCache(kCacheSize2, kNumShardBits2, false)) {
    current_ = this;
  }

  ~CacheTest() override {}

  std::shared_ptr<Cache> NewCache(size_t capacity) {
    auto type = GetParam();
    if (type == kLRU) {
      return NewLRUCache(capacity);
    }
    if (type == kClock) {
      return NewClockCache(capacity);
    }
    return nullptr;
  }

  std::shared_ptr<Cache> NewCache(
      size_t capacity, int num_shard_bits, bool strict_capacity_limit,
      CacheMetadataChargePolicy charge_policy = kDontChargeCacheMetadata) {
    auto type = GetParam();
    if (type == kLRU) {
      LRUCacheOptions co;
      co.capacity = capacity;
      co.num_shard_bits = num_shard_bits;
      co.strict_capacity_limit = strict_capacity_limit;
      co.high_pri_pool_ratio = 0;
      co.metadata_charge_policy = charge_policy;
      return NewLRUCache(co);
    }
    if (type == kClock) {
      return NewClockCache(capacity, num_shard_bits, strict_capacity_limit,
                           charge_policy);
    }
    return nullptr;
  }

  int Lookup(std::shared_ptr<Cache> cache, int key) {
    Cache::Handle* handle = cache->Lookup(EncodeKey(key));
    const int r = (handle == nullptr) ? -1 : DecodeValue(cache->Value(handle));
    if (handle != nullptr) {
      cache->Release(handle);
    }
    return r;
  }

  void Insert(std::shared_ptr<Cache> cache, int key, int value,
              int charge = 1) {
    cache->Insert(EncodeKey(key), EncodeValue(value), charge, &deleter_);
  }

  void Erase(std::shared_ptr<Cache> cache, int key) {
    cache->Erase(EncodeKey(key));
  }

  int Lookup(int key) { return Lookup(cache_, key); }

  void Insert(int key, int value, int charge = 1) {
    Insert(cache_, key, value, charge);
  }

  void Erase(int key) { Erase(cache_, key); }

  int Lookup2(int key) { return Lookup(cache2_, key); }

  void Insert2(int key, int value, int charge = 1) {
    Insert(cache2_, key, value, charge);
  }

  void Erase2(int key) { Erase(cache2_, key); }
};
CacheTest* CacheTest::current_;

class LRUCacheTest : public CacheTest {};

TEST_P(CacheTest, UsageTest) {
  // cache is std::shared_ptr and will be automatically cleaned up.
  const uint64_t kCapacity = 100000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
  auto precise_cache = NewCache(kCapacity, 0, false, kFullChargeCacheMetadata);
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(0, precise_cache->GetUsage());

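  // Each entry below is charged key.size() + 5. The precise cache charges
  // per-entry metadata on top of that, so its reported usage runs strictly
  // higher than the plain sum of charges.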
  size_t usage = 0;
  char value[10] = "abcdef";
  // make sure everything will be cached
  for (int i = 1; i < 100; ++i) {
    std::string key(i, 'a');
    auto kv_size = key.size() + 5;
    cache->Insert(key, reinterpret_cast<void*>(value), kv_size, nullptr);
    precise_cache->Insert(key, reinterpret_cast<void*>(value), kv_size,
                          nullptr);
    usage += kv_size;
    ASSERT_EQ(usage, cache->GetUsage());
    ASSERT_LT(usage, precise_cache->GetUsage());
  }

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(0, precise_cache->GetUsage());

  // make sure the cache will be overloaded
  for (uint64_t i = 1; i < kCapacity; ++i) {
    auto key = ToString(i);
    cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5, nullptr);
    precise_cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                          nullptr);
  }

  // the usage should be close to the capacity
  ASSERT_GT(kCapacity, cache->GetUsage());
  ASSERT_GT(kCapacity, precise_cache->GetUsage());
  ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
  ASSERT_LT(kCapacity * 0.95, precise_cache->GetUsage());
}

TEST_P(CacheTest, PinnedUsageTest) {
  // cache is std::shared_ptr and will be automatically cleaned up.
  const uint64_t kCapacity = 200000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
  auto precise_cache = NewCache(kCapacity, 8, false, kFullChargeCacheMetadata);

  size_t pinned_usage = 0;
  char value[10] = "abcdef";

  std::forward_list<Cache::Handle*> unreleased_handles;
  std::forward_list<Cache::Handle*> unreleased_handles_in_precise_cache;
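  // GetPinnedUsage() only counts entries with outstanding handles; the
  // forward_lists above hold such handles so they can be released at the end.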

  // Add entries. Unpin some of them after insertion. Then, pin some of them
  // again. Check GetPinnedUsage().
  for (int i = 1; i < 100; ++i) {
    std::string key(i, 'a');
    auto kv_size = key.size() + 5;
    Cache::Handle* handle;
    Cache::Handle* handle_in_precise_cache;
    cache->Insert(key, reinterpret_cast<void*>(value), kv_size, nullptr,
                  &handle);
    assert(handle);
    precise_cache->Insert(key, reinterpret_cast<void*>(value), kv_size, nullptr,
                          &handle_in_precise_cache);
    assert(handle_in_precise_cache);
    pinned_usage += kv_size;
    ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
    ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    if (i % 2 == 0) {
      cache->Release(handle);
      precise_cache->Release(handle_in_precise_cache);
      pinned_usage -= kv_size;
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
      ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    } else {
      unreleased_handles.push_front(handle);
      unreleased_handles_in_precise_cache.push_front(handle_in_precise_cache);
    }
    if (i % 3 == 0) {
      unreleased_handles.push_front(cache->Lookup(key));
      auto x = precise_cache->Lookup(key);
      assert(x);
      unreleased_handles_in_precise_cache.push_front(x);
      // If i % 2 == 0, then the entry was unpinned before Lookup, so pinned
      // usage increased
      if (i % 2 == 0) {
        pinned_usage += kv_size;
      }
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
      ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    }
  }
  auto precise_cache_pinned_usage = precise_cache->GetPinnedUsage();
  ASSERT_LT(pinned_usage, precise_cache_pinned_usage);

  // check that overloading the cache does not change the pinned usage
  for (uint64_t i = 1; i < 2 * kCapacity; ++i) {
    auto key = ToString(i);
    cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5, nullptr);
    precise_cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                          nullptr);
  }
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
  ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
  ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());

  // release handles for pinned entries to prevent memory leaks
  for (auto handle : unreleased_handles) {
    cache->Release(handle);
  }
  for (auto handle : unreleased_handles_in_precise_cache) {
    precise_cache->Release(handle);
  }
  ASSERT_EQ(0, cache->GetPinnedUsage());
  ASSERT_EQ(0, precise_cache->GetPinnedUsage());
  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(0, precise_cache->GetUsage());
}

TEST_P(CacheTest, HitAndMiss) {
  ASSERT_EQ(-1, Lookup(100));

  Insert(100, 101);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(200, 201);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(100, 102);
  ASSERT_EQ(102, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
}

TEST_P(CacheTest, InsertSameKey) {
  Insert(1, 1);
  Insert(1, 2);
  ASSERT_EQ(2, Lookup(1));
}

TEST_P(CacheTest, Erase) {
  Erase(200);
  ASSERT_EQ(0U, deleted_keys_.size());

  Insert(100, 101);
  Insert(200, 201);
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
}

TEST_P(CacheTest, EntriesArePinned) {
  Insert(100, 101);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1U, cache_->GetUsage());

  Insert(100, 102);
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
  ASSERT_EQ(0U, deleted_keys_.size());
  ASSERT_EQ(2U, cache_->GetUsage());

  cache_->Release(h1);
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
  ASSERT_EQ(1U, cache_->GetUsage());

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(1U, cache_->GetUsage());

  cache_->Release(h2);
  ASSERT_EQ(2U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[1]);
  ASSERT_EQ(102, deleted_values_[1]);
  ASSERT_EQ(0U, cache_->GetUsage());
}

TEST_P(CacheTest, EvictionPolicy) {
  Insert(100, 101);
  Insert(200, 201);

  // Frequently used entry must be kept around
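  // (the Lookup(100) in the loop body refreshes the entry on every iteration,
  // LRU recency or clock usage bit, while 200 is never touched again and is
  // eventually evicted)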
  for (int i = 0; i < kCacheSize * 2; i++) {
    Insert(1000 + i, 2000 + i);
    ASSERT_EQ(101, Lookup(100));
  }
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
}

TEST_P(CacheTest, ExternalRefPinsEntries) {
  Insert(100, 101);
  Cache::Handle* h = cache_->Lookup(EncodeKey(100));
  ASSERT_TRUE(cache_->Ref(h));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h)));
  ASSERT_EQ(1U, cache_->GetUsage());

  for (int i = 0; i < 3; ++i) {
    if (i > 0) {
      // First release (i == 1) corresponds to Ref(), second release (i == 2)
      // corresponds to Lookup(). Then, since all external refs are released,
      // the below insertions should push out the cache entry.
      cache_->Release(h);
    }
    // Insert double the cache size of entries: the usage bit in the clock
    // cache prevents 100 from being evicted during the first kCacheSize
    // insertions.
    for (int j = 0; j < 2 * kCacheSize + 100; j++) {
      Insert(1000 + j, 2000 + j);
    }
    if (i < 2) {
      ASSERT_EQ(101, Lookup(100));
    }
  }
  ASSERT_EQ(-1, Lookup(100));
}

TEST_P(CacheTest, EvictionPolicyRef) {
  Insert(100, 101);
  Insert(101, 102);
  Insert(102, 103);
  Insert(103, 104);
  Insert(200, 101);
  Insert(201, 102);
  Insert(202, 103);
  Insert(203, 104);
  Cache::Handle* h201 = cache_->Lookup(EncodeKey(200));
  Cache::Handle* h202 = cache_->Lookup(EncodeKey(201));
  Cache::Handle* h203 = cache_->Lookup(EncodeKey(202));
  Cache::Handle* h204 = cache_->Lookup(EncodeKey(203));
  Insert(300, 101);
  Insert(301, 102);
  Insert(302, 103);
  Insert(303, 104);

  // Insert many more entries than the cache capacity
  for (int i = 0; i < kCacheSize * 2; i++) {
    Insert(1000 + i, 2000 + i);
  }

  // Check whether the entries inserted at the beginning are evicted: those
  // without an extra ref are evicted, and those with one are not.
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(-1, Lookup(101));
  ASSERT_EQ(-1, Lookup(102));
  ASSERT_EQ(-1, Lookup(103));

  ASSERT_EQ(-1, Lookup(300));
  ASSERT_EQ(-1, Lookup(301));
  ASSERT_EQ(-1, Lookup(302));
  ASSERT_EQ(-1, Lookup(303));

  ASSERT_EQ(101, Lookup(200));
  ASSERT_EQ(102, Lookup(201));
  ASSERT_EQ(103, Lookup(202));
  ASSERT_EQ(104, Lookup(203));

  // Cleaning up all the handles
  cache_->Release(h201);
  cache_->Release(h202);
  cache_->Release(h203);
  cache_->Release(h204);
}

TEST_P(CacheTest, EvictEmptyCache) {
  // Insert an item larger than the capacity to trigger eviction on an empty
  // cache.
  auto cache = NewCache(1, 0, false);
  ASSERT_OK(cache->Insert("foo", nullptr, 10, nullptr));
}

TEST_P(CacheTest, EraseFromDeleter) {
  // Use a deleter that erases another entry from the cache, re-entering the
  // cache from inside the deleter callback.
  class EraseDeleter : public Cache::Deleter {
   public:
    void operator()(const Slice& /*key*/, void* value) override {
      Cache* const cache = static_cast<Cache*>(value);
      cache->Erase("foo");
    }
  };

  EraseDeleter erase_deleter;

  std::shared_ptr<Cache> cache = NewCache(10, 0, false);
  ASSERT_OK(cache->Insert("foo", nullptr, 1, nullptr));
  ASSERT_OK(cache->Insert("bar", cache.get(), 1, &erase_deleter));
  cache->Erase("bar");
  ASSERT_EQ(nullptr, cache->Lookup("foo"));
  ASSERT_EQ(nullptr, cache->Lookup("bar"));
}

TEST_P(CacheTest, ErasedHandleState) {
  // insert a key and get two handles
  Insert(100, 1000);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(h1, h2);
  ASSERT_EQ(DecodeValue(cache_->Value(h1)), 1000);
  ASSERT_EQ(DecodeValue(cache_->Value(h2)), 1000);

  // delete the key from the cache
  Erase(100);
  // can no longer find in the cache
  ASSERT_EQ(-1, Lookup(100));

  // release one handle
  cache_->Release(h1);
  // still can't find in cache
  ASSERT_EQ(-1, Lookup(100));

  cache_->Release(h2);
}

TEST_P(CacheTest, HeavyEntries) {
  // Add a bunch of light and heavy entries and then count the combined
  // size of items still in the cache, which must be approximately the
  // same as the total capacity.
  const int kLight = 1;
  const int kHeavy = 10;
  int added = 0;
  int index = 0;
  while (added < 2 * kCacheSize) {
    const int weight = (index & 1) ? kLight : kHeavy;
    Insert(index, 1000 + index, weight);
    added += weight;
    index++;
  }
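  // Roughly 2 * kCacheSize of total weight was inserted into a cache of
  // capacity kCacheSize, so about half of that weight must have been evicted.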

  int cached_weight = 0;
  for (int i = 0; i < index; i++) {
    const int weight = (i & 1 ? kLight : kHeavy);
    int r = Lookup(i);
    if (r >= 0) {
      cached_weight += weight;
      ASSERT_EQ(1000 + i, r);
    }
  }
  ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
}

TEST_P(CacheTest, NewId) {
  uint64_t a = cache_->NewId();
  uint64_t b = cache_->NewId();
  ASSERT_NE(a, b);
}

class Value {
 public:
  explicit Value(size_t v) : v_(v) {}

  size_t v_;
};

TEST_P(CacheTest, ReleaseAndErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s =
      cache->Insert(EncodeKey(100), EncodeValue(100), 1, &deleter_, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_keys_.size());
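  // Release(handle, /*force_erase=*/true) erases the entry if this handle
  // holds the last reference to it, and returns true if the entry was
  // destroyed.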
  auto erased = cache->Release(handle, true);
  ASSERT_TRUE(erased);
  // This tests that the deleter has been called
  ASSERT_EQ(1U, deleted_keys_.size());
}

TEST_P(CacheTest, ReleaseWithoutErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s =
      cache->Insert(EncodeKey(100), EncodeValue(100), 1, &deleter_, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_keys_.size());
  auto erased = cache->Release(handle);
  ASSERT_FALSE(erased);
  // This tests that the deleter is not called: when the cache has free
  // capacity, it is not expected to erase released items immediately.
  ASSERT_EQ(0U, deleted_keys_.size());
}

TEST_P(CacheTest, SetCapacity) {
  // test1: increase capacity
  // Create a cache with capacity 5, insert 5 elements, then increase the
  // capacity to 10; the returned capacity should be 10 and the usage 5.
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  // Insert 5 entries without releasing the handles.
  for (size_t i = 0; i < 5; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1,
                             SimpleDeleter<Value>::GetInstance(), &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());
  cache->SetCapacity(10);
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());

  // test2: decrease capacity
  // Insert 5 more elements into the cache, then release 5 handles; after
  // decreasing the capacity to 7, both capacity and usage should be 7.
  for (size_t i = 5; i < 10; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1,
                             SimpleDeleter<Value>::GetInstance(), &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  for (size_t i = 0; i < 5; i++) {
    cache->Release(handles[i]);
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  cache->SetCapacity(7);
  ASSERT_EQ(7U, cache->GetCapacity());
  ASSERT_EQ(7U, cache->GetUsage());

  // release remaining 5 to keep valgrind happy
  for (size_t i = 5; i < 10; i++) {
    cache->Release(handles[i]);
  }
}

TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
  // test1: set the flag to false. Insert more keys than capacity. See if they
  // all go through.
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  Status s;
  for (size_t i = 0; i < 10; i++) {
    std::string key = ToString(i + 1);
    s = cache->Insert(key, new Value(i + 1), 1,
                      SimpleDeleter<Value>::GetInstance(), &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  ASSERT_EQ(10, cache->GetUsage());
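  // Without a strict limit, usage (10) may exceed capacity (5) because every
  // entry is still pinned by its handle and nothing can be evicted.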

  // test2: set the flag to true. Insert and check if it fails.
  std::string extra_key = "extra";
  Value* extra_value = new Value(0);
  cache->SetStrictCapacityLimit(true);
  Cache::Handle* handle;
  s = cache->Insert(extra_key, extra_value, 1,
                    SimpleDeleter<Value>::GetInstance(), &handle);
  ASSERT_TRUE(s.IsIncomplete());
  ASSERT_EQ(nullptr, handle);
  ASSERT_EQ(10, cache->GetUsage());

  for (size_t i = 0; i < 10; i++) {
    cache->Release(handles[i]);
  }

  // test3: init with flag being true.
  std::shared_ptr<Cache> cache2 = NewCache(5, 0, true);
  for (size_t i = 0; i < 5; i++) {
    std::string key = ToString(i + 1);
    s = cache2->Insert(key, new Value(i + 1), 1,
                       SimpleDeleter<Value>::GetInstance(), &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  s = cache2->Insert(extra_key, extra_value, 1,
                     SimpleDeleter<Value>::GetInstance(), &handle);
  ASSERT_TRUE(s.IsIncomplete());
  ASSERT_EQ(nullptr, handle);
  // test insert without handle
  s = cache2->Insert(extra_key, extra_value, 1,
                     SimpleDeleter<Value>::GetInstance());
  // As if the key had been inserted into the cache but was evicted
  // immediately.
  ASSERT_OK(s);
  ASSERT_EQ(5, cache2->GetUsage());
  ASSERT_EQ(nullptr, cache2->Lookup(extra_key));

  for (size_t i = 0; i < 5; i++) {
    cache2->Release(handles[i]);
  }
}

TEST_P(CacheTest, OverCapacity) {
  size_t n = 10;

  // an LRUCache with capacity n and a single shard
  std::shared_ptr<Cache> cache = NewCache(n, 0, false);

  std::vector<Cache::Handle*> handles(n + 1);

  // Insert n+1 entries without releasing the handles.
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1,
                             SimpleDeleter<Value>::GetInstance(), &handles[i]);
    ASSERT_TRUE(s.ok());
  }

  // Guess what's in the cache now?
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    auto h = cache->Lookup(key);
    ASSERT_TRUE(h != nullptr);
    if (h) cache->Release(h);
  }

  // the cache is over capacity since nothing could be evicted
  ASSERT_EQ(n + 1U, cache->GetUsage());
  for (size_t i = 0; i < n + 1; i++) {
    cache->Release(handles[i]);
  }
  // Make sure eviction is triggered.
  cache->SetCapacity(n);

  // cache is under capacity now since elements were released
  ASSERT_EQ(n, cache->GetUsage());

  // Element 0 is evicted and the rest are still there. This is consistent
  // with the LRU policy, since element 0 was released first.
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    auto h = cache->Lookup(key);
    if (h) {
      ASSERT_NE(i, 0U);
      cache->Release(h);
    } else {
      ASSERT_EQ(i, 0U);
    }
  }
}

namespace {
std::vector<std::pair<int, int>> callback_state;
void callback(void* entry, size_t charge) {
  callback_state.push_back({DecodeValue(entry), static_cast<int>(charge)});
}
}  // namespace

TEST_P(CacheTest, ApplyToAllCacheEntriesTest) {
  std::vector<std::pair<int, int>> inserted;
  callback_state.clear();

  for (int i = 0; i < 10; ++i) {
    Insert(i, i * 2, i + 1);
    inserted.push_back({i * 2, i + 1});
  }
  cache_->ApplyToAllCacheEntries(callback, true);

  std::sort(inserted.begin(), inserted.end());
  std::sort(callback_state.begin(), callback_state.end());
  ASSERT_TRUE(inserted == callback_state);
}

TEST_P(CacheTest, DefaultShardBits) {
  // Check that the default number of shard bits is derived from the capacity:
  // each shard should get at least 512KB of capacity, with the number of
  // shard bits capped at 6.
  std::shared_ptr<Cache> cache = NewCache(16 * 1024L * 1024L);
  ShardedCache* sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(5, sc->GetNumShardBits());

  cache = NewLRUCache(511 * 1024L, -1, true);
  sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(0, sc->GetNumShardBits());

  cache = NewLRUCache(1024L * 1024L * 1024L, -1, true);
  sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(6, sc->GetNumShardBits());
}

TEST_P(CacheTest, GetCharge) {
  Insert(1, 2);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(1));
  ASSERT_EQ(2, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1, cache_->GetCharge(h1));
  cache_->Release(h1);
}

#ifdef SUPPORT_CLOCK_CACHE
std::shared_ptr<Cache> (*new_clock_cache_func)(
    size_t, int, bool, CacheMetadataChargePolicy) = NewClockCache;
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
                        testing::Values(kLRU, kClock));
#else
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest, testing::Values(kLRU));
#endif  // SUPPORT_CLOCK_CACHE
INSTANTIATE_TEST_CASE_P(CacheTestInstance, LRUCacheTest, testing::Values(kLRU));

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}