1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
5 //
6 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <cstdlib>
#include <memory>

#include "cache/lru_cache.h"
#include "db/db_test_util.h"
#include "port/stack_trace.h"
#include "util/compression.h"
14
15 namespace ROCKSDB_NAMESPACE {
16
// Test fixture for block-cache behavior. It keeps baseline snapshots of the
// relevant statistics tickers; Record*() captures the current values and
// Check*() asserts the delta since the last snapshot, then advances the
// baseline.
class DBBlockCacheTest : public DBTestBase {
 private:
  // Baseline ticker values for the (uncompressed) block cache.
  size_t miss_count_ = 0;
  size_t hit_count_ = 0;
  size_t insert_count_ = 0;
  size_t failure_count_ = 0;
  // Baseline ticker values for compression-dictionary blocks.
  size_t compression_dict_miss_count_ = 0;
  size_t compression_dict_hit_count_ = 0;
  size_t compression_dict_insert_count_ = 0;
  // Baseline ticker values for the compressed block cache.
  size_t compressed_miss_count_ = 0;
  size_t compressed_hit_count_ = 0;
  size_t compressed_insert_count_ = 0;
  size_t compressed_failure_count_ = 0;

 public:
  const size_t kNumBlocks = 10;
  const size_t kValueSize = 100;

  DBBlockCacheTest() : DBTestBase("/db_block_cache_test") {}

  // Returns table options tuned so each key-value pair lands in its own
  // data block, making per-block cache accounting predictable.
  BlockBasedTableOptions GetTableOptions() {
    BlockBasedTableOptions table_options;
    // Set a small enough block size so that each key-value get its own block.
    table_options.block_size = 1;
    return table_options;
  }

  // Returns DB options with statistics enabled and the given table options
  // installed via a fresh BlockBasedTableFactory.
  Options GetOptions(const BlockBasedTableOptions& table_options) {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.avoid_flush_during_recovery = false;
    // options.compression = kNoCompression;
    options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
    options.table_factory.reset(new BlockBasedTableFactory(table_options));
    return options;
  }

  // Writes kNumBlocks values of kValueSize bytes each, keyed "0".."9".
  void InitTable(const Options& /*options*/) {
    std::string value(kValueSize, 'a');
    for (size_t i = 0; i < kNumBlocks; i++) {
      ASSERT_OK(Put(ToString(i), value.c_str()));
    }
  }

  // Snapshots block-cache and compressed-block-cache tickers as the new
  // baseline for CheckCacheCounters()/CheckCompressedCacheCounters().
  void RecordCacheCounters(const Options& options) {
    miss_count_ = TestGetTickerCount(options, BLOCK_CACHE_MISS);
    hit_count_ = TestGetTickerCount(options, BLOCK_CACHE_HIT);
    insert_count_ = TestGetTickerCount(options, BLOCK_CACHE_ADD);
    failure_count_ = TestGetTickerCount(options, BLOCK_CACHE_ADD_FAILURES);
    compressed_miss_count_ =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS);
    compressed_hit_count_ =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT);
    compressed_insert_count_ =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD);
    compressed_failure_count_ =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
  }

  // Snapshots compression-dictionary tickers as the new baseline for
  // CheckCacheCountersForCompressionDict().
  void RecordCacheCountersForCompressionDict(const Options& options) {
    compression_dict_miss_count_ =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_MISS);
    compression_dict_hit_count_ =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_HIT);
    compression_dict_insert_count_ =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_ADD);
  }

  // Asserts that block-cache tickers advanced by exactly the expected deltas
  // since the last snapshot, then advances the baseline.
  void CheckCacheCounters(const Options& options, size_t expected_misses,
                          size_t expected_hits, size_t expected_inserts,
                          size_t expected_failures) {
    size_t new_miss_count = TestGetTickerCount(options, BLOCK_CACHE_MISS);
    size_t new_hit_count = TestGetTickerCount(options, BLOCK_CACHE_HIT);
    size_t new_insert_count = TestGetTickerCount(options, BLOCK_CACHE_ADD);
    size_t new_failure_count =
        TestGetTickerCount(options, BLOCK_CACHE_ADD_FAILURES);
    ASSERT_EQ(miss_count_ + expected_misses, new_miss_count);
    ASSERT_EQ(hit_count_ + expected_hits, new_hit_count);
    ASSERT_EQ(insert_count_ + expected_inserts, new_insert_count);
    ASSERT_EQ(failure_count_ + expected_failures, new_failure_count);
    miss_count_ = new_miss_count;
    hit_count_ = new_hit_count;
    insert_count_ = new_insert_count;
    failure_count_ = new_failure_count;
  }

  // Same as CheckCacheCounters(), but for the compression-dictionary tickers
  // (no failure counter exists for those).
  void CheckCacheCountersForCompressionDict(
      const Options& options, size_t expected_compression_dict_misses,
      size_t expected_compression_dict_hits,
      size_t expected_compression_dict_inserts) {
    size_t new_compression_dict_miss_count =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_MISS);
    size_t new_compression_dict_hit_count =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_HIT);
    size_t new_compression_dict_insert_count =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSION_DICT_ADD);
    ASSERT_EQ(compression_dict_miss_count_ + expected_compression_dict_misses,
              new_compression_dict_miss_count);
    ASSERT_EQ(compression_dict_hit_count_ + expected_compression_dict_hits,
              new_compression_dict_hit_count);
    ASSERT_EQ(
        compression_dict_insert_count_ + expected_compression_dict_inserts,
        new_compression_dict_insert_count);
    compression_dict_miss_count_ = new_compression_dict_miss_count;
    compression_dict_hit_count_ = new_compression_dict_hit_count;
    compression_dict_insert_count_ = new_compression_dict_insert_count;
  }

  // Same as CheckCacheCounters(), but for the compressed block cache tickers.
  void CheckCompressedCacheCounters(const Options& options,
                                    size_t expected_misses,
                                    size_t expected_hits,
                                    size_t expected_inserts,
                                    size_t expected_failures) {
    size_t new_miss_count =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS);
    size_t new_hit_count =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT);
    size_t new_insert_count =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD);
    size_t new_failure_count =
        TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
    ASSERT_EQ(compressed_miss_count_ + expected_misses, new_miss_count);
    ASSERT_EQ(compressed_hit_count_ + expected_hits, new_hit_count);
    ASSERT_EQ(compressed_insert_count_ + expected_inserts, new_insert_count);
    ASSERT_EQ(compressed_failure_count_ + expected_failures, new_failure_count);
    compressed_miss_count_ = new_miss_count;
    compressed_hit_count_ = new_hit_count;
    compressed_insert_count_ = new_insert_count;
    compressed_failure_count_ = new_failure_count;
  }
};
148
// Verifies that a live iterator accounts for block cache usage even with
// fill_cache == false, and that releasing it drops usage back to zero.
TEST_F(DBBlockCacheTest, IteratorBlockCacheUsage) {
  ReadOptions read_options;
  read_options.fill_cache = false;
  auto table_options = GetTableOptions();
  auto options = GetOptions(table_options);
  InitTable(options);

  // Zero-capacity cache: nothing can stay resident, so any usage observed
  // below is due to blocks pinned by the iterator.
  std::shared_ptr<Cache> cache = NewLRUCache(0, 0, false);
  table_options.block_cache = cache;
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  Reopen(options);
  RecordCacheCounters(options);

  ASSERT_EQ(0, cache->GetUsage());
  // unique_ptr so the iterator is released even if an ASSERT below returns
  // early (raw pointer + manual delete leaked in that case).
  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
  iter->Seek(ToString(0));
  ASSERT_OK(iter->status());
  ASSERT_LT(0, cache->GetUsage());
  iter.reset();
  ASSERT_EQ(0, cache->GetUsage());
}
173
// Exercises the uncompressed block cache only: loads one block per iterator,
// pins them, then verifies the strict-capacity-limit failure path and that
// re-reads after release are pure cache hits.
TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) {
  ReadOptions read_options;
  auto table_options = GetTableOptions();
  auto options = GetOptions(table_options);
  InitTable(options);

  std::shared_ptr<Cache> cache = NewLRUCache(0, 0, false);
  table_options.block_cache = cache;
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  Reopen(options);
  RecordCacheCounters(options);

  std::vector<std::unique_ptr<Iterator>> iterators(kNumBlocks - 1);

  // Load blocks into cache. Each Seek misses and inserts exactly one block,
  // which stays pinned while the iterator is alive.
  for (size_t i = 0; i < kNumBlocks - 1; i++) {
    iterators[i].reset(db_->NewIterator(read_options));
    iterators[i]->Seek(ToString(i));
    ASSERT_OK(iterators[i]->status());
    CheckCacheCounters(options, 1, 0, 1, 0);
  }
  size_t usage = cache->GetUsage();
  ASSERT_LT(0, usage);
  cache->SetCapacity(usage);
  ASSERT_EQ(usage, cache->GetPinnedUsage());

  // Test with strict capacity limit: the cache is full of pinned blocks, so
  // inserting one more must fail and the iterator reports Incomplete.
  // unique_ptr ensures the iterator is freed even if an ASSERT returns early
  // (the raw pointer + manual delete leaked in that case).
  cache->SetStrictCapacityLimit(true);
  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
  iter->Seek(ToString(kNumBlocks - 1));
  ASSERT_TRUE(iter->status().IsIncomplete());
  CheckCacheCounters(options, 1, 0, 0, 1);
  iter.reset();

  // Release iterators and access cache again.
  for (size_t i = 0; i < kNumBlocks - 1; i++) {
    iterators[i].reset();
    CheckCacheCounters(options, 0, 0, 0, 0);
  }
  ASSERT_EQ(0, cache->GetPinnedUsage());
  // Blocks are still resident (capacity == usage), so these are pure hits.
  for (size_t i = 0; i < kNumBlocks - 1; i++) {
    iterators[i].reset(db_->NewIterator(read_options));
    iterators[i]->Seek(ToString(i));
    ASSERT_OK(iterators[i]->status());
    CheckCacheCounters(options, 0, 1, 0, 0);
  }
}
225
226 #ifdef SNAPPY
// Exercises the interaction between the uncompressed and compressed block
// caches: under a strict capacity limit the uncompressed insert fails while
// the compressed cache still accepts the block, and after lifting the limit
// the read is served as a compressed-cache hit.
TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) {
  ReadOptions read_options;
  auto table_options = GetTableOptions();
  auto options = GetOptions(table_options);
  options.compression = CompressionType::kSnappyCompression;
  InitTable(options);

  // Zero-capacity uncompressed cache (only pinned blocks occupy it) plus a
  // large compressed cache.
  std::shared_ptr<Cache> cache = NewLRUCache(0, 0, false);
  std::shared_ptr<Cache> compressed_cache = NewLRUCache(1 << 25, 0, false);
  table_options.block_cache = cache;
  table_options.block_cache_compressed = compressed_cache;
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  Reopen(options);
  RecordCacheCounters(options);

  std::vector<std::unique_ptr<Iterator>> iterators(kNumBlocks - 1);
  Iterator* iter = nullptr;

  // Load blocks into cache. Each Seek misses and inserts in BOTH caches.
  for (size_t i = 0; i < kNumBlocks - 1; i++) {
    iter = db_->NewIterator(read_options);
    iter->Seek(ToString(i));
    ASSERT_OK(iter->status());
    CheckCacheCounters(options, 1, 0, 1, 0);
    CheckCompressedCacheCounters(options, 1, 0, 1, 0);
    iterators[i].reset(iter);
  }
  size_t usage = cache->GetUsage();
  ASSERT_LT(0, usage);
  ASSERT_EQ(usage, cache->GetPinnedUsage());
  size_t compressed_usage = compressed_cache->GetUsage();
  ASSERT_LT(0, compressed_usage);
  // Compressed block cache cannot be pinned.
  ASSERT_EQ(0, compressed_cache->GetPinnedUsage());

  // Set strict capacity limit flag. Now block will only load into compressed
  // block cache.
  cache->SetCapacity(usage);
  cache->SetStrictCapacityLimit(true);
  ASSERT_EQ(usage, cache->GetPinnedUsage());
  iter = db_->NewIterator(read_options);
  iter->Seek(ToString(kNumBlocks - 1));
  ASSERT_TRUE(iter->status().IsIncomplete());
  // Uncompressed insert fails; compressed insert still succeeds.
  CheckCacheCounters(options, 1, 0, 0, 1);
  CheckCompressedCacheCounters(options, 1, 0, 1, 0);
  delete iter;
  iter = nullptr;

  // Clear strict capacity limit flag. This time we shall hit compressed block
  // cache.
  cache->SetStrictCapacityLimit(false);
  iter = db_->NewIterator(read_options);
  iter->Seek(ToString(kNumBlocks - 1));
  ASSERT_OK(iter->status());
  CheckCacheCounters(options, 1, 0, 1, 0);
  CheckCompressedCacheCounters(options, 0, 1, 0, 0);
  delete iter;
  iter = nullptr;
}
286 #endif // SNAPPY
287
288 #ifndef ROCKSDB_LITE
289
290 // Make sure that when options.block_cache is set, after a new table is
291 // created its index/filter blocks are added to block cache.
// Make sure that when options.block_cache is set, after a new table is
// created its index/filter blocks are added to block cache.
TEST_F(DBBlockCacheTest, IndexAndFilterBlocksOfNewTableAddedToCache) {
  Options options = CurrentOptions();
  options.create_if_missing = true;
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
  BlockBasedTableOptions table_options;
  table_options.cache_index_and_filter_blocks = true;
  table_options.filter_policy.reset(NewBloomFilterPolicy(20));
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  CreateAndReopenWithCF({"pikachu"}, options);

  ASSERT_OK(Put(1, "key", "val"));
  // Create a new table.
  ASSERT_OK(Flush(1));

  // index/filter blocks added to block cache right after table creation.
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
  ASSERT_EQ(2, /* only index/filter were added */
            TestGetTickerCount(options, BLOCK_CACHE_ADD));
  ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS));
  // No table-reader memory outside the cache since index/filter live in it.
  uint64_t int_num;
  ASSERT_TRUE(
      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
  ASSERT_EQ(int_num, 0U);

  // Make sure filter block is in cache.
  std::string value;
  db_->KeyMayExist(ReadOptions(), handles_[1], "key", &value);

  // Miss count should remain the same.
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));

  db_->KeyMayExist(ReadOptions(), handles_[1], "key", &value);
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
  ASSERT_EQ(2, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));

  // Make sure index block is in cache.
  auto index_block_hit = TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT);
  value = Get(1, "key");
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
  ASSERT_EQ(index_block_hit + 1,
            TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));

  value = Get(1, "key");
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
  ASSERT_EQ(index_block_hit + 2,
            TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
}
342
343 // With fill_cache = false, fills up the cache, then iterates over the entire
344 // db, verify dummy entries inserted in `BlockBasedTable::NewDataBlockIterator`
345 // does not cause heap-use-after-free errors in COMPILE_WITH_ASAN=1 runs
// With fill_cache = false, fills up the cache, then iterates over the entire
// db, verify dummy entries inserted in `BlockBasedTable::NewDataBlockIterator`
// does not cause heap-use-after-free errors in COMPILE_WITH_ASAN=1 runs
TEST_F(DBBlockCacheTest, FillCacheAndIterateDB) {
  ReadOptions read_options;
  read_options.fill_cache = false;
  auto table_options = GetTableOptions();
  auto options = GetOptions(table_options);
  InitTable(options);

  // Deliberately tiny (10 byte) strict-capacity cache to force the dummy-entry
  // code paths.
  std::shared_ptr<Cache> cache = NewLRUCache(10, 0, true);
  table_options.block_cache = cache;
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  Reopen(options);
  // Three SST files so the iterator crosses file boundaries.
  ASSERT_OK(Put("key1", "val1"));
  ASSERT_OK(Put("key2", "val2"));
  ASSERT_OK(Flush());
  ASSERT_OK(Put("key3", "val3"));
  ASSERT_OK(Put("key4", "val4"));
  ASSERT_OK(Flush());
  ASSERT_OK(Put("key5", "val5"));
  ASSERT_OK(Put("key6", "val6"));
  ASSERT_OK(Flush());

  // unique_ptr so the iterator is released even if an ASSERT returns early.
  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
  iter->Seek(ToString(0));
  while (iter->Valid()) {
    iter->Next();
  }
  // The scan must terminate cleanly, not because of a read error.
  ASSERT_OK(iter->status());
  iter.reset();
}
377
// Verifies the BLOCK_CACHE_{INDEX,FILTER}_BYTES_INSERT byte-accounting
// tickers as index/filter blocks are inserted (and, once re-enabled,
// evicted) from a capacity-constrained cache.
TEST_F(DBBlockCacheTest, IndexAndFilterBlocksStats) {
  Options options = CurrentOptions();
  options.create_if_missing = true;
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
  BlockBasedTableOptions table_options;
  table_options.cache_index_and_filter_blocks = true;
  LRUCacheOptions co;
  // 500 bytes are enough to hold the first two blocks
  co.capacity = 500;
  co.num_shard_bits = 0;
  co.strict_capacity_limit = false;
  // Exclude cache metadata so usage equals exactly the inserted bytes.
  co.metadata_charge_policy = kDontChargeCacheMetadata;
  std::shared_ptr<Cache> cache = NewLRUCache(co);
  table_options.block_cache = cache;
  table_options.filter_policy.reset(NewBloomFilterPolicy(20, true));
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  CreateAndReopenWithCF({"pikachu"}, options);

  ASSERT_OK(Put(1, "longer_key", "val"));
  // Create a new table
  ASSERT_OK(Flush(1));
  size_t index_bytes_insert =
      TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_INSERT);
  size_t filter_bytes_insert =
      TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_INSERT);
  ASSERT_GT(index_bytes_insert, 0);
  ASSERT_GT(filter_bytes_insert, 0);
  // Cache usage must equal exactly the bytes inserted (no metadata charge).
  ASSERT_EQ(cache->GetUsage(), index_bytes_insert + filter_bytes_insert);
  // set the cache capacity to the current usage
  cache->SetCapacity(index_bytes_insert + filter_bytes_insert);
  // The index and filter eviction statistics were broken by the refactoring
  // that moved the readers out of the block cache. Disabling these until we can
  // bring the stats back.
  // ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_EVICT), 0);
  // ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_EVICT), 0);
  // Note that the second key needs to be no longer than the first one.
  // Otherwise the second index block may not fit in cache.
  ASSERT_OK(Put(1, "key", "val"));
  // Create a new table
  ASSERT_OK(Flush(1));
  // cache evicted old index and block entries
  ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_INSERT),
            index_bytes_insert);
  ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_INSERT),
            filter_bytes_insert);
  // The index and filter eviction statistics were broken by the refactoring
  // that moved the readers out of the block cache. Disabling these until we can
  // bring the stats back.
  // ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_EVICT),
  //           index_bytes_insert);
  // ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_EVICT),
  //           filter_bytes_insert);
}
431
432 namespace {
433
434 // A mock cache wraps LRUCache, and record how many entries have been
435 // inserted for each priority.
436 class MockCache : public LRUCache {
437 public:
438 static uint32_t high_pri_insert_count;
439 static uint32_t low_pri_insert_count;
440
MockCache()441 MockCache()
442 : LRUCache((size_t)1 << 25 /*capacity*/, 0 /*num_shard_bits*/,
443 false /*strict_capacity_limit*/, 0.0 /*high_pri_pool_ratio*/) {
444 }
445
Insert(const Slice & key,void * value,size_t charge,Deleter * deleter,Handle ** handle,Priority priority)446 Status Insert(const Slice& key, void* value, size_t charge, Deleter* deleter,
447 Handle** handle, Priority priority) override {
448 if (priority == Priority::LOW) {
449 low_pri_insert_count++;
450 } else {
451 high_pri_insert_count++;
452 }
453 return LRUCache::Insert(key, value, charge, deleter, handle, priority);
454 }
455 };
456
457 uint32_t MockCache::high_pri_insert_count = 0;
458 uint32_t MockCache::low_pri_insert_count = 0;
459
460 } // anonymous namespace
461
// Verifies that index/filter blocks are inserted with the priority selected
// by cache_index_and_filter_blocks_with_high_priority, while data blocks are
// always inserted with low priority.
TEST_F(DBBlockCacheTest, IndexAndFilterBlocksCachePriority) {
  for (auto priority : {Cache::Priority::LOW, Cache::Priority::HIGH}) {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
    BlockBasedTableOptions table_options;
    table_options.cache_index_and_filter_blocks = true;
    // MockCache records every insert's priority in its static counters.
    table_options.block_cache.reset(new MockCache());
    table_options.filter_policy.reset(NewBloomFilterPolicy(20));
    table_options.cache_index_and_filter_blocks_with_high_priority =
        priority == Cache::Priority::HIGH ? true : false;
    options.table_factory.reset(new BlockBasedTableFactory(table_options));
    DestroyAndReopen(options);

    // Reset the static counters for this loop iteration.
    MockCache::high_pri_insert_count = 0;
    MockCache::low_pri_insert_count = 0;

    // Create a new table.
    ASSERT_OK(Put("foo", "value"));
    ASSERT_OK(Put("bar", "value"));
    ASSERT_OK(Flush());
    ASSERT_EQ(1, NumTableFilesAtLevel(0));

    // index/filter blocks added to block cache right after table creation.
    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
    ASSERT_EQ(2, /* only index/filter were added */
              TestGetTickerCount(options, BLOCK_CACHE_ADD));
    ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS));
    if (priority == Cache::Priority::LOW) {
      ASSERT_EQ(0u, MockCache::high_pri_insert_count);
      ASSERT_EQ(2u, MockCache::low_pri_insert_count);
    } else {
      ASSERT_EQ(2u, MockCache::high_pri_insert_count);
      ASSERT_EQ(0u, MockCache::low_pri_insert_count);
    }

    // Access data block.
    ASSERT_EQ("value", Get("foo"));

    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
    ASSERT_EQ(3, /*adding data block*/
              TestGetTickerCount(options, BLOCK_CACHE_ADD));
    ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS));

    // Data block should be inserted with low priority.
    if (priority == Cache::Priority::LOW) {
      ASSERT_EQ(0u, MockCache::high_pri_insert_count);
      ASSERT_EQ(3u, MockCache::low_pri_insert_count);
    } else {
      ASSERT_EQ(2u, MockCache::high_pri_insert_count);
      ASSERT_EQ(1u, MockCache::low_pri_insert_count);
    }
  }
}
518
// Verifies that paranoid_file_checks reads back (and thereby caches) every
// newly created SST file's data blocks, and that disabling the option via
// SetOptions() stops those extra reads.
TEST_F(DBBlockCacheTest, ParanoidFileChecks) {
  Options options = CurrentOptions();
  options.create_if_missing = true;
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
  options.level0_file_num_compaction_trigger = 2;
  options.paranoid_file_checks = true;
  BlockBasedTableOptions table_options;
  table_options.cache_index_and_filter_blocks = false;
  table_options.filter_policy.reset(NewBloomFilterPolicy(20));
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  CreateAndReopenWithCF({"pikachu"}, options);

  ASSERT_OK(Put(1, "1_key", "val"));
  ASSERT_OK(Put(1, "9_key", "val"));
  // Create a new table.
  ASSERT_OK(Flush(1));
  ASSERT_EQ(1, /* read and cache data block */
            TestGetTickerCount(options, BLOCK_CACHE_ADD));

  ASSERT_OK(Put(1, "1_key2", "val2"));
  ASSERT_OK(Put(1, "9_key2", "val2"));
  // Create a new SST file. This will further trigger a compaction
  // and generate another file.
  ASSERT_OK(Flush(1));
  dbfull()->TEST_WaitForCompact();
  ASSERT_EQ(3, /* Totally 3 files created up to now */
            TestGetTickerCount(options, BLOCK_CACHE_ADD));

  // After disabling options.paranoid_file_checks. NO further block
  // is added after generating a new file.
  ASSERT_OK(
      dbfull()->SetOptions(handles_[1], {{"paranoid_file_checks", "false"}}));

  ASSERT_OK(Put(1, "1_key3", "val3"));
  ASSERT_OK(Put(1, "9_key3", "val3"));
  ASSERT_OK(Flush(1));
  ASSERT_OK(Put(1, "1_key4", "val4"));
  ASSERT_OK(Put(1, "9_key4", "val4"));
  ASSERT_OK(Flush(1));
  dbfull()->TEST_WaitForCompact();
  // Counter unchanged: no verification reads happened for the new files.
  ASSERT_EQ(3, /* Totally 3 files created up to now */
            TestGetTickerCount(options, BLOCK_CACHE_ADD));
}
562
// Exercises the four combinations of uncompressed/compressed block cache
// configuration and verifies each triggers the expected hit/miss code paths.
TEST_F(DBBlockCacheTest, CompressedCache) {
  if (!Snappy_Supported()) {
    return;
  }
  int num_iter = 80;

  // Run this test three iterations.
  // Iteration 1: only a uncompressed block cache
  // Iteration 2: only a compressed block cache
  // Iteration 3: both block cache and compressed cache
  // Iteration 4: both block cache and compressed cache, but DB is not
  // compressed
  for (int iter = 0; iter < 4; iter++) {
    Options options = CurrentOptions();
    options.write_buffer_size = 64 * 1024;  // small write buffer
    options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();

    BlockBasedTableOptions table_options;
    switch (iter) {
      case 0:
        // only uncompressed block cache
        table_options.block_cache = NewLRUCache(8 * 1024);
        table_options.block_cache_compressed = nullptr;
        options.table_factory.reset(NewBlockBasedTableFactory(table_options));
        break;
      case 1:
        // no block cache, only compressed cache
        table_options.no_block_cache = true;
        table_options.block_cache = nullptr;
        table_options.block_cache_compressed = NewLRUCache(8 * 1024);
        options.table_factory.reset(NewBlockBasedTableFactory(table_options));
        break;
      case 2:
        // both compressed and uncompressed block cache
        table_options.block_cache = NewLRUCache(1024);
        table_options.block_cache_compressed = NewLRUCache(8 * 1024);
        options.table_factory.reset(NewBlockBasedTableFactory(table_options));
        break;
      case 3:
        // both block cache and compressed cache, but DB is not compressed
        // also, make block cache sizes bigger, to trigger block cache hits
        table_options.block_cache = NewLRUCache(1024 * 1024);
        table_options.block_cache_compressed = NewLRUCache(8 * 1024 * 1024);
        options.table_factory.reset(NewBlockBasedTableFactory(table_options));
        options.compression = kNoCompression;
        break;
      default:
        FAIL();
    }
    CreateAndReopenWithCF({"pikachu"}, options);
    // default column family doesn't have block cache
    Options no_block_cache_opts;
    no_block_cache_opts.statistics = options.statistics;
    no_block_cache_opts = CurrentOptions(no_block_cache_opts);
    BlockBasedTableOptions table_options_no_bc;
    table_options_no_bc.no_block_cache = true;
    no_block_cache_opts.table_factory.reset(
        NewBlockBasedTableFactory(table_options_no_bc));
    ReopenWithColumnFamilies(
        {"default", "pikachu"},
        std::vector<Options>({no_block_cache_opts, options}));

    Random rnd(301);

    // Write 8MB (80 values, each 100K)
    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
    std::vector<std::string> values;
    std::string str;
    for (int i = 0; i < num_iter; i++) {
      if (i % 4 == 0) {  // high compression ratio
        str = RandomString(&rnd, 1000);
      }
      values.push_back(str);
      ASSERT_OK(Put(1, Key(i), values[i]));
    }

    // flush all data from memtable so that reads are from block cache
    ASSERT_OK(Flush(1));

    for (int i = 0; i < num_iter; i++) {
      ASSERT_EQ(Get(1, Key(i)), values[i]);
    }

    // check that we triggered the appropriate code paths in the cache
    switch (iter) {
      case 0:
        // only uncompressed block cache
        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
        ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
        break;
      case 1:
        // no block cache, only compressed cache
        ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
        break;
      case 2:
        // both compressed and uncompressed block cache
        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
        break;
      case 3:
        // both compressed and uncompressed block cache
        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_HIT), 0);
        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
        // compressed doesn't have any hits since blocks are not compressed on
        // storage
        ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT), 0);
        break;
      default:
        FAIL();
    }

    options.create_if_missing = true;
    DestroyAndReopen(options);
  }
}
680
// Verifies compression-dictionary meta-blocks are preloaded into the block
// cache when tables are opened, and that subsequent reads hit the cached
// dictionary instead of re-reading it.
TEST_F(DBBlockCacheTest, CacheCompressionDict) {
  const int kNumFiles = 4;
  const int kNumEntriesPerFile = 128;
  const int kNumBytesPerEntry = 1024;

  // Try all the available libraries that support dictionary compression
  std::vector<CompressionType> compression_types;
  if (Zlib_Supported()) {
    compression_types.push_back(kZlibCompression);
  }
  if (LZ4_Supported()) {
    compression_types.push_back(kLZ4Compression);
    compression_types.push_back(kLZ4HCCompression);
  }
  if (ZSTD_Supported()) {
    compression_types.push_back(kZSTD);
  } else if (ZSTDNotFinal_Supported()) {
    compression_types.push_back(kZSTDNotFinalCompression);
  }
  Random rnd(301);
  for (auto compression_type : compression_types) {
    Options options = CurrentOptions();
    options.compression = compression_type;
    // Non-zero max_dict_bytes enables dictionary compression.
    options.compression_opts.max_dict_bytes = 4096;
    options.create_if_missing = true;
    options.num_levels = 2;
    options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
    options.target_file_size_base = kNumEntriesPerFile * kNumBytesPerEntry;
    BlockBasedTableOptions table_options;
    table_options.cache_index_and_filter_blocks = true;
    table_options.block_cache.reset(new MockCache());
    options.table_factory.reset(new BlockBasedTableFactory(table_options));
    DestroyAndReopen(options);

    RecordCacheCountersForCompressionDict(options);

    // Write kNumFiles L0 files; compaction moves them all to L1, producing
    // one compression dictionary per output file.
    for (int i = 0; i < kNumFiles; ++i) {
      ASSERT_EQ(i, NumTableFilesAtLevel(0, 0));
      for (int j = 0; j < kNumEntriesPerFile; ++j) {
        std::string value = RandomString(&rnd, kNumBytesPerEntry);
        ASSERT_OK(Put(Key(j * kNumFiles + i), value.c_str()));
      }
      ASSERT_OK(Flush());
    }
    dbfull()->TEST_WaitForCompact();
    ASSERT_EQ(0, NumTableFilesAtLevel(0));
    ASSERT_EQ(kNumFiles, NumTableFilesAtLevel(1));

    // Compression dictionary blocks are preloaded.
    CheckCacheCountersForCompressionDict(
        options, kNumFiles /* expected_compression_dict_misses */,
        0 /* expected_compression_dict_hits */,
        kNumFiles /* expected_compression_dict_inserts */);

    // Seek to a key in a file. It should cause the SST's dictionary meta-block
    // to be read.
    RecordCacheCounters(options);
    RecordCacheCountersForCompressionDict(options);
    ReadOptions read_options;
    ASSERT_NE("NOT_FOUND", Get(Key(kNumFiles * kNumEntriesPerFile - 1)));
    // Two block hits: index and dictionary since they are prefetched
    // One block missed/added: data block
    CheckCacheCounters(options, 1 /* expected_misses */, 2 /* expected_hits */,
                       1 /* expected_inserts */, 0 /* expected_failures */);
    CheckCacheCountersForCompressionDict(
        options, 0 /* expected_compression_dict_misses */,
        1 /* expected_compression_dict_hits */,
        0 /* expected_compression_dict_inserts */);
  }
}
751
752 #endif // ROCKSDB_LITE
753
754 } // namespace ROCKSDB_NAMESPACE
755
main(int argc,char ** argv)756 int main(int argc, char** argv) {
757 ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
758 ::testing::InitGoogleTest(&argc, argv);
759 return RUN_ALL_TESTS();
760 }
761