1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
5 //
6 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE file. See the AUTHORS file for names of contributors.
9
10 #include <stdio.h>
11
12 #include <algorithm>
13 #include <string>
14
15 #include "db/db_test_util.h"
16 #include "port/stack_trace.h"
17 #include "rocksdb/listener.h"
18 #include "rocksdb/options.h"
19 #include "rocksdb/perf_context.h"
20 #include "rocksdb/perf_level.h"
21 #include "rocksdb/table.h"
22 #include "util/random.h"
23 #include "util/string_util.h"
24
25 namespace ROCKSDB_NAMESPACE {
26
27 class DBPropertiesTest : public DBTestBase {
28 public:
29   DBPropertiesTest() : DBTestBase("/db_properties_test") {}
30 };
31
32 #ifndef ROCKSDB_LITE
33 TEST_F(DBPropertiesTest, Empty) {
34 do {
35 Options options;
36 options.env = env_;
37 options.write_buffer_size = 100000; // Small write buffer
38 options.allow_concurrent_memtable_write = false;
39 options = CurrentOptions(options);
40 CreateAndReopenWithCF({"pikachu"}, options);
41
42 std::string num;
43 ASSERT_TRUE(dbfull()->GetProperty(
44 handles_[1], "rocksdb.num-entries-active-mem-table", &num));
45 ASSERT_EQ("0", num);
46
47 ASSERT_OK(Put(1, "foo", "v1"));
48 ASSERT_EQ("v1", Get(1, "foo"));
49 ASSERT_TRUE(dbfull()->GetProperty(
50 handles_[1], "rocksdb.num-entries-active-mem-table", &num));
51 ASSERT_EQ("1", num);
52
53 // Block sync calls
54 env_->delay_sstable_sync_.store(true, std::memory_order_release);
55 Put(1, "k1", std::string(100000, 'x')); // Fill memtable
56 ASSERT_TRUE(dbfull()->GetProperty(
57 handles_[1], "rocksdb.num-entries-active-mem-table", &num));
58 ASSERT_EQ("2", num);
59
60 Put(1, "k2", std::string(100000, 'y')); // Trigger compaction
61 ASSERT_TRUE(dbfull()->GetProperty(
62 handles_[1], "rocksdb.num-entries-active-mem-table", &num));
63 ASSERT_EQ("1", num);
64
65 ASSERT_EQ("v1", Get(1, "foo"));
66 // Release sync calls
67 env_->delay_sstable_sync_.store(false, std::memory_order_release);
68
69 ASSERT_OK(db_->DisableFileDeletions());
70 ASSERT_TRUE(
71 dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
72 ASSERT_EQ("0", num);
73
74 ASSERT_OK(db_->DisableFileDeletions());
75 ASSERT_TRUE(
76 dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
77 ASSERT_EQ("0", num);
78
79 ASSERT_OK(db_->DisableFileDeletions());
80 ASSERT_TRUE(
81 dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
82 ASSERT_EQ("0", num);
83
84 ASSERT_OK(db_->EnableFileDeletions(false));
85 ASSERT_TRUE(
86 dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
87 ASSERT_EQ("0", num);
88
89 ASSERT_OK(db_->EnableFileDeletions());
90 ASSERT_TRUE(
91 dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
92 ASSERT_EQ("1", num);
93 } while (ChangeOptions());
94 }
95
96 TEST_F(DBPropertiesTest, CurrentVersionNumber) {
97 uint64_t v1, v2, v3;
98 ASSERT_TRUE(
99 dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v1));
100 Put("12345678", "");
101 ASSERT_TRUE(
102 dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v2));
103 Flush();
104 ASSERT_TRUE(
105 dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v3));
106
107 ASSERT_EQ(v1, v2);
108 ASSERT_GT(v3, v2);
109 }
110
111 TEST_F(DBPropertiesTest, GetAggregatedIntPropertyTest) {
112 const int kKeySize = 100;
113 const int kValueSize = 500;
114 const int kKeyNum = 100;
115
116 Options options;
117 options.env = env_;
118 options.create_if_missing = true;
119 options.write_buffer_size = (kKeySize + kValueSize) * kKeyNum / 10;
120 // Make them never flush
121 options.min_write_buffer_number_to_merge = 1000;
122 options.max_write_buffer_number = 1000;
123 options = CurrentOptions(options);
124 CreateAndReopenWithCF({"one", "two", "three", "four"}, options);
125
126 Random rnd(301);
127 for (auto* handle : handles_) {
128 for (int i = 0; i < kKeyNum; ++i) {
129 db_->Put(WriteOptions(), handle, RandomString(&rnd, kKeySize),
130 RandomString(&rnd, kValueSize));
131 }
132 }
133
134 uint64_t manual_sum = 0;
135 uint64_t api_sum = 0;
136 uint64_t value = 0;
137 for (auto* handle : handles_) {
138 ASSERT_TRUE(
139 db_->GetIntProperty(handle, DB::Properties::kSizeAllMemTables, &value));
140 manual_sum += value;
141 }
142 ASSERT_TRUE(db_->GetAggregatedIntProperty(DB::Properties::kSizeAllMemTables,
143 &api_sum));
144 ASSERT_GT(manual_sum, 0);
145 ASSERT_EQ(manual_sum, api_sum);
146
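  // kDBStats is a string-valued property, so aggregating it as an integer
  // across column families is expected to fail.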
147 ASSERT_FALSE(db_->GetAggregatedIntProperty(DB::Properties::kDBStats, &value));
148
149 uint64_t before_flush_trm;
150 uint64_t after_flush_trm;
151 for (auto* handle : handles_) {
152 ASSERT_TRUE(db_->GetAggregatedIntProperty(
153 DB::Properties::kEstimateTableReadersMem, &before_flush_trm));
154
155 // Issue flush and expect larger memory usage of table readers.
156 db_->Flush(FlushOptions(), handle);
157
158 ASSERT_TRUE(db_->GetAggregatedIntProperty(
159 DB::Properties::kEstimateTableReadersMem, &after_flush_trm));
160 ASSERT_GT(after_flush_trm, before_flush_trm);
161 }
162 }
163
164 namespace {
165 void ResetTableProperties(TableProperties* tp) {
166 tp->data_size = 0;
167 tp->index_size = 0;
168 tp->filter_size = 0;
169 tp->raw_key_size = 0;
170 tp->raw_value_size = 0;
171 tp->num_data_blocks = 0;
172 tp->num_entries = 0;
173 tp->num_deletions = 0;
174 tp->num_merge_operands = 0;
175 tp->num_range_deletions = 0;
176 }
177
178 void ParseTablePropertiesString(std::string tp_string, TableProperties* tp) {
179 double dummy_double;
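  // Turn ';' and '=' into spaces so the whole human-readable property string
  // can be parsed with the single sscanf() format below.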
180 std::replace(tp_string.begin(), tp_string.end(), ';', ' ');
181 std::replace(tp_string.begin(), tp_string.end(), '=', ' ');
182 ResetTableProperties(tp);
183 sscanf(tp_string.c_str(),
184 "# data blocks %" SCNu64 " # entries %" SCNu64 " # deletions %" SCNu64
185 " # merge operands %" SCNu64 " # range deletions %" SCNu64
186 " raw key size %" SCNu64
187 " raw average key size %lf "
188 " raw value size %" SCNu64
189 " raw average value size %lf "
190 " data block size %" SCNu64 " index block size (user-key? %" SCNu64
191 ", delta-value? %" SCNu64 ") %" SCNu64 " filter block size %" SCNu64,
192 &tp->num_data_blocks, &tp->num_entries, &tp->num_deletions,
193 &tp->num_merge_operands, &tp->num_range_deletions, &tp->raw_key_size,
194 &dummy_double, &tp->raw_value_size, &dummy_double, &tp->data_size,
195 &tp->index_key_is_user_key, &tp->index_value_is_delta_encoded,
196 &tp->index_size, &tp->filter_size);
197 }
198
199 void VerifySimilar(uint64_t a, uint64_t b, double bias) {
200 ASSERT_EQ(a == 0U, b == 0U);
201 if (a == 0) {
202 return;
203 }
204 double dbl_a = static_cast<double>(a);
205 double dbl_b = static_cast<double>(b);
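  // Require the relative difference |a - b| / (a + b) to stay below `bias`.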
206 if (dbl_a > dbl_b) {
207 ASSERT_LT(static_cast<double>(dbl_a - dbl_b) / (dbl_a + dbl_b), bias);
208 } else {
209 ASSERT_LT(static_cast<double>(dbl_b - dbl_a) / (dbl_a + dbl_b), bias);
210 }
211 }
212
213 void VerifyTableProperties(
214 const TableProperties& base_tp, const TableProperties& new_tp,
215 double filter_size_bias = CACHE_LINE_SIZE >= 256 ? 0.15 : 0.1,
216 double index_size_bias = 0.1, double data_size_bias = 0.1,
217 double num_data_blocks_bias = 0.05) {
218 VerifySimilar(base_tp.data_size, new_tp.data_size, data_size_bias);
219 VerifySimilar(base_tp.index_size, new_tp.index_size, index_size_bias);
220 VerifySimilar(base_tp.filter_size, new_tp.filter_size, filter_size_bias);
221 VerifySimilar(base_tp.num_data_blocks, new_tp.num_data_blocks,
222 num_data_blocks_bias);
223
224 ASSERT_EQ(base_tp.raw_key_size, new_tp.raw_key_size);
225 ASSERT_EQ(base_tp.raw_value_size, new_tp.raw_value_size);
226 ASSERT_EQ(base_tp.num_entries, new_tp.num_entries);
227 ASSERT_EQ(base_tp.num_deletions, new_tp.num_deletions);
228 ASSERT_EQ(base_tp.num_range_deletions, new_tp.num_range_deletions);
229
230   // Merge operands may become Puts, so we only have an upper bound on the
231   // exact number of merge operands.
232 ASSERT_GE(base_tp.num_merge_operands, new_tp.num_merge_operands);
233 }
234
235 void GetExpectedTableProperties(
236 TableProperties* expected_tp, const int kKeySize, const int kValueSize,
237 const int kPutsPerTable, const int kDeletionsPerTable,
238 const int kMergeOperandsPerTable, const int kRangeDeletionsPerTable,
239 const int kTableCount, const int kBloomBitsPerKey, const size_t kBlockSize,
240 const bool index_key_is_user_key, const bool value_delta_encoding) {
241 const int kKeysPerTable =
242 kPutsPerTable + kDeletionsPerTable + kMergeOperandsPerTable;
243 const int kPutCount = kTableCount * kPutsPerTable;
244 const int kDeletionCount = kTableCount * kDeletionsPerTable;
245 const int kMergeCount = kTableCount * kMergeOperandsPerTable;
246 const int kRangeDeletionCount = kTableCount * kRangeDeletionsPerTable;
247 const int kKeyCount = kPutCount + kDeletionCount + kMergeCount + kRangeDeletionCount;
248 const int kAvgSuccessorSize = kKeySize / 5;
249 const int kEncodingSavePerKey = kKeySize / 4;
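  // Rough heuristics used by this test's estimates: index entries are assumed
  // to use shortened separator keys of about kKeySize / 5 bytes, prefix
  // encoding in data blocks is assumed to save about kKeySize / 4 bytes per
  // key, and each internal key carries an 8-byte footer (sequence number and
  // type), hence the "+ 8" terms below.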
250 expected_tp->raw_key_size = kKeyCount * (kKeySize + 8);
251 expected_tp->raw_value_size =
252 (kPutCount + kMergeCount + kRangeDeletionCount) * kValueSize;
253 expected_tp->num_entries = kKeyCount;
254 expected_tp->num_deletions = kDeletionCount + kRangeDeletionCount;
255 expected_tp->num_merge_operands = kMergeCount;
256 expected_tp->num_range_deletions = kRangeDeletionCount;
257 expected_tp->num_data_blocks =
258 kTableCount * (kKeysPerTable * (kKeySize - kEncodingSavePerKey + kValueSize)) /
259 kBlockSize;
260 expected_tp->data_size =
261 kTableCount * (kKeysPerTable * (kKeySize + 8 + kValueSize));
262 expected_tp->index_size =
263 expected_tp->num_data_blocks *
264 (kAvgSuccessorSize + (index_key_is_user_key ? 0 : 8) -
265 // discount 1 byte as value size is not encoded in value delta encoding
266 (value_delta_encoding ? 1 : 0));
267 expected_tp->filter_size =
268 kTableCount * ((kKeysPerTable * kBloomBitsPerKey + 7) / 8 +
269 /*average-ish overhead*/ CACHE_LINE_SIZE / 2);
270 }
271 } // anonymous namespace
272
273 TEST_F(DBPropertiesTest, ValidatePropertyInfo) {
274 for (const auto& ppt_name_and_info : InternalStats::ppt_name_to_info) {
275 // If C++ gets a std::string_literal, this would be better to check at
276 // compile-time using static_assert.
277 ASSERT_TRUE(ppt_name_and_info.first.empty() ||
278 !isdigit(ppt_name_and_info.first.back()));
279
280 int count = 0;
281 count += (ppt_name_and_info.second.handle_string == nullptr) ? 0 : 1;
282 count += (ppt_name_and_info.second.handle_int == nullptr) ? 0 : 1;
283 count += (ppt_name_and_info.second.handle_string_dbimpl == nullptr) ? 0 : 1;
284 ASSERT_TRUE(count == 1);
285 }
286 }
287
288 TEST_F(DBPropertiesTest, ValidateSampleNumber) {
289 // When "max_open_files" is -1, we read all the files for
290 // "rocksdb.estimate-num-keys" computation, which is the ground truth.
291   // Otherwise, we sample the 20 newest files to make an estimate.
292   // Formula: latest_20_files_active_key_ratio * total_files
293 Options options = CurrentOptions();
294 options.disable_auto_compactions = true;
295 options.level0_stop_writes_trigger = 1000;
296 DestroyAndReopen(options);
297 int key = 0;
298 for (int files = 20; files >= 10; files -= 10) {
299 for (int i = 0; i < files; i++) {
300 int rows = files / 10;
301 for (int j = 0; j < rows; j++) {
302 db_->Put(WriteOptions(), std::to_string(++key), "foo");
303 }
304 db_->Flush(FlushOptions());
305 }
306 }
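  // The loop above writes 20 files with 2 keys each plus 10 files with 1 key
  // each, i.e. 50 keys in 30 files. With the default max_open_files only the
  // 20 newest files are sampled: (10 * 1 + 10 * 2) / 20 = 1.5 keys per file,
  // so the estimate is 1.5 * 30 = 45 (assumed reading of the formula above);
  // with max_open_files = -1 all 50 keys are counted.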
307 std::string num;
308 Reopen(options);
309 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
310 ASSERT_EQ("45", num);
311 options.max_open_files = -1;
312 Reopen(options);
313 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
314 ASSERT_EQ("50", num);
315 }
316
317 TEST_F(DBPropertiesTest, AggregatedTableProperties) {
318 for (int kTableCount = 40; kTableCount <= 100; kTableCount += 30) {
319 const int kDeletionsPerTable = 5;
320 const int kMergeOperandsPerTable = 15;
321 const int kRangeDeletionsPerTable = 5;
322 const int kPutsPerTable = 100;
323 const int kKeySize = 80;
324 const int kValueSize = 200;
325 const int kBloomBitsPerKey = 20;
326
327 Options options = CurrentOptions();
328 options.level0_file_num_compaction_trigger = 8;
329 options.compression = kNoCompression;
330 options.create_if_missing = true;
331 options.preserve_deletes = true;
332 options.merge_operator.reset(new TestPutOperator());
333
334 BlockBasedTableOptions table_options;
335 table_options.filter_policy.reset(
336 NewBloomFilterPolicy(kBloomBitsPerKey, false));
337 table_options.block_size = 1024;
338 options.table_factory.reset(new BlockBasedTableFactory(table_options));
339
340 DestroyAndReopen(options);
341
342 // Hold open a snapshot to prevent range tombstones from being compacted
343 // away.
344 ManagedSnapshot snapshot(db_);
345
346 Random rnd(5632);
347 for (int table = 1; table <= kTableCount; ++table) {
348 for (int i = 0; i < kPutsPerTable; ++i) {
349 db_->Put(WriteOptions(), RandomString(&rnd, kKeySize),
350 RandomString(&rnd, kValueSize));
351 }
352 for (int i = 0; i < kDeletionsPerTable; i++) {
353 db_->Delete(WriteOptions(), RandomString(&rnd, kKeySize));
354 }
355 for (int i = 0; i < kMergeOperandsPerTable; i++) {
356 db_->Merge(WriteOptions(), RandomString(&rnd, kKeySize),
357 RandomString(&rnd, kValueSize));
358 }
359 for (int i = 0; i < kRangeDeletionsPerTable; i++) {
360 std::string start = RandomString(&rnd, kKeySize);
361 std::string end = start;
362 end.resize(kValueSize);
363 db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), start, end);
364 }
365 db_->Flush(FlushOptions());
366 }
367 std::string property;
368 db_->GetProperty(DB::Properties::kAggregatedTableProperties, &property);
369 TableProperties output_tp;
370 ParseTablePropertiesString(property, &output_tp);
371 bool index_key_is_user_key = output_tp.index_key_is_user_key > 0;
372 bool value_is_delta_encoded = output_tp.index_value_is_delta_encoded > 0;
373
374 TableProperties expected_tp;
375 GetExpectedTableProperties(
376 &expected_tp, kKeySize, kValueSize, kPutsPerTable, kDeletionsPerTable,
377 kMergeOperandsPerTable, kRangeDeletionsPerTable, kTableCount,
378 kBloomBitsPerKey, table_options.block_size, index_key_is_user_key,
379 value_is_delta_encoded);
380
381 VerifyTableProperties(expected_tp, output_tp);
382 }
383 }
384
385 TEST_F(DBPropertiesTest, ReadLatencyHistogramByLevel) {
386 Options options = CurrentOptions();
387 options.write_buffer_size = 110 << 10;
388 options.level0_file_num_compaction_trigger = 6;
389 options.num_levels = 4;
390 options.compression = kNoCompression;
391 options.max_bytes_for_level_base = 4500 << 10;
392 options.target_file_size_base = 98 << 10;
393 options.max_write_buffer_number = 2;
394 options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
395   options.max_open_files = 11; // Make sure no preloading of table readers
396
397   // RocksDB sanitizes max_open_files to at least 20. Modify it back.
398 ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
399 "SanitizeOptions::AfterChangeMaxOpenFiles", [&](void* arg) {
400 int* max_open_files = static_cast<int*>(arg);
401 *max_open_files = 11;
402 });
403 ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
404
405 BlockBasedTableOptions table_options;
406 table_options.no_block_cache = true;
407
408 CreateAndReopenWithCF({"pikachu"}, options);
409 int key_index = 0;
410 Random rnd(301);
411 for (int num = 0; num < 8; num++) {
412 Put("foo", "bar");
413 GenerateNewFile(&rnd, &key_index);
414 dbfull()->TEST_WaitForCompact();
415 }
416 dbfull()->TEST_WaitForCompact();
417
418 std::string prop;
419 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop));
420
421   // Issue Get() after flushes; check that the read latency histogram is tracked.
422 for (int key = 0; key < key_index; key++) {
423 Get(Key(key));
424 }
425 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cfstats", &prop));
426 ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
427 ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
428 ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
429
430   // Reopen and issue Get(). Check that the latency is tracked.
431 ReopenWithColumnFamilies({"default", "pikachu"}, options);
432 dbfull()->TEST_WaitForCompact();
433 for (int key = 0; key < key_index; key++) {
434 Get(Key(key));
435 }
436
437 // Test for getting immutable_db_options_.statistics
438 ASSERT_TRUE(dbfull()->GetProperty(dbfull()->DefaultColumnFamily(),
439 "rocksdb.options-statistics", &prop));
440 ASSERT_NE(std::string::npos, prop.find("rocksdb.block.cache.miss"));
441 ASSERT_EQ(std::string::npos, prop.find("rocksdb.db.f.micros"));
442
443 ASSERT_TRUE(dbfull()->GetProperty(dbfull()->DefaultColumnFamily(),
444 "rocksdb.cf-file-histogram", &prop));
445 ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
446 ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
447 ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
448
449   // Reopen and iterate. Check that the latency is tracked.
450 ReopenWithColumnFamilies({"default", "pikachu"}, options);
451 ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
452 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cf-file-histogram", &prop));
453 ASSERT_EQ(std::string::npos, prop.find("** Level 0 read latency histogram"));
454 ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
455 ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
456 {
457 std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
458 for (iter->Seek(Key(0)); iter->Valid(); iter->Next()) {
459 }
460 }
461 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cf-file-histogram", &prop));
462 ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
463 ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
464 ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
465
466 // CF 1 should show no histogram.
467 ASSERT_TRUE(
468 dbfull()->GetProperty(handles_[1], "rocksdb.cf-file-histogram", &prop));
469 ASSERT_EQ(std::string::npos, prop.find("** Level 0 read latency histogram"));
470 ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
471 ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
472   // Put something and read it back; CF 1 should now show a histogram.
473 Put(1, "foo", "bar");
474 Flush(1);
475 dbfull()->TEST_WaitForCompact();
476 ASSERT_EQ("bar", Get(1, "foo"));
477
478 ASSERT_TRUE(
479 dbfull()->GetProperty(handles_[1], "rocksdb.cf-file-histogram", &prop));
480 ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
481 ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
482 ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
483
484   // Setting max_open_files to -1 preloads all table readers.
485 options.max_open_files = -1;
486 ReopenWithColumnFamilies({"default", "pikachu"}, options);
487 ASSERT_TRUE(dbfull()->GetProperty(dbfull()->DefaultColumnFamily(),
488 "rocksdb.cf-file-histogram", &prop));
489 ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
490 ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
491 ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
492 for (int key = 0; key < key_index; key++) {
493 Get(Key(key));
494 }
495 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cfstats", &prop));
496 ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
497 ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
498 ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
499
500 // Clear internal stats
501 dbfull()->ResetStats();
502 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cfstats", &prop));
503 ASSERT_EQ(std::string::npos, prop.find("** Level 0 read latency histogram"));
504 ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
505 ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
506 }
507
508 TEST_F(DBPropertiesTest, AggregatedTablePropertiesAtLevel) {
509 const int kTableCount = 100;
510 const int kDeletionsPerTable = 2;
511 const int kMergeOperandsPerTable = 2;
512 const int kRangeDeletionsPerTable = 2;
513 const int kPutsPerTable = 10;
514 const int kKeySize = 50;
515 const int kValueSize = 400;
516 const int kMaxLevel = 7;
517 const int kBloomBitsPerKey = 20;
518 Random rnd(301);
519 Options options = CurrentOptions();
520 options.level0_file_num_compaction_trigger = 8;
521 options.compression = kNoCompression;
522 options.create_if_missing = true;
523 options.level0_file_num_compaction_trigger = 2;
524 options.target_file_size_base = 8192;
525 options.max_bytes_for_level_base = 10000;
526 options.max_bytes_for_level_multiplier = 2;
527   // This ensures no compaction is happening when we call GetProperty().
528 options.disable_auto_compactions = true;
529 options.preserve_deletes = true;
530 options.merge_operator.reset(new TestPutOperator());
531
532 BlockBasedTableOptions table_options;
533 table_options.filter_policy.reset(
534 NewBloomFilterPolicy(kBloomBitsPerKey, false));
535 table_options.block_size = 1024;
536 options.table_factory.reset(new BlockBasedTableFactory(table_options));
537
538 DestroyAndReopen(options);
539
540 // Hold open a snapshot to prevent range tombstones from being compacted away.
541 ManagedSnapshot snapshot(db_);
542
543 std::string level_tp_strings[kMaxLevel];
544 std::string tp_string;
545 TableProperties level_tps[kMaxLevel];
546 TableProperties tp, sum_tp, expected_tp;
547 for (int table = 1; table <= kTableCount; ++table) {
548 for (int i = 0; i < kPutsPerTable; ++i) {
549 db_->Put(WriteOptions(), RandomString(&rnd, kKeySize),
550 RandomString(&rnd, kValueSize));
551 }
552 for (int i = 0; i < kDeletionsPerTable; i++) {
553 db_->Delete(WriteOptions(), RandomString(&rnd, kKeySize));
554 }
555 for (int i = 0; i < kMergeOperandsPerTable; i++) {
556 db_->Merge(WriteOptions(), RandomString(&rnd, kKeySize),
557 RandomString(&rnd, kValueSize));
558 }
559 for (int i = 0; i < kRangeDeletionsPerTable; i++) {
560 std::string start = RandomString(&rnd, kKeySize);
561 std::string end = start;
562 end.resize(kValueSize);
563 db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), start, end);
564 }
565 db_->Flush(FlushOptions());
566 db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
567 ResetTableProperties(&sum_tp);
568 for (int level = 0; level < kMaxLevel; ++level) {
569 db_->GetProperty(
570 DB::Properties::kAggregatedTablePropertiesAtLevel + ToString(level),
571 &level_tp_strings[level]);
572 ParseTablePropertiesString(level_tp_strings[level], &level_tps[level]);
573 sum_tp.data_size += level_tps[level].data_size;
574 sum_tp.index_size += level_tps[level].index_size;
575 sum_tp.filter_size += level_tps[level].filter_size;
576 sum_tp.raw_key_size += level_tps[level].raw_key_size;
577 sum_tp.raw_value_size += level_tps[level].raw_value_size;
578 sum_tp.num_data_blocks += level_tps[level].num_data_blocks;
579 sum_tp.num_entries += level_tps[level].num_entries;
580 sum_tp.num_deletions += level_tps[level].num_deletions;
581 sum_tp.num_merge_operands += level_tps[level].num_merge_operands;
582 sum_tp.num_range_deletions += level_tps[level].num_range_deletions;
583 }
584 db_->GetProperty(DB::Properties::kAggregatedTableProperties, &tp_string);
585 ParseTablePropertiesString(tp_string, &tp);
586 bool index_key_is_user_key = tp.index_key_is_user_key > 0;
587 bool value_is_delta_encoded = tp.index_value_is_delta_encoded > 0;
588 ASSERT_EQ(sum_tp.data_size, tp.data_size);
589 ASSERT_EQ(sum_tp.index_size, tp.index_size);
590 ASSERT_EQ(sum_tp.filter_size, tp.filter_size);
591 ASSERT_EQ(sum_tp.raw_key_size, tp.raw_key_size);
592 ASSERT_EQ(sum_tp.raw_value_size, tp.raw_value_size);
593 ASSERT_EQ(sum_tp.num_data_blocks, tp.num_data_blocks);
594 ASSERT_EQ(sum_tp.num_entries, tp.num_entries);
595 ASSERT_EQ(sum_tp.num_deletions, tp.num_deletions);
596 ASSERT_EQ(sum_tp.num_merge_operands, tp.num_merge_operands);
597 ASSERT_EQ(sum_tp.num_range_deletions, tp.num_range_deletions);
598 if (table > 3) {
599 GetExpectedTableProperties(
600 &expected_tp, kKeySize, kValueSize, kPutsPerTable, kDeletionsPerTable,
601 kMergeOperandsPerTable, kRangeDeletionsPerTable, table,
602 kBloomBitsPerKey, table_options.block_size, index_key_is_user_key,
603 value_is_delta_encoded);
604 // Gives larger bias here as index block size, filter block size,
605 // and data block size become much harder to estimate in this test.
606 VerifyTableProperties(expected_tp, tp, 0.5, 0.4, 0.4, 0.25);
607 }
608 }
609 }
610
611 TEST_F(DBPropertiesTest, NumImmutableMemTable) {
612 do {
613 Options options = CurrentOptions();
614 WriteOptions writeOpt = WriteOptions();
615 writeOpt.disableWAL = true;
616 options.max_write_buffer_number = 4;
617 options.min_write_buffer_number_to_merge = 3;
618 options.write_buffer_size = 1000000;
619 options.max_write_buffer_size_to_maintain =
620 5 * static_cast<int64_t>(options.write_buffer_size);
621 CreateAndReopenWithCF({"pikachu"}, options);
622
623 std::string big_value(1000000 * 2, 'x');
624 std::string num;
625 uint64_t value;
626 SetPerfLevel(kEnableTime);
627 ASSERT_TRUE(GetPerfLevel() == kEnableTime);
628
629 ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k1", big_value));
630 ASSERT_TRUE(dbfull()->GetProperty(handles_[1],
631 "rocksdb.num-immutable-mem-table", &num));
632 ASSERT_EQ(num, "0");
633 ASSERT_TRUE(dbfull()->GetProperty(
634 handles_[1], DB::Properties::kNumImmutableMemTableFlushed, &num));
635 ASSERT_EQ(num, "0");
636 ASSERT_TRUE(dbfull()->GetProperty(
637 handles_[1], "rocksdb.num-entries-active-mem-table", &num));
638 ASSERT_EQ(num, "1");
639 get_perf_context()->Reset();
640 Get(1, "k1");
641 ASSERT_EQ(1, static_cast<int>(get_perf_context()->get_from_memtable_count));
642
643 ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k2", big_value));
644 ASSERT_TRUE(dbfull()->GetProperty(handles_[1],
645 "rocksdb.num-immutable-mem-table", &num));
646 ASSERT_EQ(num, "1");
647 ASSERT_TRUE(dbfull()->GetProperty(
648 handles_[1], "rocksdb.num-entries-active-mem-table", &num));
649 ASSERT_EQ(num, "1");
650 ASSERT_TRUE(dbfull()->GetProperty(
651 handles_[1], "rocksdb.num-entries-imm-mem-tables", &num));
652 ASSERT_EQ(num, "1");
653
654 get_perf_context()->Reset();
655 Get(1, "k1");
656 ASSERT_EQ(2, static_cast<int>(get_perf_context()->get_from_memtable_count));
657 get_perf_context()->Reset();
658 Get(1, "k2");
659 ASSERT_EQ(1, static_cast<int>(get_perf_context()->get_from_memtable_count));
660
661 ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k3", big_value));
662 ASSERT_TRUE(dbfull()->GetProperty(
663 handles_[1], "rocksdb.cur-size-active-mem-table", &num));
664 ASSERT_TRUE(dbfull()->GetProperty(handles_[1],
665 "rocksdb.num-immutable-mem-table", &num));
666 ASSERT_EQ(num, "2");
667 ASSERT_TRUE(dbfull()->GetProperty(
668 handles_[1], "rocksdb.num-entries-active-mem-table", &num));
669 ASSERT_EQ(num, "1");
670 ASSERT_TRUE(dbfull()->GetProperty(
671 handles_[1], "rocksdb.num-entries-imm-mem-tables", &num));
672 ASSERT_EQ(num, "2");
673 get_perf_context()->Reset();
674 Get(1, "k2");
675 ASSERT_EQ(2, static_cast<int>(get_perf_context()->get_from_memtable_count));
676 get_perf_context()->Reset();
677 Get(1, "k3");
678 ASSERT_EQ(1, static_cast<int>(get_perf_context()->get_from_memtable_count));
679 get_perf_context()->Reset();
680 Get(1, "k1");
681 ASSERT_EQ(3, static_cast<int>(get_perf_context()->get_from_memtable_count));
682
683 ASSERT_OK(Flush(1));
684 ASSERT_TRUE(dbfull()->GetProperty(handles_[1],
685 "rocksdb.num-immutable-mem-table", &num));
686 ASSERT_EQ(num, "0");
687 ASSERT_TRUE(dbfull()->GetProperty(
688 handles_[1], DB::Properties::kNumImmutableMemTableFlushed, &num));
689 ASSERT_EQ(num, "3");
690 ASSERT_TRUE(dbfull()->GetIntProperty(
691 handles_[1], "rocksdb.cur-size-active-mem-table", &value));
692     // "192" is the size of the metadata of two empty skiplists; this would
693     // break if we change the default skiplist implementation.
694 ASSERT_GE(value, 192);
695
696 uint64_t int_num;
697 uint64_t base_total_size;
698 ASSERT_TRUE(dbfull()->GetIntProperty(
699 handles_[1], "rocksdb.estimate-num-keys", &base_total_size));
700
701 ASSERT_OK(dbfull()->Delete(writeOpt, handles_[1], "k2"));
702 ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k3", ""));
703 ASSERT_OK(dbfull()->Delete(writeOpt, handles_[1], "k3"));
704 ASSERT_TRUE(dbfull()->GetIntProperty(
705 handles_[1], "rocksdb.num-deletes-active-mem-table", &int_num));
706 ASSERT_EQ(int_num, 2U);
707 ASSERT_TRUE(dbfull()->GetIntProperty(
708 handles_[1], "rocksdb.num-entries-active-mem-table", &int_num));
709 ASSERT_EQ(int_num, 3U);
710
711 ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k2", big_value));
712 ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k2", big_value));
713 ASSERT_TRUE(dbfull()->GetIntProperty(
714 handles_[1], "rocksdb.num-entries-imm-mem-tables", &int_num));
715 ASSERT_EQ(int_num, 4U);
716 ASSERT_TRUE(dbfull()->GetIntProperty(
717 handles_[1], "rocksdb.num-deletes-imm-mem-tables", &int_num));
718 ASSERT_EQ(int_num, 2U);
719
720 ASSERT_TRUE(dbfull()->GetIntProperty(
721 handles_[1], "rocksdb.estimate-num-keys", &int_num));
722 ASSERT_EQ(int_num, base_total_size + 1);
723
724 SetPerfLevel(kDisable);
725 ASSERT_TRUE(GetPerfLevel() == kDisable);
726 } while (ChangeCompactOptions());
727 }
728
729 // TODO(techdept) : Disabled flaky test #12863555
730 TEST_F(DBPropertiesTest, DISABLED_GetProperty) {
731   // Set the size of both background thread pools to 1 and block them.
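  // While both pools are blocked, flushes and compactions cannot run, which
  // keeps the "*-pending" and "num-immutable-mem-table" properties below
  // deterministic until the sleeping tasks are woken up.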
732 env_->SetBackgroundThreads(1, Env::HIGH);
733 env_->SetBackgroundThreads(1, Env::LOW);
734 test::SleepingBackgroundTask sleeping_task_low;
735 env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
736 Env::Priority::LOW);
737 test::SleepingBackgroundTask sleeping_task_high;
738 env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
739 &sleeping_task_high, Env::Priority::HIGH);
740
741 Options options = CurrentOptions();
742 WriteOptions writeOpt = WriteOptions();
743 writeOpt.disableWAL = true;
744 options.compaction_style = kCompactionStyleUniversal;
745 options.level0_file_num_compaction_trigger = 1;
746 options.compaction_options_universal.size_ratio = 50;
747 options.max_background_compactions = 1;
748 options.max_background_flushes = 1;
749 options.max_write_buffer_number = 10;
750 options.min_write_buffer_number_to_merge = 1;
751 options.max_write_buffer_size_to_maintain = 0;
752 options.write_buffer_size = 1000000;
753 Reopen(options);
754
755 std::string big_value(1000000 * 2, 'x');
756 std::string num;
757 uint64_t int_num;
758 SetPerfLevel(kEnableTime);
759
760 ASSERT_TRUE(
761 dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
762 ASSERT_EQ(int_num, 0U);
763 ASSERT_TRUE(
764 dbfull()->GetIntProperty("rocksdb.estimate-live-data-size", &int_num));
765 ASSERT_EQ(int_num, 0U);
766
767 ASSERT_OK(dbfull()->Put(writeOpt, "k1", big_value));
768 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
769 ASSERT_EQ(num, "0");
770 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num));
771 ASSERT_EQ(num, "0");
772 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num));
773 ASSERT_EQ(num, "0");
774 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
775 ASSERT_EQ(num, "1");
776 get_perf_context()->Reset();
777
778 ASSERT_OK(dbfull()->Put(writeOpt, "k2", big_value));
779 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
780 ASSERT_EQ(num, "1");
781 ASSERT_OK(dbfull()->Delete(writeOpt, "k-non-existing"));
782 ASSERT_OK(dbfull()->Put(writeOpt, "k3", big_value));
783 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
784 ASSERT_EQ(num, "2");
785 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num));
786 ASSERT_EQ(num, "1");
787 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num));
788 ASSERT_EQ(num, "0");
789 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
790 ASSERT_EQ(num, "2");
791 // Verify the same set of properties through GetIntProperty
792 ASSERT_TRUE(
793 dbfull()->GetIntProperty("rocksdb.num-immutable-mem-table", &int_num));
794 ASSERT_EQ(int_num, 2U);
795 ASSERT_TRUE(
796 dbfull()->GetIntProperty("rocksdb.mem-table-flush-pending", &int_num));
797 ASSERT_EQ(int_num, 1U);
798 ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.compaction-pending", &int_num));
799 ASSERT_EQ(int_num, 0U);
800 ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &int_num));
801 ASSERT_EQ(int_num, 2U);
802
803 ASSERT_TRUE(
804 dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
805 ASSERT_EQ(int_num, 0U);
806
807 sleeping_task_high.WakeUp();
808 sleeping_task_high.WaitUntilDone();
809 dbfull()->TEST_WaitForFlushMemTable();
810
811 ASSERT_OK(dbfull()->Put(writeOpt, "k4", big_value));
812 ASSERT_OK(dbfull()->Put(writeOpt, "k5", big_value));
813 dbfull()->TEST_WaitForFlushMemTable();
814 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num));
815 ASSERT_EQ(num, "0");
816 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num));
817 ASSERT_EQ(num, "1");
818 ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
819 ASSERT_EQ(num, "4");
820
821 ASSERT_TRUE(
822 dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
823 ASSERT_GT(int_num, 0U);
824
825 sleeping_task_low.WakeUp();
826 sleeping_task_low.WaitUntilDone();
827
828 // Wait for compaction to be done. This is important because otherwise RocksDB
829 // might schedule a compaction when reopening the database, failing assertion
830 // (A) as a result.
831 dbfull()->TEST_WaitForCompact();
832 options.max_open_files = 10;
833 Reopen(options);
834 // After reopening, no table reader is loaded, so no memory for table readers
835 ASSERT_TRUE(
836 dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
837 ASSERT_EQ(int_num, 0U); // (A)
838 ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &int_num));
839 ASSERT_GT(int_num, 0U);
840
841 // After reading a key, at least one table reader is loaded.
842 Get("k5");
843 ASSERT_TRUE(
844 dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
845 ASSERT_GT(int_num, 0U);
846
847 // Test rocksdb.num-live-versions
848 {
849 options.level0_file_num_compaction_trigger = 20;
850 Reopen(options);
851 ASSERT_TRUE(
852 dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
853 ASSERT_EQ(int_num, 1U);
854
855 // Use an iterator to hold current version
856 std::unique_ptr<Iterator> iter1(dbfull()->NewIterator(ReadOptions()));
857
858 ASSERT_OK(dbfull()->Put(writeOpt, "k6", big_value));
859 Flush();
860 ASSERT_TRUE(
861 dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
862 ASSERT_EQ(int_num, 2U);
863
864 // Use an iterator to hold current version
865 std::unique_ptr<Iterator> iter2(dbfull()->NewIterator(ReadOptions()));
866
867 ASSERT_OK(dbfull()->Put(writeOpt, "k7", big_value));
868 Flush();
869 ASSERT_TRUE(
870 dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
871 ASSERT_EQ(int_num, 3U);
872
873 iter2.reset();
874 ASSERT_TRUE(
875 dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
876 ASSERT_EQ(int_num, 2U);
877
878 iter1.reset();
879 ASSERT_TRUE(
880 dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
881 ASSERT_EQ(int_num, 1U);
882 }
883 }
884
885 TEST_F(DBPropertiesTest, ApproximateMemoryUsage) {
886 const int kNumRounds = 10;
887 // TODO(noetzli) kFlushesPerRound does not really correlate with how many
888 // flushes happen.
889 const int kFlushesPerRound = 10;
890 const int kWritesPerFlush = 10;
891 const int kKeySize = 100;
892 const int kValueSize = 1000;
893 Options options;
894 options.write_buffer_size = 1000; // small write buffer
895 options.min_write_buffer_number_to_merge = 4;
896 options.compression = kNoCompression;
897 options.create_if_missing = true;
898 options = CurrentOptions(options);
899 DestroyAndReopen(options);
900
901 Random rnd(301);
902
903 std::vector<Iterator*> iters;
904
905 uint64_t active_mem;
906 uint64_t unflushed_mem;
907 uint64_t all_mem;
908 uint64_t prev_all_mem;
909
910   // Phase 0. Verify that the initial values of all these properties are the
911   // same, as we have no mem-tables.
912 dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
913 dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
914 dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
915 ASSERT_EQ(all_mem, active_mem);
916 ASSERT_EQ(all_mem, unflushed_mem);
917
918   // Phase 1. Simply issue Put() and expect "cur-size-all-mem-tables" to equal
919   // "size-all-mem-tables".
920 for (int r = 0; r < kNumRounds; ++r) {
921 for (int f = 0; f < kFlushesPerRound; ++f) {
922 for (int w = 0; w < kWritesPerFlush; ++w) {
923 Put(RandomString(&rnd, kKeySize), RandomString(&rnd, kValueSize));
924 }
925 }
926 // Make sure that there is no flush between getting the two properties.
927 dbfull()->TEST_WaitForFlushMemTable();
928 dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
929 dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
930     // With no iterators, these two numbers should be the same.
931 ASSERT_EQ(unflushed_mem, all_mem);
932 }
933 prev_all_mem = all_mem;
934
935 // Phase 2. Keep issuing Put() but also create new iterators. This time we
936 // expect "size-all-mem-tables" > "cur-size-all-mem-tables".
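  // (Iterators pin flushed memtables; pinned memtables are counted in
  // "size-all-mem-tables" but not in "cur-size-all-mem-tables".)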
937 for (int r = 0; r < kNumRounds; ++r) {
938 iters.push_back(db_->NewIterator(ReadOptions()));
939 for (int f = 0; f < kFlushesPerRound; ++f) {
940 for (int w = 0; w < kWritesPerFlush; ++w) {
941 Put(RandomString(&rnd, kKeySize), RandomString(&rnd, kValueSize));
942 }
943 }
944 // Force flush to prevent flush from happening between getting the
945 // properties or after getting the properties and before the new round.
946 Flush();
947
948 // In the second round, add iterators.
949 dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
950 dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
951 dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
952 ASSERT_GT(all_mem, active_mem);
953 ASSERT_GT(all_mem, unflushed_mem);
954 ASSERT_GT(all_mem, prev_all_mem);
955 prev_all_mem = all_mem;
956 }
957
958 // Phase 3. Delete iterators and expect "size-all-mem-tables" shrinks
959 // whenever we release an iterator.
960 for (auto* iter : iters) {
961 delete iter;
962 dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
963     // Expect the size to shrink.
964 ASSERT_LT(all_mem, prev_all_mem);
965 prev_all_mem = all_mem;
966 }
967
968   // Phase 4. Expect all three counters to be the same.
969 dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
970 dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
971 dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
972 ASSERT_EQ(active_mem, unflushed_mem);
973 ASSERT_EQ(unflushed_mem, all_mem);
974
975   // Phase 5. Reopen, and expect all three counters to be the same again.
976 Reopen(options);
977 dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
978 dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
979 dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
980 ASSERT_EQ(active_mem, unflushed_mem);
981 ASSERT_EQ(unflushed_mem, all_mem);
982 }
983
984 TEST_F(DBPropertiesTest, EstimatePendingCompBytes) {
985   // Set the size of both background thread pools to 1 and block them.
986 env_->SetBackgroundThreads(1, Env::HIGH);
987 env_->SetBackgroundThreads(1, Env::LOW);
988 test::SleepingBackgroundTask sleeping_task_low;
989 env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
990 Env::Priority::LOW);
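  // With the LOW pool blocked, compactions cannot run, so
  // "estimate-pending-compaction-bytes" accumulates until the task is woken.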
991
992 Options options = CurrentOptions();
993 WriteOptions writeOpt = WriteOptions();
994 writeOpt.disableWAL = true;
995 options.compaction_style = kCompactionStyleLevel;
996 options.level0_file_num_compaction_trigger = 2;
997 options.max_background_compactions = 1;
998 options.max_background_flushes = 1;
999 options.max_write_buffer_number = 10;
1000 options.min_write_buffer_number_to_merge = 1;
1001 options.max_write_buffer_size_to_maintain = 0;
1002 options.write_buffer_size = 1000000;
1003 Reopen(options);
1004
1005 std::string big_value(1000000 * 2, 'x');
1006 std::string num;
1007 uint64_t int_num;
1008
1009 ASSERT_OK(dbfull()->Put(writeOpt, "k1", big_value));
1010 Flush();
1011 ASSERT_TRUE(dbfull()->GetIntProperty(
1012 "rocksdb.estimate-pending-compaction-bytes", &int_num));
1013 ASSERT_EQ(int_num, 0U);
1014
1015 ASSERT_OK(dbfull()->Put(writeOpt, "k2", big_value));
1016 Flush();
1017 ASSERT_TRUE(dbfull()->GetIntProperty(
1018 "rocksdb.estimate-pending-compaction-bytes", &int_num));
1019 ASSERT_GT(int_num, 0U);
1020
1021 ASSERT_OK(dbfull()->Put(writeOpt, "k3", big_value));
1022 Flush();
1023 ASSERT_TRUE(dbfull()->GetIntProperty(
1024 "rocksdb.estimate-pending-compaction-bytes", &int_num));
1025 ASSERT_GT(int_num, 0U);
1026
1027 sleeping_task_low.WakeUp();
1028 sleeping_task_low.WaitUntilDone();
1029
1030 dbfull()->TEST_WaitForCompact();
1031 ASSERT_TRUE(dbfull()->GetIntProperty(
1032 "rocksdb.estimate-pending-compaction-bytes", &int_num));
1033 ASSERT_EQ(int_num, 0U);
1034 }
1035
1036 TEST_F(DBPropertiesTest, EstimateCompressionRatio) {
1037 if (!Snappy_Supported()) {
1038 return;
1039 }
1040 const int kNumL0Files = 3;
1041 const int kNumEntriesPerFile = 1000;
1042
1043 Options options = CurrentOptions();
1044 options.compression_per_level = {kNoCompression, kSnappyCompression};
1045 options.disable_auto_compactions = true;
1046 options.num_levels = 2;
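  // With two levels, L0 files are written uncompressed and L1 uses Snappy, so
  // only the L1 ratio is expected to exceed 1 after the manual compaction.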
1047 Reopen(options);
1048
1049   // The compression ratio is -1.0 when there are no open files at the level.
1050 ASSERT_EQ(CompressionRatioAtLevel(0), -1.0);
1051
1052 const std::string kVal(100, 'a');
1053 for (int i = 0; i < kNumL0Files; ++i) {
1054 for (int j = 0; j < kNumEntriesPerFile; ++j) {
1055 // Put common data ("key") at end to prevent delta encoding from
1056 // compressing the key effectively
1057 std::string key = ToString(i) + ToString(j) + "key";
1058 ASSERT_OK(dbfull()->Put(WriteOptions(), key, kVal));
1059 }
1060 Flush();
1061 }
1062
1063 // no compression at L0, so ratio is less than one
1064 ASSERT_LT(CompressionRatioAtLevel(0), 1.0);
1065 ASSERT_GT(CompressionRatioAtLevel(0), 0.0);
1066 ASSERT_EQ(CompressionRatioAtLevel(1), -1.0);
1067
1068 dbfull()->TEST_CompactRange(0, nullptr, nullptr);
1069
1070 ASSERT_EQ(CompressionRatioAtLevel(0), -1.0);
1071 // Data at L1 should be highly compressed thanks to Snappy and redundant data
1072 // in values (ratio is 12.846 as of 4/19/2016).
1073 ASSERT_GT(CompressionRatioAtLevel(1), 10.0);
1074 }
1075
1076 #endif // ROCKSDB_LITE
1077
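// A user table-properties collector that counts the keys added to each table
// and publishes the count (varint32-encoded) plus a fixed message in the
// table's user-collected properties.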
1078 class CountingUserTblPropCollector : public TablePropertiesCollector {
1079 public:
1080   const char* Name() const override { return "CountingUserTblPropCollector"; }
1081
1082   Status Finish(UserCollectedProperties* properties) override {
1083 std::string encoded;
1084 PutVarint32(&encoded, count_);
1085 *properties = UserCollectedProperties{
1086 {"CountingUserTblPropCollector", message_}, {"Count", encoded},
1087 };
1088 return Status::OK();
1089 }
1090
1091   Status AddUserKey(const Slice& /*user_key*/, const Slice& /*value*/,
1092 EntryType /*type*/, SequenceNumber /*seq*/,
1093 uint64_t /*file_size*/) override {
1094 ++count_;
1095 return Status::OK();
1096 }
1097
1098   UserCollectedProperties GetReadableProperties() const override {
1099 return UserCollectedProperties{};
1100 }
1101
1102 private:
1103 std::string message_ = "Rocksdb";
1104 uint32_t count_ = 0;
1105 };
1106
1107 class CountingUserTblPropCollectorFactory
1108 : public TablePropertiesCollectorFactory {
1109 public:
1110   explicit CountingUserTblPropCollectorFactory(
1111 uint32_t expected_column_family_id)
1112 : expected_column_family_id_(expected_column_family_id),
1113 num_created_(0) {}
1114   TablePropertiesCollector* CreateTablePropertiesCollector(
1115 TablePropertiesCollectorFactory::Context context) override {
1116 EXPECT_EQ(expected_column_family_id_, context.column_family_id);
1117 num_created_++;
1118 return new CountingUserTblPropCollector();
1119 }
1120   const char* Name() const override {
1121 return "CountingUserTblPropCollectorFactory";
1122 }
1123   void set_expected_column_family_id(uint32_t v) {
1124 expected_column_family_id_ = v;
1125 }
1126 uint32_t expected_column_family_id_;
1127 uint32_t num_created_;
1128 };
1129
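// Counts point deletions and, via NeedCompact(), marks a file as needing
// compaction once it has seen more than 10 deletions.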
1130 class CountingDeleteTabPropCollector : public TablePropertiesCollector {
1131 public:
1132   const char* Name() const override { return "CountingDeleteTabPropCollector"; }
1133
1134   Status AddUserKey(const Slice& /*user_key*/, const Slice& /*value*/,
1135 EntryType type, SequenceNumber /*seq*/,
1136 uint64_t /*file_size*/) override {
1137 if (type == kEntryDelete) {
1138 num_deletes_++;
1139 }
1140 return Status::OK();
1141 }
1142
1143   bool NeedCompact() const override { return num_deletes_ > 10; }
1144
1145   UserCollectedProperties GetReadableProperties() const override {
1146 return UserCollectedProperties{};
1147 }
1148
1149   Status Finish(UserCollectedProperties* properties) override {
1150 *properties =
1151 UserCollectedProperties{{"num_delete", ToString(num_deletes_)}};
1152 return Status::OK();
1153 }
1154
1155 private:
1156 uint32_t num_deletes_ = 0;
1157 };
1158
1159 class CountingDeleteTabPropCollectorFactory
1160 : public TablePropertiesCollectorFactory {
1161 public:
1162   TablePropertiesCollector* CreateTablePropertiesCollector(
1163 TablePropertiesCollectorFactory::Context /*context*/) override {
1164 return new CountingDeleteTabPropCollector();
1165 }
1166   const char* Name() const override {
1167 return "CountingDeleteTabPropCollectorFactory";
1168 }
1169 };
1170
1171 #ifndef ROCKSDB_LITE
1172 TEST_F(DBPropertiesTest, GetUserDefinedTableProperties) {
1173 Options options = CurrentOptions();
1174 options.level0_file_num_compaction_trigger = (1 << 30);
1175 options.table_properties_collector_factories.resize(1);
1176 std::shared_ptr<CountingUserTblPropCollectorFactory> collector_factory =
1177 std::make_shared<CountingUserTblPropCollectorFactory>(0);
1178 options.table_properties_collector_factories[0] = collector_factory;
1179 Reopen(options);
1180 // Create 4 tables
1181 for (int table = 0; table < 4; ++table) {
1182 for (int i = 0; i < 10 + table; ++i) {
1183 db_->Put(WriteOptions(), ToString(table * 100 + i), "val");
1184 }
1185 db_->Flush(FlushOptions());
1186 }
1187
1188 TablePropertiesCollection props;
1189 ASSERT_OK(db_->GetPropertiesOfAllTables(&props));
1190 ASSERT_EQ(4U, props.size());
1191 uint32_t sum = 0;
1192 for (const auto& item : props) {
1193 auto& user_collected = item.second->user_collected_properties;
1194 ASSERT_TRUE(user_collected.find("CountingUserTblPropCollector") !=
1195 user_collected.end());
1196 ASSERT_EQ(user_collected.at("CountingUserTblPropCollector"), "Rocksdb");
1197 ASSERT_TRUE(user_collected.find("Count") != user_collected.end());
1198 Slice key(user_collected.at("Count"));
1199 uint32_t count;
1200 ASSERT_TRUE(GetVarint32(&key, &count));
1201 sum += count;
1202 }
1203 ASSERT_EQ(10u + 11u + 12u + 13u, sum);
1204
1205 ASSERT_GT(collector_factory->num_created_, 0U);
1206 collector_factory->num_created_ = 0;
1207 dbfull()->TEST_CompactRange(0, nullptr, nullptr);
1208 ASSERT_GT(collector_factory->num_created_, 0U);
1209 }
1210 #endif // ROCKSDB_LITE
1211
1212 TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) {
1213 Options options = CurrentOptions();
1214 options.level0_file_num_compaction_trigger = 3;
1215 options.table_properties_collector_factories.resize(1);
1216 std::shared_ptr<CountingUserTblPropCollectorFactory> collector_factory =
1217 std::make_shared<CountingUserTblPropCollectorFactory>(1);
1218   options.table_properties_collector_factories[0] = collector_factory;
1219 CreateAndReopenWithCF({"pikachu"}, options);
1220 // Create 2 files
1221 for (int table = 0; table < 2; ++table) {
1222 for (int i = 0; i < 10 + table; ++i) {
1223 Put(1, ToString(table * 100 + i), "val");
1224 }
1225 Flush(1);
1226 }
1227 ASSERT_GT(collector_factory->num_created_, 0U);
1228
1229 collector_factory->num_created_ = 0;
1230 // Trigger automatic compactions.
1231 for (int table = 0; table < 3; ++table) {
1232 for (int i = 0; i < 10 + table; ++i) {
1233 Put(1, ToString(table * 100 + i), "val");
1234 }
1235 Flush(1);
1236 dbfull()->TEST_WaitForCompact();
1237 }
1238 ASSERT_GT(collector_factory->num_created_, 0U);
1239
1240 collector_factory->num_created_ = 0;
1241 dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
1242 ASSERT_GT(collector_factory->num_created_, 0U);
1243
1244 // Come back to write to default column family
1245 collector_factory->num_created_ = 0;
1246 collector_factory->set_expected_column_family_id(0); // default CF
1247   // Create 2 tables in the default column family
1248 for (int table = 0; table < 2; ++table) {
1249 for (int i = 0; i < 10 + table; ++i) {
1250 Put(ToString(table * 100 + i), "val");
1251 }
1252 Flush();
1253 }
1254 ASSERT_GT(collector_factory->num_created_, 0U);
1255
1256 collector_factory->num_created_ = 0;
1257 // Trigger automatic compactions.
1258 for (int table = 0; table < 3; ++table) {
1259 for (int i = 0; i < 10 + table; ++i) {
1260 Put(ToString(table * 100 + i), "val");
1261 }
1262 Flush();
1263 dbfull()->TEST_WaitForCompact();
1264 }
1265 ASSERT_GT(collector_factory->num_created_, 0U);
1266
1267 collector_factory->num_created_ = 0;
1268 dbfull()->TEST_CompactRange(0, nullptr, nullptr);
1269 ASSERT_GT(collector_factory->num_created_, 0U);
1270 }
1271
1272 #ifndef ROCKSDB_LITE
1273 TEST_F(DBPropertiesTest, TablePropertiesNeedCompactTest) {
1274 Random rnd(301);
1275
1276 Options options;
1277 options.create_if_missing = true;
1278 options.write_buffer_size = 4096;
1279 options.max_write_buffer_number = 8;
1280 options.level0_file_num_compaction_trigger = 2;
1281 options.level0_slowdown_writes_trigger = 2;
1282 options.level0_stop_writes_trigger = 4;
1283 options.target_file_size_base = 2048;
1284 options.max_bytes_for_level_base = 10240;
1285 options.max_bytes_for_level_multiplier = 4;
1286 options.soft_pending_compaction_bytes_limit = 1024 * 1024;
1287 options.num_levels = 8;
1288 options.env = env_;
1289
1290 std::shared_ptr<TablePropertiesCollectorFactory> collector_factory =
1291 std::make_shared<CountingDeleteTabPropCollectorFactory>();
1292 options.table_properties_collector_factories.resize(1);
1293 options.table_properties_collector_factories[0] = collector_factory;
1294
1295 DestroyAndReopen(options);
1296
1297 const int kMaxKey = 1000;
1298 for (int i = 0; i < kMaxKey; i++) {
1299 ASSERT_OK(Put(Key(i), RandomString(&rnd, 102)));
1300 ASSERT_OK(Put(Key(kMaxKey + i), RandomString(&rnd, 102)));
1301 }
1302 Flush();
1303 dbfull()->TEST_WaitForCompact();
1304 if (NumTableFilesAtLevel(0) == 1) {
1305     // Clear Level 0 so that when we later flush a file with deletions,
1306 // we don't trigger an organic compaction.
1307 ASSERT_OK(Put(Key(0), ""));
1308 ASSERT_OK(Put(Key(kMaxKey * 2), ""));
1309 Flush();
1310 dbfull()->TEST_WaitForCompact();
1311 }
1312 ASSERT_EQ(NumTableFilesAtLevel(0), 0);
1313
1314 {
1315 int c = 0;
1316 std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
1317 iter->Seek(Key(kMaxKey - 100));
1318 while (iter->Valid() && iter->key().compare(Key(kMaxKey + 100)) < 0) {
1319 iter->Next();
1320 ++c;
1321 }
1322 ASSERT_EQ(c, 200);
1323 }
1324
1325 Delete(Key(0));
1326 for (int i = kMaxKey - 100; i < kMaxKey + 100; i++) {
1327 Delete(Key(i));
1328 }
1329 Delete(Key(kMaxKey * 2));
1330
1331 Flush();
1332 dbfull()->TEST_WaitForCompact();
1333
1334 {
1335 SetPerfLevel(kEnableCount);
1336 get_perf_context()->Reset();
1337 int c = 0;
1338 std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
1339 iter->Seek(Key(kMaxKey - 100));
1340 while (iter->Valid() && iter->key().compare(Key(kMaxKey + 100)) < 0) {
1341 iter->Next();
1342 }
1343 ASSERT_EQ(c, 0);
1344 ASSERT_LT(get_perf_context()->internal_delete_skipped_count, 30u);
1345 ASSERT_LT(get_perf_context()->internal_key_skipped_count, 30u);
1346 SetPerfLevel(kDisable);
1347 }
1348 }
1349
1350 TEST_F(DBPropertiesTest, NeedCompactHintPersistentTest) {
1351 Random rnd(301);
1352
1353 Options options;
1354 options.create_if_missing = true;
1355 options.max_write_buffer_number = 8;
1356 options.level0_file_num_compaction_trigger = 10;
1357 options.level0_slowdown_writes_trigger = 10;
1358 options.level0_stop_writes_trigger = 10;
1359 options.disable_auto_compactions = true;
1360 options.env = env_;
1361
1362 std::shared_ptr<TablePropertiesCollectorFactory> collector_factory =
1363 std::make_shared<CountingDeleteTabPropCollectorFactory>();
1364 options.table_properties_collector_factories.resize(1);
1365 options.table_properties_collector_factories[0] = collector_factory;
1366
1367 DestroyAndReopen(options);
1368
1369 const int kMaxKey = 100;
1370 for (int i = 0; i < kMaxKey; i++) {
1371 ASSERT_OK(Put(Key(i), ""));
1372 }
1373 Flush();
1374 dbfull()->TEST_WaitForFlushMemTable();
1375
1376 for (int i = 1; i < kMaxKey - 1; i++) {
1377 Delete(Key(i));
1378 }
1379 Flush();
1380 dbfull()->TEST_WaitForFlushMemTable();
1381 ASSERT_EQ(NumTableFilesAtLevel(0), 2);
1382
1383   // Restart the DB. Although the number of files didn't reach
1384 // options.level0_file_num_compaction_trigger, compaction should
1385 // still be triggered because of the need-compaction hint.
1386 options.disable_auto_compactions = false;
1387 Reopen(options);
1388 dbfull()->TEST_WaitForCompact();
1389 ASSERT_EQ(NumTableFilesAtLevel(0), 0);
1390 {
1391 SetPerfLevel(kEnableCount);
1392 get_perf_context()->Reset();
1393 int c = 0;
1394 std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
1395 for (iter->Seek(Key(0)); iter->Valid(); iter->Next()) {
1396 c++;
1397 }
1398 ASSERT_EQ(c, 2);
1399 ASSERT_EQ(get_perf_context()->internal_delete_skipped_count, 0);
1400 // We iterate every key twice. Is it a bug?
1401 ASSERT_LE(get_perf_context()->internal_key_skipped_count, 2);
1402 SetPerfLevel(kDisable);
1403 }
1404 }
1405
1406 TEST_F(DBPropertiesTest, EstimateNumKeysUnderflow) {
1407 Options options;
1408 Reopen(options);
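// One put followed by two deletes of the same key would make a naive
// entry-count estimate go negative; the property should clamp at zero.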
1409 Put("foo", "bar");
1410 Delete("foo");
1411 Delete("foo");
1412 uint64_t num_keys = 0;
1413 ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &num_keys));
1414 ASSERT_EQ(0, num_keys);
1415 }
1416
1417 TEST_F(DBPropertiesTest, EstimateOldestKeyTime) {
1418 std::unique_ptr<MockTimeEnv> mock_env(new MockTimeEnv(Env::Default()));
1419 uint64_t oldest_key_time = 0;
1420 Options options;
1421 options.env = mock_env.get();
1422
1423 // "rocksdb.estimate-oldest-key-time" only available to fifo compaction.
1424 mock_env->set_current_time(100);
1425 for (auto compaction : {kCompactionStyleLevel, kCompactionStyleUniversal,
1426 kCompactionStyleNone}) {
1427 options.compaction_style = compaction;
1428 options.create_if_missing = true;
1429 DestroyAndReopen(options);
1430 ASSERT_OK(Put("foo", "bar"));
1431 ASSERT_FALSE(dbfull()->GetIntProperty(
1432 DB::Properties::kEstimateOldestKeyTime, &oldest_key_time));
1433 }
1434
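// Switch to FIFO compaction with a TTL, where the property is supported.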
1435 options.compaction_style = kCompactionStyleFIFO;
1436 options.ttl = 300;
1437 options.compaction_options_fifo.allow_compaction = false;
1438 DestroyAndReopen(options);
1439
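// Each flush below creates one FIFO file; the property should keep reporting
// the creation time of the oldest surviving file and advance only as TTL
// compactions drop the older files.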
1440 mock_env->set_current_time(100);
1441 ASSERT_OK(Put("k1", "v1"));
1442 ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
1443 &oldest_key_time));
1444 ASSERT_EQ(100, oldest_key_time);
1445 ASSERT_OK(Flush());
1446 ASSERT_EQ("1", FilesPerLevel());
1447 ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
1448 &oldest_key_time));
1449 ASSERT_EQ(100, oldest_key_time);
1450
1451 mock_env->set_current_time(200);
1452 ASSERT_OK(Put("k2", "v2"));
1453 ASSERT_OK(Flush());
1454 ASSERT_EQ("2", FilesPerLevel());
1455 ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
1456 &oldest_key_time));
1457 ASSERT_EQ(100, oldest_key_time);
1458
1459 mock_env->set_current_time(300);
1460 ASSERT_OK(Put("k3", "v3"));
1461 ASSERT_OK(Flush());
1462 ASSERT_EQ("3", FilesPerLevel());
1463 ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
1464 &oldest_key_time));
1465 ASSERT_EQ(100, oldest_key_time);
1466
1467 mock_env->set_current_time(450);
1468 ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
1469 ASSERT_EQ("2", FilesPerLevel());
1470 ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
1471 &oldest_key_time));
1472 ASSERT_EQ(200, oldest_key_time);
1473
1474 mock_env->set_current_time(550);
1475 ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
1476 ASSERT_EQ("1", FilesPerLevel());
1477 ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
1478 &oldest_key_time));
1479 ASSERT_EQ(300, oldest_key_time);
1480
1481 mock_env->set_current_time(650);
1482 ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
1483 ASSERT_EQ("", FilesPerLevel());
1484 ASSERT_FALSE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
1485 &oldest_key_time));
1486
1487 // Close before mock_env destructs.
1488 Close();
1489 }
1490
1491 TEST_F(DBPropertiesTest, SstFilesSize) {
1492 struct TestListener : public EventListener {
1493 void OnCompactionCompleted(DB* db,
1494 const CompactionJobInfo& /*info*/) override {
1495 assert(callback_triggered == false);
1496 assert(size_before_compaction > 0);
1497 callback_triggered = true;
1498 uint64_t total_sst_size = 0;
1499 uint64_t live_sst_size = 0;
1500 bool ok = db->GetIntProperty(DB::Properties::kTotalSstFilesSize,
1501 &total_sst_size);
1502 ASSERT_TRUE(ok);
1503 // total_sst_size includes files both before and after the compaction.
1504 ASSERT_GT(total_sst_size, size_before_compaction);
1505 ok =
1506 db->GetIntProperty(DB::Properties::kLiveSstFilesSize, &live_sst_size);
1507 ASSERT_TRUE(ok);
1508 // live_sst_size includes only the files left after the compaction.
1509 ASSERT_GT(live_sst_size, 0);
1510 ASSERT_LT(live_sst_size, size_before_compaction);
1511 }
1512
1513 uint64_t size_before_compaction = 0;
1514 bool callback_triggered = false;
1515 };
1516 std::shared_ptr<TestListener> listener = std::make_shared<TestListener>();
1517
1518 Options options;
1519 options.disable_auto_compactions = true;
1520 options.listeners.push_back(listener);
1521 Reopen(options);
1522
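// Build two SSTs (ten ~1KB values, then five deletions), record their total
// size, and run a full compaction to fire the listener above.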
1523 for (int i = 0; i < 10; i++) {
1524 ASSERT_OK(Put("key" + ToString(i), std::string(1000, 'v')));
1525 }
1526 ASSERT_OK(Flush());
1527 for (int i = 0; i < 5; i++) {
1528 ASSERT_OK(Delete("key" + ToString(i)));
1529 }
1530 ASSERT_OK(Flush());
1531 uint64_t sst_size;
1532 bool ok = db_->GetIntProperty(DB::Properties::kTotalSstFilesSize, &sst_size);
1533 ASSERT_TRUE(ok);
1534 ASSERT_GT(sst_size, 0);
1535 listener->size_before_compaction = sst_size;
1536 // Compact to clean all keys and trigger listener.
1537 ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
1538 ASSERT_TRUE(listener->callback_triggered);
1539 }
1540
1541 TEST_F(DBPropertiesTest, MinObsoleteSstNumberToKeep) {
1542 class TestListener : public EventListener {
1543 public:
1544 void OnTableFileCreated(const TableFileCreationInfo& info) override {
1545 if (info.reason == TableFileCreationReason::kCompaction) {
1546 // Verify the property indicates that SSTs created by a running
1547 // compaction cannot be deleted.
1548 uint64_t created_file_num;
1549 FileType created_file_type;
1550 std::string filename =
1551 info.file_path.substr(info.file_path.rfind('/') + 1);
1552 ASSERT_TRUE(
1553 ParseFileName(filename, &created_file_num, &created_file_type));
1554 ASSERT_EQ(kTableFile, created_file_type);
1555
1556 uint64_t keep_sst_lower_bound;
1557 ASSERT_TRUE(
1558 db_->GetIntProperty(DB::Properties::kMinObsoleteSstNumberToKeep,
1559 &keep_sst_lower_bound));
1560
1561 ASSERT_LE(keep_sst_lower_bound, created_file_num);
1562 validated_ = true;
1563 }
1564 }
1565
1566 void SetDB(DB* db) { db_ = db; }
1567
1568 int GetNumCompactions() { return num_compactions_; }
1569
1570 // True if we've verified the property for at least one output file
1571 bool Validated() { return validated_; }
1572
1573 private:
1574 int num_compactions_ = 0;
1575 bool validated_ = false;
1576 DB* db_ = nullptr;
1577 };
1578
1579 const int kNumL0Files = 4;
1580
1581 std::shared_ptr<TestListener> listener = std::make_shared<TestListener>();
1582
1583 Options options = CurrentOptions();
1584 options.listeners.push_back(listener);
1585 options.level0_file_num_compaction_trigger = kNumL0Files;
1586 DestroyAndReopen(options);
1587 listener->SetDB(db_);
1588
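// Create kNumL0Files overlapping L0 files so the resulting compaction has to
// rewrite data (no trivial move) and emits new table files, which fires
// OnTableFileCreated with a kCompaction reason.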
1589 for (int i = 0; i < kNumL0Files; ++i) {
1590 // Make sure they overlap in keyspace to prevent trivial move
1591 Put("key1", "val");
1592 Put("key2", "val");
1593 Flush();
1594 }
1595 dbfull()->TEST_WaitForCompact();
1596 ASSERT_TRUE(listener->Validated());
1597 }
1598
1599 TEST_F(DBPropertiesTest, BlockCacheProperties) {
1600 Options options;
1601 uint64_t value;
1602
1603 // Block cache properties are not available for table formats other than
1604 // the block-based table.
1605 options.table_factory.reset(NewPlainTableFactory());
1606 Reopen(options);
1607 ASSERT_FALSE(
1608 db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
1609 ASSERT_FALSE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
1610 ASSERT_FALSE(
1611 db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
1612
1613 options.table_factory.reset(NewCuckooTableFactory());
1614 Reopen(options);
1615 ASSERT_FALSE(
1616 db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
1617 ASSERT_FALSE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
1618 ASSERT_FALSE(
1619 db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
1620
1621 // Block cache properties are not available if block cache is not used.
1622 BlockBasedTableOptions table_options;
1623 table_options.no_block_cache = true;
1624 options.table_factory.reset(NewBlockBasedTableFactory(table_options));
1625 Reopen(options);
1626 ASSERT_FALSE(
1627 db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
1628 ASSERT_FALSE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
1629 ASSERT_FALSE(
1630 db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
1631
1632 // Test with empty block cache.
1633 constexpr size_t kCapacity = 100;
1634 LRUCacheOptions co;
1635 co.capacity = kCapacity;
1636 co.num_shard_bits = 0;
1637 co.metadata_charge_policy = kDontChargeCacheMetadata;
1638 auto block_cache = NewLRUCache(co);
1639 table_options.block_cache = block_cache;
1640 table_options.no_block_cache = false;
1641 options.table_factory.reset(NewBlockBasedTableFactory(table_options));
1642 Reopen(options);
1643 ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
1644 ASSERT_EQ(kCapacity, value);
1645 ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
1646 ASSERT_EQ(0, value);
1647 ASSERT_TRUE(
1648 db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
1649 ASSERT_EQ(0, value);
1650
1651 // Insert an unpinned item into the cache and check the size.
1652 constexpr size_t kSize1 = 50;
1653 block_cache->Insert("item1", nullptr /*value*/, kSize1, nullptr /*deleter*/);
1654 ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
1655 ASSERT_EQ(kCapacity, value);
1656 ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
1657 ASSERT_EQ(kSize1, value);
1658 ASSERT_TRUE(
1659 db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
1660 ASSERT_EQ(0, value);
1661
1662 // Insert a pinned item into the cache and check the size.
1663 constexpr size_t kSize2 = 30;
1664 Cache::Handle* item2 = nullptr;
1665 block_cache->Insert("item2", nullptr /*value*/, kSize2, nullptr /*deleter*/,
1666 &item2);
1667 ASSERT_NE(nullptr, item2);
1668 ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
1669 ASSERT_EQ(kCapacity, value);
1670 ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
1671 ASSERT_EQ(kSize1 + kSize2, value);
1672 ASSERT_TRUE(
1673 db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
1674 ASSERT_EQ(kSize2, value);
1675
1676 // Insert another pinned item to make the cache over-sized.
1677 constexpr size_t kSize3 = 80;
1678 Cache::Handle* item3 = nullptr;
1679 block_cache->Insert("item3", nullptr /*value*/, kSize3, nullptr /*deleter*/,
1680 &item3);
1681 ASSERT_NE(nullptr, item3);
1682 ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
1683 ASSERT_EQ(kCapacity, value);
1684 ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
1685 // Item 1 is evicted.
1686 ASSERT_EQ(kSize2 + kSize3, value);
1687 ASSERT_TRUE(
1688 db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
1689 ASSERT_EQ(kSize2 + kSize3, value);
1690
1691 // Check size after release.
1692 block_cache->Release(item2);
1693 block_cache->Release(item3);
1694 ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
1695 ASSERT_EQ(kCapacity, value);
1696 ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
1697 // item2 is evicted after release, while item3 remains in the cache.
1698 ASSERT_EQ(kSize3, value);
1699 ASSERT_TRUE(
1700 db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
1701 ASSERT_EQ(0, value);
1702 }
1703
1704 #endif // ROCKSDB_LITE
1705 } // namespace ROCKSDB_NAMESPACE
1706
1707 int main(int argc, char** argv) {
1708 ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
1709 ::testing::InitGoogleTest(&argc, argv);
1710 return RUN_ALL_TESTS();
1711 }
1712