1 // This source code is licensed under both the GPLv2 (found in the
2 // COPYING file in the root directory) and Apache 2.0 License
3 // (found in the LICENSE.Apache file in the root directory).
4 //
5 // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
6 //
7 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
8 // Use of this source code is governed by a BSD-style license that can be
9 // found in the LICENSE file. See the AUTHORS file for names of contributors.
10
11 #include "db/internal_stats.h"
12
13 #include <algorithm>
14 #include <cinttypes>
15 #include <limits>
16 #include <string>
17 #include <utility>
18 #include <vector>
19
20 #include "db/column_family.h"
21 #include "db/db_impl/db_impl.h"
22 #include "table/block_based/block_based_table_factory.h"
23 #include "util/string_util.h"
24
25 namespace ROCKSDB_NAMESPACE {
26
27 #ifndef ROCKSDB_LITE
28
// Maps each per-level compaction statistic to its machine-readable property
// name (used for map-style output) and its human-readable column header
// (used by the formatted compaction-stats table).
const std::map<LevelStatType, LevelStat> InternalStats::compaction_level_stats =
    {
        {LevelStatType::NUM_FILES, LevelStat{"NumFiles", "Files"}},
        {LevelStatType::COMPACTED_FILES,
         LevelStat{"CompactedFiles", "CompactedFiles"}},
        {LevelStatType::SIZE_BYTES, LevelStat{"SizeBytes", "Size"}},
        {LevelStatType::SCORE, LevelStat{"Score", "Score"}},
        {LevelStatType::READ_GB, LevelStat{"ReadGB", "Read(GB)"}},
        {LevelStatType::RN_GB, LevelStat{"RnGB", "Rn(GB)"}},
        {LevelStatType::RNP1_GB, LevelStat{"Rnp1GB", "Rnp1(GB)"}},
        {LevelStatType::WRITE_GB, LevelStat{"WriteGB", "Write(GB)"}},
        {LevelStatType::W_NEW_GB, LevelStat{"WnewGB", "Wnew(GB)"}},
        {LevelStatType::MOVED_GB, LevelStat{"MovedGB", "Moved(GB)"}},
        {LevelStatType::WRITE_AMP, LevelStat{"WriteAmp", "W-Amp"}},
        {LevelStatType::READ_MBPS, LevelStat{"ReadMBps", "Rd(MB/s)"}},
        {LevelStatType::WRITE_MBPS, LevelStat{"WriteMBps", "Wr(MB/s)"}},
        {LevelStatType::COMP_SEC, LevelStat{"CompSec", "Comp(sec)"}},
        {LevelStatType::COMP_CPU_SEC,
         LevelStat{"CompMergeCPU", "CompMergeCPU(sec)"}},
        {LevelStatType::COMP_COUNT, LevelStat{"CompCount", "Comp(cnt)"}},
        {LevelStatType::AVG_SEC, LevelStat{"AvgSec", "Avg(sec)"}},
        {LevelStatType::KEY_IN, LevelStat{"KeyIn", "KeyIn"}},
        {LevelStatType::KEY_DROP, LevelStat{"KeyDrop", "KeyDrop"}},
};
53
54 namespace {
55 const double kMB = 1048576.0;
56 const double kGB = kMB * 1024;
57 const double kMicrosInSec = 1000000.0;
58
PrintLevelStatsHeader(char * buf,size_t len,const std::string & cf_name,const std::string & group_by)59 void PrintLevelStatsHeader(char* buf, size_t len, const std::string& cf_name,
60 const std::string& group_by) {
61 int written_size =
62 snprintf(buf, len, "\n** Compaction Stats [%s] **\n", cf_name.c_str());
63 auto hdr = [](LevelStatType t) {
64 return InternalStats::compaction_level_stats.at(t).header_name.c_str();
65 };
66 int line_size = snprintf(
67 buf + written_size, len - written_size,
68 "%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n",
69 // Note that we skip COMPACTED_FILES and merge it with Files column
70 group_by.c_str(), hdr(LevelStatType::NUM_FILES),
71 hdr(LevelStatType::SIZE_BYTES), hdr(LevelStatType::SCORE),
72 hdr(LevelStatType::READ_GB), hdr(LevelStatType::RN_GB),
73 hdr(LevelStatType::RNP1_GB), hdr(LevelStatType::WRITE_GB),
74 hdr(LevelStatType::W_NEW_GB), hdr(LevelStatType::MOVED_GB),
75 hdr(LevelStatType::WRITE_AMP), hdr(LevelStatType::READ_MBPS),
76 hdr(LevelStatType::WRITE_MBPS), hdr(LevelStatType::COMP_SEC),
77 hdr(LevelStatType::COMP_CPU_SEC), hdr(LevelStatType::COMP_COUNT),
78 hdr(LevelStatType::AVG_SEC), hdr(LevelStatType::KEY_IN),
79 hdr(LevelStatType::KEY_DROP));
80
81 written_size += line_size;
82 snprintf(buf + written_size, len - written_size, "%s\n",
83 std::string(line_size, '-').c_str());
84 }
85
PrepareLevelStats(std::map<LevelStatType,double> * level_stats,int num_files,int being_compacted,double total_file_size,double score,double w_amp,const InternalStats::CompactionStats & stats)86 void PrepareLevelStats(std::map<LevelStatType, double>* level_stats,
87 int num_files, int being_compacted,
88 double total_file_size, double score, double w_amp,
89 const InternalStats::CompactionStats& stats) {
90 uint64_t bytes_read =
91 stats.bytes_read_non_output_levels + stats.bytes_read_output_level;
92 int64_t bytes_new = stats.bytes_written - stats.bytes_read_output_level;
93 double elapsed = (stats.micros + 1) / kMicrosInSec;
94
95 (*level_stats)[LevelStatType::NUM_FILES] = num_files;
96 (*level_stats)[LevelStatType::COMPACTED_FILES] = being_compacted;
97 (*level_stats)[LevelStatType::SIZE_BYTES] = total_file_size;
98 (*level_stats)[LevelStatType::SCORE] = score;
99 (*level_stats)[LevelStatType::READ_GB] = bytes_read / kGB;
100 (*level_stats)[LevelStatType::RN_GB] =
101 stats.bytes_read_non_output_levels / kGB;
102 (*level_stats)[LevelStatType::RNP1_GB] = stats.bytes_read_output_level / kGB;
103 (*level_stats)[LevelStatType::WRITE_GB] = stats.bytes_written / kGB;
104 (*level_stats)[LevelStatType::W_NEW_GB] = bytes_new / kGB;
105 (*level_stats)[LevelStatType::MOVED_GB] = stats.bytes_moved / kGB;
106 (*level_stats)[LevelStatType::WRITE_AMP] = w_amp;
107 (*level_stats)[LevelStatType::READ_MBPS] = bytes_read / kMB / elapsed;
108 (*level_stats)[LevelStatType::WRITE_MBPS] =
109 stats.bytes_written / kMB / elapsed;
110 (*level_stats)[LevelStatType::COMP_SEC] = stats.micros / kMicrosInSec;
111 (*level_stats)[LevelStatType::COMP_CPU_SEC] = stats.cpu_micros / kMicrosInSec;
112 (*level_stats)[LevelStatType::COMP_COUNT] = stats.count;
113 (*level_stats)[LevelStatType::AVG_SEC] =
114 stats.count == 0 ? 0 : stats.micros / kMicrosInSec / stats.count;
115 (*level_stats)[LevelStatType::KEY_IN] =
116 static_cast<double>(stats.num_input_records);
117 (*level_stats)[LevelStatType::KEY_DROP] =
118 static_cast<double>(stats.num_dropped_records);
119 }
120
PrintLevelStats(char * buf,size_t len,const std::string & name,const std::map<LevelStatType,double> & stat_value)121 void PrintLevelStats(char* buf, size_t len, const std::string& name,
122 const std::map<LevelStatType, double>& stat_value) {
123 snprintf(
124 buf, len,
125 "%4s " /* Level */
126 "%6d/%-3d " /* Files */
127 "%8s " /* Size */
128 "%5.1f " /* Score */
129 "%8.1f " /* Read(GB) */
130 "%7.1f " /* Rn(GB) */
131 "%8.1f " /* Rnp1(GB) */
132 "%9.1f " /* Write(GB) */
133 "%8.1f " /* Wnew(GB) */
134 "%9.1f " /* Moved(GB) */
135 "%5.1f " /* W-Amp */
136 "%8.1f " /* Rd(MB/s) */
137 "%8.1f " /* Wr(MB/s) */
138 "%9.2f " /* Comp(sec) */
139 "%17.2f " /* CompMergeCPU(sec) */
140 "%9d " /* Comp(cnt) */
141 "%8.3f " /* Avg(sec) */
142 "%7s " /* KeyIn */
143 "%6s\n", /* KeyDrop */
144 name.c_str(), static_cast<int>(stat_value.at(LevelStatType::NUM_FILES)),
145 static_cast<int>(stat_value.at(LevelStatType::COMPACTED_FILES)),
146 BytesToHumanString(
147 static_cast<uint64_t>(stat_value.at(LevelStatType::SIZE_BYTES)))
148 .c_str(),
149 stat_value.at(LevelStatType::SCORE),
150 stat_value.at(LevelStatType::READ_GB),
151 stat_value.at(LevelStatType::RN_GB),
152 stat_value.at(LevelStatType::RNP1_GB),
153 stat_value.at(LevelStatType::WRITE_GB),
154 stat_value.at(LevelStatType::W_NEW_GB),
155 stat_value.at(LevelStatType::MOVED_GB),
156 stat_value.at(LevelStatType::WRITE_AMP),
157 stat_value.at(LevelStatType::READ_MBPS),
158 stat_value.at(LevelStatType::WRITE_MBPS),
159 stat_value.at(LevelStatType::COMP_SEC),
160 stat_value.at(LevelStatType::COMP_CPU_SEC),
161 static_cast<int>(stat_value.at(LevelStatType::COMP_COUNT)),
162 stat_value.at(LevelStatType::AVG_SEC),
163 NumberToHumanString(
164 static_cast<std::int64_t>(stat_value.at(LevelStatType::KEY_IN)))
165 .c_str(),
166 NumberToHumanString(
167 static_cast<std::int64_t>(stat_value.at(LevelStatType::KEY_DROP)))
168 .c_str());
169 }
170
PrintLevelStats(char * buf,size_t len,const std::string & name,int num_files,int being_compacted,double total_file_size,double score,double w_amp,const InternalStats::CompactionStats & stats)171 void PrintLevelStats(char* buf, size_t len, const std::string& name,
172 int num_files, int being_compacted, double total_file_size,
173 double score, double w_amp,
174 const InternalStats::CompactionStats& stats) {
175 std::map<LevelStatType, double> level_stats;
176 PrepareLevelStats(&level_stats, num_files, being_compacted, total_file_size,
177 score, w_amp, stats);
178 PrintLevelStats(buf, len, name, level_stats);
179 }
180
181 // Assumes that trailing numbers represent an optional argument. This requires
182 // property names to not end with numbers.
GetPropertyNameAndArg(const Slice & property)183 std::pair<Slice, Slice> GetPropertyNameAndArg(const Slice& property) {
184 Slice name = property, arg = property;
185 size_t sfx_len = 0;
186 while (sfx_len < property.size() &&
187 isdigit(property[property.size() - sfx_len - 1])) {
188 ++sfx_len;
189 }
190 name.remove_suffix(sfx_len);
191 arg.remove_prefix(property.size() - sfx_len);
192 return {name, arg};
193 }
194 } // anonymous namespace
195
// Common prefix of every user-visible RocksDB property name.
static const std::string rocksdb_prefix = "rocksdb.";

// Property-name suffixes. Each is prepended with rocksdb_prefix below to
// form the full DB::Properties::k* string.
static const std::string num_files_at_level_prefix = "num-files-at-level";
static const std::string compression_ratio_at_level_prefix =
    "compression-ratio-at-level";
static const std::string allstats = "stats";
static const std::string sstables = "sstables";
static const std::string cfstats = "cfstats";
static const std::string cfstats_no_file_histogram =
    "cfstats-no-file-histogram";
static const std::string cf_file_histogram = "cf-file-histogram";
static const std::string dbstats = "dbstats";
static const std::string levelstats = "levelstats";
static const std::string num_immutable_mem_table = "num-immutable-mem-table";
static const std::string num_immutable_mem_table_flushed =
    "num-immutable-mem-table-flushed";
static const std::string mem_table_flush_pending = "mem-table-flush-pending";
static const std::string compaction_pending = "compaction-pending";
static const std::string background_errors = "background-errors";
static const std::string cur_size_active_mem_table =
    "cur-size-active-mem-table";
static const std::string cur_size_all_mem_tables = "cur-size-all-mem-tables";
static const std::string size_all_mem_tables = "size-all-mem-tables";
static const std::string num_entries_active_mem_table =
    "num-entries-active-mem-table";
static const std::string num_entries_imm_mem_tables =
    "num-entries-imm-mem-tables";
static const std::string num_deletes_active_mem_table =
    "num-deletes-active-mem-table";
static const std::string num_deletes_imm_mem_tables =
    "num-deletes-imm-mem-tables";
static const std::string estimate_num_keys = "estimate-num-keys";
static const std::string estimate_table_readers_mem =
    "estimate-table-readers-mem";
static const std::string is_file_deletions_enabled =
    "is-file-deletions-enabled";
static const std::string num_snapshots = "num-snapshots";
static const std::string oldest_snapshot_time = "oldest-snapshot-time";
static const std::string oldest_snapshot_sequence = "oldest-snapshot-sequence";
static const std::string num_live_versions = "num-live-versions";
static const std::string current_version_number =
    "current-super-version-number";
static const std::string estimate_live_data_size = "estimate-live-data-size";
static const std::string min_log_number_to_keep_str = "min-log-number-to-keep";
static const std::string min_obsolete_sst_number_to_keep_str =
    "min-obsolete-sst-number-to-keep";
static const std::string base_level_str = "base-level";
static const std::string total_sst_files_size = "total-sst-files-size";
static const std::string live_sst_files_size = "live-sst-files-size";
static const std::string estimate_pending_comp_bytes =
    "estimate-pending-compaction-bytes";
static const std::string aggregated_table_properties =
    "aggregated-table-properties";
static const std::string aggregated_table_properties_at_level =
    aggregated_table_properties + "-at-level";
static const std::string num_running_compactions = "num-running-compactions";
static const std::string num_running_flushes = "num-running-flushes";
static const std::string actual_delayed_write_rate =
    "actual-delayed-write-rate";
static const std::string is_write_stopped = "is-write-stopped";
static const std::string estimate_oldest_key_time = "estimate-oldest-key-time";
static const std::string block_cache_capacity = "block-cache-capacity";
static const std::string block_cache_usage = "block-cache-usage";
static const std::string block_cache_pinned_usage = "block-cache-pinned-usage";
static const std::string options_statistics = "options-statistics";
261
// Definitions of the public DB::Properties::k* property-name constants:
// each is "rocksdb." plus the corresponding suffix string.
const std::string DB::Properties::kNumFilesAtLevelPrefix =
    rocksdb_prefix + num_files_at_level_prefix;
const std::string DB::Properties::kCompressionRatioAtLevelPrefix =
    rocksdb_prefix + compression_ratio_at_level_prefix;
const std::string DB::Properties::kStats = rocksdb_prefix + allstats;
const std::string DB::Properties::kSSTables = rocksdb_prefix + sstables;
const std::string DB::Properties::kCFStats = rocksdb_prefix + cfstats;
const std::string DB::Properties::kCFStatsNoFileHistogram =
    rocksdb_prefix + cfstats_no_file_histogram;
const std::string DB::Properties::kCFFileHistogram =
    rocksdb_prefix + cf_file_histogram;
const std::string DB::Properties::kDBStats = rocksdb_prefix + dbstats;
const std::string DB::Properties::kLevelStats = rocksdb_prefix + levelstats;
const std::string DB::Properties::kNumImmutableMemTable =
    rocksdb_prefix + num_immutable_mem_table;
const std::string DB::Properties::kNumImmutableMemTableFlushed =
    rocksdb_prefix + num_immutable_mem_table_flushed;
const std::string DB::Properties::kMemTableFlushPending =
    rocksdb_prefix + mem_table_flush_pending;
const std::string DB::Properties::kCompactionPending =
    rocksdb_prefix + compaction_pending;
const std::string DB::Properties::kNumRunningCompactions =
    rocksdb_prefix + num_running_compactions;
const std::string DB::Properties::kNumRunningFlushes =
    rocksdb_prefix + num_running_flushes;
const std::string DB::Properties::kBackgroundErrors =
    rocksdb_prefix + background_errors;
const std::string DB::Properties::kCurSizeActiveMemTable =
    rocksdb_prefix + cur_size_active_mem_table;
const std::string DB::Properties::kCurSizeAllMemTables =
    rocksdb_prefix + cur_size_all_mem_tables;
const std::string DB::Properties::kSizeAllMemTables =
    rocksdb_prefix + size_all_mem_tables;
const std::string DB::Properties::kNumEntriesActiveMemTable =
    rocksdb_prefix + num_entries_active_mem_table;
const std::string DB::Properties::kNumEntriesImmMemTables =
    rocksdb_prefix + num_entries_imm_mem_tables;
const std::string DB::Properties::kNumDeletesActiveMemTable =
    rocksdb_prefix + num_deletes_active_mem_table;
const std::string DB::Properties::kNumDeletesImmMemTables =
    rocksdb_prefix + num_deletes_imm_mem_tables;
const std::string DB::Properties::kEstimateNumKeys =
    rocksdb_prefix + estimate_num_keys;
const std::string DB::Properties::kEstimateTableReadersMem =
    rocksdb_prefix + estimate_table_readers_mem;
const std::string DB::Properties::kIsFileDeletionsEnabled =
    rocksdb_prefix + is_file_deletions_enabled;
const std::string DB::Properties::kNumSnapshots =
    rocksdb_prefix + num_snapshots;
const std::string DB::Properties::kOldestSnapshotTime =
    rocksdb_prefix + oldest_snapshot_time;
const std::string DB::Properties::kOldestSnapshotSequence =
    rocksdb_prefix + oldest_snapshot_sequence;
const std::string DB::Properties::kNumLiveVersions =
    rocksdb_prefix + num_live_versions;
const std::string DB::Properties::kCurrentSuperVersionNumber =
    rocksdb_prefix + current_version_number;
const std::string DB::Properties::kEstimateLiveDataSize =
    rocksdb_prefix + estimate_live_data_size;
const std::string DB::Properties::kMinLogNumberToKeep =
    rocksdb_prefix + min_log_number_to_keep_str;
const std::string DB::Properties::kMinObsoleteSstNumberToKeep =
    rocksdb_prefix + min_obsolete_sst_number_to_keep_str;
const std::string DB::Properties::kTotalSstFilesSize =
    rocksdb_prefix + total_sst_files_size;
const std::string DB::Properties::kLiveSstFilesSize =
    rocksdb_prefix + live_sst_files_size;
const std::string DB::Properties::kBaseLevel = rocksdb_prefix + base_level_str;
const std::string DB::Properties::kEstimatePendingCompactionBytes =
    rocksdb_prefix + estimate_pending_comp_bytes;
const std::string DB::Properties::kAggregatedTableProperties =
    rocksdb_prefix + aggregated_table_properties;
const std::string DB::Properties::kAggregatedTablePropertiesAtLevel =
    rocksdb_prefix + aggregated_table_properties_at_level;
const std::string DB::Properties::kActualDelayedWriteRate =
    rocksdb_prefix + actual_delayed_write_rate;
const std::string DB::Properties::kIsWriteStopped =
    rocksdb_prefix + is_write_stopped;
const std::string DB::Properties::kEstimateOldestKeyTime =
    rocksdb_prefix + estimate_oldest_key_time;
const std::string DB::Properties::kBlockCacheCapacity =
    rocksdb_prefix + block_cache_capacity;
const std::string DB::Properties::kBlockCacheUsage =
    rocksdb_prefix + block_cache_usage;
const std::string DB::Properties::kBlockCachePinnedUsage =
    rocksdb_prefix + block_cache_pinned_usage;
const std::string DB::Properties::kOptionsStatistics =
    rocksdb_prefix + options_statistics;
350
// Dispatch table from full property name to its DBPropertyInfo. The entry
// fields are: whether the property must be computed outside the DB mutex
// (see GetIntProperty/GetIntPropertyOutOfMutex), then the string, int, map,
// and DB-level handler pointers; unused handler slots are nullptr.
const std::unordered_map<std::string, DBPropertyInfo>
    InternalStats::ppt_name_to_info = {
        {DB::Properties::kNumFilesAtLevelPrefix,
         {false, &InternalStats::HandleNumFilesAtLevel, nullptr, nullptr,
          nullptr}},
        {DB::Properties::kCompressionRatioAtLevelPrefix,
         {false, &InternalStats::HandleCompressionRatioAtLevelPrefix, nullptr,
          nullptr, nullptr}},
        {DB::Properties::kLevelStats,
         {false, &InternalStats::HandleLevelStats, nullptr, nullptr, nullptr}},
        {DB::Properties::kStats,
         {false, &InternalStats::HandleStats, nullptr, nullptr, nullptr}},
        {DB::Properties::kCFStats,
         {false, &InternalStats::HandleCFStats, nullptr,
          &InternalStats::HandleCFMapStats, nullptr}},
        {DB::Properties::kCFStatsNoFileHistogram,
         {false, &InternalStats::HandleCFStatsNoFileHistogram, nullptr, nullptr,
          nullptr}},
        {DB::Properties::kCFFileHistogram,
         {false, &InternalStats::HandleCFFileHistogram, nullptr, nullptr,
          nullptr}},
        {DB::Properties::kDBStats,
         {false, &InternalStats::HandleDBStats, nullptr, nullptr, nullptr}},
        {DB::Properties::kSSTables,
         {false, &InternalStats::HandleSsTables, nullptr, nullptr, nullptr}},
        {DB::Properties::kAggregatedTableProperties,
         {false, &InternalStats::HandleAggregatedTableProperties, nullptr,
          nullptr, nullptr}},
        {DB::Properties::kAggregatedTablePropertiesAtLevel,
         {false, &InternalStats::HandleAggregatedTablePropertiesAtLevel,
          nullptr, nullptr, nullptr}},
        {DB::Properties::kNumImmutableMemTable,
         {false, nullptr, &InternalStats::HandleNumImmutableMemTable, nullptr,
          nullptr}},
        {DB::Properties::kNumImmutableMemTableFlushed,
         {false, nullptr, &InternalStats::HandleNumImmutableMemTableFlushed,
          nullptr, nullptr}},
        {DB::Properties::kMemTableFlushPending,
         {false, nullptr, &InternalStats::HandleMemTableFlushPending, nullptr,
          nullptr}},
        {DB::Properties::kCompactionPending,
         {false, nullptr, &InternalStats::HandleCompactionPending, nullptr,
          nullptr}},
        {DB::Properties::kBackgroundErrors,
         {false, nullptr, &InternalStats::HandleBackgroundErrors, nullptr,
          nullptr}},
        {DB::Properties::kCurSizeActiveMemTable,
         {false, nullptr, &InternalStats::HandleCurSizeActiveMemTable, nullptr,
          nullptr}},
        {DB::Properties::kCurSizeAllMemTables,
         {false, nullptr, &InternalStats::HandleCurSizeAllMemTables, nullptr,
          nullptr}},
        {DB::Properties::kSizeAllMemTables,
         {false, nullptr, &InternalStats::HandleSizeAllMemTables, nullptr,
          nullptr}},
        {DB::Properties::kNumEntriesActiveMemTable,
         {false, nullptr, &InternalStats::HandleNumEntriesActiveMemTable,
          nullptr, nullptr}},
        {DB::Properties::kNumEntriesImmMemTables,
         {false, nullptr, &InternalStats::HandleNumEntriesImmMemTables, nullptr,
          nullptr}},
        {DB::Properties::kNumDeletesActiveMemTable,
         {false, nullptr, &InternalStats::HandleNumDeletesActiveMemTable,
          nullptr, nullptr}},
        {DB::Properties::kNumDeletesImmMemTables,
         {false, nullptr, &InternalStats::HandleNumDeletesImmMemTables, nullptr,
          nullptr}},
        {DB::Properties::kEstimateNumKeys,
         {false, nullptr, &InternalStats::HandleEstimateNumKeys, nullptr,
          nullptr}},
        {DB::Properties::kEstimateTableReadersMem,
         {true, nullptr, &InternalStats::HandleEstimateTableReadersMem, nullptr,
          nullptr}},
        {DB::Properties::kIsFileDeletionsEnabled,
         {false, nullptr, &InternalStats::HandleIsFileDeletionsEnabled, nullptr,
          nullptr}},
        {DB::Properties::kNumSnapshots,
         {false, nullptr, &InternalStats::HandleNumSnapshots, nullptr,
          nullptr}},
        {DB::Properties::kOldestSnapshotTime,
         {false, nullptr, &InternalStats::HandleOldestSnapshotTime, nullptr,
          nullptr}},
        {DB::Properties::kOldestSnapshotSequence,
         {false, nullptr, &InternalStats::HandleOldestSnapshotSequence, nullptr,
          nullptr}},
        {DB::Properties::kNumLiveVersions,
         {false, nullptr, &InternalStats::HandleNumLiveVersions, nullptr,
          nullptr}},
        {DB::Properties::kCurrentSuperVersionNumber,
         {false, nullptr, &InternalStats::HandleCurrentSuperVersionNumber,
          nullptr, nullptr}},
        {DB::Properties::kEstimateLiveDataSize,
         {true, nullptr, &InternalStats::HandleEstimateLiveDataSize, nullptr,
          nullptr}},
        {DB::Properties::kMinLogNumberToKeep,
         {false, nullptr, &InternalStats::HandleMinLogNumberToKeep, nullptr,
          nullptr}},
        {DB::Properties::kMinObsoleteSstNumberToKeep,
         {false, nullptr, &InternalStats::HandleMinObsoleteSstNumberToKeep,
          nullptr, nullptr}},
        {DB::Properties::kBaseLevel,
         {false, nullptr, &InternalStats::HandleBaseLevel, nullptr, nullptr}},
        {DB::Properties::kTotalSstFilesSize,
         {false, nullptr, &InternalStats::HandleTotalSstFilesSize, nullptr,
          nullptr}},
        {DB::Properties::kLiveSstFilesSize,
         {false, nullptr, &InternalStats::HandleLiveSstFilesSize, nullptr,
          nullptr}},
        {DB::Properties::kEstimatePendingCompactionBytes,
         {false, nullptr, &InternalStats::HandleEstimatePendingCompactionBytes,
          nullptr, nullptr}},
        {DB::Properties::kNumRunningFlushes,
         {false, nullptr, &InternalStats::HandleNumRunningFlushes, nullptr,
          nullptr}},
        {DB::Properties::kNumRunningCompactions,
         {false, nullptr, &InternalStats::HandleNumRunningCompactions, nullptr,
          nullptr}},
        {DB::Properties::kActualDelayedWriteRate,
         {false, nullptr, &InternalStats::HandleActualDelayedWriteRate, nullptr,
          nullptr}},
        {DB::Properties::kIsWriteStopped,
         {false, nullptr, &InternalStats::HandleIsWriteStopped, nullptr,
          nullptr}},
        {DB::Properties::kEstimateOldestKeyTime,
         {false, nullptr, &InternalStats::HandleEstimateOldestKeyTime, nullptr,
          nullptr}},
        {DB::Properties::kBlockCacheCapacity,
         {false, nullptr, &InternalStats::HandleBlockCacheCapacity, nullptr,
          nullptr}},
        {DB::Properties::kBlockCacheUsage,
         {false, nullptr, &InternalStats::HandleBlockCacheUsage, nullptr,
          nullptr}},
        {DB::Properties::kBlockCachePinnedUsage,
         {false, nullptr, &InternalStats::HandleBlockCachePinnedUsage, nullptr,
          nullptr}},
        {DB::Properties::kOptionsStatistics,
         {false, nullptr, nullptr, nullptr,
          &DBImpl::GetPropertyHandleOptionsStatistics}},
};
490
GetPropertyInfo(const Slice & property)491 const DBPropertyInfo* GetPropertyInfo(const Slice& property) {
492 std::string ppt_name = GetPropertyNameAndArg(property).first.ToString();
493 auto ppt_info_iter = InternalStats::ppt_name_to_info.find(ppt_name);
494 if (ppt_info_iter == InternalStats::ppt_name_to_info.end()) {
495 return nullptr;
496 }
497 return &ppt_info_iter->second;
498 }
499
GetStringProperty(const DBPropertyInfo & property_info,const Slice & property,std::string * value)500 bool InternalStats::GetStringProperty(const DBPropertyInfo& property_info,
501 const Slice& property,
502 std::string* value) {
503 assert(value != nullptr);
504 assert(property_info.handle_string != nullptr);
505 Slice arg = GetPropertyNameAndArg(property).second;
506 return (this->*(property_info.handle_string))(value, arg);
507 }
508
GetMapProperty(const DBPropertyInfo & property_info,const Slice &,std::map<std::string,std::string> * value)509 bool InternalStats::GetMapProperty(const DBPropertyInfo& property_info,
510 const Slice& /*property*/,
511 std::map<std::string, std::string>* value) {
512 assert(value != nullptr);
513 assert(property_info.handle_map != nullptr);
514 return (this->*(property_info.handle_map))(value);
515 }
516
GetIntProperty(const DBPropertyInfo & property_info,uint64_t * value,DBImpl * db)517 bool InternalStats::GetIntProperty(const DBPropertyInfo& property_info,
518 uint64_t* value, DBImpl* db) {
519 assert(value != nullptr);
520 assert(property_info.handle_int != nullptr &&
521 !property_info.need_out_of_mutex);
522 db->mutex_.AssertHeld();
523 return (this->*(property_info.handle_int))(value, db, nullptr /* version */);
524 }
525
GetIntPropertyOutOfMutex(const DBPropertyInfo & property_info,Version * version,uint64_t * value)526 bool InternalStats::GetIntPropertyOutOfMutex(
527 const DBPropertyInfo& property_info, Version* version, uint64_t* value) {
528 assert(value != nullptr);
529 assert(property_info.handle_int != nullptr &&
530 property_info.need_out_of_mutex);
531 return (this->*(property_info.handle_int))(value, nullptr /* db */, version);
532 }
533
HandleNumFilesAtLevel(std::string * value,Slice suffix)534 bool InternalStats::HandleNumFilesAtLevel(std::string* value, Slice suffix) {
535 uint64_t level;
536 const auto* vstorage = cfd_->current()->storage_info();
537 bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty();
538 if (!ok || static_cast<int>(level) >= number_levels_) {
539 return false;
540 } else {
541 char buf[100];
542 snprintf(buf, sizeof(buf), "%d",
543 vstorage->NumLevelFiles(static_cast<int>(level)));
544 *value = buf;
545 return true;
546 }
547 }
548
HandleCompressionRatioAtLevelPrefix(std::string * value,Slice suffix)549 bool InternalStats::HandleCompressionRatioAtLevelPrefix(std::string* value,
550 Slice suffix) {
551 uint64_t level;
552 const auto* vstorage = cfd_->current()->storage_info();
553 bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty();
554 if (!ok || level >= static_cast<uint64_t>(number_levels_)) {
555 return false;
556 }
557 *value = ToString(
558 vstorage->GetEstimatedCompressionRatioAtLevel(static_cast<int>(level)));
559 return true;
560 }
561
HandleLevelStats(std::string * value,Slice)562 bool InternalStats::HandleLevelStats(std::string* value, Slice /*suffix*/) {
563 char buf[1000];
564 const auto* vstorage = cfd_->current()->storage_info();
565 snprintf(buf, sizeof(buf),
566 "Level Files Size(MB)\n"
567 "--------------------\n");
568 value->append(buf);
569
570 for (int level = 0; level < number_levels_; level++) {
571 snprintf(buf, sizeof(buf), "%3d %8d %8.0f\n", level,
572 vstorage->NumLevelFiles(level),
573 vstorage->NumLevelBytes(level) / kMB);
574 value->append(buf);
575 }
576 return true;
577 }
578
HandleStats(std::string * value,Slice suffix)579 bool InternalStats::HandleStats(std::string* value, Slice suffix) {
580 if (!HandleCFStats(value, suffix)) {
581 return false;
582 }
583 if (!HandleDBStats(value, suffix)) {
584 return false;
585 }
586 return true;
587 }
588
// Map-形式 handler for "cfstats": fills `cf_stats` with per-CF statistics
// as string key/value pairs.
bool InternalStats::HandleCFMapStats(
    std::map<std::string, std::string>* cf_stats) {
  DumpCFMapStats(cf_stats);
  return true;
}
594
// String handler for "cfstats": appends the full formatted per-CF stats.
bool InternalStats::HandleCFStats(std::string* value, Slice /*suffix*/) {
  DumpCFStats(value);
  return true;
}
599
// Like HandleCFStats() but omits the per-file read-latency histogram.
bool InternalStats::HandleCFStatsNoFileHistogram(std::string* value,
                                                 Slice /*suffix*/) {
  DumpCFStatsNoFileHistogram(value);
  return true;
}
605
// String handler for "cf-file-histogram": appends only the per-file
// histogram section of the CF stats.
bool InternalStats::HandleCFFileHistogram(std::string* value,
                                          Slice /*suffix*/) {
  DumpCFFileHistogram(value);
  return true;
}
611
// String handler for "dbstats": appends DB-wide (cross-CF) statistics.
bool InternalStats::HandleDBStats(std::string* value, Slice /*suffix*/) {
  DumpDBStats(value);
  return true;
}
616
HandleSsTables(std::string * value,Slice)617 bool InternalStats::HandleSsTables(std::string* value, Slice /*suffix*/) {
618 auto* current = cfd_->current();
619 *value = current->DebugString(true, true);
620 return true;
621 }
622
HandleAggregatedTableProperties(std::string * value,Slice)623 bool InternalStats::HandleAggregatedTableProperties(std::string* value,
624 Slice /*suffix*/) {
625 std::shared_ptr<const TableProperties> tp;
626 auto s = cfd_->current()->GetAggregatedTableProperties(&tp);
627 if (!s.ok()) {
628 return false;
629 }
630 *value = tp->ToString();
631 return true;
632 }
633
HandleAggregatedTablePropertiesAtLevel(std::string * value,Slice suffix)634 bool InternalStats::HandleAggregatedTablePropertiesAtLevel(std::string* value,
635 Slice suffix) {
636 uint64_t level;
637 bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty();
638 if (!ok || static_cast<int>(level) >= number_levels_) {
639 return false;
640 }
641 std::shared_ptr<const TableProperties> tp;
642 auto s = cfd_->current()->GetAggregatedTableProperties(
643 &tp, static_cast<int>(level));
644 if (!s.ok()) {
645 return false;
646 }
647 *value = tp->ToString();
648 return true;
649 }
650
// Number of immutable memtables that have not yet been flushed.
bool InternalStats::HandleNumImmutableMemTable(uint64_t* value, DBImpl* /*db*/,
                                               Version* /*version*/) {
  *value = cfd_->imm()->NumNotFlushed();
  return true;
}
656
// Number of immutable memtables that have already been flushed.
bool InternalStats::HandleNumImmutableMemTableFlushed(uint64_t* value,
                                                      DBImpl* /*db*/,
                                                      Version* /*version*/) {
  *value = cfd_->imm()->NumFlushed();
  return true;
}
663
// 1 if a memtable flush is pending for this column family, 0 otherwise.
bool InternalStats::HandleMemTableFlushPending(uint64_t* value, DBImpl* /*db*/,
                                               Version* /*version*/) {
  *value = (cfd_->imm()->IsFlushPending() ? 1 : 0);
  return true;
}
669
// Number of currently running flushes (DB-wide, not per-CF).
bool InternalStats::HandleNumRunningFlushes(uint64_t* value, DBImpl* db,
                                            Version* /*version*/) {
  *value = db->num_running_flushes();
  return true;
}
675
bool InternalStats::HandleCompactionPending(uint64_t* value, DBImpl* /*db*/,
                                            Version* /*version*/) {
  // 1 if the system already determines at least one compaction is needed;
  // 0 otherwise.
  const auto* vstorage = cfd_->current()->storage_info();
  *value = (cfd_->compaction_picker()->NeedsCompaction(vstorage) ? 1 : 0);
  return true;
}
684
// Number of currently running compactions (DB-wide, not per-CF).
bool InternalStats::HandleNumRunningCompactions(uint64_t* value, DBImpl* db,
                                                Version* /*version*/) {
  *value = db->num_running_compactions_;
  return true;
}
690
bool InternalStats::HandleBackgroundErrors(uint64_t* value, DBImpl* /*db*/,
                                           Version* /*version*/) {
  // Accumulated number of errors in background flushes or compactions.
  *value = GetBackgroundErrorCount();
  return true;
}
697
HandleCurSizeActiveMemTable(uint64_t * value,DBImpl *,Version *)698 bool InternalStats::HandleCurSizeActiveMemTable(uint64_t* value, DBImpl* /*db*/,
699 Version* /*version*/) {
700 // Current size of the active memtable
701 *value = cfd_->mem()->ApproximateMemoryUsage();
702 return true;
703 }
704
HandleCurSizeAllMemTables(uint64_t * value,DBImpl *,Version *)705 bool InternalStats::HandleCurSizeAllMemTables(uint64_t* value, DBImpl* /*db*/,
706 Version* /*version*/) {
707 // Current size of the active memtable + immutable memtables
708 *value = cfd_->mem()->ApproximateMemoryUsage() +
709 cfd_->imm()->ApproximateUnflushedMemTablesMemoryUsage();
710 return true;
711 }
712
HandleSizeAllMemTables(uint64_t * value,DBImpl *,Version *)713 bool InternalStats::HandleSizeAllMemTables(uint64_t* value, DBImpl* /*db*/,
714 Version* /*version*/) {
715 *value = cfd_->mem()->ApproximateMemoryUsage() +
716 cfd_->imm()->ApproximateMemoryUsage();
717 return true;
718 }
719
HandleNumEntriesActiveMemTable(uint64_t * value,DBImpl *,Version *)720 bool InternalStats::HandleNumEntriesActiveMemTable(uint64_t* value,
721 DBImpl* /*db*/,
722 Version* /*version*/) {
723 // Current number of entires in the active memtable
724 *value = cfd_->mem()->num_entries();
725 return true;
726 }
727
HandleNumEntriesImmMemTables(uint64_t * value,DBImpl *,Version *)728 bool InternalStats::HandleNumEntriesImmMemTables(uint64_t* value,
729 DBImpl* /*db*/,
730 Version* /*version*/) {
731 // Current number of entries in the immutable memtables
732 *value = cfd_->imm()->current()->GetTotalNumEntries();
733 return true;
734 }
735
HandleNumDeletesActiveMemTable(uint64_t * value,DBImpl *,Version *)736 bool InternalStats::HandleNumDeletesActiveMemTable(uint64_t* value,
737 DBImpl* /*db*/,
738 Version* /*version*/) {
739 // Current number of entires in the active memtable
740 *value = cfd_->mem()->num_deletes();
741 return true;
742 }
743
HandleNumDeletesImmMemTables(uint64_t * value,DBImpl *,Version *)744 bool InternalStats::HandleNumDeletesImmMemTables(uint64_t* value,
745 DBImpl* /*db*/,
746 Version* /*version*/) {
747 // Current number of entries in the immutable memtables
748 *value = cfd_->imm()->current()->GetTotalNumDeletes();
749 return true;
750 }
751
HandleEstimateNumKeys(uint64_t * value,DBImpl *,Version *)752 bool InternalStats::HandleEstimateNumKeys(uint64_t* value, DBImpl* /*db*/,
753 Version* /*version*/) {
754 // Estimate number of entries in the column family:
755 // Use estimated entries in tables + total entries in memtables.
756 const auto* vstorage = cfd_->current()->storage_info();
757 uint64_t estimate_keys = cfd_->mem()->num_entries() +
758 cfd_->imm()->current()->GetTotalNumEntries() +
759 vstorage->GetEstimatedActiveKeys();
760 uint64_t estimate_deletes =
761 cfd_->mem()->num_deletes() + cfd_->imm()->current()->GetTotalNumDeletes();
762 *value = estimate_keys > estimate_deletes * 2
763 ? estimate_keys - (estimate_deletes * 2)
764 : 0;
765 return true;
766 }
767
HandleNumSnapshots(uint64_t * value,DBImpl * db,Version *)768 bool InternalStats::HandleNumSnapshots(uint64_t* value, DBImpl* db,
769 Version* /*version*/) {
770 *value = db->snapshots().count();
771 return true;
772 }
773
HandleOldestSnapshotTime(uint64_t * value,DBImpl * db,Version *)774 bool InternalStats::HandleOldestSnapshotTime(uint64_t* value, DBImpl* db,
775 Version* /*version*/) {
776 *value = static_cast<uint64_t>(db->snapshots().GetOldestSnapshotTime());
777 return true;
778 }
779
HandleOldestSnapshotSequence(uint64_t * value,DBImpl * db,Version *)780 bool InternalStats::HandleOldestSnapshotSequence(uint64_t* value, DBImpl* db,
781 Version* /*version*/) {
782 *value = static_cast<uint64_t>(db->snapshots().GetOldestSnapshotSequence());
783 return true;
784 }
785
HandleNumLiveVersions(uint64_t * value,DBImpl *,Version *)786 bool InternalStats::HandleNumLiveVersions(uint64_t* value, DBImpl* /*db*/,
787 Version* /*version*/) {
788 *value = cfd_->GetNumLiveVersions();
789 return true;
790 }
791
HandleCurrentSuperVersionNumber(uint64_t * value,DBImpl *,Version *)792 bool InternalStats::HandleCurrentSuperVersionNumber(uint64_t* value,
793 DBImpl* /*db*/,
794 Version* /*version*/) {
795 *value = cfd_->GetSuperVersionNumber();
796 return true;
797 }
798
HandleIsFileDeletionsEnabled(uint64_t * value,DBImpl * db,Version *)799 bool InternalStats::HandleIsFileDeletionsEnabled(uint64_t* value, DBImpl* db,
800 Version* /*version*/) {
801 *value = db->IsFileDeletionsEnabled();
802 return true;
803 }
804
HandleBaseLevel(uint64_t * value,DBImpl *,Version *)805 bool InternalStats::HandleBaseLevel(uint64_t* value, DBImpl* /*db*/,
806 Version* /*version*/) {
807 const auto* vstorage = cfd_->current()->storage_info();
808 *value = vstorage->base_level();
809 return true;
810 }
811
HandleTotalSstFilesSize(uint64_t * value,DBImpl *,Version *)812 bool InternalStats::HandleTotalSstFilesSize(uint64_t* value, DBImpl* /*db*/,
813 Version* /*version*/) {
814 *value = cfd_->GetTotalSstFilesSize();
815 return true;
816 }
817
HandleLiveSstFilesSize(uint64_t * value,DBImpl *,Version *)818 bool InternalStats::HandleLiveSstFilesSize(uint64_t* value, DBImpl* /*db*/,
819 Version* /*version*/) {
820 *value = cfd_->GetLiveSstFilesSize();
821 return true;
822 }
823
HandleEstimatePendingCompactionBytes(uint64_t * value,DBImpl *,Version *)824 bool InternalStats::HandleEstimatePendingCompactionBytes(uint64_t* value,
825 DBImpl* /*db*/,
826 Version* /*version*/) {
827 const auto* vstorage = cfd_->current()->storage_info();
828 *value = vstorage->estimated_compaction_needed_bytes();
829 return true;
830 }
831
HandleEstimateTableReadersMem(uint64_t * value,DBImpl *,Version * version)832 bool InternalStats::HandleEstimateTableReadersMem(uint64_t* value,
833 DBImpl* /*db*/,
834 Version* version) {
835 *value = (version == nullptr) ? 0 : version->GetMemoryUsageByTableReaders();
836 return true;
837 }
838
HandleEstimateLiveDataSize(uint64_t * value,DBImpl *,Version * version)839 bool InternalStats::HandleEstimateLiveDataSize(uint64_t* value, DBImpl* /*db*/,
840 Version* version) {
841 const auto* vstorage = version->storage_info();
842 *value = vstorage->EstimateLiveDataSize();
843 return true;
844 }
845
HandleMinLogNumberToKeep(uint64_t * value,DBImpl * db,Version *)846 bool InternalStats::HandleMinLogNumberToKeep(uint64_t* value, DBImpl* db,
847 Version* /*version*/) {
848 *value = db->MinLogNumberToKeep();
849 return true;
850 }
851
HandleMinObsoleteSstNumberToKeep(uint64_t * value,DBImpl * db,Version *)852 bool InternalStats::HandleMinObsoleteSstNumberToKeep(uint64_t* value,
853 DBImpl* db,
854 Version* /*version*/) {
855 *value = db->MinObsoleteSstNumberToKeep();
856 return true;
857 }
858
HandleActualDelayedWriteRate(uint64_t * value,DBImpl * db,Version *)859 bool InternalStats::HandleActualDelayedWriteRate(uint64_t* value, DBImpl* db,
860 Version* /*version*/) {
861 const WriteController& wc = db->write_controller();
862 if (!wc.NeedsDelay()) {
863 *value = 0;
864 } else {
865 *value = wc.delayed_write_rate();
866 }
867 return true;
868 }
869
HandleIsWriteStopped(uint64_t * value,DBImpl * db,Version *)870 bool InternalStats::HandleIsWriteStopped(uint64_t* value, DBImpl* db,
871 Version* /*version*/) {
872 *value = db->write_controller().IsStopped() ? 1 : 0;
873 return true;
874 }
875
HandleEstimateOldestKeyTime(uint64_t * value,DBImpl *,Version *)876 bool InternalStats::HandleEstimateOldestKeyTime(uint64_t* value, DBImpl* /*db*/,
877 Version* /*version*/) {
878 // TODO(yiwu): The property is currently available for fifo compaction
879 // with allow_compaction = false. This is because we don't propagate
880 // oldest_key_time on compaction.
881 if (cfd_->ioptions()->compaction_style != kCompactionStyleFIFO ||
882 cfd_->GetCurrentMutableCFOptions()
883 ->compaction_options_fifo.allow_compaction) {
884 return false;
885 }
886
887 TablePropertiesCollection collection;
888 auto s = cfd_->current()->GetPropertiesOfAllTables(&collection);
889 if (!s.ok()) {
890 return false;
891 }
892 *value = std::numeric_limits<uint64_t>::max();
893 for (auto& p : collection) {
894 *value = std::min(*value, p.second->oldest_key_time);
895 if (*value == 0) {
896 break;
897 }
898 }
899 if (*value > 0) {
900 *value = std::min({cfd_->mem()->ApproximateOldestKeyTime(),
901 cfd_->imm()->ApproximateOldestKeyTime(), *value});
902 }
903 return *value > 0 && *value < std::numeric_limits<uint64_t>::max();
904 }
905
HandleBlockCacheStat(Cache ** block_cache)906 bool InternalStats::HandleBlockCacheStat(Cache** block_cache) {
907 assert(block_cache != nullptr);
908 auto* table_factory = cfd_->ioptions()->table_factory;
909 assert(table_factory != nullptr);
910 if (BlockBasedTableFactory::kName != table_factory->Name()) {
911 return false;
912 }
913 auto* table_options =
914 reinterpret_cast<BlockBasedTableOptions*>(table_factory->GetOptions());
915 if (table_options == nullptr) {
916 return false;
917 }
918 *block_cache = table_options->block_cache.get();
919 if (table_options->no_block_cache || *block_cache == nullptr) {
920 return false;
921 }
922 return true;
923 }
924
HandleBlockCacheCapacity(uint64_t * value,DBImpl *,Version *)925 bool InternalStats::HandleBlockCacheCapacity(uint64_t* value, DBImpl* /*db*/,
926 Version* /*version*/) {
927 Cache* block_cache;
928 bool ok = HandleBlockCacheStat(&block_cache);
929 if (!ok) {
930 return false;
931 }
932 *value = static_cast<uint64_t>(block_cache->GetCapacity());
933 return true;
934 }
935
HandleBlockCacheUsage(uint64_t * value,DBImpl *,Version *)936 bool InternalStats::HandleBlockCacheUsage(uint64_t* value, DBImpl* /*db*/,
937 Version* /*version*/) {
938 Cache* block_cache;
939 bool ok = HandleBlockCacheStat(&block_cache);
940 if (!ok) {
941 return false;
942 }
943 *value = static_cast<uint64_t>(block_cache->GetUsage());
944 return true;
945 }
946
HandleBlockCachePinnedUsage(uint64_t * value,DBImpl *,Version *)947 bool InternalStats::HandleBlockCachePinnedUsage(uint64_t* value, DBImpl* /*db*/,
948 Version* /*version*/) {
949 Cache* block_cache;
950 bool ok = HandleBlockCacheStat(&block_cache);
951 if (!ok) {
952 return false;
953 }
954 *value = static_cast<uint64_t>(block_cache->GetPinnedUsage());
955 return true;
956 }
957
DumpDBStats(std::string * value)958 void InternalStats::DumpDBStats(std::string* value) {
959 char buf[1000];
960 // DB-level stats, only available from default column family
961 double seconds_up = (env_->NowMicros() - started_at_ + 1) / kMicrosInSec;
962 double interval_seconds_up = seconds_up - db_stats_snapshot_.seconds_up;
963 snprintf(buf, sizeof(buf),
964 "\n** DB Stats **\nUptime(secs): %.1f total, %.1f interval\n",
965 seconds_up, interval_seconds_up);
966 value->append(buf);
967 // Cumulative
968 uint64_t user_bytes_written =
969 GetDBStats(InternalStats::kIntStatsBytesWritten);
970 uint64_t num_keys_written =
971 GetDBStats(InternalStats::kIntStatsNumKeysWritten);
972 uint64_t write_other = GetDBStats(InternalStats::kIntStatsWriteDoneByOther);
973 uint64_t write_self = GetDBStats(InternalStats::kIntStatsWriteDoneBySelf);
974 uint64_t wal_bytes = GetDBStats(InternalStats::kIntStatsWalFileBytes);
975 uint64_t wal_synced = GetDBStats(InternalStats::kIntStatsWalFileSynced);
976 uint64_t write_with_wal = GetDBStats(InternalStats::kIntStatsWriteWithWal);
977 uint64_t write_stall_micros =
978 GetDBStats(InternalStats::kIntStatsWriteStallMicros);
979
980 const int kHumanMicrosLen = 32;
981 char human_micros[kHumanMicrosLen];
982
983 // Data
984 // writes: total number of write requests.
985 // keys: total number of key updates issued by all the write requests
986 // commit groups: number of group commits issued to the DB. Each group can
987 // contain one or more writes.
988 // so writes/keys is the average number of put in multi-put or put
989 // writes/groups is the average group commit size.
990 //
991 // The format is the same for interval stats.
992 snprintf(buf, sizeof(buf),
993 "Cumulative writes: %s writes, %s keys, %s commit groups, "
994 "%.1f writes per commit group, ingest: %.2f GB, %.2f MB/s\n",
995 NumberToHumanString(write_other + write_self).c_str(),
996 NumberToHumanString(num_keys_written).c_str(),
997 NumberToHumanString(write_self).c_str(),
998 (write_other + write_self) / static_cast<double>(write_self + 1),
999 user_bytes_written / kGB, user_bytes_written / kMB / seconds_up);
1000 value->append(buf);
1001 // WAL
1002 snprintf(buf, sizeof(buf),
1003 "Cumulative WAL: %s writes, %s syncs, "
1004 "%.2f writes per sync, written: %.2f GB, %.2f MB/s\n",
1005 NumberToHumanString(write_with_wal).c_str(),
1006 NumberToHumanString(wal_synced).c_str(),
1007 write_with_wal / static_cast<double>(wal_synced + 1),
1008 wal_bytes / kGB, wal_bytes / kMB / seconds_up);
1009 value->append(buf);
1010 // Stall
1011 AppendHumanMicros(write_stall_micros, human_micros, kHumanMicrosLen, true);
1012 snprintf(buf, sizeof(buf), "Cumulative stall: %s, %.1f percent\n",
1013 human_micros,
1014 // 10000 = divide by 1M to get secs, then multiply by 100 for pct
1015 write_stall_micros / 10000.0 / std::max(seconds_up, 0.001));
1016 value->append(buf);
1017
1018 // Interval
1019 uint64_t interval_write_other = write_other - db_stats_snapshot_.write_other;
1020 uint64_t interval_write_self = write_self - db_stats_snapshot_.write_self;
1021 uint64_t interval_num_keys_written =
1022 num_keys_written - db_stats_snapshot_.num_keys_written;
1023 snprintf(
1024 buf, sizeof(buf),
1025 "Interval writes: %s writes, %s keys, %s commit groups, "
1026 "%.1f writes per commit group, ingest: %.2f MB, %.2f MB/s\n",
1027 NumberToHumanString(interval_write_other + interval_write_self).c_str(),
1028 NumberToHumanString(interval_num_keys_written).c_str(),
1029 NumberToHumanString(interval_write_self).c_str(),
1030 static_cast<double>(interval_write_other + interval_write_self) /
1031 (interval_write_self + 1),
1032 (user_bytes_written - db_stats_snapshot_.ingest_bytes) / kMB,
1033 (user_bytes_written - db_stats_snapshot_.ingest_bytes) / kMB /
1034 std::max(interval_seconds_up, 0.001)),
1035 value->append(buf);
1036
1037 uint64_t interval_write_with_wal =
1038 write_with_wal - db_stats_snapshot_.write_with_wal;
1039 uint64_t interval_wal_synced = wal_synced - db_stats_snapshot_.wal_synced;
1040 uint64_t interval_wal_bytes = wal_bytes - db_stats_snapshot_.wal_bytes;
1041
1042 snprintf(
1043 buf, sizeof(buf),
1044 "Interval WAL: %s writes, %s syncs, "
1045 "%.2f writes per sync, written: %.2f MB, %.2f MB/s\n",
1046 NumberToHumanString(interval_write_with_wal).c_str(),
1047 NumberToHumanString(interval_wal_synced).c_str(),
1048 interval_write_with_wal / static_cast<double>(interval_wal_synced + 1),
1049 interval_wal_bytes / kGB,
1050 interval_wal_bytes / kMB / std::max(interval_seconds_up, 0.001));
1051 value->append(buf);
1052
1053 // Stall
1054 AppendHumanMicros(write_stall_micros - db_stats_snapshot_.write_stall_micros,
1055 human_micros, kHumanMicrosLen, true);
1056 snprintf(buf, sizeof(buf), "Interval stall: %s, %.1f percent\n", human_micros,
1057 // 10000 = divide by 1M to get secs, then multiply by 100 for pct
1058 (write_stall_micros - db_stats_snapshot_.write_stall_micros) /
1059 10000.0 / std::max(interval_seconds_up, 0.001));
1060 value->append(buf);
1061
1062 db_stats_snapshot_.seconds_up = seconds_up;
1063 db_stats_snapshot_.ingest_bytes = user_bytes_written;
1064 db_stats_snapshot_.write_other = write_other;
1065 db_stats_snapshot_.write_self = write_self;
1066 db_stats_snapshot_.num_keys_written = num_keys_written;
1067 db_stats_snapshot_.wal_bytes = wal_bytes;
1068 db_stats_snapshot_.wal_synced = wal_synced;
1069 db_stats_snapshot_.write_with_wal = write_with_wal;
1070 db_stats_snapshot_.write_stall_micros = write_stall_micros;
1071 }
1072
/**
 * Dump compaction-level stats to a map of stat name (with "compaction."
 * prefix) to value in double, encoded as a string. The level in the stat
 * name is represented with a prefix "Lx" where "x" is the level number.
 * A special level "Sum" represents the sum of a stat for all levels.
 * The result also contains IO stall counters, whose keys start with
 * "io_stalls." and whose values are uint64 values encoded as strings.
 */
DumpCFMapStats(std::map<std::string,std::string> * cf_stats)1081 void InternalStats::DumpCFMapStats(
1082 std::map<std::string, std::string>* cf_stats) {
1083 CompactionStats compaction_stats_sum;
1084 std::map<int, std::map<LevelStatType, double>> levels_stats;
1085 DumpCFMapStats(&levels_stats, &compaction_stats_sum);
1086 for (auto const& level_ent : levels_stats) {
1087 auto level_str =
1088 level_ent.first == -1 ? "Sum" : "L" + ToString(level_ent.first);
1089 for (auto const& stat_ent : level_ent.second) {
1090 auto stat_type = stat_ent.first;
1091 auto key_str =
1092 "compaction." + level_str + "." +
1093 InternalStats::compaction_level_stats.at(stat_type).property_name;
1094 (*cf_stats)[key_str] = std::to_string(stat_ent.second);
1095 }
1096 }
1097
1098 DumpCFMapStatsIOStalls(cf_stats);
1099 }
1100
// Computes per-level compaction stats into *levels_stats (keyed by level
// number, with key -1 holding the sum across all levels) and accumulates the
// per-level compaction stats into *compaction_stats_sum.
void InternalStats::DumpCFMapStats(
    std::map<int, std::map<LevelStatType, double>>* levels_stats,
    CompactionStats* compaction_stats_sum) {
  const VersionStorageInfo* vstorage = cfd_->current()->storage_info();

  // FIFO compaction checks only a single score; other styles check all
  // levels but one.
  int num_levels_to_check =
      (cfd_->ioptions()->compaction_style != kCompactionStyleFIFO)
          ? vstorage->num_levels() - 1
          : 1;

  // Compaction scores are sorted based on its value. Restore them to the
  // level order
  std::vector<double> compaction_score(number_levels_, 0);
  for (int i = 0; i < num_levels_to_check; ++i) {
    compaction_score[vstorage->CompactionScoreLevel(i)] =
        vstorage->CompactionScore(i);
  }
  // Count # of files being compacted for each level
  std::vector<int> files_being_compacted(number_levels_, 0);
  for (int level = 0; level < number_levels_; ++level) {
    for (auto* f : vstorage->LevelFiles(level)) {
      if (f->being_compacted) {
        ++files_being_compacted[level];
      }
    }
  }

  int total_files = 0;
  int total_files_being_compacted = 0;
  double total_file_size = 0;
  uint64_t flush_ingest = cf_stats_value_[BYTES_FLUSHED];
  uint64_t add_file_ingest = cf_stats_value_[BYTES_INGESTED_ADD_FILE];
  uint64_t curr_ingest = flush_ingest + add_file_ingest;
  for (int level = 0; level < number_levels_; level++) {
    int files = vstorage->NumLevelFiles(level);
    total_files += files;
    total_files_being_compacted += files_being_compacted[level];
    // Only report levels that hold files or have recorded compaction work.
    if (comp_stats_[level].micros > 0 || files > 0) {
      compaction_stats_sum->Add(comp_stats_[level]);
      total_file_size += vstorage->NumLevelBytes(level);
      uint64_t input_bytes;
      if (level == 0) {
        // L0 write-amp input is the total ingested bytes (flush + AddFile)
        // rather than compaction reads.
        input_bytes = curr_ingest;
      } else {
        input_bytes = comp_stats_[level].bytes_read_non_output_levels;
      }
      // Write amplification: compaction bytes written / input bytes
      // (0 when there was no input).
      double w_amp =
          (input_bytes == 0)
              ? 0.0
              : static_cast<double>(comp_stats_[level].bytes_written) /
                    input_bytes;
      std::map<LevelStatType, double> level_stats;
      PrepareLevelStats(&level_stats, files, files_being_compacted[level],
                        static_cast<double>(vstorage->NumLevelBytes(level)),
                        compaction_score[level], w_amp, comp_stats_[level]);
      (*levels_stats)[level] = level_stats;
    }
  }
  // Cumulative summary
  // +1 avoids division by zero when nothing has been ingested yet.
  double w_amp = compaction_stats_sum->bytes_written /
                 static_cast<double>(curr_ingest + 1);
  // Stats summary across levels
  std::map<LevelStatType, double> sum_stats;
  PrepareLevelStats(&sum_stats, total_files, total_files_being_compacted,
                    total_file_size, 0, w_amp, *compaction_stats_sum);
  (*levels_stats)[-1] = sum_stats;  // -1 is for the Sum level
}
1168
DumpCFMapStatsByPriority(std::map<int,std::map<LevelStatType,double>> * priorities_stats)1169 void InternalStats::DumpCFMapStatsByPriority(
1170 std::map<int, std::map<LevelStatType, double>>* priorities_stats) {
1171 for (size_t priority = 0; priority < comp_stats_by_pri_.size(); priority++) {
1172 if (comp_stats_by_pri_[priority].micros > 0) {
1173 std::map<LevelStatType, double> priority_stats;
1174 PrepareLevelStats(&priority_stats, 0 /* num_files */,
1175 0 /* being_compacted */, 0 /* total_file_size */,
1176 0 /* compaction_score */, 0 /* w_amp */,
1177 comp_stats_by_pri_[priority]);
1178 (*priorities_stats)[static_cast<int>(priority)] = priority_stats;
1179 }
1180 }
1181 }
1182
DumpCFMapStatsIOStalls(std::map<std::string,std::string> * cf_stats)1183 void InternalStats::DumpCFMapStatsIOStalls(
1184 std::map<std::string, std::string>* cf_stats) {
1185 (*cf_stats)["io_stalls.level0_slowdown"] =
1186 std::to_string(cf_stats_count_[L0_FILE_COUNT_LIMIT_SLOWDOWNS]);
1187 (*cf_stats)["io_stalls.level0_slowdown_with_compaction"] =
1188 std::to_string(cf_stats_count_[LOCKED_L0_FILE_COUNT_LIMIT_SLOWDOWNS]);
1189 (*cf_stats)["io_stalls.level0_numfiles"] =
1190 std::to_string(cf_stats_count_[L0_FILE_COUNT_LIMIT_STOPS]);
1191 (*cf_stats)["io_stalls.level0_numfiles_with_compaction"] =
1192 std::to_string(cf_stats_count_[LOCKED_L0_FILE_COUNT_LIMIT_STOPS]);
1193 (*cf_stats)["io_stalls.stop_for_pending_compaction_bytes"] =
1194 std::to_string(cf_stats_count_[PENDING_COMPACTION_BYTES_LIMIT_STOPS]);
1195 (*cf_stats)["io_stalls.slowdown_for_pending_compaction_bytes"] =
1196 std::to_string(cf_stats_count_[PENDING_COMPACTION_BYTES_LIMIT_SLOWDOWNS]);
1197 (*cf_stats)["io_stalls.memtable_compaction"] =
1198 std::to_string(cf_stats_count_[MEMTABLE_LIMIT_STOPS]);
1199 (*cf_stats)["io_stalls.memtable_slowdown"] =
1200 std::to_string(cf_stats_count_[MEMTABLE_LIMIT_SLOWDOWNS]);
1201
1202 uint64_t total_stop = cf_stats_count_[L0_FILE_COUNT_LIMIT_STOPS] +
1203 cf_stats_count_[PENDING_COMPACTION_BYTES_LIMIT_STOPS] +
1204 cf_stats_count_[MEMTABLE_LIMIT_STOPS];
1205
1206 uint64_t total_slowdown =
1207 cf_stats_count_[L0_FILE_COUNT_LIMIT_SLOWDOWNS] +
1208 cf_stats_count_[PENDING_COMPACTION_BYTES_LIMIT_SLOWDOWNS] +
1209 cf_stats_count_[MEMTABLE_LIMIT_SLOWDOWNS];
1210
1211 (*cf_stats)["io_stalls.total_stop"] = std::to_string(total_stop);
1212 (*cf_stats)["io_stalls.total_slowdown"] = std::to_string(total_slowdown);
1213 }
1214
// Appends the full per-CF stats dump to *value: the level/priority stats
// tables first, followed by the per-level file read latency histograms.
void InternalStats::DumpCFStats(std::string* value) {
  DumpCFStatsNoFileHistogram(value);
  DumpCFFileHistogram(value);
}
1219
DumpCFStatsNoFileHistogram(std::string * value)1220 void InternalStats::DumpCFStatsNoFileHistogram(std::string* value) {
1221 char buf[2000];
1222 // Per-ColumnFamily stats
1223 PrintLevelStatsHeader(buf, sizeof(buf), cfd_->GetName(), "Level");
1224 value->append(buf);
1225
1226 // Print stats for each level
1227 std::map<int, std::map<LevelStatType, double>> levels_stats;
1228 CompactionStats compaction_stats_sum;
1229 DumpCFMapStats(&levels_stats, &compaction_stats_sum);
1230 for (int l = 0; l < number_levels_; ++l) {
1231 if (levels_stats.find(l) != levels_stats.end()) {
1232 PrintLevelStats(buf, sizeof(buf), "L" + ToString(l), levels_stats[l]);
1233 value->append(buf);
1234 }
1235 }
1236
1237 // Print sum of level stats
1238 PrintLevelStats(buf, sizeof(buf), "Sum", levels_stats[-1]);
1239 value->append(buf);
1240
1241 uint64_t flush_ingest = cf_stats_value_[BYTES_FLUSHED];
1242 uint64_t add_file_ingest = cf_stats_value_[BYTES_INGESTED_ADD_FILE];
1243 uint64_t ingest_files_addfile = cf_stats_value_[INGESTED_NUM_FILES_TOTAL];
1244 uint64_t ingest_l0_files_addfile =
1245 cf_stats_value_[INGESTED_LEVEL0_NUM_FILES_TOTAL];
1246 uint64_t ingest_keys_addfile = cf_stats_value_[INGESTED_NUM_KEYS_TOTAL];
1247 // Cumulative summary
1248 uint64_t total_stall_count =
1249 cf_stats_count_[L0_FILE_COUNT_LIMIT_SLOWDOWNS] +
1250 cf_stats_count_[L0_FILE_COUNT_LIMIT_STOPS] +
1251 cf_stats_count_[PENDING_COMPACTION_BYTES_LIMIT_SLOWDOWNS] +
1252 cf_stats_count_[PENDING_COMPACTION_BYTES_LIMIT_STOPS] +
1253 cf_stats_count_[MEMTABLE_LIMIT_STOPS] +
1254 cf_stats_count_[MEMTABLE_LIMIT_SLOWDOWNS];
1255 // Interval summary
1256 uint64_t interval_flush_ingest =
1257 flush_ingest - cf_stats_snapshot_.ingest_bytes_flush;
1258 uint64_t interval_add_file_inget =
1259 add_file_ingest - cf_stats_snapshot_.ingest_bytes_addfile;
1260 uint64_t interval_ingest =
1261 interval_flush_ingest + interval_add_file_inget + 1;
1262 CompactionStats interval_stats(compaction_stats_sum);
1263 interval_stats.Subtract(cf_stats_snapshot_.comp_stats);
1264 double w_amp =
1265 interval_stats.bytes_written / static_cast<double>(interval_ingest);
1266 PrintLevelStats(buf, sizeof(buf), "Int", 0, 0, 0, 0, w_amp, interval_stats);
1267 value->append(buf);
1268
1269 PrintLevelStatsHeader(buf, sizeof(buf), cfd_->GetName(), "Priority");
1270 value->append(buf);
1271 std::map<int, std::map<LevelStatType, double>> priorities_stats;
1272 DumpCFMapStatsByPriority(&priorities_stats);
1273 for (size_t priority = 0; priority < comp_stats_by_pri_.size(); ++priority) {
1274 if (priorities_stats.find(static_cast<int>(priority)) !=
1275 priorities_stats.end()) {
1276 PrintLevelStats(
1277 buf, sizeof(buf),
1278 Env::PriorityToString(static_cast<Env::Priority>(priority)),
1279 priorities_stats[static_cast<int>(priority)]);
1280 value->append(buf);
1281 }
1282 }
1283
1284 double seconds_up = (env_->NowMicros() - started_at_ + 1) / kMicrosInSec;
1285 double interval_seconds_up = seconds_up - cf_stats_snapshot_.seconds_up;
1286 snprintf(buf, sizeof(buf), "Uptime(secs): %.1f total, %.1f interval\n",
1287 seconds_up, interval_seconds_up);
1288 value->append(buf);
1289 snprintf(buf, sizeof(buf), "Flush(GB): cumulative %.3f, interval %.3f\n",
1290 flush_ingest / kGB, interval_flush_ingest / kGB);
1291 value->append(buf);
1292 snprintf(buf, sizeof(buf), "AddFile(GB): cumulative %.3f, interval %.3f\n",
1293 add_file_ingest / kGB, interval_add_file_inget / kGB);
1294 value->append(buf);
1295
1296 uint64_t interval_ingest_files_addfile =
1297 ingest_files_addfile - cf_stats_snapshot_.ingest_files_addfile;
1298 snprintf(buf, sizeof(buf),
1299 "AddFile(Total Files): cumulative %" PRIu64 ", interval %" PRIu64
1300 "\n",
1301 ingest_files_addfile, interval_ingest_files_addfile);
1302 value->append(buf);
1303
1304 uint64_t interval_ingest_l0_files_addfile =
1305 ingest_l0_files_addfile - cf_stats_snapshot_.ingest_l0_files_addfile;
1306 snprintf(buf, sizeof(buf),
1307 "AddFile(L0 Files): cumulative %" PRIu64 ", interval %" PRIu64 "\n",
1308 ingest_l0_files_addfile, interval_ingest_l0_files_addfile);
1309 value->append(buf);
1310
1311 uint64_t interval_ingest_keys_addfile =
1312 ingest_keys_addfile - cf_stats_snapshot_.ingest_keys_addfile;
1313 snprintf(buf, sizeof(buf),
1314 "AddFile(Keys): cumulative %" PRIu64 ", interval %" PRIu64 "\n",
1315 ingest_keys_addfile, interval_ingest_keys_addfile);
1316 value->append(buf);
1317
1318 // Compact
1319 uint64_t compact_bytes_read = 0;
1320 uint64_t compact_bytes_write = 0;
1321 uint64_t compact_micros = 0;
1322 for (int level = 0; level < number_levels_; level++) {
1323 compact_bytes_read += comp_stats_[level].bytes_read_output_level +
1324 comp_stats_[level].bytes_read_non_output_levels;
1325 compact_bytes_write += comp_stats_[level].bytes_written;
1326 compact_micros += comp_stats_[level].micros;
1327 }
1328
1329 snprintf(buf, sizeof(buf),
1330 "Cumulative compaction: %.2f GB write, %.2f MB/s write, "
1331 "%.2f GB read, %.2f MB/s read, %.1f seconds\n",
1332 compact_bytes_write / kGB, compact_bytes_write / kMB / seconds_up,
1333 compact_bytes_read / kGB, compact_bytes_read / kMB / seconds_up,
1334 compact_micros / kMicrosInSec);
1335 value->append(buf);
1336
1337 // Compaction interval
1338 uint64_t interval_compact_bytes_write =
1339 compact_bytes_write - cf_stats_snapshot_.compact_bytes_write;
1340 uint64_t interval_compact_bytes_read =
1341 compact_bytes_read - cf_stats_snapshot_.compact_bytes_read;
1342 uint64_t interval_compact_micros =
1343 compact_micros - cf_stats_snapshot_.compact_micros;
1344
1345 snprintf(
1346 buf, sizeof(buf),
1347 "Interval compaction: %.2f GB write, %.2f MB/s write, "
1348 "%.2f GB read, %.2f MB/s read, %.1f seconds\n",
1349 interval_compact_bytes_write / kGB,
1350 interval_compact_bytes_write / kMB / std::max(interval_seconds_up, 0.001),
1351 interval_compact_bytes_read / kGB,
1352 interval_compact_bytes_read / kMB / std::max(interval_seconds_up, 0.001),
1353 interval_compact_micros / kMicrosInSec);
1354 value->append(buf);
1355 cf_stats_snapshot_.compact_bytes_write = compact_bytes_write;
1356 cf_stats_snapshot_.compact_bytes_read = compact_bytes_read;
1357 cf_stats_snapshot_.compact_micros = compact_micros;
1358
1359 snprintf(buf, sizeof(buf),
1360 "Stalls(count): %" PRIu64
1361 " level0_slowdown, "
1362 "%" PRIu64
1363 " level0_slowdown_with_compaction, "
1364 "%" PRIu64
1365 " level0_numfiles, "
1366 "%" PRIu64
1367 " level0_numfiles_with_compaction, "
1368 "%" PRIu64
1369 " stop for pending_compaction_bytes, "
1370 "%" PRIu64
1371 " slowdown for pending_compaction_bytes, "
1372 "%" PRIu64
1373 " memtable_compaction, "
1374 "%" PRIu64
1375 " memtable_slowdown, "
1376 "interval %" PRIu64 " total count\n",
1377 cf_stats_count_[L0_FILE_COUNT_LIMIT_SLOWDOWNS],
1378 cf_stats_count_[LOCKED_L0_FILE_COUNT_LIMIT_SLOWDOWNS],
1379 cf_stats_count_[L0_FILE_COUNT_LIMIT_STOPS],
1380 cf_stats_count_[LOCKED_L0_FILE_COUNT_LIMIT_STOPS],
1381 cf_stats_count_[PENDING_COMPACTION_BYTES_LIMIT_STOPS],
1382 cf_stats_count_[PENDING_COMPACTION_BYTES_LIMIT_SLOWDOWNS],
1383 cf_stats_count_[MEMTABLE_LIMIT_STOPS],
1384 cf_stats_count_[MEMTABLE_LIMIT_SLOWDOWNS],
1385 total_stall_count - cf_stats_snapshot_.stall_count);
1386 value->append(buf);
1387
1388 cf_stats_snapshot_.seconds_up = seconds_up;
1389 cf_stats_snapshot_.ingest_bytes_flush = flush_ingest;
1390 cf_stats_snapshot_.ingest_bytes_addfile = add_file_ingest;
1391 cf_stats_snapshot_.ingest_files_addfile = ingest_files_addfile;
1392 cf_stats_snapshot_.ingest_l0_files_addfile = ingest_l0_files_addfile;
1393 cf_stats_snapshot_.ingest_keys_addfile = ingest_keys_addfile;
1394 cf_stats_snapshot_.comp_stats = compaction_stats_sum;
1395 cf_stats_snapshot_.stall_count = total_stall_count;
1396 }
1397
DumpCFFileHistogram(std::string * value)1398 void InternalStats::DumpCFFileHistogram(std::string* value) {
1399 char buf[2000];
1400 snprintf(buf, sizeof(buf),
1401 "\n** File Read Latency Histogram By Level [%s] **\n",
1402 cfd_->GetName().c_str());
1403 value->append(buf);
1404
1405 for (int level = 0; level < number_levels_; level++) {
1406 if (!file_read_latency_[level].Empty()) {
1407 char buf2[5000];
1408 snprintf(buf2, sizeof(buf2),
1409 "** Level %d read latency histogram (micros):\n%s\n", level,
1410 file_read_latency_[level].ToString().c_str());
1411 value->append(buf2);
1412 }
1413 }
1414 }
1415
1416 #else
1417
1418 const DBPropertyInfo* GetPropertyInfo(const Slice& /*property*/) {
1419 return nullptr;
1420 }
1421
1422 #endif // !ROCKSDB_LITE
1423
1424 } // namespace ROCKSDB_NAMESPACE
1425