1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
5
6 #ifndef ROCKSDB_LITE
7 #ifdef GFLAGS
8 #include "tools/block_cache_analyzer/block_cache_trace_analyzer.h"
9
10 #include <algorithm>
11 #include <cinttypes>
12 #include <cstdio>
13 #include <cstdlib>
14 #include <fstream>
15 #include <iomanip>
16 #include <iostream>
17 #include <memory>
18 #include <random>
19 #include <sstream>
20
21 #include "monitoring/histogram.h"
22 #include "util/gflags_compat.h"
23 #include "util/string_util.h"
24
25 using GFLAGS_NAMESPACE::ParseCommandLineFlags;
26
27 DEFINE_string(block_cache_trace_path, "", "The trace file path.");
28 DEFINE_bool(is_block_cache_human_readable_trace, false,
29 "Is the trace file provided for analysis generated by running "
30 "block_cache_trace_analyzer with "
31 "FLAGS_human_readable_trace_file_path is specified.");
32 DEFINE_string(
33 block_cache_sim_config_path, "",
34 "The config file path. One cache configuration per line. The format of a "
35 "cache configuration is "
36 "cache_name,num_shard_bits,ghost_capacity,cache_capacity_1,...,cache_"
37 "capacity_N. Supported cache names are lru, lru_priority, lru_hybrid, and "
38 "lru_hybrid_no_insert_on_row_miss. User may also add a prefix 'ghost_' to "
39 "a cache_name to add a ghost cache in front of the real cache. "
40 "ghost_capacity and cache_capacity can be xK, xM or xG where x is a "
41 "positive number.");
42 DEFINE_int32(block_cache_trace_downsample_ratio, 1,
43 "The trace collected accesses on one in every "
44 "block_cache_trace_downsample_ratio blocks. We scale "
45 "down the simulated cache size by this ratio.");
46 DEFINE_bool(print_block_size_stats, false,
47 "Print block size distribution and the distribution break down by "
48 "block type and column family.");
49 DEFINE_bool(print_access_count_stats, false,
50 "Print access count distribution and the distribution break down "
51 "by block type and column family.");
52 DEFINE_bool(print_data_block_access_count_stats, false,
53 "Print data block accesses by user Get and Multi-Get.");
54 DEFINE_int32(cache_sim_warmup_seconds, 0,
55 "The number of seconds to warmup simulated caches. The hit/miss "
56 "counters are reset after the warmup completes.");
57 DEFINE_int32(analyze_bottom_k_access_count_blocks, 0,
58 "Print out detailed access information for blocks with their "
59 "number of accesses are the bottom k among all blocks.");
60 DEFINE_int32(analyze_top_k_access_count_blocks, 0,
61 "Print out detailed access information for blocks with their "
62 "number of accesses are the top k among all blocks.");
63 DEFINE_string(block_cache_analysis_result_dir, "",
64 "The directory that saves block cache analysis results.");
65 DEFINE_string(
66 timeline_labels, "",
67 "Group the number of accesses per block per second using these labels. "
68 "Possible labels are a combination of the following: cf (column family), "
69 "sst, level, bt (block type), caller, block. For example, label \"cf_bt\" "
70 "means the number of access per second is grouped by unique pairs of "
71 "\"cf_bt\". A label \"all\" contains the aggregated number of accesses per "
72 "second across all possible labels.");
73 DEFINE_string(reuse_distance_labels, "",
74 "Group the reuse distance of a block using these labels. Reuse "
75 "distance is defined as the cumulated size of unique blocks read "
76 "between two consecutive accesses on the same block.");
77 DEFINE_string(
78 reuse_distance_buckets, "",
79 "Group blocks by their reuse distances given these buckets. For "
80 "example, if 'reuse_distance_buckets' is '1K,1M,1G', we will "
81 "create four buckets. The first three buckets contain the number of "
82 "blocks with reuse distance less than 1KB, between 1K and 1M, between 1M "
83 "and 1G, respectively. The last bucket contains the number of blocks with "
84 "reuse distance larger than 1G. ");
85 DEFINE_string(
86 reuse_interval_labels, "",
87 "Group the reuse interval of a block using these labels. Reuse "
88 "interval is defined as the time between two consecutive accesses "
89 "on the same block.");
90 DEFINE_string(
91 reuse_interval_buckets, "",
92 "Group blocks by their reuse interval given these buckets. For "
93 "example, if 'reuse_distance_buckets' is '1,10,100', we will "
94 "create four buckets. The first three buckets contain the number of "
95 "blocks with reuse interval less than 1 second, between 1 second and 10 "
96 "seconds, between 10 seconds and 100 seconds, respectively. The last "
97 "bucket contains the number of blocks with reuse interval longer than 100 "
98 "seconds.");
99 DEFINE_string(
100 reuse_lifetime_labels, "",
101 "Group the reuse lifetime of a block using these labels. Reuse "
102 "lifetime is defined as the time interval between the first access on a "
103 "block and the last access on the same block. For blocks that are only "
104 "accessed once, its lifetime is set to kMaxUint64.");
105 DEFINE_string(
106 reuse_lifetime_buckets, "",
107 "Group blocks by their reuse lifetime given these buckets. For "
108 "example, if 'reuse_lifetime_buckets' is '1,10,100', we will "
109 "create four buckets. The first three buckets contain the number of "
110 "blocks with reuse lifetime less than 1 second, between 1 second and 10 "
111 "seconds, between 10 seconds and 100 seconds, respectively. The last "
112 "bucket contains the number of blocks with reuse lifetime longer than 100 "
113 "seconds.");
114 DEFINE_string(
115 analyze_callers, "",
116 "The list of callers to perform a detailed analysis on. If speicfied, the "
117 "analyzer will output a detailed percentage of accesses for each caller "
118 "break down by column family, level, and block type. A list of available "
119 "callers are: Get, MultiGet, Iterator, ApproximateSize, VerifyChecksum, "
120 "SSTDumpTool, ExternalSSTIngestion, Repair, Prefetch, Compaction, "
121 "CompactionRefill, Flush, SSTFileReader, Uncategorized.");
122 DEFINE_string(access_count_buckets, "",
123 "Group number of blocks by their access count given these "
124 "buckets. If specified, the analyzer will output a detailed "
125 "analysis on the number of blocks grouped by their access count "
126 "break down by block type and column family.");
127 DEFINE_int32(analyze_blocks_reuse_k_reuse_window, 0,
128 "Analyze the percentage of blocks that are accessed in the "
129 "[k, 2*k] seconds are accessed again in the next [2*k, 3*k], "
130 "[3*k, 4*k],...,[k*(n-1), k*n] seconds. ");
131 DEFINE_string(analyze_get_spatial_locality_labels, "",
132 "Group data blocks using these labels.");
133 DEFINE_string(analyze_get_spatial_locality_buckets, "",
134 "Group data blocks by their statistics using these buckets.");
135 DEFINE_string(skew_labels, "",
136 "Group the access count of a block using these labels.");
137 DEFINE_string(skew_buckets, "", "Group the skew labels using these buckets.");
138 DEFINE_bool(mrc_only, false,
139 "Evaluate alternative cache policies only. When this flag is true, "
140 "the analyzer does NOT maintain states of each block in memory for "
141 "analysis. It only feeds the accesses into the cache simulators.");
142 DEFINE_string(
143 analyze_correlation_coefficients_labels, "",
144 "Analyze the correlation coefficients of features such as number of past "
145 "accesses with regard to the number of accesses till the next access.");
146 DEFINE_int32(analyze_correlation_coefficients_max_number_of_values, 1000000,
147 "The maximum number of values for a feature. If the number of "
148 "values for a feature is larger than this max, it randomly "
149 "selects 'max' number of values.");
150 DEFINE_string(human_readable_trace_file_path, "",
151 "The filt path that saves human readable access records.");
152
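// A hypothetical invocation (binary name and paths are placeholders):
//   ./block_cache_trace_analyzer
//     -block_cache_trace_path=/path/to/block_cache_trace
//     -block_cache_sim_config_path=/path/to/cache_config
//     -block_cache_analysis_result_dir=/path/to/result_dir
//     -print_block_size_stats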
153 namespace ROCKSDB_NAMESPACE {
154 namespace {
155
156 const std::string kMissRatioCurveFileName = "mrc";
157 const std::string kGroupbyBlock = "block";
158 const std::string kGroupbyTable = "table";
159 const std::string kGroupbyColumnFamily = "cf";
160 const std::string kGroupbySSTFile = "sst";
161 const std::string kGroupbyBlockType = "bt";
162 const std::string kGroupbyCaller = "caller";
163 const std::string kGroupbyLevel = "level";
164 const std::string kGroupbyAll = "all";
165 const std::set<std::string> kGroupbyLabels{
166 kGroupbyBlock, kGroupbyColumnFamily, kGroupbySSTFile, kGroupbyLevel,
167 kGroupbyBlockType, kGroupbyCaller, kGroupbyAll};
168 const std::string kSupportedCacheNames =
169 " lru ghost_lru lru_priority ghost_lru_priority lru_hybrid "
170 "ghost_lru_hybrid lru_hybrid_no_insert_on_row_miss "
171 "ghost_lru_hybrid_no_insert_on_row_miss ";
172
173 // The suffix for the generated csv files.
174 const std::string kFileNameSuffixMissRatioTimeline = "miss_ratio_timeline";
175 const std::string kFileNameSuffixMissTimeline = "miss_timeline";
176 const std::string kFileNameSuffixSkew = "skewness";
177 const std::string kFileNameSuffixAccessTimeline = "access_timeline";
178 const std::string kFileNameSuffixCorrelation = "correlation_input";
179 const std::string kFileNameSuffixAvgReuseIntervalNaccesses =
180 "avg_reuse_interval_naccesses";
181 const std::string kFileNameSuffixAvgReuseInterval = "avg_reuse_interval";
182 const std::string kFileNameSuffixReuseInterval = "access_reuse_interval";
183 const std::string kFileNameSuffixReuseLifetime = "reuse_lifetime";
184 const std::string kFileNameSuffixAccessReuseBlocksTimeline =
185 "reuse_blocks_timeline";
186 const std::string kFileNameSuffixPercentOfAccessSummary =
187 "percentage_of_accesses_summary";
188 const std::string kFileNameSuffixPercentRefKeys = "percent_ref_keys";
189 const std::string kFileNameSuffixPercentDataSizeOnRefKeys =
190 "percent_data_size_on_ref_keys";
191 const std::string kFileNameSuffixPercentAccessesOnRefKeys =
192 "percent_accesses_on_ref_keys";
193 const std::string kFileNameSuffixAccessCountSummary = "access_count_summary";
194
195 std::string block_type_to_string(TraceType type) {
196 switch (type) {
197 case kBlockTraceFilterBlock:
198 return "Filter";
199 case kBlockTraceDataBlock:
200 return "Data";
201 case kBlockTraceIndexBlock:
202 return "Index";
203 case kBlockTraceRangeDeletionBlock:
204 return "RangeDeletion";
205 case kBlockTraceUncompressionDictBlock:
206 return "UncompressionDict";
207 default:
208 break;
209 }
210 // This cannot happen.
211 return "InvalidType";
212 }
213
214 std::string caller_to_string(TableReaderCaller caller) {
215 switch (caller) {
216 case kUserGet:
217 return "Get";
218 case kUserMultiGet:
219 return "MultiGet";
220 case kUserIterator:
221 return "Iterator";
222 case kUserApproximateSize:
223 return "ApproximateSize";
224 case kUserVerifyChecksum:
225 return "VerifyChecksum";
226 case kSSTDumpTool:
227 return "SSTDumpTool";
228 case kExternalSSTIngestion:
229 return "ExternalSSTIngestion";
230 case kRepair:
231 return "Repair";
232 case kPrefetch:
233 return "Prefetch";
234 case kCompaction:
235 return "Compaction";
236 case kCompactionRefill:
237 return "CompactionRefill";
238 case kFlush:
239 return "Flush";
240 case kSSTFileReader:
241 return "SSTFileReader";
242 case kUncategorized:
243 return "Uncategorized";
244 default:
245 break;
246 }
247 // This cannot happen.
248 return "InvalidCaller";
249 }
250
251 TableReaderCaller string_to_caller(std::string caller_str) {
252 if (caller_str == "Get") {
253 return kUserGet;
254 } else if (caller_str == "MultiGet") {
255 return kUserMultiGet;
256 } else if (caller_str == "Iterator") {
257 return kUserIterator;
258 } else if (caller_str == "ApproximateSize") {
259 return kUserApproximateSize;
260 } else if (caller_str == "VerifyChecksum") {
261 return kUserVerifyChecksum;
262 } else if (caller_str == "SSTDumpTool") {
263 return kSSTDumpTool;
264 } else if (caller_str == "ExternalSSTIngestion") {
265 return kExternalSSTIngestion;
266 } else if (caller_str == "Repair") {
267 return kRepair;
268 } else if (caller_str == "Prefetch") {
269 return kPrefetch;
270 } else if (caller_str == "Compaction") {
271 return kCompaction;
272 } else if (caller_str == "CompactionRefill") {
273 return kCompactionRefill;
274 } else if (caller_str == "Flush") {
275 return kFlush;
276 } else if (caller_str == "SSTFileReader") {
277 return kSSTFileReader;
278 } else if (caller_str == "Uncategorized") {
279 return kUncategorized;
280 }
281 return TableReaderCaller::kMaxBlockCacheLookupCaller;
282 }
283
284 bool is_user_access(TableReaderCaller caller) {
285 switch (caller) {
286 case kUserGet:
287 case kUserMultiGet:
288 case kUserIterator:
289 case kUserApproximateSize:
290 case kUserVerifyChecksum:
291 return true;
292 default:
293 break;
294 }
295 return false;
296 }
297
298 const char kBreakLine[] =
299 "***************************************************************\n";
300
301 void print_break_lines(uint32_t num_break_lines) {
302 for (uint32_t i = 0; i < num_break_lines; i++) {
303 fprintf(stdout, kBreakLine);
304 }
305 }
306
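// Returns numerator as a percentage of denominator, or -1 if the denominator
// is 0.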
307 double percent(uint64_t numerator, uint64_t denominator) {
308 if (denominator == 0) {
309 return -1;
310 }
311 return static_cast<double>(numerator * 100.0 / denominator);
312 }
313
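// Folds a per-second histogram into coarser buckets of 'time_unit' seconds.
// For example, with time_unit == 60, per-second counts become per-minute
// counts keyed by minute index.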
314 std::map<uint64_t, uint64_t> adjust_time_unit(
315 const std::map<uint64_t, uint64_t>& time_stats, uint64_t time_unit) {
316 if (time_unit == 1) {
317 return time_stats;
318 }
319 std::map<uint64_t, uint64_t> adjusted_time_stats;
320 for (auto const& time : time_stats) {
321 adjusted_time_stats[static_cast<uint64_t>(time.first / time_unit)] +=
322 time.second;
323 }
324 return adjusted_time_stats;
325 }
326 } // namespace
327
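// Writes a CSV file named "<trace_duration>_<total_accesses>_mrc" under
// output_dir_ with one row per simulated (cache configuration, capacity) pair
// and its observed miss ratio.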
328 void BlockCacheTraceAnalyzer::WriteMissRatioCurves() const {
329 if (!cache_simulator_) {
330 return;
331 }
332 if (output_dir_.empty()) {
333 return;
334 }
335 uint64_t trace_duration =
336 trace_end_timestamp_in_seconds_ - trace_start_timestamp_in_seconds_;
337 uint64_t total_accesses = access_sequence_number_;
338 const std::string output_miss_ratio_curve_path =
339 output_dir_ + "/" + std::to_string(trace_duration) + "_" +
340 std::to_string(total_accesses) + "_" + kMissRatioCurveFileName;
341 std::ofstream out(output_miss_ratio_curve_path);
342 if (!out.is_open()) {
343 return;
344 }
345 // Write header.
346 const std::string header =
347 "cache_name,num_shard_bits,ghost_capacity,capacity,miss_ratio,total_"
348 "accesses";
349 out << header << std::endl;
350 for (auto const& config_caches : cache_simulator_->sim_caches()) {
351 const CacheConfiguration& config = config_caches.first;
352 for (uint32_t i = 0; i < config.cache_capacities.size(); i++) {
353 double miss_ratio =
354 config_caches.second[i]->miss_ratio_stats().miss_ratio();
355 // Write the body.
356 out << config.cache_name;
357 out << ",";
358 out << config.num_shard_bits;
359 out << ",";
360 out << config.ghost_cache_capacity;
361 out << ",";
362 out << config.cache_capacities[i];
363 out << ",";
364 out << std::fixed << std::setprecision(4) << miss_ratio;
365 out << ",";
366 out << config_caches.second[i]->miss_ratio_stats().total_accesses();
367 out << std::endl;
368 }
369 }
370 out.close();
371 }
372
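// For every access of a block, records features (number of accesses and
// elapsed time since the previous access, plus the number of past accesses)
// and prediction targets (number of accesses and elapsed time until the next
// access). The k-th feature entry and the k-th prediction entry describe the
// same access.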
373 void BlockCacheTraceAnalyzer::UpdateFeatureVectors(
374 const std::vector<uint64_t>& access_sequence_number_timeline,
375 const std::vector<uint64_t>& access_timeline, const std::string& label,
376 std::map<std::string, Features>* label_features,
377 std::map<std::string, Predictions>* label_predictions) const {
378 if (access_sequence_number_timeline.empty() || access_timeline.empty()) {
379 return;
380 }
381 assert(access_timeline.size() == access_sequence_number_timeline.size());
382 uint64_t prev_access_sequence_number = access_sequence_number_timeline[0];
383 uint64_t prev_access_timestamp = access_timeline[0];
384 for (uint32_t i = 0; i < access_sequence_number_timeline.size(); i++) {
385 uint64_t num_accesses_since_last_access =
386 access_sequence_number_timeline[i] - prev_access_sequence_number;
387 uint64_t elapsed_time_since_last_access =
388 access_timeline[i] - prev_access_timestamp;
389 prev_access_sequence_number = access_sequence_number_timeline[i];
390 prev_access_timestamp = access_timeline[i];
391 if (i < access_sequence_number_timeline.size() - 1) {
392 (*label_features)[label].num_accesses_since_last_access.push_back(
393 num_accesses_since_last_access);
394 (*label_features)[label].num_past_accesses.push_back(i);
395 (*label_features)[label].elapsed_time_since_last_access.push_back(
396 elapsed_time_since_last_access);
397 }
398 if (i >= 1) {
399 (*label_predictions)[label].num_accesses_till_next_access.push_back(
400 num_accesses_since_last_access);
401 (*label_predictions)[label].elapsed_time_till_next_access.push_back(
402 elapsed_time_since_last_access);
403 }
404 }
405 }
406
407 void BlockCacheTraceAnalyzer::WriteMissRatioTimeline(uint64_t time_unit) const {
408 if (!cache_simulator_ || output_dir_.empty()) {
409 return;
410 }
411 std::map<uint64_t, std::map<std::string, std::map<uint64_t, double>>>
412 cs_name_timeline;
413 uint64_t start_time = port::kMaxUint64;
414 uint64_t end_time = 0;
415 const std::map<uint64_t, uint64_t>& trace_num_misses =
416 adjust_time_unit(miss_ratio_stats_.num_misses_timeline(), time_unit);
417 const std::map<uint64_t, uint64_t>& trace_num_accesses =
418 adjust_time_unit(miss_ratio_stats_.num_accesses_timeline(), time_unit);
419 assert(trace_num_misses.size() == trace_num_accesses.size());
420 for (auto const& num_miss : trace_num_misses) {
421 uint64_t time = num_miss.first;
422 start_time = std::min(start_time, time);
423 end_time = std::max(end_time, time);
424 uint64_t miss = num_miss.second;
425 auto it = trace_num_accesses.find(time);
426 assert(it != trace_num_accesses.end());
427 uint64_t access = it->second;
428 cs_name_timeline[port::kMaxUint64]["trace"][time] = percent(miss, access);
429 }
430 for (auto const& config_caches : cache_simulator_->sim_caches()) {
431 const CacheConfiguration& config = config_caches.first;
432 std::string cache_label = config.cache_name + "-" +
433 std::to_string(config.num_shard_bits) + "-" +
434 std::to_string(config.ghost_cache_capacity);
435 for (uint32_t i = 0; i < config.cache_capacities.size(); i++) {
436 const std::map<uint64_t, uint64_t>& num_misses = adjust_time_unit(
437 config_caches.second[i]->miss_ratio_stats().num_misses_timeline(),
438 time_unit);
439 const std::map<uint64_t, uint64_t>& num_accesses = adjust_time_unit(
440 config_caches.second[i]->miss_ratio_stats().num_accesses_timeline(),
441 time_unit);
442 assert(num_misses.size() == num_accesses.size());
443 for (auto const& num_miss : num_misses) {
444 uint64_t time = num_miss.first;
445 start_time = std::min(start_time, time);
446 end_time = std::max(end_time, time);
447 uint64_t miss = num_miss.second;
448 auto it = num_accesses.find(time);
449 assert(it != num_accesses.end());
450 uint64_t access = it->second;
451 cs_name_timeline[config.cache_capacities[i]][cache_label][time] =
452 percent(miss, access);
453 }
454 }
455 }
456 for (auto const& it : cs_name_timeline) {
457 const std::string output_miss_ratio_timeline_path =
458 output_dir_ + "/" + std::to_string(it.first) + "_" +
459 std::to_string(time_unit) + "_" + kFileNameSuffixMissRatioTimeline;
460 std::ofstream out(output_miss_ratio_timeline_path);
461 if (!out.is_open()) {
462 return;
463 }
464 std::string header("time");
465 for (uint64_t now = start_time; now <= end_time; now++) {
466 header += ",";
467 header += std::to_string(now);
468 }
469 out << header << std::endl;
470 for (auto const& label : it.second) {
471 std::string row(label.first);
472 for (uint64_t now = start_time; now <= end_time; now++) {
473 auto misses = label.second.find(now);
474 row += ",";
475 if (misses != label.second.end()) {
476 row += std::to_string(misses->second);
477 } else {
478 row += "0";
479 }
480 }
481 out << row << std::endl;
482 }
483 out.close();
484 }
485 }
486
487 void BlockCacheTraceAnalyzer::WriteMissTimeline(uint64_t time_unit) const {
488 if (!cache_simulator_ || output_dir_.empty()) {
489 return;
490 }
491 std::map<uint64_t, std::map<std::string, std::map<uint64_t, uint64_t>>>
492 cs_name_timeline;
493 uint64_t start_time = port::kMaxUint64;
494 uint64_t end_time = 0;
495 const std::map<uint64_t, uint64_t>& trace_num_misses =
496 adjust_time_unit(miss_ratio_stats_.num_misses_timeline(), time_unit);
497 for (auto const& num_miss : trace_num_misses) {
498 uint64_t time = num_miss.first;
499 start_time = std::min(start_time, time);
500 end_time = std::max(end_time, time);
501 uint64_t miss = num_miss.second;
502 cs_name_timeline[port::kMaxUint64]["trace"][time] = miss;
503 }
504 for (auto const& config_caches : cache_simulator_->sim_caches()) {
505 const CacheConfiguration& config = config_caches.first;
506 std::string cache_label = config.cache_name + "-" +
507 std::to_string(config.num_shard_bits) + "-" +
508 std::to_string(config.ghost_cache_capacity);
509 for (uint32_t i = 0; i < config.cache_capacities.size(); i++) {
510 const std::map<uint64_t, uint64_t>& num_misses = adjust_time_unit(
511 config_caches.second[i]->miss_ratio_stats().num_misses_timeline(),
512 time_unit);
513 for (auto const& num_miss : num_misses) {
514 uint64_t time = num_miss.first;
515 start_time = std::min(start_time, time);
516 end_time = std::max(end_time, time);
517 uint64_t miss = num_miss.second;
518 cs_name_timeline[config.cache_capacities[i]][cache_label][time] = miss;
519 }
520 }
521 }
522 for (auto const& it : cs_name_timeline) {
523 const std::string output_miss_ratio_timeline_path =
524 output_dir_ + "/" + std::to_string(it.first) + "_" +
525 std::to_string(time_unit) + "_" + kFileNameSuffixMissTimeline;
526 std::ofstream out(output_miss_ratio_timeline_path);
527 if (!out.is_open()) {
528 return;
529 }
530 std::string header("time");
531 for (uint64_t now = start_time; now <= end_time; now++) {
532 header += ",";
533 header += std::to_string(now);
534 }
535 out << header << std::endl;
536 for (auto const& label : it.second) {
537 std::string row(label.first);
538 for (uint64_t now = start_time; now <= end_time; now++) {
539 auto misses = label.second.find(now);
540 row += ",";
541 if (misses != label.second.end()) {
542 row += std::to_string(misses->second);
543 } else {
544 row += "0";
545 }
546 }
547 out << row << std::endl;
548 }
549 out.close();
550 }
551 }
552
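// Aggregates accesses per label, sorts labels by access count in descending
// order, and, for each percent bucket, records the accesses contributed by the
// labels ranked between the previous bucket boundary and this one. The result
// is written as percentages of the total number of accesses.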
553 void BlockCacheTraceAnalyzer::WriteSkewness(
554 const std::string& label_str, const std::vector<uint64_t>& percent_buckets,
555 TraceType target_block_type) const {
556 std::set<std::string> labels = ParseLabelStr(label_str);
557 std::map<std::string, uint64_t> label_naccesses;
558 uint64_t total_naccesses = 0;
559 auto block_callback = [&](const std::string& cf_name, uint64_t fd,
560 uint32_t level, TraceType type,
561 const std::string& /*block_key*/, uint64_t block_id,
562 const BlockAccessInfo& block) {
563 if (target_block_type != TraceType::kTraceMax &&
564 target_block_type != type) {
565 return;
566 }
567 const std::string label = BuildLabel(
568 labels, cf_name, fd, level, type,
569 TableReaderCaller::kMaxBlockCacheLookupCaller, block_id, block);
570 label_naccesses[label] += block.num_accesses;
571 total_naccesses += block.num_accesses;
572 };
573 TraverseBlocks(block_callback, &labels);
574 std::map<std::string, std::map<uint64_t, uint64_t>> label_bucket_naccesses;
575 std::vector<std::pair<std::string, uint64_t>> pairs;
576 for (auto const& itr : label_naccesses) {
577 pairs.push_back(itr);
578 }
579 // Sort in descending order.
580 sort(pairs.begin(), pairs.end(),
581 [=](const std::pair<std::string, uint64_t>& a,
582 const std::pair<std::string, uint64_t>& b) {
583 return b.second < a.second;
584 });
585
586 size_t prev_start_index = 0;
587 for (auto const& percent : percent_buckets) {
588 label_bucket_naccesses[label_str][percent] = 0;
589 size_t end_index = 0;
590 if (percent == port::kMaxUint64) {
591 end_index = label_naccesses.size();
592 } else {
593 end_index = percent * label_naccesses.size() / 100;
594 }
595 for (size_t i = prev_start_index; i < end_index; i++) {
596 label_bucket_naccesses[label_str][percent] += pairs[i].second;
597 }
598 prev_start_index = end_index;
599 }
600 std::string filename_suffix;
601 if (target_block_type != TraceType::kTraceMax) {
602 filename_suffix = block_type_to_string(target_block_type);
603 filename_suffix += "_";
604 }
605 filename_suffix += kFileNameSuffixSkew;
606 WriteStatsToFile(label_str, percent_buckets, filename_suffix,
607 label_bucket_naccesses, total_naccesses);
608 }
609
610 void BlockCacheTraceAnalyzer::WriteCorrelationFeatures(
611 const std::string& label_str, uint32_t max_number_of_values) const {
612 std::set<std::string> labels = ParseLabelStr(label_str);
613 std::map<std::string, Features> label_features;
614 std::map<std::string, Predictions> label_predictions;
615 auto block_callback =
616 [&](const std::string& cf_name, uint64_t fd, uint32_t level,
617 TraceType block_type, const std::string& /*block_key*/,
618 uint64_t /*block_key_id*/, const BlockAccessInfo& block) {
619 if (block.table_id == 0 && labels.find(kGroupbyTable) != labels.end()) {
620 // We only know table id information for get requests.
621 return;
622 }
623 if (labels.find(kGroupbyCaller) != labels.end()) {
624 // Group by caller.
625 for (auto const& caller_map : block.caller_access_timeline) {
626 const std::string label =
627 BuildLabel(labels, cf_name, fd, level, block_type,
628 caller_map.first, /*block_id=*/0, block);
629 auto it = block.caller_access_sequence__number_timeline.find(
630 caller_map.first);
631 assert(it != block.caller_access_sequence__number_timeline.end());
632 UpdateFeatureVectors(it->second, caller_map.second, label,
633 &label_features, &label_predictions);
634 }
635 return;
636 }
637 const std::string label =
638 BuildLabel(labels, cf_name, fd, level, block_type,
639 TableReaderCaller::kMaxBlockCacheLookupCaller,
640 /*block_id=*/0, block);
641 UpdateFeatureVectors(block.access_sequence_number_timeline,
642 block.access_timeline, label, &label_features,
643 &label_predictions);
644 };
645 TraverseBlocks(block_callback, &labels);
646 WriteCorrelationFeaturesToFile(label_str, label_features, label_predictions,
647 max_number_of_values);
648 }
649
650 void BlockCacheTraceAnalyzer::WriteCorrelationFeaturesToFile(
651 const std::string& label,
652 const std::map<std::string, Features>& label_features,
653 const std::map<std::string, Predictions>& label_predictions,
654 uint32_t max_number_of_values) const {
655 std::default_random_engine rand_engine(static_cast<std::default_random_engine::result_type>(env_->NowMicros()));
656 for (auto const& label_feature_vectors : label_features) {
657 const Features& past = label_feature_vectors.second;
658 auto it = label_predictions.find(label_feature_vectors.first);
659 assert(it != label_predictions.end());
660 const Predictions& future = it->second;
661 const std::string output_path = output_dir_ + "/" + label + "_" +
662 label_feature_vectors.first + "_" +
663 kFileNameSuffixCorrelation;
664 std::ofstream out(output_path);
665 if (!out.is_open()) {
666 return;
667 }
668 std::string header(
669 "num_accesses_since_last_access,elapsed_time_since_last_access,num_"
670 "past_accesses,num_accesses_till_next_access,elapsed_time_till_next_"
671 "access");
672 out << header << std::endl;
673 std::vector<uint32_t> indexes;
674 for (uint32_t i = 0; i < past.num_accesses_since_last_access.size(); i++) {
675 indexes.push_back(i);
676 }
677 std::shuffle(indexes.begin(), indexes.end(), rand_engine);
678 for (uint32_t i = 0; i < max_number_of_values && i < indexes.size(); i++) {
679 uint32_t rand_index = indexes[i];
680 out << std::to_string(past.num_accesses_since_last_access[rand_index])
681 << ",";
682 out << std::to_string(past.elapsed_time_since_last_access[rand_index])
683 << ",";
684 out << std::to_string(past.num_past_accesses[rand_index]) << ",";
685 out << std::to_string(future.num_accesses_till_next_access[rand_index])
686 << ",";
687 out << std::to_string(future.elapsed_time_till_next_access[rand_index])
688 << std::endl;
689 }
690 out.close();
691 }
692 }
693
694 void BlockCacheTraceAnalyzer::WriteCorrelationFeaturesForGet(
695 uint32_t max_number_of_values) const {
696 std::string label = "GetKeyInfo";
697 std::map<std::string, Features> label_features;
698 std::map<std::string, Predictions> label_predictions;
699 for (auto const& get_info : get_key_info_map_) {
700 const GetKeyInfo& info = get_info.second;
701 UpdateFeatureVectors(info.access_sequence_number_timeline,
702 info.access_timeline, label, &label_features,
703 &label_predictions);
704 }
705 WriteCorrelationFeaturesToFile(label, label_features, label_predictions,
706 max_number_of_values);
707 }
708
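// Splits an underscore-separated label string into its label names, e.g.,
// "cf_bt" becomes {"bt", "cf"}. Returns an empty set if any label name is
// unknown.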
709 std::set<std::string> BlockCacheTraceAnalyzer::ParseLabelStr(
710 const std::string& label_str) const {
711 std::stringstream ss(label_str);
712 std::set<std::string> labels;
713 // label_str is in the form of "label1_label2_label3", e.g., cf_bt.
714 while (ss.good()) {
715 std::string label_name;
716 getline(ss, label_name, '_');
717 if (kGroupbyLabels.find(label_name) == kGroupbyLabels.end()) {
718 // Unknown label name.
719 fprintf(stderr, "Unknown label name %s, label string %s\n",
720 label_name.c_str(), label_str.c_str());
721 return {};
722 }
723 labels.insert(label_name);
724 }
725 return labels;
726 }
727
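// Concatenates the values of the requested labels with '-'. For example, with
// labels {"cf", "level"}, cf_name "default", and level 3, the result is
// "default-3" (values follow the alphabetical order of the label names).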
728 std::string BlockCacheTraceAnalyzer::BuildLabel(
729 const std::set<std::string>& labels, const std::string& cf_name,
730 uint64_t fd, uint32_t level, TraceType type, TableReaderCaller caller,
731 uint64_t block_key, const BlockAccessInfo& block) const {
732 std::map<std::string, std::string> label_value_map;
733 label_value_map[kGroupbyAll] = kGroupbyAll;
734 label_value_map[kGroupbyLevel] = std::to_string(level);
735 label_value_map[kGroupbyCaller] = caller_to_string(caller);
736 label_value_map[kGroupbySSTFile] = std::to_string(fd);
737 label_value_map[kGroupbyBlockType] = block_type_to_string(type);
738 label_value_map[kGroupbyColumnFamily] = cf_name;
739 label_value_map[kGroupbyBlock] = std::to_string(block_key);
740 label_value_map[kGroupbyTable] = std::to_string(block.table_id);
741 // Concatenate the label values.
742 std::string label;
743 for (auto const& l : labels) {
744 label += label_value_map[l];
745 label += "-";
746 }
747 if (!label.empty()) {
748 label.pop_back();
749 }
750 return label;
751 }
752
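// Walks the aggregated statistics hierarchy, column family -> SST file ->
// block type -> block, and invokes block_callback once per block.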
753 void BlockCacheTraceAnalyzer::TraverseBlocks(
754 std::function<void(const std::string& /*cf_name*/, uint64_t /*fd*/,
755 uint32_t /*level*/, TraceType /*block_type*/,
756 const std::string& /*block_key*/,
757 uint64_t /*block_key_id*/,
758 const BlockAccessInfo& /*block_access_info*/)>
759 block_callback,
760 std::set<std::string>* labels) const {
761 for (auto const& cf_aggregates : cf_aggregates_map_) {
762 // Stats per column family.
763 const std::string& cf_name = cf_aggregates.first;
764 for (auto const& file_aggregates : cf_aggregates.second.fd_aggregates_map) {
765 // Stats per SST file.
766 const uint64_t fd = file_aggregates.first;
767 const uint32_t level = file_aggregates.second.level;
768 for (auto const& block_type_aggregates :
769 file_aggregates.second.block_type_aggregates_map) {
770 // Stats per block type.
771 const TraceType type = block_type_aggregates.first;
772 for (auto const& block_access_info :
773 block_type_aggregates.second.block_access_info_map) {
774 // Stats per block.
775 if (labels && block_access_info.second.table_id == 0 &&
776 labels->find(kGroupbyTable) != labels->end()) {
777 // We only know table id information for get requests.
778 return;
779 }
780 block_callback(cf_name, fd, level, type, block_access_info.first,
781 block_access_info.second.block_id,
782 block_access_info.second);
783 }
784 }
785 }
786 }
787 }
788
789 void BlockCacheTraceAnalyzer::WriteGetSpatialLocality(
790 const std::string& label_str,
791 const std::vector<uint64_t>& percent_buckets) const {
792 std::set<std::string> labels = ParseLabelStr(label_str);
793 std::map<std::string, std::map<uint64_t, uint64_t>> label_pnrefkeys_nblocks;
794 std::map<std::string, std::map<uint64_t, uint64_t>> label_pnrefs_nblocks;
795 std::map<std::string, std::map<uint64_t, uint64_t>> label_pndatasize_nblocks;
796 uint64_t nblocks = 0;
797 auto block_callback = [&](const std::string& cf_name, uint64_t fd,
798 uint32_t level, TraceType /*block_type*/,
799 const std::string& /*block_key*/,
800 uint64_t /*block_key_id*/,
801 const BlockAccessInfo& block) {
802 if (block.num_keys == 0) {
803 return;
804 }
805 uint64_t naccesses = 0;
806 for (auto const& key_access : block.key_num_access_map) {
807 for (auto const& caller_access : key_access.second) {
808 if (caller_access.first == TableReaderCaller::kUserGet) {
809 naccesses += caller_access.second;
810 }
811 }
812 }
813 const std::string label =
814 BuildLabel(labels, cf_name, fd, level, TraceType::kBlockTraceDataBlock,
815 TableReaderCaller::kUserGet, /*block_id=*/0, block);
816
817 const uint64_t percent_referenced_for_existing_keys =
818 static_cast<uint64_t>(std::max(
819 percent(block.key_num_access_map.size(), block.num_keys), 0.0));
820 const uint64_t percent_accesses_for_existing_keys =
821 static_cast<uint64_t>(std::max(
822 percent(block.num_referenced_key_exist_in_block, naccesses), 0.0));
823 const uint64_t percent_referenced_data_size = static_cast<uint64_t>(
824 std::max(percent(block.referenced_data_size, block.block_size), 0.0));
825 if (label_pnrefkeys_nblocks.find(label) == label_pnrefkeys_nblocks.end()) {
826 for (auto const& percent_bucket : percent_buckets) {
827 label_pnrefkeys_nblocks[label][percent_bucket] = 0;
828 label_pnrefs_nblocks[label][percent_bucket] = 0;
829 label_pndatasize_nblocks[label][percent_bucket] = 0;
830 }
831 }
832 label_pnrefkeys_nblocks[label]
833 .upper_bound(percent_referenced_for_existing_keys)
834 ->second += 1;
835 label_pnrefs_nblocks[label]
836 .upper_bound(percent_accesses_for_existing_keys)
837 ->second += 1;
838 label_pndatasize_nblocks[label]
839 .upper_bound(percent_referenced_data_size)
840 ->second += 1;
841 nblocks += 1;
842 };
843 TraverseBlocks(block_callback, &labels);
844 WriteStatsToFile(label_str, percent_buckets, kFileNameSuffixPercentRefKeys,
845 label_pnrefkeys_nblocks, nblocks);
846 WriteStatsToFile(label_str, percent_buckets,
847 kFileNameSuffixPercentAccessesOnRefKeys,
848 label_pnrefs_nblocks, nblocks);
849 WriteStatsToFile(label_str, percent_buckets,
850 kFileNameSuffixPercentDataSizeOnRefKeys,
851 label_pndatasize_nblocks, nblocks);
852 }
853
854 void BlockCacheTraceAnalyzer::WriteAccessTimeline(const std::string& label_str,
855 uint64_t time_unit,
856 bool user_access_only) const {
857 std::set<std::string> labels = ParseLabelStr(label_str);
858 uint64_t start_time = port::kMaxUint64;
859 uint64_t end_time = 0;
860 std::map<std::string, std::map<uint64_t, uint64_t>> label_access_timeline;
861 std::map<uint64_t, std::vector<std::string>> access_count_block_id_map;
862
863 auto block_callback = [&](const std::string& cf_name, uint64_t fd,
864 uint32_t level, TraceType type,
865 const std::string& /*block_key*/, uint64_t block_id,
866 const BlockAccessInfo& block) {
867 uint64_t naccesses = 0;
868 for (auto const& timeline : block.caller_num_accesses_timeline) {
869 const TableReaderCaller caller = timeline.first;
870 if (user_access_only && !is_user_access(caller)) {
871 continue;
872 }
873 const std::string label =
874 BuildLabel(labels, cf_name, fd, level, type, caller, block_id, block);
875 for (auto const& naccess : timeline.second) {
876 const uint64_t timestamp = naccess.first / time_unit;
877 const uint64_t num = naccess.second;
878 label_access_timeline[label][timestamp] += num;
879 start_time = std::min(start_time, timestamp);
880 end_time = std::max(end_time, timestamp);
881 naccesses += num;
882 }
883 }
884 if (naccesses > 0) {
885 access_count_block_id_map[naccesses].push_back(std::to_string(block_id));
886 }
887 };
888 TraverseBlocks(block_callback, &labels);
889
890 // We have label_access_timeline now. Write them into a file.
891 const std::string user_access_prefix =
892 user_access_only ? "user_access_only_" : "all_access_";
893 const std::string output_path = output_dir_ + "/" + user_access_prefix +
894 label_str + "_" + std::to_string(time_unit) +
895 "_" + kFileNameSuffixAccessTimeline;
896 std::ofstream out(output_path);
897 if (!out.is_open()) {
898 return;
899 }
900 std::string header("time");
901 if (labels.find("block") != labels.end()) {
902 for (uint64_t now = start_time; now <= end_time; now++) {
903 header += ",";
904 header += std::to_string(now);
905 }
906 out << header << std::endl;
907 // Write the most frequently accessed blocks first.
908 for (auto naccess_it = access_count_block_id_map.rbegin();
909 naccess_it != access_count_block_id_map.rend(); naccess_it++) {
910 for (auto& block_id_it : naccess_it->second) {
911 std::string row(block_id_it);
912 for (uint64_t now = start_time; now <= end_time; now++) {
913 auto it = label_access_timeline[block_id_it].find(now);
914 row += ",";
915 if (it != label_access_timeline[block_id_it].end()) {
916 row += std::to_string(it->second);
917 } else {
918 row += "0";
919 }
920 }
921 out << row << std::endl;
922 }
923 }
924 out.close();
925 return;
926 }
927 for (uint64_t now = start_time; now <= end_time; now++) {
928 header += ",";
929 header += std::to_string(now);
930 }
931 out << header << std::endl;
932 for (auto const& label : label_access_timeline) {
933 std::string row(label.first);
934 for (uint64_t now = start_time; now <= end_time; now++) {
935 auto it = label.second.find(now);
936 row += ",";
937 if (it != label.second.end()) {
938 row += std::to_string(it->second);
939 } else {
940 row += "0";
941 }
942 }
943 out << row << std::endl;
944 }
945
946 out.close();
947 }
948
949 void BlockCacheTraceAnalyzer::WriteReuseDistance(
950 const std::string& label_str,
951 const std::vector<uint64_t>& distance_buckets) const {
952 std::set<std::string> labels = ParseLabelStr(label_str);
953 std::map<std::string, std::map<uint64_t, uint64_t>> label_distance_num_reuses;
954 uint64_t total_num_reuses = 0;
955 auto block_callback = [&](const std::string& cf_name, uint64_t fd,
956 uint32_t level, TraceType type,
957 const std::string& /*block_key*/, uint64_t block_id,
958 const BlockAccessInfo& block) {
959 const std::string label = BuildLabel(
960 labels, cf_name, fd, level, type,
961 TableReaderCaller::kMaxBlockCacheLookupCaller, block_id, block);
962 if (label_distance_num_reuses.find(label) ==
963 label_distance_num_reuses.end()) {
964 // The first time we encounter this label.
965 for (auto const& distance_bucket : distance_buckets) {
966 label_distance_num_reuses[label][distance_bucket] = 0;
967 }
968 }
969 for (auto const& reuse_distance : block.reuse_distance_count) {
970 label_distance_num_reuses[label]
971 .upper_bound(reuse_distance.first)
972 ->second += reuse_distance.second;
973 total_num_reuses += reuse_distance.second;
974 }
975 };
976 TraverseBlocks(block_callback, &labels);
977 // We have label_distance_num_reuses now. Write it into a file.
979 const std::string output_path =
980 output_dir_ + "/" + label_str + "_reuse_distance";
981 std::ofstream out(output_path);
982 if (!out.is_open()) {
983 return;
984 }
985 std::string header("bucket");
986 for (auto const& label_it : label_distance_num_reuses) {
987 header += ",";
988 header += label_it.first;
989 }
990 out << header << std::endl;
991 for (auto const& bucket : distance_buckets) {
992 std::string row(std::to_string(bucket));
993 for (auto const& label_it : label_distance_num_reuses) {
994 auto const& it = label_it.second.find(bucket);
995 assert(it != label_it.second.end());
996 row += ",";
997 row += std::to_string(percent(it->second, total_num_reuses));
998 }
999 out << row << std::endl;
1000 }
1001 out.close();
1002 }
1003
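// Buckets the intervals between consecutive accesses of a block into
// time_buckets. Multiple accesses recorded at the same second are counted as
// reuses within one second.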
1004 void BlockCacheTraceAnalyzer::UpdateReuseIntervalStats(
1005 const std::string& label, const std::vector<uint64_t>& time_buckets,
1006 const std::map<uint64_t, uint64_t> timeline,
1007 std::map<std::string, std::map<uint64_t, uint64_t>>* label_time_num_reuses,
1008 uint64_t* total_num_reuses) const {
1009 assert(label_time_num_reuses);
1010 assert(total_num_reuses);
1011 if (label_time_num_reuses->find(label) == label_time_num_reuses->end()) {
1012 // The first time we encounter this label.
1013 for (auto const& time_bucket : time_buckets) {
1014 (*label_time_num_reuses)[label][time_bucket] = 0;
1015 }
1016 }
1017 auto it = timeline.begin();
1018 uint64_t prev_timestamp = it->first;
1019 const uint64_t prev_num = it->second;
1020 it++;
1021 // Reused within one second.
1022 if (prev_num > 1) {
1023 (*label_time_num_reuses)[label].upper_bound(0)->second += prev_num - 1;
1024 *total_num_reuses += prev_num - 1;
1025 }
1026 while (it != timeline.end()) {
1027 const uint64_t timestamp = it->first;
1028 const uint64_t num = it->second;
1029 const uint64_t reuse_interval = timestamp - prev_timestamp;
1030 (*label_time_num_reuses)[label].upper_bound(reuse_interval)->second += 1;
1031 if (num > 1) {
1032 (*label_time_num_reuses)[label].upper_bound(0)->second += num - 1;
1033 }
1034 prev_timestamp = timestamp;
1035 *total_num_reuses += num;
1036 it++;
1037 }
1038 }
1039
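// Writes a CSV file named "<label_str>_<filename_suffix>" with one column per
// label and one row per bucket. Each cell is the percentage of ntotal that
// falls into that bucket for that label.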
1040 void BlockCacheTraceAnalyzer::WriteStatsToFile(
1041 const std::string& label_str, const std::vector<uint64_t>& time_buckets,
1042 const std::string& filename_suffix,
1043 const std::map<std::string, std::map<uint64_t, uint64_t>>& label_data,
1044 uint64_t ntotal) const {
1045 const std::string output_path =
1046 output_dir_ + "/" + label_str + "_" + filename_suffix;
1047 std::ofstream out(output_path);
1048 if (!out.is_open()) {
1049 return;
1050 }
1051 std::string header("bucket");
1052 for (auto const& label_it : label_data) {
1053 header += ",";
1054 header += label_it.first;
1055 }
1056 out << header << std::endl;
1057 for (auto const& bucket : time_buckets) {
1058 std::string row(std::to_string(bucket));
1059 for (auto const& label_it : label_data) {
1060 auto const& it = label_it.second.find(bucket);
1061 assert(it != label_it.second.end());
1062 row += ",";
1063 row += std::to_string(percent(it->second, ntotal));
1064 }
1065 out << row << std::endl;
1066 }
1067 out.close();
1068 }
1069
1070 void BlockCacheTraceAnalyzer::WriteReuseInterval(
1071 const std::string& label_str,
1072 const std::vector<uint64_t>& time_buckets) const {
1073 std::set<std::string> labels = ParseLabelStr(label_str);
1074 std::map<std::string, std::map<uint64_t, uint64_t>> label_time_num_reuses;
1075 std::map<std::string, std::map<uint64_t, uint64_t>> label_avg_reuse_nblocks;
1076 std::map<std::string, std::map<uint64_t, uint64_t>> label_avg_reuse_naccesses;
1077
1078 uint64_t total_num_reuses = 0;
1079 uint64_t total_nblocks = 0;
1080 uint64_t total_accesses = 0;
1081 auto block_callback = [&](const std::string& cf_name, uint64_t fd,
1082 uint32_t level, TraceType type,
1083 const std::string& /*block_key*/, uint64_t block_id,
1084 const BlockAccessInfo& block) {
1085 total_nblocks++;
1086 total_accesses += block.num_accesses;
1087 uint64_t avg_reuse_interval = 0;
1088 if (block.num_accesses > 1) {
1089 avg_reuse_interval = ((block.last_access_time - block.first_access_time) /
1090 kMicrosInSecond) /
1091 block.num_accesses;
1092 } else {
1093 avg_reuse_interval = port::kMaxUint64 - 1;
1094 }
1095 if (labels.find(kGroupbyCaller) != labels.end()) {
1096 for (auto const& timeline : block.caller_num_accesses_timeline) {
1097 const TableReaderCaller caller = timeline.first;
1098 const std::string label = BuildLabel(labels, cf_name, fd, level, type,
1099 caller, block_id, block);
1100 UpdateReuseIntervalStats(label, time_buckets, timeline.second,
1101 &label_time_num_reuses, &total_num_reuses);
1102 }
1103 return;
1104 }
1105 // Does not group by caller so we need to flatten the access timeline.
1106 const std::string label = BuildLabel(
1107 labels, cf_name, fd, level, type,
1108 TableReaderCaller::kMaxBlockCacheLookupCaller, block_id, block);
1109 std::map<uint64_t, uint64_t> timeline;
1110 for (auto const& caller_timeline : block.caller_num_accesses_timeline) {
1111 for (auto const& time_naccess : caller_timeline.second) {
1112 timeline[time_naccess.first] += time_naccess.second;
1113 }
1114 }
1115 UpdateReuseIntervalStats(label, time_buckets, timeline,
1116 &label_time_num_reuses, &total_num_reuses);
1117 if (label_avg_reuse_nblocks.find(label) == label_avg_reuse_nblocks.end()) {
1118 for (auto const& time_bucket : time_buckets) {
1119 label_avg_reuse_nblocks[label][time_bucket] = 0;
1120 label_avg_reuse_naccesses[label][time_bucket] = 0;
1121 }
1122 }
1123 label_avg_reuse_nblocks[label].upper_bound(avg_reuse_interval)->second += 1;
1124 label_avg_reuse_naccesses[label].upper_bound(avg_reuse_interval)->second +=
1125 block.num_accesses;
1126 };
1127 TraverseBlocks(block_callback, &labels);
1128
1129 // Write the stats into files.
1130 WriteStatsToFile(label_str, time_buckets, kFileNameSuffixReuseInterval,
1131 label_time_num_reuses, total_num_reuses);
1132 WriteStatsToFile(label_str, time_buckets, kFileNameSuffixAvgReuseInterval,
1133 label_avg_reuse_nblocks, total_nblocks);
1134 WriteStatsToFile(label_str, time_buckets,
1135 kFileNameSuffixAvgReuseIntervalNaccesses,
1136 label_avg_reuse_naccesses, total_accesses);
1137 }
1138
1139 void BlockCacheTraceAnalyzer::WriteReuseLifetime(
1140 const std::string& label_str,
1141 const std::vector<uint64_t>& time_buckets) const {
1142 std::set<std::string> labels = ParseLabelStr(label_str);
1143 std::map<std::string, std::map<uint64_t, uint64_t>> label_lifetime_nblocks;
1144 uint64_t total_nblocks = 0;
1145 auto block_callback = [&](const std::string& cf_name, uint64_t fd,
1146 uint32_t level, TraceType type,
1147 const std::string& /*block_key*/, uint64_t block_id,
1148 const BlockAccessInfo& block) {
1149 uint64_t lifetime = 0;
1150 if (block.num_accesses > 1) {
1151 lifetime =
1152 (block.last_access_time - block.first_access_time) / kMicrosInSecond;
1153 } else {
1154 lifetime = port::kMaxUint64 - 1;
1155 }
1156 const std::string label = BuildLabel(
1157 labels, cf_name, fd, level, type,
1158 TableReaderCaller::kMaxBlockCacheLookupCaller, block_id, block);
1159
1160 if (label_lifetime_nblocks.find(label) == label_lifetime_nblocks.end()) {
1161 // The first time we encounter this label.
1162 for (auto const& time_bucket : time_buckets) {
1163 label_lifetime_nblocks[label][time_bucket] = 0;
1164 }
1165 }
1166 label_lifetime_nblocks[label].upper_bound(lifetime)->second += 1;
1167 total_nblocks += 1;
1168 };
1169 TraverseBlocks(block_callback, &labels);
1170 WriteStatsToFile(label_str, time_buckets, kFileNameSuffixReuseLifetime,
1171 label_lifetime_nblocks, total_nblocks);
1172 }
1173
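// Divides the trace into windows of reuse_window seconds and, for each pair of
// windows (i, j), counts the blocks accessed in window i that are accessed
// again in window j. Each output row reports these counts as percentages of
// the number of blocks accessed in window i.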
1174 void BlockCacheTraceAnalyzer::WriteBlockReuseTimeline(
1175 const uint64_t reuse_window, bool user_access_only, TraceType block_type) const {
1176 // A map from block key to an array of bools that states whether a block is
1177 // accessed in a time window.
1178 std::map<uint64_t, std::vector<bool>> block_accessed;
1179 const uint64_t trace_duration =
1180 trace_end_timestamp_in_seconds_ - trace_start_timestamp_in_seconds_;
1181 const uint64_t reuse_vector_size = (trace_duration / reuse_window);
1182 if (reuse_vector_size < 2) {
1183 // The trace duration spans fewer than two reuse windows, so we cannot
1184 // calculate the reused percentage of blocks.
1185 return;
1186 }
1187 auto block_callback = [&](const std::string& /*cf_name*/, uint64_t /*fd*/,
1188 uint32_t /*level*/, TraceType /*type*/,
1189 const std::string& /*block_key*/, uint64_t block_id,
1190 const BlockAccessInfo& block) {
1191 if (block_accessed.find(block_id) == block_accessed.end()) {
1192 block_accessed[block_id].resize(reuse_vector_size);
1193 for (uint64_t i = 0; i < reuse_vector_size; i++) {
1194 block_accessed[block_id][i] = false;
1195 }
1196 }
1197 for (auto const& caller_num : block.caller_num_accesses_timeline) {
1198 const TableReaderCaller caller = caller_num.first;
1199 for (auto const& timeline : caller_num.second) {
1200 const uint64_t timestamp = timeline.first;
1201 const uint64_t elapsed_time =
1202 timestamp - trace_start_timestamp_in_seconds_;
1203 if (!user_access_only || is_user_access(caller)) {
1204 uint64_t index =
1205 std::min(elapsed_time / reuse_window, reuse_vector_size - 1);
1206 block_accessed[block_id][index] = true;
1207 }
1208 }
1209 }
1210 };
1211 TraverseBlocks(block_callback);
1212
1213 // A cell is the number of blocks accessed in a reuse window.
1214 std::unique_ptr<uint64_t[]> reuse_table(new uint64_t[reuse_vector_size * reuse_vector_size]);
1215 for (uint64_t start_time = 0; start_time < reuse_vector_size; start_time++) {
1216 // Initialize the reuse_table.
1217 for (uint64_t i = 0; i < reuse_vector_size; i++) {
1218 reuse_table[start_time * reuse_vector_size + i] = 0;
1219 }
1220 // Examine all blocks.
1221 for (auto const& block : block_accessed) {
1222 for (uint64_t i = start_time; i < reuse_vector_size; i++) {
1223 if (block.second[start_time] && block.second[i]) {
1224 // This block is accessed at start time and at the current time. We
1225 // increment reuse_table[start_time][i] since it is reused at the ith
1226 // window.
1227 reuse_table[start_time * reuse_vector_size + i]++;
1228 }
1229 }
1230 }
1231 }
1232 const std::string user_access_prefix =
1233 user_access_only ? "_user_access_only_" : "_all_access_";
1234 const std::string output_path =
1235 output_dir_ + "/" + block_type_to_string(block_type) +
1236 user_access_prefix + std::to_string(reuse_window) + "_" +
1237 kFileNameSuffixAccessReuseBlocksTimeline;
1238 std::ofstream out(output_path);
1239 if (!out.is_open()) {
1240 return;
1241 }
1242 std::string header("start_time");
1243 for (uint64_t start_time = 0; start_time < reuse_vector_size; start_time++) {
1244 header += ",";
1245 header += std::to_string(start_time);
1246 }
1247 out << header << std::endl;
1248 for (uint64_t start_time = 0; start_time < reuse_vector_size; start_time++) {
1249 std::string row(std::to_string(start_time * reuse_window));
1250 for (uint64_t j = 0; j < reuse_vector_size; j++) {
1251 row += ",";
1252 if (j < start_time) {
1253 row += "100.0";
1254 } else {
1255 row += std::to_string(percent(reuse_table[start_time * reuse_vector_size + j],
1256 reuse_table[start_time * reuse_vector_size + start_time]));
1257 }
1258 }
1259 out << row << std::endl;
1260 }
1261 out.close();
1262 }
1263
1264 std::string BlockCacheTraceAnalyzer::OutputPercentAccessStats(
1265 uint64_t total_accesses,
1266 const std::map<std::string, uint64_t>& cf_access_count) const {
1267 std::string row;
1268 for (auto const& cf_aggregates : cf_aggregates_map_) {
1269 const std::string& cf_name = cf_aggregates.first;
1270 const auto& naccess = cf_access_count.find(cf_name);
1271 row += ",";
1272 if (naccess != cf_access_count.end()) {
1273 row += std::to_string(percent(naccess->second, total_accesses));
1274 } else {
1275 row += "0";
1276 }
1277 }
1278 return row;
1279 }
1280
1281 void BlockCacheTraceAnalyzer::WritePercentAccessSummaryStats() const {
1282 std::map<TableReaderCaller, std::map<std::string, uint64_t>>
1283 caller_cf_accesses;
1284 uint64_t total_accesses = 0;
1285 auto block_callback =
1286 [&](const std::string& cf_name, uint64_t /*fd*/, uint32_t /*level*/,
1287 TraceType /*type*/, const std::string& /*block_key*/,
1288 uint64_t /*block_id*/, const BlockAccessInfo& block) {
1289 for (auto const& caller_num : block.caller_num_access_map) {
1290 const TableReaderCaller caller = caller_num.first;
1291 const uint64_t naccess = caller_num.second;
1292 caller_cf_accesses[caller][cf_name] += naccess;
1293 total_accesses += naccess;
1294 }
1295 };
1296 TraverseBlocks(block_callback);
1297
1298 const std::string output_path =
1299 output_dir_ + "/" + kFileNameSuffixPercentOfAccessSummary;
1300 std::ofstream out(output_path);
1301 if (!out.is_open()) {
1302 return;
1303 }
1304 std::string header("caller");
1305 for (auto const& cf_name : cf_aggregates_map_) {
1306 header += ",";
1307 header += cf_name.first;
1308 }
1309 out << header << std::endl;
1310 for (auto const& cf_naccess_it : caller_cf_accesses) {
1311 const TableReaderCaller caller = cf_naccess_it.first;
1312 std::string row;
1313 row += caller_to_string(caller);
1314 row += OutputPercentAccessStats(total_accesses, cf_naccess_it.second);
1315 out << row << std::endl;
1316 }
1317 out.close();
1318 }
1319
1320 void BlockCacheTraceAnalyzer::WriteDetailedPercentAccessSummaryStats(
1321 TableReaderCaller analyzing_caller) const {
1322 std::map<uint32_t, std::map<std::string, uint64_t>> level_cf_accesses;
1323 std::map<TraceType, std::map<std::string, uint64_t>> bt_cf_accesses;
1324 uint64_t total_accesses = 0;
1325 auto block_callback =
1326 [&](const std::string& cf_name, uint64_t /*fd*/, uint32_t level,
1327 TraceType type, const std::string& /*block_key*/,
1328 uint64_t /*block_id*/, const BlockAccessInfo& block) {
1329 for (auto const& caller_num : block.caller_num_access_map) {
1330 const TableReaderCaller caller = caller_num.first;
1331 if (caller == analyzing_caller) {
1332 const uint64_t naccess = caller_num.second;
1333 level_cf_accesses[level][cf_name] += naccess;
1334 bt_cf_accesses[type][cf_name] += naccess;
1335 total_accesses += naccess;
1336 }
1337 }
1338 };
1339 TraverseBlocks(block_callback);
1340 {
1341 const std::string output_path =
1342 output_dir_ + "/" + caller_to_string(analyzing_caller) + "_level_" +
1343 kFileNameSuffixPercentOfAccessSummary;
1344 std::ofstream out(output_path);
1345 if (!out.is_open()) {
1346 return;
1347 }
1348 std::string header("level");
1349 for (auto const& cf_name : cf_aggregates_map_) {
1350 header += ",";
1351 header += cf_name.first;
1352 }
1353 out << header << std::endl;
1354 for (auto const& level_naccess_it : level_cf_accesses) {
1355 const uint32_t level = level_naccess_it.first;
1356 std::string row;
1357 row += std::to_string(level);
1358 row += OutputPercentAccessStats(total_accesses, level_naccess_it.second);
1359 out << row << std::endl;
1360 }
1361 out.close();
1362 }
1363 {
1364 const std::string output_path =
1365 output_dir_ + "/" + caller_to_string(analyzing_caller) + "_bt_" +
1366 kFileNameSuffixPercentOfAccessSummary;
1367 std::ofstream out(output_path);
1368 if (!out.is_open()) {
1369 return;
1370 }
1371 std::string header("bt");
1372 for (auto const& cf_name : cf_aggregates_map_) {
1373 header += ",";
1374 header += cf_name.first;
1375 }
1376 out << header << std::endl;
1377 for (auto const& bt_naccess_it : bt_cf_accesses) {
1378 const TraceType bt = bt_naccess_it.first;
1379 std::string row;
1380 row += block_type_to_string(bt);
1381 row += OutputPercentAccessStats(total_accesses, bt_naccess_it.second);
1382 out << row << std::endl;
1383 }
1384 out.close();
1385 }
1386 }
1387
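// Writes CSV files reporting, per column family and per block type, how many
// blocks fall into each access-count bucket. A block is counted under the
// first bucket boundary greater than its access count (std::map upper_bound),
// so the bucket list should end with a catch-all value such as the
// port::kMaxUint64 appended by parse_buckets below.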
void BlockCacheTraceAnalyzer::WriteAccessCountSummaryStats(
    const std::vector<uint64_t>& access_count_buckets,
    bool user_access_only) const {
  // x: buckets.
  // y: # of accesses.
  std::map<std::string, std::map<uint64_t, uint64_t>> bt_access_nblocks;
  std::map<std::string, std::map<uint64_t, uint64_t>> cf_access_nblocks;
  uint64_t total_nblocks = 0;
  auto block_callback =
      [&](const std::string& cf_name, uint64_t /*fd*/, uint32_t /*level*/,
          TraceType type, const std::string& /*block_key*/,
          uint64_t /*block_id*/, const BlockAccessInfo& block) {
        const std::string type_str = block_type_to_string(type);
        if (cf_access_nblocks.find(cf_name) == cf_access_nblocks.end()) {
          // initialize.
          for (auto& access : access_count_buckets) {
            cf_access_nblocks[cf_name][access] = 0;
          }
        }
        if (bt_access_nblocks.find(type_str) == bt_access_nblocks.end()) {
          // initialize.
          for (auto& access : access_count_buckets) {
            bt_access_nblocks[type_str][access] = 0;
          }
        }
        uint64_t naccesses = 0;
        for (auto const& caller_access : block.caller_num_access_map) {
          if (!user_access_only || is_user_access(caller_access.first)) {
            naccesses += caller_access.second;
          }
        }
        if (naccesses == 0) {
          return;
        }
        total_nblocks += 1;
        bt_access_nblocks[type_str].upper_bound(naccesses)->second += 1;
        cf_access_nblocks[cf_name].upper_bound(naccesses)->second += 1;
      };
  TraverseBlocks(block_callback);
  const std::string user_access_prefix =
      user_access_only ? "user_access_only_" : "all_access_";
  WriteStatsToFile("cf", access_count_buckets,
                   user_access_prefix + kFileNameSuffixAccessCountSummary,
                   cf_access_nblocks, total_nblocks);
  WriteStatsToFile("bt", access_count_buckets,
                   user_access_prefix + kFileNameSuffixAccessCountSummary,
                   bt_access_nblocks, total_nblocks);
}

BlockCacheTraceAnalyzer::BlockCacheTraceAnalyzer(
    const std::string& trace_file_path, const std::string& output_dir,
    const std::string& human_readable_trace_file_path,
    bool compute_reuse_distance, bool mrc_only,
    bool is_human_readable_trace_file,
    std::unique_ptr<BlockCacheTraceSimulator>&& cache_simulator)
    : env_(ROCKSDB_NAMESPACE::Env::Default()),
      trace_file_path_(trace_file_path),
      output_dir_(output_dir),
      human_readable_trace_file_path_(human_readable_trace_file_path),
      compute_reuse_distance_(compute_reuse_distance),
      mrc_only_(mrc_only),
      is_human_readable_trace_file_(is_human_readable_trace_file),
      cache_simulator_(std::move(cache_simulator)) {}

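// Computes the reuse distance of a block, i.e., the cumulative size of the
// unique blocks accessed since this block was last accessed, and records it
// in the block's reuse_distance_count map. No-op on the first access.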
void BlockCacheTraceAnalyzer::ComputeReuseDistance(
    BlockAccessInfo* info) const {
  assert(info);
  if (info->num_accesses == 0) {
    return;
  }
  uint64_t reuse_distance = 0;
  for (auto const& block_key : info->unique_blocks_since_last_access) {
    auto const& it = block_info_map_.find(block_key);
    // This block must exist.
    assert(it != block_info_map_.end());
    reuse_distance += it->second->block_size;
  }
  info->reuse_distance_count[reuse_distance] += 1;
  // We clear this hash set since this is the second access on this block.
  info->unique_blocks_since_last_access.clear();
}

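// Folds one trace record into the per-column-family/SST-file/block-type
// aggregates, assigns ids to newly observed blocks and Get keys, updates the
// reuse-distance bookkeeping when enabled, and forwards the record to the
// human-readable trace writer.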
Status BlockCacheTraceAnalyzer::RecordAccess(
    const BlockCacheTraceRecord& access) {
  ColumnFamilyAccessInfoAggregate& cf_aggr = cf_aggregates_map_[access.cf_name];
  SSTFileAccessInfoAggregate& file_aggr =
      cf_aggr.fd_aggregates_map[access.sst_fd_number];
  file_aggr.level = access.level;
  BlockTypeAccessInfoAggregate& block_type_aggr =
      file_aggr.block_type_aggregates_map[access.block_type];
  if (block_type_aggr.block_access_info_map.find(access.block_key) ==
      block_type_aggr.block_access_info_map.end()) {
    block_type_aggr.block_access_info_map[access.block_key].block_id =
        unique_block_id_;
    unique_block_id_++;
  }
  BlockAccessInfo& block_access_info =
      block_type_aggr.block_access_info_map[access.block_key];
  if (compute_reuse_distance_) {
    ComputeReuseDistance(&block_access_info);
  }
  block_access_info.AddAccess(access, access_sequence_number_);
  block_info_map_[access.block_key] = &block_access_info;
  uint64_t get_key_id = 0;
  if (access.caller == TableReaderCaller::kUserGet &&
      access.get_id != BlockCacheTraceHelper::kReservedGetId) {
    std::string user_key = ExtractUserKey(access.referenced_key).ToString();
    if (get_key_info_map_.find(user_key) == get_key_info_map_.end()) {
      get_key_info_map_[user_key].key_id = unique_get_key_id_;
      unique_get_key_id_++;
    }
    get_key_id = get_key_info_map_[user_key].key_id;
    get_key_info_map_[user_key].AddAccess(access, access_sequence_number_);
  }

  if (compute_reuse_distance_) {
    // Add this block to all existing blocks.
    for (auto& cf_aggregates : cf_aggregates_map_) {
      for (auto& file_aggregates : cf_aggregates.second.fd_aggregates_map) {
        for (auto& block_type_aggregates :
             file_aggregates.second.block_type_aggregates_map) {
          for (auto& existing_block :
               block_type_aggregates.second.block_access_info_map) {
            existing_block.second.unique_blocks_since_last_access.insert(
                access.block_key);
          }
        }
      }
    }
  }
  return human_readable_trace_writer_.WriteHumanReadableTraceRecord(
      access, block_access_info.block_id, get_key_id);
}

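// Reads the trace (binary or human-readable), feeds every record to the
// aggregation logic and the optional cache simulator, and periodically prints
// progress. Returns the reader's final status; the tool entry point treats
// Status::Incomplete as a normal end of trace.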
Status BlockCacheTraceAnalyzer::Analyze() {
  std::unique_ptr<BlockCacheTraceReader> reader;
  Status s = Status::OK();
  if (is_human_readable_trace_file_) {
    reader.reset(new BlockCacheHumanReadableTraceReader(trace_file_path_));
  } else {
    std::unique_ptr<TraceReader> trace_reader;
    s = NewFileTraceReader(env_, EnvOptions(), trace_file_path_, &trace_reader);
    if (!s.ok()) {
      return s;
    }
    reader.reset(new BlockCacheTraceReader(std::move(trace_reader)));
    s = reader->ReadHeader(&header_);
    if (!s.ok()) {
      return s;
    }
  }
  if (!human_readable_trace_file_path_.empty()) {
    s = human_readable_trace_writer_.NewWritableFile(
        human_readable_trace_file_path_, env_);
    if (!s.ok()) {
      return s;
    }
  }
  uint64_t start = env_->NowMicros();
  uint64_t time_interval = 0;
  while (s.ok()) {
    BlockCacheTraceRecord access;
    s = reader->ReadAccess(&access);
    if (!s.ok()) {
      break;
    }
    if (!mrc_only_) {
      s = RecordAccess(access);
      if (!s.ok()) {
        break;
      }
    }
    if (trace_start_timestamp_in_seconds_ == 0) {
      trace_start_timestamp_in_seconds_ =
          access.access_timestamp / kMicrosInSecond;
    }
    trace_end_timestamp_in_seconds_ = access.access_timestamp / kMicrosInSecond;
    miss_ratio_stats_.UpdateMetrics(access.access_timestamp,
                                    is_user_access(access.caller),
                                    access.is_cache_hit == Boolean::kFalse);
    if (cache_simulator_) {
      cache_simulator_->Access(access);
    }
    access_sequence_number_++;
    uint64_t now = env_->NowMicros();
    uint64_t duration = (now - start) / kMicrosInSecond;
    if (duration > 10 * time_interval) {
      uint64_t trace_duration =
          trace_end_timestamp_in_seconds_ - trace_start_timestamp_in_seconds_;
      fprintf(stdout,
              "Running for %" PRIu64 " seconds: Processed %" PRIu64
              " records/second. Trace duration %" PRIu64
              " seconds. Observed miss ratio %.2f\n",
              duration, duration > 0 ? access_sequence_number_ / duration : 0,
              trace_duration, miss_ratio_stats_.miss_ratio());
      time_interval++;
    }
  }
  uint64_t now = env_->NowMicros();
  uint64_t duration = (now - start) / kMicrosInSecond;
  uint64_t trace_duration =
      trace_end_timestamp_in_seconds_ - trace_start_timestamp_in_seconds_;
  fprintf(stdout,
          "Running for %" PRIu64 " seconds: Processed %" PRIu64
          " records/second. Trace duration %" PRIu64
          " seconds. Observed miss ratio %.2f\n",
          duration, duration > 0 ? access_sequence_number_ / duration : 0,
          trace_duration, miss_ratio_stats_.miss_ratio());
  return s;
}

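// Prints block size histograms: overall, per block type, and per column
// family and block type. Blocks whose size is unknown (zero) are skipped.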
void BlockCacheTraceAnalyzer::PrintBlockSizeStats() const {
  HistogramStat bs_stats;
  std::map<TraceType, HistogramStat> bt_stats_map;
  std::map<std::string, std::map<TraceType, HistogramStat>> cf_bt_stats_map;
  auto block_callback =
      [&](const std::string& cf_name, uint64_t /*fd*/, uint32_t /*level*/,
          TraceType type, const std::string& /*block_key*/,
          uint64_t /*block_id*/, const BlockAccessInfo& block) {
        if (block.block_size == 0) {
          // Block size may be 0 when 1) compaction observes a cache miss and
          // does not insert the missing block into the cache again. 2)
          // fetching filter blocks in SST files at the last level.
          return;
        }
        bs_stats.Add(block.block_size);
        bt_stats_map[type].Add(block.block_size);
        cf_bt_stats_map[cf_name][type].Add(block.block_size);
      };
  TraverseBlocks(block_callback);
  fprintf(stdout, "Block size stats: \n%s", bs_stats.ToString().c_str());
  for (auto const& bt_stats : bt_stats_map) {
    print_break_lines(/*num_break_lines=*/1);
    fprintf(stdout, "Block size stats for block type %s: \n%s",
            block_type_to_string(bt_stats.first).c_str(),
            bt_stats.second.ToString().c_str());
  }
  for (auto const& cf_bt_stats : cf_bt_stats_map) {
    const std::string& cf_name = cf_bt_stats.first;
    for (auto const& bt_stats : cf_bt_stats.second) {
      print_break_lines(/*num_break_lines=*/1);
      fprintf(stdout,
              "Block size stats for column family %s and block type %s: \n%s",
              cf_name.c_str(), block_type_to_string(bt_stats.first).c_str(),
              bt_stats.second.ToString().c_str());
    }
  }
}

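// Prints histograms of per-block access counts (overall, per block type, and
// per column family and block type), plus detailed caller and key statistics
// for the data blocks with the bottom-k and top-k access counts.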
void BlockCacheTraceAnalyzer::PrintAccessCountStats(bool user_access_only,
                                                    uint32_t bottom_k,
                                                    uint32_t top_k) const {
  HistogramStat access_stats;
  std::map<TraceType, HistogramStat> bt_stats_map;
  std::map<std::string, std::map<TraceType, HistogramStat>> cf_bt_stats_map;
  std::map<uint64_t, std::vector<std::string>> access_count_blocks;
  auto block_callback = [&](const std::string& cf_name, uint64_t /*fd*/,
                            uint32_t /*level*/, TraceType type,
                            const std::string& block_key, uint64_t /*block_id*/,
                            const BlockAccessInfo& block) {
    uint64_t naccesses = 0;
    for (auto const& caller_access : block.caller_num_access_map) {
      if (!user_access_only || is_user_access(caller_access.first)) {
        naccesses += caller_access.second;
      }
    }
    if (naccesses == 0) {
      return;
    }
    if (type == TraceType::kBlockTraceDataBlock) {
      access_count_blocks[naccesses].push_back(block_key);
    }
    access_stats.Add(naccesses);
    bt_stats_map[type].Add(naccesses);
    cf_bt_stats_map[cf_name][type].Add(naccesses);
  };
  TraverseBlocks(block_callback);
  fprintf(stdout,
          "Block access count stats: The number of accesses per block. %s\n%s",
          user_access_only ? "User accesses only" : "All accesses",
          access_stats.ToString().c_str());
  uint32_t bottom_k_index = 0;
  for (auto naccess_it = access_count_blocks.begin();
       naccess_it != access_count_blocks.end(); naccess_it++) {
    bottom_k_index++;
    if (bottom_k_index >= bottom_k) {
      break;
    }
    std::map<TableReaderCaller, uint64_t> caller_naccesses;
    uint64_t naccesses = 0;
    for (auto const& block_id : naccess_it->second) {
      BlockAccessInfo* block = block_info_map_.find(block_id)->second;
      for (auto const& caller_access : block->caller_num_access_map) {
        if (!user_access_only || is_user_access(caller_access.first)) {
          caller_naccesses[caller_access.first] += caller_access.second;
          naccesses += caller_access.second;
        }
      }
    }
    std::string statistics("Caller:");
    for (auto const& caller_naccessess_it : caller_naccesses) {
      statistics += caller_to_string(caller_naccessess_it.first);
      statistics += ":";
      statistics +=
          std::to_string(percent(caller_naccessess_it.second, naccesses));
      statistics += ",";
    }
    fprintf(stdout,
            "Bottom %" PRIu32 " access count. Access count=%" PRIu64
            " nblocks=%" ROCKSDB_PRIszt " %s\n",
            bottom_k, naccess_it->first, naccess_it->second.size(),
            statistics.c_str());
  }

  uint32_t top_k_index = 0;
  for (auto naccess_it = access_count_blocks.rbegin();
       naccess_it != access_count_blocks.rend(); naccess_it++) {
    top_k_index++;
    if (top_k_index >= top_k) {
      break;
    }
    for (auto const& block_id : naccess_it->second) {
      BlockAccessInfo* block = block_info_map_.find(block_id)->second;
      std::string statistics("Caller:");
      uint64_t naccesses = 0;
      for (auto const& caller_access : block->caller_num_access_map) {
        if (!user_access_only || is_user_access(caller_access.first)) {
          naccesses += caller_access.second;
        }
      }
      assert(naccesses > 0);
      for (auto const& caller_access : block->caller_num_access_map) {
        if (!user_access_only || is_user_access(caller_access.first)) {
          statistics += ",";
          statistics += caller_to_string(caller_access.first);
          statistics += ":";
          statistics +=
              std::to_string(percent(caller_access.second, naccesses));
        }
      }
      uint64_t ref_keys_accesses = 0;
      uint64_t ref_keys_does_not_exist_accesses = 0;
      for (auto const& ref_key_caller_access : block->key_num_access_map) {
        for (auto const& caller_access : ref_key_caller_access.second) {
          if (!user_access_only || is_user_access(caller_access.first)) {
            ref_keys_accesses += caller_access.second;
          }
        }
      }
      for (auto const& ref_key_caller_access :
           block->non_exist_key_num_access_map) {
        for (auto const& caller_access : ref_key_caller_access.second) {
          if (!user_access_only || is_user_access(caller_access.first)) {
            ref_keys_does_not_exist_accesses += caller_access.second;
          }
        }
      }
      statistics += ",nkeys=";
      statistics += std::to_string(block->num_keys);
      statistics += ",block_size=";
      statistics += std::to_string(block->block_size);
      statistics += ",num_ref_keys=";
      statistics += std::to_string(block->key_num_access_map.size());
      statistics += ",percent_access_ref_keys=";
      statistics += std::to_string(percent(ref_keys_accesses, naccesses));
      statistics += ",num_ref_keys_does_not_exist=";
      statistics += std::to_string(block->non_exist_key_num_access_map.size());
      statistics += ",percent_access_ref_keys_does_not_exist=";
      statistics +=
          std::to_string(percent(ref_keys_does_not_exist_accesses, naccesses));
      statistics += ",ref_data_size=";
      statistics += std::to_string(block->referenced_data_size);
      fprintf(stdout,
              "Top %" PRIu32 " access count blocks access_count=%" PRIu64
              " %s\n",
              top_k, naccess_it->first, statistics.c_str());
    }
  }

  for (auto const& bt_stats : bt_stats_map) {
    print_break_lines(/*num_break_lines=*/1);
    fprintf(stdout, "Break down by block type %s: \n%s",
            block_type_to_string(bt_stats.first).c_str(),
            bt_stats.second.ToString().c_str());
  }
  for (auto const& cf_bt_stats : cf_bt_stats_map) {
    const std::string& cf_name = cf_bt_stats.first;
    for (auto const& bt_stats : cf_bt_stats.second) {
      print_break_lines(/*num_break_lines=*/1);
      fprintf(stdout,
              "Break down by column family %s and block type "
              "%s: \n%s",
              cf_name.c_str(), block_type_to_string(bt_stats.first).c_str(),
              bt_stats.second.ToString().c_str());
    }
  }
}

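// Prints histograms describing how keys inside data blocks are referenced:
// the fraction of keys that are (and are not) referenced, the fraction of
// accesses whose referenced key exists in the block, and the average and
// standard deviation of per-key access counts. Ratios are scaled by 10000 to
// keep four decimal places in the integer-valued histograms.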
void BlockCacheTraceAnalyzer::PrintDataBlockAccessStats() const {
  HistogramStat existing_keys_stats;
  std::map<std::string, HistogramStat> cf_existing_keys_stats_map;
  HistogramStat non_existing_keys_stats;
  std::map<std::string, HistogramStat> cf_non_existing_keys_stats_map;
  HistogramStat block_access_stats;
  std::map<std::string, HistogramStat> cf_block_access_info;
  HistogramStat percent_referenced_bytes;
  std::map<std::string, HistogramStat> cf_percent_referenced_bytes;
  // Total number of accesses in a data block / number of keys in a data block.
  HistogramStat avg_naccesses_per_key_in_a_data_block;
  std::map<std::string, HistogramStat> cf_avg_naccesses_per_key_in_a_data_block;
  // The standard deviation on the number of accesses of a key in a data block.
  HistogramStat stdev_naccesses_per_key_in_a_data_block;
  std::map<std::string, HistogramStat>
      cf_stdev_naccesses_per_key_in_a_data_block;
  auto block_callback =
      [&](const std::string& cf_name, uint64_t /*fd*/, uint32_t /*level*/,
          TraceType /*type*/, const std::string& /*block_key*/,
          uint64_t /*block_id*/, const BlockAccessInfo& block) {
        if (block.num_keys == 0) {
          return;
        }
        // Use four decimal points.
        uint64_t percent_referenced_for_existing_keys = (uint64_t)(
            ((double)block.key_num_access_map.size() / (double)block.num_keys) *
            10000.0);
        uint64_t percent_referenced_for_non_existing_keys =
            (uint64_t)(((double)block.non_exist_key_num_access_map.size() /
                        (double)block.num_keys) *
                       10000.0);
        uint64_t percent_accesses_for_existing_keys =
            (uint64_t)(((double)block.num_referenced_key_exist_in_block /
                        (double)block.num_accesses) *
                       10000.0);

        HistogramStat hist_naccess_per_key;
        for (auto const& key_access : block.key_num_access_map) {
          for (auto const& caller_access : key_access.second) {
            hist_naccess_per_key.Add(caller_access.second);
          }
        }
        uint64_t avg_accesses =
            static_cast<uint64_t>(hist_naccess_per_key.Average());
        uint64_t stdev_accesses =
            static_cast<uint64_t>(hist_naccess_per_key.StandardDeviation());
        avg_naccesses_per_key_in_a_data_block.Add(avg_accesses);
        cf_avg_naccesses_per_key_in_a_data_block[cf_name].Add(avg_accesses);
        stdev_naccesses_per_key_in_a_data_block.Add(stdev_accesses);
        cf_stdev_naccesses_per_key_in_a_data_block[cf_name].Add(stdev_accesses);

        existing_keys_stats.Add(percent_referenced_for_existing_keys);
        cf_existing_keys_stats_map[cf_name].Add(
            percent_referenced_for_existing_keys);
        non_existing_keys_stats.Add(percent_referenced_for_non_existing_keys);
        cf_non_existing_keys_stats_map[cf_name].Add(
            percent_referenced_for_non_existing_keys);
        block_access_stats.Add(percent_accesses_for_existing_keys);
        cf_block_access_info[cf_name].Add(percent_accesses_for_existing_keys);
      };
  TraverseBlocks(block_callback);
  fprintf(stdout,
          "Histogram on the number of referenced keys existing in a block over "
          "the total number of keys in a block: \n%s",
          existing_keys_stats.ToString().c_str());
  for (auto const& cf_stats : cf_existing_keys_stats_map) {
    print_break_lines(/*num_break_lines=*/1);
    fprintf(stdout, "Break down by column family %s: \n%s",
            cf_stats.first.c_str(), cf_stats.second.ToString().c_str());
  }
  print_break_lines(/*num_break_lines=*/1);
  fprintf(
      stdout,
      "Histogram on the number of referenced keys DO NOT exist in a block over "
      "the total number of keys in a block: \n%s",
      non_existing_keys_stats.ToString().c_str());
  for (auto const& cf_stats : cf_non_existing_keys_stats_map) {
    print_break_lines(/*num_break_lines=*/1);
    fprintf(stdout, "Break down by column family %s: \n%s",
            cf_stats.first.c_str(), cf_stats.second.ToString().c_str());
  }
  print_break_lines(/*num_break_lines=*/1);
  fprintf(stdout,
          "Histogram on the number of accesses on keys exist in a block over "
          "the total number of accesses in a block: \n%s",
          block_access_stats.ToString().c_str());
  for (auto const& cf_stats : cf_block_access_info) {
    print_break_lines(/*num_break_lines=*/1);
    fprintf(stdout, "Break down by column family %s: \n%s",
            cf_stats.first.c_str(), cf_stats.second.ToString().c_str());
  }
  print_break_lines(/*num_break_lines=*/1);
  fprintf(
      stdout,
      "Histogram on the average number of accesses per key in a block: \n%s",
      avg_naccesses_per_key_in_a_data_block.ToString().c_str());
  for (auto const& cf_stats : cf_avg_naccesses_per_key_in_a_data_block) {
    fprintf(stdout, "Break down by column family %s: \n%s",
            cf_stats.first.c_str(), cf_stats.second.ToString().c_str());
  }
  print_break_lines(/*num_break_lines=*/1);
  fprintf(stdout,
          "Histogram on the standard deviation of the number of accesses per "
          "key in a block: \n%s",
          stdev_naccesses_per_key_in_a_data_block.ToString().c_str());
  for (auto const& cf_stats : cf_stdev_naccesses_per_key_in_a_data_block) {
    fprintf(stdout, "Break down by column family %s: \n%s",
            cf_stats.first.c_str(), cf_stats.second.ToString().c_str());
  }
}

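// Prints per-column-family and overall summaries of the trace: numbers of
// files, blocks, and accesses, broken down by block type, caller, LSM level,
// and SST file.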
void BlockCacheTraceAnalyzer::PrintStatsSummary() const {
  uint64_t total_num_files = 0;
  uint64_t total_num_blocks = 0;
  uint64_t total_num_accesses = 0;
  std::map<TraceType, uint64_t> bt_num_blocks_map;
  std::map<TableReaderCaller, uint64_t> caller_num_access_map;
  std::map<TableReaderCaller, std::map<TraceType, uint64_t>>
      caller_bt_num_access_map;
  std::map<TableReaderCaller, std::map<uint32_t, uint64_t>>
      caller_level_num_access_map;
  for (auto const& cf_aggregates : cf_aggregates_map_) {
    // Stats per column family.
    const std::string& cf_name = cf_aggregates.first;
    uint64_t cf_num_files = 0;
    uint64_t cf_num_blocks = 0;
    std::map<TraceType, uint64_t> cf_bt_blocks;
    uint64_t cf_num_accesses = 0;
    std::map<TableReaderCaller, uint64_t> cf_caller_num_accesses_map;
    std::map<TableReaderCaller, std::map<uint64_t, uint64_t>>
        cf_caller_level_num_accesses_map;
    std::map<TableReaderCaller, std::map<uint64_t, uint64_t>>
        cf_caller_file_num_accesses_map;
    std::map<TableReaderCaller, std::map<TraceType, uint64_t>>
        cf_caller_bt_num_accesses_map;
    total_num_files += cf_aggregates.second.fd_aggregates_map.size();
    for (auto const& file_aggregates : cf_aggregates.second.fd_aggregates_map) {
      // Stats per SST file.
      const uint64_t fd = file_aggregates.first;
      const uint32_t level = file_aggregates.second.level;
      cf_num_files++;
      for (auto const& block_type_aggregates :
           file_aggregates.second.block_type_aggregates_map) {
        // Stats per block type.
        const TraceType type = block_type_aggregates.first;
        cf_bt_blocks[type] +=
            block_type_aggregates.second.block_access_info_map.size();
        total_num_blocks +=
            block_type_aggregates.second.block_access_info_map.size();
        bt_num_blocks_map[type] +=
            block_type_aggregates.second.block_access_info_map.size();
        for (auto const& block_access_info :
             block_type_aggregates.second.block_access_info_map) {
          // Stats per block.
          cf_num_blocks++;
          for (auto const& stats :
               block_access_info.second.caller_num_access_map) {
            // Stats per caller.
            const TableReaderCaller caller = stats.first;
            const uint64_t num_accesses = stats.second;
            // Overall stats.
            total_num_accesses += num_accesses;
            caller_num_access_map[caller] += num_accesses;
            caller_bt_num_access_map[caller][type] += num_accesses;
            caller_level_num_access_map[caller][level] += num_accesses;
            // Column Family stats.
            cf_num_accesses += num_accesses;
            cf_caller_num_accesses_map[caller] += num_accesses;
            cf_caller_level_num_accesses_map[caller][level] += num_accesses;
            cf_caller_file_num_accesses_map[caller][fd] += num_accesses;
            cf_caller_bt_num_accesses_map[caller][type] += num_accesses;
          }
        }
      }
    }

    // Print stats.
    print_break_lines(/*num_break_lines=*/3);
    fprintf(stdout, "Statistics for column family %s:\n", cf_name.c_str());
    fprintf(stdout,
            " Number of files:%" PRIu64 " Number of blocks: %" PRIu64
            " Number of accesses: %" PRIu64 "\n",
            cf_num_files, cf_num_blocks, cf_num_accesses);
    for (auto block_type : cf_bt_blocks) {
      fprintf(stdout, "Number of %s blocks: %" PRIu64 " Percent: %.2f\n",
              block_type_to_string(block_type.first).c_str(), block_type.second,
              percent(block_type.second, cf_num_blocks));
    }
    for (auto caller : cf_caller_num_accesses_map) {
      const uint64_t naccesses = caller.second;
      print_break_lines(/*num_break_lines=*/1);
      fprintf(stdout,
              "Caller %s: Number of accesses %" PRIu64 " Percent: %.2f\n",
              caller_to_string(caller.first).c_str(), naccesses,
              percent(naccesses, cf_num_accesses));
      fprintf(stdout, "Caller %s: Number of accesses per level break down\n",
              caller_to_string(caller.first).c_str());
      for (auto naccess_level :
           cf_caller_level_num_accesses_map[caller.first]) {
        fprintf(stdout,
                "\t Level %" PRIu64 ": Number of accesses: %" PRIu64
                " Percent: %.2f\n",
                naccess_level.first, naccess_level.second,
                percent(naccess_level.second, naccesses));
      }
      fprintf(stdout, "Caller %s: Number of accesses per file break down\n",
              caller_to_string(caller.first).c_str());
      for (auto naccess_file : cf_caller_file_num_accesses_map[caller.first]) {
        fprintf(stdout,
                "\t File %" PRIu64 ": Number of accesses: %" PRIu64
                " Percent: %.2f\n",
                naccess_file.first, naccess_file.second,
                percent(naccess_file.second, naccesses));
      }
      fprintf(stdout,
              "Caller %s: Number of accesses per block type break down\n",
              caller_to_string(caller.first).c_str());
      for (auto naccess_type : cf_caller_bt_num_accesses_map[caller.first]) {
        fprintf(stdout,
                "\t Block Type %s: Number of accesses: %" PRIu64
                " Percent: %.2f\n",
                block_type_to_string(naccess_type.first).c_str(),
                naccess_type.second, percent(naccess_type.second, naccesses));
      }
    }
  }
  print_break_lines(/*num_break_lines=*/3);
  fprintf(stdout, "Overall statistics:\n");
  fprintf(stdout,
          "Number of files: %" PRIu64 " Number of blocks: %" PRIu64
          " Number of accesses: %" PRIu64 "\n",
          total_num_files, total_num_blocks, total_num_accesses);
  for (auto block_type : bt_num_blocks_map) {
    fprintf(stdout, "Number of %s blocks: %" PRIu64 " Percent: %.2f\n",
            block_type_to_string(block_type.first).c_str(), block_type.second,
            percent(block_type.second, total_num_blocks));
  }
  for (auto caller : caller_num_access_map) {
    print_break_lines(/*num_break_lines=*/1);
    uint64_t naccesses = caller.second;
    fprintf(stdout, "Caller %s: Number of accesses %" PRIu64 " Percent: %.2f\n",
            caller_to_string(caller.first).c_str(), naccesses,
            percent(naccesses, total_num_accesses));
    fprintf(stdout, "Caller %s: Number of accesses per level break down\n",
            caller_to_string(caller.first).c_str());
    for (auto naccess_level : caller_level_num_access_map[caller.first]) {
      fprintf(stdout,
              "\t Level %d: Number of accesses: %" PRIu64 " Percent: %.2f\n",
              naccess_level.first, naccess_level.second,
              percent(naccess_level.second, naccesses));
    }
    fprintf(stdout, "Caller %s: Number of accesses per block type break down\n",
            caller_to_string(caller.first).c_str());
    for (auto naccess_type : caller_bt_num_access_map[caller.first]) {
      fprintf(stdout,
              "\t Block Type %s: Number of accesses: %" PRIu64
              " Percent: %.2f\n",
              block_type_to_string(naccess_type.first).c_str(),
              naccess_type.second, percent(naccess_type.second, naccesses));
    }
  }
}

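// Parses the cache simulator configuration file, one cache configuration per
// line in the form "cache_name,num_shard_bits,ghost_capacity,cache_capacity_1,
// ...,cache_capacity_N" (see --block_cache_sim_config_path). Exits the process
// on a malformed line.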
std::vector<CacheConfiguration> parse_cache_config_file(
    const std::string& config_path) {
  std::ifstream file(config_path);
  if (!file.is_open()) {
    return {};
  }
  std::vector<CacheConfiguration> configs;
  std::string line;
  while (getline(file, line)) {
    CacheConfiguration cache_config;
    std::stringstream ss(line);
    std::vector<std::string> config_strs;
    while (ss.good()) {
      std::string substr;
      getline(ss, substr, ',');
      config_strs.push_back(substr);
    }
    // Sanity checks.
    if (config_strs.size() < 4) {
      fprintf(stderr, "Invalid cache simulator configuration %s\n",
              line.c_str());
      exit(1);
    }
    if (kSupportedCacheNames.find(" " + config_strs[0] + " ") ==
        std::string::npos) {
      fprintf(stderr, "Invalid cache name %s. Supported cache names are %s\n",
              line.c_str(), kSupportedCacheNames.c_str());
      exit(1);
    }
    cache_config.cache_name = config_strs[0];
    cache_config.num_shard_bits = ParseUint32(config_strs[1]);
    cache_config.ghost_cache_capacity = ParseUint64(config_strs[2]);
    for (uint32_t i = 3; i < config_strs.size(); i++) {
      uint64_t capacity = ParseUint64(config_strs[i]);
      if (capacity == 0) {
        fprintf(stderr, "Invalid cache capacity %s, %s\n",
                config_strs[i].c_str(), line.c_str());
        exit(1);
      }
      cache_config.cache_capacities.push_back(capacity);
    }
    configs.push_back(cache_config);
  }
  file.close();
  return configs;
}

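// Splits a comma-separated list of integers into bucket boundaries and appends
// port::kMaxUint64 as a catch-all bucket, e.g. "1,10,100" becomes
// {1, 10, 100, port::kMaxUint64}.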
std::vector<uint64_t> parse_buckets(const std::string& bucket_str) {
  std::vector<uint64_t> buckets;
  std::stringstream ss(bucket_str);
  while (ss.good()) {
    std::string bucket;
    getline(ss, bucket, ',');
    buckets.push_back(ParseUint64(bucket));
  }
  buckets.push_back(port::kMaxUint64);
  return buckets;
}

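// Entry point of the analyzer tool: parses command line flags, optionally runs
// the block cache simulation, analyzes the trace, and writes/prints the
// reports selected by the flags.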
int block_cache_trace_analyzer_tool(int argc, char** argv) {
  ParseCommandLineFlags(&argc, &argv, true);
  if (FLAGS_block_cache_trace_path.empty()) {
    fprintf(stderr, "block cache trace path is empty\n");
    exit(1);
  }
  uint64_t warmup_seconds =
      FLAGS_cache_sim_warmup_seconds > 0 ? FLAGS_cache_sim_warmup_seconds : 0;
  uint32_t downsample_ratio = FLAGS_block_cache_trace_downsample_ratio > 0
                                  ? FLAGS_block_cache_trace_downsample_ratio
                                  : 0;
  std::vector<CacheConfiguration> cache_configs =
      parse_cache_config_file(FLAGS_block_cache_sim_config_path);
  std::unique_ptr<BlockCacheTraceSimulator> cache_simulator;
  if (!cache_configs.empty()) {
    cache_simulator.reset(new BlockCacheTraceSimulator(
        warmup_seconds, downsample_ratio, cache_configs));
    Status s = cache_simulator->InitializeCaches();
    if (!s.ok()) {
      fprintf(stderr, "Cannot initialize cache simulators %s\n",
              s.ToString().c_str());
      exit(1);
    }
  }
  BlockCacheTraceAnalyzer analyzer(
      FLAGS_block_cache_trace_path, FLAGS_block_cache_analysis_result_dir,
      FLAGS_human_readable_trace_file_path,
      !FLAGS_reuse_distance_labels.empty(), FLAGS_mrc_only,
      FLAGS_is_block_cache_human_readable_trace, std::move(cache_simulator));
  Status s = analyzer.Analyze();
  if (!s.IsIncomplete() && !s.ok()) {
    // Read all traces.
    fprintf(stderr, "Cannot process the trace %s\n", s.ToString().c_str());
    exit(1);
  }
  fprintf(stdout, "Status: %s\n", s.ToString().c_str());
  analyzer.WriteMissRatioCurves();
  analyzer.WriteMissRatioTimeline(1);
  analyzer.WriteMissRatioTimeline(kSecondInMinute);
  analyzer.WriteMissRatioTimeline(kSecondInHour);
  analyzer.WriteMissTimeline(1);
  analyzer.WriteMissTimeline(kSecondInMinute);
  analyzer.WriteMissTimeline(kSecondInHour);

  if (FLAGS_mrc_only) {
    fprintf(stdout,
            "Skipping the analysis statistics since the user wants to compute "
            "MRC only");
    return 0;
  }

  analyzer.PrintStatsSummary();
  if (FLAGS_print_access_count_stats) {
    print_break_lines(/*num_break_lines=*/3);
    analyzer.PrintAccessCountStats(
        /*user_access_only=*/false, FLAGS_analyze_bottom_k_access_count_blocks,
        FLAGS_analyze_top_k_access_count_blocks);
    print_break_lines(/*num_break_lines=*/3);
    analyzer.PrintAccessCountStats(
        /*user_access_only=*/true, FLAGS_analyze_bottom_k_access_count_blocks,
        FLAGS_analyze_top_k_access_count_blocks);
  }
  if (FLAGS_print_block_size_stats) {
    print_break_lines(/*num_break_lines=*/3);
    analyzer.PrintBlockSizeStats();
  }
  if (FLAGS_print_data_block_access_count_stats) {
    print_break_lines(/*num_break_lines=*/3);
    analyzer.PrintDataBlockAccessStats();
  }
  print_break_lines(/*num_break_lines=*/3);

  if (!FLAGS_timeline_labels.empty()) {
    std::stringstream ss(FLAGS_timeline_labels);
    while (ss.good()) {
      std::string label;
      getline(ss, label, ',');
      if (label.find("block") != std::string::npos) {
        analyzer.WriteAccessTimeline(label, kSecondInMinute, true);
        analyzer.WriteAccessTimeline(label, kSecondInMinute, false);
        analyzer.WriteAccessTimeline(label, kSecondInHour, true);
        analyzer.WriteAccessTimeline(label, kSecondInHour, false);
      } else {
        analyzer.WriteAccessTimeline(label, kSecondInMinute, false);
        analyzer.WriteAccessTimeline(label, kSecondInHour, false);
      }
    }
  }

  if (!FLAGS_analyze_callers.empty()) {
    analyzer.WritePercentAccessSummaryStats();
    std::stringstream ss(FLAGS_analyze_callers);
    while (ss.good()) {
      std::string caller;
      getline(ss, caller, ',');
      analyzer.WriteDetailedPercentAccessSummaryStats(string_to_caller(caller));
    }
  }

  if (!FLAGS_access_count_buckets.empty()) {
    std::vector<uint64_t> buckets = parse_buckets(FLAGS_access_count_buckets);
    analyzer.WriteAccessCountSummaryStats(buckets, /*user_access_only=*/true);
    analyzer.WriteAccessCountSummaryStats(buckets, /*user_access_only=*/false);
  }

  if (!FLAGS_reuse_distance_labels.empty() &&
      !FLAGS_reuse_distance_buckets.empty()) {
    std::vector<uint64_t> buckets = parse_buckets(FLAGS_reuse_distance_buckets);
    std::stringstream ss(FLAGS_reuse_distance_labels);
    while (ss.good()) {
      std::string label;
      getline(ss, label, ',');
      analyzer.WriteReuseDistance(label, buckets);
    }
  }

  if (!FLAGS_reuse_interval_labels.empty() &&
      !FLAGS_reuse_interval_buckets.empty()) {
    std::vector<uint64_t> buckets = parse_buckets(FLAGS_reuse_interval_buckets);
    std::stringstream ss(FLAGS_reuse_interval_labels);
    while (ss.good()) {
      std::string label;
      getline(ss, label, ',');
      analyzer.WriteReuseInterval(label, buckets);
    }
  }

  if (!FLAGS_reuse_lifetime_labels.empty() &&
      !FLAGS_reuse_lifetime_buckets.empty()) {
    std::vector<uint64_t> buckets = parse_buckets(FLAGS_reuse_lifetime_buckets);
    std::stringstream ss(FLAGS_reuse_lifetime_labels);
    while (ss.good()) {
      std::string label;
      getline(ss, label, ',');
      analyzer.WriteReuseLifetime(label, buckets);
    }
  }

  if (FLAGS_analyze_blocks_reuse_k_reuse_window != 0) {
    std::vector<TraceType> block_types{TraceType::kBlockTraceIndexBlock,
                                       TraceType::kBlockTraceDataBlock,
                                       TraceType::kBlockTraceFilterBlock};
    for (auto block_type : block_types) {
      analyzer.WriteBlockReuseTimeline(
          FLAGS_analyze_blocks_reuse_k_reuse_window,
          /*user_access_only=*/true, block_type);
      analyzer.WriteBlockReuseTimeline(
          FLAGS_analyze_blocks_reuse_k_reuse_window,
          /*user_access_only=*/false, block_type);
    }
  }

  if (!FLAGS_analyze_get_spatial_locality_labels.empty() &&
      !FLAGS_analyze_get_spatial_locality_buckets.empty()) {
    std::vector<uint64_t> buckets =
        parse_buckets(FLAGS_analyze_get_spatial_locality_buckets);
    std::stringstream ss(FLAGS_analyze_get_spatial_locality_labels);
    while (ss.good()) {
      std::string label;
      getline(ss, label, ',');
      analyzer.WriteGetSpatialLocality(label, buckets);
    }
  }

  if (!FLAGS_analyze_correlation_coefficients_labels.empty()) {
    std::stringstream ss(FLAGS_analyze_correlation_coefficients_labels);
    while (ss.good()) {
      std::string label;
      getline(ss, label, ',');
      analyzer.WriteCorrelationFeatures(
          label, FLAGS_analyze_correlation_coefficients_max_number_of_values);
    }
    analyzer.WriteCorrelationFeaturesForGet(
        FLAGS_analyze_correlation_coefficients_max_number_of_values);
  }

  if (!FLAGS_skew_labels.empty() && !FLAGS_skew_buckets.empty()) {
    std::vector<uint64_t> buckets = parse_buckets(FLAGS_skew_buckets);
    std::stringstream ss(FLAGS_skew_labels);
    while (ss.good()) {
      std::string label;
      getline(ss, label, ',');
      if (label.find("block") != std::string::npos) {
        analyzer.WriteSkewness(label, buckets,
                               TraceType::kBlockTraceIndexBlock);
        analyzer.WriteSkewness(label, buckets,
                               TraceType::kBlockTraceFilterBlock);
        analyzer.WriteSkewness(label, buckets, TraceType::kBlockTraceDataBlock);
        analyzer.WriteSkewness(label, buckets, TraceType::kTraceMax);
      } else {
        analyzer.WriteSkewness(label, buckets, TraceType::kTraceMax);
      }
    }
  }
  return 0;
}

}  // namespace ROCKSDB_NAMESPACE

#endif  // GFLAGS
#endif  // ROCKSDB_LITE