// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <assert.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sqlite3.h>
#include "util/histogram.h"
#include "util/random.h"
#include "util/testutil.h"

// Comma-separated list of operations to run in the specified order
//   Actual benchmarks:
//
//   fillseq        -- write N values in sequential key order in async mode
//   fillseqsync    -- write N/100 values in sequential key order in sync mode
//   fillseqbatch   -- batch write N values in sequential key order in async mode
//   fillrandom     -- write N values in random key order in async mode
//   fillrandsync   -- write N/100 values in random key order in sync mode
//   fillrandbatch  -- batch write N values in random key order in async mode
//   overwrite      -- overwrite N values in random key order in async mode
//   overwritebatch -- batch overwrite N values in random key order in async mode
//   fillrand100K   -- write N/1000 100K values in random order in async mode
//   fillseq100K    -- write N/1000 100K values in sequential order in async mode
//   readseq        -- read N times sequentially
//   readrandom     -- read N times in random order
//   readrand100K   -- read N/1000 100K values in random order in async mode
static const char* FLAGS_benchmarks =
    "fillseq,"
    "fillseqsync,"
    "fillseqbatch,"
    "fillrandom,"
    "fillrandsync,"
    "fillrandbatch,"
    "overwrite,"
    "overwritebatch,"
    "readrandom,"
    "readseq,"
    "fillrand100K,"
    "fillseq100K,"
    "readseq,"
    "readrand100K,"
    ;
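
// Example invocation (the binary name below is illustrative; use whatever
// target your build produces for this file):
//
//   ./db_bench_sqlite3 --benchmarks=fillseq,readrandom --num=100000 \
//       --value_size=100 --histogram=1
//
// Any flag not recognized by main() causes the program to exit with an error.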

// Number of key/values to place in database
static int FLAGS_num = 1000000;

// Number of read operations to do. If negative, do FLAGS_num reads.
static int FLAGS_reads = -1;

// Size of each value
static int FLAGS_value_size = 100;

// Print histogram of operation timings
static bool FLAGS_histogram = false;

// Arrange to generate values that shrink to this fraction of
// their original size after compression
static double FLAGS_compression_ratio = 0.5;

// Page size. Default 1 KB.
static int FLAGS_page_size = 1024;

// Number of pages.
// Default cache size = FLAGS_page_size * FLAGS_num_pages = 4 MB.
static int FLAGS_num_pages = 4096;

// If true, do not destroy the existing database. If you set this
// flag and also specify a benchmark that wants a fresh database, that
// benchmark will fail.
static bool FLAGS_use_existing_db = false;

// If true, we allow batch writes to occur
static bool FLAGS_transaction = true;

// If true, we enable Write-Ahead Logging
static bool FLAGS_WAL_enabled = true;

// Use the db with the following name.
static const char* FLAGS_db = NULL;

inline
static void ExecErrorCheck(int status, char* err_msg) {
  if (status != SQLITE_OK) {
    fprintf(stderr, "SQL error: %s\n", err_msg);
    sqlite3_free(err_msg);
    exit(1);
  }
}

inline
static void StepErrorCheck(int status) {
  if (status != SQLITE_DONE) {
    fprintf(stderr, "SQL step error: status = %d\n", status);
    exit(1);
  }
}

inline
static void ErrorCheck(int status) {
  if (status != SQLITE_OK) {
    fprintf(stderr, "sqlite3 error: status = %d\n", status);
    exit(1);
  }
}

inline
static void WalCheckpoint(sqlite3* db_) {
  // Flush all writes to disk
  if (FLAGS_WAL_enabled) {
    sqlite3_wal_checkpoint_v2(db_, NULL, SQLITE_CHECKPOINT_FULL, NULL, NULL);
  }
}

namespace leveldb {

// Helper for quickly generating random data.
namespace {
class RandomGenerator {
 private:
  std::string data_;
  int pos_;

 public:
  RandomGenerator() {
    // We use a limited amount of data over and over again and ensure
    // that it is larger than the compression window (32KB), and also
    // large enough to serve all typical value sizes we want to write.
    Random rnd(301);
    std::string piece;
    while (data_.size() < 1048576) {
      // Add a short fragment that is as compressible as specified
      // by FLAGS_compression_ratio.
      test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece);
      data_.append(piece);
    }
    pos_ = 0;
  }

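  // Returns a slice of `len` bytes drawn from the pre-generated buffer,
  // wrapping back to the start when the end is reached. `len` must be
  // smaller than the buffer (about 1 MB); the assert below enforces this
  // when wrapping.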
  Slice Generate(int len) {
    if (pos_ + len > data_.size()) {
      pos_ = 0;
      assert(len < data_.size());
    }
    pos_ += len;
    return Slice(data_.data() + pos_ - len, len);
  }
};

static Slice TrimSpace(Slice s) {
  int start = 0;
  while (start < s.size() && isspace(s[start])) {
    start++;
  }
  int limit = s.size();
  while (limit > start && isspace(s[limit-1])) {
    limit--;
  }
  return Slice(s.data() + start, limit - start);
}

}  // namespace

class Benchmark {
 private:
  sqlite3* db_;
  int db_num_;
  int num_;
  int reads_;
  double start_;
  double last_op_finish_;
  int64_t bytes_;
  std::string message_;
  Histogram hist_;
  RandomGenerator gen_;
  Random rand_;

  // State kept for progress messages
  int done_;
  int next_report_;  // When to report next

  void PrintHeader() {
    const int kKeySize = 16;
    PrintEnvironment();
    fprintf(stdout, "Keys: %d bytes each\n", kKeySize);
    fprintf(stdout, "Values: %d bytes each\n", FLAGS_value_size);
    fprintf(stdout, "Entries: %d\n", num_);
    fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
             / 1048576.0));
    PrintWarnings();
    fprintf(stdout, "------------------------------------------------\n");
  }

  void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
    fprintf(stdout,
            "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
            );
#endif
#ifndef NDEBUG
    fprintf(stdout,
            "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif
  }

  void PrintEnvironment() {
    fprintf(stderr, "SQLite: version %s\n", SQLITE_VERSION);

#if defined(__linux)
    time_t now = time(NULL);
    fprintf(stderr, "Date: %s", ctime(&now));  // ctime() adds newline

    FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
    if (cpuinfo != NULL) {
      char line[1000];
      int num_cpus = 0;
      std::string cpu_type;
      std::string cache_size;
      while (fgets(line, sizeof(line), cpuinfo) != NULL) {
        const char* sep = strchr(line, ':');
        if (sep == NULL) {
          continue;
        }
        Slice key = TrimSpace(Slice(line, sep - 1 - line));
        Slice val = TrimSpace(Slice(sep + 1));
        if (key == "model name") {
          ++num_cpus;
          cpu_type = val.ToString();
        } else if (key == "cache size") {
          cache_size = val.ToString();
        }
      }
      fclose(cpuinfo);
      fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str());
      fprintf(stderr, "CPUCache: %s\n", cache_size.c_str());
    }
#endif
  }

  void Start() {
    start_ = Env::Default()->NowMicros() * 1e-6;
    bytes_ = 0;
    message_.clear();
    last_op_finish_ = start_;
    hist_.Clear();
    done_ = 0;
    next_report_ = 100;
  }

  void FinishedSingleOp() {
    if (FLAGS_histogram) {
      double now = Env::Default()->NowMicros() * 1e-6;
      double micros = (now - last_op_finish_) * 1e6;
      hist_.Add(micros);
      if (micros > 20000) {
        fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
        fflush(stderr);
      }
      last_op_finish_ = now;
    }

    done_++;
    if (done_ >= next_report_) {
      if      (next_report_ < 1000)   next_report_ += 100;
      else if (next_report_ < 5000)   next_report_ += 500;
      else if (next_report_ < 10000)  next_report_ += 1000;
      else if (next_report_ < 50000)  next_report_ += 5000;
      else if (next_report_ < 100000) next_report_ += 10000;
      else if (next_report_ < 500000) next_report_ += 50000;
      else                            next_report_ += 100000;
      fprintf(stderr, "... finished %d ops%30s\r", done_, "");
      fflush(stderr);
    }
  }

  void Stop(const Slice& name) {
    double finish = Env::Default()->NowMicros() * 1e-6;

    // Pretend at least one op was done in case we are running a benchmark
    // that does not call FinishedSingleOp().
    if (done_ < 1) done_ = 1;

    if (bytes_ > 0) {
      char rate[100];
      snprintf(rate, sizeof(rate), "%6.1f MB/s",
               (bytes_ / 1048576.0) / (finish - start_));
      if (!message_.empty()) {
        message_ = std::string(rate) + " " + message_;
      } else {
        message_ = rate;
      }
    }

    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
            name.ToString().c_str(),
            (finish - start_) * 1e6 / done_,
            (message_.empty() ? "" : " "),
            message_.c_str());
    if (FLAGS_histogram) {
      fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
    }
    fflush(stdout);
  }

 public:
  enum Order {
    SEQUENTIAL,
    RANDOM
  };
  enum DBState {
    FRESH,
    EXISTING
  };

  Benchmark()
  : db_(NULL),
    db_num_(0),
    num_(FLAGS_num),
    reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
    bytes_(0),
    rand_(301) {
    std::vector<std::string> files;
    std::string test_dir;
    Env::Default()->GetTestDirectory(&test_dir);
    Env::Default()->GetChildren(test_dir, &files);
    if (!FLAGS_use_existing_db) {
      for (int i = 0; i < files.size(); i++) {
        if (Slice(files[i]).starts_with("dbbench_sqlite3")) {
          std::string file_name(test_dir);
          file_name += "/";
          file_name += files[i];
          Env::Default()->DeleteFile(file_name.c_str());
        }
      }
    }
  }

  ~Benchmark() {
    int status = sqlite3_close(db_);
    ErrorCheck(status);
  }

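  // Splits FLAGS_benchmarks on commas and runs each named benchmark in order,
  // timing it between Start() and Stop(); the fill* benchmarks are followed
  // by a WAL checkpoint. Unknown names are reported on stderr and skipped.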
  void Run() {
    PrintHeader();
    Open();

    const char* benchmarks = FLAGS_benchmarks;
    while (benchmarks != NULL) {
      const char* sep = strchr(benchmarks, ',');
      Slice name;
      if (sep == NULL) {
        name = benchmarks;
        benchmarks = NULL;
      } else {
        name = Slice(benchmarks, sep - benchmarks);
        benchmarks = sep + 1;
      }

      bytes_ = 0;
      Start();

      bool known = true;
      bool write_sync = false;
      if (name == Slice("fillseq")) {
        Write(write_sync, SEQUENTIAL, FRESH, num_, FLAGS_value_size, 1);
        WalCheckpoint(db_);
      } else if (name == Slice("fillseqbatch")) {
        Write(write_sync, SEQUENTIAL, FRESH, num_, FLAGS_value_size, 1000);
        WalCheckpoint(db_);
      } else if (name == Slice("fillrandom")) {
        Write(write_sync, RANDOM, FRESH, num_, FLAGS_value_size, 1);
        WalCheckpoint(db_);
      } else if (name == Slice("fillrandbatch")) {
        Write(write_sync, RANDOM, FRESH, num_, FLAGS_value_size, 1000);
        WalCheckpoint(db_);
      } else if (name == Slice("overwrite")) {
        Write(write_sync, RANDOM, EXISTING, num_, FLAGS_value_size, 1);
        WalCheckpoint(db_);
      } else if (name == Slice("overwritebatch")) {
        Write(write_sync, RANDOM, EXISTING, num_, FLAGS_value_size, 1000);
        WalCheckpoint(db_);
      } else if (name == Slice("fillrandsync")) {
        write_sync = true;
        Write(write_sync, RANDOM, FRESH, num_ / 100, FLAGS_value_size, 1);
        WalCheckpoint(db_);
      } else if (name == Slice("fillseqsync")) {
        write_sync = true;
        Write(write_sync, SEQUENTIAL, FRESH, num_ / 100, FLAGS_value_size, 1);
        WalCheckpoint(db_);
      } else if (name == Slice("fillrand100K")) {
        Write(write_sync, RANDOM, FRESH, num_ / 1000, 100 * 1000, 1);
        WalCheckpoint(db_);
      } else if (name == Slice("fillseq100K")) {
        Write(write_sync, SEQUENTIAL, FRESH, num_ / 1000, 100 * 1000, 1);
        WalCheckpoint(db_);
      } else if (name == Slice("readseq")) {
        ReadSequential();
      } else if (name == Slice("readrandom")) {
        Read(RANDOM, 1);
      } else if (name == Slice("readrand100K")) {
        int n = reads_;
        reads_ /= 1000;
        Read(RANDOM, 1);
        reads_ = n;
      } else {
        known = false;
        if (name != Slice()) {  // No error message for empty name
          fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
        }
      }
      if (known) {
        Stop(name);
      }
    }
  }

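  // Opens (creating if needed) the next dbbench_sqlite3-<N>.db file in the
  // test directory and applies the benchmark settings: cache_size
  // (FLAGS_num_pages), page_size (when not the 1024-byte default), optional
  // WAL journaling with a 4096-page autocheckpoint, and exclusive locking;
  // finally it creates the test table.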
  void Open() {
    assert(db_ == NULL);

    int status;
    char file_name[100];
    char* err_msg = NULL;
    db_num_++;

    // Open database
    std::string tmp_dir;
    Env::Default()->GetTestDirectory(&tmp_dir);
    snprintf(file_name, sizeof(file_name),
             "%s/dbbench_sqlite3-%d.db",
             tmp_dir.c_str(),
             db_num_);
    status = sqlite3_open(file_name, &db_);
    if (status) {
      fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_));
      exit(1);
    }

    // Change SQLite cache size
    char cache_size[100];
    snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d",
             FLAGS_num_pages);
    status = sqlite3_exec(db_, cache_size, NULL, NULL, &err_msg);
    ExecErrorCheck(status, err_msg);

    // FLAGS_page_size is defaulted to 1024
    if (FLAGS_page_size != 1024) {
      char page_size[100];
      snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d",
               FLAGS_page_size);
      status = sqlite3_exec(db_, page_size, NULL, NULL, &err_msg);
      ExecErrorCheck(status, err_msg);
    }

    // Change journal mode to WAL if WAL enabled flag is on
    if (FLAGS_WAL_enabled) {
      std::string WAL_stmt = "PRAGMA journal_mode = WAL";

      // LevelDB's default cache size is a combined 4 MB
      std::string WAL_checkpoint = "PRAGMA wal_autocheckpoint = 4096";
      status = sqlite3_exec(db_, WAL_stmt.c_str(), NULL, NULL, &err_msg);
      ExecErrorCheck(status, err_msg);
      status = sqlite3_exec(db_, WAL_checkpoint.c_str(), NULL, NULL, &err_msg);
      ExecErrorCheck(status, err_msg);
    }

    // Change locking mode to exclusive and create tables/index for database
    std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE";
    std::string create_stmt =
        "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
    std::string stmt_array[] = { locking_stmt, create_stmt };
    int stmt_array_length = sizeof(stmt_array) / sizeof(std::string);
    for (int i = 0; i < stmt_array_length; i++) {
      status = sqlite3_exec(db_, stmt_array[i].c_str(), NULL, NULL, &err_msg);
      ExecErrorCheck(status, err_msg);
    }
  }

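  // Writes num_entries key/value pairs using "REPLACE INTO test". Keys are
  // 16-byte, zero-padded decimal strings; values come from RandomGenerator.
  // When entries_per_batch > 1 (and --no_transaction is not given), each
  // batch is wrapped in a BEGIN/END TRANSACTION pair. write_sync selects
  // PRAGMA synchronous = FULL versus OFF.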
  void Write(bool write_sync, Order order, DBState state,
             int num_entries, int value_size, int entries_per_batch) {
    // Create new database if state == FRESH
    if (state == FRESH) {
      if (FLAGS_use_existing_db) {
        message_ = "skipping (--use_existing_db is true)";
        return;
      }
      sqlite3_close(db_);
      db_ = NULL;
      Open();
      Start();
    }

    if (num_entries != num_) {
      char msg[100];
      snprintf(msg, sizeof(msg), "(%d ops)", num_entries);
      message_ = msg;
    }

    char* err_msg = NULL;
    int status;

    sqlite3_stmt *replace_stmt, *begin_trans_stmt, *end_trans_stmt;
    std::string replace_str = "REPLACE INTO test (key, value) VALUES (?, ?)";
    std::string begin_trans_str = "BEGIN TRANSACTION;";
    std::string end_trans_str = "END TRANSACTION;";

    // Check for synchronous flag in options
    std::string sync_stmt = (write_sync) ? "PRAGMA synchronous = FULL" :
                                           "PRAGMA synchronous = OFF";
    status = sqlite3_exec(db_, sync_stmt.c_str(), NULL, NULL, &err_msg);
    ExecErrorCheck(status, err_msg);

    // Preparing sqlite3 statements
    status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1,
                                &replace_stmt, NULL);
    ErrorCheck(status);
    status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
                                &begin_trans_stmt, NULL);
    ErrorCheck(status);
    status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1,
                                &end_trans_stmt, NULL);
    ErrorCheck(status);

    bool transaction = (entries_per_batch > 1);
    for (int i = 0; i < num_entries; i += entries_per_batch) {
      // Begin write transaction
      if (FLAGS_transaction && transaction) {
        status = sqlite3_step(begin_trans_stmt);
        StepErrorCheck(status);
        status = sqlite3_reset(begin_trans_stmt);
        ErrorCheck(status);
      }

      // Create and execute SQL statements
      for (int j = 0; j < entries_per_batch; j++) {
        const char* value = gen_.Generate(value_size).data();

        // Create values for key-value pair
        const int k = (order == SEQUENTIAL) ? i + j :
                                              (rand_.Next() % num_entries);
        char key[100];
        snprintf(key, sizeof(key), "%016d", k);

        // Bind KV values into replace_stmt
        status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC);
        ErrorCheck(status);
        status = sqlite3_bind_blob(replace_stmt, 2, value,
                                   value_size, SQLITE_STATIC);
        ErrorCheck(status);

        // Execute replace_stmt
        bytes_ += value_size + strlen(key);
        status = sqlite3_step(replace_stmt);
        StepErrorCheck(status);

        // Reset SQLite statement for another use
        status = sqlite3_clear_bindings(replace_stmt);
        ErrorCheck(status);
        status = sqlite3_reset(replace_stmt);
        ErrorCheck(status);

        FinishedSingleOp();
      }

      // End write transaction
      if (FLAGS_transaction && transaction) {
        status = sqlite3_step(end_trans_stmt);
        StepErrorCheck(status);
        status = sqlite3_reset(end_trans_stmt);
        ErrorCheck(status);
      }
    }

    status = sqlite3_finalize(replace_stmt);
    ErrorCheck(status);
    status = sqlite3_finalize(begin_trans_stmt);
    ErrorCheck(status);
    status = sqlite3_finalize(end_trans_stmt);
    ErrorCheck(status);
  }

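  // Performs reads_ point lookups ("SELECT * FROM test WHERE key = ?").
  // Keys are chosen sequentially or at random depending on `order`; as in
  // Write(), batches larger than one entry are wrapped in a transaction.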
  void Read(Order order, int entries_per_batch) {
    int status;
    sqlite3_stmt *read_stmt, *begin_trans_stmt, *end_trans_stmt;

    std::string read_str = "SELECT * FROM test WHERE key = ?";
    std::string begin_trans_str = "BEGIN TRANSACTION;";
    std::string end_trans_str = "END TRANSACTION;";

    // Preparing sqlite3 statements
    status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
                                &begin_trans_stmt, NULL);
    ErrorCheck(status);
    status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1,
                                &end_trans_stmt, NULL);
    ErrorCheck(status);
    status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, NULL);
    ErrorCheck(status);

    bool transaction = (entries_per_batch > 1);
    for (int i = 0; i < reads_; i += entries_per_batch) {
      // Begin read transaction
      if (FLAGS_transaction && transaction) {
        status = sqlite3_step(begin_trans_stmt);
        StepErrorCheck(status);
        status = sqlite3_reset(begin_trans_stmt);
        ErrorCheck(status);
      }

      // Create and execute SQL statements
      for (int j = 0; j < entries_per_batch; j++) {
        // Create key value
        char key[100];
        int k = (order == SEQUENTIAL) ? i + j : (rand_.Next() % reads_);
        snprintf(key, sizeof(key), "%016d", k);

        // Bind key value into read_stmt
        status = sqlite3_bind_blob(read_stmt, 1, key, 16, SQLITE_STATIC);
        ErrorCheck(status);

        // Execute read statement
        while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) {}
        StepErrorCheck(status);

        // Reset SQLite statement for another use
        status = sqlite3_clear_bindings(read_stmt);
        ErrorCheck(status);
        status = sqlite3_reset(read_stmt);
        ErrorCheck(status);
        FinishedSingleOp();
      }

      // End read transaction
      if (FLAGS_transaction && transaction) {
        status = sqlite3_step(end_trans_stmt);
        StepErrorCheck(status);
        status = sqlite3_reset(end_trans_stmt);
        ErrorCheck(status);
      }
    }

    status = sqlite3_finalize(read_stmt);
    ErrorCheck(status);
    status = sqlite3_finalize(begin_trans_stmt);
    ErrorCheck(status);
    status = sqlite3_finalize(end_trans_stmt);
    ErrorCheck(status);
  }

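  // Scans the table in key order, stopping after reads_ rows and counting
  // key and value bytes so Stop() can report a MB/s rate.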
  void ReadSequential() {
    int status;
    sqlite3_stmt* pStmt;
    std::string read_str = "SELECT * FROM test ORDER BY key";

    status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, NULL);
    ErrorCheck(status);
    for (int i = 0; i < reads_ && SQLITE_ROW == sqlite3_step(pStmt); i++) {
      // Result columns are zero-indexed: 0 is the key, 1 is the value.
      bytes_ += sqlite3_column_bytes(pStmt, 0) + sqlite3_column_bytes(pStmt, 1);
      FinishedSingleOp();
    }

    status = sqlite3_finalize(pStmt);
    ErrorCheck(status);
  }

};

}  // namespace leveldb

int main(int argc, char** argv) {
  std::string default_db_path;
  for (int i = 1; i < argc; i++) {
    double d;
    int n;
    char junk;
    if (leveldb::Slice(argv[i]).starts_with("--benchmarks=")) {
      FLAGS_benchmarks = argv[i] + strlen("--benchmarks=");
    } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      FLAGS_histogram = n;
    } else if (sscanf(argv[i], "--compression_ratio=%lf%c", &d, &junk) == 1) {
      FLAGS_compression_ratio = d;
    } else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      FLAGS_use_existing_db = n;
    } else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) {
      FLAGS_num = n;
    } else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) {
      FLAGS_reads = n;
    } else if (sscanf(argv[i], "--value_size=%d%c", &n, &junk) == 1) {
      FLAGS_value_size = n;
    } else if (leveldb::Slice(argv[i]) == leveldb::Slice("--no_transaction")) {
      FLAGS_transaction = false;
    } else if (sscanf(argv[i], "--page_size=%d%c", &n, &junk) == 1) {
      FLAGS_page_size = n;
    } else if (sscanf(argv[i], "--num_pages=%d%c", &n, &junk) == 1) {
      FLAGS_num_pages = n;
    } else if (sscanf(argv[i], "--WAL_enabled=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      FLAGS_WAL_enabled = n;
    } else if (strncmp(argv[i], "--db=", 5) == 0) {
      FLAGS_db = argv[i] + 5;
    } else {
      fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
      exit(1);
    }
  }

  // Choose a location for the test database if none given with --db=<path>
  if (FLAGS_db == NULL) {
    leveldb::Env::Default()->GetTestDirectory(&default_db_path);
    default_db_path += "/dbbench";
    FLAGS_db = default_db_path.c_str();
  }

  leveldb::Benchmark benchmark;
  benchmark.Run();
  return 0;
}