1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
5
6 #ifndef GFLAGS
7 #include <cstdio>
// Fallback entry point for builds without gflags: this tool depends on
// gflags for all of its command-line options, so report the problem and
// exit with failure.
int main() {
  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
  return 1;
}
12 #else
13
14 #include <stdio.h>
15 #include <sys/types.h>
16 #include <cinttypes>
17
18 #include "cache/simple_deleter.h"
19 #include "port/port.h"
20 #include "rocksdb/cache.h"
21 #include "rocksdb/db.h"
22 #include "rocksdb/env.h"
23 #include "util/gflags_compat.h"
24 #include "util/mutexlock.h"
25 #include "util/random.h"
26
using GFLAGS_NAMESPACE::ParseCommandLineFlags;

static const uint32_t KB = 1024;

// Benchmark configuration knobs; the effective values are echoed by
// CacheBench::PrintEnv() before the run starts.
DEFINE_int32(threads, 16, "Number of concurrent threads to run.");
DEFINE_int64(cache_size, 8 * KB * KB,
             "Number of bytes to use as a cache of uncompressed data.");
DEFINE_int32(num_shard_bits, 4, "shard_bits.");

// Keys are drawn uniformly at random from [0, max_key).
DEFINE_int64(max_key, 1 * KB * KB * KB, "Max number of key to place in cache");
DEFINE_uint64(ops_per_thread, 1200000, "Number of operations per thread.");

DEFINE_bool(populate_cache, false, "Populate cache before operations");
// The three percentages below are expected to sum to at most 100; any
// remainder of the distribution becomes a no-op.
DEFINE_int32(insert_percent, 40,
             "Ratio of insert to total workload (expressed as a percentage)");
DEFINE_int32(lookup_percent, 50,
             "Ratio of lookup to total workload (expressed as a percentage)");
DEFINE_int32(erase_percent, 10,
             "Ratio of erase to total workload (expressed as a percentage)");

// When true, benchmark ClockCache instead of the default LRUCache; the
// program exits early if ClockCache is unsupported on this platform.
DEFINE_bool(use_clock_cache, false, "");
48
49 namespace ROCKSDB_NAMESPACE {
50
51 class CacheBench;
52 namespace {
53
54 // State shared by all concurrent executions of the same benchmark.
55 class SharedState {
56 public:
SharedState(CacheBench * cache_bench)57 explicit SharedState(CacheBench* cache_bench)
58 : cv_(&mu_),
59 num_threads_(FLAGS_threads),
60 num_initialized_(0),
61 start_(false),
62 num_done_(0),
63 cache_bench_(cache_bench) {
64 }
65
~SharedState()66 ~SharedState() {}
67
GetMutex()68 port::Mutex* GetMutex() {
69 return &mu_;
70 }
71
GetCondVar()72 port::CondVar* GetCondVar() {
73 return &cv_;
74 }
75
GetCacheBench() const76 CacheBench* GetCacheBench() const {
77 return cache_bench_;
78 }
79
IncInitialized()80 void IncInitialized() {
81 num_initialized_++;
82 }
83
IncDone()84 void IncDone() {
85 num_done_++;
86 }
87
AllInitialized() const88 bool AllInitialized() const {
89 return num_initialized_ >= num_threads_;
90 }
91
AllDone() const92 bool AllDone() const {
93 return num_done_ >= num_threads_;
94 }
95
SetStart()96 void SetStart() {
97 start_ = true;
98 }
99
Started() const100 bool Started() const {
101 return start_;
102 }
103
104 private:
105 port::Mutex mu_;
106 port::CondVar cv_;
107
108 const uint64_t num_threads_;
109 uint64_t num_initialized_;
110 bool start_;
111 uint64_t num_done_;
112
113 CacheBench* cache_bench_;
114 };
115
// Per-thread state for concurrent executions of the same benchmark.
struct ThreadState {
  uint32_t tid;         // worker index in [0, FLAGS_threads)
  Random rnd;           // per-thread RNG; seed 1000 + tid keeps streams distinct
  SharedState* shared;  // benchmark-wide coordination state; not owned

  ThreadState(uint32_t index, SharedState* _shared)
      : tid(index), rnd(1000 + index), shared(_shared) {}
};
125 } // namespace
126
127 class CacheBench {
128 public:
CacheBench()129 CacheBench() : num_threads_(FLAGS_threads) {
130 if (FLAGS_use_clock_cache) {
131 cache_ = NewClockCache(FLAGS_cache_size, FLAGS_num_shard_bits);
132 if (!cache_) {
133 fprintf(stderr, "Clock cache not supported.\n");
134 exit(1);
135 }
136 } else {
137 cache_ = NewLRUCache(FLAGS_cache_size, FLAGS_num_shard_bits);
138 }
139 }
140
~CacheBench()141 ~CacheBench() {}
142
PopulateCache()143 void PopulateCache() {
144 Random rnd(1);
145 for (int64_t i = 0; i < FLAGS_cache_size; i++) {
146 uint64_t rand_key = rnd.Next() % FLAGS_max_key;
147 // Cast uint64* to be char*, data would be copied to cache
148 Slice key(reinterpret_cast<char*>(&rand_key), 8);
149 // do insert
150 cache_->Insert(key, new char[10], 1,
151 SimpleDeleter<char[]>::GetInstance());
152 }
153 }
154
Run()155 bool Run() {
156 ROCKSDB_NAMESPACE::Env* env = ROCKSDB_NAMESPACE::Env::Default();
157
158 PrintEnv();
159 SharedState shared(this);
160 std::vector<ThreadState*> threads(num_threads_);
161 for (uint32_t i = 0; i < num_threads_; i++) {
162 threads[i] = new ThreadState(i, &shared);
163 env->StartThread(ThreadBody, threads[i]);
164 }
165 {
166 MutexLock l(shared.GetMutex());
167 while (!shared.AllInitialized()) {
168 shared.GetCondVar()->Wait();
169 }
170 // Record start time
171 uint64_t start_time = env->NowMicros();
172
173 // Start all threads
174 shared.SetStart();
175 shared.GetCondVar()->SignalAll();
176
177 // Wait threads to complete
178 while (!shared.AllDone()) {
179 shared.GetCondVar()->Wait();
180 }
181
182 // Record end time
183 uint64_t end_time = env->NowMicros();
184 double elapsed = static_cast<double>(end_time - start_time) * 1e-6;
185 uint32_t qps = static_cast<uint32_t>(
186 static_cast<double>(FLAGS_threads * FLAGS_ops_per_thread) / elapsed);
187 fprintf(stdout, "Complete in %.3f s; QPS = %u\n", elapsed, qps);
188 }
189 return true;
190 }
191
192 private:
193 std::shared_ptr<Cache> cache_;
194 uint32_t num_threads_;
195
ThreadBody(void * v)196 static void ThreadBody(void* v) {
197 ThreadState* thread = reinterpret_cast<ThreadState*>(v);
198 SharedState* shared = thread->shared;
199
200 {
201 MutexLock l(shared->GetMutex());
202 shared->IncInitialized();
203 if (shared->AllInitialized()) {
204 shared->GetCondVar()->SignalAll();
205 }
206 while (!shared->Started()) {
207 shared->GetCondVar()->Wait();
208 }
209 }
210 thread->shared->GetCacheBench()->OperateCache(thread);
211
212 {
213 MutexLock l(shared->GetMutex());
214 shared->IncDone();
215 if (shared->AllDone()) {
216 shared->GetCondVar()->SignalAll();
217 }
218 }
219 }
220
OperateCache(ThreadState * thread)221 void OperateCache(ThreadState* thread) {
222 for (uint64_t i = 0; i < FLAGS_ops_per_thread; i++) {
223 uint64_t rand_key = thread->rnd.Next() % FLAGS_max_key;
224 // Cast uint64* to be char*, data would be copied to cache
225 Slice key(reinterpret_cast<char*>(&rand_key), 8);
226 int32_t prob_op = thread->rnd.Uniform(100);
227 if (prob_op >= 0 && prob_op < FLAGS_insert_percent) {
228 // do insert
229 cache_->Insert(key, new char[10], 1,
230 SimpleDeleter<char[]>::GetInstance());
231 } else if (prob_op -= FLAGS_insert_percent &&
232 prob_op < FLAGS_lookup_percent) {
233 // do lookup
234 auto handle = cache_->Lookup(key);
235 if (handle) {
236 cache_->Release(handle);
237 }
238 } else if (prob_op -= FLAGS_lookup_percent &&
239 prob_op < FLAGS_erase_percent) {
240 // do erase
241 cache_->Erase(key);
242 }
243 }
244 }
245
PrintEnv() const246 void PrintEnv() const {
247 printf("RocksDB version : %d.%d\n", kMajorVersion, kMinorVersion);
248 printf("Number of threads : %d\n", FLAGS_threads);
249 printf("Ops per thread : %" PRIu64 "\n", FLAGS_ops_per_thread);
250 printf("Cache size : %" PRIu64 "\n", FLAGS_cache_size);
251 printf("Num shard bits : %d\n", FLAGS_num_shard_bits);
252 printf("Max key : %" PRIu64 "\n", FLAGS_max_key);
253 printf("Populate cache : %d\n", FLAGS_populate_cache);
254 printf("Insert percentage : %d%%\n", FLAGS_insert_percent);
255 printf("Lookup percentage : %d%%\n", FLAGS_lookup_percent);
256 printf("Erase percentage : %d%%\n", FLAGS_erase_percent);
257 printf("----------------------------\n");
258 }
259 };
260 } // namespace ROCKSDB_NAMESPACE
261
main(int argc,char ** argv)262 int main(int argc, char** argv) {
263 ParseCommandLineFlags(&argc, &argv, true);
264
265 if (FLAGS_threads <= 0) {
266 fprintf(stderr, "threads number <= 0\n");
267 exit(1);
268 }
269
270 ROCKSDB_NAMESPACE::CacheBench bench;
271 if (FLAGS_populate_cache) {
272 bench.PopulateCache();
273 }
274 if (bench.Run()) {
275 return 0;
276 } else {
277 return 1;
278 }
279 }
280
281 #endif // GFLAGS
282