1 // Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
2 // This source code is licensed under both the GPLv2 (found in the
3 // COPYING file in the root directory) and Apache 2.0 License
4 // (found in the LICENSE.Apache file in the root directory).
5 //
6 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
7 // Use of this source code is governed by a BSD-style license that can be
8 // found in the LICENSE file. See the AUTHORS file for names of contributors.
9
10 #include "env/mock_env.h"
11 #include <algorithm>
12 #include <chrono>
13 #include "file/filename.h"
14 #include "port/sys_time.h"
15 #include "util/cast_util.h"
16 #include "util/murmurhash.h"
17 #include "util/random.h"
18 #include "util/rate_limiter.h"
19
20 namespace ROCKSDB_NAMESPACE {
21
22 class MemFile {
23 public:
MemFile(Env * env,const std::string & fn,bool _is_lock_file=false)24 explicit MemFile(Env* env, const std::string& fn, bool _is_lock_file = false)
25 : env_(env),
26 fn_(fn),
27 refs_(0),
28 is_lock_file_(_is_lock_file),
29 locked_(false),
30 size_(0),
31 modified_time_(Now()),
32 rnd_(static_cast<uint32_t>(
33 MurmurHash(fn.data(), static_cast<int>(fn.size()), 0))),
34 fsynced_bytes_(0) {}
35 // No copying allowed.
36 MemFile(const MemFile&) = delete;
37 void operator=(const MemFile&) = delete;
38
Ref()39 void Ref() {
40 MutexLock lock(&mutex_);
41 ++refs_;
42 }
43
is_lock_file() const44 bool is_lock_file() const { return is_lock_file_; }
45
Lock()46 bool Lock() {
47 assert(is_lock_file_);
48 MutexLock lock(&mutex_);
49 if (locked_) {
50 return false;
51 } else {
52 locked_ = true;
53 return true;
54 }
55 }
56
Unlock()57 void Unlock() {
58 assert(is_lock_file_);
59 MutexLock lock(&mutex_);
60 locked_ = false;
61 }
62
Unref()63 void Unref() {
64 bool do_delete = false;
65 {
66 MutexLock lock(&mutex_);
67 --refs_;
68 assert(refs_ >= 0);
69 if (refs_ <= 0) {
70 do_delete = true;
71 }
72 }
73
74 if (do_delete) {
75 delete this;
76 }
77 }
78
Size() const79 uint64_t Size() const { return size_; }
80
Truncate(size_t size)81 void Truncate(size_t size) {
82 MutexLock lock(&mutex_);
83 if (size < size_) {
84 data_.resize(size);
85 size_ = size;
86 }
87 }
88
CorruptBuffer()89 void CorruptBuffer() {
90 if (fsynced_bytes_ >= size_) {
91 return;
92 }
93 uint64_t buffered_bytes = size_ - fsynced_bytes_;
94 uint64_t start =
95 fsynced_bytes_ + rnd_.Uniform(static_cast<int>(buffered_bytes));
96 uint64_t end = std::min(start + 512, size_.load());
97 MutexLock lock(&mutex_);
98 for (uint64_t pos = start; pos < end; ++pos) {
99 data_[static_cast<size_t>(pos)] = static_cast<char>(rnd_.Uniform(256));
100 }
101 }
102
Read(uint64_t offset,size_t n,Slice * result,char * scratch) const103 Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const {
104 MutexLock lock(&mutex_);
105 const uint64_t available = Size() - std::min(Size(), offset);
106 size_t offset_ = static_cast<size_t>(offset);
107 if (n > available) {
108 n = static_cast<size_t>(available);
109 }
110 if (n == 0) {
111 *result = Slice();
112 return Status::OK();
113 }
114 if (scratch) {
115 memcpy(scratch, &(data_[offset_]), n);
116 *result = Slice(scratch, n);
117 } else {
118 *result = Slice(&(data_[offset_]), n);
119 }
120 return Status::OK();
121 }
122
Write(uint64_t offset,const Slice & data)123 Status Write(uint64_t offset, const Slice& data) {
124 MutexLock lock(&mutex_);
125 size_t offset_ = static_cast<size_t>(offset);
126 if (offset + data.size() > data_.size()) {
127 data_.resize(offset_ + data.size());
128 }
129 data_.replace(offset_, data.size(), data.data(), data.size());
130 size_ = data_.size();
131 modified_time_ = Now();
132 return Status::OK();
133 }
134
Append(const Slice & data)135 Status Append(const Slice& data) {
136 MutexLock lock(&mutex_);
137 data_.append(data.data(), data.size());
138 size_ = data_.size();
139 modified_time_ = Now();
140 return Status::OK();
141 }
142
Fsync()143 Status Fsync() {
144 fsynced_bytes_ = size_.load();
145 return Status::OK();
146 }
147
ModifiedTime() const148 uint64_t ModifiedTime() const { return modified_time_; }
149
150 private:
Now()151 uint64_t Now() {
152 int64_t unix_time = 0;
153 auto s = env_->GetCurrentTime(&unix_time);
154 assert(s.ok());
155 return static_cast<uint64_t>(unix_time);
156 }
157
158 // Private since only Unref() should be used to delete it.
~MemFile()159 ~MemFile() { assert(refs_ == 0); }
160
161 Env* env_;
162 const std::string fn_;
163 mutable port::Mutex mutex_;
164 int refs_;
165 bool is_lock_file_;
166 bool locked_;
167
168 // Data written into this file, all bytes before fsynced_bytes are
169 // persistent.
170 std::string data_;
171 std::atomic<uint64_t> size_;
172 std::atomic<uint64_t> modified_time_;
173
174 Random rnd_;
175 std::atomic<uint64_t> fsynced_bytes_;
176 };
177
178 namespace {
179
180 class MockSequentialFile : public SequentialFile {
181 public:
MockSequentialFile(MemFile * file)182 explicit MockSequentialFile(MemFile* file) : file_(file), pos_(0) {
183 file_->Ref();
184 }
185
~MockSequentialFile()186 ~MockSequentialFile() override { file_->Unref(); }
187
Read(size_t n,Slice * result,char * scratch)188 Status Read(size_t n, Slice* result, char* scratch) override {
189 Status s = file_->Read(pos_, n, result, scratch);
190 if (s.ok()) {
191 pos_ += result->size();
192 }
193 return s;
194 }
195
Skip(uint64_t n)196 Status Skip(uint64_t n) override {
197 if (pos_ > file_->Size()) {
198 return Status::IOError("pos_ > file_->Size()");
199 }
200 const uint64_t available = file_->Size() - pos_;
201 if (n > available) {
202 n = available;
203 }
204 pos_ += static_cast<size_t>(n);
205 return Status::OK();
206 }
207
208 private:
209 MemFile* file_;
210 size_t pos_;
211 };
212
// Read-only random-access handle: a thin forwarder over a shared MemFile.
// Holds a reference to the file for the lifetime of the handle.
class MockRandomAccessFile : public RandomAccessFile {
 public:
  explicit MockRandomAccessFile(MemFile* file) : file_(file) { file_->Ref(); }

  ~MockRandomAccessFile() override { file_->Unref(); }

  // Positional read; forwards directly to MemFile::Read.
  Status Read(uint64_t offset, size_t n, Slice* result,
              char* scratch) const override {
    return file_->Read(offset, n, result, scratch);
  }

 private:
  MemFile* file_;  // not owned; ref-counted
};
227
// Read/write random-access handle over a shared MemFile.
// Holds a reference to the file for the lifetime of the handle.
class MockRandomRWFile : public RandomRWFile {
 public:
  explicit MockRandomRWFile(MemFile* file) : file_(file) { file_->Ref(); }

  ~MockRandomRWFile() override { file_->Unref(); }

  Status Write(uint64_t offset, const Slice& data) override {
    return file_->Write(offset, data);
  }

  Status Read(uint64_t offset, size_t n, Slice* result,
              char* scratch) const override {
    return file_->Read(offset, n, result, scratch);
  }

  // Close syncs so the data counts as persisted (see MemFile::Fsync).
  Status Close() override { return file_->Fsync(); }

  // Nothing is buffered in this handle, so Flush is a no-op.
  Status Flush() override { return Status::OK(); }

  Status Sync() override { return file_->Fsync(); }

 private:
  MemFile* file_;  // not owned; ref-counted
};
252
253 class MockWritableFile : public WritableFile {
254 public:
MockWritableFile(MemFile * file,RateLimiter * rate_limiter)255 MockWritableFile(MemFile* file, RateLimiter* rate_limiter)
256 : file_(file), rate_limiter_(rate_limiter) {
257 file_->Ref();
258 }
259
~MockWritableFile()260 ~MockWritableFile() override { file_->Unref(); }
261
Append(const Slice & data)262 Status Append(const Slice& data) override {
263 size_t bytes_written = 0;
264 while (bytes_written < data.size()) {
265 auto bytes = RequestToken(data.size() - bytes_written);
266 Status s = file_->Append(Slice(data.data() + bytes_written, bytes));
267 if (!s.ok()) {
268 return s;
269 }
270 bytes_written += bytes;
271 }
272 return Status::OK();
273 }
Truncate(uint64_t size)274 Status Truncate(uint64_t size) override {
275 file_->Truncate(static_cast<size_t>(size));
276 return Status::OK();
277 }
Close()278 Status Close() override { return file_->Fsync(); }
279
Flush()280 Status Flush() override { return Status::OK(); }
281
Sync()282 Status Sync() override { return file_->Fsync(); }
283
GetFileSize()284 uint64_t GetFileSize() override { return file_->Size(); }
285
286 private:
RequestToken(size_t bytes)287 inline size_t RequestToken(size_t bytes) {
288 if (rate_limiter_ && io_priority_ < Env::IO_TOTAL) {
289 bytes = std::min(
290 bytes, static_cast<size_t>(rate_limiter_->GetSingleBurstBytes()));
291 rate_limiter_->Request(bytes, io_priority_);
292 }
293 return bytes;
294 }
295
296 MemFile* file_;
297 RateLimiter* rate_limiter_;
298 };
299
// Directory handle stub: all data lives in memory, so Fsync is a no-op.
class MockEnvDirectory : public Directory {
 public:
  Status Fsync() override { return Status::OK(); }
};
304
// FileLock token returned by MockEnv::LockFile. Records only the normalized
// lock-file path so UnlockFile can locate the MemFile again.
class MockEnvFileLock : public FileLock {
 public:
  explicit MockEnvFileLock(const std::string& fname) : fname_(fname) {}

  std::string FileName() const { return fname_; }

 private:
  const std::string fname_;
};
314
// Logger that writes formatted log lines to an in-memory WritableFile.
// Mirrors the production PosixLogger's two-pass formatting strategy.
class TestMemLogger : public Logger {
 private:
  std::unique_ptr<WritableFile> file_;
  std::atomic_size_t log_size_;  // total bytes appended so far
  static const uint64_t flush_every_seconds_ = 5;
  std::atomic_uint_fast64_t last_flush_micros_;
  Env* env_;
  std::atomic<bool> flush_pending_;

 public:
  TestMemLogger(std::unique_ptr<WritableFile> f, Env* env,
                const InfoLogLevel log_level = InfoLogLevel::ERROR_LEVEL)
      : Logger(log_level),
        file_(std::move(f)),
        log_size_(0),
        last_flush_micros_(0),
        env_(env),
        flush_pending_(false) {}
  ~TestMemLogger() override {}

  // Flush only clears the pending flag and stamps the flush time; the
  // underlying MockWritableFile::Flush is a no-op anyway.
  void Flush() override {
    if (flush_pending_) {
      flush_pending_ = false;
    }
    last_flush_micros_ = env_->NowMicros();
  }

  using Logger::Logv;
  void Logv(const char* format, va_list ap) override {
    // We try twice: the first time with a fixed-size stack allocated buffer,
    // and the second time with a much larger dynamically allocated buffer.
    char buffer[500];
    for (int iter = 0; iter < 2; iter++) {
      char* base;
      int bufsize;
      if (iter == 0) {
        bufsize = sizeof(buffer);
        base = buffer;
      } else {
        bufsize = 30000;
        base = new char[bufsize];
      }
      char* p = base;
      char* limit = base + bufsize;

      // Prefix each line with a "YYYY/MM/DD-HH:MM:SS.uuuuuu " timestamp
      // taken from the real wall clock (not the mock env's fake clock).
      struct timeval now_tv;
      gettimeofday(&now_tv, nullptr);
      const time_t seconds = now_tv.tv_sec;
      struct tm t;
      memset(&t, 0, sizeof(t));
      struct tm* ret __attribute__((__unused__));
      ret = localtime_r(&seconds, &t);
      assert(ret);
      p += snprintf(p, limit - p, "%04d/%02d/%02d-%02d:%02d:%02d.%06d ",
                    t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour,
                    t.tm_min, t.tm_sec, static_cast<int>(now_tv.tv_usec));

      // Print the message
      if (p < limit) {
        va_list backup_ap;
        // Copy ap: a va_list may only be consumed once, and the second
        // iteration needs a fresh copy after a failed first pass.
        va_copy(backup_ap, ap);
        p += vsnprintf(p, limit - p, format, backup_ap);
        va_end(backup_ap);
      }

      // Truncate to available space if necessary
      if (p >= limit) {
        if (iter == 0) {
          continue;  // Try again with larger buffer
        } else {
          p = limit - 1;
        }
      }

      // Add newline if necessary
      if (p == base || p[-1] != '\n') {
        *p++ = '\n';
      }

      assert(p <= limit);
      const size_t write_size = p - base;

      // NOTE(review): Append's status is ignored; acceptable for a test
      // logger but a dropped log line would be silent.
      file_->Append(Slice(base, write_size));
      flush_pending_ = true;
      log_size_ += write_size;
      uint64_t now_micros =
          static_cast<uint64_t>(now_tv.tv_sec) * 1000000 + now_tv.tv_usec;
      if (now_micros - last_flush_micros_ >= flush_every_seconds_ * 1000000) {
        flush_pending_ = false;
        last_flush_micros_ = now_micros;
      }
      if (base != buffer) {
        delete[] base;
      }
      break;
    }
  }
  size_t GetLogFileSize() const override { return log_size_; }
};
414
415 } // Anonymous namespace
416
// Wraps base_env; fake_sleep_micros_ accumulates FakeSleepForMicroseconds()
// offsets that are added on top of the wrapped env's clock.
MockEnv::MockEnv(Env* base_env) : EnvWrapper(base_env), fake_sleep_micros_(0) {}
418
~MockEnv()419 MockEnv::~MockEnv() {
420 for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i) {
421 i->second->Unref();
422 }
423 }
424
425 // Partial implementation of the Env interface.
NewSequentialFile(const std::string & fname,std::unique_ptr<SequentialFile> * result,const EnvOptions &)426 Status MockEnv::NewSequentialFile(const std::string& fname,
427 std::unique_ptr<SequentialFile>* result,
428 const EnvOptions& /*soptions*/) {
429 auto fn = NormalizePath(fname);
430 MutexLock lock(&mutex_);
431 if (file_map_.find(fn) == file_map_.end()) {
432 *result = nullptr;
433 return Status::IOError(fn, "File not found");
434 }
435 auto* f = file_map_[fn];
436 if (f->is_lock_file()) {
437 return Status::InvalidArgument(fn, "Cannot open a lock file.");
438 }
439 result->reset(new MockSequentialFile(f));
440 return Status::OK();
441 }
442
NewRandomAccessFile(const std::string & fname,std::unique_ptr<RandomAccessFile> * result,const EnvOptions &)443 Status MockEnv::NewRandomAccessFile(const std::string& fname,
444 std::unique_ptr<RandomAccessFile>* result,
445 const EnvOptions& /*soptions*/) {
446 auto fn = NormalizePath(fname);
447 MutexLock lock(&mutex_);
448 if (file_map_.find(fn) == file_map_.end()) {
449 *result = nullptr;
450 return Status::IOError(fn, "File not found");
451 }
452 auto* f = file_map_[fn];
453 if (f->is_lock_file()) {
454 return Status::InvalidArgument(fn, "Cannot open a lock file.");
455 }
456 result->reset(new MockRandomAccessFile(f));
457 return Status::OK();
458 }
459
NewRandomRWFile(const std::string & fname,std::unique_ptr<RandomRWFile> * result,const EnvOptions &)460 Status MockEnv::NewRandomRWFile(const std::string& fname,
461 std::unique_ptr<RandomRWFile>* result,
462 const EnvOptions& /*soptions*/) {
463 auto fn = NormalizePath(fname);
464 MutexLock lock(&mutex_);
465 if (file_map_.find(fn) == file_map_.end()) {
466 *result = nullptr;
467 return Status::IOError(fn, "File not found");
468 }
469 auto* f = file_map_[fn];
470 if (f->is_lock_file()) {
471 return Status::InvalidArgument(fn, "Cannot open a lock file.");
472 }
473 result->reset(new MockRandomRWFile(f));
474 return Status::OK();
475 }
476
// Renames old_fname over fname, then reopens fname for writing (which
// truncates it via NewWritableFile's delete-on-create behavior).
Status MockEnv::ReuseWritableFile(const std::string& fname,
                                  const std::string& old_fname,
                                  std::unique_ptr<WritableFile>* result,
                                  const EnvOptions& options) {
  auto s = RenameFile(old_fname, fname);
  if (!s.ok()) {
    return s;
  }
  result->reset();
  return NewWritableFile(fname, result, options);
}
488
NewWritableFile(const std::string & fname,std::unique_ptr<WritableFile> * result,const EnvOptions & env_options)489 Status MockEnv::NewWritableFile(const std::string& fname,
490 std::unique_ptr<WritableFile>* result,
491 const EnvOptions& env_options) {
492 auto fn = NormalizePath(fname);
493 MutexLock lock(&mutex_);
494 if (file_map_.find(fn) != file_map_.end()) {
495 DeleteFileInternal(fn);
496 }
497 MemFile* file = new MemFile(this, fn, false);
498 file->Ref();
499 file_map_[fn] = file;
500
501 result->reset(new MockWritableFile(file, env_options.rate_limiter));
502 return Status::OK();
503 }
504
// Directory fsync is a no-op in the mock env, so the name is ignored and a
// stub Directory object is returned unconditionally.
Status MockEnv::NewDirectory(const std::string& /*name*/,
                             std::unique_ptr<Directory>* result) {
  result->reset(new MockEnvDirectory());
  return Status::OK();
}
510
FileExists(const std::string & fname)511 Status MockEnv::FileExists(const std::string& fname) {
512 auto fn = NormalizePath(fname);
513 MutexLock lock(&mutex_);
514 if (file_map_.find(fn) != file_map_.end()) {
515 // File exists
516 return Status::OK();
517 }
518 // Now also check if fn exists as a dir
519 for (const auto& iter : file_map_) {
520 const std::string& filename = iter.first;
521 if (filename.size() >= fn.size() + 1 && filename[fn.size()] == '/' &&
522 Slice(filename).starts_with(Slice(fn))) {
523 return Status::OK();
524 }
525 }
526 return Status::NotFound();
527 }
528
// Lists the immediate children of `dir`. There are no real directory
// entries, so children are derived from map keys that start with "dir/".
Status MockEnv::GetChildren(const std::string& dir,
                            std::vector<std::string>* result) {
  auto d = NormalizePath(dir);
  bool found_dir = false;
  {
    MutexLock lock(&mutex_);
    result->clear();
    for (const auto& iter : file_map_) {
      const std::string& filename = iter.first;

      if (filename == d) {
        // The directory itself has an entry (created via CreateDir).
        found_dir = true;
      } else if (filename.size() >= d.size() + 1 && filename[d.size()] == '/' &&
                 Slice(filename).starts_with(Slice(d))) {
        found_dir = true;
        size_t next_slash = filename.find('/', d.size() + 1);
        if (next_slash != std::string::npos) {
          // Deeper path: report only the first component below d.
          result->push_back(
              filename.substr(d.size() + 1, next_slash - d.size() - 1));
        } else {
          result->push_back(filename.substr(d.size() + 1));
        }
      }
    }
  }
  // std::unique removes only *adjacent* duplicates; this relies on
  // file_map_ being an ordered map, so equal child names come out together.
  result->erase(std::unique(result->begin(), result->end()), result->end());
  return found_dir ? Status::OK() : Status::NotFound();
}
557
DeleteFileInternal(const std::string & fname)558 void MockEnv::DeleteFileInternal(const std::string& fname) {
559 assert(fname == NormalizePath(fname));
560 const auto& pair = file_map_.find(fname);
561 if (pair != file_map_.end()) {
562 pair->second->Unref();
563 file_map_.erase(fname);
564 }
565 }
566
DeleteFile(const std::string & fname)567 Status MockEnv::DeleteFile(const std::string& fname) {
568 auto fn = NormalizePath(fname);
569 MutexLock lock(&mutex_);
570 if (file_map_.find(fn) == file_map_.end()) {
571 return Status::IOError(fn, "File not found");
572 }
573
574 DeleteFileInternal(fn);
575 return Status::OK();
576 }
577
Truncate(const std::string & fname,size_t size)578 Status MockEnv::Truncate(const std::string& fname, size_t size) {
579 auto fn = NormalizePath(fname);
580 MutexLock lock(&mutex_);
581 auto iter = file_map_.find(fn);
582 if (iter == file_map_.end()) {
583 return Status::IOError(fn, "File not found");
584 }
585 iter->second->Truncate(size);
586 return Status::OK();
587 }
588
CreateDir(const std::string & dirname)589 Status MockEnv::CreateDir(const std::string& dirname) {
590 auto dn = NormalizePath(dirname);
591 MutexLock lock(&mutex_);
592 if (file_map_.find(dn) == file_map_.end()) {
593 MemFile* file = new MemFile(this, dn, false);
594 file->Ref();
595 file_map_[dn] = file;
596 } else {
597 return Status::IOError();
598 }
599 return Status::OK();
600 }
601
Status MockEnv::CreateDirIfMissing(const std::string& dirname) {
  // CreateDir() returns IOError when the directory already exists; that
  // status is intentionally discarded here ("if missing" semantics).
  // NOTE(review): this also swallows any other CreateDir failure — confirm
  // that is acceptable for a test env.
  CreateDir(dirname);
  return Status::OK();
}
606
// Directories are stored as plain MemFile entries (see CreateDir), so
// deleting a directory is the same operation as deleting a file.
Status MockEnv::DeleteDir(const std::string& dirname) {
  return DeleteFile(dirname);
}
610
GetFileSize(const std::string & fname,uint64_t * file_size)611 Status MockEnv::GetFileSize(const std::string& fname, uint64_t* file_size) {
612 auto fn = NormalizePath(fname);
613 MutexLock lock(&mutex_);
614 auto iter = file_map_.find(fn);
615 if (iter == file_map_.end()) {
616 return Status::IOError(fn, "File not found");
617 }
618
619 *file_size = iter->second->Size();
620 return Status::OK();
621 }
622
GetFileModificationTime(const std::string & fname,uint64_t * time)623 Status MockEnv::GetFileModificationTime(const std::string& fname,
624 uint64_t* time) {
625 auto fn = NormalizePath(fname);
626 MutexLock lock(&mutex_);
627 auto iter = file_map_.find(fn);
628 if (iter == file_map_.end()) {
629 return Status::IOError(fn, "File not found");
630 }
631 *time = iter->second->ModifiedTime();
632 return Status::OK();
633 }
634
RenameFile(const std::string & src,const std::string & dest)635 Status MockEnv::RenameFile(const std::string& src, const std::string& dest) {
636 auto s = NormalizePath(src);
637 auto t = NormalizePath(dest);
638 MutexLock lock(&mutex_);
639 if (file_map_.find(s) == file_map_.end()) {
640 return Status::IOError(s, "File not found");
641 }
642
643 DeleteFileInternal(t);
644 file_map_[t] = file_map_[s];
645 file_map_.erase(s);
646 return Status::OK();
647 }
648
LinkFile(const std::string & src,const std::string & dest)649 Status MockEnv::LinkFile(const std::string& src, const std::string& dest) {
650 auto s = NormalizePath(src);
651 auto t = NormalizePath(dest);
652 MutexLock lock(&mutex_);
653 if (file_map_.find(s) == file_map_.end()) {
654 return Status::IOError(s, "File not found");
655 }
656
657 DeleteFileInternal(t);
658 file_map_[t] = file_map_[s];
659 file_map_[t]->Ref(); // Otherwise it might get deleted when noone uses s
660 return Status::OK();
661 }
662
NewLogger(const std::string & fname,std::shared_ptr<Logger> * result)663 Status MockEnv::NewLogger(const std::string& fname,
664 std::shared_ptr<Logger>* result) {
665 auto fn = NormalizePath(fname);
666 MutexLock lock(&mutex_);
667 auto iter = file_map_.find(fn);
668 MemFile* file = nullptr;
669 if (iter == file_map_.end()) {
670 file = new MemFile(this, fn, false);
671 file->Ref();
672 file_map_[fn] = file;
673 } else {
674 file = iter->second;
675 }
676 std::unique_ptr<WritableFile> f(new MockWritableFile(file, nullptr));
677 result->reset(new TestMemLogger(std::move(f), this));
678 return Status::OK();
679 }
680
// Acquires an advisory lock on fname. Creates the lock file on first use;
// fails if the path already exists as a regular file or the lock is held.
Status MockEnv::LockFile(const std::string& fname, FileLock** flock) {
  auto fn = NormalizePath(fname);
  {
    MutexLock lock(&mutex_);
    if (file_map_.find(fn) != file_map_.end()) {
      if (!file_map_[fn]->is_lock_file()) {
        return Status::InvalidArgument(fname, "Not a lock file.");
      }
      if (!file_map_[fn]->Lock()) {
        return Status::IOError(fn, "Lock is already held.");
      }
    } else {
      auto* file = new MemFile(this, fn, true);
      file->Ref();   // reference owned by file_map_
      file->Lock();  // freshly created, so this cannot fail
      file_map_[fn] = file;
    }
  }
  // Caller owns the returned token and releases it via UnlockFile().
  *flock = new MockEnvFileLock(fn);
  return Status::OK();
}
702
// Releases a lock taken by LockFile() and destroys the FileLock token. The
// lock-file entry stays in file_map_ so the lock can be re-acquired later.
Status MockEnv::UnlockFile(FileLock* flock) {
  std::string fn =
      static_cast_with_check<MockEnvFileLock, FileLock>(flock)->FileName();
  {
    MutexLock lock(&mutex_);
    if (file_map_.find(fn) != file_map_.end()) {
      if (!file_map_[fn]->is_lock_file()) {
        return Status::InvalidArgument(fn, "Not a lock file.");
      }
      file_map_[fn]->Unlock();
    }
  }
  // NOTE(review): flock is leaked on the InvalidArgument path above —
  // confirm whether callers are expected to delete it on failure.
  delete flock;
  return Status::OK();
}
718
// Fixed in-memory location; no directory entry is actually created.
Status MockEnv::GetTestDirectory(std::string* path) {
  *path = "/test";
  return Status::OK();
}
723
GetCurrentTime(int64_t * unix_time)724 Status MockEnv::GetCurrentTime(int64_t* unix_time) {
725 auto s = EnvWrapper::GetCurrentTime(unix_time);
726 if (s.ok()) {
727 *unix_time += fake_sleep_micros_.load() / (1000 * 1000);
728 }
729 return s;
730 }
731
// Wrapped env's clock plus accumulated fake sleep (already in microseconds).
uint64_t MockEnv::NowMicros() {
  return EnvWrapper::NowMicros() + fake_sleep_micros_.load();
}
735
// Wrapped env's clock plus accumulated fake sleep, scaled micros -> nanos.
uint64_t MockEnv::NowNanos() {
  return EnvWrapper::NowNanos() + fake_sleep_micros_.load() * 1000;
}
739
CorruptBuffer(const std::string & fname)740 Status MockEnv::CorruptBuffer(const std::string& fname) {
741 auto fn = NormalizePath(fname);
742 MutexLock lock(&mutex_);
743 auto iter = file_map_.find(fn);
744 if (iter == file_map_.end()) {
745 return Status::IOError(fn, "File not found");
746 }
747 iter->second->CorruptBuffer();
748 return Status::OK();
749 }
750
// Advances the fake clock without blocking; affects NowMicros(), NowNanos()
// and GetCurrentTime().
void MockEnv::FakeSleepForMicroseconds(int64_t micros) {
  fake_sleep_micros_.fetch_add(micros);
}
754
#ifndef ROCKSDB_LITE
// This is to maintain the behavior before switching from InMemoryEnv to
// MockEnv.
Env* NewMemEnv(Env* base_env) { return new MockEnv(base_env); }

#else  // ROCKSDB_LITE

// In LITE builds the in-memory Env is unavailable.
Env* NewMemEnv(Env* /*base_env*/) { return nullptr; }

#endif  // !ROCKSDB_LITE
764
765 } // namespace ROCKSDB_NAMESPACE
766