// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef NDEBUG

#include "db/column_family.h"
#include "db/db_impl/db_impl.h"
#include "db/error_handler.h"
#include "monitoring/thread_status_updater.h"
#include "util/cast_util.h"

namespace ROCKSDB_NAMESPACE {
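// Returns the total size in bytes of the level-0 files of the default
// column family, computed under the DB mutex.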
uint64_t DBImpl::TEST_GetLevel0TotalSize() {
  InstrumentedMutexLock l(&mutex_);
  return default_cf_handle_->cfd()->current()->storage_info()->NumLevelBytes(0);
}

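// Forces a WAL switch while holding the DB mutex and an unbatched write
// slot, so no concurrent write can interleave with the switch.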
void DBImpl::TEST_SwitchWAL() {
  WriteContext write_context;
  InstrumentedMutexLock l(&mutex_);
  void* writer = TEST_BeginWrite();
  SwitchWAL(&write_context);
  TEST_EndWrite(writer);
}

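// Returns true if the current WAL writer's buffer is empty. Pass
// lock == false only when the caller already holds log_write_mutex_.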
bool DBImpl::TEST_WALBufferIsEmpty(bool lock) {
  if (lock) {
    log_write_mutex_.Lock();
  }
  log::Writer* cur_log_writer = logs_.back().writer;
  auto res = cur_log_writer->TEST_BufferIsEmpty();
  if (lock) {
    log_write_mutex_.Unlock();
  }
  return res;
}

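// Returns the largest number of bytes in the next level that overlap any
// single file in the given column family (default CF if nullptr).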
int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes(
    ColumnFamilyHandle* column_family) {
  ColumnFamilyData* cfd;
  if (column_family == nullptr) {
    cfd = default_cf_handle_->cfd();
  } else {
    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
    cfd = cfh->cfd();
  }
  InstrumentedMutexLock l(&mutex_);
  return cfd->current()->storage_info()->MaxNextLevelOverlappingBytes();
}

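// Copies the FileMetaData of every SST file in the current version of the
// given column family into *metadata, one inner vector per level.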
void DBImpl::TEST_GetFilesMetaData(
    ColumnFamilyHandle* column_family,
    std::vector<std::vector<FileMetaData>>* metadata) {
  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
  auto cfd = cfh->cfd();
  InstrumentedMutexLock l(&mutex_);
  metadata->resize(NumberLevels());
  for (int level = 0; level < NumberLevels(); level++) {
    const std::vector<FileMetaData*>& files =
        cfd->current()->storage_info()->LevelFiles(level);

    (*metadata)[level].clear();
    for (const auto& f : files) {
      (*metadata)[level].push_back(*f);
    }
  }
}

uint64_t DBImpl::TEST_Current_Manifest_FileNo() {
  return versions_->manifest_file_number();
}

uint64_t DBImpl::TEST_Current_Next_FileNo() {
  return versions_->current_next_file_number();
}

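// Runs a manual compaction of [begin, end] at the given level. Leveled
// compaction writes the output to level + 1; universal and FIFO compaction
// keep the output at the same level.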
Status DBImpl::TEST_CompactRange(int level, const Slice* begin,
                                 const Slice* end,
                                 ColumnFamilyHandle* column_family,
                                 bool disallow_trivial_move) {
  ColumnFamilyData* cfd;
  if (column_family == nullptr) {
    cfd = default_cf_handle_->cfd();
  } else {
    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
    cfd = cfh->cfd();
  }
  int output_level =
      (cfd->ioptions()->compaction_style == kCompactionStyleUniversal ||
       cfd->ioptions()->compaction_style == kCompactionStyleFIFO)
          ? level
          : level + 1;
  return RunManualCompaction(cfd, level, output_level, CompactRangeOptions(),
                             begin, end, true, disallow_trivial_move,
                             port::kMaxUint64 /*max_file_num_to_ignore*/);
}

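// Forces the given column family (default CF if nullptr) to switch to a
// fresh memtable. When two_write_queues_ is set, both write queues are
// blocked for the duration of the switch.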
Status DBImpl::TEST_SwitchMemtable(ColumnFamilyData* cfd) {
  WriteContext write_context;
  InstrumentedMutexLock l(&mutex_);
  if (cfd == nullptr) {
    cfd = default_cf_handle_->cfd();
  }

  Status s;
  void* writer = TEST_BeginWrite();
  if (two_write_queues_) {
    WriteThread::Writer nonmem_w;
    nonmem_write_thread_.EnterUnbatched(&nonmem_w, &mutex_);
    s = SwitchMemtable(cfd, &write_context);
    nonmem_write_thread_.ExitUnbatched(&nonmem_w);
  } else {
    s = SwitchMemtable(cfd, &write_context);
  }
  TEST_EndWrite(writer);
  return s;
}

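// Flushes the memtable of the given column family (default CF if nullptr)
// with FlushReason::kTest, optionally waiting for the flush to complete.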
Status DBImpl::TEST_FlushMemTable(bool wait, bool allow_write_stall,
                                  ColumnFamilyHandle* cfh) {
  FlushOptions fo;
  fo.wait = wait;
  fo.allow_write_stall = allow_write_stall;
  ColumnFamilyData* cfd;
  if (cfh == nullptr) {
    cfd = default_cf_handle_->cfd();
  } else {
    auto cfhi = reinterpret_cast<ColumnFamilyHandleImpl*>(cfh);
    cfd = cfhi->cfd();
  }
  return FlushMemTable(cfd, fo, FlushReason::kTest);
}

Status DBImpl::TEST_FlushMemTable(ColumnFamilyData* cfd,
                                  const FlushOptions& flush_opts) {
  return FlushMemTable(cfd, flush_opts, FlushReason::kTest);
}

Status DBImpl::TEST_AtomicFlushMemTables(
    const autovector<ColumnFamilyData*>& cfds, const FlushOptions& flush_opts) {
  return AtomicFlushMemTables(cfds, flush_opts, FlushReason::kTest);
}

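// Blocks until any pending flush of the given column family (default CF if
// nullptr) has finished.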
Status DBImpl::TEST_WaitForFlushMemTable(ColumnFamilyHandle* column_family) {
  ColumnFamilyData* cfd;
  if (column_family == nullptr) {
    cfd = default_cf_handle_->cfd();
  } else {
    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
    cfd = cfh->cfd();
  }
  return WaitForFlushMemTable(cfd, nullptr, false);
}

Status DBImpl::TEST_WaitForCompact(bool wait_unscheduled) {
  // Wait until the compaction completes.

  // TODO: a bug here. This function does not necessarily wait for a
  // compaction: it waits for any scheduled compaction OR flush to finish.

  InstrumentedMutexLock l(&mutex_);
  while ((bg_bottom_compaction_scheduled_ || bg_compaction_scheduled_ ||
          bg_flush_scheduled_ ||
          (wait_unscheduled && unscheduled_compactions_)) &&
         (error_handler_.GetBGError().ok())) {
    bg_cv_.Wait();
  }
  return error_handler_.GetBGError();
}

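// Expose the DB mutex so tests can hold it across multiple calls.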
void DBImpl::TEST_LockMutex() { mutex_.Lock(); }

void DBImpl::TEST_UnlockMutex() { mutex_.Unlock(); }

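// Enters the write thread as an unbatched writer and returns the Writer as
// an opaque handle; every call must be paired with TEST_EndWrite().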
void* DBImpl::TEST_BeginWrite() {
  auto w = new WriteThread::Writer();
  write_thread_.EnterUnbatched(w, &mutex_);
  return reinterpret_cast<void*>(w);
}

void DBImpl::TEST_EndWrite(void* w) {
  auto writer = reinterpret_cast<WriteThread::Writer*>(w);
  write_thread_.ExitUnbatched(writer);
  delete writer;
}

size_t DBImpl::TEST_LogsToFreeSize() {
  InstrumentedMutexLock l(&mutex_);
  return logs_to_free_.size();
}

uint64_t DBImpl::TEST_LogfileNumber() {
  InstrumentedMutexLock l(&mutex_);
  return logfile_number_;
}

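// Collects the name and ImmutableCFOptions of every column family. The
// snapshot is taken under the DB mutex; the map is built after releasing it.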
Status DBImpl::TEST_GetAllImmutableCFOptions(
    std::unordered_map<std::string, const ImmutableCFOptions*>* iopts_map) {
  std::vector<std::string> cf_names;
  std::vector<const ImmutableCFOptions*> iopts;
  {
    InstrumentedMutexLock l(&mutex_);
    for (auto cfd : *versions_->GetColumnFamilySet()) {
      cf_names.push_back(cfd->GetName());
      iopts.push_back(cfd->ioptions());
    }
  }
  iopts_map->clear();
  for (size_t i = 0; i < cf_names.size(); ++i) {
    iopts_map->insert({cf_names[i], iopts[i]});
  }

  return Status::OK();
}

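// Thin accessors over logs_with_prep_tracker_ state, exposed for tests.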
uint64_t DBImpl::TEST_FindMinLogContainingOutstandingPrep() {
  return logs_with_prep_tracker_.FindMinLogContainingOutstandingPrep();
}

size_t DBImpl::TEST_PreparedSectionCompletedSize() {
  return logs_with_prep_tracker_.TEST_PreparedSectionCompletedSize();
}

size_t DBImpl::TEST_LogsWithPrepSize() {
  return logs_with_prep_tracker_.TEST_LogsWithPrepSize();
}

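// Returns the minimum log number holding a prepared section that is still
// referenced by some memtable, passing an empty list of memtables to skip.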
uint64_t DBImpl::TEST_FindMinPrepLogReferencedByMemTable() {
  autovector<MemTable*> empty_list;
  return FindMinPrepLogReferencedByMemTable(versions_.get(), nullptr,
                                            empty_list);
}

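// Copies the latest MutableCFOptions of the given column family into
// *mutable_cf_options under the DB mutex.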
Status DBImpl::TEST_GetLatestMutableCFOptions(
    ColumnFamilyHandle* column_family, MutableCFOptions* mutable_cf_options) {
  InstrumentedMutexLock l(&mutex_);

  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
  *mutable_cf_options = *cfh->cfd()->GetLatestMutableCFOptions();
  return Status::OK();
}

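// Report the background compaction/flush limits currently computed by
// GetBGJobLimits(), read under the DB mutex.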
int DBImpl::TEST_BGCompactionsAllowed() const {
  InstrumentedMutexLock l(&mutex_);
  return GetBGJobLimits().max_compactions;
}

int DBImpl::TEST_BGFlushesAllowed() const {
  InstrumentedMutexLock l(&mutex_);
  return GetBGJobLimits().max_flushes;
}

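// Returns the last sequence number visible to reads: the published last
// sequence when last_seq_same_as_publish_seq_ is set, otherwise the last
// allocated sequence.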
SequenceNumber DBImpl::TEST_GetLastVisibleSequence() const {
  if (last_seq_same_as_publish_seq_) {
    return versions_->LastSequence();
  } else {
    return versions_->LastAllocatedSequence();
  }
}

size_t DBImpl::TEST_GetWalPreallocateBlockSize(
    uint64_t write_buffer_size) const {
  InstrumentedMutexLock l(&mutex_);
  return GetWalPreallocateBlockSize(write_buffer_size);
}

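// Synchronizes with the periodic stats-dump (or stats-persist) thread via
// its TEST_WaitForRun hook; no-op if the thread was never started.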
void DBImpl::TEST_WaitForDumpStatsRun(std::function<void()> callback) const {
  if (thread_dump_stats_ != nullptr) {
    thread_dump_stats_->TEST_WaitForRun(callback);
  }
}

void DBImpl::TEST_WaitForPersistStatsRun(std::function<void()> callback) const {
  if (thread_persist_stats_ != nullptr) {
    thread_persist_stats_->TEST_WaitForRun(callback);
  }
}

bool DBImpl::TEST_IsPersistentStatsEnabled() const {
  return thread_persist_stats_ && thread_persist_stats_->IsRunning();
}

size_t DBImpl::TEST_EstimateInMemoryStatsHistorySize() const {
  return EstimateInMemoryStatsHistorySize();
}
}  // namespace ROCKSDB_NAMESPACE
#endif  // NDEBUG