// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#ifndef ROCKSDB_LITE

#include "db/db_impl/db_impl.h"
#include "rocksdb/cache.h"
#include "rocksdb/table.h"
#include "rocksdb/utilities/memory_util.h"
#include "rocksdb/utilities/stackable_db.h"
#include "table/block_based/block_based_table_factory.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

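// Tests for MemoryUtil::GetApproximateMemoryUsageByType(), which reports the
// approximate memory usage of a set of DBs broken down by usage type
// (memtables, table readers, and caches). The call under test has the shape:
//
//   std::map<MemoryUtil::UsageType, uint64_t> usage_by_type;
//   Status s = MemoryUtil::GetApproximateMemoryUsageByType(dbs, cache_set,
//                                                          &usage_by_type);
//
// where `cache_set` holds the distinct Cache* instances used by the DBs.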
class MemoryTest : public testing::Test {
 public:
  MemoryTest() : kDbDir(test::PerThreadDBPath("memory_test")), rnd_(301) {
    assert(Env::Default()->CreateDirIfMissing(kDbDir).ok());
  }

  std::string GetDBName(int id) { return kDbDir + "db_" + ToString(id); }

  std::string RandomString(int len) {
    std::string r;
    test::RandomString(&rnd_, len, &r);
    return r;
  }

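  // Queries the current approximate memory usage of all `dbs` and appends one
  // sample per usage type to usage_history_, so tests can assert on how each
  // type evolves over time.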
  void UpdateUsagesHistory(const std::vector<DB*>& dbs) {
    std::map<MemoryUtil::UsageType, uint64_t> usage_by_type;
    ASSERT_OK(GetApproximateMemoryUsageByType(dbs, &usage_by_type));
    for (int i = 0; i < MemoryUtil::kNumUsageTypes; ++i) {
      usage_history_[i].push_back(
          usage_by_type[static_cast<MemoryUtil::UsageType>(i)]);
    }
  }

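  // If `factory` is a BlockBasedTableFactory, collects its block cache and
  // compressed block cache pointers into `cache_set`; other table factories
  // are ignored.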
  void GetCachePointersFromTableFactory(
      const TableFactory* factory,
      std::unordered_set<const Cache*>* cache_set) {
    const BlockBasedTableFactory* bbtf =
        dynamic_cast<const BlockBasedTableFactory*>(factory);
    if (bbtf != nullptr) {
      const auto bbt_opts = bbtf->table_options();
      cache_set->insert(bbt_opts.block_cache.get());
      cache_set->insert(bbt_opts.block_cache_compressed.get());
    }
  }

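  // Gathers every distinct Cache* referenced by `dbs`: the table cache held
  // by DBImpl, the row cache from DBOptions, and the block caches configured
  // on each column family's table factory.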
  void GetCachePointers(const std::vector<DB*>& dbs,
                        std::unordered_set<const Cache*>* cache_set) {
    cache_set->clear();

    for (auto* db : dbs) {
      assert(db);

      // Cache from DBImpl
      StackableDB* sdb = dynamic_cast<StackableDB*>(db);
      DBImpl* db_impl = dynamic_cast<DBImpl*>(sdb ? sdb->GetBaseDB() : db);
      if (db_impl != nullptr) {
        cache_set->insert(db_impl->TEST_table_cache());
      }

      // Cache from DBOptions
      cache_set->insert(db->GetDBOptions().row_cache.get());

      // Cache from table factories
      std::unordered_map<std::string, const ImmutableCFOptions*> iopts_map;
      if (db_impl != nullptr) {
        ASSERT_OK(db_impl->TEST_GetAllImmutableCFOptions(&iopts_map));
      }
      for (const auto& pair : iopts_map) {
        GetCachePointersFromTableFactory(pair.second->table_factory,
                                         cache_set);
      }
    }
  }

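  // Convenience wrapper around MemoryUtil::GetApproximateMemoryUsageByType()
  // that first collects the set of caches shared by `dbs`.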
  Status GetApproximateMemoryUsageByType(
      const std::vector<DB*>& dbs,
      std::map<MemoryUtil::UsageType, uint64_t>* usage_by_type) {
    std::unordered_set<const Cache*> cache_set;
    GetCachePointers(dbs, &cache_set);

    return MemoryUtil::GetApproximateMemoryUsageByType(dbs, cache_set,
                                                       usage_by_type);
  }

  const std::string kDbDir;
  Random rnd_;
  std::vector<uint64_t> usage_history_[MemoryUtil::kNumUsageTypes];
};

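// Opens several DBs that share one block cache and verifies that, once no
// more flushes happen, the reported table-reader memory stays constant
// across reads.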
TEST_F(MemoryTest, SharedBlockCacheTotal) {
  std::vector<DB*> dbs;
  const int kNumDBs = 10;
  const int kKeySize = 100;
  const int kValueSize = 500;
  Options opt;
  opt.create_if_missing = true;
  opt.write_buffer_size = kKeySize + kValueSize;
  opt.max_write_buffer_number = 10;
  opt.min_write_buffer_number_to_merge = 10;
  opt.disable_auto_compactions = true;
  BlockBasedTableOptions bbt_opts;
  bbt_opts.block_cache = NewLRUCache(4096 * 1000 * 10);
  // Attach the shared block cache to the options so every DB opened below
  // actually uses it; otherwise the cache created above would go unused.
  opt.table_factory.reset(NewBlockBasedTableFactory(bbt_opts));
  for (int i = 0; i < kNumDBs; ++i) {
    ASSERT_OK(DestroyDB(GetDBName(i), opt));
    DB* db = nullptr;
    ASSERT_OK(DB::Open(opt, GetDBName(i), &db));
    dbs.push_back(db);
  }

  std::vector<std::string> keys_by_db[kNumDBs];

  // The tiny write buffer means each Put fills one memtable, so memtables
  // use more memory and each flush produces a new table.
  for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
    for (int i = 0; i < kNumDBs; ++i) {
      for (int j = 0; j < 100; ++j) {
        keys_by_db[i].emplace_back(RandomString(kKeySize));
        ASSERT_OK(dbs[i]->Put(WriteOptions(), keys_by_db[i].back(),
                              RandomString(kValueSize)));
      }
      ASSERT_OK(dbs[i]->Flush(FlushOptions()));
    }
  }
  for (int i = 0; i < kNumDBs; ++i) {
    for (auto& key : keys_by_db[i]) {
      std::string value;
      ASSERT_OK(dbs[i]->Get(ReadOptions(), key, &value));
    }
    UpdateUsagesHistory(dbs);
  }
  for (size_t i = 1; i < usage_history_[MemoryUtil::kMemTableTotal].size();
       ++i) {
    // No new tables were created after the last flush, so the table-reader
    // memory usage should stay constant while we read.
    ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  }
  for (int i = 0; i < kNumDBs; ++i) {
    delete dbs[i];
  }
}

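// Writes to several DBs with multiple column families and checks that each
// usage type moves in the expected direction as data is written, flushed,
// and read, and as iterators pin and then release memtables.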
TEST_F(MemoryTest, MemTableAndTableReadersTotal) {
  std::vector<DB*> dbs;
  std::vector<std::vector<ColumnFamilyHandle*>> vec_handles;
  const int kNumDBs = 10;
  const int kKeySize = 100;
  const int kValueSize = 500;
  Options opt;
  opt.create_if_missing = true;
  opt.create_missing_column_families = true;
  opt.write_buffer_size = kKeySize + kValueSize;
  opt.max_write_buffer_number = 10;
  opt.min_write_buffer_number_to_merge = 10;
  opt.disable_auto_compactions = true;

  std::vector<ColumnFamilyDescriptor> cf_descs = {
      {kDefaultColumnFamilyName, ColumnFamilyOptions(opt)},
      {"one", ColumnFamilyOptions(opt)},
      {"two", ColumnFamilyOptions(opt)},
  };

  for (int i = 0; i < kNumDBs; ++i) {
    ASSERT_OK(DestroyDB(GetDBName(i), opt));
    dbs.emplace_back();
    vec_handles.emplace_back();
    ASSERT_OK(DB::Open(DBOptions(opt), GetDBName(i), cf_descs,
                       &vec_handles.back(), &dbs.back()));
  }

  // The tiny write buffer means each Put fills one memtable, so memtable
  // usage grows with every write.
  for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
    for (int i = 0; i < kNumDBs; ++i) {
      for (auto* handle : vec_handles[i]) {
        ASSERT_OK(dbs[i]->Put(WriteOptions(), handle, RandomString(kKeySize),
                              RandomString(kValueSize)));
        UpdateUsagesHistory(dbs);
      }
    }
  }
  // Expect the memtable usage history to be monotonically increasing while
  // nothing has been flushed yet.
  for (size_t i = 1; i < usage_history_[MemoryUtil::kMemTableTotal].size();
       ++i) {
    ASSERT_GT(usage_history_[MemoryUtil::kMemTableTotal][i],
              usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
    ASSERT_GT(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
              usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
    // No flushes so far, so table-reader memory usage stays flat.
    ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  }

  size_t usage_check_point = usage_history_[MemoryUtil::kMemTableTotal].size();
  std::vector<Iterator*> iters;

  // Create an iterator (pinning the current memtables) and flush all
  // memtables for each db.
  for (int i = 0; i < kNumDBs; ++i) {
    iters.push_back(dbs[i]->NewIterator(ReadOptions()));
    ASSERT_OK(dbs[i]->Flush(FlushOptions()));

    // Read random keys to warm the table readers and block cache; misses
    // are expected, so the Get status is intentionally ignored.
    for (int j = 0; j < 100; ++j) {
      std::string value;
      dbs[i]->Get(ReadOptions(), RandomString(kKeySize), &value);
    }

    UpdateUsagesHistory(dbs);
  }
  for (size_t i = usage_check_point;
       i < usage_history_[MemoryUtil::kMemTableTotal].size(); ++i) {
    // The iterators pin the flushed memtables, so the total memtable
    // memory usage should not decrease.
    ASSERT_GE(usage_history_[MemoryUtil::kMemTableTotal][i],
              usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
    // Expect the un-flushed memtable usage to decrease monotonically from
    // usage_check_point onward as the flushes complete.
    ASSERT_LT(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
              usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
    // Expect the table-reader and cache usage to increase as we flush
    // tables and read them back.
    ASSERT_GT(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
    ASSERT_GT(usage_history_[MemoryUtil::kCacheTotal][i],
              usage_history_[MemoryUtil::kCacheTotal][i - 1]);
  }
  usage_check_point = usage_history_[MemoryUtil::kMemTableTotal].size();
  for (int i = 0; i < kNumDBs; ++i) {
    delete iters[i];
    UpdateUsagesHistory(dbs);
  }
  for (size_t i = usage_check_point;
       i < usage_history_[MemoryUtil::kMemTableTotal].size(); ++i) {
    // Expect the total memtable usage to decrease as we delete the
    // iterators that were pinning the flushed memtables.
    ASSERT_LT(usage_history_[MemoryUtil::kMemTableTotal][i],
              usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
    // The memory usage of un-flushed memtables is only affected by Put and
    // Flush, so we expect EQ here as we only delete iterators.
    ASSERT_EQ(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
              usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
    // Expect EQ as we didn't flush more memtables.
    ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  }

  for (int i = 0; i < kNumDBs; ++i) {
    for (auto* handle : vec_handles[i]) {
      delete handle;
    }
    delete dbs[i];
  }
}
}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
#if !defined(NDEBUG) || !defined(OS_WIN)
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
#else
  return 0;
#endif
}

#else
#include <cstdio>

int main(int /*argc*/, char** /*argv*/) {
  printf("Skipped in RocksDBLite as utilities are not supported.\n");
  return 0;
}
#endif  // !ROCKSDB_LITE