//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef ROCKSDB_LITE
#include "table/cuckoo/cuckoo_table_reader.h"

#include <algorithm>
#include <limits>
#include <string>
#include <utility>
#include <vector>
#include "memory/arena.h"
#include "rocksdb/iterator.h"
#include "rocksdb/table.h"
#include "table/cuckoo/cuckoo_table_factory.h"
#include "table/get_context.h"
#include "table/internal_iterator.h"
#include "table/meta_blocks.h"
#include "util/coding.h"

namespace ROCKSDB_NAMESPACE {
namespace {
const uint64_t CACHE_LINE_MASK = ~((uint64_t)CACHE_LINE_SIZE - 1);
const uint32_t kInvalidIndex = std::numeric_limits<uint32_t>::max();
}  // namespace

extern const uint64_t kCuckooTableMagicNumber;
CuckooTableReader::CuckooTableReader(
    const ImmutableCFOptions& ioptions,
    std::unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
    const Comparator* comparator,
    uint64_t (*get_slice_hash)(const Slice&, uint32_t, uint64_t))
    : file_(std::move(file)),
      is_last_level_(false),
      identity_as_first_hash_(false),
      use_module_hash_(false),
      num_hash_func_(0),
      unused_key_(""),
      key_length_(0),
      user_key_length_(0),
      value_length_(0),
      bucket_length_(0),
      cuckoo_block_size_(0),
      cuckoo_block_bytes_minus_one_(0),
      table_size_(0),
      ucomp_(comparator),
      get_slice_hash_(get_slice_hash) {
  if (!ioptions.allow_mmap_reads) {
    status_ = Status::InvalidArgument("File is not mmapped");
    return;
  }
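  // Parse the table properties block: all cuckoo-specific parameters (hash
  // function count, key/value lengths, hash table size, etc.) are stored by
  // the builder as user-collected properties and are read back below.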
  TableProperties* props = nullptr;
  status_ = ReadTableProperties(file_.get(), file_size, kCuckooTableMagicNumber,
      ioptions, &props, true /* compression_type_missing */);
  if (!status_.ok()) {
    return;
  }
  table_props_.reset(props);
  auto& user_props = props->user_collected_properties;
  auto hash_funs = user_props.find(CuckooTablePropertyNames::kNumHashFunc);
  if (hash_funs == user_props.end()) {
    status_ = Status::Corruption("Number of hash functions not found");
    return;
  }
  num_hash_func_ = *reinterpret_cast<const uint32_t*>(hash_funs->second.data());
  auto unused_key = user_props.find(CuckooTablePropertyNames::kEmptyKey);
  if (unused_key == user_props.end()) {
    status_ = Status::Corruption("Empty bucket value not found");
    return;
  }
  unused_key_ = unused_key->second;

  key_length_ = static_cast<uint32_t>(props->fixed_key_len);
  auto user_key_len = user_props.find(CuckooTablePropertyNames::kUserKeyLength);
  if (user_key_len == user_props.end()) {
    status_ = Status::Corruption("User key length not found");
    return;
  }
  user_key_length_ = *reinterpret_cast<const uint32_t*>(
      user_key_len->second.data());

  auto value_length = user_props.find(CuckooTablePropertyNames::kValueLength);
  if (value_length == user_props.end()) {
    status_ = Status::Corruption("Value length not found");
    return;
  }
  value_length_ = *reinterpret_cast<const uint32_t*>(
      value_length->second.data());
  bucket_length_ = key_length_ + value_length_;

  auto hash_table_size = user_props.find(
      CuckooTablePropertyNames::kHashTableSize);
  if (hash_table_size == user_props.end()) {
    status_ = Status::Corruption("Hash table size not found");
    return;
  }
  table_size_ = *reinterpret_cast<const uint64_t*>(
      hash_table_size->second.data());

  auto is_last_level = user_props.find(CuckooTablePropertyNames::kIsLastLevel);
  if (is_last_level == user_props.end()) {
    status_ = Status::Corruption("Is last level not found");
    return;
  }
  is_last_level_ = *reinterpret_cast<const bool*>(is_last_level->second.data());

  auto identity_as_first_hash = user_props.find(
      CuckooTablePropertyNames::kIdentityAsFirstHash);
  if (identity_as_first_hash == user_props.end()) {
    status_ = Status::Corruption("identity as first hash not found");
    return;
  }
  identity_as_first_hash_ = *reinterpret_cast<const bool*>(
      identity_as_first_hash->second.data());

  auto use_module_hash = user_props.find(
      CuckooTablePropertyNames::kUseModuleHash);
  if (use_module_hash == user_props.end()) {
    status_ = Status::Corruption("Hash type not found");
    return;
  }
  use_module_hash_ = *reinterpret_cast<const bool*>(
      use_module_hash->second.data());
  auto cuckoo_block_size = user_props.find(
      CuckooTablePropertyNames::kCuckooBlockSize);
  if (cuckoo_block_size == user_props.end()) {
    status_ = Status::Corruption("Cuckoo block size not found");
    return;
  }
  cuckoo_block_size_ = *reinterpret_cast<const uint32_t*>(
      cuckoo_block_size->second.data());
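  // cuckoo_block_bytes_minus_one_ spans a full cuckoo block
  // (cuckoo_block_size_ buckets) minus one byte; Prepare() uses it to bound
  // its prefetch range.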
  cuckoo_block_bytes_minus_one_ = cuckoo_block_size_ * bucket_length_ - 1;
  status_ = file_->Read(0, static_cast<size_t>(file_size), &file_data_, nullptr,
                        nullptr);
}

Status CuckooTableReader::Get(const ReadOptions& /*readOptions*/,
                              const Slice& key, GetContext* get_context,
                              const SliceTransform* /* prefix_extractor */,
                              bool /*skip_filters*/) {
  assert(key.size() == key_length_ + (is_last_level_ ? 8 : 0));
  Slice user_key = ExtractUserKey(key);
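  // Probe the candidate position produced by each hash function in turn.
  // Each candidate covers a cuckoo block of cuckoo_block_size_ consecutive
  // buckets; finding an empty bucket (one holding unused_key_) ends the
  // lookup.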
  for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_; ++hash_cnt) {
    uint64_t offset = bucket_length_ * CuckooHash(
        user_key, hash_cnt, use_module_hash_, table_size_,
        identity_as_first_hash_, get_slice_hash_);
    const char* bucket = &file_data_.data()[offset];
    for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
         ++block_idx, bucket += bucket_length_) {
      if (ucomp_->Equal(Slice(unused_key_.data(), user_key.size()),
                        Slice(bucket, user_key.size()))) {
        return Status::OK();
      }
      // Here, we compare only the user key part, as we support only one entry
      // per user key and we do not support snapshots.
      if (ucomp_->Equal(user_key, Slice(bucket, user_key.size()))) {
        Slice value(bucket + key_length_, value_length_);
        if (is_last_level_) {
          // The sequence number is not stored at the last level, so we use
          // kMaxSequenceNumber since the real one is unknown. This could cause
          // some transactions to fail to lock a key because the reported
          // sequence number appears newer than their snapshot. However, it is
          // not expected that anyone would use a CuckooTable in a
          // TransactionDB.
          get_context->SaveValue(value, kMaxSequenceNumber);
        } else {
          Slice full_key(bucket, key_length_);
          ParsedInternalKey found_ikey;
          ParseInternalKey(full_key, &found_ikey);
          bool dont_care __attribute__((__unused__));
          get_context->SaveValue(found_ikey, value, &dont_care);
        }
        // We don't support merge operations. So, we return here.
        return Status::OK();
      }
    }
  }
  return Status::OK();
}

void CuckooTableReader::Prepare(const Slice& key) {
  // Prefetch the first Cuckoo Block.
  Slice user_key = ExtractUserKey(key);
  uint64_t addr = reinterpret_cast<uint64_t>(file_data_.data()) +
    bucket_length_ * CuckooHash(user_key, 0, use_module_hash_, table_size_,
                                identity_as_first_hash_, nullptr);
  uint64_t end_addr = addr + cuckoo_block_bytes_minus_one_;
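  // Round the start address down to a cache-line boundary and prefetch every
  // cache line that the cuckoo block may touch.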
  for (addr &= CACHE_LINE_MASK; addr < end_addr; addr += CACHE_LINE_SIZE) {
    PREFETCH(reinterpret_cast<const char*>(addr), 0, 3);
  }
}

class CuckooTableIterator : public InternalIterator {
 public:
  explicit CuckooTableIterator(CuckooTableReader* reader);
  // No copying allowed
  CuckooTableIterator(const CuckooTableIterator&) = delete;
  void operator=(const CuckooTableIterator&) = delete;
  ~CuckooTableIterator() override {}
  bool Valid() const override;
  void SeekToFirst() override;
  void SeekToLast() override;
  void Seek(const Slice& target) override;
  void SeekForPrev(const Slice& target) override;
  void Next() override;
  void Prev() override;
  Slice key() const override;
  Slice value() const override;
  Status status() const override { return Status::OK(); }
  void InitIfNeeded();

 private:
  struct BucketComparator {
    BucketComparator(const Slice& file_data, const Comparator* ucomp,
                     uint32_t bucket_len, uint32_t user_key_len,
                     const Slice& target = Slice())
      : file_data_(file_data),
        ucomp_(ucomp),
        bucket_len_(bucket_len),
        user_key_len_(user_key_len),
        target_(target) {}
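    // Orders bucket ids by user key. kInvalidIndex acts as a sentinel that
    // stands for target_, so Seek() can lower_bound the target key against
    // the sorted bucket ids with this same comparator.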
    bool operator()(const uint32_t first, const uint32_t second) const {
      const char* first_bucket =
        (first == kInvalidIndex) ? target_.data() :
                                   &file_data_.data()[first * bucket_len_];
      const char* second_bucket =
        (second == kInvalidIndex) ? target_.data() :
                                    &file_data_.data()[second * bucket_len_];
      return ucomp_->Compare(Slice(first_bucket, user_key_len_),
                             Slice(second_bucket, user_key_len_)) < 0;
    }
   private:
    const Slice file_data_;
    const Comparator* ucomp_;
    const uint32_t bucket_len_;
    const uint32_t user_key_len_;
    const Slice target_;
  };

  const BucketComparator bucket_comparator_;
  void PrepareKVAtCurrIdx();
  CuckooTableReader* reader_;
  bool initialized_;
  // Bucket ids sorted by the user key stored in each bucket.
  std::vector<uint32_t> sorted_bucket_ids_;
  // We assume that the number of items can be stored in uint32 (4 Billion).
  uint32_t curr_key_idx_;
  Slice curr_value_;
  IterKey curr_key_;
};

CuckooTableIterator::CuckooTableIterator(CuckooTableReader* reader)
  : bucket_comparator_(reader->file_data_, reader->ucomp_,
                       reader->bucket_length_, reader->user_key_length_),
    reader_(reader),
    initialized_(false),
    curr_key_idx_(kInvalidIndex) {
  sorted_bucket_ids_.clear();
  curr_value_.clear();
  curr_key_.Clear();
}

void CuckooTableIterator::InitIfNeeded() {
  if (initialized_) {
    return;
  }
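  // Lazily build the list of occupied bucket ids (skipping buckets that hold
  // unused_key_) and sort it by user key, so the unordered hash table can be
  // iterated in key order.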
  sorted_bucket_ids_.reserve(
      static_cast<size_t>(reader_->GetTableProperties()->num_entries));
  uint64_t num_buckets = reader_->table_size_ + reader_->cuckoo_block_size_ - 1;
  assert(num_buckets < kInvalidIndex);
  const char* bucket = reader_->file_data_.data();
  for (uint32_t bucket_id = 0; bucket_id < num_buckets; ++bucket_id) {
    if (Slice(bucket, reader_->key_length_) != Slice(reader_->unused_key_)) {
      sorted_bucket_ids_.push_back(bucket_id);
    }
    bucket += reader_->bucket_length_;
  }
  assert(sorted_bucket_ids_.size() ==
      reader_->GetTableProperties()->num_entries);
  std::sort(sorted_bucket_ids_.begin(), sorted_bucket_ids_.end(),
            bucket_comparator_);
  curr_key_idx_ = kInvalidIndex;
  initialized_ = true;
}

void CuckooTableIterator::SeekToFirst() {
  InitIfNeeded();
  curr_key_idx_ = 0;
  PrepareKVAtCurrIdx();
}

void CuckooTableIterator::SeekToLast() {
  InitIfNeeded();
  curr_key_idx_ = static_cast<uint32_t>(sorted_bucket_ids_.size()) - 1;
  PrepareKVAtCurrIdx();
}

void CuckooTableIterator::Seek(const Slice& target) {
  InitIfNeeded();
  const BucketComparator seek_comparator(
      reader_->file_data_, reader_->ucomp_,
      reader_->bucket_length_, reader_->user_key_length_,
      ExtractUserKey(target));
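  // lower_bound with kInvalidIndex as the searched value: the comparator
  // substitutes the target user key for the sentinel, so this finds the first
  // bucket whose user key is >= the target.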
  auto seek_it = std::lower_bound(sorted_bucket_ids_.begin(),
      sorted_bucket_ids_.end(),
      kInvalidIndex,
      seek_comparator);
  curr_key_idx_ =
      static_cast<uint32_t>(std::distance(sorted_bucket_ids_.begin(), seek_it));
  PrepareKVAtCurrIdx();
}

void CuckooTableIterator::SeekForPrev(const Slice& /*target*/) {
  // Not supported
  assert(false);
}

bool CuckooTableIterator::Valid() const {
  return curr_key_idx_ < sorted_bucket_ids_.size();
}

void CuckooTableIterator::PrepareKVAtCurrIdx() {
  if (!Valid()) {
    curr_value_.clear();
    curr_key_.Clear();
    return;
  }
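  // Materialize the key/value for the bucket at curr_key_idx_. Last-level
  // files store only user keys, so synthesize an internal key with sequence
  // number 0 and kTypeValue; otherwise the stored internal key is used as-is.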
  uint32_t id = sorted_bucket_ids_[curr_key_idx_];
  const char* offset = reader_->file_data_.data() +
                       id * reader_->bucket_length_;
  if (reader_->is_last_level_) {
    // Always return internal key.
    curr_key_.SetInternalKey(Slice(offset, reader_->user_key_length_),
                             0, kTypeValue);
  } else {
    curr_key_.SetInternalKey(Slice(offset, reader_->key_length_));
  }
  curr_value_ = Slice(offset + reader_->key_length_, reader_->value_length_);
}

void CuckooTableIterator::Next() {
  if (!Valid()) {
    curr_value_.clear();
    curr_key_.Clear();
    return;
  }
  ++curr_key_idx_;
  PrepareKVAtCurrIdx();
}

void CuckooTableIterator::Prev() {
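  // Stepping back from the first entry (or from an invalid position) leaves
  // the iterator invalid.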
  if (curr_key_idx_ == 0) {
    curr_key_idx_ = static_cast<uint32_t>(sorted_bucket_ids_.size());
  }
  if (!Valid()) {
    curr_value_.clear();
    curr_key_.Clear();
    return;
  }
  --curr_key_idx_;
  PrepareKVAtCurrIdx();
}

Slice CuckooTableIterator::key() const {
  assert(Valid());
  return curr_key_.GetInternalKey();
}

Slice CuckooTableIterator::value() const {
  assert(Valid());
  return curr_value_;
}

InternalIterator* CuckooTableReader::NewIterator(
    const ReadOptions& /*read_options*/,
    const SliceTransform* /* prefix_extractor */, Arena* arena,
    bool /*skip_filters*/, TableReaderCaller /*caller*/,
    size_t /*compaction_readahead_size*/) {
  if (!status().ok()) {
    return NewErrorInternalIterator<Slice>(
        Status::Corruption("CuckooTableReader status is not okay."), arena);
  }
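  // Construct the iterator in the caller-provided arena when one is supplied;
  // otherwise heap-allocate it.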
  CuckooTableIterator* iter;
  if (arena == nullptr) {
    iter = new CuckooTableIterator(this);
  } else {
    auto iter_mem = arena->AllocateAligned(sizeof(CuckooTableIterator));
    iter = new (iter_mem) CuckooTableIterator(this);
  }
  return iter;
}

size_t CuckooTableReader::ApproximateMemoryUsage() const { return 0; }

}  // namespace ROCKSDB_NAMESPACE
#endif  // ROCKSDB_LITE