//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

// Arena is an implementation of the Allocator interface. For a small request,
// it carves the allocation out of a pre-allocated block of a fixed block
// size. For a large request, it directly allocates a dedicated block of the
// requested size.
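//
// A minimal usage sketch (illustrative only; not part of this header):
//
//   Arena arena;                           // blocks of the default size
//   char* p = arena.Allocate(64);          // carved from the current block
//   char* q = arena.AllocateAligned(128);  // aligned variant
//   // There is no per-allocation free; all memory is reclaimed when
//   // `arena` is destroyed.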

#pragma once
#ifndef OS_WIN
#include <sys/mman.h>
#endif
#include <assert.h>
#include <stdint.h>
#include <cerrno>
#include <cstddef>
#include <vector>
#include "memory/allocator.h"
#include "util/mutexlock.h"

namespace ROCKSDB_NAMESPACE {

class Arena : public Allocator {
 public:
  // No copying allowed
  Arena(const Arena&) = delete;
  void operator=(const Arena&) = delete;

  static const size_t kInlineSize = 2048;
  static const size_t kMinBlockSize;
  static const size_t kMaxBlockSize;

  // huge_page_size: if 0, don't use huge page TLB. If > 0 (should be set to
  // the hugepage size supported by the system), block allocation will try
  // huge page TLB first and fall back to the normal case if that fails.
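  //
  // For example (an illustrative sketch; the sizes are hypothetical):
  //
  //   Arena a1;                  // default block size, no huge page TLB
  //   Arena a2(1024 * 1024, nullptr /* tracker */,
  //            2 * 1024 * 1024 /* huge_page_size: 2 MiB pages */);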
  explicit Arena(size_t block_size = kMinBlockSize,
                 AllocTracker* tracker = nullptr, size_t huge_page_size = 0);
  ~Arena();

  char* Allocate(size_t bytes) override;

  // huge_page_size: if > 0, will try to allocate from huge page TLB.
  // The argument is the huge page size to use. Bytes will be rounded up to
  // a multiple of the page size and allocated through anonymous mmap with
  // huge pages enabled; the extra space allocated is wasted. If allocation
  // fails, it will fall back to the normal case. To enable this feature,
  // huge pages must be reserved beforehand, e.g.:
  //     sysctl -w vm.nr_hugepages=20
  // See the Linux doc Documentation/vm/hugetlbpage.txt for details.
  // Because huge page allocation can fail and fall back to the normal case,
  // its error messages are logged to logger. When calling with
  // huge_page_size > 0, we highly recommend passing in a logger; otherwise,
  // error messages are printed directly to stderr.
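  //
  // For example (an illustrative sketch; assumes 2 MiB huge pages have been
  // reserved on the system and `info_logger` is an existing Logger*):
  //
  //   char* buf = arena.AllocateAligned(4096, 2 * 1024 * 1024, info_logger);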
  char* AllocateAligned(size_t bytes, size_t huge_page_size = 0,
                        Logger* logger = nullptr) override;

  // Returns an estimate of the total memory usage of data allocated
  // by the arena (excluding the space that is allocated but not yet used
  // for future allocations).
  size_t ApproximateMemoryUsage() const {
    return blocks_memory_ + blocks_.capacity() * sizeof(char*) -
           alloc_bytes_remaining_;
  }

  size_t MemoryAllocatedBytes() const { return blocks_memory_; }

  size_t AllocatedAndUnused() const { return alloc_bytes_remaining_; }

  // If an allocation is too big, we'll allocate an irregular block with the
  // same size as that allocation.
  size_t IrregularBlockNum() const { return irregular_block_num; }

  size_t BlockSize() const override { return kBlockSize; }

  bool IsInInlineBlock() const {
    return blocks_.empty();
  }

 private:
  char inline_block_[kInlineSize]
      __attribute__((__aligned__(alignof(max_align_t))));
  // Number of bytes allocated in one block
  const size_t kBlockSize;
  // Array of new[] allocated memory blocks
  typedef std::vector<char*> Blocks;
  Blocks blocks_;

  struct MmapInfo {
    void* addr_;
    size_t length_;

    MmapInfo(void* addr, size_t length) : addr_(addr), length_(length) {}
  };
  std::vector<MmapInfo> huge_blocks_;
  size_t irregular_block_num = 0;

  // Stats for the current active block.
  // For each block, we allocate aligned memory chunks from one end and
  // unaligned memory chunks from the other end; otherwise the memory
  // wasted on alignment would be higher if both types were allocated
  // from the same direction. See the sketch below.
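  //
  // A rough picture of one block (an illustrative sketch):
  //
  //   +-------------------+--------------------+---------------------+
  //   | aligned chunks -> |     free space     | <- unaligned chunks |
  //   +-------------------+--------------------+---------------------+
  //   ^                   ^                    ^                     ^
  //   block start   aligned_alloc_ptr_   unaligned_alloc_ptr_   block end
  //
  //   alloc_bytes_remaining_ is the size of the free space in the middle.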
  char* unaligned_alloc_ptr_ = nullptr;
  char* aligned_alloc_ptr_ = nullptr;
  // How many bytes are left in the currently active block?
  size_t alloc_bytes_remaining_ = 0;

#ifdef MAP_HUGETLB
  size_t hugetlb_size_ = 0;
#endif  // MAP_HUGETLB
  char* AllocateFromHugePage(size_t bytes);
  char* AllocateFallback(size_t bytes, bool aligned);
  char* AllocateNewBlock(size_t block_bytes);

  // Bytes of memory in blocks allocated so far
  size_t blocks_memory_ = 0;
  AllocTracker* tracker_;
};

inline char* Arena::Allocate(size_t bytes) {
  // The semantics of what to return are a bit messy if we allow
  // 0-byte allocations, so we disallow them here (we don't need
  // them for our internal use).
  assert(bytes > 0);
  if (bytes <= alloc_bytes_remaining_) {
    // Unaligned allocations are carved from the high end of the current
    // block, so the pointer moves downward.
    unaligned_alloc_ptr_ -= bytes;
    alloc_bytes_remaining_ -= bytes;
    return unaligned_alloc_ptr_;
  }
  return AllocateFallback(bytes, false /* unaligned */);
}

// Checks and adjusts block_size so that the returned value is
//  1. in the range [kMinBlockSize, kMaxBlockSize], and
//  2. a multiple of the alignment unit.
extern size_t OptimizeBlockSize(size_t block_size);
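
// For example (illustrative only; the actual constants live in the
// implementation): if kMinBlockSize were 4096 and the align unit 16,
//   OptimizeBlockSize(1000) -> 4096   (clamped up to kMinBlockSize)
//   OptimizeBlockSize(5000) -> 5008   (rounded up to a multiple of 16)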

}  // namespace ROCKSDB_NAMESPACE