1 //===- SectionMemoryManager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the section-based memory manager used by the MCJIT
// execution engine and RuntimeDyld.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/ExecutionEngine/SectionMemoryManager.h"
15 #include "llvm/Config/config.h"
16 #include "llvm/Support/MathExtras.h"
17 #include "llvm/Support/Process.h"
18 
19 namespace llvm {
20 
21 uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size,
22                                                    unsigned Alignment,
23                                                    unsigned SectionID,
24                                                    StringRef SectionName,
25                                                    bool IsReadOnly) {
26   if (IsReadOnly)
27     return allocateSection(SectionMemoryManager::AllocationPurpose::ROData,
28                            Size, Alignment);
29   return allocateSection(SectionMemoryManager::AllocationPurpose::RWData, Size,
30                          Alignment);
31 }
32 
33 uint8_t *SectionMemoryManager::allocateCodeSection(uintptr_t Size,
34                                                    unsigned Alignment,
35                                                    unsigned SectionID,
36                                                    StringRef SectionName) {
37   return allocateSection(SectionMemoryManager::AllocationPurpose::Code, Size,
38                          Alignment);
39 }
40 
41 uint8_t *SectionMemoryManager::allocateSection(
42     SectionMemoryManager::AllocationPurpose Purpose, uintptr_t Size,
43     unsigned Alignment) {
44   if (!Alignment)
45     Alignment = 16;
46 
47   assert(!(Alignment & (Alignment - 1)) && "Alignment must be a power of two.");
48 
49   uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1) / Alignment + 1);
50   uintptr_t Addr = 0;
51 
52   MemoryGroup &MemGroup = [&]() -> MemoryGroup & {
53     switch (Purpose) {
54     case AllocationPurpose::Code:
55       return CodeMem;
56     case AllocationPurpose::ROData:
57       return RODataMem;
58     case AllocationPurpose::RWData:
59       return RWDataMem;
60     }
61     llvm_unreachable("Unknown SectionMemoryManager::AllocationPurpose");
62   }();
63 
64   // Look in the list of free memory regions and use a block there if one
65   // is available.
66   for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
67     if (FreeMB.Free.allocatedSize() >= RequiredSize) {
68       Addr = (uintptr_t)FreeMB.Free.base();
69       uintptr_t EndOfBlock = Addr + FreeMB.Free.allocatedSize();
70       // Align the address.
71       Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
72 
73       if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
74         // The part of the block we're giving out to the user is now pending
75         MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));
76 
77         // Remember this pending block, such that future allocations can just
78         // modify it rather than creating a new one
79         FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
80       } else {
81         sys::MemoryBlock &PendingMB =
82             MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
83         PendingMB = sys::MemoryBlock(PendingMB.base(),
84                                      Addr + Size - (uintptr_t)PendingMB.base());
85       }
86 
87       // Remember how much free space is now left in this block
88       FreeMB.Free =
89           sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
90       return (uint8_t *)Addr;
91     }
92   }
93 
94   // No pre-allocated free block was large enough. Allocate a new memory region.
95   // Note that all sections get allocated as read-write.  The permissions will
96   // be updated later based on memory group.
97   //
98   // FIXME: It would be useful to define a default allocation size (or add
99   // it as a constructor parameter) to minimize the number of allocations.
100   //
101   // FIXME: Initialize the Near member for each memory group to avoid
102   // interleaving.
103   std::error_code ec;
104   sys::MemoryBlock MB = MMapper.allocateMappedMemory(
105       Purpose, RequiredSize, &MemGroup.Near,
106       sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
107   if (ec) {
108     // FIXME: Add error propagation to the interface.
109     return nullptr;
110   }
111 
112   // Save this address as the basis for our next request
113   MemGroup.Near = MB;
114 
115   // Copy the address to all the other groups, if they have not
116   // been initialized.
117   if (CodeMem.Near.base() == 0)
118     CodeMem.Near = MB;
119   if (RODataMem.Near.base() == 0)
120     RODataMem.Near = MB;
121   if (RWDataMem.Near.base() == 0)
122     RWDataMem.Near = MB;
123 
124   // Remember that we allocated this memory
125   MemGroup.AllocatedMem.push_back(MB);
126   Addr = (uintptr_t)MB.base();
127   uintptr_t EndOfBlock = Addr + MB.allocatedSize();
128 
129   // Align the address.
130   Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
131 
132   // The part of the block we're giving out to the user is now pending
133   MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));
134 
135   // The allocateMappedMemory may allocate much more memory than we need. In
136   // this case, we store the unused memory as a free memory block.
137   unsigned FreeSize = EndOfBlock - Addr - Size;
138   if (FreeSize > 16) {
139     FreeMemBlock FreeMB;
140     FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize);
141     FreeMB.PendingPrefixIndex = (unsigned)-1;
142     MemGroup.FreeMem.push_back(FreeMB);
143   }
144 
145   // Return aligned address
146   return (uint8_t *)Addr;
147 }
148 
149 bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg) {
150   // FIXME: Should in-progress permissions be reverted if an error occurs?
151   std::error_code ec;
152 
153   // Make code memory executable.
154   ec = applyMemoryGroupPermissions(CodeMem,
155                                    sys::Memory::MF_READ | sys::Memory::MF_EXEC);
156   if (ec) {
157     if (ErrMsg) {
158       *ErrMsg = ec.message();
159     }
160     return true;
161   }
162 
163   // Make read-only data memory read-only.
164   ec = applyMemoryGroupPermissions(RODataMem,
165                                    sys::Memory::MF_READ | sys::Memory::MF_EXEC);
166   if (ec) {
167     if (ErrMsg) {
168       *ErrMsg = ec.message();
169     }
170     return true;
171   }
172 
173   // Read-write data memory already has the correct permissions
174 
175   // Some platforms with separate data cache and instruction cache require
176   // explicit cache flush, otherwise JIT code manipulations (like resolved
177   // relocations) will get to the data cache but not to the instruction cache.
178   invalidateInstructionCache();
179 
180   return false;
181 }
182 
183 static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
184   static const size_t PageSize = sys::Process::getPageSizeEstimate();
185 
186   size_t StartOverlap =
187       (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;
188 
189   size_t TrimmedSize = M.allocatedSize();
190   TrimmedSize -= StartOverlap;
191   TrimmedSize -= TrimmedSize % PageSize;
192 
193   sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap),
194                            TrimmedSize);
195 
196   assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
197   assert((Trimmed.allocatedSize() % PageSize) == 0);
198   assert(M.base() <= Trimmed.base() &&
199          Trimmed.allocatedSize() <= M.allocatedSize());
200 
201   return Trimmed;
202 }
203 
204 std::error_code
205 SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
206                                                   unsigned Permissions) {
207   for (sys::MemoryBlock &MB : MemGroup.PendingMem)
208     if (std::error_code EC = MMapper.protectMappedMemory(MB, Permissions))
209       return EC;
210 
211   MemGroup.PendingMem.clear();
212 
213   // Now go through free blocks and trim any of them that don't span the entire
214   // page because one of the pending blocks may have overlapped it.
215   for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
216     FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
217     // We cleared the PendingMem list, so all these pointers are now invalid
218     FreeMB.PendingPrefixIndex = (unsigned)-1;
219   }
220 
221   // Remove all blocks which are now empty
222   MemGroup.FreeMem.erase(remove_if(MemGroup.FreeMem,
223                                    [](FreeMemBlock &FreeMB) {
224                                      return FreeMB.Free.allocatedSize() == 0;
225                                    }),
226                          MemGroup.FreeMem.end());
227 
228   return std::error_code();
229 }
230 
231 void SectionMemoryManager::invalidateInstructionCache() {
232   for (sys::MemoryBlock &Block : CodeMem.PendingMem)
233     sys::Memory::InvalidateInstructionCache(Block.base(),
234                                             Block.allocatedSize());
235 }
236 
237 SectionMemoryManager::~SectionMemoryManager() {
238   for (MemoryGroup *Group : {&CodeMem, &RWDataMem, &RODataMem}) {
239     for (sys::MemoryBlock &Block : Group->AllocatedMem)
240       MMapper.releaseMappedMemory(Block);
241   }
242 }
243 
244 SectionMemoryManager::MemoryMapper::~MemoryMapper() {}
245 
246 void SectionMemoryManager::anchor() {}
247 
248 namespace {
249 // Trivial implementation of SectionMemoryManager::MemoryMapper that just calls
250 // into sys::Memory.
251 class DefaultMMapper final : public SectionMemoryManager::MemoryMapper {
252 public:
253   sys::MemoryBlock
254   allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose,
255                        size_t NumBytes, const sys::MemoryBlock *const NearBlock,
256                        unsigned Flags, std::error_code &EC) override {
257     return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC);
258   }
259 
260   std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
261                                       unsigned Flags) override {
262     return sys::Memory::protectMappedMemory(Block, Flags);
263   }
264 
265   std::error_code releaseMappedMemory(sys::MemoryBlock &M) override {
266     return sys::Memory::releaseMappedMemory(M);
267   }
268 };
269 
270 DefaultMMapper DefaultMMapperInstance;
271 } // namespace
272 
// Bind MMapper to the caller-provided mapper when one is given; otherwise
// fall back to the file-local DefaultMMapper singleton. MMapper is a
// reference, so the chosen mapper must outlive this manager.
SectionMemoryManager::SectionMemoryManager(MemoryMapper *MM)
    : MMapper(MM ? *MM : DefaultMMapperInstance) {}
275 
276 } // namespace llvm
277