1 //===- SectionMemoryManager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the section-based memory manager used by the MCJIT
10 // execution engine and RuntimeDyld
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/ExecutionEngine/SectionMemoryManager.h"
15 #include "llvm/Config/config.h"
16 #include "llvm/Support/ManagedStatic.h"
17 #include "llvm/Support/MathExtras.h"
18 #include "llvm/Support/Process.h"
19 
20 namespace llvm {
21 
22 uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size,
23                                                    unsigned Alignment,
24                                                    unsigned SectionID,
25                                                    StringRef SectionName,
26                                                    bool IsReadOnly) {
27   if (IsReadOnly)
28     return allocateSection(SectionMemoryManager::AllocationPurpose::ROData,
29                            Size, Alignment);
30   return allocateSection(SectionMemoryManager::AllocationPurpose::RWData, Size,
31                          Alignment);
32 }
33 
34 uint8_t *SectionMemoryManager::allocateCodeSection(uintptr_t Size,
35                                                    unsigned Alignment,
36                                                    unsigned SectionID,
37                                                    StringRef SectionName) {
38   return allocateSection(SectionMemoryManager::AllocationPurpose::Code, Size,
39                          Alignment);
40 }
41 
42 uint8_t *SectionMemoryManager::allocateSection(
43     SectionMemoryManager::AllocationPurpose Purpose, uintptr_t Size,
44     unsigned Alignment) {
45   if (!Alignment)
46     Alignment = 16;
47 
48   assert(!(Alignment & (Alignment - 1)) && "Alignment must be a power of two.");
49 
50   uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1) / Alignment + 1);
51   uintptr_t Addr = 0;
52 
53   MemoryGroup &MemGroup = [&]() -> MemoryGroup & {
54     switch (Purpose) {
55     case AllocationPurpose::Code:
56       return CodeMem;
57     case AllocationPurpose::ROData:
58       return RODataMem;
59     case AllocationPurpose::RWData:
60       return RWDataMem;
61     }
62     llvm_unreachable("Unknown SectionMemoryManager::AllocationPurpose");
63   }();
64 
65   // Look in the list of free memory regions and use a block there if one
66   // is available.
67   for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
68     if (FreeMB.Free.allocatedSize() >= RequiredSize) {
69       Addr = (uintptr_t)FreeMB.Free.base();
70       uintptr_t EndOfBlock = Addr + FreeMB.Free.allocatedSize();
71       // Align the address.
72       Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
73 
74       if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
75         // The part of the block we're giving out to the user is now pending
76         MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));
77 
78         // Remember this pending block, such that future allocations can just
79         // modify it rather than creating a new one
80         FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
81       } else {
82         sys::MemoryBlock &PendingMB =
83             MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
84         PendingMB = sys::MemoryBlock(PendingMB.base(),
85                                      Addr + Size - (uintptr_t)PendingMB.base());
86       }
87 
88       // Remember how much free space is now left in this block
89       FreeMB.Free =
90           sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
91       return (uint8_t *)Addr;
92     }
93   }
94 
95   // No pre-allocated free block was large enough. Allocate a new memory region.
96   // Note that all sections get allocated as read-write.  The permissions will
97   // be updated later based on memory group.
98   //
99   // FIXME: It would be useful to define a default allocation size (or add
100   // it as a constructor parameter) to minimize the number of allocations.
101   //
102   // FIXME: Initialize the Near member for each memory group to avoid
103   // interleaving.
104   std::error_code ec;
105   sys::MemoryBlock MB = MMapper.allocateMappedMemory(
106       Purpose, RequiredSize, &MemGroup.Near,
107       sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
108   if (ec) {
109     // FIXME: Add error propagation to the interface.
110     return nullptr;
111   }
112 
113   // Save this address as the basis for our next request
114   MemGroup.Near = MB;
115 
116   // Copy the address to all the other groups, if they have not
117   // been initialized.
118   if (CodeMem.Near.base() == 0)
119     CodeMem.Near = MB;
120   if (RODataMem.Near.base() == 0)
121     RODataMem.Near = MB;
122   if (RWDataMem.Near.base() == 0)
123     RWDataMem.Near = MB;
124 
125   // Remember that we allocated this memory
126   MemGroup.AllocatedMem.push_back(MB);
127   Addr = (uintptr_t)MB.base();
128   uintptr_t EndOfBlock = Addr + MB.allocatedSize();
129 
130   // Align the address.
131   Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
132 
133   // The part of the block we're giving out to the user is now pending
134   MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));
135 
136   // The allocateMappedMemory may allocate much more memory than we need. In
137   // this case, we store the unused memory as a free memory block.
138   unsigned FreeSize = EndOfBlock - Addr - Size;
139   if (FreeSize > 16) {
140     FreeMemBlock FreeMB;
141     FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize);
142     FreeMB.PendingPrefixIndex = (unsigned)-1;
143     MemGroup.FreeMem.push_back(FreeMB);
144   }
145 
146   // Return aligned address
147   return (uint8_t *)Addr;
148 }
149 
150 bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg) {
151   // FIXME: Should in-progress permissions be reverted if an error occurs?
152   std::error_code ec;
153 
154   // Make code memory executable.
155   ec = applyMemoryGroupPermissions(CodeMem,
156                                    sys::Memory::MF_READ | sys::Memory::MF_EXEC);
157   if (ec) {
158     if (ErrMsg) {
159       *ErrMsg = ec.message();
160     }
161     return true;
162   }
163 
164   // Make read-only data memory read-only.
165   ec = applyMemoryGroupPermissions(RODataMem, sys::Memory::MF_READ);
166   if (ec) {
167     if (ErrMsg) {
168       *ErrMsg = ec.message();
169     }
170     return true;
171   }
172 
173   // Read-write data memory already has the correct permissions
174 
175   // Some platforms with separate data cache and instruction cache require
176   // explicit cache flush, otherwise JIT code manipulations (like resolved
177   // relocations) will get to the data cache but not to the instruction cache.
178   invalidateInstructionCache();
179 
180   return false;
181 }
182 
183 static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
184   static const size_t PageSize = sys::Process::getPageSizeEstimate();
185 
186   size_t StartOverlap =
187       (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;
188 
189   size_t TrimmedSize = M.allocatedSize();
190   TrimmedSize -= StartOverlap;
191   TrimmedSize -= TrimmedSize % PageSize;
192 
193   sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap),
194                            TrimmedSize);
195 
196   assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
197   assert((Trimmed.allocatedSize() % PageSize) == 0);
198   assert(M.base() <= Trimmed.base() &&
199          Trimmed.allocatedSize() <= M.allocatedSize());
200 
201   return Trimmed;
202 }
203 
204 std::error_code
205 SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
206                                                   unsigned Permissions) {
207   for (sys::MemoryBlock &MB : MemGroup.PendingMem)
208     if (std::error_code EC = MMapper.protectMappedMemory(MB, Permissions))
209       return EC;
210 
211   MemGroup.PendingMem.clear();
212 
213   // Now go through free blocks and trim any of them that don't span the entire
214   // page because one of the pending blocks may have overlapped it.
215   for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
216     FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
217     // We cleared the PendingMem list, so all these pointers are now invalid
218     FreeMB.PendingPrefixIndex = (unsigned)-1;
219   }
220 
221   // Remove all blocks which are now empty
222   erase_if(MemGroup.FreeMem, [](FreeMemBlock &FreeMB) {
223     return FreeMB.Free.allocatedSize() == 0;
224   });
225 
226   return std::error_code();
227 }
228 
229 void SectionMemoryManager::invalidateInstructionCache() {
230   for (sys::MemoryBlock &Block : CodeMem.PendingMem)
231     sys::Memory::InvalidateInstructionCache(Block.base(),
232                                             Block.allocatedSize());
233 }
234 
235 SectionMemoryManager::~SectionMemoryManager() {
236   for (MemoryGroup *Group : {&CodeMem, &RWDataMem, &RODataMem}) {
237     for (sys::MemoryBlock &Block : Group->AllocatedMem)
238       MMapper.releaseMappedMemory(Block);
239   }
240 }
241 
242 SectionMemoryManager::MemoryMapper::~MemoryMapper() {}
243 
// Out-of-line virtual method that anchors SectionMemoryManager's vtable to
// this translation unit, so it is emitted once here rather than in every
// user of the header.
void SectionMemoryManager::anchor() {}
245 
namespace {
// Trivial implementation of SectionMemoryManager::MemoryMapper that just calls
// into sys::Memory.
class DefaultMMapper final : public SectionMemoryManager::MemoryMapper {
public:
  // Forwards to sys::Memory::allocateMappedMemory; the allocation Purpose is
  // ignored here since sys::Memory only needs size, placement hint and flags.
  sys::MemoryBlock
  allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose,
                       size_t NumBytes, const sys::MemoryBlock *const NearBlock,
                       unsigned Flags, std::error_code &EC) override {
    return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC);
  }

  // Forwards directly to sys::Memory::protectMappedMemory.
  std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
                                      unsigned Flags) override {
    return sys::Memory::protectMappedMemory(Block, Flags);
  }

  // Forwards directly to sys::Memory::releaseMappedMemory.
  std::error_code releaseMappedMemory(sys::MemoryBlock &M) override {
    return sys::Memory::releaseMappedMemory(M);
  }
};

// Lazily-constructed process-wide default mapper, shared by every
// SectionMemoryManager constructed without an explicit MemoryMapper.
ManagedStatic<DefaultMMapper> DefaultMMapperInstance;
} // namespace
270 
271 SectionMemoryManager::SectionMemoryManager(MemoryMapper *MM)
272     : MMapper(MM ? *MM : *DefaultMMapperInstance) {}
273 
274 } // namespace llvm
275