//===- Unix/Memory.cpp - Generic UNIX System Configuration ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines some functions for various memory management utilities.
//
//===----------------------------------------------------------------------===//

#include "Unix.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Valgrind.h"

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef __APPLE__
#include <mach/mach.h>
#endif

#if defined(__mips__)
#  if defined(__OpenBSD__)
#    include <mips64/sysarch.h>
#  else
#    include <sys/cachectl.h>
#  endif
#endif

#ifdef __APPLE__
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
#else
extern "C" void __clear_cache(void *, void*);
#endif

namespace {

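// Map the Memory::MF_* protection flags used by this API onto the POSIX
// PROT_* bits expected by mmap and mprotect.  Only the combinations handled
// below are legal; any other value is a caller error.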
int getPosixProtectionFlags(unsigned Flags) {
  switch (Flags) {
  case llvm::sys::Memory::MF_READ:
    return PROT_READ;
  case llvm::sys::Memory::MF_WRITE:
    return PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
    return PROT_READ | PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_EXEC;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE |
      llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_WRITE | PROT_EXEC;
  case llvm::sys::Memory::MF_EXEC:
#if defined(__FreeBSD__)
    // On PowerPC, an executable page with no read permission can have
    // unintended consequences.  InvalidateInstructionCache uses the dcbf
    // and icbi instructions, both of which the processor treats as loads;
    // if the page has no read permission, executing them raises a
    // segmentation fault.  This problem does not occur on Linux, but it
    // does on FreeBSD.
    return PROT_READ | PROT_EXEC;
#else
    return PROT_EXEC;
#endif
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PROT_NONE;
}

} // anonymous namespace

namespace llvm {
namespace sys {

MemoryBlock
Memory::allocateMappedMemory(size_t NumBytes,
                             const MemoryBlock *const NearBlock,
                             unsigned PFlags,
                             std::error_code &EC) {
  EC = std::error_code();
  if (NumBytes == 0)
    return MemoryBlock();

  static const size_t PageSize = Process::getPageSize();
  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;

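  // Create a private anonymous mapping.  Most systems define MAP_ANONYMOUS,
  // but some older BSD-derived platforms only provide the MAP_ANON alias.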
  int MMFlags = MAP_PRIVATE |
#ifdef MAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ; // Ends statement above

  int Protect = getPosixProtectionFlags(PFlags);

  // Use any near hint and the page size to set a page-aligned starting
  // address.
  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                      NearBlock->size() : 0;
  if (Start && Start % PageSize)
    Start += PageSize - Start % PageSize;

  void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
                      Protect, MMFlags, fd, 0);
  if (Addr == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint.
      return allocateMappedMemory(NumBytes, nullptr, PFlags, EC);

    EC = std::error_code(errno, std::generic_category());
    return MemoryBlock();
  }

  MemoryBlock Result;
  Result.Address = Addr;
  Result.Size = NumPages*PageSize;

  if (PFlags & MF_EXEC)
    Memory::InvalidateInstructionCache(Result.Address, Result.Size);

  return Result;
}

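// A sketch of how a caller might pair allocateMappedMemory with
// releaseMappedMemory (defined below); `Size` and the error handling are
// hypothetical, not part of this file:
//
//   std::error_code EC;
//   sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
//       Size, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
//   if (EC)
//     report_fatal_error("Cannot allocate memory: " + EC.message());
//   // ... use MB.base() / MB.size() ...
//   EC = sys::Memory::releaseMappedMemory(MB);
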
std::error_code
Memory::releaseMappedMemory(MemoryBlock &M) {
  if (M.Address == nullptr || M.Size == 0)
    return std::error_code();

  if (0 != ::munmap(M.Address, M.Size))
    return std::error_code(errno, std::generic_category());

  M.Address = nullptr;
  M.Size = 0;

  return std::error_code();
}

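/// Change the protection on an existing mapping.  The request is widened to
/// whole pages: the start address is rounded down and the length rounded up
/// to a multiple of the system page size before calling mprotect.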
std::error_code
Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
  static const size_t PageSize = Process::getPageSize();
  if (M.Address == nullptr || M.Size == 0)
    return std::error_code();

  if (!Flags)
    return std::error_code(EINVAL, std::generic_category());

  int Protect = getPosixProtectionFlags(Flags);

  int Result = ::mprotect((void *)((uintptr_t)M.Address & ~(PageSize - 1)),
                          PageSize * ((M.Size + PageSize - 1) / PageSize),
                          Protect);
  if (Result != 0)
    return std::error_code(errno, std::generic_category());

  if (Flags & MF_EXEC)
    Memory::InvalidateInstructionCache(M.Address, M.Size);

  return std::error_code();
}

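// A JIT-style client can combine allocateMappedMemory and protectMappedMemory
// to emit code without ever holding a writable-and-executable page (a sketch;
// `Size` and `emitCode` are hypothetical):
//
//   std::error_code EC;
//   sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
//       Size, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
//   emitCode(MB.base(), MB.size());
//   EC = sys::Memory::protectMappedMemory(
//       MB, sys::Memory::MF_READ | sys::Memory::MF_EXEC);
//   // protectMappedMemory invalidates the instruction cache when MF_EXEC
//   // is requested, so the block is now safe to call into.
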
/// AllocateRWX - Allocate a slab of memory with read/write/execute
/// permissions.  This is typically used for JIT applications where we want
/// to emit code to the memory then jump to it.  Getting this type of memory
/// is very OS specific.
///
MemoryBlock
Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
                    std::string *ErrMsg) {
  if (NumBytes == 0) return MemoryBlock();

  static const size_t PageSize = Process::getPageSize();
  size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;

  int flags = MAP_PRIVATE |
#ifdef MAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ;

  void* start = NearBlock ? (unsigned char*)NearBlock->base() +
                            NearBlock->size() : nullptr;

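  // On Darwin ARM targets a page generally cannot be writable and executable
  // at the same time, so map the region read/execute and rely on the
  // vm_protect calls below to toggle between RW (for emission) and RX (for
  // execution).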
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_EXEC,
                    flags, fd, 0);
#else
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_WRITE|PROT_EXEC,
                    flags, fd, 0);
#endif
  if (pa == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint.
      return AllocateRWX(NumBytes, nullptr, ErrMsg);

    MakeErrMsg(ErrMsg, "Can't allocate RWX Memory");
    return MemoryBlock();
  }

#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                                (vm_size_t)(PageSize*NumPages), 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect max RX failed");
    (void)::munmap(pa, PageSize*NumPages); // Don't leak the mapping on error.
    return MemoryBlock();
  }

  kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                  (vm_size_t)(PageSize*NumPages), 0,
                  VM_PROT_READ | VM_PROT_WRITE);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect RW failed");
    (void)::munmap(pa, PageSize*NumPages); // Don't leak the mapping on error.
    return MemoryBlock();
  }
#endif

  MemoryBlock result;
  result.Address = pa;
  result.Size = NumPages*PageSize;

  return result;
}

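/// ReleaseRWX - Release memory obtained with AllocateRWX.  Returns false on
/// success; on failure it returns true and, if ErrMsg is non-null, describes
/// the error there.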
bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
  if (M.Address == nullptr || M.Size == 0) return false;
  if (0 != ::munmap(M.Address, M.Size))
    return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
  return false;
}

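// The permission helpers below only need to do real work on Darwin ARM
// targets, where vm_protect flips a region between writable and executable;
// elsewhere the RWX mapping already permits everything and they succeed
// trivially.
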
bool Memory::setWritable(MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  if (M.Address == nullptr || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setExecutable(MemoryBlock &M, std::string *ErrMsg) {
  if (M.Address == nullptr || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setRangeWritable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

/// InvalidateInstructionCache - Before the JIT can run a block of code that
/// has been emitted, it must invalidate the instruction cache on some
/// platforms.
void Memory::InvalidateInstructionCache(const void *Addr,
                                        size_t Len) {

// icache invalidation for PPC and ARM.
#if defined(__APPLE__)

#  if (defined(__POWERPC__) || defined(__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC) || defined(__arm__) || \
       defined(__arm64__))
  sys_icache_invalidate(const_cast<void *>(Addr), Len);
#  endif

#else

#  if (defined(__POWERPC__) || defined(__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
  // Cache-line size assumed by the flush loops below.
  const size_t LineSize = 32;

  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;

  // Flush the modified data cache lines to memory...
  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  // ...then discard any stale instruction cache lines for the same range.
  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
#  elif (defined(__arm__) || defined(__aarch64__) || defined(__mips__)) && \
        defined(__GNUC__)
  // FIXME: Can we safely always call this for __GNUC__ everywhere?
  const char *Start = static_cast<const char *>(Addr);
  const char *End = Start + Len;
  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
#  endif

#endif // defined(__APPLE__)

  // Tell Valgrind to discard any cached translations for this range.
  ValgrindDiscardTranslations(Addr, Len);
}

} // namespace sys
} // namespace llvm