/*-
 * Copyright (c) 2004 Tim J. Robbins
 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 2000 Marcel Moolenaar
 * Copyright (c) 1994-1995 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>

#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_mmap.h>
#include <compat/linux/linux_persona.h>
#include <compat/linux/linux_util.h>

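/*
 * Implicit Linux thread stack size and the guard area set aside at its
 * bottom; used when mapping LINUX_MAP_GROWSDOWN regions below.
 */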
#define	STACK_SIZE	(2 * 1024 * 1024)
#define	GUARD_SIZE	(4 * PAGE_SIZE)

#if defined(__amd64__)
static void linux_fixup_prot(struct thread *td, int *prot);
#endif

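/*
 * File check callback handed to kern_mmap() via mr_check_fp_fn; applies
 * Linux-specific restrictions to the descriptor being mapped.
 */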
static int
linux_mmap_check_fp(struct file *fp, int flags, int prot, int maxprot)
{

	/* Linux mmap() just fails for O_WRONLY files */
	if ((fp->f_flag & FREAD) == 0)
		return (EACCES);

	return (0);
}

int
linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
    int flags, int fd, off_t pos)
{
	struct mmap_req mr, mr_fixed;
	struct proc *p = td->td_proc;
	struct vmspace *vms = td->td_proc->p_vmspace;
	int bsd_flags, error;

	LINUX_CTR6(mmap2, "0x%lx, %ld, %ld, 0x%08lx, %ld, 0x%lx",
	    addr, len, prot, flags, fd, pos);

	error = 0;
	bsd_flags = 0;

	/*
	 * Linux mmap(2):
	 * You must specify exactly one of MAP_SHARED and MAP_PRIVATE
	 */
	if (!((flags & LINUX_MAP_SHARED) ^ (flags & LINUX_MAP_PRIVATE)))
		return (EINVAL);

	if (flags & LINUX_MAP_SHARED)
		bsd_flags |= MAP_SHARED;
	if (flags & LINUX_MAP_PRIVATE)
		bsd_flags |= MAP_PRIVATE;
	if (flags & LINUX_MAP_FIXED)
		bsd_flags |= MAP_FIXED;
	if (flags & LINUX_MAP_ANON) {
		/* Enforce pos to be on page boundary, then ignore. */
		if ((pos & PAGE_MASK) != 0)
			return (EINVAL);
		pos = 0;
		bsd_flags |= MAP_ANON;
	} else
		bsd_flags |= MAP_NOSYNC;
	if (flags & LINUX_MAP_GROWSDOWN)
		bsd_flags |= MAP_STACK;

#if defined(__amd64__)
	/*
	 * According to the Linux mmap(2) man page, "MAP_32BIT flag
	 * is ignored when MAP_FIXED is set."
	 */
	if ((flags & LINUX_MAP_32BIT) && (flags & LINUX_MAP_FIXED) == 0)
		bsd_flags |= MAP_32BIT;

	/*
	 * PROT_READ, PROT_WRITE, or PROT_EXEC implies PROT_READ and PROT_EXEC
	 * on Linux/i386 if the binary requires executable stack.
	 * We do this only for IA32 emulation, as on native i386 this does not
	 * make sense without PAE.
	 *
	 * XXX. Linux checks that the file system is not mounted with noexec.
	 */
	linux_fixup_prot(td, &prot);
#endif

	/* Linux does not check the file descriptor when MAP_ANONYMOUS is set. */
	fd = (bsd_flags & MAP_ANON) ? -1 : fd;
	if (flags & LINUX_MAP_GROWSDOWN) {
		/*
		 * The Linux MAP_GROWSDOWN option does not limit auto
		 * growth of the region.  Linux mmap with this option
		 * takes as addr the initial BOS, and as len, the initial
		 * region size.  It can then grow down from addr without
		 * limit.  However, Linux threads have an implicit internal
		 * stack size limit of STACK_SIZE; it is just not
		 * enforced explicitly in Linux.  Here we impose
		 * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
		 * region, since we can do this with our mmap.
		 *
		 * Our mmap with MAP_STACK takes addr as the maximum
		 * downsize limit on BOS, and as len the max size of
		 * the region.  It then maps the top SGROWSIZ bytes,
		 * and auto grows the region down, up to the limit
		 * in addr.
		 *
		 * If we don't use the MAP_STACK option, the effect
		 * of this code is to allocate a stack region of a
		 * fixed size of (STACK_SIZE - GUARD_SIZE).
		 */

		if ((caddr_t)addr + len > vms->vm_maxsaddr) {
			/*
			 * Some Linux apps will attempt to mmap
			 * thread stacks near the top of their
			 * address space.  If their TOS is greater
			 * than vm_maxsaddr, vm_map_growstack()
			 * will confuse the thread stack with the
			 * process stack and deliver a SEGV if they
			 * attempt to grow the thread stack past their
			 * current stacksize rlimit.  To avoid this,
			 * adjust vm_maxsaddr upwards to reflect
			 * the current stacksize rlimit rather
			 * than the maximum possible stacksize.
			 * It would be better to adjust the
			 * mmap'ed region, but some apps do not check
			 * mmap's return value.
			 */
			PROC_LOCK(p);
			vms->vm_maxsaddr = (char *)round_page(vms->vm_stacktop) -
			    lim_cur_proc(p, RLIMIT_STACK);
			PROC_UNLOCK(p);
		}

		/*
		 * This gives us our maximum stack size and a new BOS.
		 * If we're using VM_STACK, then mmap will just map
		 * the top SGROWSIZ bytes, and let the stack grow down
		 * to the limit at BOS.  If we're not using VM_STACK
		 * we map the full stack, since we don't have a way
		 * to autogrow it.
		 */
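		/*
		 * Extend the region to the full (STACK_SIZE - GUARD_SIZE)
		 * stack size by moving the bottom of the stack down,
		 * keeping the top of the stack (addr + len) unchanged.
		 */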
		if (len <= STACK_SIZE - GUARD_SIZE) {
			addr = addr - (STACK_SIZE - GUARD_SIZE - len);
			len = STACK_SIZE - GUARD_SIZE;
		}
	}

	/*
	 * FreeBSD is free to ignore the address hint if MAP_FIXED wasn't
	 * passed.  However, some Linux applications, like the ART runtime,
	 * depend on the hint.  If MAP_FIXED wasn't passed, but the
	 * address is not zero, try with MAP_FIXED and MAP_EXCL first,
	 * and fall back to the normal behaviour if that fails.
	 */
	mr = (struct mmap_req) {
		.mr_hint = addr,
		.mr_len = len,
		.mr_prot = prot,
		.mr_flags = bsd_flags,
		.mr_fd = fd,
		.mr_pos = pos,
		.mr_check_fp_fn = linux_mmap_check_fp,
	};
	if (addr != 0 && (bsd_flags & MAP_FIXED) == 0 &&
	    (bsd_flags & MAP_EXCL) == 0) {
		mr_fixed = mr;
		mr_fixed.mr_flags |= MAP_FIXED | MAP_EXCL;
		error = kern_mmap(td, &mr_fixed);
		if (error == 0)
			goto out;
	}

	error = kern_mmap(td, &mr);
out:
	LINUX_CTR2(mmap2, "return: %d (%p)", error, td->td_retval[0]);

	return (error);
}

int
linux_mprotect_common(struct thread *td, uintptr_t addr, size_t len, int prot)
{
	int flags = 0;

	/* XXX Ignore PROT_GROWSUP for now. */
	prot &= ~LINUX_PROT_GROWSUP;
	if ((prot & ~(LINUX_PROT_GROWSDOWN | PROT_READ | PROT_WRITE |
	    PROT_EXEC)) != 0)
		return (EINVAL);
	if ((prot & LINUX_PROT_GROWSDOWN) != 0) {
		prot &= ~LINUX_PROT_GROWSDOWN;
		flags |= VM_MAP_PROTECT_GROWSDOWN;
	}

#if defined(__amd64__)
	linux_fixup_prot(td, &prot);
#endif
	return (kern_mprotect(td, addr, len, prot, flags));
}

/*
 * Implement Linux madvise(MADV_DONTNEED), which has unusual semantics: for
 * anonymous memory, pages in the range are immediately discarded.
 */
static int
linux_madvise_dontneed(struct thread *td, vm_offset_t start, vm_offset_t end)
{
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t backing_object, object;
	vm_offset_t estart, eend;
	vm_pindex_t pstart, pend;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;

	if (!vm_map_range_valid(map, start, end))
		return (EINVAL);
	start = trunc_page(start);
	end = round_page(end);

	error = 0;
	vm_map_lock_read(map);
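	/*
	 * If the start address falls into a gap between map entries,
	 * skip ahead to the first entry above it.
	 */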
	if (!vm_map_lookup_entry(map, start, &entry))
		entry = vm_map_entry_succ(entry);
	for (; entry->start < end; entry = vm_map_entry_succ(entry)) {
		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
			continue;

		if (entry->wired_count != 0) {
			error = EINVAL;
			break;
		}

		object = entry->object.vm_object;
		if (object == NULL)
			continue;
		if ((object->flags & (OBJ_UNMANAGED | OBJ_FICTITIOUS)) != 0)
			continue;

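		/*
		 * Clip both the entry's backing object page range
		 * (pstart, pend) and its mapped address range
		 * (estart, eend) to [start, end).
		 */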
		pstart = OFF_TO_IDX(entry->offset);
		if (start > entry->start) {
			pstart += atop(start - entry->start);
			estart = start;
		} else {
			estart = entry->start;
		}
		pend = OFF_TO_IDX(entry->offset) +
		    atop(entry->end - entry->start);
		if (entry->end > end) {
			pend -= atop(entry->end - end);
			eend = end;
		} else {
			eend = entry->end;
		}

		if ((object->flags & (OBJ_ANON | OBJ_ONEMAPPING)) ==
		    (OBJ_ANON | OBJ_ONEMAPPING)) {
			/*
			 * Singly-mapped anonymous memory is discarded.  This
			 * does not match Linux's semantics when the object
			 * belongs to a shadow chain of length > 1, since
			 * subsequent faults may retrieve pages from an
			 * intermediate anonymous object.  However, handling
			 * this case correctly introduces a fair bit of
			 * complexity.
			 */
			VM_OBJECT_WLOCK(object);
			if ((object->flags & OBJ_ONEMAPPING) != 0) {
				vm_object_collapse(object);
				vm_object_page_remove(object, pstart, pend, 0);
				backing_object = object->backing_object;
				if (backing_object != NULL &&
				    (backing_object->flags & OBJ_ANON) != 0)
					linux_msg(td,
					    "possibly incorrect MADV_DONTNEED");
				VM_OBJECT_WUNLOCK(object);
				continue;
			}
			VM_OBJECT_WUNLOCK(object);
		}

		/*
		 * Handle shared mappings.  Remove them outright instead of
		 * calling pmap_advise(), for consistency with Linux.
		 */
		pmap_remove(map->pmap, estart, eend);
		vm_object_madvise(object, pstart, pend, MADV_DONTNEED);
	}
	vm_map_unlock_read(map);

	return (error);
}

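/*
 * Translate Linux madvise(2) advice values to their native equivalents
 * where they exist; reject or silently ignore the rest.
 */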
int
linux_madvise_common(struct thread *td, uintptr_t addr, size_t len, int behav)
{

	switch (behav) {
	case LINUX_MADV_NORMAL:
		return (kern_madvise(td, addr, len, MADV_NORMAL));
	case LINUX_MADV_RANDOM:
		return (kern_madvise(td, addr, len, MADV_RANDOM));
	case LINUX_MADV_SEQUENTIAL:
		return (kern_madvise(td, addr, len, MADV_SEQUENTIAL));
	case LINUX_MADV_WILLNEED:
		return (kern_madvise(td, addr, len, MADV_WILLNEED));
	case LINUX_MADV_DONTNEED:
		return (linux_madvise_dontneed(td, addr, addr + len));
	case LINUX_MADV_FREE:
		return (kern_madvise(td, addr, len, MADV_FREE));
	case LINUX_MADV_REMOVE:
		linux_msg(curthread, "unsupported madvise MADV_REMOVE");
		return (EINVAL);
	case LINUX_MADV_DONTFORK:
		return (kern_minherit(td, addr, len, INHERIT_NONE));
	case LINUX_MADV_DOFORK:
		return (kern_minherit(td, addr, len, INHERIT_COPY));
	case LINUX_MADV_MERGEABLE:
		linux_msg(curthread, "unsupported madvise MADV_MERGEABLE");
		return (EINVAL);
	case LINUX_MADV_UNMERGEABLE:
		/* We don't merge anyway. */
		return (0);
	case LINUX_MADV_HUGEPAGE:
		/* Ignored; on FreeBSD huge pages are always on. */
		return (0);
	case LINUX_MADV_NOHUGEPAGE:
#if 0
		/*
		 * Don't warn - Firefox uses it a lot, and in real Linux it's
		 * an optional feature.
		 */
		linux_msg(curthread, "unsupported madvise MADV_NOHUGEPAGE");
#endif
		return (EINVAL);
	case LINUX_MADV_DONTDUMP:
		return (kern_madvise(td, addr, len, MADV_NOCORE));
	case LINUX_MADV_DODUMP:
		return (kern_madvise(td, addr, len, MADV_CORE));
	case LINUX_MADV_WIPEONFORK:
		return (kern_minherit(td, addr, len, INHERIT_ZERO));
	case LINUX_MADV_KEEPONFORK:
		return (kern_minherit(td, addr, len, INHERIT_COPY));
	case LINUX_MADV_HWPOISON:
		linux_msg(curthread, "unsupported madvise MADV_HWPOISON");
		return (EINVAL);
	case LINUX_MADV_SOFT_OFFLINE:
		linux_msg(curthread, "unsupported madvise MADV_SOFT_OFFLINE");
		return (EINVAL);
	case -1:
		/*
		 * -1 is sometimes used as a dummy value to detect simplistic
		 * madvise(2) stub implementations.  This safeguard is used by
		 * BoringSSL, for example, before assuming MADV_WIPEONFORK is
		 * safe to use.  Don't produce an "unsupported" error message
		 * for this special dummy value, which is unlikely to be used
		 * by any new advisory behavior feature.
		 */
		return (EINVAL);
	default:
		linux_msg(curthread, "unsupported madvise behav %d", behav);
		return (EINVAL);
	}
}

#if defined(__amd64__)
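/*
 * Apply the Linux READ_IMPLIES_EXEC personality: for 32-bit Linux
 * processes whose persona requests it, readable mappings are made
 * executable as well.
 */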
static void
linux_fixup_prot(struct thread *td, int *prot)
{
	struct linux_pemuldata *pem;

	if (SV_PROC_FLAG(td->td_proc, SV_ILP32) && *prot & PROT_READ) {
		pem = pem_find(td->td_proc);
		if (pem->persona & LINUX_READ_IMPLIES_EXEC)
			*prot |= PROT_EXEC;
	}
}
#endif
