1 /*
2  *  linux/fs/exec.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6 
7 /*
8  * #!-checking implemented by tytso.
9  */
10 /*
11  * Demand-loading implemented 01.12.91 - no need to read anything but
12  * the header into memory. The inode of the executable is put into
13  * "current->executable", and page faults do the actual loading. Clean.
14  *
15  * Once more I can proudly say that linux stood up to being changed: it
16  * was less than 2 hours work to get demand-loading completely implemented.
17  *
18  * Demand loading changed July 1993 by Eric Youngdale.   Use mmap instead,
19  * current->executable is only used by the procfs.  This allows a dispatch
20  * table to check for several different types of binary formats.  We keep
21  * trying until we recognize the file or we run out of supported binary
22  * formats.
23  */
24 
25 #include <linux/slab.h>
26 #include <linux/file.h>
27 #include <linux/fdtable.h>
28 #include <linux/mm.h>
29 #include <linux/vmacache.h>
30 #include <linux/stat.h>
31 #include <linux/fcntl.h>
32 #include <linux/swap.h>
33 #include <linux/string.h>
34 #include <linux/init.h>
35 #include <linux/sched/mm.h>
36 #include <linux/sched/coredump.h>
37 #include <linux/sched/signal.h>
38 #include <linux/pagemap.h>
39 #include <linux/perf_event.h>
40 #include <linux/highmem.h>
41 #include <linux/spinlock.h>
42 #include <linux/key.h>
43 #include <linux/personality.h>
44 #include <linux/binfmts.h>
45 #include <linux/utsname.h>
46 #include <linux/pid_namespace.h>
47 #include <linux/module.h>
48 #include <linux/namei.h>
49 #include <linux/mount.h>
50 #include <linux/security.h>
51 #include <linux/syscalls.h>
52 #include <linux/tsacct_kern.h>
53 #include <linux/cn_proc.h>
54 #include <linux/audit.h>
55 #include <linux/tracehook.h>
56 #include <linux/kmod.h>
57 #include <linux/fsnotify.h>
58 #include <linux/fs_struct.h>
59 #include <linux/pipe_fs_i.h>
60 #include <linux/oom.h>
61 #include <linux/compat.h>
62 #include <linux/vmalloc.h>
63 
64 #include <linux/uaccess.h>
65 #include <asm/mmu_context.h>
66 #include <asm/tlb.h>
67 
68 #include <trace/events/task.h>
69 #include "internal.h"
70 
71 #include <trace/events/sched.h>
72 
73 int suid_dumpable = 0;
74 
75 static LIST_HEAD(formats);
76 static DEFINE_RWLOCK(binfmt_lock);
77 
78 void __register_binfmt(struct linux_binfmt * fmt, int insert)
79 {
80 	BUG_ON(!fmt);
81 	if (WARN_ON(!fmt->load_binary))
82 		return;
83 	write_lock(&binfmt_lock);
84 	insert ? list_add(&fmt->lh, &formats) :
85 		 list_add_tail(&fmt->lh, &formats);
86 	write_unlock(&binfmt_lock);
87 }
88 
89 EXPORT_SYMBOL(__register_binfmt);
90 
91 void unregister_binfmt(struct linux_binfmt * fmt)
92 {
93 	write_lock(&binfmt_lock);
94 	list_del(&fmt->lh);
95 	write_unlock(&binfmt_lock);
96 }
97 
98 EXPORT_SYMBOL(unregister_binfmt);
99 
100 static inline void put_binfmt(struct linux_binfmt * fmt)
101 {
102 	module_put(fmt->module);
103 }
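
/*
 * A minimal sketch (illustrative assumption, not part of this file) of how
 * a binary-format handler hooks into the dispatch list managed above.
 * register_binfmt() and insert_binfmt() are the <linux/binfmts.h> wrappers
 * around __register_binfmt(); the "EXMP" magic and handler body are made up.
 */
#if 0
#include <linux/binfmts.h>
#include <linux/module.h>
#include <linux/string.h>

static int example_load_binary(struct linux_binprm *bprm)
{
	/* Not our magic?  -ENOEXEC lets the next handler on the list try. */
	if (memcmp(bprm->buf, "EXMP", 4) != 0)
		return -ENOEXEC;

	/* ... flush the old image and set up the new one here ... */
	return 0;
}

static struct linux_binfmt example_format = {
	.module      = THIS_MODULE,
	.load_binary = example_load_binary,
};

static int __init example_init(void)
{
	register_binfmt(&example_format);	/* tail of the "formats" list */
	return 0;
}

static void __exit example_exit(void)
{
	unregister_binfmt(&example_format);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif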
104 
105 bool path_noexec(const struct path *path)
106 {
107 	return (path->mnt->mnt_flags & MNT_NOEXEC) ||
108 	       (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
109 }
110 
111 #ifdef CONFIG_USELIB
112 /*
113  * Note that a shared library must be both readable and executable for
114  * security reasons.
115  *
116  * Also note that the address to load at is taken from the file itself.
117  */
118 SYSCALL_DEFINE1(uselib, const char __user *, library)
119 {
120 	struct linux_binfmt *fmt;
121 	struct file *file;
122 	struct filename *tmp = getname(library);
123 	int error = PTR_ERR(tmp);
124 	static const struct open_flags uselib_flags = {
125 		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
126 		.acc_mode = MAY_READ | MAY_EXEC,
127 		.intent = LOOKUP_OPEN,
128 		.lookup_flags = LOOKUP_FOLLOW,
129 	};
130 
131 	if (IS_ERR(tmp))
132 		goto out;
133 
134 	file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
135 	putname(tmp);
136 	error = PTR_ERR(file);
137 	if (IS_ERR(file))
138 		goto out;
139 
140 	error = -EINVAL;
141 	if (!S_ISREG(file_inode(file)->i_mode))
142 		goto exit;
143 
144 	error = -EACCES;
145 	if (path_noexec(&file->f_path))
146 		goto exit;
147 
148 	fsnotify_open(file);
149 
150 	error = -ENOEXEC;
151 
152 	read_lock(&binfmt_lock);
153 	list_for_each_entry(fmt, &formats, lh) {
154 		if (!fmt->load_shlib)
155 			continue;
156 		if (!try_module_get(fmt->module))
157 			continue;
158 		read_unlock(&binfmt_lock);
159 		error = fmt->load_shlib(file);
160 		read_lock(&binfmt_lock);
161 		put_binfmt(fmt);
162 		if (error != -ENOEXEC)
163 			break;
164 	}
165 	read_unlock(&binfmt_lock);
166 exit:
167 	fput(file);
168 out:
169 	return error;
170 }
171 #endif /* #ifdef CONFIG_USELIB */
172 
173 #ifdef CONFIG_MMU
174 /*
175  * The nascent bprm->mm is not visible until exec_mmap(), but it can
176  * use a lot of memory; account these pages in current->mm temporarily
177  * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
178  * change the counter back via acct_arg_size(0).
179  */
180 static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
181 {
182 	struct mm_struct *mm = current->mm;
183 	long diff = (long)(pages - bprm->vma_pages);
184 
185 	if (!mm || !diff)
186 		return;
187 
188 	bprm->vma_pages = pages;
189 	add_mm_counter(mm, MM_ANONPAGES, diff);
190 }
191 
192 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
193 		int write)
194 {
195 	struct page *page;
196 	int ret;
197 	unsigned int gup_flags = FOLL_FORCE;
198 
199 #ifdef CONFIG_STACK_GROWSUP
200 	if (write) {
201 		ret = expand_downwards(bprm->vma, pos);
202 		if (ret < 0)
203 			return NULL;
204 	}
205 #endif
206 
207 	if (write)
208 		gup_flags |= FOLL_WRITE;
209 
210 	/*
211 	 * We are doing an exec().  'current' is the process
212 	 * doing the exec and bprm->mm is the new process's mm.
213 	 */
214 	ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
215 			&page, NULL, NULL);
216 	if (ret <= 0)
217 		return NULL;
218 
219 	if (write) {
220 		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
221 		struct rlimit *rlim;
222 
223 		acct_arg_size(bprm, size / PAGE_SIZE);
224 
225 		/*
226 		 * We've historically supported up to 32 pages (ARG_MAX)
227 		 * of argument strings even with small stacks
228 		 */
229 		if (size <= ARG_MAX)
230 			return page;
231 
232 		/*
233 		 * Limit the argv+env strings to 1/4 of the stack size.
234 		 * This ensures that:
235 		 *  - the remaining binfmt code will not run out of stack space,
236 		 *  - the program will have a reasonable amount of stack left
237 		 *    to work from.
238 		 */
239 		rlim = current->signal->rlim;
240 		if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
241 			put_page(page);
242 			return NULL;
243 		}
244 	}
245 
246 	return page;
247 }
248 
249 static void put_arg_page(struct page *page)
250 {
251 	put_page(page);
252 }
253 
254 static void free_arg_pages(struct linux_binprm *bprm)
255 {
256 }
257 
258 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
259 		struct page *page)
260 {
261 	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
262 }
263 
264 static int __bprm_mm_init(struct linux_binprm *bprm)
265 {
266 	int err;
267 	struct vm_area_struct *vma = NULL;
268 	struct mm_struct *mm = bprm->mm;
269 
270 	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
271 	if (!vma)
272 		return -ENOMEM;
273 
274 	if (down_write_killable(&mm->mmap_sem)) {
275 		err = -EINTR;
276 		goto err_free;
277 	}
278 	vma->vm_mm = mm;
279 
280 	/*
281 	 * Place the stack at the largest stack address the architecture
282 	 * supports. Later, we'll move this to an appropriate place. We don't
283 	 * use STACK_TOP because that can depend on attributes which aren't
284 	 * configured yet.
285 	 */
286 	BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
287 	vma->vm_end = STACK_TOP_MAX;
288 	vma->vm_start = vma->vm_end - PAGE_SIZE;
289 	vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
290 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
291 	INIT_LIST_HEAD(&vma->anon_vma_chain);
292 
293 	err = insert_vm_struct(mm, vma);
294 	if (err)
295 		goto err;
296 
297 	mm->stack_vm = mm->total_vm = 1;
298 	arch_bprm_mm_init(mm, vma);
299 	up_write(&mm->mmap_sem);
300 	bprm->p = vma->vm_end - sizeof(void *);
301 	return 0;
302 err:
303 	up_write(&mm->mmap_sem);
304 err_free:
305 	bprm->vma = NULL;
306 	kmem_cache_free(vm_area_cachep, vma);
307 	return err;
308 }
309 
310 static bool valid_arg_len(struct linux_binprm *bprm, long len)
311 {
312 	return len <= MAX_ARG_STRLEN;
313 }
314 
315 #else
316 
317 static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
318 {
319 }
320 
321 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
322 		int write)
323 {
324 	struct page *page;
325 
326 	page = bprm->page[pos / PAGE_SIZE];
327 	if (!page && write) {
328 		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
329 		if (!page)
330 			return NULL;
331 		bprm->page[pos / PAGE_SIZE] = page;
332 	}
333 
334 	return page;
335 }
336 
337 static void put_arg_page(struct page *page)
338 {
339 }
340 
341 static void free_arg_page(struct linux_binprm *bprm, int i)
342 {
343 	if (bprm->page[i]) {
344 		__free_page(bprm->page[i]);
345 		bprm->page[i] = NULL;
346 	}
347 }
348 
349 static void free_arg_pages(struct linux_binprm *bprm)
350 {
351 	int i;
352 
353 	for (i = 0; i < MAX_ARG_PAGES; i++)
354 		free_arg_page(bprm, i);
355 }
356 
357 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
358 		struct page *page)
359 {
360 }
361 
362 static int __bprm_mm_init(struct linux_binprm *bprm)
363 {
364 	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
365 	return 0;
366 }
367 
368 static bool valid_arg_len(struct linux_binprm *bprm, long len)
369 {
370 	return len <= bprm->p;
371 }
372 
373 #endif /* CONFIG_MMU */
374 
375 /*
376  * Create a new mm_struct and populate it with a temporary stack
377  * vm_area_struct.  We don't have enough context at this point to set the stack
378  * flags, permissions, and offset, so we use temporary values.  We'll update
379  * them later in setup_arg_pages().
380  */
381 static int bprm_mm_init(struct linux_binprm *bprm)
382 {
383 	int err;
384 	struct mm_struct *mm = NULL;
385 
386 	bprm->mm = mm = mm_alloc();
387 	err = -ENOMEM;
388 	if (!mm)
389 		goto err;
390 
391 	err = __bprm_mm_init(bprm);
392 	if (err)
393 		goto err;
394 
395 	return 0;
396 
397 err:
398 	if (mm) {
399 		bprm->mm = NULL;
400 		mmdrop(mm);
401 	}
402 
403 	return err;
404 }
405 
406 struct user_arg_ptr {
407 #ifdef CONFIG_COMPAT
408 	bool is_compat;
409 #endif
410 	union {
411 		const char __user *const __user *native;
412 #ifdef CONFIG_COMPAT
413 		const compat_uptr_t __user *compat;
414 #endif
415 	} ptr;
416 };
417 
418 static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
419 {
420 	const char __user *native;
421 
422 #ifdef CONFIG_COMPAT
423 	if (unlikely(argv.is_compat)) {
424 		compat_uptr_t compat;
425 
426 		if (get_user(compat, argv.ptr.compat + nr))
427 			return ERR_PTR(-EFAULT);
428 
429 		return compat_ptr(compat);
430 	}
431 #endif
432 
433 	if (get_user(native, argv.ptr.native + nr))
434 		return ERR_PTR(-EFAULT);
435 
436 	return native;
437 }
438 
439 /*
440  * count() counts the number of strings in array ARGV.
441  */
442 static int count(struct user_arg_ptr argv, int max)
443 {
444 	int i = 0;
445 
446 	if (argv.ptr.native != NULL) {
447 		for (;;) {
448 			const char __user *p = get_user_arg_ptr(argv, i);
449 
450 			if (!p)
451 				break;
452 
453 			if (IS_ERR(p))
454 				return -EFAULT;
455 
456 			if (i >= max)
457 				return -E2BIG;
458 			++i;
459 
460 			if (fatal_signal_pending(current))
461 				return -ERESTARTNOHAND;
462 			cond_resched();
463 		}
464 	}
465 	return i;
466 }
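
/*
 * Worked example (illustrative): for a userspace call such as
 * execve("/bin/ls", (char *[]){ "ls", "-l", NULL }, envp), count() walks
 * the pointer array until it hits the NULL terminator and returns 2.  The
 * max argument (MAX_ARG_STRINGS on the execve path) turns a missing or
 * absurdly distant terminator into -E2BIG instead of an unbounded scan.
 */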
467 
468 /*
469  * 'copy_strings()' copies argument/environment strings from the old
470  * process's memory to the new process's stack.  The call to get_user_pages()
471  * ensures the destination page is created and not swapped out.
472  */
473 static int copy_strings(int argc, struct user_arg_ptr argv,
474 			struct linux_binprm *bprm)
475 {
476 	struct page *kmapped_page = NULL;
477 	char *kaddr = NULL;
478 	unsigned long kpos = 0;
479 	int ret;
480 
481 	while (argc-- > 0) {
482 		const char __user *str;
483 		int len;
484 		unsigned long pos;
485 
486 		ret = -EFAULT;
487 		str = get_user_arg_ptr(argv, argc);
488 		if (IS_ERR(str))
489 			goto out;
490 
491 		len = strnlen_user(str, MAX_ARG_STRLEN);
492 		if (!len)
493 			goto out;
494 
495 		ret = -E2BIG;
496 		if (!valid_arg_len(bprm, len))
497 			goto out;
498 
499 		/* We're going to work our way backwards. */
500 		pos = bprm->p;
501 		str += len;
502 		bprm->p -= len;
503 
504 		while (len > 0) {
505 			int offset, bytes_to_copy;
506 
507 			if (fatal_signal_pending(current)) {
508 				ret = -ERESTARTNOHAND;
509 				goto out;
510 			}
511 			cond_resched();
512 
513 			offset = pos % PAGE_SIZE;
514 			if (offset == 0)
515 				offset = PAGE_SIZE;
516 
517 			bytes_to_copy = offset;
518 			if (bytes_to_copy > len)
519 				bytes_to_copy = len;
520 
521 			offset -= bytes_to_copy;
522 			pos -= bytes_to_copy;
523 			str -= bytes_to_copy;
524 			len -= bytes_to_copy;
525 
526 			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
527 				struct page *page;
528 
529 				page = get_arg_page(bprm, pos, 1);
530 				if (!page) {
531 					ret = -E2BIG;
532 					goto out;
533 				}
534 
535 				if (kmapped_page) {
536 					flush_kernel_dcache_page(kmapped_page);
537 					kunmap(kmapped_page);
538 					put_arg_page(kmapped_page);
539 				}
540 				kmapped_page = page;
541 				kaddr = kmap(kmapped_page);
542 				kpos = pos & PAGE_MASK;
543 				flush_arg_page(bprm, kpos, kmapped_page);
544 			}
545 			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
546 				ret = -EFAULT;
547 				goto out;
548 			}
549 		}
550 	}
551 	ret = 0;
552 out:
553 	if (kmapped_page) {
554 		flush_kernel_dcache_page(kmapped_page);
555 		kunmap(kmapped_page);
556 		put_arg_page(kmapped_page);
557 	}
558 	return ret;
559 }
560 
561 /*
562  * Like copy_strings, but get argv and its values from kernel memory.
563  */
564 int copy_strings_kernel(int argc, const char *const *__argv,
565 			struct linux_binprm *bprm)
566 {
567 	int r;
568 	mm_segment_t oldfs = get_fs();
569 	struct user_arg_ptr argv = {
570 		.ptr.native = (const char __user *const  __user *)__argv,
571 	};
572 
573 	set_fs(KERNEL_DS);
574 	r = copy_strings(argc, argv, bprm);
575 	set_fs(oldfs);
576 
577 	return r;
578 }
579 EXPORT_SYMBOL(copy_strings_kernel);
580 
581 #ifdef CONFIG_MMU
582 
583 /*
584  * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
585  * the binfmt code determines where the new stack should reside, we shift it to
586  * its final location.  The process proceeds as follows:
587  *
588  * 1) Use shift to calculate the new vma endpoints.
589  * 2) Extend vma to cover both the old and new ranges.  This ensures the
590  *    arguments passed to subsequent functions are consistent.
591  * 3) Move vma's page tables to the new range.
592  * 4) Free up any cleared pgd range.
593  * 5) Shrink the vma to cover only the new range.
594  */
595 static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
596 {
597 	struct mm_struct *mm = vma->vm_mm;
598 	unsigned long old_start = vma->vm_start;
599 	unsigned long old_end = vma->vm_end;
600 	unsigned long length = old_end - old_start;
601 	unsigned long new_start = old_start - shift;
602 	unsigned long new_end = old_end - shift;
603 	struct mmu_gather tlb;
604 
605 	BUG_ON(new_start > new_end);
606 
607 	/*
608 	 * ensure there are no vmas between where we want to go
609 	 * and where we are
610 	 */
611 	if (vma != find_vma(mm, new_start))
612 		return -EFAULT;
613 
614 	/*
615 	 * cover the whole range: [new_start, old_end)
616 	 */
617 	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
618 		return -ENOMEM;
619 
620 	/*
621 	 * move the page tables downwards, on failure we rely on
622 	 * process cleanup to remove whatever mess we made.
623 	 */
624 	if (length != move_page_tables(vma, old_start,
625 				       vma, new_start, length, false))
626 		return -ENOMEM;
627 
628 	lru_add_drain();
629 	tlb_gather_mmu(&tlb, mm, old_start, old_end);
630 	if (new_end > old_start) {
631 		/*
632 		 * when the old and new regions overlap clear from new_end.
633 		 */
634 		free_pgd_range(&tlb, new_end, old_end, new_end,
635 			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
636 	} else {
637 		/*
638 		 * otherwise, clear from old_start; this is done to avoid touching
639 		 * the address space in [new_end, old_start), because some
640 		 * architectures have constraints on va-space that make this
641 		 * illegal (IA64) - for the others it's just a little faster.
642 		 */
643 		free_pgd_range(&tlb, old_start, old_end, new_end,
644 			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
645 	}
646 	tlb_finish_mmu(&tlb, old_start, old_end);
647 
648 	/*
649 	 * Shrink the vma to just the new range.  Always succeeds.
650 	 */
651 	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
652 
653 	return 0;
654 }
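
/*
 * Worked example (illustrative, made-up addresses): with old_start =
 * 0xffff0000, old_end = 0xffff4000 (length 0x4000) and shift = 0x1000,
 * the new range is [0xfffef000, 0xffff3000).  new_end (0xffff3000) lies
 * above old_start (0xffff0000), so the ranges overlap and only the tail
 * [new_end, old_end) is cleared; a shift >= the length would take the
 * other branch and clear the whole old range starting at old_start.
 */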
655 
656 /*
657  * Finalizes the stack vm_area_struct. The flags and permissions are updated,
658  * the stack is optionally relocated, and some extra space is added.
659  */
660 int setup_arg_pages(struct linux_binprm *bprm,
661 		    unsigned long stack_top,
662 		    int executable_stack)
663 {
664 	unsigned long ret;
665 	unsigned long stack_shift;
666 	struct mm_struct *mm = current->mm;
667 	struct vm_area_struct *vma = bprm->vma;
668 	struct vm_area_struct *prev = NULL;
669 	unsigned long vm_flags;
670 	unsigned long stack_base;
671 	unsigned long stack_size;
672 	unsigned long stack_expand;
673 	unsigned long rlim_stack;
674 
675 #ifdef CONFIG_STACK_GROWSUP
676 	/* Limit stack size */
677 	stack_base = rlimit_max(RLIMIT_STACK);
678 	if (stack_base > STACK_SIZE_MAX)
679 		stack_base = STACK_SIZE_MAX;
680 
681 	/* Add space for stack randomization. */
682 	stack_base += (STACK_RND_MASK << PAGE_SHIFT);
683 
684 	/* Make sure we didn't let the argument array grow too large. */
685 	if (vma->vm_end - vma->vm_start > stack_base)
686 		return -ENOMEM;
687 
688 	stack_base = PAGE_ALIGN(stack_top - stack_base);
689 
690 	stack_shift = vma->vm_start - stack_base;
691 	mm->arg_start = bprm->p - stack_shift;
692 	bprm->p = vma->vm_end - stack_shift;
693 #else
694 	stack_top = arch_align_stack(stack_top);
695 	stack_top = PAGE_ALIGN(stack_top);
696 
697 	if (unlikely(stack_top < mmap_min_addr) ||
698 	    unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
699 		return -ENOMEM;
700 
701 	stack_shift = vma->vm_end - stack_top;
702 
703 	bprm->p -= stack_shift;
704 	mm->arg_start = bprm->p;
705 #endif
706 
707 	if (bprm->loader)
708 		bprm->loader -= stack_shift;
709 	bprm->exec -= stack_shift;
710 
711 	if (down_write_killable(&mm->mmap_sem))
712 		return -EINTR;
713 
714 	vm_flags = VM_STACK_FLAGS;
715 
716 	/*
717 	 * Adjust stack execute permissions; explicitly enable for
718 	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
719 	 * (arch default) otherwise.
720 	 */
721 	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
722 		vm_flags |= VM_EXEC;
723 	else if (executable_stack == EXSTACK_DISABLE_X)
724 		vm_flags &= ~VM_EXEC;
725 	vm_flags |= mm->def_flags;
726 	vm_flags |= VM_STACK_INCOMPLETE_SETUP;
727 
728 	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
729 			vm_flags);
730 	if (ret)
731 		goto out_unlock;
732 	BUG_ON(prev != vma);
733 
734 	/* Move stack pages down in memory. */
735 	if (stack_shift) {
736 		ret = shift_arg_pages(vma, stack_shift);
737 		if (ret)
738 			goto out_unlock;
739 	}
740 
741 	/* mprotect_fixup is overkill to remove the temporary stack flags */
742 	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
743 
744 	stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
745 	stack_size = vma->vm_end - vma->vm_start;
746 	/*
747 	 * Align this down to a page boundary as expand_stack
748 	 * will align it up.
749 	 */
750 	rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
751 #ifdef CONFIG_STACK_GROWSUP
752 	if (stack_size + stack_expand > rlim_stack)
753 		stack_base = vma->vm_start + rlim_stack;
754 	else
755 		stack_base = vma->vm_end + stack_expand;
756 #else
757 	if (stack_size + stack_expand > rlim_stack)
758 		stack_base = vma->vm_end - rlim_stack;
759 	else
760 		stack_base = vma->vm_start - stack_expand;
761 #endif
762 	current->mm->start_stack = bprm->p;
763 	ret = expand_stack(vma, stack_base);
764 	if (ret)
765 		ret = -EFAULT;
766 
767 out_unlock:
768 	up_write(&mm->mmap_sem);
769 	return ret;
770 }
771 EXPORT_SYMBOL(setup_arg_pages);
772 
773 #else
774 
775 /*
776  * Transfer the program arguments and environment from the holding pages
777  * onto the stack. The provided stack pointer is adjusted accordingly.
778  */
779 int transfer_args_to_stack(struct linux_binprm *bprm,
780 			   unsigned long *sp_location)
781 {
782 	unsigned long index, stop, sp;
783 	int ret = 0;
784 
785 	stop = bprm->p >> PAGE_SHIFT;
786 	sp = *sp_location;
787 
788 	for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
789 		unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
790 		char *src = kmap(bprm->page[index]) + offset;
791 		sp -= PAGE_SIZE - offset;
792 		if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
793 			ret = -EFAULT;
794 		kunmap(bprm->page[index]);
795 		if (ret)
796 			goto out;
797 	}
798 
799 	*sp_location = sp;
800 
801 out:
802 	return ret;
803 }
804 EXPORT_SYMBOL(transfer_args_to_stack);
805 
806 #endif /* CONFIG_MMU */
807 
808 static struct file *do_open_execat(int fd, struct filename *name, int flags)
809 {
810 	struct file *file;
811 	int err;
812 	struct open_flags open_exec_flags = {
813 		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
814 		.acc_mode = MAY_EXEC,
815 		.intent = LOOKUP_OPEN,
816 		.lookup_flags = LOOKUP_FOLLOW,
817 	};
818 
819 	if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
820 		return ERR_PTR(-EINVAL);
821 	if (flags & AT_SYMLINK_NOFOLLOW)
822 		open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
823 	if (flags & AT_EMPTY_PATH)
824 		open_exec_flags.lookup_flags |= LOOKUP_EMPTY;
825 
826 	file = do_filp_open(fd, name, &open_exec_flags);
827 	if (IS_ERR(file))
828 		goto out;
829 
830 	err = -EACCES;
831 	if (!S_ISREG(file_inode(file)->i_mode))
832 		goto exit;
833 
834 	if (path_noexec(&file->f_path))
835 		goto exit;
836 
837 	err = deny_write_access(file);
838 	if (err)
839 		goto exit;
840 
841 	if (name->name[0] != '\0')
842 		fsnotify_open(file);
843 
844 out:
845 	return file;
846 
847 exit:
848 	fput(file);
849 	return ERR_PTR(err);
850 }
851 
852 struct file *open_exec(const char *name)
853 {
854 	struct filename *filename = getname_kernel(name);
855 	struct file *f = ERR_CAST(filename);
856 
857 	if (!IS_ERR(filename)) {
858 		f = do_open_execat(AT_FDCWD, filename, 0);
859 		putname(filename);
860 	}
861 	return f;
862 }
863 EXPORT_SYMBOL(open_exec);
864 
865 int kernel_read(struct file *file, loff_t offset,
866 		char *addr, unsigned long count)
867 {
868 	mm_segment_t old_fs;
869 	loff_t pos = offset;
870 	int result;
871 
872 	old_fs = get_fs();
873 	set_fs(get_ds());
874 	/* The cast to a user pointer is valid due to the set_fs() */
875 	result = vfs_read(file, (void __user *)addr, count, &pos);
876 	set_fs(old_fs);
877 	return result;
878 }
879 
880 EXPORT_SYMBOL(kernel_read);
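
/*
 * A sketch (illustrative assumption, loosely following binfmt_elf's
 * program-header loading) of how a binfmt handler might use kernel_read()
 * to pull in data beyond the BINPRM_BUF_SIZE bytes already cached in
 * bprm->buf.  The example_phdrs() name and the error handling are made up.
 */
#if 0
#include <linux/elf.h>

static struct elf_phdr *example_phdrs(struct file *file,
				      const struct elfhdr *ehdr)
{
	size_t size = ehdr->e_phnum * sizeof(struct elf_phdr);
	struct elf_phdr *phdata;
	int rc;

	phdata = kmalloc(size, GFP_KERNEL);
	if (!phdata)
		return ERR_PTR(-ENOMEM);

	rc = kernel_read(file, ehdr->e_phoff, (char *)phdata, size);
	if (rc < 0 || (size_t)rc != size) {
		kfree(phdata);
		return ERR_PTR(rc < 0 ? rc : -EIO);
	}
	return phdata;
}
#endif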
881 
882 int kernel_read_file(struct file *file, void **buf, loff_t *size,
883 		     loff_t max_size, enum kernel_read_file_id id)
884 {
885 	loff_t i_size, pos;
886 	ssize_t bytes = 0;
887 	int ret;
888 
889 	if (!S_ISREG(file_inode(file)->i_mode) || max_size < 0)
890 		return -EINVAL;
891 
892 	ret = security_kernel_read_file(file, id);
893 	if (ret)
894 		return ret;
895 
896 	ret = deny_write_access(file);
897 	if (ret)
898 		return ret;
899 
900 	i_size = i_size_read(file_inode(file));
901 	if (max_size > 0 && i_size > max_size) {
902 		ret = -EFBIG;
903 		goto out;
904 	}
905 	if (i_size <= 0) {
906 		ret = -EINVAL;
907 		goto out;
908 	}
909 
910 	if (id != READING_FIRMWARE_PREALLOC_BUFFER)
911 		*buf = vmalloc(i_size);
912 	if (!*buf) {
913 		ret = -ENOMEM;
914 		goto out;
915 	}
916 
917 	pos = 0;
918 	while (pos < i_size) {
919 		bytes = kernel_read(file, pos, (char *)(*buf) + pos,
920 				    i_size - pos);
921 		if (bytes < 0) {
922 			ret = bytes;
923 			goto out;
924 		}
925 
926 		if (bytes == 0)
927 			break;
928 		pos += bytes;
929 	}
930 
931 	if (pos != i_size) {
932 		ret = -EIO;
933 		goto out_free;
934 	}
935 
936 	ret = security_kernel_post_read_file(file, *buf, i_size, id);
937 	if (!ret)
938 		*size = pos;
939 
940 out_free:
941 	if (ret < 0) {
942 		if (id != READING_FIRMWARE_PREALLOC_BUFFER) {
943 			vfree(*buf);
944 			*buf = NULL;
945 		}
946 	}
947 
948 out:
949 	allow_write_access(file);
950 	return ret;
951 }
952 EXPORT_SYMBOL_GPL(kernel_read_file);
953 
954 int kernel_read_file_from_path(char *path, void **buf, loff_t *size,
955 			       loff_t max_size, enum kernel_read_file_id id)
956 {
957 	struct file *file;
958 	int ret;
959 
960 	if (!path || !*path)
961 		return -EINVAL;
962 
963 	file = filp_open(path, O_RDONLY, 0);
964 	if (IS_ERR(file))
965 		return PTR_ERR(file);
966 
967 	ret = kernel_read_file(file, buf, size, max_size, id);
968 	fput(file);
969 	return ret;
970 }
971 EXPORT_SYMBOL_GPL(kernel_read_file_from_path);
972 
973 int kernel_read_file_from_fd(int fd, void **buf, loff_t *size, loff_t max_size,
974 			     enum kernel_read_file_id id)
975 {
976 	struct fd f = fdget(fd);
977 	int ret = -EBADF;
978 
979 	if (!f.file)
980 		goto out;
981 
982 	ret = kernel_read_file(f.file, buf, size, max_size, id);
983 out:
984 	fdput(f);
985 	return ret;
986 }
987 EXPORT_SYMBOL_GPL(kernel_read_file_from_fd);
988 
989 ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
990 {
991 	ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
992 	if (res > 0)
993 		flush_icache_range(addr, addr + len);
994 	return res;
995 }
996 EXPORT_SYMBOL(read_code);
997 
998 static int exec_mmap(struct mm_struct *mm)
999 {
1000 	struct task_struct *tsk;
1001 	struct mm_struct *old_mm, *active_mm;
1002 
1003 	/* Notify parent that we're no longer interested in the old VM */
1004 	tsk = current;
1005 	old_mm = current->mm;
1006 	mm_release(tsk, old_mm);
1007 
1008 	if (old_mm) {
1009 		sync_mm_rss(old_mm);
1010 		/*
1011 		 * Make sure that if there is a core dump in progress
1012 		 * for the old mm, we get out and die instead of going
1013 		 * through with the exec.  We must hold mmap_sem around
1014 		 * checking core_state and changing tsk->mm.
1015 		 */
1016 		down_read(&old_mm->mmap_sem);
1017 		if (unlikely(old_mm->core_state)) {
1018 			up_read(&old_mm->mmap_sem);
1019 			return -EINTR;
1020 		}
1021 	}
1022 	task_lock(tsk);
1023 	active_mm = tsk->active_mm;
1024 	tsk->mm = mm;
1025 	tsk->active_mm = mm;
1026 	activate_mm(active_mm, mm);
1027 	tsk->mm->vmacache_seqnum = 0;
1028 	vmacache_flush(tsk);
1029 	task_unlock(tsk);
1030 	if (old_mm) {
1031 		up_read(&old_mm->mmap_sem);
1032 		BUG_ON(active_mm != old_mm);
1033 		setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
1034 		mm_update_next_owner(old_mm);
1035 		mmput(old_mm);
1036 		return 0;
1037 	}
1038 	mmdrop(active_mm);
1039 	return 0;
1040 }
1041 
1042 /*
1043  * This function makes sure the current process has its own signal table,
1044  * so that flush_signal_handlers can later reset the handlers without
1045  * disturbing other processes.  (Other processes might share the signal
1046  * table via the CLONE_SIGHAND option to clone().)
1047  */
1048 static int de_thread(struct task_struct *tsk)
1049 {
1050 	struct signal_struct *sig = tsk->signal;
1051 	struct sighand_struct *oldsighand = tsk->sighand;
1052 	spinlock_t *lock = &oldsighand->siglock;
1053 
1054 	if (thread_group_empty(tsk))
1055 		goto no_thread_group;
1056 
1057 	/*
1058 	 * Kill all other threads in the thread group.
1059 	 */
1060 	spin_lock_irq(lock);
1061 	if (signal_group_exit(sig)) {
1062 		/*
1063 		 * Another group action in progress, just
1064 		 * return so that the signal is processed.
1065 		 */
1066 		spin_unlock_irq(lock);
1067 		return -EAGAIN;
1068 	}
1069 
1070 	sig->group_exit_task = tsk;
1071 	sig->notify_count = zap_other_threads(tsk);
1072 	if (!thread_group_leader(tsk))
1073 		sig->notify_count--;
1074 
1075 	while (sig->notify_count) {
1076 		__set_current_state(TASK_KILLABLE);
1077 		spin_unlock_irq(lock);
1078 		schedule();
1079 		if (unlikely(__fatal_signal_pending(tsk)))
1080 			goto killed;
1081 		spin_lock_irq(lock);
1082 	}
1083 	spin_unlock_irq(lock);
1084 
1085 	/*
1086 	 * At this point all other threads have exited, all we have to
1087 	 * do is to wait for the thread group leader to become inactive,
1088 	 * and to assume its PID:
1089 	 */
1090 	if (!thread_group_leader(tsk)) {
1091 		struct task_struct *leader = tsk->group_leader;
1092 
1093 		for (;;) {
1094 			cgroup_threadgroup_change_begin(tsk);
1095 			write_lock_irq(&tasklist_lock);
1096 			/*
1097 			 * Do this under tasklist_lock to ensure that
1098 			 * exit_notify() can't miss ->group_exit_task
1099 			 */
1100 			sig->notify_count = -1;
1101 			if (likely(leader->exit_state))
1102 				break;
1103 			__set_current_state(TASK_KILLABLE);
1104 			write_unlock_irq(&tasklist_lock);
1105 			cgroup_threadgroup_change_end(tsk);
1106 			schedule();
1107 			if (unlikely(__fatal_signal_pending(tsk)))
1108 				goto killed;
1109 		}
1110 
1111 		/*
1112 		 * The only record we have of the real-time age of a
1113 		 * process, regardless of execs it's done, is start_time.
1114 		 * All the past CPU time is accumulated in signal_struct
1115 		 * from sister threads now dead.  But in this non-leader
1116 		 * exec, nothing survives from the original leader thread,
1117 		 * whose birth marks the true age of this process now.
1118 		 * When we take on its identity by switching to its PID, we
1119 		 * also take its birthdate (always earlier than our own).
1120 		 */
1121 		tsk->start_time = leader->start_time;
1122 		tsk->real_start_time = leader->real_start_time;
1123 
1124 		BUG_ON(!same_thread_group(leader, tsk));
1125 		BUG_ON(has_group_leader_pid(tsk));
1126 		/*
1127 		 * An exec() starts a new thread group with the
1128 		 * TGID of the previous thread group. Rehash the
1129 		 * two threads with a switched PID, and release
1130 		 * the former thread group leader:
1131 		 */
1132 
1133 		/* Become a process group leader with the old leader's pid.
1134 		 * The old leader becomes a thread of this thread group.
1135 		 * Note: The old leader also uses this pid until release_task
1136 		 *       is called.  Odd but simple and correct.
1137 		 */
1138 		tsk->pid = leader->pid;
1139 		change_pid(tsk, PIDTYPE_PID, task_pid(leader));
1140 		transfer_pid(leader, tsk, PIDTYPE_PGID);
1141 		transfer_pid(leader, tsk, PIDTYPE_SID);
1142 
1143 		list_replace_rcu(&leader->tasks, &tsk->tasks);
1144 		list_replace_init(&leader->sibling, &tsk->sibling);
1145 
1146 		tsk->group_leader = tsk;
1147 		leader->group_leader = tsk;
1148 
1149 		tsk->exit_signal = SIGCHLD;
1150 		leader->exit_signal = -1;
1151 
1152 		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
1153 		leader->exit_state = EXIT_DEAD;
1154 
1155 		/*
1156 		 * We are going to release_task()->ptrace_unlink() silently;
1157 		 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
1158 		 * the tracer won't block again waiting for this thread.
1159 		 */
1160 		if (unlikely(leader->ptrace))
1161 			__wake_up_parent(leader, leader->parent);
1162 		write_unlock_irq(&tasklist_lock);
1163 		cgroup_threadgroup_change_end(tsk);
1164 
1165 		release_task(leader);
1166 	}
1167 
1168 	sig->group_exit_task = NULL;
1169 	sig->notify_count = 0;
1170 
1171 no_thread_group:
1172 	/* we have changed execution domain */
1173 	tsk->exit_signal = SIGCHLD;
1174 
1175 #ifdef CONFIG_POSIX_TIMERS
1176 	exit_itimers(sig);
1177 	flush_itimer_signals();
1178 #endif
1179 
1180 	if (atomic_read(&oldsighand->count) != 1) {
1181 		struct sighand_struct *newsighand;
1182 		/*
1183 		 * This ->sighand is shared with the CLONE_SIGHAND
1184 		 * but not CLONE_THREAD task, switch to the new one.
1185 		 */
1186 		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1187 		if (!newsighand)
1188 			return -ENOMEM;
1189 
1190 		atomic_set(&newsighand->count, 1);
1191 		memcpy(newsighand->action, oldsighand->action,
1192 		       sizeof(newsighand->action));
1193 
1194 		write_lock_irq(&tasklist_lock);
1195 		spin_lock(&oldsighand->siglock);
1196 		rcu_assign_pointer(tsk->sighand, newsighand);
1197 		spin_unlock(&oldsighand->siglock);
1198 		write_unlock_irq(&tasklist_lock);
1199 
1200 		__cleanup_sighand(oldsighand);
1201 	}
1202 
1203 	BUG_ON(!thread_group_leader(tsk));
1204 	return 0;
1205 
1206 killed:
1207 	/* protects against exit_notify() and __exit_signal() */
1208 	read_lock(&tasklist_lock);
1209 	sig->group_exit_task = NULL;
1210 	sig->notify_count = 0;
1211 	read_unlock(&tasklist_lock);
1212 	return -EAGAIN;
1213 }
1214 
1215 char *get_task_comm(char *buf, struct task_struct *tsk)
1216 {
1217 	/* buf must be at least sizeof(tsk->comm) in size */
1218 	task_lock(tsk);
1219 	strncpy(buf, tsk->comm, sizeof(tsk->comm));
1220 	task_unlock(tsk);
1221 	return buf;
1222 }
1223 EXPORT_SYMBOL_GPL(get_task_comm);
1224 
1225 /*
1226  * These functions flush out all traces of the currently running executable
1227  * so that a new one can be started.
1228  */
1229 
1230 void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
1231 {
1232 	task_lock(tsk);
1233 	trace_task_rename(tsk, buf);
1234 	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
1235 	task_unlock(tsk);
1236 	perf_event_comm(tsk, exec);
1237 }
1238 
1239 int flush_old_exec(struct linux_binprm * bprm)
1240 {
1241 	int retval;
1242 
1243 	/*
1244 	 * Make sure we have a private signal table and that
1245 	 * we are unassociated from the previous thread group.
1246 	 */
1247 	retval = de_thread(current);
1248 	if (retval)
1249 		goto out;
1250 
1251 	/*
1252 	 * Must be called _before_ exec_mmap() as bprm->mm is
1253 	 * not visible until then. This also enables the update
1254 	 * to be lockless.
1255 	 */
1256 	set_mm_exe_file(bprm->mm, bprm->file);
1257 
1258 	/*
1259 	 * Release all of the old mmap stuff
1260 	 */
1261 	acct_arg_size(bprm, 0);
1262 	retval = exec_mmap(bprm->mm);
1263 	if (retval)
1264 		goto out;
1265 
1266 	bprm->mm = NULL;		/* We're using it now */
1267 
1268 	set_fs(USER_DS);
1269 	current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
1270 					PF_NOFREEZE | PF_NO_SETAFFINITY);
1271 	flush_thread();
1272 	current->personality &= ~bprm->per_clear;
1273 
1274 	/*
1275 	 * We have to apply CLOEXEC before we change whether the process is
1276 	 * dumpable (in setup_new_exec) to avoid a race with a process in userspace
1277 	 * trying to access the should-be-closed file descriptors of a process
1278 	 * undergoing exec(2).
1279 	 */
1280 	do_close_on_exec(current->files);
1281 	return 0;
1282 
1283 out:
1284 	return retval;
1285 }
1286 EXPORT_SYMBOL(flush_old_exec);
1287 
1288 void would_dump(struct linux_binprm *bprm, struct file *file)
1289 {
1290 	struct inode *inode = file_inode(file);
1291 	if (inode_permission(inode, MAY_READ) < 0) {
1292 		struct user_namespace *old, *user_ns;
1293 		bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
1294 
1295 		/* Ensure mm->user_ns contains the executable */
1296 		user_ns = old = bprm->mm->user_ns;
1297 		while ((user_ns != &init_user_ns) &&
1298 		       !privileged_wrt_inode_uidgid(user_ns, inode))
1299 			user_ns = user_ns->parent;
1300 
1301 		if (old != user_ns) {
1302 			bprm->mm->user_ns = get_user_ns(user_ns);
1303 			put_user_ns(old);
1304 		}
1305 	}
1306 }
1307 EXPORT_SYMBOL(would_dump);
1308 
1309 void setup_new_exec(struct linux_binprm * bprm)
1310 {
1311 	arch_pick_mmap_layout(current->mm);
1312 
1313 	/* This is the point of no return */
1314 	current->sas_ss_sp = current->sas_ss_size = 0;
1315 
1316 	if (uid_eq(current_euid(), current_uid()) && gid_eq(current_egid(), current_gid()))
1317 		set_dumpable(current->mm, SUID_DUMP_USER);
1318 	else
1319 		set_dumpable(current->mm, suid_dumpable);
1320 
1321 	perf_event_exec();
1322 	__set_task_comm(current, kbasename(bprm->filename), true);
1323 
1324 	/* Set the new mm task size. We have to do that late because it may
1325 	 * depend on TIF_32BIT, which is only updated in flush_thread() on
1326 	 * some architectures like powerpc.
1327 	 */
1328 	current->mm->task_size = TASK_SIZE;
1329 
1330 	/* install the new credentials */
1331 	if (!uid_eq(bprm->cred->uid, current_euid()) ||
1332 	    !gid_eq(bprm->cred->gid, current_egid())) {
1333 		current->pdeath_signal = 0;
1334 	} else {
1335 		if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
1336 			set_dumpable(current->mm, suid_dumpable);
1337 	}
1338 
1339 	/* An exec changes our domain. We are no longer part of the thread
1340 	   group */
1341 	current->self_exec_id++;
1342 	flush_signal_handlers(current, 0);
1343 }
1344 EXPORT_SYMBOL(setup_new_exec);
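
/*
 * A sketch (illustrative assumption, loosely following binfmt_elf) of the
 * order in which a load_binary() implementation is expected to call the
 * helpers above.  The entry value and the use of STACK_TOP are placeholders.
 */
#if 0
static int example_load_binary(struct linux_binprm *bprm)
{
	unsigned long entry = 0;	/* placeholder: the image's entry point */
	int retval;

	/* Point of no return: kills the other threads, drops the old mm. */
	retval = flush_old_exec(bprm);
	if (retval)
		return retval;

	setup_new_exec(bprm);

	/* Move the temporary stack built by bprm_mm_init() into place. */
	retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
	if (retval < 0)
		return retval;

	/* ... mmap the binary's segments and compute 'entry' here ... */

	install_exec_creds(bprm);	/* commit creds, drop cred_guard_mutex */
	start_thread(current_pt_regs(), entry, bprm->p);
	return 0;
}
#endif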
1345 
1346 /*
1347  * Prepare credentials and lock ->cred_guard_mutex.
1348  * install_exec_creds() commits the new creds and drops the lock.
1349  * Or, if exec fails before that, free_bprm() should release ->cred
1350  * and unlock.
1351  */
1352 int prepare_bprm_creds(struct linux_binprm *bprm)
1353 {
1354 	if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
1355 		return -ERESTARTNOINTR;
1356 
1357 	bprm->cred = prepare_exec_creds();
1358 	if (likely(bprm->cred))
1359 		return 0;
1360 
1361 	mutex_unlock(&current->signal->cred_guard_mutex);
1362 	return -ENOMEM;
1363 }
1364 
1365 static void free_bprm(struct linux_binprm *bprm)
1366 {
1367 	free_arg_pages(bprm);
1368 	if (bprm->cred) {
1369 		mutex_unlock(&current->signal->cred_guard_mutex);
1370 		abort_creds(bprm->cred);
1371 	}
1372 	if (bprm->file) {
1373 		allow_write_access(bprm->file);
1374 		fput(bprm->file);
1375 	}
1376 	/* If a binfmt changed the interp, free it. */
1377 	if (bprm->interp != bprm->filename)
1378 		kfree(bprm->interp);
1379 	kfree(bprm);
1380 }
1381 
1382 int bprm_change_interp(char *interp, struct linux_binprm *bprm)
1383 {
1384 	/* If a binfmt changed the interp, free it first. */
1385 	if (bprm->interp != bprm->filename)
1386 		kfree(bprm->interp);
1387 	bprm->interp = kstrdup(interp, GFP_KERNEL);
1388 	if (!bprm->interp)
1389 		return -ENOMEM;
1390 	return 0;
1391 }
1392 EXPORT_SYMBOL(bprm_change_interp);
1393 
1394 /*
1395  * install the new credentials for this executable
1396  */
1397 void install_exec_creds(struct linux_binprm *bprm)
1398 {
1399 	security_bprm_committing_creds(bprm);
1400 
1401 	commit_creds(bprm->cred);
1402 	bprm->cred = NULL;
1403 
1404 	/*
1405 	 * Disable monitoring for regular users
1406 	 * when executing setuid binaries. Must
1407 	 * wait until new credentials are committed
1408 	 * by commit_creds() above
1409 	 */
1410 	if (get_dumpable(current->mm) != SUID_DUMP_USER)
1411 		perf_event_exit_task(current);
1412 	/*
1413 	 * cred_guard_mutex must be held at least to this point to prevent
1414 	 * ptrace_attach() from altering our determination of the task's
1415 	 * credentials; any time after this it may be unlocked.
1416 	 */
1417 	security_bprm_committed_creds(bprm);
1418 	mutex_unlock(&current->signal->cred_guard_mutex);
1419 }
1420 EXPORT_SYMBOL(install_exec_creds);
1421 
1422 /*
1423  * determine how safe it is to execute the proposed program
1424  * - the caller must hold ->cred_guard_mutex to protect against
1425  *   PTRACE_ATTACH or seccomp thread-sync
1426  */
1427 static void check_unsafe_exec(struct linux_binprm *bprm)
1428 {
1429 	struct task_struct *p = current, *t;
1430 	unsigned n_fs;
1431 
1432 	if (p->ptrace)
1433 		bprm->unsafe |= LSM_UNSAFE_PTRACE;
1434 
1435 	/*
1436 	 * This isn't strictly necessary, but it makes it harder for LSMs to
1437 	 * mess up.
1438 	 */
1439 	if (task_no_new_privs(current))
1440 		bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
1441 
1442 	t = p;
1443 	n_fs = 1;
1444 	spin_lock(&p->fs->lock);
1445 	rcu_read_lock();
1446 	while_each_thread(p, t) {
1447 		if (t->fs == p->fs)
1448 			n_fs++;
1449 	}
1450 	rcu_read_unlock();
1451 
1452 	if (p->fs->users > n_fs)
1453 		bprm->unsafe |= LSM_UNSAFE_SHARE;
1454 	else
1455 		p->fs->in_exec = 1;
1456 	spin_unlock(&p->fs->lock);
1457 }
1458 
1459 static void bprm_fill_uid(struct linux_binprm *bprm)
1460 {
1461 	struct inode *inode;
1462 	unsigned int mode;
1463 	kuid_t uid;
1464 	kgid_t gid;
1465 
1466 	/*
1467 	 * Since this can be called multiple times (via prepare_binprm),
1468 	 * we must clear any previous work done when setting set[ug]id
1469 	 * bits from any earlier bprm->file uses (for example, when run
1470 	 * first for a setuid script and then again for its interpreter).
1471 	 */
1472 	bprm->cred->euid = current_euid();
1473 	bprm->cred->egid = current_egid();
1474 
1475 	if (!mnt_may_suid(bprm->file->f_path.mnt))
1476 		return;
1477 
1478 	if (task_no_new_privs(current))
1479 		return;
1480 
1481 	inode = bprm->file->f_path.dentry->d_inode;
1482 	mode = READ_ONCE(inode->i_mode);
1483 	if (!(mode & (S_ISUID|S_ISGID)))
1484 		return;
1485 
1486 	/* Be careful if suid/sgid is set */
1487 	inode_lock(inode);
1488 
1489 	/* Reload mode/uid/gid atomically now that the lock is held */
1490 	mode = inode->i_mode;
1491 	uid = inode->i_uid;
1492 	gid = inode->i_gid;
1493 	inode_unlock(inode);
1494 
1495 	/* We ignore suid/sgid if there are no mappings for them in the ns */
1496 	if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
1497 		 !kgid_has_mapping(bprm->cred->user_ns, gid))
1498 		return;
1499 
1500 	if (mode & S_ISUID) {
1501 		bprm->per_clear |= PER_CLEAR_ON_SETID;
1502 		bprm->cred->euid = uid;
1503 	}
1504 
1505 	if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
1506 		bprm->per_clear |= PER_CLEAR_ON_SETID;
1507 		bprm->cred->egid = gid;
1508 	}
1509 }
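
/*
 * Worked example (illustrative): executing a root-owned file with mode
 * 04755 (S_ISUID) on a mount without MNT_NOSUID sets bprm->cred->euid to
 * the owner (root) and flags PER_CLEAR_ON_SETID; a mode 02755 file
 * (S_ISGID | S_IXGRP) does the same for egid.  A mode 02644 file - setgid
 * but not group-executable - marks mandatory locking, not setgid-exec,
 * and changes nothing here.
 */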
1510 
1511 /*
1512  * Fill the binprm structure from the inode.
1513  * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
1514  *
1515  * This may be called multiple times for binary chains (scripts for example).
1516  */
1517 int prepare_binprm(struct linux_binprm *bprm)
1518 {
1519 	int retval;
1520 
1521 	bprm_fill_uid(bprm);
1522 
1523 	/* fill in binprm security blob */
1524 	retval = security_bprm_set_creds(bprm);
1525 	if (retval)
1526 		return retval;
1527 	bprm->cred_prepared = 1;
1528 
1529 	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
1530 	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
1531 }
1532 
1533 EXPORT_SYMBOL(prepare_binprm);
1534 
1535 /*
1536  * Arguments are '\0'-separated strings found at the location bprm->p
1537  * points to; chop off the first by relocating bprm->p to right after
1538  * the first '\0' encountered.
1539  */
1540 int remove_arg_zero(struct linux_binprm *bprm)
1541 {
1542 	int ret = 0;
1543 	unsigned long offset;
1544 	char *kaddr;
1545 	struct page *page;
1546 
1547 	if (!bprm->argc)
1548 		return 0;
1549 
1550 	do {
1551 		offset = bprm->p & ~PAGE_MASK;
1552 		page = get_arg_page(bprm, bprm->p, 0);
1553 		if (!page) {
1554 			ret = -EFAULT;
1555 			goto out;
1556 		}
1557 		kaddr = kmap_atomic(page);
1558 
1559 		for (; offset < PAGE_SIZE && kaddr[offset];
1560 				offset++, bprm->p++)
1561 			;
1562 
1563 		kunmap_atomic(kaddr);
1564 		put_arg_page(page);
1565 	} while (offset == PAGE_SIZE);
1566 
1567 	bprm->p++;
1568 	bprm->argc--;
1569 	ret = 0;
1570 
1571 out:
1572 	return ret;
1573 }
1574 EXPORT_SYMBOL(remove_arg_zero);
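
/*
 * A sketch (illustrative assumption, loosely modeled on binfmt_script and
 * heavily abbreviated) of how an interpreter-style handler combines
 * remove_arg_zero(), copy_strings_kernel(), bprm_change_interp(),
 * open_exec() and prepare_binprm() before recursing into
 * search_binary_handler().
 */
#if 0
static int example_load_script(struct linux_binprm *bprm)
{
	char *cp, *i_name;
	struct file *file;
	int retval;

	if (bprm->buf[0] != '#' || bprm->buf[1] != '!')
		return -ENOEXEC;

	/* Parse the interpreter path out of "#!..." (abbreviated). */
	bprm->buf[BINPRM_BUF_SIZE - 1] = '\0';
	cp = strchr(bprm->buf, '\n');
	if (cp)
		*cp = '\0';
	i_name = bprm->buf + 2;
	while (*i_name == ' ' || *i_name == '\t')
		i_name++;
	if (*i_name == '\0')
		return -ENOEXEC;

	/* Splice "i_name script-path" in ahead of the original argv[1..]. */
	retval = remove_arg_zero(bprm);
	if (retval)
		return retval;
	retval = copy_strings_kernel(1, &bprm->interp, bprm);
	if (retval < 0)
		return retval;
	bprm->argc++;
	retval = copy_strings_kernel(1, &i_name, bprm);
	if (retval < 0)
		return retval;
	bprm->argc++;
	retval = bprm_change_interp(i_name, bprm);
	if (retval < 0)
		return retval;

	/* Drop the script's file and restart with the interpreter's. */
	allow_write_access(bprm->file);
	fput(bprm->file);
	bprm->file = NULL;

	file = open_exec(i_name);
	if (IS_ERR(file))
		return PTR_ERR(file);
	bprm->file = file;
	retval = prepare_binprm(bprm);
	if (retval < 0)
		return retval;
	return search_binary_handler(bprm);
}
#endif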
1575 
1576 #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
1577 /*
1578  * cycle through the list of binary format handlers until one recognizes the image
1579  */
1580 int search_binary_handler(struct linux_binprm *bprm)
1581 {
1582 	bool need_retry = IS_ENABLED(CONFIG_MODULES);
1583 	struct linux_binfmt *fmt;
1584 	int retval;
1585 
1586 	/* This allows 4 levels of binfmt rewrites before failing hard. */
1587 	if (bprm->recursion_depth > 5)
1588 		return -ELOOP;
1589 
1590 	retval = security_bprm_check(bprm);
1591 	if (retval)
1592 		return retval;
1593 
1594 	retval = -ENOENT;
1595  retry:
1596 	read_lock(&binfmt_lock);
1597 	list_for_each_entry(fmt, &formats, lh) {
1598 		if (!try_module_get(fmt->module))
1599 			continue;
1600 		read_unlock(&binfmt_lock);
1601 		bprm->recursion_depth++;
1602 		retval = fmt->load_binary(bprm);
1603 		read_lock(&binfmt_lock);
1604 		put_binfmt(fmt);
1605 		bprm->recursion_depth--;
1606 		if (retval < 0 && !bprm->mm) {
1607 			/* we got to flush_old_exec() and failed after it */
1608 			read_unlock(&binfmt_lock);
1609 			force_sigsegv(SIGSEGV, current);
1610 			return retval;
1611 		}
1612 		if (retval != -ENOEXEC || !bprm->file) {
1613 			read_unlock(&binfmt_lock);
1614 			return retval;
1615 		}
1616 	}
1617 	read_unlock(&binfmt_lock);
1618 
1619 	if (need_retry) {
1620 		if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
1621 		    printable(bprm->buf[2]) && printable(bprm->buf[3]))
1622 			return retval;
1623 		if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
1624 			return retval;
1625 		need_retry = false;
1626 		goto retry;
1627 	}
1628 
1629 	return retval;
1630 }
1631 EXPORT_SYMBOL(search_binary_handler);
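
/*
 * Worked example (illustrative) of the request_module() fallback above:
 * an ELF image starts 0x7f 'E' 'L' 'F', which fails the printable() test,
 * so bytes 2 and 3 are read as a native-endian ushort (0x464c on a
 * little-endian machine) and the kernel requests "binfmt-464c" - an alias
 * a modular ELF handler could advertise via MODULE_ALIAS().  A buffer
 * beginning with four printable bytes (e.g. "#!/b") skips the fallback.
 */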
1632 
1633 static int exec_binprm(struct linux_binprm *bprm)
1634 {
1635 	pid_t old_pid, old_vpid;
1636 	int ret;
1637 
1638 	/* Need to fetch pid before load_binary changes it */
1639 	old_pid = current->pid;
1640 	rcu_read_lock();
1641 	old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
1642 	rcu_read_unlock();
1643 
1644 	ret = search_binary_handler(bprm);
1645 	if (ret >= 0) {
1646 		audit_bprm(bprm);
1647 		trace_sched_process_exec(current, old_pid, bprm);
1648 		ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
1649 		proc_exec_connector(current);
1650 	}
1651 
1652 	return ret;
1653 }
1654 
1655 /*
1656  * sys_execve() executes a new program.
1657  */
1658 static int do_execveat_common(int fd, struct filename *filename,
1659 			      struct user_arg_ptr argv,
1660 			      struct user_arg_ptr envp,
1661 			      int flags)
1662 {
1663 	char *pathbuf = NULL;
1664 	struct linux_binprm *bprm;
1665 	struct file *file;
1666 	struct files_struct *displaced;
1667 	int retval;
1668 
1669 	if (IS_ERR(filename))
1670 		return PTR_ERR(filename);
1671 
1672 	/*
1673 	 * We move the actual failure in case of RLIMIT_NPROC excess from
1674 	 * set*uid() to execve() because too many poorly written programs
1675 	 * don't check setuid() return code.  Here we additionally recheck
1676 	 * whether NPROC limit is still exceeded.
1677 	 */
1678 	if ((current->flags & PF_NPROC_EXCEEDED) &&
1679 	    atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
1680 		retval = -EAGAIN;
1681 		goto out_ret;
1682 	}
1683 
1684 	/* We're below the limit (still or again), so we don't want to make
1685 	 * further execve() calls fail. */
1686 	current->flags &= ~PF_NPROC_EXCEEDED;
1687 
1688 	retval = unshare_files(&displaced);
1689 	if (retval)
1690 		goto out_ret;
1691 
1692 	retval = -ENOMEM;
1693 	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1694 	if (!bprm)
1695 		goto out_files;
1696 
1697 	retval = prepare_bprm_creds(bprm);
1698 	if (retval)
1699 		goto out_free;
1700 
1701 	check_unsafe_exec(bprm);
1702 	current->in_execve = 1;
1703 
1704 	file = do_open_execat(fd, filename, flags);
1705 	retval = PTR_ERR(file);
1706 	if (IS_ERR(file))
1707 		goto out_unmark;
1708 
1709 	sched_exec();
1710 
1711 	bprm->file = file;
1712 	if (fd == AT_FDCWD || filename->name[0] == '/') {
1713 		bprm->filename = filename->name;
1714 	} else {
1715 		if (filename->name[0] == '\0')
1716 			pathbuf = kasprintf(GFP_TEMPORARY, "/dev/fd/%d", fd);
1717 		else
1718 			pathbuf = kasprintf(GFP_TEMPORARY, "/dev/fd/%d/%s",
1719 					    fd, filename->name);
1720 		if (!pathbuf) {
1721 			retval = -ENOMEM;
1722 			goto out_unmark;
1723 		}
1724 		/*
1725 		 * Record that a name derived from an O_CLOEXEC fd will be
1726 		 * inaccessible after exec. Relies on having exclusive access to
1727 		 * current->files (due to unshare_files above).
1728 		 */
1729 		if (close_on_exec(fd, rcu_dereference_raw(current->files->fdt)))
1730 			bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
1731 		bprm->filename = pathbuf;
1732 	}
1733 	bprm->interp = bprm->filename;
1734 
1735 	retval = bprm_mm_init(bprm);
1736 	if (retval)
1737 		goto out_unmark;
1738 
1739 	bprm->argc = count(argv, MAX_ARG_STRINGS);
1740 	if ((retval = bprm->argc) < 0)
1741 		goto out;
1742 
1743 	bprm->envc = count(envp, MAX_ARG_STRINGS);
1744 	if ((retval = bprm->envc) < 0)
1745 		goto out;
1746 
1747 	retval = prepare_binprm(bprm);
1748 	if (retval < 0)
1749 		goto out;
1750 
1751 	retval = copy_strings_kernel(1, &bprm->filename, bprm);
1752 	if (retval < 0)
1753 		goto out;
1754 
1755 	bprm->exec = bprm->p;
1756 	retval = copy_strings(bprm->envc, envp, bprm);
1757 	if (retval < 0)
1758 		goto out;
1759 
1760 	retval = copy_strings(bprm->argc, argv, bprm);
1761 	if (retval < 0)
1762 		goto out;
1763 
1764 	would_dump(bprm, bprm->file);
1765 
1766 	retval = exec_binprm(bprm);
1767 	if (retval < 0)
1768 		goto out;
1769 
1770 	/* execve succeeded */
1771 	current->fs->in_exec = 0;
1772 	current->in_execve = 0;
1773 	acct_update_integrals(current);
1774 	task_numa_free(current);
1775 	free_bprm(bprm);
1776 	kfree(pathbuf);
1777 	putname(filename);
1778 	if (displaced)
1779 		put_files_struct(displaced);
1780 	return retval;
1781 
1782 out:
1783 	if (bprm->mm) {
1784 		acct_arg_size(bprm, 0);
1785 		mmput(bprm->mm);
1786 	}
1787 
1788 out_unmark:
1789 	current->fs->in_exec = 0;
1790 	current->in_execve = 0;
1791 
1792 out_free:
1793 	free_bprm(bprm);
1794 	kfree(pathbuf);
1795 
1796 out_files:
1797 	if (displaced)
1798 		reset_files_struct(displaced);
1799 out_ret:
1800 	putname(filename);
1801 	return retval;
1802 }
1803 
1804 int do_execve(struct filename *filename,
1805 	const char __user *const __user *__argv,
1806 	const char __user *const __user *__envp)
1807 {
1808 	struct user_arg_ptr argv = { .ptr.native = __argv };
1809 	struct user_arg_ptr envp = { .ptr.native = __envp };
1810 	return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
1811 }
1812 
1813 int do_execveat(int fd, struct filename *filename,
1814 		const char __user *const __user *__argv,
1815 		const char __user *const __user *__envp,
1816 		int flags)
1817 {
1818 	struct user_arg_ptr argv = { .ptr.native = __argv };
1819 	struct user_arg_ptr envp = { .ptr.native = __envp };
1820 
1821 	return do_execveat_common(fd, filename, argv, envp, flags);
1822 }
1823 
1824 #ifdef CONFIG_COMPAT
1825 static int compat_do_execve(struct filename *filename,
1826 	const compat_uptr_t __user *__argv,
1827 	const compat_uptr_t __user *__envp)
1828 {
1829 	struct user_arg_ptr argv = {
1830 		.is_compat = true,
1831 		.ptr.compat = __argv,
1832 	};
1833 	struct user_arg_ptr envp = {
1834 		.is_compat = true,
1835 		.ptr.compat = __envp,
1836 	};
1837 	return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
1838 }
1839 
1840 static int compat_do_execveat(int fd, struct filename *filename,
1841 			      const compat_uptr_t __user *__argv,
1842 			      const compat_uptr_t __user *__envp,
1843 			      int flags)
1844 {
1845 	struct user_arg_ptr argv = {
1846 		.is_compat = true,
1847 		.ptr.compat = __argv,
1848 	};
1849 	struct user_arg_ptr envp = {
1850 		.is_compat = true,
1851 		.ptr.compat = __envp,
1852 	};
1853 	return do_execveat_common(fd, filename, argv, envp, flags);
1854 }
1855 #endif
1856 
1857 void set_binfmt(struct linux_binfmt *new)
1858 {
1859 	struct mm_struct *mm = current->mm;
1860 
1861 	if (mm->binfmt)
1862 		module_put(mm->binfmt->module);
1863 
1864 	mm->binfmt = new;
1865 	if (new)
1866 		__module_get(new->module);
1867 }
1868 EXPORT_SYMBOL(set_binfmt);
1869 
1870 /*
1871  * set_dumpable() stores the three-valued SUID_DUMP_* state into mm->flags.
1872  */
1873 void set_dumpable(struct mm_struct *mm, int value)
1874 {
1875 	unsigned long old, new;
1876 
1877 	if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
1878 		return;
1879 
1880 	do {
1881 		old = ACCESS_ONCE(mm->flags);
1882 		new = (old & ~MMF_DUMPABLE_MASK) | value;
1883 	} while (cmpxchg(&mm->flags, old, new) != old);
1884 }
1885 
1886 SYSCALL_DEFINE3(execve,
1887 		const char __user *, filename,
1888 		const char __user *const __user *, argv,
1889 		const char __user *const __user *, envp)
1890 {
1891 	return do_execve(getname(filename), argv, envp);
1892 }
1893 
1894 SYSCALL_DEFINE5(execveat,
1895 		int, fd, const char __user *, filename,
1896 		const char __user *const __user *, argv,
1897 		const char __user *const __user *, envp,
1898 		int, flags)
1899 {
1900 	int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
1901 
1902 	return do_execveat(fd,
1903 			   getname_flags(filename, lookup_flags, NULL),
1904 			   argv, envp, flags);
1905 }
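
/*
 * Userspace sketch (illustrative) driving the syscalls above.  Calling
 * execveat() with an empty pathname plus AT_EMPTY_PATH is essentially how
 * glibc implements fexecve(3) on kernels that support execveat(2).
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	char *argv[] = { "ls", "-l", NULL };
	char *envp[] = { "PATH=/bin:/usr/bin", NULL };
	int fd = open("/bin/ls", O_PATH | O_CLOEXEC);

	/* Equivalent of fexecve(fd, argv, envp). */
	syscall(__NR_execveat, fd, "", argv, envp, AT_EMPTY_PATH);

	return 1;	/* reached only if the exec failed */
}
#endif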
1906 
1907 #ifdef CONFIG_COMPAT
1908 COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
1909 	const compat_uptr_t __user *, argv,
1910 	const compat_uptr_t __user *, envp)
1911 {
1912 	return compat_do_execve(getname(filename), argv, envp);
1913 }
1914 
1915 COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
1916 		       const char __user *, filename,
1917 		       const compat_uptr_t __user *, argv,
1918 		       const compat_uptr_t __user *, envp,
1919 		       int,  flags)
1920 {
1921 	int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
1922 
1923 	return compat_do_execveat(fd,
1924 				  getname_flags(filename, lookup_flags, NULL),
1925 				  argv, envp, flags);
1926 }
1927 #endif
1928