// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h>	/* for audit_free() */
#include <linux/resource.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/task_work.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/kmsan.h>
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>
#include <linux/io_uring.h>
#include <linux/kprobes.h>
#include <linux/rethook.h>
#include <linux/sysfs.h>
#include <linux/user_events.h>
#include <linux/uaccess.h>

#include <uapi/linux/wait.h>

#include <asm/unistd.h>
#include <asm/mmu_context.h>

#include "exit.h"

/*
 * The default value should be high enough to not crash a system that randomly
 * crashes its kernel from time to time, but low enough to at least not permit
 * overflowing 32-bit refcounts or the ldsem writer count.
 */
static unsigned int oops_limit = 10000;

#ifdef CONFIG_SYSCTL
static struct ctl_table kern_exit_table[] = {
	{
		.procname	= "oops_limit",
		.data		= &oops_limit,
		.maxlen		= sizeof(oops_limit),
		.mode		= 0644,
		.proc_handler	= proc_douintvec,
	},
	{ }
};

static __init int kernel_exit_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_exit_table);
	return 0;
}
late_initcall(kernel_exit_sysctls_init);
#endif

static atomic_t oops_count = ATOMIC_INIT(0);

#ifdef CONFIG_SYSFS
static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
			       char *page)
{
	return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
}

static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);

static __init int kernel_exit_sysfs_init(void)
{
	sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
	return 0;
}
late_initcall(kernel_exit_sysfs_init);
#endif

static void __unhash_process(struct task_struct *p, bool group_dead)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {
		detach_pid(p, PIDTYPE_TGID);
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *tty;
	u64 utime, stime;

	sighand = rcu_dereference_check(tsk->sighand,
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);

#ifdef CONFIG_POSIX_TIMERS
	posix_cpu_timers_exit(tsk);
	if (group_dead)
		posix_cpu_timers_exit_group(tsk);
#endif

	if (group_dead) {
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exec_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
	}

	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
			      sizeof(unsigned long long));

	/*
	 * Accumulate here the counters for all threads as they die. We could
	 * skip the group leader because it is the last user of signal_struct,
	 * but we want to avoid the race with thread_group_cputime() which can
	 * see the empty ->thread_head list.
	 */
	task_cputime(tsk, &utime, &stime);
	write_seqlock(&sig->stats_lock);
	sig->utime += utime;
	sig->stime += stime;
	sig->gtime += task_gtime(tsk);
	sig->min_flt += tsk->min_flt;
	sig->maj_flt += tsk->maj_flt;
	sig->nvcsw += tsk->nvcsw;
	sig->nivcsw += tsk->nivcsw;
	sig->inblock += task_io_get_inblock(tsk);
	sig->oublock += task_io_get_oublock(tsk);
	task_io_accounting_add(&sig->ioac, &tsk->ioac);
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	sig->nr_threads--;
	__unhash_process(tsk, group_dead);
	write_sequnlock(&sig->stats_lock);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	kprobe_flush_task(tsk);
	rethook_flush_task(tsk);
	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}

void put_task_struct_rcu_user(struct task_struct *task)
{
	if (refcount_dec_and_test(&task->rcu_users))
		call_rcu(&task->rcu, delayed_put_task_struct);
}

void __weak release_thread(struct task_struct *dead_task)
{
}

void release_task(struct task_struct *p)
{
	struct task_struct *leader;
	struct pid *thread_pid;
	int zap_leader;
repeat:
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials. But shut RCU-lockdep up */
	rcu_read_lock();
	dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
	rcu_read_unlock();

	cgroup_release(p);

	write_lock_irq(&tasklist_lock);
	ptrace_release_task(p);
	thread_pid = get_pid(p->thread_pid);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader)
			&& leader->exit_state == EXIT_ZOMBIE) {
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 */
		zap_leader = do_notify_parent(leader, leader->exit_signal);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	seccomp_filter_release(p);
	proc_flush_pid(thread_pid);
	put_pid(thread_pid);
	release_thread(p);
	put_task_struct_rcu_user(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

int rcuwait_wake_up(struct rcuwait *w)
{
	int ret = 0;
	struct task_struct *task;

	rcu_read_lock();

	/*
	 * Order condition vs @task, such that everything prior to the load
	 * of @task is visible. This is the condition as to why the user called
	 * rcuwait_wake() in the first place. Pairs with set_current_state()
	 * barrier (A) in rcuwait_wait_event().
	 *
	 *    WAIT                WAKE
	 *    [S] tsk = current   [S] cond = true
	 *        MB (A)              MB (B)
	 *    [L] cond            [L] tsk
	 */
	smp_mb(); /* (B) */

	task = rcu_dereference(w->task);
	if (task)
		ret = wake_up_process(task);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rcuwait_wake_up);

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
					struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static bool has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return true;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

static void coredump_task_exit(struct task_struct *tsk)
{
	struct core_state *core_state;

	/*
	 * Serialize with any possible pending coredump.
	 * We must hold siglock around checking core_state
	 * and setting PF_POSTCOREDUMP.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group without PF_POSTCOREDUMP set.
	 */
	spin_lock_irq(&tsk->sighand->siglock);
	tsk->flags |= PF_POSTCOREDUMP;
	core_state = tsk->signal->core_state;
	spin_unlock_irq(&tsk->sighand->siglock);

	/* The vhost_worker does not participate in coredumps */
	if (core_state &&
	    ((tsk->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)) {
		struct core_thread self;

		self.task = current;
		if (self.task->flags & PF_SIGNALED)
			self.next = xchg(&core_state->dumper.next, &self);
		else
			self.task = NULL;
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
			if (!self.task) /* see coredump_finish() */
				break;
			schedule();
		}
		__set_current_state(TASK_RUNNING);
	}
}

#ifdef CONFIG_MEMCG
/*
 * A task is exiting.   If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	/*
	 * If the exiting or execing task is not the owner, it's
	 * someone else's problem.
	 */
	if (mm->owner != p)
		return;
	/*
	 * The current owner is exiting/execing and there are no other
	 * candidates.  Do not leave the mm pointing to a possibly
	 * freed task structure.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		WRITE_ONCE(mm->owner, NULL);
		return;
	}

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->real_parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else, we should not get here often.
	 */
	for_each_process(g) {
		if (g->flags & PF_KTHREAD)
			continue;
		for_each_thread(g, c) {
			if (c->mm == mm)
				goto assign_new_owner;
			if (c->mm)
				break;
		}
	}
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	WRITE_ONCE(mm->owner, NULL);
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	WRITE_ONCE(mm->owner, c);
	lru_gen_migrate_mm(mm);
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MEMCG */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(void)
{
	struct mm_struct *mm = current->mm;

	exit_mm_release(current, mm);
	if (!mm)
		return;
	mmap_read_lock(mm);
	mmgrab_lazy_tlb(mm);
	BUG_ON(mm != current->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(current);
	/*
	 * When a thread stops operating on an address space, the loop
	 * in membarrier_private_expedited() may not observe that
	 * tsk->mm, and the loop in membarrier_global_expedited() may
	 * not observe a MEMBARRIER_STATE_GLOBAL_EXPEDITED
	 * rq->membarrier_state, so those would not issue an IPI.
	 * Membarrier requires a memory barrier after accessing
	 * user-space memory, before clearing tsk->mm or the
	 * rq->membarrier_state.
	 */
	smp_mb__after_spinlock();
	local_irq_disable();
	current->mm = NULL;
	membarrier_update_current_mm(NULL);
	enter_lazy_tlb(mm, current);
	local_irq_enable();
	task_unlock(current);
	mmap_read_unlock(mm);
	mm_update_next_owner(mm);
	mmput(mm);
	if (test_thread_flag(TIF_MEMDIE))
		exit_oom_victim();
}

static struct task_struct *find_alive_thread(struct task_struct *p)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		if (!(t->flags & PF_EXITING))
			return t;
	}
	return NULL;
}

static struct task_struct *find_child_reaper(struct task_struct *father,
						struct list_head *dead)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *reaper = pid_ns->child_reaper;
	struct task_struct *p, *n;

	if (likely(reaper != father))
		return reaper;

	reaper = find_alive_thread(father);
	if (reaper) {
		pid_ns->child_reaper = reaper;
		return reaper;
	}

	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	zap_pid_ns_processes(pid_ns);
	write_lock_irq(&tasklist_lock);

	return father;
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give it to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give it to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father,
					   struct task_struct *child_reaper)
{
	struct task_struct *thread, *reaper;

	thread = find_alive_thread(father);
	if (thread)
		return thread;

	if (father->signal->has_child_subreaper) {
		unsigned int ns_level = task_pid(father)->level;
		/*
		 * Find the first ->is_child_subreaper ancestor in our pid_ns.
		 * We can't check reaper != child_reaper to ensure we do not
		 * cross the namespaces, the exiting parent could be injected
		 * by setns() + fork().
		 * We check pid->level, this is slightly more efficient than
		 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
		 */
		for (reaper = father->real_parent;
		     task_pid(reaper)->level == ns_level;
		     reaper = reaper->real_parent) {
			if (reaper == &init_task)
				break;
			if (!reaper->signal->is_child_subreaper)
				continue;
			thread = find_alive_thread(reaper);
			if (thread)
				return thread;
		}
	}

	return child_reaper;
}

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	if (unlikely(p->exit_state == EXIT_DEAD))
		return;

	/* We don't want people slaying init. */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		if (do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_DEAD;
			list_add(&p->ptrace_entry, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

/*
 * This does two things:
 *
 * A.  Make init inherit all the child processes
 * B.  Check to see if any process groups have become orphaned
 *	as a result of our exiting, and if they have any stopped
 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void forget_original_parent(struct task_struct *father,
					struct list_head *dead)
{
	struct task_struct *p, *t, *reaper;

	if (unlikely(!list_empty(&father->ptraced)))
		exit_ptrace(father, dead);

	/* Can drop and reacquire tasklist_lock */
	reaper = find_child_reaper(father, dead);
	if (list_empty(&father->children))
		return;

	reaper = find_new_reaper(father, reaper);
	list_for_each_entry(p, &father->children, sibling) {
		for_each_thread(p, t) {
			RCU_INIT_POINTER(t->real_parent, reaper);
			BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
			if (likely(!t->ptrace))
				t->parent = t->real_parent;
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t,
						    PIDTYPE_TGID);
		}
		/*
		 * If this is a threaded reparent there is no need to
		 * notify anyone anything has happened.
		 */
		if (!same_thread_group(reaper, father))
			reparent_leader(father, p, dead);
	}
	list_splice_tail_init(&father->children, &reaper->children);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	bool autoreap;
	struct task_struct *p, *n;
	LIST_HEAD(dead);

	write_lock_irq(&tasklist_lock);
	forget_original_parent(tsk, &dead);

	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	tsk->exit_state = EXIT_ZOMBIE;
	/*
	 * sub-thread or delay_group_leader(), wake up the
	 * PIDFD_THREAD waiters.
	 */
	if (!thread_group_empty(tsk))
		do_notify_pidfd(tsk);

	if (unlikely(tsk->ptrace)) {
		int sig = thread_group_leader(tsk) &&
				thread_group_empty(tsk) &&
				!ptrace_reparented(tsk) ?
			tsk->exit_signal : SIGCHLD;
		autoreap = do_notify_parent(tsk, sig);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			   do_notify_parent(tsk, tsk->exit_signal);
	} else {
		autoreap = true;
	}

	if (autoreap) {
		tsk->exit_state = EXIT_DEAD;
		list_add(&tsk->ptrace_entry, &dead);
	}

	/* mt-exec, de_thread() is waiting for group leader */
	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exec_task);
	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
			current->comm, task_pid_nr(current), free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

static void synchronize_group_exit(struct task_struct *tsk, long code)
{
	struct sighand_struct *sighand = tsk->sighand;
	struct signal_struct *signal = tsk->signal;

	spin_lock_irq(&sighand->siglock);
	signal->quick_threads--;
	if ((signal->quick_threads == 0) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT)) {
		signal->flags = SIGNAL_GROUP_EXIT;
		signal->group_exit_code = code;
		signal->group_stop_count = 0;
	}
	spin_unlock_irq(&sighand->siglock);
}

void __noreturn do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	WARN_ON(irqs_disabled());

	synchronize_group_exit(tsk, code);

	WARN_ON(tsk->plug);

	kcov_task_exit(tsk);
	kmsan_task_exit(tsk);

	coredump_task_exit(tsk);
	ptrace_event(PTRACE_EVENT_EXIT, code);
	user_events_exit(tsk);

	io_uring_files_cancel();
	exit_signals(tsk);  /* sets PF_EXITING */

	acct_update_integrals(tsk);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		/*
		 * If the last thread of global init has exited, panic
		 * immediately to get a useable coredump.
		 */
		if (unlikely(is_global_init(tsk)))
			panic("Attempted to kill init! exitcode=0x%08x\n",
				tsk->signal->group_exit_code ?: (int)code);

#ifdef CONFIG_POSIX_TIMERS
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk);
#endif
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm();

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	if (group_dead)
		disassociate_ctty(1);
	exit_task_namespaces(tsk);
	exit_task_work(tsk);
	exit_thread(tsk);

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 *
	 * because of cgroup mode, must be called before cgroup_exit()
	 */
	perf_event_exit_task(tsk);

	sched_autogroup_exit_task(tsk);
	cgroup_exit(tsk);

	/*
	 * FIXME: do that only when needed, using sched_exit tracepoint
	 */
	flush_ptrace_hw_breakpoint(tsk);

	exit_tasks_rcu_start();
	exit_notify(tsk, group_dead);
	proc_exit_connector(tsk);
	mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held();

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		free_pipe_info(tsk->splice_pipe);

	if (tsk->task_frag.page)
		put_page(tsk->task_frag.page);

	exit_task_stack_account(tsk);

	check_stack_usage();
	preempt_disable();
	if (tsk->nr_dirtied)
		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
	exit_rcu();
	exit_tasks_rcu_finish();

	lockdep_free_task(tsk);
	do_task_dead();
}

void __noreturn make_task_dead(int signr)
{
	/*
	 * Take the task off the cpu after something catastrophic has
	 * happened.
	 *
	 * We can get here from a kernel oops, sometimes with preemption off.
	 * Start by checking for critical errors.
	 * Then fix up important state like USER_DS and preemption.
	 * Then do everything else.
	 */
	struct task_struct *tsk = current;
	unsigned int limit;

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	if (unlikely(irqs_disabled())) {
		pr_info("note: %s[%d] exited with irqs disabled\n",
			current->comm, task_pid_nr(current));
		local_irq_enable();
	}
	if (unlikely(in_atomic())) {
		pr_info("note: %s[%d] exited with preempt_count %d\n",
			current->comm, task_pid_nr(current),
			preempt_count());
		preempt_count_set(PREEMPT_ENABLED);
	}

	/*
	 * Every time the system oopses, if the oops happens while a reference
	 * to an object was held, the reference leaks.
	 * If the oops doesn't also leak memory, repeated oopsing can cause
	 * reference counters to wrap around (if they're not using refcount_t).
	 * This means that repeated oopsing can make unexploitable-looking bugs
	 * exploitable through repeated oopsing.
	 * To make sure this can't happen, place an upper bound on how often the
	 * kernel may oops without panic().
	 */
	limit = READ_ONCE(oops_limit);
	if (atomic_inc_return(&oops_count) >= limit && limit)
		panic("Oopsed too often (kernel.oops_limit is %d)", limit);

	/*
	 * We're taking recursive faults here in make_task_dead. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		pr_alert("Fixing recursive fault but reboot is needed!\n");
		futex_exit_recursive(tsk);
		tsk->exit_state = EXIT_DEAD;
		refcount_inc(&tsk->rcu_users);
		do_task_dead();
	}

	do_exit(signr);
}

SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void __noreturn
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		exit_code = sig->group_exit_code;
	else if (sig->group_exec_task)
		exit_code = 0;
	else {
		struct sighand_struct *const sighand = current->sighand;

		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else if (sig->group_exec_task)
			exit_code = 0;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}

static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
	return	wo->wo_type == PIDTYPE_MAX ||
		task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int
eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return 0;

	/*
	 * Wait for all children (clone and not) if __WALL is set or
	 * if it is traced by us.
	 */
	if (ptrace || (wo->wo_flags & __WALL))
		return 1;

	/*
	 * Otherwise, wait for clone children *only* if __WCLONE is set;
	 * otherwise, wait for non-clone children *only*.
	 *
	 * Note: a "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD, or a non-leader thread which
	 * we can only see if it is traced by us.
	 */
	if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
		return 0;

	return 1;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
	int state, status;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
	struct waitid_info *infop;

	if (!likely(wo->wo_flags & WEXITED))
		return 0;

	if (unlikely(wo->wo_flags & WNOWAIT)) {
		status = (p->signal->flags & SIGNAL_GROUP_EXIT)
			? p->signal->group_exit_code : p->exit_code;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		sched_annotate_sleep();
		if (wo->wo_rusage)
			getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
		put_task_struct(p);
		goto out_info;
	}
	/*
	 * Move the task's state to DEAD/TRACE, only one thread can do this.
	 */
	state = (ptrace_reparented(p) && thread_group_leader(p)) ?
		EXIT_TRACE : EXIT_DEAD;
	if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
		return 0;
	/*
	 * We own this thread, nobody else can reap it.
	 */
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();

	/*
	 * Check thread_group_leader() to exclude the traced sub-threads.
	 */
	if (state == EXIT_DEAD && thread_group_leader(p)) {
		struct signal_struct *sig = p->signal;
		struct signal_struct *psig = current->signal;
		unsigned long maxrss;
		u64 tgutime, tgstime;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields because the whole thread group is dead
		 * and nobody can change them.
		 *
		 * psig->stats_lock also protects us from our sub-threads
		 * which can reap other children at the same time. Until
		 * we change k_getrusage()-like users to rely on this lock
		 * we have to take ->siglock as well.
		 *
		 * We use thread_group_cputime_adjusted() to get times for
		 * the thread group, which consolidates times for all threads
		 * in the group including the group leader.
		 */
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		spin_lock_irq(&current->sighand->siglock);
		write_seqlock(&psig->stats_lock);
		psig->cutime += tgutime + sig->cutime;
		psig->cstime += tgstime + sig->cstime;
		psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		maxrss = max(sig->maxrss, sig->cmaxrss);
		if (psig->cmaxrss < maxrss)
			psig->cmaxrss = maxrss;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		write_sequnlock(&psig->stats_lock);
		spin_unlock_irq(&current->sighand->siglock);
	}

	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	wo->wo_stat = status;

	if (state == EXIT_TRACE) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);

		/* If parent wants a zombie, don't release it now */
		state = EXIT_ZOMBIE;
		if (do_notify_parent(p, p->exit_signal))
			state = EXIT_DEAD;
		p->exit_state = state;
		write_unlock_irq(&tasklist_lock);
	}
	if (state == EXIT_DEAD)
		release_task(p);

out_info:
	infop = wo->wo_info;
	if (infop) {
		if ((status & 0x7f) == 0) {
			infop->cause = CLD_EXITED;
			infop->status = status >> 8;
		} else {
			infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			infop->status = status & 0x7f;
		}
		infop->pid = pid;
		infop->uid = uid;
	}

	return pid;
}

static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (ptrace) {
		if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
			return &p->exit_code;
	} else {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
	}
	return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct waitid_info *infop;
	int exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	if (!task_stopped_code(p, ptrace))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;

	uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	if (likely(!(wo->wo_flags & WNOWAIT)))
		wo->wo_stat = (exit_code << 8) | 0x7f;

	infop = wo->wo_info;
	if (infop) {
		infop->cause = why;
		infop->status = exit_code;
		infop->pid = pid;
		infop->uid = uid;
	}
	return pid;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	struct waitid_info *infop;
	pid_t pid;
	uid_t uid;

	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = from_kuid_munged(current_user_ns(), task_uid(p));
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	infop = wo->wo_info;
	if (!infop) {
		wo->wo_stat = 0xffff;
	} else {
		infop->cause = CLD_CONTINUED;
		infop->pid = pid;
		infop->uid = uid;
		infop->status = SIGCONT;
	}
	return pid;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	/*
	 * We can race with wait_task_zombie() from another thread.
	 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
	 * can't confuse the checks below.
	 */
	int exit_state = READ_ONCE(p->exit_state);
	int ret;

	if (unlikely(exit_state == EXIT_DEAD))
		return 0;

	ret = eligible_child(wo, ptrace, p);
	if (!ret)
		return ret;

	if (unlikely(exit_state == EXIT_TRACE)) {
		/*
		 * ptrace == 0 means we are the natural parent. In this case
		 * we should clear notask_error, debugger will notify us.
		 */
		if (likely(!ptrace))
			wo->notask_error = 0;
		return 0;
	}

	if (likely(!ptrace) && unlikely(p->ptrace)) {
		/*
		 * If it is traced by its real parent's group, just pretend
		 * the caller is ptrace_do_wait() and reap this child if it
		 * is zombie.
		 *
		 * This also hides group stop state from real parent; otherwise
		 * a single stop can be reported twice as group and ptrace stop.
		 * If a ptracer wants to distinguish these two events for its
		 * own children it should create a separate process which takes
		 * the role of real parent.
		 */
		if (!ptrace_reparented(p))
			ptrace = 1;
	}

	/* slay zombie? */
	if (exit_state == EXIT_ZOMBIE) {
		/* we don't reap group leaders with subthreads */
		if (!delay_group_leader(p)) {
			/*
			 * A zombie ptracee is only visible to its ptracer.
			 * Notification and reaping will be cascaded to the
			 * real parent when the ptracer detaches.
			 */
			if (unlikely(ptrace) || likely(!p->ptrace))
				return wait_task_zombie(wo, p);
		}

		/*
		 * Allow access to stopped/continued state via zombie by
		 * falling through.  Clearing of notask_error is complex.
		 *
		 * When !@ptrace:
		 *
		 * If WEXITED is set, notask_error should naturally be
		 * cleared.  If not, subset of WSTOPPED|WCONTINUED is set,
		 * so, if there are live subthreads, there are events to
		 * wait for.  If all subthreads are dead, it's still safe
		 * to clear - this function will be called again in finite
		 * amount time once all the subthreads are released and
		 * will then return without clearing.
		 *
		 * When @ptrace:
		 *
		 * Stopped state is per-task and thus can't change once the
		 * target task dies.  Only continued and exited can happen.
		 * Clear notask_error if WCONTINUED | WEXITED.
		 */
		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
			wo->notask_error = 0;
	} else {
		/*
		 * @p is alive and it's gonna stop, continue or exit, so
		 * there always is something to wait for.
		 */
		wo->notask_error = 0;
	}

	/*
	 * Wait for stopped.  Depending on @ptrace, different stopped state
	 * is used and the two don't interact with each other.
	 */
	ret = wait_task_stopped(wo, ptrace, p);
	if (ret)
		return ret;

	/*
	 * Wait for continued.  There's only one continued state and the
	 * ptracer can consume it which can confuse the real parent.  Don't
	 * use WCONTINUED from ptracer.  You don't need or want it.
	 */
	return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		int ret = wait_consider_task(wo, 0, p);

		if (ret)
			return ret;
	}

	return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(wo, 1, p);

		if (ret)
			return ret;
	}

	return 0;
}

bool pid_child_should_wake(struct wait_opts *wo, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return false;

	if ((wo->wo_flags & __WNOTHREAD) && wo->child_wait.private != p->parent)
		return false;

	return true;
}

static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
				int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);
	struct task_struct *p = key;

	if (pid_child_should_wake(wo, p))
		return default_wake_function(wait, mode, sync, key);

	return 0;
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
			   TASK_INTERRUPTIBLE, p);
}

static bool is_effectively_child(struct wait_opts *wo, bool ptrace,
				 struct task_struct *target)
{
	struct task_struct *parent =
		!ptrace ? target->real_parent : target->parent;

	return current == parent || (!(wo->wo_flags & __WNOTHREAD) &&
				     same_thread_group(current, parent));
}

/*
 * Optimization for waiting on PIDTYPE_PID.  No need to iterate through child
 * and tracee lists to find the target task.
 */
static int do_wait_pid(struct wait_opts *wo)
{
	bool ptrace;
	struct task_struct *target;
	int retval;

	ptrace = false;
	target = pid_task(wo->wo_pid, PIDTYPE_TGID);
	if (target && is_effectively_child(wo, ptrace, target)) {
		retval = wait_consider_task(wo, ptrace, target);
		if (retval)
			return retval;
	}

	ptrace = true;
	target = pid_task(wo->wo_pid, PIDTYPE_PID);
	if (target && target->ptrace &&
	    is_effectively_child(wo, ptrace, target)) {
		retval = wait_consider_task(wo, ptrace, target);
		if (retval)
			return retval;
	}

	return 0;
}

long __do_wait(struct wait_opts *wo)
{
	long retval;

	/*
	 * If there is nothing that can match our criteria, just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
	 */
	wo->notask_error = -ECHILD;
	if ((wo->wo_type < PIDTYPE_MAX) &&
	    (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
		goto notask;

	read_lock(&tasklist_lock);

	if (wo->wo_type == PIDTYPE_PID) {
		retval = do_wait_pid(wo);
		if (retval)
			return retval;
	} else {
		struct task_struct *tsk = current;

		do {
			retval = do_wait_thread(wo, tsk);
			if (retval)
				return retval;

			retval = ptrace_do_wait(wo, tsk);
			if (retval)
				return retval;

			if (wo->wo_flags & __WNOTHREAD)
				break;
		} while_each_thread(current, tsk);
	}
	read_unlock(&tasklist_lock);

notask:
	retval = wo->notask_error;
	if (!retval && !(wo->wo_flags & WNOHANG))
		return -ERESTARTSYS;

	return retval;
}

static long do_wait(struct wait_opts *wo)
{
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		retval = __do_wait(wo);
		if (retval != -ERESTARTSYS)
			break;
		if (signal_pending(current))
			break;
		schedule();
	} while (1);

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	return retval;
}

int kernel_waitid_prepare(struct wait_opts *wo, int which, pid_t upid,
			  struct waitid_info *infop, int options,
			  struct rusage *ru)
{
	unsigned int f_flags = 0;
	struct pid *pid = NULL;
	enum pid_type type;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;

		pid = find_get_pid(upid);
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid < 0)
			return -EINVAL;

		if (upid)
			pid = find_get_pid(upid);
		else
			pid = get_task_pid(current, PIDTYPE_PGID);
		break;
	case P_PIDFD:
		type = PIDTYPE_PID;
		if (upid < 0)
			return -EINVAL;

		pid = pidfd_get_pid(upid, &f_flags);
		if (IS_ERR(pid))
			return PTR_ERR(pid);

		break;
	default:
		return -EINVAL;
	}

	wo->wo_type	= type;
	wo->wo_pid	= pid;
	wo->wo_flags	= options;
	wo->wo_info	= infop;
	wo->wo_rusage	= ru;
	if (f_flags & O_NONBLOCK)
		wo->wo_flags |= WNOHANG;

	return 0;
}

static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
			  int options, struct rusage *ru)
{
	struct wait_opts wo;
	long ret;

	ret = kernel_waitid_prepare(&wo, which, upid, infop, options, ru);
	if (ret)
		return ret;

	ret = do_wait(&wo);
	if (!ret && !(options & WNOHANG) && (wo.wo_flags & WNOHANG))
		ret = -EAGAIN;

	put_pid(wo.wo_pid);
	return ret;
}

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct rusage r;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
	int signo = 0;

	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	if (!infop)
		return err;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_write_access_end();
	return err;
Efault:
	user_write_access_end();
	return -EFAULT;
}

long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
		  struct rusage *ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	/* -INT_MIN is not defined */
	if (upid == INT_MIN)
		return -ESRCH;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_task_pid(current, PIDTYPE_PGID);
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options | WEXITED;
	wo.wo_info	= NULL;
	wo.wo_stat	= 0;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);
	put_pid(pid);
	if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
		ret = -EFAULT;

	return ret;
}

int kernel_wait(pid_t pid, int *stat)
{
	struct wait_opts wo = {
		.wo_type	= PIDTYPE_PID,
		.wo_pid		= find_get_pid(pid),
		.wo_flags	= WEXITED,
	};
	int ret;

	ret = do_wait(&wo);
	if (ret > 0 && wo.wo_stat)
		*stat = wo.wo_stat;
	put_pid(wo.wo_pid);
	return ret;
}

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);

	if (err > 0) {
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	return err;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return kernel_wait4(pid, stat_addr, options, NULL);
}

#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(wait4,
	compat_pid_t, pid,
	compat_uint_t __user *, stat_addr,
	int, options,
	struct compat_rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);
	if (err > 0) {
		if (ru && put_compat_rusage(&r, ru))
			return -EFAULT;
	}
	return err;
}

COMPAT_SYSCALL_DEFINE5(waitid,
		int, which, compat_pid_t, pid,
		struct compat_siginfo __user *, infop, int, options,
		struct compat_rusage __user *, uru)
{
	struct rusage ru;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
	int signo = 0;
	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (uru) {
			/* kernel_waitid() overwrites everything in ru */
			if (COMPAT_USE_64BIT_TIME)
				err = copy_to_user(uru, &ru, sizeof(ru));
			else
				err = put_compat_rusage(&ru, uru);
			if (err)
				return -EFAULT;
		}
	}

	if (!infop)
		return err;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_write_access_end();
	return err;
Efault:
	user_write_access_end();
	return -EFAULT;
}
#endif

/*
 * This needs to be __function_aligned as GCC implicitly makes any
 * implementation of abort() cold and drops alignment specified by
 * -falign-functions=N.
 *
 * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c11
 */
__weak __function_aligned void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);