/*-
 * Copyright (c) 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)proc.h	8.15 (Berkeley) 5/19/95
 * $FreeBSD$
 */

#ifndef _SYS_PROC_H_
#define	_SYS_PROC_H_

#include <sys/callout.h>		/* For struct callout. */
#include <sys/event.h>			/* For struct klist. */
#include <sys/condvar.h>
#ifndef _KERNEL
#include <sys/filedesc.h>
#endif
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/lock_profile.h>
#include <sys/_mutex.h>
#include <sys/osd.h>
#include <sys/priority.h>
#include <sys/rtprio.h>			/* XXX. */
#include <sys/runq.h>
#include <sys/resource.h>
#include <sys/sigio.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#ifndef _KERNEL
#include <sys/time.h>			/* For structs itimerval, timeval. */
#else
#include <sys/pcpu.h>
#endif
#include <sys/ucontext.h>
#include <sys/ucred.h>
#include <sys/_vm_domain.h>
#include <machine/proc.h>		/* Machine-dependent proc substruct. */

/*
 * One structure allocated per session.
 *
 * List of locks
 * (m)		locked by s_mtx mtx
 * (e)		locked by proctree_lock sx
 * (c)		const until freeing
 */
struct session {
	u_int		s_count;	/* Ref cnt; pgrps in session - atomic. */
	struct proc	*s_leader;	/* (m + e) Session leader. */
	struct vnode	*s_ttyvp;	/* (m) Vnode of controlling tty. */
	struct cdev_priv *s_ttydp;	/* (m) Device of controlling tty. */
	struct tty	*s_ttyp;	/* (e) Controlling tty. */
	pid_t		s_sid;		/* (c) Session ID. */
					/* (m) Setlogin() name: */
	char		s_login[roundup(MAXLOGNAME, sizeof(long))];
	struct mtx	s_mtx;		/* Mutex to protect members. */
};
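/*
 * Illustrative note (not normative): s_count is a reference count covering
 * the process groups that point at this session.  Code outside this file is
 * expected to manipulate it only through sess_hold()/sess_release(),
 * declared further below, e.g. roughly:
 *
 *	sess_hold(sess);
 *	... use sess->s_sid, SESS_LOCK(sess)/SESS_UNLOCK(sess) as needed ...
 *	sess_release(sess);	// session is freed when the count drops to 0
 */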
/*
 * One structure allocated per process group.
 *
 * List of locks
 * (m)		locked by pg_mtx mtx
 * (e)		locked by proctree_lock sx
 * (c)		const until freeing
 */
struct pgrp {
	LIST_ENTRY(pgrp) pg_hash;	/* (e) Hash chain. */
	LIST_HEAD(, proc) pg_members;	/* (m + e) Pointer to pgrp members. */
	struct session	*pg_session;	/* (c) Pointer to session. */
	struct sigiolst	pg_sigiolst;	/* (m) List of sigio sources. */
	pid_t		pg_id;		/* (c) Process group id. */
	int		pg_jobc;	/* (m) Job control process count. */
	struct mtx	pg_mtx;		/* Mutex to protect members */
};

/*
 * pargs, used to hold a copy of the command line, if it had a sane length.
 */
struct pargs {
	u_int	ar_ref;		/* Reference count. */
	u_int	ar_length;	/* Length. */
	u_char	ar_args[1];	/* Arguments. */
};

/*-
 * Description of a process.
 *
 * This structure contains the information needed to manage a thread of
 * control, known in UN*X as a process; it has references to substructures
 * containing descriptions of things that the process uses, but may share
 * with related processes.  The process structure and the substructures
 * are always addressable except for those marked "(CPU)" below,
 * which might be addressable only on a processor on which the process
 * is running.
 *
 * Below is a key of locks used to protect each member of struct proc.  The
 * lock is indicated by a reference to a specific character in parens in the
 * associated comment.
 *	* - not yet protected
 *	a - only touched by curproc or parent during fork/wait
 *	b - created at fork, never changes
 *		(exception aiods switch vmspaces, but they are also
 *		marked 'P_SYSTEM' so hopefully it will be left alone)
 *	c - locked by proc mtx
 *	d - locked by allproc_lock lock
 *	e - locked by proctree_lock lock
 *	f - session mtx
 *	g - process group mtx
 *	h - callout_lock mtx
 *	i - by curproc or the master session mtx
 *	j - locked by proc slock
 *	k - only accessed by curthread
 *	k*- only accessed by curthread and from an interrupt
 *	l - the attaching proc or attaching proc parent
 *	m - Giant
 *	n - not locked, lazy
 *	o - ktrace lock
 *	q - td_contested lock
 *	r - p_peers lock
 *	t - thread lock
 *	u - process stat lock
 *	w - process timer lock
 *	x - created at fork, only changes during single threading in exec
 *	y - created at first aio, doesn't change until exit or exec at which
 *	    point we are single-threaded and only curthread changes it
 *	z - zombie threads lock
 *
 * If the locking key specifies two identifiers (for example, p_pptr) then
 * either lock is sufficient for read access, but both locks must be held
 * for write access.
 */
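/*
 * Illustrative sketch of the "two identifier" rule above, using p_pptr,
 * which is keyed (c + e): either the proc mutex or proctree_lock suffices
 * to read it, but both must be held to change it (assumed, simplified):
 *
 *	PROC_LOCK(p);			// read side: the proc mutex alone
 *	pp = p->p_pptr;
 *	PROC_UNLOCK(p);
 *
 *	sx_xlock(&proctree_lock);	// write side: both locks held
 *	PROC_LOCK(p);
 *	p->p_pptr = newparent;
 *	PROC_UNLOCK(p);
 *	sx_xunlock(&proctree_lock);
 */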
struct cpuset;
struct filecaps;
struct filemon;
struct kaioinfo;
struct kaudit_record;
struct kdtrace_proc;
struct kdtrace_thread;
struct mqueue_notifier;
struct nlminfo;
struct p_sched;
struct proc;
struct procdesc;
struct racct;
struct sbuf;
struct sleepqueue;
struct syscall_args;
struct td_sched;
struct thread;
struct trapframe;
struct turnstile;

/*
 * XXX: Does this belong in resource.h or resourcevar.h instead?
 * Resource usage extension.  The times in rusage structs in the kernel are
 * never up to date.  The actual times are kept as runtimes and tick counts
 * (with control info in the "previous" times), and are converted when
 * userland asks for rusage info.  Backwards compatibility prevents putting
 * this directly in the user-visible rusage struct.
 *
 * Locking for p_rux: (cu) means (u) for p_rux and (c) for p_crux.
 * Locking for td_rux: (t) for all fields.
 */
struct rusage_ext {
	uint64_t	rux_runtime;	/* (cu) Real time. */
	uint64_t	rux_uticks;	/* (cu) Statclock hits in user mode. */
	uint64_t	rux_sticks;	/* (cu) Statclock hits in sys mode. */
	uint64_t	rux_iticks;	/* (cu) Statclock hits in intr mode. */
	uint64_t	rux_uu;		/* (c) Previous user time in usec. */
	uint64_t	rux_su;		/* (c) Previous sys time in usec. */
	uint64_t	rux_tu;		/* (c) Previous total time in usec. */
};
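/*
 * Illustrative note (assumed, simplified): the conversion hinted at above is
 * performed by the kernel's calcru()/rufetch() path, which turns rux_runtime
 * (cpu ticks) into microseconds and then splits the total among user, system
 * and interrupt time in proportion to the statclock hit counters, roughly:
 *
 *	tu = cputick2usec(rux_runtime);
 *	uu = (tu * rux_uticks) / (rux_uticks + rux_sticks + rux_iticks);
 *
 * The results are cached in rux_uu/rux_su/rux_tu so reported times never
 * go backwards.
 */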
/*
 * Kernel runnable context (thread).
 * This is what is put to sleep and reactivated.
 * Thread context.  Processes may have multiple threads.
 */
struct thread {
	struct mtx	*volatile td_lock; /* replaces sched lock */
	struct proc	*td_proc;	/* (*) Associated process. */
	TAILQ_ENTRY(thread) td_plist;	/* (*) All threads in this proc. */
	TAILQ_ENTRY(thread) td_runq;	/* (t) Run queue. */
	TAILQ_ENTRY(thread) td_slpq;	/* (t) Sleep queue. */
	TAILQ_ENTRY(thread) td_lockq;	/* (t) Lock queue. */
	LIST_ENTRY(thread) td_hash;	/* (d) Hash chain. */
	struct cpuset	*td_cpuset;	/* (t) CPU affinity mask. */
	struct seltd	*td_sel;	/* Select queue/channel. */
	struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */
	struct turnstile *td_turnstile;	/* (k) Associated turnstile. */
	struct rl_q_entry *td_rlqe;	/* (k) Associated range lock entry. */
	struct umtx_q	*td_umtxq;	/* (c?) Link for when we're blocked. */
	struct vm_domain_policy td_vm_dom_policy; /* (c) current numa domain policy */
	lwpid_t		td_tid;		/* (b) Thread ID. */
	sigqueue_t	td_sigqueue;	/* (c) Sigs arrived, not delivered. */
#define	td_siglist	td_sigqueue.sq_signals
	u_char		td_lend_user_pri; /* (t) Lend user pri. */

/* Cleared during fork1() */
#define	td_startzero td_flags
	int		td_flags;	/* (t) TDF_* flags. */
	int		td_inhibitors;	/* (t) Why can not run. */
	int		td_pflags;	/* (k) Private thread (TDP_*) flags. */
	int		td_dupfd;	/* (k) Ret value from fdopen. XXX */
	int		td_sqqueue;	/* (t) Sleepqueue queue blocked on. */
	void		*td_wchan;	/* (t) Sleep address. */
	const char	*td_wmesg;	/* (t) Reason for sleep. */
	volatile u_char td_owepreempt;	/* (k*) Preempt on last critical_exit */
	u_char		td_tsqueue;	/* (t) Turnstile queue blocked on. */
	short		td_locks;	/* (k) Debug: count of non-spin locks */
	short		td_rw_rlocks;	/* (k) Count of rwlock read locks. */
	short		td_lk_slocks;	/* (k) Count of lockmgr shared locks. */
	short		td_stopsched;	/* (k) Scheduler stopped. */
	struct turnstile *td_blocked;	/* (t) Lock thread is blocked on. */
	const char	*td_lockname;	/* (t) Name of lock blocked on. */
	LIST_HEAD(, turnstile) td_contested; /* (q) Contested locks. */
	struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */
	int		td_intr_nesting_level; /* (k) Interrupt recursion. */
	int		td_pinned;	/* (k) Temporary cpu pin count. */
	struct ucred	*td_ucred;	/* (k) Reference to credentials. */
	struct plimit	*td_limit;	/* (k) Resource limits. */
	int		td_slptick;	/* (t) Time at sleep. */
	int		td_blktick;	/* (t) Time spent blocked. */
	int		td_swvoltick;	/* (t) Time at last SW_VOL switch. */
	int		td_swinvoltick;	/* (t) Time at last SW_INVOL switch. */
	u_int		td_cow;		/* (*) Number of copy-on-write faults */
	struct rusage	td_ru;		/* (t) rusage information. */
	struct rusage_ext td_rux;	/* (t) Internal rusage information. */
	uint64_t	td_incruntime;	/* (t) Cpu ticks to transfer to proc. */
	uint64_t	td_runtime;	/* (t) How many cpu ticks we've run. */
	u_int		td_pticks;	/* (t) Statclock hits for profiling */
	u_int		td_sticks;	/* (t) Statclock hits in system mode. */
	u_int		td_iticks;	/* (t) Statclock hits in intr mode. */
	u_int		td_uticks;	/* (t) Statclock hits in user mode. */
	int		td_intrval;	/* (t) Return value for sleepq. */
	sigset_t	td_oldsigmask;	/* (k) Saved mask from pre sigpause. */
	volatile u_int	td_generation;	/* (k) For detection of preemption */
	stack_t		td_sigstk;	/* (k) Stack ptr and on-stack flag. */
	int		td_xsig;	/* (c) Signal for ptrace */
	u_long		td_profil_addr;	/* (k) Temporary addr until AST. */
	u_int		td_profil_ticks; /* (k) Temporary ticks until AST. */
	char		td_name[MAXCOMLEN + 1];	/* (*) Thread name. */
	struct file	*td_fpop;	/* (k) file referencing cdev under op */
	int		td_dbgflags;	/* (c) Userland debugger flags */
	struct ksiginfo td_dbgksi;	/* (c) ksi reflected to debugger. */
	int		td_ng_outbound;	/* (k) Thread entered ng from above. */
	struct osd	td_osd;		/* (k) Object specific data. */
	struct vm_map_entry *td_map_def_user; /* (k) Deferred entries. */
	pid_t		td_dbg_forked;	/* (c) Child pid for debugger. */
	u_int		td_vp_reserv;	/* (k) Count of reserved vnodes. */
	int		td_no_sleeping;	/* (k) Sleeping disabled count. */
	int		td_dom_rr_idx;	/* (k) RR Numa domain selection. */
	void		*td_su;		/* (k) FFS SU private */
#define	td_endzero td_sigmask

/* Copied during fork1() or create_thread(). */
#define	td_startcopy td_endzero
	sigset_t	td_sigmask;	/* (c) Current signal mask. */
	u_char		td_rqindex;	/* (t) Run queue index. */
	u_char		td_base_pri;	/* (t) Thread base kernel priority. */
	u_char		td_priority;	/* (t) Thread active priority. */
	u_char		td_pri_class;	/* (t) Scheduling class. */
	u_char		td_user_pri;	/* (t) User pri from estcpu and nice. */
	u_char		td_base_user_pri; /* (t) Base user pri */
	u_int		td_dbg_sc_code;	/* (c) Syscall code to debugger. */
	u_int		td_dbg_sc_narg;	/* (c) Syscall arg count to debugger.*/
	uintptr_t	td_rb_list;	/* (k) Robust list head. */
	uintptr_t	td_rbp_list;	/* (k) Robust priv list head. */
	uintptr_t	td_rb_inact;	/* (k) Current in-action mutex loc. */
#define	td_endcopy td_pcb

	/*
	 * Fields that must be manually set in fork1() or create_thread()
	 * or already have been set in the allocator, constructor, etc.
	 */
	struct pcb	*td_pcb;	/* (k) Kernel VA of pcb and kstack. */
	enum {
		TDS_INACTIVE = 0x0,
		TDS_INHIBITED,
		TDS_CAN_RUN,
		TDS_RUNQ,
		TDS_RUNNING
	} td_state;			/* (t) thread state */
	union {
		register_t	tdu_retval[2];
		off_t		tdu_off;
	} td_uretoff;			/* (k) Syscall aux returns. */
#define	td_retval	td_uretoff.tdu_retval
	u_int		td_cowgen;	/* (k) Generation of COW pointers. */
	struct callout	td_slpcallout;	/* (h) Callout for sleep. */
	struct trapframe *td_frame;	/* (k) */
	struct vm_object *td_kstack_obj; /* (a) Kstack object. */
	vm_offset_t	td_kstack;	/* (a) Kernel VA of kstack. */
	int		td_kstack_pages; /* (a) Size of the kstack. */
	volatile u_int	td_critnest;	/* (k*) Critical section nest level. */
	struct mdthread td_md;		/* (k) Any machine-dependent fields. */
	struct kaudit_record *td_ar;	/* (k) Active audit record, if any. */
	struct lpohead	td_lprof[2];	/* (a) lock profiling objects. */
	struct kdtrace_thread *td_dtrace; /* (*) DTrace-specific data. */
	int		td_errno;	/* Error returned by last syscall. */
	struct vnet	*td_vnet;	/* (k) Effective vnet. */
	const char	*td_vnet_lpush;	/* (k) Debugging vnet push / pop. */
	struct trapframe *td_intr_frame; /* (k) Frame of the current irq */
	struct proc	*td_rfppwait_p;	/* (k) The vforked child */
	struct vm_page	**td_ma;	/* (k) uio pages held */
	int		td_ma_cnt;	/* (k) size of *td_ma */
	void		*td_emuldata;	/* Emulator state data */
	int		td_lastcpu;	/* (t) Last cpu we were on. */
	int		td_oncpu;	/* (t) Which cpu we are on. */
};
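/*
 * Illustrative sketch (assumed, simplified): the td_startzero/td_endzero and
 * td_startcopy/td_endcopy markers above let fork and thread creation treat
 * whole groups of fields as raw byte ranges, roughly:
 *
 *	bzero(&td2->td_startzero,
 *	    __rangeof(struct thread, td_startzero, td_endzero));
 *	bcopy(&td->td_startcopy, &td2->td_startcopy,
 *	    __rangeof(struct thread, td_startcopy, td_endcopy));
 *
 * so adding a field inside either region needs no per-field init code.
 */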
struct thread0_storage {
	struct thread	t0st_thread;
	uint64_t	t0st_sched[10];
};

struct mtx *thread_lock_block(struct thread *);
void thread_lock_unblock(struct thread *, struct mtx *);
void thread_lock_set(struct thread *, struct mtx *);
#define	THREAD_LOCK_ASSERT(td, type)					\
do {									\
	struct mtx *__m = (td)->td_lock;				\
	if (__m != &blocked_lock)					\
		mtx_assert(__m, (type));				\
} while (0)

#ifdef INVARIANTS
#define	THREAD_LOCKPTR_ASSERT(td, lock)					\
do {									\
	struct mtx *__m = (td)->td_lock;				\
	KASSERT((__m == &blocked_lock || __m == (lock)),		\
	    ("Thread %p lock %p does not match %p", td, __m, (lock)));	\
} while (0)

#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#else
#define	THREAD_LOCKPTR_ASSERT(td, lock)

#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#endif

/*
 * Flags kept in td_flags:
 * To change these you MUST have the scheduler lock.
 */
#define	TDF_BORROWING	0x00000001 /* Thread is borrowing pri from another. */
#define	TDF_INPANIC	0x00000002 /* Caused a panic, let it drive crashdump. */
#define	TDF_INMEM	0x00000004 /* Thread's stack is in memory. */
#define	TDF_SINTR	0x00000008 /* Sleep is interruptible. */
#define	TDF_TIMEOUT	0x00000010 /* Timing out during sleep. */
#define	TDF_IDLETD	0x00000020 /* This is a per-CPU idle thread. */
#define	TDF_CANSWAP	0x00000040 /* Thread can be swapped. */
#define	TDF_SLEEPABORT	0x00000080 /* sleepq_abort was called. */
#define	TDF_KTH_SUSP	0x00000100 /* kthread is suspended */
#define	TDF_ALLPROCSUSP	0x00000200 /* suspended by SINGLE_ALLPROC */
#define	TDF_BOUNDARY	0x00000400 /* Thread suspended at user boundary */
#define	TDF_ASTPENDING	0x00000800 /* Thread has some asynchronous events. */
#define	TDF_TIMOFAIL	0x00001000 /* Timeout from sleep after we were awake. */
#define	TDF_SBDRY	0x00002000 /* Stop only on usermode boundary. */
#define	TDF_UPIBLOCKED	0x00004000 /* Thread blocked on user PI mutex. */
#define	TDF_NEEDSUSPCHK	0x00008000 /* Thread may need to suspend. */
#define	TDF_NEEDRESCHED	0x00010000 /* Thread needs to yield. */
#define	TDF_NEEDSIGCHK	0x00020000 /* Thread may need signal delivery. */
#define	TDF_NOLOAD	0x00040000 /* Ignore during load avg calculations. */
#define	TDF_SERESTART	0x00080000 /* ERESTART on stop attempts. */
#define	TDF_THRWAKEUP	0x00100000 /* Libthr thread must not suspend itself. */
#define	TDF_SEINTR	0x00200000 /* EINTR on stop attempts. */
#define	TDF_SWAPINREQ	0x00400000 /* Swapin request due to wakeup. */
#define	TDF_UNUSED23	0x00800000 /* --available-- */
#define	TDF_SCHED0	0x01000000 /* Reserved for scheduler private use */
#define	TDF_SCHED1	0x02000000 /* Reserved for scheduler private use */
#define	TDF_SCHED2	0x04000000 /* Reserved for scheduler private use */
#define	TDF_SCHED3	0x08000000 /* Reserved for scheduler private use */
#define	TDF_ALRMPEND	0x10000000 /* Pending SIGVTALRM needs to be posted. */
#define	TDF_PROFPEND	0x20000000 /* Pending SIGPROF needs to be posted. */
#define	TDF_MACPEND	0x40000000 /* AST-based MAC event pending. */
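/*
 * Illustrative sketch (assumed): because td_flags is keyed (t), the usual
 * pattern for setting or clearing one of the TDF_* bits on another thread
 * is to take that thread's lock first, e.g.:
 *
 *	thread_lock(td);
 *	td->td_flags |= TDF_NEEDRESCHED;
 *	thread_unlock(td);
 */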
/* Userland debug flags */
#define	TDB_SUSPEND	0x00000001 /* Thread is suspended by debugger */
#define	TDB_XSIG	0x00000002 /* Thread is exchanging signal under trace */
#define	TDB_USERWR	0x00000004 /* Debugger modified memory or registers */
#define	TDB_SCE		0x00000008 /* Thread performs syscall enter */
#define	TDB_SCX		0x00000010 /* Thread performs syscall exit */
#define	TDB_EXEC	0x00000020 /* TDB_SCX from exec(2) family */
#define	TDB_FORK	0x00000040 /* TDB_SCX from fork(2) that created new
				      process */
#define	TDB_STOPATFORK	0x00000080 /* Stop at the return from fork (child
				      only) */
#define	TDB_CHILD	0x00000100 /* New child indicator for ptrace() */
#define	TDB_BORN	0x00000200 /* New LWP indicator for ptrace() */
#define	TDB_EXIT	0x00000400 /* Exiting LWP indicator for ptrace() */

/*
 * "Private" flags kept in td_pflags:
 * These are only written by curthread and thus need no locking.
 */
#define	TDP_OLDMASK	0x00000001 /* Need to restore mask after suspend. */
#define	TDP_INKTR	0x00000002 /* Thread is currently in KTR code. */
#define	TDP_INKTRACE	0x00000004 /* Thread is currently in KTRACE code. */
#define	TDP_BUFNEED	0x00000008 /* Do not recurse into the buf flush */
#define	TDP_COWINPROGRESS 0x00000010 /* Snapshot copy-on-write in progress. */
#define	TDP_ALTSTACK	0x00000020 /* Have alternate signal stack. */
#define	TDP_DEADLKTREAT	0x00000040 /* Lock acquisition - deadlock treatment. */
#define	TDP_NOFAULTING	0x00000080 /* Do not handle page faults. */
#define	TDP_UNUSED9	0x00000100 /* --available-- */
#define	TDP_OWEUPC	0x00000200 /* Call addupc() at next AST. */
#define	TDP_ITHREAD	0x00000400 /* Thread is an interrupt thread. */
#define	TDP_SYNCIO	0x00000800 /* Local override, disable async i/o. */
#define	TDP_SCHED1	0x00001000 /* Reserved for scheduler private use */
#define	TDP_SCHED2	0x00002000 /* Reserved for scheduler private use */
#define	TDP_SCHED3	0x00004000 /* Reserved for scheduler private use */
#define	TDP_SCHED4	0x00008000 /* Reserved for scheduler private use */
#define	TDP_GEOM	0x00010000 /* Settle GEOM before finishing syscall */
#define	TDP_SOFTDEP	0x00020000 /* Stuck processing softdep worklist */
#define	TDP_NORUNNINGBUF 0x00040000 /* Ignore runningbufspace check */
#define	TDP_WAKEUP	0x00080000 /* Don't sleep in umtx cond_wait */
#define	TDP_INBDFLUSH	0x00100000 /* Already in BO_BDFLUSH, do not recurse */
#define	TDP_KTHREAD	0x00200000 /* This is an official kernel thread */
#define	TDP_CALLCHAIN	0x00400000 /* Capture thread's callchain */
#define	TDP_IGNSUSP	0x00800000 /* Permission to ignore the MNTK_SUSPEND* */
#define	TDP_AUDITREC	0x01000000 /* Audit record pending on thread */
#define	TDP_RFPPWAIT	0x02000000 /* Handle RFPPWAIT on syscall exit */
#define	TDP_RESETSPUR	0x04000000 /* Reset spurious page fault history. */
#define	TDP_NERRNO	0x08000000 /* Last errno is already in td_errno */
#define	TDP_UIOHELD	0x10000000 /* Current uio has pages held in td_ma */
#define	TDP_FORKING	0x20000000 /* Thread is being created through fork() */
#define	TDP_EXECVMSPC	0x40000000 /* Execve destroyed old vmspace */
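/*
 * Illustrative sketch (assumed): since td_pflags is only written by
 * curthread, no lock is needed.  Code that wants a TDP_* bit set only for
 * the duration of an operation typically uses the nesting-safe helpers
 * defined near the bottom of this file:
 *
 *	save = curthread_pflags_set(TDP_NOFAULTING);
 *	... do the work that must not handle page faults ...
 *	curthread_pflags_restore(save);
 */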
/*
 * Reasons that the current thread can not be run yet.
 * More than one may apply.
 */
#define	TDI_SUSPENDED	0x0001	/* On suspension queue. */
#define	TDI_SLEEPING	0x0002	/* Actually asleep! (tricky). */
#define	TDI_SWAPPED	0x0004	/* Stack not in mem.  Bad juju if run. */
#define	TDI_LOCK	0x0008	/* Stopped on a lock. */
#define	TDI_IWAIT	0x0010	/* Awaiting interrupt. */
#define	TD_IS_SLEEPING(td)	((td)->td_inhibitors & TDI_SLEEPING)
#define	TD_ON_SLEEPQ(td)	((td)->td_wchan != NULL)
#define	TD_IS_SUSPENDED(td)	((td)->td_inhibitors & TDI_SUSPENDED)
#define	TD_IS_SWAPPED(td)	((td)->td_inhibitors & TDI_SWAPPED)
#define	TD_ON_LOCK(td)		((td)->td_inhibitors & TDI_LOCK)
#define	TD_AWAITING_INTR(td)	((td)->td_inhibitors & TDI_IWAIT)
#define	TD_IS_RUNNING(td)	((td)->td_state == TDS_RUNNING)
#define	TD_ON_RUNQ(td)		((td)->td_state == TDS_RUNQ)
#define	TD_CAN_RUN(td)		((td)->td_state == TDS_CAN_RUN)
#define	TD_IS_INHIBITED(td)	((td)->td_state == TDS_INHIBITED)
#define	TD_ON_UPILOCK(td)	((td)->td_flags & TDF_UPIBLOCKED)
#define	TD_IS_IDLETHREAD(td)	((td)->td_flags & TDF_IDLETD)


#define	TD_SET_INHIB(td, inhib) do {		\
	(td)->td_state = TDS_INHIBITED;		\
	(td)->td_inhibitors |= (inhib);		\
} while (0)

#define	TD_CLR_INHIB(td, inhib) do {			\
	if (((td)->td_inhibitors & (inhib)) &&		\
	    (((td)->td_inhibitors &= ~(inhib)) == 0))	\
		(td)->td_state = TDS_CAN_RUN;		\
} while (0)

#define	TD_SET_SLEEPING(td)	TD_SET_INHIB((td), TDI_SLEEPING)
#define	TD_SET_SWAPPED(td)	TD_SET_INHIB((td), TDI_SWAPPED)
#define	TD_SET_LOCK(td)		TD_SET_INHIB((td), TDI_LOCK)
#define	TD_SET_SUSPENDED(td)	TD_SET_INHIB((td), TDI_SUSPENDED)
#define	TD_SET_IWAIT(td)	TD_SET_INHIB((td), TDI_IWAIT)
#define	TD_SET_EXITING(td)	TD_SET_INHIB((td), TDI_EXITING)

#define	TD_CLR_SLEEPING(td)	TD_CLR_INHIB((td), TDI_SLEEPING)
#define	TD_CLR_SWAPPED(td)	TD_CLR_INHIB((td), TDI_SWAPPED)
#define	TD_CLR_LOCK(td)		TD_CLR_INHIB((td), TDI_LOCK)
#define	TD_CLR_SUSPENDED(td)	TD_CLR_INHIB((td), TDI_SUSPENDED)
#define	TD_CLR_IWAIT(td)	TD_CLR_INHIB((td), TDI_IWAIT)

#define	TD_SET_RUNNING(td)	(td)->td_state = TDS_RUNNING
#define	TD_SET_RUNQ(td)		(td)->td_state = TDS_RUNQ
#define	TD_SET_CAN_RUN(td)	(td)->td_state = TDS_CAN_RUN

#define	TD_SBDRY_INTR(td) \
    (((td)->td_flags & (TDF_SEINTR | TDF_SERESTART)) != 0)
#define	TD_SBDRY_ERRNO(td) \
    (((td)->td_flags & TDF_SEINTR) != 0 ? EINTR : ERESTART)
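/*
 * Illustrative sketch (assumed, simplified): the sleep queue code uses the
 * macros above to move a thread through its states, roughly:
 *
 *	thread_lock(td);
 *	TD_SET_SLEEPING(td);		// TDS_INHIBITED + TDI_SLEEPING
 *	mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
 *	...
 *	// and on wakeup, again under the thread lock:
 *	TD_CLR_SLEEPING(td);		// last inhibitor gone -> TDS_CAN_RUN
 *	setrunnable(td);		// may place the thread on a run queue
 *
 * Because more than one inhibitor may apply, only clearing the last one
 * returns the thread to TDS_CAN_RUN.
 */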
/*
 * Process structure.
 */
struct proc {
	LIST_ENTRY(proc) p_list;	/* (d) List of all processes. */
	TAILQ_HEAD(, thread) p_threads;	/* (c) all threads. */
	struct mtx	p_slock;	/* process spin lock */
	struct ucred	*p_ucred;	/* (c) Process owner's identity. */
	struct filedesc	*p_fd;		/* (b) Open files. */
	struct filedesc_to_leader *p_fdtol; /* (b) Tracking node */
	struct pstats	*p_stats;	/* (b) Accounting/statistics (CPU). */
	struct plimit	*p_limit;	/* (c) Resource limits. */
	struct callout	p_limco;	/* (c) Limit callout handle */
	struct sigacts	*p_sigacts;	/* (x) Signal actions, state (CPU). */

	int		p_flag;		/* (c) P_* flags. */
	int		p_flag2;	/* (c) P2_* flags. */
	enum {
		PRS_NEW = 0,		/* In creation */
		PRS_NORMAL,		/* threads can be run. */
		PRS_ZOMBIE
	} p_state;			/* (j/c) Process status. */
	pid_t		p_pid;		/* (b) Process identifier. */
	LIST_ENTRY(proc) p_hash;	/* (d) Hash chain. */
	LIST_ENTRY(proc) p_pglist;	/* (g + e) List of processes in pgrp. */
	struct proc	*p_pptr;	/* (c + e) Pointer to parent process. */
	LIST_ENTRY(proc) p_sibling;	/* (e) List of sibling processes. */
	LIST_HEAD(, proc) p_children;	/* (e) Pointer to list of children. */
	struct proc	*p_reaper;	/* (e) My reaper. */
	LIST_HEAD(, proc) p_reaplist;	/* (e) List of my descendants
					       (if I am reaper). */
	LIST_ENTRY(proc) p_reapsibling;	/* (e) List of siblings - descendants of
					       the same reaper. */
	struct mtx	p_mtx;		/* (n) Lock for this struct. */
	struct mtx	p_statmtx;	/* Lock for the stats */
	struct mtx	p_itimmtx;	/* Lock for the virt/prof timers */
	struct mtx	p_profmtx;	/* Lock for the profiling */
	struct ksiginfo *p_ksi;		/* Locked by parent proc lock */
	sigqueue_t	p_sigqueue;	/* (c) Sigs not delivered to a td. */
#define	p_siglist	p_sigqueue.sq_signals

/* The following fields are all zeroed upon creation in fork. */
#define	p_startzero	p_oppid
	pid_t		p_oppid;	/* (c + e) Save ppid in ptrace. XXX */
	struct vmspace	*p_vmspace;	/* (b) Address space. */
	u_int		p_swtick;	/* (c) Tick when swapped in or out. */
	u_int		p_cowgen;	/* (c) Generation of COW pointers. */
	struct itimerval p_realtimer;	/* (c) Alarm timer. */
	struct rusage	p_ru;		/* (a) Exit information. */
	struct rusage_ext p_rux;	/* (cu) Internal resource usage. */
	struct rusage_ext p_crux;	/* (c) Internal child resource usage. */
	int		p_profthreads;	/* (c) Num threads in addupc_task. */
	volatile int	p_exitthreads;	/* (j) Number of threads exiting */
	int		p_traceflag;	/* (o) Kernel trace points. */
	struct vnode	*p_tracevp;	/* (c + o) Trace to vnode. */
	struct ucred	*p_tracecred;	/* (o) Credentials to trace with. */
	struct vnode	*p_textvp;	/* (b) Vnode of executable. */
	u_int		p_lock;		/* (c) Proclock (prevent swap) count. */
	struct sigiolst	p_sigiolst;	/* (c) List of sigio sources. */
	int		p_sigparent;	/* (c) Signal to parent on exit. */
	int		p_sig;		/* (n) For core dump/debugger XXX. */
	u_long		p_code;		/* (n) For core dump/debugger XXX. */
	u_int		p_stops;	/* (c) Stop event bitmask. */
	u_int		p_stype;	/* (c) Stop event type. */
	char		p_step;		/* (c) Process is stopped. */
	u_char		p_pfsflags;	/* (c) Procfs flags. */
	struct nlminfo	*p_nlminfo;	/* (?) Only used by/for lockd. */
	struct kaioinfo	*p_aioinfo;	/* (y) ASYNC I/O info. */
	struct thread	*p_singlethread; /* (c + j) If single threading this is it */
	int		p_suspcount;	/* (j) Num threads in suspended mode. */
	struct thread	*p_xthread;	/* (c) Trap thread */
	int		p_boundary_count; /* (j) Num threads at user boundary */
	int		p_pendingcnt;	/* how many signals are pending */
	struct itimers	*p_itimers;	/* (c) POSIX interval timers. */
	struct procdesc	*p_procdesc;	/* (e) Process descriptor, if any. */
	u_int		p_treeflag;	/* (e) P_TREE flags */
	int		p_pendingexits;	/* (c) Count of pending thread exits. */
	struct filemon	*p_filemon;	/* (c) filemon-specific data. */
/* End area that is zeroed on creation. */
#define	p_endzero	p_magic

/* The following fields are all copied upon creation in fork. */
#define	p_startcopy	p_endzero
	u_int		p_magic;	/* (b) Magic number. */
	int		p_osrel;	/* (x) osreldate for the
					       binary (from ELF note, if any) */
	char		p_comm[MAXCOMLEN + 1];	/* (x) Process name. */
	struct sysentvec *p_sysent;	/* (b) Syscall dispatch info. */
	struct pargs	*p_args;	/* (c) Process arguments. */
	rlim_t		p_cpulimit;	/* (c) Current CPU limit in seconds. */
	signed char	p_nice;		/* (c) Process "nice" value. */
	int		p_fibnum;	/* in this routing domain XXX MRT */
	pid_t		p_reapsubtree;	/* (e) Pid of the direct child of the
					       reaper which spawned
					       our subtree. */
	u_int		p_xexit;	/* (c) Exit code. */
	u_int		p_xsig;		/* (c) Stop/kill sig. */
/* End area that is copied on creation. */
#define	p_endcopy	p_xsig
	struct pgrp	*p_pgrp;	/* (c + e) Pointer to process group. */
	struct knlist	*p_klist;	/* (c) Knotes attached to this proc. */
	int		p_numthreads;	/* (c) Number of threads. */
	struct mdproc	p_md;		/* Any machine-dependent fields. */
	struct callout	p_itcallout;	/* (h + c) Interval timer callout. */
	u_short		p_acflag;	/* (c) Accounting flags. */
	struct proc	*p_peers;	/* (r) */
	struct proc	*p_leader;	/* (b) */
	void		*p_emuldata;	/* (c) Emulator state data. */
	struct label	*p_label;	/* (*) Proc (not subject) MAC label. */
	STAILQ_HEAD(, ktr_request) p_ktr; /* (o) KTR event queue. */
	LIST_HEAD(, mqueue_notifier) p_mqnotifier; /* (c) mqueue notifiers.*/
	struct kdtrace_proc *p_dtrace;	/* (*) DTrace-specific data. */
	struct cv	p_pwait;	/* (*) wait cv for exit/exec. */
	struct cv	p_dbgwait;	/* (*) wait cv for debugger attach
					       after fork. */
	uint64_t	p_prev_runtime;	/* (c) Resource usage accounting. */
	struct racct	*p_racct;	/* (b) Resource accounting. */
	int		p_throttled;	/* (c) Flag for racct pcpu throttling */
	struct vm_domain_policy p_vm_dom_policy; /* (c) process default VM domain, or -1 */
	/*
	 * An orphan is a child that has been re-parented to the
	 * debugger as a result of attaching to it.  We need to keep
	 * track of them so that the parent is able to collect the exit
	 * status of what used to be its children.
	 */
	LIST_ENTRY(proc) p_orphan;	/* (e) List of orphan processes. */
	LIST_HEAD(, proc) p_orphans;	/* (e) Pointer to list of orphans. */
};
#define	p_session	p_pgrp->pg_session
#define	p_pgid		p_pgrp->pg_id

#define	NOCPU		(-1)	/* For when we aren't on a CPU. */
#define	NOCPU_OLD	(255)
#define	MAXCPU_OLD	(254)

#define	PROC_SLOCK(p)	mtx_lock_spin(&(p)->p_slock)
#define	PROC_SUNLOCK(p)	mtx_unlock_spin(&(p)->p_slock)
#define	PROC_SLOCK_ASSERT(p, type)	mtx_assert(&(p)->p_slock, (type))

#define	PROC_STATLOCK(p)	mtx_lock_spin(&(p)->p_statmtx)
#define	PROC_STATUNLOCK(p)	mtx_unlock_spin(&(p)->p_statmtx)
#define	PROC_STATLOCK_ASSERT(p, type)	mtx_assert(&(p)->p_statmtx, (type))

#define	PROC_ITIMLOCK(p)	mtx_lock_spin(&(p)->p_itimmtx)
#define	PROC_ITIMUNLOCK(p)	mtx_unlock_spin(&(p)->p_itimmtx)
#define	PROC_ITIMLOCK_ASSERT(p, type)	mtx_assert(&(p)->p_itimmtx, (type))

#define	PROC_PROFLOCK(p)	mtx_lock_spin(&(p)->p_profmtx)
#define	PROC_PROFUNLOCK(p)	mtx_unlock_spin(&(p)->p_profmtx)
#define	PROC_PROFLOCK_ASSERT(p, type)	mtx_assert(&(p)->p_profmtx, (type))

/* These flags are kept in p_flag. */
#define	P_ADVLOCK	0x00001		/* Process may hold a POSIX advisory lock. */
#define	P_CONTROLT	0x00002		/* Has a controlling terminal. */
#define	P_KPROC		0x00004		/* Kernel process. */
#define	P_FOLLOWFORK	0x00008		/* Attach parent debugger to children. */
#define	P_PPWAIT	0x00010		/* Parent is waiting for child to exec/exit. */
#define	P_PROFIL	0x00020		/* Has started profiling. */
#define	P_STOPPROF	0x00040		/* Has thread requesting to stop profiling. */
#define	P_HADTHREADS	0x00080		/* Has had threads (no cleanup shortcuts) */
#define	P_SUGID		0x00100		/* Had set id privileges since last exec. */
#define	P_SYSTEM	0x00200		/* System proc: no sigs, stats or swapping. */
#define	P_SINGLE_EXIT	0x00400		/* Threads suspending should exit, not wait. */
#define	P_TRACED	0x00800		/* Debugged process being traced. */
#define	P_WAITED	0x01000		/* Someone is waiting for us. */
#define	P_WEXIT		0x02000		/* Working on exiting. */
#define	P_EXEC		0x04000		/* Process called exec. */
#define	P_WKILLED	0x08000		/* Killed, go to kernel/user boundary ASAP. */
#define	P_CONTINUED	0x10000		/* Proc has continued from a stopped state. */
#define	P_STOPPED_SIG	0x20000		/* Stopped due to SIGSTOP/SIGTSTP. */
#define	P_STOPPED_TRACE	0x40000		/* Stopped because of tracing. */
#define	P_STOPPED_SINGLE 0x80000	/* Only 1 thread can continue (not to user). */
#define	P_PROTECTED	0x100000	/* Do not kill on memory overcommit. */
#define	P_SIGEVENT	0x200000	/* Process pending signals changed. */
#define	P_SINGLE_BOUNDARY 0x400000	/* Threads should suspend at user boundary. */
#define	P_HWPMC		0x800000	/* Process is using HWPMCs */
#define	P_JAILED	0x1000000	/* Process is in jail. */
#define	P_TOTAL_STOP	0x2000000	/* Stopped in stop_all_proc. */
#define	P_INEXEC	0x4000000	/* Process is in execve(). */
#define	P_STATCHILD	0x8000000	/* Child process stopped or exited. */
#define	P_INMEM		0x10000000	/* Loaded into memory. */
#define	P_SWAPPINGOUT	0x20000000	/* Process is being swapped out. */
#define	P_SWAPPINGIN	0x40000000	/* Process is being swapped in. */
#define	P_PPTRACE	0x80000000	/* PT_TRACEME by vforked child. */

#define	P_STOPPED	(P_STOPPED_SIG|P_STOPPED_SINGLE|P_STOPPED_TRACE)
#define	P_SHOULDSTOP(p)	((p)->p_flag & P_STOPPED)
#define	P_KILLED(p)	((p)->p_flag & P_WKILLED)
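/*
 * Illustrative sketch (assumed): p_flag is keyed (c), so both tests and
 * updates are normally done with the proc mutex held, e.g.:
 *
 *	PROC_LOCK(p);
 *	if ((p->p_flag & P_WEXIT) == 0)
 *		p->p_flag |= P_PROTECTED;
 *	PROC_UNLOCK(p);
 */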
/* These flags are kept in p_flag2. */
#define	P2_INHERIT_PROTECTED	0x00000001 /* New children get P_PROTECTED. */
#define	P2_NOTRACE		0x00000002 /* No ptrace(2) attach or coredumps. */
#define	P2_NOTRACE_EXEC		0x00000004 /* Keep P2_NOTRACE on exec(2). */
#define	P2_AST_SU		0x00000008 /* Handles SU ast for kthreads. */
#define	P2_LWP_EVENTS		0x00000010 /* Report LWP events via ptrace(2). */

/* Flags protected by proctree_lock, kept in p_treeflag. */
#define	P_TREE_ORPHANED		0x00000001 /* Reparented, on orphan list */
#define	P_TREE_FIRST_ORPHAN	0x00000002 /* First element of orphan list */
#define	P_TREE_REAPER		0x00000004 /* Reaper of subtree */

/*
 * These were process status values (p_stat), now they are only used in
 * legacy conversion code.
 */
#define	SIDL	1		/* Process being created by fork. */
#define	SRUN	2		/* Currently runnable. */
#define	SSLEEP	3		/* Sleeping on an address. */
#define	SSTOP	4		/* Process debugging or suspension. */
#define	SZOMB	5		/* Awaiting collection by parent. */
#define	SWAIT	6		/* Waiting for interrupt. */
#define	SLOCK	7		/* Blocked on a lock. */

#define	P_MAGIC		0xbeefface

#ifdef _KERNEL

/* Types and flags for mi_switch(). */
#define	SW_TYPE_MASK		0xff	/* First 8 bits are switch type */
#define	SWT_NONE		0	/* Unspecified switch. */
#define	SWT_PREEMPT		1	/* Switching due to preemption. */
#define	SWT_OWEPREEMPT		2	/* Switching due to owepreempt. */
#define	SWT_TURNSTILE		3	/* Turnstile contention. */
#define	SWT_SLEEPQ		4	/* Sleepq wait. */
#define	SWT_SLEEPQTIMO		5	/* Sleepq timeout wait. */
#define	SWT_RELINQUISH		6	/* yield call. */
#define	SWT_NEEDRESCHED		7	/* NEEDRESCHED was set. */
#define	SWT_IDLE		8	/* Switching from the idle thread. */
#define	SWT_IWAIT		9	/* Waiting for interrupts. */
#define	SWT_SUSPEND		10	/* Thread suspended. */
#define	SWT_REMOTEPREEMPT	11	/* Remote processor preempted. */
#define	SWT_REMOTEWAKEIDLE	12	/* Remote processor preempted idle. */
#define	SWT_COUNT		13	/* Number of switch types. */
/* Flags */
#define	SW_VOL		0x0100		/* Voluntary switch. */
#define	SW_INVOL	0x0200		/* Involuntary switch. */
#define	SW_PREEMPT	0x0400		/* The invol switch is a preemption */
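/*
 * Illustrative sketch (assumed): callers of mi_switch() (declared below)
 * pass one of the SWT_* types in the low bits together with SW_VOL or
 * SW_INVOL (optionally ORed with SW_PREEMPT); a voluntary yield is thus
 * reported as:
 *
 *	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
 */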
/* How values for thread_single(). */
#define	SINGLE_NO_EXIT	0
#define	SINGLE_EXIT	1
#define	SINGLE_BOUNDARY	2
#define	SINGLE_ALLPROC	3

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_PARGS);
MALLOC_DECLARE(M_PGRP);
MALLOC_DECLARE(M_SESSION);
MALLOC_DECLARE(M_SUBPROC);
#endif

#define	FOREACH_PROC_IN_SYSTEM(p)					\
	LIST_FOREACH((p), &allproc, p_list)
#define	FOREACH_THREAD_IN_PROC(p, td)					\
	TAILQ_FOREACH((td), &(p)->p_threads, td_plist)

#define	FIRST_THREAD_IN_PROC(p)	TAILQ_FIRST(&(p)->p_threads)

/*
 * We use process IDs <= pid_max <= PID_MAX; PID_MAX + 1 must also fit
 * in a pid_t, as it is used to represent "no process group".
 */
#define	PID_MAX		99999
#define	NO_PID		100000
extern pid_t pid_max;

#define	SESS_LEADER(p)	((p)->p_session->s_leader == (p))


#define	STOPEVENT(p, e, v) do {						\
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,			\
	    "checking stopevent %d", (e));				\
	if ((p)->p_stops & (e)) {					\
		PROC_LOCK(p);						\
		stopevent((p), (e), (v));				\
		PROC_UNLOCK(p);						\
	}								\
} while (0)
#define	_STOPEVENT(p, e, v) do {					\
	PROC_LOCK_ASSERT(p, MA_OWNED);					\
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &p->p_mtx.lock_object, \
	    "checking stopevent %d", (e));				\
	if ((p)->p_stops & (e))						\
		stopevent((p), (e), (v));				\
} while (0)

/* Lock and unlock a process. */
#define	PROC_LOCK(p)	mtx_lock(&(p)->p_mtx)
#define	PROC_TRYLOCK(p)	mtx_trylock(&(p)->p_mtx)
#define	PROC_UNLOCK(p)	mtx_unlock(&(p)->p_mtx)
#define	PROC_LOCKED(p)	mtx_owned(&(p)->p_mtx)
#define	PROC_LOCK_ASSERT(p, type)	mtx_assert(&(p)->p_mtx, (type))

/* Lock and unlock a process group. */
#define	PGRP_LOCK(pg)	mtx_lock(&(pg)->pg_mtx)
#define	PGRP_UNLOCK(pg)	mtx_unlock(&(pg)->pg_mtx)
#define	PGRP_LOCKED(pg)	mtx_owned(&(pg)->pg_mtx)
#define	PGRP_LOCK_ASSERT(pg, type)	mtx_assert(&(pg)->pg_mtx, (type))

#define	PGRP_LOCK_PGSIGNAL(pg) do {					\
	if ((pg) != NULL)						\
		PGRP_LOCK(pg);						\
} while (0)
#define	PGRP_UNLOCK_PGSIGNAL(pg) do {					\
	if ((pg) != NULL)						\
		PGRP_UNLOCK(pg);					\
} while (0)

/* Lock and unlock a session. */
#define	SESS_LOCK(s)	mtx_lock(&(s)->s_mtx)
#define	SESS_UNLOCK(s)	mtx_unlock(&(s)->s_mtx)
#define	SESS_LOCKED(s)	mtx_owned(&(s)->s_mtx)
#define	SESS_LOCK_ASSERT(s, type)	mtx_assert(&(s)->s_mtx, (type))

/*
 * Non-zero p_lock ensures that:
 * - exit1() is not performed until p_lock reaches zero;
 * - the process's thread stacks are not swapped out while they are
 *   resident (P_INMEM).
 *
 * PHOLD() asserts that the process (except the current process) is
 * not exiting, increments p_lock and swaps the thread stacks into memory,
 * if needed.
 * _PHOLD() is the same as PHOLD(), but it must be called with the process
 * already locked.
 * _PHOLD_LITE() also requires the process to be locked, but compared with
 * _PHOLD() it only guarantees that exit1() is not executed; faultin() is
 * not called.
 */
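/*
 * Illustrative usage sketch (assumed, simplified), using the macros defined
 * just below: a caller that must inspect another process without letting it
 * exit underneath typically does
 *
 *	p = pfind(pid);			// returns the process locked
 *	if (p == NULL)
 *		return (ESRCH);
 *	_PHOLD(p);
 *	PROC_UNLOCK(p);
 *	... examine the process ...
 *	PRELE(p);			// drops the hold, wakes exit1() waiters
 */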
#define	PHOLD(p) do {							\
	PROC_LOCK(p);							\
	_PHOLD(p);							\
	PROC_UNLOCK(p);							\
} while (0)
#define	_PHOLD(p) do {							\
	PROC_LOCK_ASSERT((p), MA_OWNED);				\
	KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc,		\
	    ("PHOLD of exiting process %p", p));			\
	(p)->p_lock++;							\
	if (((p)->p_flag & P_INMEM) == 0)				\
		faultin((p));						\
} while (0)
#define	_PHOLD_LITE(p) do {						\
	PROC_LOCK_ASSERT((p), MA_OWNED);				\
	KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc,		\
	    ("PHOLD of exiting process %p", p));			\
	(p)->p_lock++;							\
} while (0)
#define	PROC_ASSERT_HELD(p) do {					\
	KASSERT((p)->p_lock > 0, ("process %p not held", p));		\
} while (0)

#define	PRELE(p) do {							\
	PROC_LOCK((p));							\
	_PRELE((p));							\
	PROC_UNLOCK((p));						\
} while (0)
#define	_PRELE(p) do {							\
	PROC_LOCK_ASSERT((p), MA_OWNED);				\
	PROC_ASSERT_HELD(p);						\
	(--(p)->p_lock);						\
	if (((p)->p_flag & P_WEXIT) && (p)->p_lock == 0)		\
		wakeup(&(p)->p_lock);					\
} while (0)
#define	PROC_ASSERT_NOT_HELD(p) do {					\
	KASSERT((p)->p_lock == 0, ("process %p held", p));		\
} while (0)

#define	PROC_UPDATE_COW(p) do {						\
	PROC_LOCK_ASSERT((p), MA_OWNED);				\
	(p)->p_cowgen++;						\
} while (0)

/* Check whether a thread is safe to be swapped out. */
#define	thread_safetoswapout(td)	((td)->td_flags & TDF_CANSWAP)

/* Control whether or not it is safe for curthread to sleep. */
#define	THREAD_NO_SLEEPING()	((curthread)->td_no_sleeping++)

#define	THREAD_SLEEPING_OK()	((curthread)->td_no_sleeping--)

#define	THREAD_CAN_SLEEP()	((curthread)->td_no_sleeping == 0)

#define	PIDHASH(pid)	(&pidhashtbl[(pid) & pidhash])
extern LIST_HEAD(pidhashhead, proc) *pidhashtbl;
extern u_long pidhash;
#define	TIDHASH(tid)	(&tidhashtbl[(tid) & tidhash])
extern LIST_HEAD(tidhashhead, thread) *tidhashtbl;
extern u_long tidhash;
extern struct rwlock tidhash_lock;

#define	PGRPHASH(pgid)	(&pgrphashtbl[(pgid) & pgrphash])
extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl;
extern u_long pgrphash;

extern struct sx allproc_lock;
extern int allproc_gen;
extern struct sx proctree_lock;
extern struct mtx ppeers_lock;
extern struct proc proc0;		/* Process slot for swapper. */
extern struct thread0_storage thread0_st;	/* Primary thread in proc0. */
#define	thread0 (thread0_st.t0st_thread)
extern struct vmspace vmspace0;		/* VM space for proc0. */
extern int hogticks;			/* Limit on kernel cpu hogs. */
extern int lastpid;
extern int nprocs, maxproc;		/* Current and max number of procs. */
extern int maxprocperuid;		/* Max procs per uid. */
extern u_long ps_arg_cache_limit;

LIST_HEAD(proclist, proc);
TAILQ_HEAD(procqueue, proc);
TAILQ_HEAD(threadqueue, thread);
extern struct proclist allproc;		/* List of all processes. */
extern struct proclist zombproc;	/* List of zombie processes. */
extern struct proc *initproc, *pageproc; /* Process slots for init, pager. */

extern struct uma_zone *proc_zone;

struct	proc *pfind(pid_t);		/* Find process by id. */
struct	proc *pfind_locked(pid_t pid);
struct	pgrp *pgfind(pid_t);		/* Find process group by id. */
struct	proc *zpfind(pid_t);		/* Find zombie process by id. */

struct fork_req {
	int		fr_flags;
	int		fr_pages;
	int		*fr_pidp;
	struct proc	**fr_procp;
	int		*fr_pd_fd;
	int		fr_pd_flags;
	struct filecaps	*fr_pd_fcaps;
};
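/*
 * Illustrative sketch (assumed, simplified): callers zero a struct fork_req
 * on the stack, fill in only the fields they care about and hand it to
 * fork1(); this is roughly what the fork(2) system call does:
 *
 *	struct fork_req fr;
 *	int error, pid;
 *
 *	bzero(&fr, sizeof(fr));
 *	fr.fr_flags = RFFDG | RFPROC;
 *	fr.fr_pidp = &pid;
 *	error = fork1(td, &fr);
 */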
/*
 * pget() flags.
 */
#define	PGET_HOLD	0x00001	/* Hold the process. */
#define	PGET_CANSEE	0x00002	/* Check against p_cansee(). */
#define	PGET_CANDEBUG	0x00004	/* Check against p_candebug(). */
#define	PGET_ISCURRENT	0x00008	/* Check that the found process is current. */
#define	PGET_NOTWEXIT	0x00010	/* Check that the process is not in P_WEXIT. */
#define	PGET_NOTINEXEC	0x00020	/* Check that the process is not in P_INEXEC. */
#define	PGET_NOTID	0x00040	/* Do not assume tid if pid > PID_MAX. */

#define	PGET_WANTREAD	(PGET_HOLD | PGET_CANDEBUG | PGET_NOTWEXIT)

int	pget(pid_t pid, int flags, struct proc **pp);
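/*
 * Illustrative sketch (assumed): on success pget() returns the process
 * locked, or held and unlocked when PGET_HOLD was requested, so a typical
 * consumer looks like:
 *
 *	error = pget(pid, PGET_CANDEBUG | PGET_NOTWEXIT, &p);
 *	if (error != 0)
 *		return (error);
 *	... use the process with its lock held ...
 *	PROC_UNLOCK(p);
 */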
void	ast(struct trapframe *framep);
struct	thread *choosethread(void);
int	cr_cansignal(struct ucred *cred, struct proc *proc, int signum);
int	enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp,
	    struct session *sess);
int	enterthispgrp(struct proc *p, struct pgrp *pgrp);
void	faultin(struct proc *p);
void	fixjobc(struct proc *p, struct pgrp *pgrp, int entering);
int	fork1(struct thread *, struct fork_req *);
void	fork_exit(void (*)(void *, struct trapframe *), void *,
	    struct trapframe *);
void	fork_return(struct thread *, struct trapframe *);
int	inferior(struct proc *p);
void	kern_yield(int);
void	kick_proc0(void);
void	killjobc(void);
int	leavepgrp(struct proc *p);
int	maybe_preempt(struct thread *td);
void	maybe_yield(void);
void	mi_switch(int flags, struct thread *newtd);
int	p_candebug(struct thread *td, struct proc *p);
int	p_cansee(struct thread *td, struct proc *p);
int	p_cansched(struct thread *td, struct proc *p);
int	p_cansignal(struct thread *td, struct proc *p, int signum);
int	p_canwait(struct thread *td, struct proc *p);
struct	pargs *pargs_alloc(int len);
void	pargs_drop(struct pargs *pa);
void	pargs_hold(struct pargs *pa);
int	proc_getargv(struct thread *td, struct proc *p, struct sbuf *sb);
int	proc_getauxv(struct thread *td, struct proc *p, struct sbuf *sb);
int	proc_getenvv(struct thread *td, struct proc *p, struct sbuf *sb);
void	procinit(void);
void	proc_linkup0(struct proc *p, struct thread *td);
void	proc_linkup(struct proc *p, struct thread *td);
struct proc *proc_realparent(struct proc *child);
void	proc_reap(struct thread *td, struct proc *p, int *status, int options);
void	proc_reparent(struct proc *child, struct proc *newparent);
struct	pstats *pstats_alloc(void);
void	pstats_fork(struct pstats *src, struct pstats *dst);
void	pstats_free(struct pstats *ps);
void	reaper_abandon_children(struct proc *p, bool exiting);
int	securelevel_ge(struct ucred *cr, int level);
int	securelevel_gt(struct ucred *cr, int level);
void	sess_hold(struct session *);
void	sess_release(struct session *);
int	setrunnable(struct thread *);
void	setsugid(struct proc *p);
int	should_yield(void);
int	sigonstack(size_t sp);
void	stopevent(struct proc *, u_int, u_int);
struct	thread *tdfind(lwpid_t, pid_t);
void	threadinit(void);
void	tidhash_add(struct thread *);
void	tidhash_remove(struct thread *);
void	cpu_idle(int);
int	cpu_idle_wakeup(int);
extern	void (*cpu_idle_hook)(sbintime_t);	/* Hook to machdep CPU idler. */
void	cpu_switch(struct thread *, struct thread *, struct mtx *);
void	cpu_throw(struct thread *, struct thread *) __dead2;
void	unsleep(struct thread *);
void	userret(struct thread *, struct trapframe *);

void	cpu_exit(struct thread *);
void	exit1(struct thread *, int, int) __dead2;
void	cpu_copy_thread(struct thread *td, struct thread *td0);
int	cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa);
void	cpu_fork(struct thread *, struct proc *, struct thread *, int);
void	cpu_fork_kthread_handler(struct thread *, void (*)(void *), void *);
void	cpu_set_syscall_retval(struct thread *, int);
void	cpu_set_upcall(struct thread *, void (*)(void *), void *,
	    stack_t *);
int	cpu_set_user_tls(struct thread *, void *tls_base);
void	cpu_thread_alloc(struct thread *);
void	cpu_thread_clean(struct thread *);
void	cpu_thread_exit(struct thread *);
void	cpu_thread_free(struct thread *);
void	cpu_thread_swapin(struct thread *);
void	cpu_thread_swapout(struct thread *);
struct	thread *thread_alloc(int pages);
int	thread_alloc_stack(struct thread *, int pages);
void	thread_cow_get_proc(struct thread *newtd, struct proc *p);
void	thread_cow_get(struct thread *newtd, struct thread *td);
void	thread_cow_free(struct thread *td);
void	thread_cow_update(struct thread *td);
int	thread_create(struct thread *td, struct rtprio *rtp,
	    int (*initialize_thread)(struct thread *, void *), void *thunk);
void	thread_exit(void) __dead2;
void	thread_free(struct thread *td);
void	thread_link(struct thread *td, struct proc *p);
void	thread_reap(void);
int	thread_single(struct proc *p, int how);
void	thread_single_end(struct proc *p, int how);
void	thread_stash(struct thread *td);
void	thread_stopped(struct proc *p);
void	childproc_stopped(struct proc *child, int reason);
void	childproc_continued(struct proc *child);
void	childproc_exited(struct proc *child);
int	thread_suspend_check(int how);
bool	thread_suspend_check_needed(void);
void	thread_suspend_switch(struct thread *, struct proc *p);
void	thread_suspend_one(struct thread *td);
void	thread_unlink(struct thread *td);
void	thread_unsuspend(struct proc *p);
void	thread_wait(struct proc *p);
struct thread	*thread_find(struct proc *p, lwpid_t tid);

void	stop_all_proc(void);
void	resume_all_proc(void);

static __inline int
curthread_pflags_set(int flags)
{
	struct thread *td;
	int save;

	td = curthread;
	save = ~flags | (td->td_pflags & flags);
	td->td_pflags |= flags;
	return (save);
}

static __inline void
curthread_pflags_restore(int save)
{

	curthread->td_pflags &= save;
}

static __inline __pure2 struct td_sched *
td_get_sched(struct thread *td)
{

	return ((struct td_sched *)&td[1]);
}

#endif	/* _KERNEL */

#endif	/* !_SYS_PROC_H_ */