xref: /linux-6.15/include/linux/sched.h (revision a078ccff)
1 #ifndef _LINUX_SCHED_H
2 #define _LINUX_SCHED_H
3 
4 #include <uapi/linux/sched.h>
5 
6 
7 struct sched_param {
8 	int sched_priority;
9 };
10 
11 #include <asm/param.h>	/* for HZ */
12 
13 #include <linux/capability.h>
14 #include <linux/threads.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/timex.h>
18 #include <linux/jiffies.h>
19 #include <linux/rbtree.h>
20 #include <linux/thread_info.h>
21 #include <linux/cpumask.h>
22 #include <linux/errno.h>
23 #include <linux/nodemask.h>
24 #include <linux/mm_types.h>
25 
26 #include <asm/page.h>
27 #include <asm/ptrace.h>
28 #include <asm/cputime.h>
29 
30 #include <linux/smp.h>
31 #include <linux/sem.h>
32 #include <linux/signal.h>
33 #include <linux/compiler.h>
34 #include <linux/completion.h>
35 #include <linux/pid.h>
36 #include <linux/percpu.h>
37 #include <linux/topology.h>
38 #include <linux/proportions.h>
39 #include <linux/seccomp.h>
40 #include <linux/rcupdate.h>
41 #include <linux/rculist.h>
42 #include <linux/rtmutex.h>
43 
44 #include <linux/time.h>
45 #include <linux/param.h>
46 #include <linux/resource.h>
47 #include <linux/timer.h>
48 #include <linux/hrtimer.h>
49 #include <linux/task_io_accounting.h>
50 #include <linux/latencytop.h>
51 #include <linux/cred.h>
52 #include <linux/llist.h>
53 #include <linux/uidgid.h>
54 #include <linux/gfp.h>
55 
56 #include <asm/processor.h>
57 
58 struct exec_domain;
59 struct futex_pi_state;
60 struct robust_list_head;
61 struct bio_list;
62 struct fs_struct;
63 struct perf_event_context;
64 struct blk_plug;
65 
66 /*
67  * List of flags we want to share for kernel threads,
68  * if only because they are not used by them anyway.
69  */
70 #define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
71 
72 /*
73  * These are the constants used to fake the fixed-point load-average
74  * counting. Some notes:
75  *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
76  *    a load-average precision of 10 bits integer + 11 bits fractional
77  *  - if you want to count load-averages more often, you need more
78  *    precision, or rounding will get you. With 2-second counting freq,
79  *    the EXP_n values would be 1981, 2034 and 2043 if still using only
80  *    11 bit fractions.
81  */
82 extern unsigned long avenrun[];		/* Load averages */
83 extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
84 
85 #define FSHIFT		11		/* nr of bits of precision */
86 #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
87 #define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
88 #define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
89 #define EXP_5		2014		/* 1/exp(5sec/5min) */
90 #define EXP_15		2037		/* 1/exp(5sec/15min) */
91 
92 #define CALC_LOAD(load,exp,n) \
93 	load *= exp; \
94 	load += n*(FIXED_1-exp); \
95 	load >>= FSHIFT;
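/*
 * Editor's note: a minimal sketch of one CALC_LOAD() step, assuming a
 * hypothetical count of runnable tasks; the function below is an
 * illustration only and is not part of this header.
 */
#if 0	/* illustration only */
static void example_calc_load_step(void)
{
	unsigned long load = avenrun[0];	/* 1-minute average, 11-bit fixed point */
	unsigned long active = 3 * FIXED_1;	/* hypothetical nr of runnable tasks, scaled */

	/*
	 * One LOAD_FREQ (~5 s) update of the 1-minute average:
	 * load = load*EXP_1/FIXED_1 + active*(FIXED_1-EXP_1)/FIXED_1,
	 * evaluated entirely in fixed point by the macro above.
	 */
	CALC_LOAD(load, EXP_1, active);
	avenrun[0] = load;
}
#endif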
96 
97 extern unsigned long total_forks;
98 extern int nr_threads;
99 DECLARE_PER_CPU(unsigned long, process_counts);
100 extern int nr_processes(void);
101 extern unsigned long nr_running(void);
102 extern unsigned long nr_iowait(void);
103 extern unsigned long nr_iowait_cpu(int cpu);
104 extern unsigned long this_cpu_load(void);
105 
106 
107 extern void calc_global_load(unsigned long ticks);
108 extern void update_cpu_load_nohz(void);
109 
110 /* Notifier for when a task gets migrated to a new CPU */
111 struct task_migration_notifier {
112 	struct task_struct *task;
113 	int from_cpu;
114 	int to_cpu;
115 };
116 extern void register_task_migration_notifier(struct notifier_block *n);
117 
118 extern unsigned long get_parent_ip(unsigned long addr);
119 
120 extern void dump_cpu_task(int cpu);
121 
122 struct seq_file;
123 struct cfs_rq;
124 struct task_group;
125 #ifdef CONFIG_SCHED_DEBUG
126 extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
127 extern void proc_sched_set_task(struct task_struct *p);
128 extern void
129 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
130 #else
131 static inline void
132 proc_sched_show_task(struct task_struct *p, struct seq_file *m)
133 {
134 }
135 static inline void proc_sched_set_task(struct task_struct *p)
136 {
137 }
138 static inline void
139 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
140 {
141 }
142 #endif
143 
144 /*
145  * Task state bitmask. NOTE! These bits are also
146  * encoded in fs/proc/array.c: get_task_state().
147  *
148  * We have two separate sets of flags: task->state
149  * is about runnability, while task->exit_state is
150  * about the task exiting. Confusing, but this way
151  * modifying one set can't modify the other one by
152  * mistake.
153  */
154 #define TASK_RUNNING		0
155 #define TASK_INTERRUPTIBLE	1
156 #define TASK_UNINTERRUPTIBLE	2
157 #define __TASK_STOPPED		4
158 #define __TASK_TRACED		8
159 /* in tsk->exit_state */
160 #define EXIT_ZOMBIE		16
161 #define EXIT_DEAD		32
162 /* in tsk->state again */
163 #define TASK_DEAD		64
164 #define TASK_WAKEKILL		128
165 #define TASK_WAKING		256
166 #define TASK_PARKED		512
167 #define TASK_STATE_MAX		1024
168 
169 #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
170 
171 extern char ___assert_task_state[1 - 2*!!(
172 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
173 
174 /* Convenience macros for the sake of set_task_state */
175 #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
176 #define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
177 #define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
178 
179 /* Convenience macros for the sake of wake_up */
180 #define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
181 #define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
182 
183 /* get_task_state() */
184 #define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
185 				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
186 				 __TASK_TRACED)
187 
188 #define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
189 #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
190 #define task_is_dead(task)	((task)->exit_state != 0)
191 #define task_is_stopped_or_traced(task)	\
192 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
193 #define task_contributes_to_load(task)	\
194 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
195 				 (task->flags & PF_FROZEN) == 0)
196 
197 #define __set_task_state(tsk, state_value)		\
198 	do { (tsk)->state = (state_value); } while (0)
199 #define set_task_state(tsk, state_value)		\
200 	set_mb((tsk)->state, (state_value))
201 
202 /*
203  * set_current_state() includes a barrier so that the write of current->state
204  * is correctly serialised wrt the caller's subsequent test of whether to
205  * actually sleep:
206  *
207  *	set_current_state(TASK_UNINTERRUPTIBLE);
208  *	if (do_i_need_to_sleep())
209  *		schedule();
210  *
211  * If the caller does not need such serialisation then use __set_current_state()
212  */
213 #define __set_current_state(state_value)			\
214 	do { current->state = (state_value); } while (0)
215 #define set_current_state(state_value)		\
216 	set_mb(current->state, (state_value))
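/*
 * Editor's note: a minimal sketch of the sleep pattern described above; the
 * wait condition and the wakeup side are hypothetical and not part of this
 * header.
 */
#if 0	/* illustration only */
static int example_wait_for_flag(int *flag)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (*flag)			/* condition re-checked after the barrier */
			break;
		if (signal_pending(current))	/* interruptible sleeps must honor signals */
			return -ERESTARTSYS;
		schedule();			/* sleep until e.g. wake_up_process() */
	}
	__set_current_state(TASK_RUNNING);	/* no barrier needed once done sleeping */
	return 0;
}
#endif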
217 
218 /* Task command name length */
219 #define TASK_COMM_LEN 16
220 
221 #include <linux/spinlock.h>
222 
223 /*
224  * This serializes "schedule()" and also protects
225  * the run-queue from deletions/modifications (but
226  * _adding_ to the beginning of the run-queue has
227  * a separate lock).
228  */
229 extern rwlock_t tasklist_lock;
230 extern spinlock_t mmlist_lock;
231 
232 struct task_struct;
233 
234 #ifdef CONFIG_PROVE_RCU
235 extern int lockdep_tasklist_lock_is_held(void);
236 #endif /* #ifdef CONFIG_PROVE_RCU */
237 
238 extern void sched_init(void);
239 extern void sched_init_smp(void);
240 extern asmlinkage void schedule_tail(struct task_struct *prev);
241 extern void init_idle(struct task_struct *idle, int cpu);
242 extern void init_idle_bootup_task(struct task_struct *idle);
243 
244 extern int runqueue_is_locked(int cpu);
245 
246 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
247 extern void nohz_balance_enter_idle(int cpu);
248 extern void set_cpu_sd_state_idle(void);
249 extern int get_nohz_timer_target(void);
250 #else
251 static inline void nohz_balance_enter_idle(int cpu) { }
252 static inline void set_cpu_sd_state_idle(void) { }
253 #endif
254 
255 /*
256  * Only dump TASK_* tasks. (0 for all tasks)
257  */
258 extern void show_state_filter(unsigned long state_filter);
259 
260 static inline void show_state(void)
261 {
262 	show_state_filter(0);
263 }
264 
265 extern void show_regs(struct pt_regs *);
266 
267 /*
268  * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
269  * task), SP is the stack pointer of the first frame that should be shown in the back
270  * trace (or NULL if the entire call-chain of the task should be shown).
271  */
272 extern void show_stack(struct task_struct *task, unsigned long *sp);
273 
274 void io_schedule(void);
275 long io_schedule_timeout(long timeout);
276 
277 extern void cpu_init (void);
278 extern void trap_init(void);
279 extern void update_process_times(int user);
280 extern void scheduler_tick(void);
281 
282 extern void sched_show_task(struct task_struct *p);
283 
284 #ifdef CONFIG_LOCKUP_DETECTOR
285 extern void touch_softlockup_watchdog(void);
286 extern void touch_softlockup_watchdog_sync(void);
287 extern void touch_all_softlockup_watchdogs(void);
288 extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
289 				  void __user *buffer,
290 				  size_t *lenp, loff_t *ppos);
291 extern unsigned int  softlockup_panic;
292 void lockup_detector_init(void);
293 #else
294 static inline void touch_softlockup_watchdog(void)
295 {
296 }
297 static inline void touch_softlockup_watchdog_sync(void)
298 {
299 }
300 static inline void touch_all_softlockup_watchdogs(void)
301 {
302 }
303 static inline void lockup_detector_init(void)
304 {
305 }
306 #endif
307 
308 /* Attach to any functions which should be ignored in wchan output. */
309 #define __sched		__attribute__((__section__(".sched.text")))
310 
311 /* Linker adds these: start and end of __sched functions */
312 extern char __sched_text_start[], __sched_text_end[];
313 
314 /* Is this address in the __sched functions? */
315 extern int in_sched_functions(unsigned long addr);
316 
317 #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
318 extern signed long schedule_timeout(signed long timeout);
319 extern signed long schedule_timeout_interruptible(signed long timeout);
320 extern signed long schedule_timeout_killable(signed long timeout);
321 extern signed long schedule_timeout_uninterruptible(signed long timeout);
322 asmlinkage void schedule(void);
323 extern void schedule_preempt_disabled(void);
324 extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
325 
326 struct nsproxy;
327 struct user_namespace;
328 
329 #include <linux/aio.h>
330 
331 #ifdef CONFIG_MMU
332 extern void arch_pick_mmap_layout(struct mm_struct *mm);
333 extern unsigned long
334 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
335 		       unsigned long, unsigned long);
336 extern unsigned long
337 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
338 			  unsigned long len, unsigned long pgoff,
339 			  unsigned long flags);
340 extern void arch_unmap_area(struct mm_struct *, unsigned long);
341 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
342 #else
343 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
344 #endif
345 
346 
347 extern void set_dumpable(struct mm_struct *mm, int value);
348 extern int get_dumpable(struct mm_struct *mm);
349 
350 /* mm flags */
351 /* dumpable bits */
352 #define MMF_DUMPABLE      0  /* core dump is permitted */
353 #define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
354 
355 #define MMF_DUMPABLE_BITS 2
356 #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
357 
358 /* coredump filter bits */
359 #define MMF_DUMP_ANON_PRIVATE	2
360 #define MMF_DUMP_ANON_SHARED	3
361 #define MMF_DUMP_MAPPED_PRIVATE	4
362 #define MMF_DUMP_MAPPED_SHARED	5
363 #define MMF_DUMP_ELF_HEADERS	6
364 #define MMF_DUMP_HUGETLB_PRIVATE 7
365 #define MMF_DUMP_HUGETLB_SHARED  8
366 
367 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
368 #define MMF_DUMP_FILTER_BITS	7
369 #define MMF_DUMP_FILTER_MASK \
370 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
371 #define MMF_DUMP_FILTER_DEFAULT \
372 	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
373 	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
374 
375 #ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
376 # define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
377 #else
378 # define MMF_DUMP_MASK_DEFAULT_ELF	0
379 #endif
380 					/* leave room for more dump flags */
381 #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
382 #define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
383 #define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */
384 
385 #define MMF_HAS_UPROBES		19	/* has uprobes */
386 #define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
387 
388 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
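/*
 * Editor's note: the dump-filter bits above are what /proc/<pid>/coredump_filter
 * exposes, shifted down by MMF_DUMP_FILTER_SHIFT (so bit 0 of that file
 * corresponds to MMF_DUMP_ANON_PRIVATE).  A minimal sketch, illustration only:
 */
#if 0	/* illustration only */
static bool example_dumps_anon_private(struct mm_struct *mm)
{
	/* mm->flags must be accessed with atomic bitops. */
	return test_bit(MMF_DUMP_ANON_PRIVATE, &mm->flags);
}
#endif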
389 
390 struct sighand_struct {
391 	atomic_t		count;
392 	struct k_sigaction	action[_NSIG];
393 	spinlock_t		siglock;
394 	wait_queue_head_t	signalfd_wqh;
395 };
396 
397 struct pacct_struct {
398 	int			ac_flag;
399 	long			ac_exitcode;
400 	unsigned long		ac_mem;
401 	cputime_t		ac_utime, ac_stime;
402 	unsigned long		ac_minflt, ac_majflt;
403 };
404 
405 struct cpu_itimer {
406 	cputime_t expires;
407 	cputime_t incr;
408 	u32 error;
409 	u32 incr_error;
410 };
411 
412 /**
413  * struct cputime - snapshot of system and user cputime
414  * @utime: time spent in user mode
415  * @stime: time spent in system mode
416  *
417  * Gathers a generic snapshot of user and system time.
418  */
419 struct cputime {
420 	cputime_t utime;
421 	cputime_t stime;
422 };
423 
424 /**
425  * struct task_cputime - collected CPU time counts
426  * @utime:		time spent in user mode, in &cputime_t units
427  * @stime:		time spent in kernel mode, in &cputime_t units
428  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
429  *
430  * This is an extension of struct cputime that includes the total runtime
431  * spent by the task from the scheduler point of view.
432  *
433  * As a result, this structure groups together three kinds of CPU time
434  * that are tracked for threads and thread groups.  Most things considering
435  * CPU time want to group these counts together and treat all three
436  * of them in parallel.
437  */
438 struct task_cputime {
439 	cputime_t utime;
440 	cputime_t stime;
441 	unsigned long long sum_exec_runtime;
442 };
443 /* Alternate field names when used to cache expirations. */
444 #define prof_exp	stime
445 #define virt_exp	utime
446 #define sched_exp	sum_exec_runtime
447 
448 #define INIT_CPUTIME	\
449 	(struct task_cputime) {					\
450 		.utime = 0,					\
451 		.stime = 0,					\
452 		.sum_exec_runtime = 0,				\
453 	}
454 
455 /*
456  * Disable preemption until the scheduler is running.
457  * Reset by start_kernel()->sched_init()->init_idle().
458  *
459  * We include PREEMPT_ACTIVE to prevent cond_resched() from working
460  * before the scheduler is active -- see should_resched().
461  */
462 #define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)
463 
464 /**
465  * struct thread_group_cputimer - thread group interval timer counts
466  * @cputime:		thread group interval timers.
467  * @running:		non-zero when there are timers running and
468  * 			@cputime receives updates.
469  * @lock:		lock for fields in this struct.
470  *
471  * This structure contains the version of task_cputime, above, that is
472  * used for thread group CPU timer calculations.
473  */
474 struct thread_group_cputimer {
475 	struct task_cputime cputime;
476 	int running;
477 	raw_spinlock_t lock;
478 };
479 
480 #include <linux/rwsem.h>
481 struct autogroup;
482 
483 /*
484  * NOTE! "signal_struct" does not have its own
485  * locking, because a shared signal_struct always
486  * implies a shared sighand_struct, so locking
487  * sighand_struct is always a proper superset of
488  * the locking of signal_struct.
489  */
490 struct signal_struct {
491 	atomic_t		sigcnt;
492 	atomic_t		live;
493 	int			nr_threads;
494 
495 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
496 
497 	/* current thread group signal load-balancing target: */
498 	struct task_struct	*curr_target;
499 
500 	/* shared signal handling: */
501 	struct sigpending	shared_pending;
502 
503 	/* thread group exit support */
504 	int			group_exit_code;
505 	/* overloaded:
506 	 * - notify group_exit_task when ->count is equal to notify_count
507 	 * - everyone except group_exit_task is stopped during signal delivery
508 	 *   of fatal signals, group_exit_task processes the signal.
509 	 */
510 	int			notify_count;
511 	struct task_struct	*group_exit_task;
512 
513 	/* thread group stop support, overloads group_exit_code too */
514 	int			group_stop_count;
515 	unsigned int		flags; /* see SIGNAL_* flags below */
516 
517 	/*
518 	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
519 	 * manager, to re-parent orphan (double-forking) child processes
520 	 * to this process instead of 'init'. The service manager is
521 	 * able to receive SIGCHLD signals and is able to investigate
522 	 * the process until it calls wait(). All children of this
523 	 * process will inherit a flag if they should look for a
524 	 * child_subreaper process at exit.
525 	 */
526 	unsigned int		is_child_subreaper:1;
527 	unsigned int		has_child_subreaper:1;
528 
529 	/* POSIX.1b Interval Timers */
530 	struct list_head posix_timers;
531 
532 	/* ITIMER_REAL timer for the process */
533 	struct hrtimer real_timer;
534 	struct pid *leader_pid;
535 	ktime_t it_real_incr;
536 
537 	/*
538 	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
539 	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as these
540 	 * values are defined to 0 and 1 respectively
541 	 */
542 	struct cpu_itimer it[2];
543 
544 	/*
545 	 * Thread group totals for process CPU timers.
546 	 * See thread_group_cputimer(), et al, for details.
547 	 */
548 	struct thread_group_cputimer cputimer;
549 
550 	/* Earliest-expiration cache. */
551 	struct task_cputime cputime_expires;
552 
553 	struct list_head cpu_timers[3];
554 
555 	struct pid *tty_old_pgrp;
556 
557 	/* boolean value for session group leader */
558 	int leader;
559 
560 	struct tty_struct *tty; /* NULL if no tty */
561 
562 #ifdef CONFIG_SCHED_AUTOGROUP
563 	struct autogroup *autogroup;
564 #endif
565 	/*
566 	 * Cumulative resource counters for dead threads in the group,
567 	 * and for reaped dead child processes forked by this group.
568 	 * Live threads maintain their own counters and add to these
569 	 * in __exit_signal, except for the group leader.
570 	 */
571 	cputime_t utime, stime, cutime, cstime;
572 	cputime_t gtime;
573 	cputime_t cgtime;
574 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
575 	struct cputime prev_cputime;
576 #endif
577 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
578 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
579 	unsigned long inblock, oublock, cinblock, coublock;
580 	unsigned long maxrss, cmaxrss;
581 	struct task_io_accounting ioac;
582 
583 	/*
584 	 * Cumulative ns of scheduled CPU time for dead threads in the
585 	 * group, not including a zombie group leader.  (This only differs
586 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
587 	 * other than jiffies.)
588 	 */
589 	unsigned long long sum_sched_runtime;
590 
591 	/*
592 	 * We don't bother to synchronize most readers of this at all,
593 	 * because there is no reader checking a limit that actually needs
594 	 * to get both rlim_cur and rlim_max atomically, and either one
595 	 * alone is a single word that can safely be read normally.
596 	 * getrlimit/setrlimit use task_lock(current->group_leader) to
597 	 * protect this instead of the siglock, because they really
598 	 * have no need to disable irqs.
599 	 */
600 	struct rlimit rlim[RLIM_NLIMITS];
601 
602 #ifdef CONFIG_BSD_PROCESS_ACCT
603 	struct pacct_struct pacct;	/* per-process accounting information */
604 #endif
605 #ifdef CONFIG_TASKSTATS
606 	struct taskstats *stats;
607 #endif
608 #ifdef CONFIG_AUDIT
609 	unsigned audit_tty;
610 	struct tty_audit_buf *tty_audit_buf;
611 #endif
612 #ifdef CONFIG_CGROUPS
613 	/*
614 	 * group_rwsem prevents new tasks from entering the threadgroup and
615 	 * member tasks from exiting; more specifically, the setting of
616 	 * PF_EXITING.  fork and exit paths are protected with this rwsem
617 	 * using threadgroup_change_begin/end().  Users which require
618 	 * threadgroup to remain stable should use threadgroup_[un]lock()
619 	 * which also takes care of exec path.  Currently, cgroup is the
620 	 * only user.
621 	 */
622 	struct rw_semaphore group_rwsem;
623 #endif
624 
625 	oom_flags_t oom_flags;
626 	short oom_score_adj;		/* OOM kill score adjustment */
627 	short oom_score_adj_min;	/* OOM kill score adjustment min value.
628 					 * Only settable by CAP_SYS_RESOURCE. */
629 
630 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
631 					 * credential calculations
632 					 * (notably, ptrace) */
633 };
634 
635 /*
636  * Bits in flags field of signal_struct.
637  */
638 #define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
639 #define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
640 #define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
641 /*
642  * Pending notifications to parent.
643  */
644 #define SIGNAL_CLD_STOPPED	0x00000010
645 #define SIGNAL_CLD_CONTINUED	0x00000020
646 #define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
647 
648 #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
649 
650 /* If true, all threads except ->group_exit_task have pending SIGKILL */
651 static inline int signal_group_exit(const struct signal_struct *sig)
652 {
653 	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
654 		(sig->group_exit_task != NULL);
655 }
656 
657 /*
658  * Some day this will be a full-fledged user tracking system...
659  */
660 struct user_struct {
661 	atomic_t __count;	/* reference count */
662 	atomic_t processes;	/* How many processes does this user have? */
663 	atomic_t files;		/* How many open files does this user have? */
664 	atomic_t sigpending;	/* How many pending signals does this user have? */
665 #ifdef CONFIG_INOTIFY_USER
666 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
667 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
668 #endif
669 #ifdef CONFIG_FANOTIFY
670 	atomic_t fanotify_listeners;
671 #endif
672 #ifdef CONFIG_EPOLL
673 	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
674 #endif
675 #ifdef CONFIG_POSIX_MQUEUE
676 	/* protected by mq_lock	*/
677 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
678 #endif
679 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
680 
681 #ifdef CONFIG_KEYS
682 	struct key *uid_keyring;	/* UID specific keyring */
683 	struct key *session_keyring;	/* UID's default session keyring */
684 #endif
685 
686 	/* Hash table maintenance information */
687 	struct hlist_node uidhash_node;
688 	kuid_t uid;
689 
690 #ifdef CONFIG_PERF_EVENTS
691 	atomic_long_t locked_vm;
692 #endif
693 };
694 
695 extern int uids_sysfs_init(void);
696 
697 extern struct user_struct *find_user(kuid_t);
698 
699 extern struct user_struct root_user;
700 #define INIT_USER (&root_user)
701 
702 
703 struct backing_dev_info;
704 struct reclaim_state;
705 
706 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
707 struct sched_info {
708 	/* cumulative counters */
709 	unsigned long pcount;	      /* # of times run on this cpu */
710 	unsigned long long run_delay; /* time spent waiting on a runqueue */
711 
712 	/* timestamps */
713 	unsigned long long last_arrival,/* when we last ran on a cpu */
714 			   last_queued;	/* when we were last queued to run */
715 };
716 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
717 
718 #ifdef CONFIG_TASK_DELAY_ACCT
719 struct task_delay_info {
720 	spinlock_t	lock;
721 	unsigned int	flags;	/* Private per-task flags */
722 
723 	/* For each stat XXX, add the following, aligned appropriately
724 	 *
725 	 * struct timespec XXX_start, XXX_end;
726 	 * u64 XXX_delay;
727 	 * u32 XXX_count;
728 	 *
729 	 * Atomicity of updates to XXX_delay, XXX_count protected by
730 	 * single lock above (split into XXX_lock if contention is an issue).
731 	 */
732 
733 	/*
734 	 * XXX_count is incremented on every XXX operation, the delay
735 	 * associated with the operation is added to XXX_delay.
736 	 * XXX_delay contains the accumulated delay time in nanoseconds.
737 	 */
738 	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
739 	u64 blkio_delay;	/* wait for sync block io completion */
740 	u64 swapin_delay;	/* wait for swapin block io completion */
741 	u32 blkio_count;	/* total count of the number of sync block */
742 				/* io operations performed */
743 	u32 swapin_count;	/* total count of the number of swapin block */
744 				/* io operations performed */
745 
746 	struct timespec freepages_start, freepages_end;
747 	u64 freepages_delay;	/* wait for memory reclaim */
748 	u32 freepages_count;	/* total count of memory reclaim */
749 };
750 #endif	/* CONFIG_TASK_DELAY_ACCT */
751 
752 static inline int sched_info_on(void)
753 {
754 #ifdef CONFIG_SCHEDSTATS
755 	return 1;
756 #elif defined(CONFIG_TASK_DELAY_ACCT)
757 	extern int delayacct_on;
758 	return delayacct_on;
759 #else
760 	return 0;
761 #endif
762 }
763 
764 enum cpu_idle_type {
765 	CPU_IDLE,
766 	CPU_NOT_IDLE,
767 	CPU_NEWLY_IDLE,
768 	CPU_MAX_IDLE_TYPES
769 };
770 
771 /*
772  * Increase resolution of nice-level calculations for 64-bit architectures.
773  * The extra resolution improves shares distribution and load balancing of
774  * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
775  * hierarchies, especially on larger systems. This is not a user-visible change
776  * and does not change the user-interface for setting shares/weights.
777  *
778  * We increase resolution only if we have enough bits to allow this increased
779  * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
780  * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
781  * increased costs.
782  */
783 #if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load  */
784 # define SCHED_LOAD_RESOLUTION	10
785 # define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
786 # define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
787 #else
788 # define SCHED_LOAD_RESOLUTION	0
789 # define scale_load(w)		(w)
790 # define scale_load_down(w)	(w)
791 #endif
792 
793 #define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
794 #define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
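/*
 * Editor's note: a minimal sketch of the load scaling above; with the
 * increased resolution a nice-0 weight of 1024 would be stored as 1024 << 10,
 * while with the current resolution of 0 both macros are the identity.
 * Illustration only.
 */
#if 0	/* illustration only */
static void example_scale_load(void)
{
	unsigned long scaled = scale_load(1024);	/* 1024 << SCHED_LOAD_RESOLUTION */
	unsigned long weight = scale_load_down(scaled);	/* back to 1024 */
}
#endif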
795 
796 /*
797  * Increase resolution of cpu_power calculations
798  */
799 #define SCHED_POWER_SHIFT	10
800 #define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)
801 
802 /*
803  * sched-domains (multiprocessor balancing) declarations:
804  */
805 #ifdef CONFIG_SMP
806 #define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
807 #define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
808 #define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
809 #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
810 #define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
811 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
812 #define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
813 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
814 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
815 #define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
816 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
817 #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
818 
819 extern int __weak arch_sd_sibiling_asym_packing(void);
820 
821 struct sched_group_power {
822 	atomic_t ref;
823 	/*
824 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
825 	 * single CPU.
826 	 */
827 	unsigned int power, power_orig;
828 	unsigned long next_update;
829 	/*
830 	 * Number of busy cpus in this group.
831 	 */
832 	atomic_t nr_busy_cpus;
833 
834 	unsigned long cpumask[0]; /* iteration mask */
835 };
836 
837 struct sched_group {
838 	struct sched_group *next;	/* Must be a circular list */
839 	atomic_t ref;
840 
841 	unsigned int group_weight;
842 	struct sched_group_power *sgp;
843 
844 	/*
845 	 * The CPUs this group covers.
846 	 *
847 	 * NOTE: this field is variable length. (Allocated dynamically
848 	 * by attaching extra space to the end of the structure,
849 	 * depending on how many CPUs the kernel has booted up with)
850 	 */
851 	unsigned long cpumask[0];
852 };
853 
854 static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
855 {
856 	return to_cpumask(sg->cpumask);
857 }
858 
859 /*
860  * cpumask masking which cpus in the group are allowed to iterate up the domain
861  * tree.
862  */
863 static inline struct cpumask *sched_group_mask(struct sched_group *sg)
864 {
865 	return to_cpumask(sg->sgp->cpumask);
866 }
867 
868 /**
869  * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
870  * @group: The group whose first cpu is to be returned.
871  */
872 static inline unsigned int group_first_cpu(struct sched_group *group)
873 {
874 	return cpumask_first(sched_group_cpus(group));
875 }
876 
877 struct sched_domain_attr {
878 	int relax_domain_level;
879 };
880 
881 #define SD_ATTR_INIT	(struct sched_domain_attr) {	\
882 	.relax_domain_level = -1,			\
883 }
884 
885 extern int sched_domain_level_max;
886 
887 struct sched_domain {
888 	/* These fields must be setup */
889 	struct sched_domain *parent;	/* top domain must be null terminated */
890 	struct sched_domain *child;	/* bottom domain must be null terminated */
891 	struct sched_group *groups;	/* the balancing groups of the domain */
892 	unsigned long min_interval;	/* Minimum balance interval ms */
893 	unsigned long max_interval;	/* Maximum balance interval ms */
894 	unsigned int busy_factor;	/* less balancing by factor if busy */
895 	unsigned int imbalance_pct;	/* No balance until over watermark */
896 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
897 	unsigned int busy_idx;
898 	unsigned int idle_idx;
899 	unsigned int newidle_idx;
900 	unsigned int wake_idx;
901 	unsigned int forkexec_idx;
902 	unsigned int smt_gain;
903 	int flags;			/* See SD_* */
904 	int level;
905 
906 	/* Runtime fields. */
907 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
908 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
909 	unsigned int nr_balance_failed; /* initialise to 0 */
910 
911 	u64 last_update;
912 
913 #ifdef CONFIG_SCHEDSTATS
914 	/* load_balance() stats */
915 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
916 	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
917 	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
918 	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
919 	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
920 	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
921 	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
922 	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
923 
924 	/* Active load balancing */
925 	unsigned int alb_count;
926 	unsigned int alb_failed;
927 	unsigned int alb_pushed;
928 
929 	/* SD_BALANCE_EXEC stats */
930 	unsigned int sbe_count;
931 	unsigned int sbe_balanced;
932 	unsigned int sbe_pushed;
933 
934 	/* SD_BALANCE_FORK stats */
935 	unsigned int sbf_count;
936 	unsigned int sbf_balanced;
937 	unsigned int sbf_pushed;
938 
939 	/* try_to_wake_up() stats */
940 	unsigned int ttwu_wake_remote;
941 	unsigned int ttwu_move_affine;
942 	unsigned int ttwu_move_balance;
943 #endif
944 #ifdef CONFIG_SCHED_DEBUG
945 	char *name;
946 #endif
947 	union {
948 		void *private;		/* used during construction */
949 		struct rcu_head rcu;	/* used during destruction */
950 	};
951 
952 	unsigned int span_weight;
953 	/*
954 	 * Span of all CPUs in this domain.
955 	 *
956 	 * NOTE: this field is variable length. (Allocated dynamically
957 	 * by attaching extra space to the end of the structure,
958 	 * depending on how many CPUs the kernel has booted up with)
959 	 */
960 	unsigned long span[0];
961 };
962 
963 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
964 {
965 	return to_cpumask(sd->span);
966 }
967 
968 extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
969 				    struct sched_domain_attr *dattr_new);
970 
971 /* Allocate an array of sched domains, for partition_sched_domains(). */
972 cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
973 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
974 
975 /* Test a flag in parent sched domain */
976 static inline int test_sd_parent(struct sched_domain *sd, int flag)
977 {
978 	if (sd->parent && (sd->parent->flags & flag))
979 		return 1;
980 
981 	return 0;
982 }
983 
984 unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
985 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
986 
987 bool cpus_share_cache(int this_cpu, int that_cpu);
988 
989 #else /* CONFIG_SMP */
990 
991 struct sched_domain_attr;
992 
993 static inline void
994 partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
995 			struct sched_domain_attr *dattr_new)
996 {
997 }
998 
999 static inline bool cpus_share_cache(int this_cpu, int that_cpu)
1000 {
1001 	return true;
1002 }
1003 
1004 #endif	/* !CONFIG_SMP */
1005 
1006 
1007 struct io_context;			/* See blkdev.h */
1008 
1009 
1010 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1011 extern void prefetch_stack(struct task_struct *t);
1012 #else
1013 static inline void prefetch_stack(struct task_struct *t) { }
1014 #endif
1015 
1016 struct audit_context;		/* See audit.c */
1017 struct mempolicy;
1018 struct pipe_inode_info;
1019 struct uts_namespace;
1020 
1021 struct rq;
1022 struct sched_domain;
1023 
1024 /*
1025  * wake flags
1026  */
1027 #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
1028 #define WF_FORK		0x02		/* child wakeup after fork */
1029 #define WF_MIGRATED	0x04		/* internal use, task got migrated */
1030 
1031 #define ENQUEUE_WAKEUP		1
1032 #define ENQUEUE_HEAD		2
1033 #ifdef CONFIG_SMP
1034 #define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
1035 #else
1036 #define ENQUEUE_WAKING		0
1037 #endif
1038 
1039 #define DEQUEUE_SLEEP		1
1040 
1041 struct sched_class {
1042 	const struct sched_class *next;
1043 
1044 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1045 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1046 	void (*yield_task) (struct rq *rq);
1047 	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
1048 
1049 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1050 
1051 	struct task_struct * (*pick_next_task) (struct rq *rq);
1052 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1053 
1054 #ifdef CONFIG_SMP
1055 	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
1056 	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
1057 
1058 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
1059 	void (*post_schedule) (struct rq *this_rq);
1060 	void (*task_waking) (struct task_struct *task);
1061 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1062 
1063 	void (*set_cpus_allowed)(struct task_struct *p,
1064 				 const struct cpumask *newmask);
1065 
1066 	void (*rq_online)(struct rq *rq);
1067 	void (*rq_offline)(struct rq *rq);
1068 #endif
1069 
1070 	void (*set_curr_task) (struct rq *rq);
1071 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1072 	void (*task_fork) (struct task_struct *p);
1073 
1074 	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1075 	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1076 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1077 			     int oldprio);
1078 
1079 	unsigned int (*get_rr_interval) (struct rq *rq,
1080 					 struct task_struct *task);
1081 
1082 #ifdef CONFIG_FAIR_GROUP_SCHED
1083 	void (*task_move_group) (struct task_struct *p, int on_rq);
1084 #endif
1085 };
1086 
1087 struct load_weight {
1088 	unsigned long weight, inv_weight;
1089 };
1090 
1091 struct sched_avg {
1092 	/*
1093 	 * These sums represent an infinite geometric series and so are bound
1094 	 * above by 1024/(1-y).  Thus we only need a u32 to store them for all
1095 	 * choices of y < 1-2^(-32)*1024.
1096 	 */
1097 	u32 runnable_avg_sum, runnable_avg_period;
1098 	u64 last_runnable_update;
1099 	s64 decay_count;
1100 	unsigned long load_avg_contrib;
1101 };
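/*
 * Editor's note: a worked bound for the comment inside struct sched_avg,
 * assuming the decay factor used by the fair scheduler (y chosen so that
 * y^32 ~= 1/2, i.e. y ~= 0.97857):
 *
 *	runnable_avg_sum <= 1024 * (1 + y + y^2 + ...) = 1024/(1 - y) ~= 47800
 *
 * which comfortably fits in a u32.
 */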
1102 
1103 #ifdef CONFIG_SCHEDSTATS
1104 struct sched_statistics {
1105 	u64			wait_start;
1106 	u64			wait_max;
1107 	u64			wait_count;
1108 	u64			wait_sum;
1109 	u64			iowait_count;
1110 	u64			iowait_sum;
1111 
1112 	u64			sleep_start;
1113 	u64			sleep_max;
1114 	s64			sum_sleep_runtime;
1115 
1116 	u64			block_start;
1117 	u64			block_max;
1118 	u64			exec_max;
1119 	u64			slice_max;
1120 
1121 	u64			nr_migrations_cold;
1122 	u64			nr_failed_migrations_affine;
1123 	u64			nr_failed_migrations_running;
1124 	u64			nr_failed_migrations_hot;
1125 	u64			nr_forced_migrations;
1126 
1127 	u64			nr_wakeups;
1128 	u64			nr_wakeups_sync;
1129 	u64			nr_wakeups_migrate;
1130 	u64			nr_wakeups_local;
1131 	u64			nr_wakeups_remote;
1132 	u64			nr_wakeups_affine;
1133 	u64			nr_wakeups_affine_attempts;
1134 	u64			nr_wakeups_passive;
1135 	u64			nr_wakeups_idle;
1136 };
1137 #endif
1138 
1139 struct sched_entity {
1140 	struct load_weight	load;		/* for load-balancing */
1141 	struct rb_node		run_node;
1142 	struct list_head	group_node;
1143 	unsigned int		on_rq;
1144 
1145 	u64			exec_start;
1146 	u64			sum_exec_runtime;
1147 	u64			vruntime;
1148 	u64			prev_sum_exec_runtime;
1149 
1150 	u64			nr_migrations;
1151 
1152 #ifdef CONFIG_SCHEDSTATS
1153 	struct sched_statistics statistics;
1154 #endif
1155 
1156 #ifdef CONFIG_FAIR_GROUP_SCHED
1157 	struct sched_entity	*parent;
1158 	/* rq on which this entity is (to be) queued: */
1159 	struct cfs_rq		*cfs_rq;
1160 	/* rq "owned" by this entity/group: */
1161 	struct cfs_rq		*my_q;
1162 #endif
1163 
1164 /*
1165  * Load-tracking only depends on SMP; the FAIR_GROUP_SCHED dependency below may
1166  * be removed once load-tracking is useful for applications beyond shares
1167  * distribution (e.g. load-balancing).
1168  */
1169 #if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
1170 	/* Per-entity load-tracking */
1171 	struct sched_avg	avg;
1172 #endif
1173 };
1174 
1175 struct sched_rt_entity {
1176 	struct list_head run_list;
1177 	unsigned long timeout;
1178 	unsigned long watchdog_stamp;
1179 	unsigned int time_slice;
1180 
1181 	struct sched_rt_entity *back;
1182 #ifdef CONFIG_RT_GROUP_SCHED
1183 	struct sched_rt_entity	*parent;
1184 	/* rq on which this entity is (to be) queued: */
1185 	struct rt_rq		*rt_rq;
1186 	/* rq "owned" by this entity/group: */
1187 	struct rt_rq		*my_q;
1188 #endif
1189 };
1190 
1191 
1192 struct rcu_node;
1193 
1194 enum perf_event_task_context {
1195 	perf_invalid_context = -1,
1196 	perf_hw_context = 0,
1197 	perf_sw_context,
1198 	perf_nr_task_contexts,
1199 };
1200 
1201 struct task_struct {
1202 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1203 	void *stack;
1204 	atomic_t usage;
1205 	unsigned int flags;	/* per process flags, defined below */
1206 	unsigned int ptrace;
1207 
1208 #ifdef CONFIG_SMP
1209 	struct llist_node wake_entry;
1210 	int on_cpu;
1211 #endif
1212 	int on_rq;
1213 
1214 	int prio, static_prio, normal_prio;
1215 	unsigned int rt_priority;
1216 	const struct sched_class *sched_class;
1217 	struct sched_entity se;
1218 	struct sched_rt_entity rt;
1219 #ifdef CONFIG_CGROUP_SCHED
1220 	struct task_group *sched_task_group;
1221 #endif
1222 
1223 #ifdef CONFIG_PREEMPT_NOTIFIERS
1224 	/* list of struct preempt_notifier: */
1225 	struct hlist_head preempt_notifiers;
1226 #endif
1227 
1228 	/*
1229 	 * fpu_counter contains the number of consecutive context switches
1230 	 * during which the FPU is used. If this is over a threshold, the lazy fpu
1231 	 * saving becomes unlazy to save the trap. This is an unsigned char
1232 	 * so that after 256 times the counter wraps and the behavior turns
1233 	 * lazy again; this is to deal with bursty apps that only use the FPU
1234 	 * for a short time.
1235 	 */
1236 	unsigned char fpu_counter;
1237 #ifdef CONFIG_BLK_DEV_IO_TRACE
1238 	unsigned int btrace_seq;
1239 #endif
1240 
1241 	unsigned int policy;
1242 	int nr_cpus_allowed;
1243 	cpumask_t cpus_allowed;
1244 
1245 #ifdef CONFIG_PREEMPT_RCU
1246 	int rcu_read_lock_nesting;
1247 	char rcu_read_unlock_special;
1248 	struct list_head rcu_node_entry;
1249 #endif /* #ifdef CONFIG_PREEMPT_RCU */
1250 #ifdef CONFIG_TREE_PREEMPT_RCU
1251 	struct rcu_node *rcu_blocked_node;
1252 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1253 #ifdef CONFIG_RCU_BOOST
1254 	struct rt_mutex *rcu_boost_mutex;
1255 #endif /* #ifdef CONFIG_RCU_BOOST */
1256 
1257 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1258 	struct sched_info sched_info;
1259 #endif
1260 
1261 	struct list_head tasks;
1262 #ifdef CONFIG_SMP
1263 	struct plist_node pushable_tasks;
1264 #endif
1265 
1266 	struct mm_struct *mm, *active_mm;
1267 #ifdef CONFIG_COMPAT_BRK
1268 	unsigned brk_randomized:1;
1269 #endif
1270 #if defined(SPLIT_RSS_COUNTING)
1271 	struct task_rss_stat	rss_stat;
1272 #endif
1273 /* task state */
1274 	int exit_state;
1275 	int exit_code, exit_signal;
1276 	int pdeath_signal;  /*  The signal sent when the parent dies  */
1277 	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
1278 	/* ??? */
1279 	unsigned int personality;
1280 	unsigned did_exec:1;
1281 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
1282 				 * execve */
1283 	unsigned in_iowait:1;
1284 
1285 	/* task may not gain privileges */
1286 	unsigned no_new_privs:1;
1287 
1288 	/* Revert to default priority/policy when forking */
1289 	unsigned sched_reset_on_fork:1;
1290 	unsigned sched_contributes_to_load:1;
1291 
1292 	pid_t pid;
1293 	pid_t tgid;
1294 
1295 #ifdef CONFIG_CC_STACKPROTECTOR
1296 	/* Canary value for the -fstack-protector gcc feature */
1297 	unsigned long stack_canary;
1298 #endif
1299 	/*
1300 	 * pointers to (original) parent process, youngest child, younger sibling,
1301 	 * older sibling, respectively.  (p->father can be replaced with
1302 	 * p->real_parent->pid)
1303 	 */
1304 	struct task_struct __rcu *real_parent; /* real parent process */
1305 	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
1306 	/*
1307 	 * children/sibling forms the list of my natural children
1308 	 */
1309 	struct list_head children;	/* list of my children */
1310 	struct list_head sibling;	/* linkage in my parent's children list */
1311 	struct task_struct *group_leader;	/* threadgroup leader */
1312 
1313 	/*
1314 	 * ptraced is the list of tasks this task is using ptrace on.
1315 	 * This includes both natural children and PTRACE_ATTACH targets.
1316 	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1317 	 */
1318 	struct list_head ptraced;
1319 	struct list_head ptrace_entry;
1320 
1321 	/* PID/PID hash table linkage. */
1322 	struct pid_link pids[PIDTYPE_MAX];
1323 	struct list_head thread_group;
1324 
1325 	struct completion *vfork_done;		/* for vfork() */
1326 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
1327 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
1328 
1329 	cputime_t utime, stime, utimescaled, stimescaled;
1330 	cputime_t gtime;
1331 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
1332 	struct cputime prev_cputime;
1333 #endif
1334 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1335 	seqlock_t vtime_seqlock;
1336 	unsigned long long vtime_snap;
1337 	enum {
1338 		VTIME_SLEEPING = 0,
1339 		VTIME_USER,
1340 		VTIME_SYS,
1341 	} vtime_snap_whence;
1342 #endif
1343 	unsigned long nvcsw, nivcsw; /* context switch counts */
1344 	struct timespec start_time; 		/* monotonic time */
1345 	struct timespec real_start_time;	/* boot based time */
1346 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1347 	unsigned long min_flt, maj_flt;
1348 
1349 	struct task_cputime cputime_expires;
1350 	struct list_head cpu_timers[3];
1351 
1352 /* process credentials */
1353 	const struct cred __rcu *real_cred; /* objective and real subjective task
1354 					 * credentials (COW) */
1355 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
1356 					 * credentials (COW) */
1357 	char comm[TASK_COMM_LEN]; /* executable name excluding path
1358 				     - access with [gs]et_task_comm (which lock
1359 				       it with task_lock())
1360 				     - initialized normally by setup_new_exec */
1361 /* file system info */
1362 	int link_count, total_link_count;
1363 #ifdef CONFIG_SYSVIPC
1364 /* ipc stuff */
1365 	struct sysv_sem sysvsem;
1366 #endif
1367 #ifdef CONFIG_DETECT_HUNG_TASK
1368 /* hung task detection */
1369 	unsigned long last_switch_count;
1370 #endif
1371 /* CPU-specific state of this task */
1372 	struct thread_struct thread;
1373 /* filesystem information */
1374 	struct fs_struct *fs;
1375 /* open file information */
1376 	struct files_struct *files;
1377 /* namespaces */
1378 	struct nsproxy *nsproxy;
1379 /* signal handlers */
1380 	struct signal_struct *signal;
1381 	struct sighand_struct *sighand;
1382 
1383 	sigset_t blocked, real_blocked;
1384 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
1385 	struct sigpending pending;
1386 
1387 	unsigned long sas_ss_sp;
1388 	size_t sas_ss_size;
1389 	int (*notifier)(void *priv);
1390 	void *notifier_data;
1391 	sigset_t *notifier_mask;
1392 	struct callback_head *task_works;
1393 
1394 	struct audit_context *audit_context;
1395 #ifdef CONFIG_AUDITSYSCALL
1396 	kuid_t loginuid;
1397 	unsigned int sessionid;
1398 #endif
1399 	struct seccomp seccomp;
1400 
1401 /* Thread group tracking */
1402    	u32 parent_exec_id;
1403    	u32 self_exec_id;
1404 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1405  * mempolicy */
1406 	spinlock_t alloc_lock;
1407 
1408 	/* Protection of the PI data structures: */
1409 	raw_spinlock_t pi_lock;
1410 
1411 #ifdef CONFIG_RT_MUTEXES
1412 	/* PI waiters blocked on a rt_mutex held by this task */
1413 	struct plist_head pi_waiters;
1414 	/* Deadlock detection and priority inheritance handling */
1415 	struct rt_mutex_waiter *pi_blocked_on;
1416 #endif
1417 
1418 #ifdef CONFIG_DEBUG_MUTEXES
1419 	/* mutex deadlock detection */
1420 	struct mutex_waiter *blocked_on;
1421 #endif
1422 #ifdef CONFIG_TRACE_IRQFLAGS
1423 	unsigned int irq_events;
1424 	unsigned long hardirq_enable_ip;
1425 	unsigned long hardirq_disable_ip;
1426 	unsigned int hardirq_enable_event;
1427 	unsigned int hardirq_disable_event;
1428 	int hardirqs_enabled;
1429 	int hardirq_context;
1430 	unsigned long softirq_disable_ip;
1431 	unsigned long softirq_enable_ip;
1432 	unsigned int softirq_disable_event;
1433 	unsigned int softirq_enable_event;
1434 	int softirqs_enabled;
1435 	int softirq_context;
1436 #endif
1437 #ifdef CONFIG_LOCKDEP
1438 # define MAX_LOCK_DEPTH 48UL
1439 	u64 curr_chain_key;
1440 	int lockdep_depth;
1441 	unsigned int lockdep_recursion;
1442 	struct held_lock held_locks[MAX_LOCK_DEPTH];
1443 	gfp_t lockdep_reclaim_gfp;
1444 #endif
1445 
1446 /* journalling filesystem info */
1447 	void *journal_info;
1448 
1449 /* stacked block device info */
1450 	struct bio_list *bio_list;
1451 
1452 #ifdef CONFIG_BLOCK
1453 /* stack plugging */
1454 	struct blk_plug *plug;
1455 #endif
1456 
1457 /* VM state */
1458 	struct reclaim_state *reclaim_state;
1459 
1460 	struct backing_dev_info *backing_dev_info;
1461 
1462 	struct io_context *io_context;
1463 
1464 	unsigned long ptrace_message;
1465 	siginfo_t *last_siginfo; /* For ptrace use.  */
1466 	struct task_io_accounting ioac;
1467 #if defined(CONFIG_TASK_XACCT)
1468 	u64 acct_rss_mem1;	/* accumulated rss usage */
1469 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
1470 	cputime_t acct_timexpd;	/* stime + utime since last update */
1471 #endif
1472 #ifdef CONFIG_CPUSETS
1473 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1474 	seqcount_t mems_allowed_seq;	/* Sequence no to catch updates */
1475 	int cpuset_mem_spread_rotor;
1476 	int cpuset_slab_spread_rotor;
1477 #endif
1478 #ifdef CONFIG_CGROUPS
1479 	/* Control Group info protected by css_set_lock */
1480 	struct css_set __rcu *cgroups;
1481 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1482 	struct list_head cg_list;
1483 #endif
1484 #ifdef CONFIG_FUTEX
1485 	struct robust_list_head __user *robust_list;
1486 #ifdef CONFIG_COMPAT
1487 	struct compat_robust_list_head __user *compat_robust_list;
1488 #endif
1489 	struct list_head pi_state_list;
1490 	struct futex_pi_state *pi_state_cache;
1491 #endif
1492 #ifdef CONFIG_PERF_EVENTS
1493 	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1494 	struct mutex perf_event_mutex;
1495 	struct list_head perf_event_list;
1496 #endif
1497 #ifdef CONFIG_NUMA
1498 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1499 	short il_next;
1500 	short pref_node_fork;
1501 #endif
1502 #ifdef CONFIG_NUMA_BALANCING
1503 	int numa_scan_seq;
1504 	int numa_migrate_seq;
1505 	unsigned int numa_scan_period;
1506 	u64 node_stamp;			/* migration stamp  */
1507 	struct callback_head numa_work;
1508 #endif /* CONFIG_NUMA_BALANCING */
1509 
1510 	struct rcu_head rcu;
1511 
1512 	/*
1513 	 * cache last used pipe for splice
1514 	 */
1515 	struct pipe_inode_info *splice_pipe;
1516 
1517 	struct page_frag task_frag;
1518 
1519 #ifdef	CONFIG_TASK_DELAY_ACCT
1520 	struct task_delay_info *delays;
1521 #endif
1522 #ifdef CONFIG_FAULT_INJECTION
1523 	int make_it_fail;
1524 #endif
1525 	/*
1526 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1527 	 * balance_dirty_pages() for some dirty throttling pause
1528 	 */
1529 	int nr_dirtied;
1530 	int nr_dirtied_pause;
1531 	unsigned long dirty_paused_when; /* start of a write-and-pause period */
1532 
1533 #ifdef CONFIG_LATENCYTOP
1534 	int latency_record_count;
1535 	struct latency_record latency_record[LT_SAVECOUNT];
1536 #endif
1537 	/*
1538 	 * time slack values; these are used to round up poll() and
1539 	 * select() etc timeout values. These are in nanoseconds.
1540 	 */
1541 	unsigned long timer_slack_ns;
1542 	unsigned long default_timer_slack_ns;
1543 
1544 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1545 	/* Index of current stored address in ret_stack */
1546 	int curr_ret_stack;
1547 	/* Stack of return addresses for return function tracing */
1548 	struct ftrace_ret_stack	*ret_stack;
1549 	/* time stamp for last schedule */
1550 	unsigned long long ftrace_timestamp;
1551 	/*
1552 	 * Number of functions that haven't been traced
1553 	 * because of depth overrun.
1554 	 */
1555 	atomic_t trace_overrun;
1556 	/* Pause for the tracing */
1557 	atomic_t tracing_graph_pause;
1558 #endif
1559 #ifdef CONFIG_TRACING
1560 	/* state flags for use by tracers */
1561 	unsigned long trace;
1562 	/* bitmask and counter of trace recursion */
1563 	unsigned long trace_recursion;
1564 #endif /* CONFIG_TRACING */
1565 #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
1566 	struct memcg_batch_info {
1567 		int do_batch;	/* incremented when batch uncharge started */
1568 		struct mem_cgroup *memcg; /* target memcg of uncharge */
1569 		unsigned long nr_pages;	/* uncharged usage */
1570 		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
1571 	} memcg_batch;
1572 	unsigned int memcg_kmem_skip_account;
1573 #endif
1574 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1575 	atomic_t ptrace_bp_refcnt;
1576 #endif
1577 #ifdef CONFIG_UPROBES
1578 	struct uprobe_task *utask;
1579 #endif
1580 };
1581 
1582 /* Future-safe accessor for struct task_struct's cpus_allowed. */
1583 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1584 
1585 #ifdef CONFIG_NUMA_BALANCING
1586 extern void task_numa_fault(int node, int pages, bool migrated);
1587 extern void set_numabalancing_state(bool enabled);
1588 #else
1589 static inline void task_numa_fault(int node, int pages, bool migrated)
1590 {
1591 }
1592 static inline void set_numabalancing_state(bool enabled)
1593 {
1594 }
1595 #endif
1596 
1597 static inline struct pid *task_pid(struct task_struct *task)
1598 {
1599 	return task->pids[PIDTYPE_PID].pid;
1600 }
1601 
1602 static inline struct pid *task_tgid(struct task_struct *task)
1603 {
1604 	return task->group_leader->pids[PIDTYPE_PID].pid;
1605 }
1606 
1607 /*
1608  * Without tasklist or rcu lock it is not safe to dereference
1609  * the result of task_pgrp/task_session even if task == current;
1610  * we can race with another thread doing sys_setsid/sys_setpgid.
1611  */
1612 static inline struct pid *task_pgrp(struct task_struct *task)
1613 {
1614 	return task->group_leader->pids[PIDTYPE_PGID].pid;
1615 }
1616 
1617 static inline struct pid *task_session(struct task_struct *task)
1618 {
1619 	return task->group_leader->pids[PIDTYPE_SID].pid;
1620 }
1621 
1622 struct pid_namespace;
1623 
1624 /*
1625  * the helpers to get the task's different pids as they are seen
1626  * from various namespaces
1627  *
1628  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1629  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1630  *                     current.
1631  * task_xid_nr_ns()  : id seen from the ns specified;
1632  *
1633  * set_task_vxid()   : assigns a virtual id to a task;
1634  *
1635  * see also pid_nr() etc in include/linux/pid.h
1636  */
1637 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1638 			struct pid_namespace *ns);
1639 
1640 static inline pid_t task_pid_nr(struct task_struct *tsk)
1641 {
1642 	return tsk->pid;
1643 }
1644 
1645 static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1646 					struct pid_namespace *ns)
1647 {
1648 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1649 }
1650 
1651 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1652 {
1653 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1654 }
1655 
1656 
1657 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1658 {
1659 	return tsk->tgid;
1660 }
1661 
1662 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1663 
1664 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1665 {
1666 	return pid_vnr(task_tgid(tsk));
1667 }
1668 
1669 
1670 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1671 					struct pid_namespace *ns)
1672 {
1673 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1674 }
1675 
1676 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1677 {
1678 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1679 }
1680 
1681 
1682 static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1683 					struct pid_namespace *ns)
1684 {
1685 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1686 }
1687 
1688 static inline pid_t task_session_vnr(struct task_struct *tsk)
1689 {
1690 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1691 }
1692 
1693 /* obsolete, do not use */
1694 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1695 {
1696 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
1697 }
1698 
1699 /**
1700  * pid_alive - check that a task structure is not stale
1701  * @p: Task structure to be checked.
1702  *
1703  * Test if a process is not yet dead (at most zombie state).
1704  * If pid_alive fails, then pointers within the task structure
1705  * can be stale and must not be dereferenced.
1706  */
1707 static inline int pid_alive(struct task_struct *p)
1708 {
1709 	return p->pids[PIDTYPE_PID].pid != NULL;
1710 }
1711 
1712 /**
1713  * is_global_init - check if a task structure is init
1714  * @tsk: Task structure to be checked.
1715  *
1716  * Check if a task structure is the first user space task the kernel created.
1717  */
1718 static inline int is_global_init(struct task_struct *tsk)
1719 {
1720 	return tsk->pid == 1;
1721 }
1722 
1723 extern struct pid *cad_pid;
1724 
1725 extern void free_task(struct task_struct *tsk);
1726 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1727 
1728 extern void __put_task_struct(struct task_struct *t);
1729 
1730 static inline void put_task_struct(struct task_struct *t)
1731 {
1732 	if (atomic_dec_and_test(&t->usage))
1733 		__put_task_struct(t);
1734 }
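
/*
 * Illustrative sketch (not part of this header): a task_struct pointer that
 * is stashed away and used later must be pinned with get_task_struct() and
 * dropped with put_task_struct() once it is no longer needed:
 *
 *	get_task_struct(tsk);
 *	... use tsk after dropping whatever lock made it safe to find ...
 *	put_task_struct(tsk);
 */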
1735 
1736 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1737 extern void task_cputime(struct task_struct *t,
1738 			 cputime_t *utime, cputime_t *stime);
1739 extern void task_cputime_scaled(struct task_struct *t,
1740 				cputime_t *utimescaled, cputime_t *stimescaled);
1741 extern cputime_t task_gtime(struct task_struct *t);
1742 #else
1743 static inline void task_cputime(struct task_struct *t,
1744 				cputime_t *utime, cputime_t *stime)
1745 {
1746 	if (utime)
1747 		*utime = t->utime;
1748 	if (stime)
1749 		*stime = t->stime;
1750 }
1751 
1752 static inline void task_cputime_scaled(struct task_struct *t,
1753 				       cputime_t *utimescaled,
1754 				       cputime_t *stimescaled)
1755 {
1756 	if (utimescaled)
1757 		*utimescaled = t->utimescaled;
1758 	if (stimescaled)
1759 		*stimescaled = t->stimescaled;
1760 }
1761 
1762 static inline cputime_t task_gtime(struct task_struct *t)
1763 {
1764 	return t->gtime;
1765 }
1766 #endif
1767 extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1768 extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1769 
1770 /*
1771  * Per process flags
1772  */
1773 #define PF_EXITING	0x00000004	/* getting shut down */
1774 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
1775 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
1776 #define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
1777 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
1778 #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
1779 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
1780 #define PF_DUMPCORE	0x00000200	/* dumped core */
1781 #define PF_SIGNALED	0x00000400	/* killed by a signal */
1782 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
1783 #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
1784 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
1785 #define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
1786 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
1787 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
1788 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
1789 #define PF_KSWAPD	0x00040000	/* I am kswapd */
1790 #define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
1791 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1792 #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
1793 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1794 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
1795 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
1796 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
1797 #define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
1798 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1799 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
1800 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
1801 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
1802 
1803 /*
1804  * Only the _current_ task can read/write to tsk->flags, but other
1805  * tasks can access tsk->flags in readonly mode, for example
1806  * with tsk_used_math() (as during threaded core dumping).
1807  * There is however an exception to this rule during ptrace
1808  * or during fork: the ptracer task is allowed to write to the
1809  * child->flags of its traced child (same goes for fork, the parent
1810  * can write to the child->flags), because we're guaranteed the
1811  * child is not running and in turn not changing child->flags
1812  * at the same time the parent does it.
1813  */
1814 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1815 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1816 #define clear_used_math() clear_stopped_child_used_math(current)
1817 #define set_used_math() set_stopped_child_used_math(current)
1818 #define conditional_stopped_child_used_math(condition, child) \
1819 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1820 #define conditional_used_math(condition) \
1821 	conditional_stopped_child_used_math(condition, current)
1822 #define copy_to_stopped_child_used_math(child) \
1823 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1824 /* NOTE: this will return 0 or PF_USED_MATH; it will never return 1 */
1825 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1826 #define used_math() tsk_used_math(current)
1827 
1828 /* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
1829 static inline gfp_t memalloc_noio_flags(gfp_t flags)
1830 {
1831 	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
1832 		flags &= ~__GFP_IO;
1833 	return flags;
1834 }
1835 
1836 static inline unsigned int memalloc_noio_save(void)
1837 {
1838 	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
1839 	current->flags |= PF_MEMALLOC_NOIO;
1840 	return flags;
1841 }
1842 
1843 static inline void memalloc_noio_restore(unsigned int flags)
1844 {
1845 	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
1846 }
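
/*
 * Illustrative sketch (not part of this header): code that must not recurse
 * into I/O while allocating (e.g. on a block device error/resume path) can
 * bracket the region with the save/restore pair above:
 *
 *	unsigned int noio_flags;
 *
 *	noio_flags = memalloc_noio_save();
 *	... allocations in here implicitly lose __GFP_IO ...
 *	memalloc_noio_restore(noio_flags);
 */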
1847 
1848 /*
1849  * task->jobctl flags
1850  */
1851 #define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
1852 
1853 #define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
1854 #define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
1855 #define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
1856 #define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
1857 #define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
1858 #define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
1859 #define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
1860 
1861 #define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
1862 #define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
1863 #define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
1864 #define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
1865 #define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
1866 #define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
1867 #define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)
1868 
1869 #define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
1870 #define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
1871 
1872 extern bool task_set_jobctl_pending(struct task_struct *task,
1873 				    unsigned int mask);
1874 extern void task_clear_jobctl_trapping(struct task_struct *task);
1875 extern void task_clear_jobctl_pending(struct task_struct *task,
1876 				      unsigned int mask);
1877 
1878 #ifdef CONFIG_PREEMPT_RCU
1879 
1880 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1881 #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1882 
1883 static inline void rcu_copy_process(struct task_struct *p)
1884 {
1885 	p->rcu_read_lock_nesting = 0;
1886 	p->rcu_read_unlock_special = 0;
1887 #ifdef CONFIG_TREE_PREEMPT_RCU
1888 	p->rcu_blocked_node = NULL;
1889 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1890 #ifdef CONFIG_RCU_BOOST
1891 	p->rcu_boost_mutex = NULL;
1892 #endif /* #ifdef CONFIG_RCU_BOOST */
1893 	INIT_LIST_HEAD(&p->rcu_node_entry);
1894 }
1895 
1896 #else
1897 
1898 static inline void rcu_copy_process(struct task_struct *p)
1899 {
1900 }
1901 
1902 #endif
1903 
1904 static inline void tsk_restore_flags(struct task_struct *task,
1905 				unsigned long orig_flags, unsigned long flags)
1906 {
1907 	task->flags &= ~flags;
1908 	task->flags |= orig_flags & flags;
1909 }
1910 
1911 #ifdef CONFIG_SMP
1912 extern void do_set_cpus_allowed(struct task_struct *p,
1913 			       const struct cpumask *new_mask);
1914 
1915 extern int set_cpus_allowed_ptr(struct task_struct *p,
1916 				const struct cpumask *new_mask);
1917 #else
1918 static inline void do_set_cpus_allowed(struct task_struct *p,
1919 				      const struct cpumask *new_mask)
1920 {
1921 }
1922 static inline int set_cpus_allowed_ptr(struct task_struct *p,
1923 				       const struct cpumask *new_mask)
1924 {
1925 	if (!cpumask_test_cpu(0, new_mask))
1926 		return -EINVAL;
1927 	return 0;
1928 }
1929 #endif
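
/*
 * Illustrative sketch (not part of this header): restricting a task to a
 * single CPU with the helper declared above; "cpu" is assumed to be a valid
 * online CPU number:
 *
 *	if (set_cpus_allowed_ptr(p, cpumask_of(cpu)))
 *		pr_warn("could not bind task to CPU %d\n", cpu);
 */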
1930 
1931 #ifdef CONFIG_NO_HZ
1932 void calc_load_enter_idle(void);
1933 void calc_load_exit_idle(void);
1934 #else
1935 static inline void calc_load_enter_idle(void) { }
1936 static inline void calc_load_exit_idle(void) { }
1937 #endif /* CONFIG_NO_HZ */
1938 
1939 #ifndef CONFIG_CPUMASK_OFFSTACK
1940 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1941 {
1942 	return set_cpus_allowed_ptr(p, &new_mask);
1943 }
1944 #endif
1945 
1946 /*
1947  * Do not use outside of architecture code which knows its limitations.
1948  *
1949  * sched_clock() makes no promise of monotonicity or bounded drift between
1950  * CPUs, and using it directly (which you should not) requires disabling IRQs.
1951  *
1952  * Please use one of the three interfaces below.
1953  */
1954 extern unsigned long long notrace sched_clock(void);
1955 /*
1956  * See the comment in kernel/sched/clock.c
1957  */
1958 extern u64 cpu_clock(int cpu);
1959 extern u64 local_clock(void);
1960 extern u64 sched_clock_cpu(int cpu);
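
/*
 * Illustrative sketch (not part of this header): measuring a short duration
 * with local_clock(), which reports nanoseconds and is safe to call from
 * most contexts:
 *
 *	u64 t0, delta_ns;
 *
 *	t0 = local_clock();
 *	... work to be timed ...
 *	delta_ns = local_clock() - t0;
 */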
1961 
1962 
1963 extern void sched_clock_init(void);
1964 
1965 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1966 static inline void sched_clock_tick(void)
1967 {
1968 }
1969 
1970 static inline void sched_clock_idle_sleep_event(void)
1971 {
1972 }
1973 
1974 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
1975 {
1976 }
1977 #else
1978 /*
1979  * Architectures can set this to 1 if they have specified
1980  * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
1981  * but then during bootup it turns out that sched_clock()
1982  * is reliable after all:
1983  */
1984 extern int sched_clock_stable;
1985 
1986 extern void sched_clock_tick(void);
1987 extern void sched_clock_idle_sleep_event(void);
1988 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1989 #endif
1990 
1991 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1992 /*
1993  * An interface for runtime opt-in to irq time accounting based on sched_clock.
1994  * The opt-in is explicit so that slow sched_clock implementations do not
1995  * impose a performance penalty.
1996  */
1997 extern void enable_sched_clock_irqtime(void);
1998 extern void disable_sched_clock_irqtime(void);
1999 #else
2000 static inline void enable_sched_clock_irqtime(void) {}
2001 static inline void disable_sched_clock_irqtime(void) {}
2002 #endif
2003 
2004 extern unsigned long long
2005 task_sched_runtime(struct task_struct *task);
2006 
2007 /* sched_exec is called by processes performing an exec */
2008 #ifdef CONFIG_SMP
2009 extern void sched_exec(void);
2010 #else
2011 #define sched_exec()   {}
2012 #endif
2013 
2014 extern void sched_clock_idle_sleep_event(void);
2015 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2016 
2017 #ifdef CONFIG_HOTPLUG_CPU
2018 extern void idle_task_exit(void);
2019 #else
2020 static inline void idle_task_exit(void) {}
2021 #endif
2022 
2023 #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
2024 extern void wake_up_idle_cpu(int cpu);
2025 #else
2026 static inline void wake_up_idle_cpu(int cpu) { }
2027 #endif
2028 
2029 #ifdef CONFIG_SCHED_AUTOGROUP
2030 extern void sched_autogroup_create_attach(struct task_struct *p);
2031 extern void sched_autogroup_detach(struct task_struct *p);
2032 extern void sched_autogroup_fork(struct signal_struct *sig);
2033 extern void sched_autogroup_exit(struct signal_struct *sig);
2034 #ifdef CONFIG_PROC_FS
2035 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2036 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2037 #endif
2038 #else
2039 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2040 static inline void sched_autogroup_detach(struct task_struct *p) { }
2041 static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2042 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2043 #endif
2044 
2045 extern bool yield_to(struct task_struct *p, bool preempt);
2046 extern void set_user_nice(struct task_struct *p, long nice);
2047 extern int task_prio(const struct task_struct *p);
2048 extern int task_nice(const struct task_struct *p);
2049 extern int can_nice(const struct task_struct *p, const int nice);
2050 extern int task_curr(const struct task_struct *p);
2051 extern int idle_cpu(int cpu);
2052 extern int sched_setscheduler(struct task_struct *, int,
2053 			      const struct sched_param *);
2054 extern int sched_setscheduler_nocheck(struct task_struct *, int,
2055 				      const struct sched_param *);
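
/*
 * Illustrative sketch (not part of this header): moving a kernel task to the
 * FIFO real-time class with the helper declared above; MAX_RT_PRIO - 1 is
 * the highest RT priority, real users normally pick something lower:
 *
 *	struct sched_param sp = { .sched_priority = MAX_RT_PRIO - 1 };
 *
 *	if (sched_setscheduler_nocheck(p, SCHED_FIFO, &sp))
 *		pr_warn("failed to switch to SCHED_FIFO\n");
 */
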
2056 extern struct task_struct *idle_task(int cpu);
2057 /**
2058  * is_idle_task - is the specified task an idle task?
2059  * @p: the task in question.
2060  */
2061 static inline bool is_idle_task(const struct task_struct *p)
2062 {
2063 	return p->pid == 0;
2064 }
2065 extern struct task_struct *curr_task(int cpu);
2066 extern void set_curr_task(int cpu, struct task_struct *p);
2067 
2068 void yield(void);
2069 
2070 /*
2071  * The default (Linux) execution domain.
2072  */
2073 extern struct exec_domain	default_exec_domain;
2074 
2075 union thread_union {
2076 	struct thread_info thread_info;
2077 	unsigned long stack[THREAD_SIZE/sizeof(long)];
2078 };
2079 
2080 #ifndef __HAVE_ARCH_KSTACK_END
2081 static inline int kstack_end(void *addr)
2082 {
2083 	/* Reliable end-of-stack detection:
2084 	 * Some APM BIOS versions misalign the stack.
2085 	 */
2086 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2087 }
2088 #endif
2089 
2090 extern union thread_union init_thread_union;
2091 extern struct task_struct init_task;
2092 
2093 extern struct   mm_struct init_mm;
2094 
2095 extern struct pid_namespace init_pid_ns;
2096 
2097 /*
2098  * find a task by one of its numerical ids
2099  *
2100  * find_task_by_pid_ns():
2101  *      finds a task by its pid in the specified namespace
2102  * find_task_by_vpid():
2103  *      finds a task by its virtual pid
2104  *
2105  * see also find_vpid() etc in include/linux/pid.h
2106  */
2107 
2108 extern struct task_struct *find_task_by_vpid(pid_t nr);
2109 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2110 		struct pid_namespace *ns);
2111 
2112 extern void __set_special_pids(struct pid *pid);
2113 
2114 /* per-UID process charging. */
2115 extern struct user_struct * alloc_uid(kuid_t);
2116 static inline struct user_struct *get_uid(struct user_struct *u)
2117 {
2118 	atomic_inc(&u->__count);
2119 	return u;
2120 }
2121 extern void free_uid(struct user_struct *);
2122 
2123 #include <asm/current.h>
2124 
2125 extern void xtime_update(unsigned long ticks);
2126 
2127 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2128 extern int wake_up_process(struct task_struct *tsk);
2129 extern void wake_up_new_task(struct task_struct *tsk);
2130 #ifdef CONFIG_SMP
2131  extern void kick_process(struct task_struct *tsk);
2132 #else
2133  static inline void kick_process(struct task_struct *tsk) { }
2134 #endif
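
/*
 * Illustrative sketch (not part of this header): the usual sleep side of a
 * wake_up_process() pairing.  The task state is published before the
 * condition is re-checked so a concurrent wakeup cannot be missed;
 * "condition" is a placeholder for the caller's own predicate:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	if (!condition)
 *		schedule();
 *	__set_current_state(TASK_RUNNING);
 *
 * The waking side sets the condition and then calls wake_up_process(tsk).
 */
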
2135 extern void sched_fork(struct task_struct *p);
2136 extern void sched_dead(struct task_struct *p);
2137 
2138 extern void proc_caches_init(void);
2139 extern void flush_signals(struct task_struct *);
2140 extern void __flush_signals(struct task_struct *);
2141 extern void ignore_signals(struct task_struct *);
2142 extern void flush_signal_handlers(struct task_struct *, int force_default);
2143 extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2144 
2145 static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2146 {
2147 	unsigned long flags;
2148 	int ret;
2149 
2150 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
2151 	ret = dequeue_signal(tsk, mask, info);
2152 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2153 
2154 	return ret;
2155 }
2156 
2157 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2158 			      sigset_t *mask);
2159 extern void unblock_all_signals(void);
2160 extern void release_task(struct task_struct * p);
2161 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2162 extern int force_sigsegv(int, struct task_struct *);
2163 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2164 extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2165 extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2166 extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2167 				const struct cred *, u32);
2168 extern int kill_pgrp(struct pid *pid, int sig, int priv);
2169 extern int kill_pid(struct pid *pid, int sig, int priv);
2170 extern int kill_proc_info(int, struct siginfo *, pid_t);
2171 extern __must_check bool do_notify_parent(struct task_struct *, int);
2172 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2173 extern void force_sig(int, struct task_struct *);
2174 extern int send_sig(int, struct task_struct *, int);
2175 extern int zap_other_threads(struct task_struct *p);
2176 extern struct sigqueue *sigqueue_alloc(void);
2177 extern void sigqueue_free(struct sigqueue *);
2178 extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
2179 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2180 
2181 static inline void restore_saved_sigmask(void)
2182 {
2183 	if (test_and_clear_restore_sigmask())
2184 		__set_current_blocked(&current->saved_sigmask);
2185 }
2186 
2187 static inline sigset_t *sigmask_to_save(void)
2188 {
2189 	sigset_t *res = &current->blocked;
2190 	if (unlikely(test_restore_sigmask()))
2191 		res = &current->saved_sigmask;
2192 	return res;
2193 }
2194 
2195 static inline int kill_cad_pid(int sig, int priv)
2196 {
2197 	return kill_pid(cad_pid, sig, priv);
2198 }
2199 
2200 /* These can be the second arg to send_sig_info/send_group_sig_info.  */
2201 #define SEND_SIG_NOINFO ((struct siginfo *) 0)
2202 #define SEND_SIG_PRIV	((struct siginfo *) 1)
2203 #define SEND_SIG_FORCED	((struct siginfo *) 2)
2204 
2205 /*
2206  * True if we are on the alternate signal stack.
2207  */
2208 static inline int on_sig_stack(unsigned long sp)
2209 {
2210 #ifdef CONFIG_STACK_GROWSUP
2211 	return sp >= current->sas_ss_sp &&
2212 		sp - current->sas_ss_sp < current->sas_ss_size;
2213 #else
2214 	return sp > current->sas_ss_sp &&
2215 		sp - current->sas_ss_sp <= current->sas_ss_size;
2216 #endif
2217 }
2218 
2219 static inline int sas_ss_flags(unsigned long sp)
2220 {
2221 	return (current->sas_ss_size == 0 ? SS_DISABLE
2222 		: on_sig_stack(sp) ? SS_ONSTACK : 0);
2223 }
2224 
2225 static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2226 {
2227 	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2228 #ifdef CONFIG_STACK_GROWSUP
2229 		return current->sas_ss_sp;
2230 #else
2231 		return current->sas_ss_sp + current->sas_ss_size;
2232 #endif
2233 	return sp;
2234 }
2235 
2236 /*
2237  * Routines for handling mm_structs
2238  */
2239 extern struct mm_struct * mm_alloc(void);
2240 
2241 /* mmdrop drops the mm and the page tables */
2242 extern void __mmdrop(struct mm_struct *);
2243 static inline void mmdrop(struct mm_struct * mm)
2244 {
2245 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2246 		__mmdrop(mm);
2247 }
2248 
2249 /* mmput gets rid of the mappings and all user-space */
2250 extern void mmput(struct mm_struct *);
2251 /* Grab a reference to a task's mm, if it is not already going away */
2252 extern struct mm_struct *get_task_mm(struct task_struct *task);
2253 /*
2254  * Grab a reference to a task's mm, if it is not already going away
2255  * and the ptrace_may_access() check with the given mode parameter
2256  * succeeds.
2257  */
2258 extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2259 /* Remove the current task's stale references to the old mm_struct */
2260 extern void mm_release(struct task_struct *, struct mm_struct *);
2261 /* Allocate a new mm structure and copy contents from tsk->mm */
2262 extern struct mm_struct *dup_mm(struct task_struct *tsk);
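
/*
 * Illustrative sketch (not part of this header): taking a temporary
 * reference on another task's mm with the helpers declared above:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		... inspect mm, e.g. mm->total_vm ...
 *		mmput(mm);
 *	}
 */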
2263 
2264 extern int copy_thread(unsigned long, unsigned long, unsigned long,
2265 			struct task_struct *);
2266 extern void flush_thread(void);
2267 extern void exit_thread(void);
2268 
2269 extern void exit_files(struct task_struct *);
2270 extern void __cleanup_sighand(struct sighand_struct *);
2271 
2272 extern void exit_itimers(struct signal_struct *);
2273 extern void flush_itimer_signals(void);
2274 
2275 extern void do_group_exit(int);
2276 
2277 extern int allow_signal(int);
2278 extern int disallow_signal(int);
2279 
2280 extern int do_execve(const char *,
2281 		     const char __user * const __user *,
2282 		     const char __user * const __user *);
2283 extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2284 struct task_struct *fork_idle(int);
2285 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
2286 
2287 extern void set_task_comm(struct task_struct *tsk, char *from);
2288 extern char *get_task_comm(char *to, struct task_struct *tsk);
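
/*
 * Illustrative sketch (not part of this header): reading a task's command
 * name into a correctly sized local buffer:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 */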
2289 
2290 #ifdef CONFIG_SMP
2291 void scheduler_ipi(void);
2292 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2293 #else
2294 static inline void scheduler_ipi(void) { }
2295 static inline unsigned long wait_task_inactive(struct task_struct *p,
2296 					       long match_state)
2297 {
2298 	return 1;
2299 }
2300 #endif
2301 
2302 #define next_task(p) \
2303 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2304 
2305 #define for_each_process(p) \
2306 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2307 
2308 extern bool current_is_single_threaded(void);
2309 
2310 /*
2311  * Careful: do_each_thread/while_each_thread is a double loop so
2312  *          'break' will not work as expected - use goto instead.
2313  */
2314 #define do_each_thread(g, t) \
2315 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2316 
2317 #define while_each_thread(g, t) \
2318 	while ((t = next_thread(t)) != g)
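
/*
 * Illustrative sketch (not part of this header): walking every thread in the
 * system with the double loop above, under tasklist protection:
 *
 *	struct task_struct *g, *t;
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		... inspect t; use goto, not break, to leave early ...
 *	} while_each_thread(g, t);
 *	read_unlock(&tasklist_lock);
 */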
2319 
2320 static inline int get_nr_threads(struct task_struct *tsk)
2321 {
2322 	return tsk->signal->nr_threads;
2323 }
2324 
2325 static inline bool thread_group_leader(struct task_struct *p)
2326 {
2327 	return p->exit_signal >= 0;
2328 }
2329 
2330 /* Due to the insanities of de_thread it is possible for a process
2331  * to have the pid of the thread group leader without actually being
2332  * the thread group leader.  For iteration through the pids in proc
2333  * all we care about is that we have a task with the appropriate
2334  * pid; we don't actually care if we have the right task.
2335  */
2336 static inline int has_group_leader_pid(struct task_struct *p)
2337 {
2338 	return p->pid == p->tgid;
2339 }
2340 
2341 static inline
2342 int same_thread_group(struct task_struct *p1, struct task_struct *p2)
2343 {
2344 	return p1->tgid == p2->tgid;
2345 }
2346 
2347 static inline struct task_struct *next_thread(const struct task_struct *p)
2348 {
2349 	return list_entry_rcu(p->thread_group.next,
2350 			      struct task_struct, thread_group);
2351 }
2352 
2353 static inline int thread_group_empty(struct task_struct *p)
2354 {
2355 	return list_empty(&p->thread_group);
2356 }
2357 
2358 #define delay_group_leader(p) \
2359 		(thread_group_leader(p) && !thread_group_empty(p))
2360 
2361 /*
2362  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2363  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2364  * pins the final release of task.io_context.  Also protects ->cpuset and
2365  * ->cgroup.subsys[]. And ->vfork_done.
2366  *
2367  * Nests both inside and outside of read_lock(&tasklist_lock).
2368  * It must not be nested with write_lock_irq(&tasklist_lock),
2369  * neither inside nor outside.
2370  */
2371 static inline void task_lock(struct task_struct *p)
2372 {
2373 	spin_lock(&p->alloc_lock);
2374 }
2375 
2376 static inline void task_unlock(struct task_struct *p)
2377 {
2378 	spin_unlock(&p->alloc_lock);
2379 }
2380 
2381 extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2382 							unsigned long *flags);
2383 
2384 static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
2385 						       unsigned long *flags)
2386 {
2387 	struct sighand_struct *ret;
2388 
2389 	ret = __lock_task_sighand(tsk, flags);
2390 	(void)__cond_lock(&tsk->sighand->siglock, ret);
2391 	return ret;
2392 }
2393 
2394 static inline void unlock_task_sighand(struct task_struct *tsk,
2395 						unsigned long *flags)
2396 {
2397 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2398 }
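
/*
 * Illustrative sketch (not part of this header): lock_task_sighand() may
 * fail if the task is already being released, so its return value must be
 * checked:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(tsk, &flags)) {
 *		... tsk->sighand and tsk->signal are stable here ...
 *		unlock_task_sighand(tsk, &flags);
 *	}
 */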
2399 
2400 #ifdef CONFIG_CGROUPS
2401 static inline void threadgroup_change_begin(struct task_struct *tsk)
2402 {
2403 	down_read(&tsk->signal->group_rwsem);
2404 }
2405 static inline void threadgroup_change_end(struct task_struct *tsk)
2406 {
2407 	up_read(&tsk->signal->group_rwsem);
2408 }
2409 
2410 /**
2411  * threadgroup_lock - lock threadgroup
2412  * @tsk: member task of the threadgroup to lock
2413  *
2414  * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
2415  * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
2416  * perform exec.  This is useful for cases where the threadgroup needs to
2417  * stay stable across blockable operations.
2418  *
2419  * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
2420  * synchronization.  While held, no new task will be added to threadgroup
2421  * and no existing live task will have its PF_EXITING set.
2422  *
2423  * During exec, a task puts its thread group through unusual
2424  * changes.  After de-threading, exclusive access is assumed to resources
2425  * which are usually shared by tasks in the same group - e.g. sighand may
2426  * be replaced with a new one.  Also, the exec'ing task takes over group
2427  * leader role including its pid.  Exclude these changes while locked by
2428  * grabbing cred_guard_mutex which is used to synchronize exec path.
2429  */
2430 static inline void threadgroup_lock(struct task_struct *tsk)
2431 {
2432 	/*
2433 	 * exec uses exit for de-threading, nesting group_rwsem inside
2434 	 * cred_guard_mutex. Grab cred_guard_mutex first.
2435 	 */
2436 	mutex_lock(&tsk->signal->cred_guard_mutex);
2437 	down_write(&tsk->signal->group_rwsem);
2438 }
2439 
2440 /**
2441  * threadgroup_unlock - unlock threadgroup
2442  * @tsk: member task of the threadgroup to unlock
2443  *
2444  * Reverse threadgroup_lock().
2445  */
2446 static inline void threadgroup_unlock(struct task_struct *tsk)
2447 {
2448 	up_write(&tsk->signal->group_rwsem);
2449 	mutex_unlock(&tsk->signal->cred_guard_mutex);
2450 }
2451 #else
2452 static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2453 static inline void threadgroup_change_end(struct task_struct *tsk) {}
2454 static inline void threadgroup_lock(struct task_struct *tsk) {}
2455 static inline void threadgroup_unlock(struct task_struct *tsk) {}
2456 #endif
2457 
2458 #ifndef __HAVE_THREAD_FUNCTIONS
2459 
2460 #define task_thread_info(task)	((struct thread_info *)(task)->stack)
2461 #define task_stack_page(task)	((task)->stack)
2462 
2463 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2464 {
2465 	*task_thread_info(p) = *task_thread_info(org);
2466 	task_thread_info(p)->task = p;
2467 }
2468 
2469 static inline unsigned long *end_of_stack(struct task_struct *p)
2470 {
2471 	return (unsigned long *)(task_thread_info(p) + 1);
2472 }
2473 
2474 #endif
2475 
2476 static inline int object_is_on_stack(void *obj)
2477 {
2478 	void *stack = task_stack_page(current);
2479 
2480 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2481 }
2482 
2483 extern void thread_info_cache_init(void);
2484 
2485 #ifdef CONFIG_DEBUG_STACK_USAGE
2486 static inline unsigned long stack_not_used(struct task_struct *p)
2487 {
2488 	unsigned long *n = end_of_stack(p);
2489 
2490 	do { 	/* Skip over canary */
2491 		n++;
2492 	} while (!*n);
2493 
2494 	return (unsigned long)n - (unsigned long)end_of_stack(p);
2495 }
2496 #endif
2497 
2498 /* Set thread flags in another task's structure
2499  * - see asm/thread_info.h for TIF_xxxx flags available
2500  */
2501 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2502 {
2503 	set_ti_thread_flag(task_thread_info(tsk), flag);
2504 }
2505 
2506 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2507 {
2508 	clear_ti_thread_flag(task_thread_info(tsk), flag);
2509 }
2510 
2511 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2512 {
2513 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2514 }
2515 
2516 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2517 {
2518 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2519 }
2520 
2521 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2522 {
2523 	return test_ti_thread_flag(task_thread_info(tsk), flag);
2524 }
2525 
2526 static inline void set_tsk_need_resched(struct task_struct *tsk)
2527 {
2528 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2529 }
2530 
2531 static inline void clear_tsk_need_resched(struct task_struct *tsk)
2532 {
2533 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2534 }
2535 
2536 static inline int test_tsk_need_resched(struct task_struct *tsk)
2537 {
2538 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2539 }
2540 
2541 static inline int restart_syscall(void)
2542 {
2543 	set_tsk_thread_flag(current, TIF_SIGPENDING);
2544 	return -ERESTARTNOINTR;
2545 }
2546 
2547 static inline int signal_pending(struct task_struct *p)
2548 {
2549 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2550 }
2551 
2552 static inline int __fatal_signal_pending(struct task_struct *p)
2553 {
2554 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2555 }
2556 
2557 static inline int fatal_signal_pending(struct task_struct *p)
2558 {
2559 	return signal_pending(p) && __fatal_signal_pending(p);
2560 }
2561 
2562 static inline int signal_pending_state(long state, struct task_struct *p)
2563 {
2564 	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2565 		return 0;
2566 	if (!signal_pending(p))
2567 		return 0;
2568 
2569 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2570 }
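
/*
 * Illustrative sketch (not part of this header): a typical interruptible
 * wait loop that bails out when a signal is pending for the caller;
 * "condition" and "ret" are the caller's own:
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (signal_pending(current)) {
 *			ret = -ERESTARTSYS;
 *			break;
 *		}
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */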
2571 
2572 static inline int need_resched(void)
2573 {
2574 	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
2575 }
2576 
2577 /*
2578  * cond_resched() and cond_resched_lock(): latency reduction via
2579  * explicit rescheduling in places that are safe. The return
2580  * value indicates whether a reschedule was in fact done.
2581  * cond_resched_lock() will drop the spinlock before scheduling, and
2582  * cond_resched_softirq() will enable bottom halves before scheduling.
2583  */
2584 extern int _cond_resched(void);
2585 
2586 #define cond_resched() ({			\
2587 	__might_sleep(__FILE__, __LINE__, 0);	\
2588 	_cond_resched();			\
2589 })
2590 
2591 extern int __cond_resched_lock(spinlock_t *lock);
2592 
2593 #ifdef CONFIG_PREEMPT_COUNT
2594 #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
2595 #else
2596 #define PREEMPT_LOCK_OFFSET	0
2597 #endif
2598 
2599 #define cond_resched_lock(lock) ({				\
2600 	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
2601 	__cond_resched_lock(lock);				\
2602 })
2603 
2604 extern int __cond_resched_softirq(void);
2605 
2606 #define cond_resched_softirq() ({					\
2607 	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
2608 	__cond_resched_softirq();					\
2609 })
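
/*
 * Illustrative sketch (not part of this header): breaking up a long
 * lock-protected scan with cond_resched_lock(), which drops and retakes the
 * spinlock if a reschedule is due; my_lock/more_work/do_one_unit are
 * placeholder names, and the caller must tolerate the lock being dropped:
 *
 *	spin_lock(&my_lock);
 *	while (more_work()) {
 *		do_one_unit();
 *		cond_resched_lock(&my_lock);
 *	}
 *	spin_unlock(&my_lock);
 */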
2610 
2611 /*
2612  * Does a critical section need to be broken due to another
2613  * task waiting?  (Technically this does not depend on CONFIG_PREEMPT,
2614  * but reflects a general need for low latency.)
2615  */
2616 static inline int spin_needbreak(spinlock_t *lock)
2617 {
2618 #ifdef CONFIG_PREEMPT
2619 	return spin_is_contended(lock);
2620 #else
2621 	return 0;
2622 #endif
2623 }
2624 
2625 /*
2626  * Thread group CPU time accounting.
2627  */
2628 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2629 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2630 
2631 static inline void thread_group_cputime_init(struct signal_struct *sig)
2632 {
2633 	raw_spin_lock_init(&sig->cputimer.lock);
2634 }
2635 
2636 /*
2637  * Reevaluate whether the task has signals pending delivery.
2638  * Wake the task if so.
2639  * This is required every time the blocked sigset_t changes.
2640  * Callers must hold sighand->siglock.
2641  */
2642 extern void recalc_sigpending_and_wake(struct task_struct *t);
2643 extern void recalc_sigpending(void);
2644 
2645 extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
2646 
2647 static inline void signal_wake_up(struct task_struct *t, bool resume)
2648 {
2649 	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2650 }
2651 static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
2652 {
2653 	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2654 }
2655 
2656 /*
2657  * Wrappers for p->thread_info->cpu access. No-op on UP.
2658  */
2659 #ifdef CONFIG_SMP
2660 
2661 static inline unsigned int task_cpu(const struct task_struct *p)
2662 {
2663 	return task_thread_info(p)->cpu;
2664 }
2665 
2666 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2667 
2668 #else
2669 
2670 static inline unsigned int task_cpu(const struct task_struct *p)
2671 {
2672 	return 0;
2673 }
2674 
2675 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2676 {
2677 }
2678 
2679 #endif /* CONFIG_SMP */
2680 
2681 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2682 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2683 
2684 #ifdef CONFIG_CGROUP_SCHED
2685 
2686 extern struct task_group root_task_group;
2687 
2688 extern struct task_group *sched_create_group(struct task_group *parent);
2689 extern void sched_online_group(struct task_group *tg,
2690 			       struct task_group *parent);
2691 extern void sched_destroy_group(struct task_group *tg);
2692 extern void sched_offline_group(struct task_group *tg);
2693 extern void sched_move_task(struct task_struct *tsk);
2694 #ifdef CONFIG_FAIR_GROUP_SCHED
2695 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
2696 extern unsigned long sched_group_shares(struct task_group *tg);
2697 #endif
2698 #ifdef CONFIG_RT_GROUP_SCHED
2699 extern int sched_group_set_rt_runtime(struct task_group *tg,
2700 				      long rt_runtime_us);
2701 extern long sched_group_rt_runtime(struct task_group *tg);
2702 extern int sched_group_set_rt_period(struct task_group *tg,
2703 				      long rt_period_us);
2704 extern long sched_group_rt_period(struct task_group *tg);
2705 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
2706 #endif
2707 #endif /* CONFIG_CGROUP_SCHED */
2708 
2709 extern int task_can_switch_user(struct user_struct *up,
2710 					struct task_struct *tsk);
2711 
2712 #ifdef CONFIG_TASK_XACCT
2713 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2714 {
2715 	tsk->ioac.rchar += amt;
2716 }
2717 
2718 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2719 {
2720 	tsk->ioac.wchar += amt;
2721 }
2722 
2723 static inline void inc_syscr(struct task_struct *tsk)
2724 {
2725 	tsk->ioac.syscr++;
2726 }
2727 
2728 static inline void inc_syscw(struct task_struct *tsk)
2729 {
2730 	tsk->ioac.syscw++;
2731 }
2732 #else
2733 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2734 {
2735 }
2736 
2737 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2738 {
2739 }
2740 
2741 static inline void inc_syscr(struct task_struct *tsk)
2742 {
2743 }
2744 
2745 static inline void inc_syscw(struct task_struct *tsk)
2746 {
2747 }
2748 #endif
2749 
2750 #ifndef TASK_SIZE_OF
2751 #define TASK_SIZE_OF(tsk)	TASK_SIZE
2752 #endif
2753 
2754 #ifdef CONFIG_MM_OWNER
2755 extern void mm_update_next_owner(struct mm_struct *mm);
2756 extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2757 #else
2758 static inline void mm_update_next_owner(struct mm_struct *mm)
2759 {
2760 }
2761 
2762 static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2763 {
2764 }
2765 #endif /* CONFIG_MM_OWNER */
2766 
2767 static inline unsigned long task_rlimit(const struct task_struct *tsk,
2768 		unsigned int limit)
2769 {
2770 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
2771 }
2772 
2773 static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
2774 		unsigned int limit)
2775 {
2776 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
2777 }
2778 
2779 static inline unsigned long rlimit(unsigned int limit)
2780 {
2781 	return task_rlimit(current, limit);
2782 }
2783 
2784 static inline unsigned long rlimit_max(unsigned int limit)
2785 {
2786 	return task_rlimit_max(current, limit);
2787 }
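
/*
 * Illustrative sketch (not part of this header): comparing a (hypothetical)
 * requested size against the caller's soft limit via the rlimit() helper
 * above:
 *
 *	if (new_size > rlimit(RLIMIT_STACK))
 *		return -ENOMEM;
 */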
2788 
2789 #endif
2790