#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * cloning flags:
 */
#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
#define CLONE_THREAD	0x00010000	/* Same thread group? */
#define CLONE_NEWNS	0x00020000	/* New namespace group? */
#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
   and is now available for re-use. */
#define CLONE_NEWUTS		0x04000000	/* New utsname group? */
#define CLONE_NEWIPC		0x08000000	/* New ipcs */
#define CLONE_NEWUSER		0x10000000	/* New user namespace */
#define CLONE_NEWPID		0x20000000	/* New pid namespace */
#define CLONE_NEWNET		0x40000000	/* New network namespace */
#define CLONE_IO		0x80000000	/* Clone io context */

/*
 * Scheduling policies
 */
#define SCHED_NORMAL		0
#define SCHED_FIFO		1
#define SCHED_RR		2
#define SCHED_BATCH		3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE		5
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK     0x40000000

#ifdef __KERNEL__

struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
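
/*
 * For comparison, a sketch of the flag combination a user-space thread
 * library such as NPTL passes to clone(2) so the child lands in the
 * caller's thread group (the macro name is illustrative only, not part
 * of this header):
 */
#define CLONE_THREAD_FLAGS_EXAMPLE					\
	(CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |		\
	 CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |			\
	 CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID)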

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
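
/*
 * A minimal sketch of how these pieces fit together (the function name
 * is illustrative; the real update lives in kernel/sched.c): every
 * LOAD_FREQ ticks, the active-task count, scaled to fixed-point, is
 * folded into each avenrun[] slot as
 *   load = load * exp/FIXED_1 + active * (FIXED_1 - exp)/FIXED_1
 */
static inline void calc_load_fold_example(unsigned long active)
{
	active *= FIXED_1;			/* scale count to fixed-point */
	CALC_LOAD(avenrun[0], EXP_1, active);	/* 1-minute average */
	CALC_LOAD(avenrun[1], EXP_5, active);	/* 5-minute average */
	CALC_LOAD(avenrun[2], EXP_15, active);	/* 15-minute average */
}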

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);


extern void calc_global_load(unsigned long ticks);

extern unsigned long get_parent_ip(unsigned long addr);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#else
static inline void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
}
static inline void proc_sched_set_task(struct task_struct *p)
{
}
static inline void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_STATE_MAX		512

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"

/* Build-time check: one character in TASK_STATE_TO_CHAR_STR per task state bit. */
extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_dead(task)	((task)->exit_state != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FREEZING) == 0)

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)			\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))
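
/*
 * A minimal sketch of the canonical wait loop built on these macros
 * (the condition callback is a stand-in for whatever the caller waits
 * on; schedule() is declared later in this file):
 */
static inline void wait_for_condition_example(int (*condition)(void))
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (condition())
			break;
		schedule();		/* sleep until woken */
	}
	__set_current_state(TASK_RUNNING);	/* condition met, run on */
}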

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(int cpu);

extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern void select_nohz_load_balancer(int stop_tick);
extern int get_nohz_timer_target(void);
#else
static inline void select_nohz_load_balancer(int stop_tick) { }
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int  softlockup_panic;
extern int softlockup_thresh;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
extern unsigned int  sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_check_count;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 void __user *buffer,
					 size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
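
/*
 * Sketch: an uninterruptible ~100ms sleep built on the helpers above,
 * using msecs_to_jiffies() from <linux/jiffies.h> (already included);
 * the function name is illustrative only.
 */
static inline void sleep_100ms_example(void)
{
	/* sets TASK_UNINTERRUPTIBLE, sleeps, restores TASK_RUNNING */
	schedule_timeout_uninterruptible(msecs_to_jiffies(100));
}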

struct nsproxy;
struct user_namespace;

/*
 * Default maximum number of active map areas, this limits the number of vmas
 * per mm struct. Users can overwrite this number by sysctl but there is a
 * problem.
 *
 * When a program's coredump is generated as ELF format, a section is created
 * per vma. In ELF, the number of sections is represented as an unsigned short.
 * This means the number of sections should be smaller than 65535 at coredump.
 * Because the kernel adds some informative sections to the image of the
 * program when generating the coredump, we need some margin. The number of
 * extra sections is 1-3 now and depends on the arch. We use "5" as a safe
 * margin here.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
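/* Worked out: USHRT_MAX is 65535, so the default allows 65530 vmas per mm. */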

extern int sysctl_max_map_count;

#include <linux/aio.h>

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif


extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE      0  /* core dump is permitted */
#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */

#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
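
/*
 * Worked example of the bit layout above: the filter occupies bits 2-8,
 * so MMF_DUMP_FILTER_MASK == 0x7f << 2 == 0x1fc, and with
 * CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y the default filter is
 * (1<<2)|(1<<3)|(1<<7)|(1<<6) == 0xcc (anon private/shared, hugetlb
 * private, ELF headers).
 */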

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are
 * tracked for threads and thread groups.  Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = cputime_zero,				\
		.stime = cputime_zero,				\
		.sum_exec_runtime = 0,				\
	}

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.
 * @running:		non-zero when there are timers running and
 * 			@cputime receives updates.
 * @lock:		lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	spinlock_t lock;
};

struct autogroup;

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		sigcnt;
	atomic_t		live;
	int			nr_threads;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as these
	 * values are defined to 0 and 1 respectively.
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	cputime_t prev_utime, prev_stime;
#endif
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time of dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif

	int oom_adj;		/* OOM kill score adjustment (bit shift) */
	int oom_score_adj;	/* OOM kill score adjustment */
	int oom_score_adj_min;	/* OOM kill score adjustment minimum value.
				 * Only settable by CAP_SYS_RESOURCE. */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace) */
};

/* Context switch must be unlocked if interrupts are to be enabled */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
# define __ARCH_WANT_UNLOCKED_CTXSW
#endif

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	uid_t uid;
	struct user_namespace *user_ns;

#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(uid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	struct timespec freepages_start, freepages_end;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * sched-domains (multiprocessor balancing) declarations:
 */

/*
 * Increase resolution of nice-level calculations:
 */
#define SCHED_LOAD_SHIFT	10
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

#define SCHED_LOAD_SCALE_FUZZ	SCHED_LOAD_SCALE

#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_PREFER_LOCAL		0x0040  /* Prefer to keep tasks local to this domain */
#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
#define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */

enum powersavings_balance_level {
	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
					 * first for long running threads
					 */
	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
					 * cpu package for power savings
					 */
	MAX_POWERSAVINGS_BALANCE_LEVELS
};

extern int sched_mc_power_savings, sched_smt_power_savings;

static inline int sd_balance_for_mc_power(void)
{
	if (sched_smt_power_savings)
		return SD_POWERSAVINGS_BALANCE;

	if (!sched_mc_power_savings)
		return SD_PREFER_SIBLING;

	return 0;
}

static inline int sd_balance_for_package_power(void)
{
	if (sched_mc_power_savings | sched_smt_power_savings)
		return SD_POWERSAVINGS_BALANCE;

	return SD_PREFER_SIBLING;
}

extern int __weak arch_sd_sibiling_asym_packing(void);

/*
 * Optimise SD flags for power savings:
 * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
 * Keep default SD flags if sched_{smt,mc}_power_saving=0
 */

static inline int sd_power_saving_flags(void)
{
	if (sched_mc_power_savings | sched_smt_power_savings)
		return SD_BALANCE_NEWIDLE;

	return 0;
}

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU.
	 */
	unsigned int cpu_power, cpu_power_orig;
	unsigned int group_weight;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}
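
/*
 * Sketch of the flexible-array idiom the NOTE above describes (the
 * helper name is illustrative and the real allocation is done in
 * kernel/sched.c; assumes kzalloc() from <linux/slab.h>): the trailing
 * cpumask storage is allocated in one go with the structure itself.
 */
static inline struct sched_group *alloc_sched_group_example(gfp_t gfp)
{
	return kzalloc(sizeof(struct sched_group) + cpumask_size(), gfp);
}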

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	u64 last_update;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

/* Test a flag in parent sched domain */
static inline int test_sd_parent(struct sched_domain *sd, int flag)
{
	if (sd->parent && (sd->parent->flags & flag))
		return 1;

	return 0;
}

unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}
#endif	/* !CONFIG_SMP */


struct io_context;			/* See blkdev.h */


#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct rq;
struct sched_domain;

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */

#define ENQUEUE_WAKEUP		1
#define ENQUEUE_HEAD		2
#ifdef CONFIG_SMP
#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
#else
#define ENQUEUE_WAKING		0
#endif

#define DEQUEUE_SLEEP		1

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	struct task_struct * (*pick_next_task) (struct rq *rq);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);

	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
	void (*post_schedule) (struct rq *this_rq);
	void (*task_waking) (struct task_struct *task);
	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);

	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			     int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};
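
/*
 * Sketch of how the core uses this interface: classes form a singly
 * linked priority list via ->next, and the pick-next logic in
 * kernel/sched.c walks it roughly like the illustrative function below
 * (the head-of-list pointer is defined there, not in this header).
 */
static inline struct task_struct *pick_next_task_example(struct rq *rq,
				const struct sched_class *highest)
{
	const struct sched_class *class;
	struct task_struct *p;

	for (class = highest; class; class = class->next) {
		p = class->pick_next_task(rq);
		if (p)
			return p;	/* highest-priority runnable task */
	}
	return NULL;	/* not reached: the idle class always returns a task */
}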

struct load_weight {
	unsigned long weight, inv_weight;
};

#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
	u64			wait_sum;
	u64			iowait_count;
	u64			iowait_sum;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned int time_slice;
	int nr_cpus_allowed;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};

struct rcu_node;

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct task_struct *wake_entry;
	int on_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

	/*
	 * fpu_counter contains the number of consecutive context switches
	 * during which the FPU is used. If this is over a threshold, the
	 * lazy fpu saving becomes unlazy to save the trap. This is an
	 * unsigned char so that after 256 times the counter wraps and the
	 * behavior turns lazy again; this deals with bursty apps that only
	 * use the FPU for a short time.
	 */
	unsigned char fpu_counter;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	char rcu_read_unlock_special;
	struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rcu_boost_mutex;
#endif /* #ifdef CONFIG_RCU_BOOST */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
#endif

	struct mm_struct *mm, *active_mm;
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif
#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat	rss_stat;
#endif
/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;  /*  The signal sent when the parent dies  */
	unsigned int group_stop;	/* GROUP_STOP_*, siglock protected */
	/* ??? */
	unsigned int personality;
	unsigned did_exec:1;
	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
				 * execve */
	unsigned in_iowait:1;


	/* Revert to default priority/policy when forking */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif

	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct *real_parent; /* real parent process */
	struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	cputime_t utime, stime, utimescaled, stimescaled;
	cputime_t gtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	cputime_t prev_utime, prev_stime;
#endif
	unsigned long nvcsw, nivcsw; /* context switch counts */
	struct timespec start_time; 		/* monotonic time */
	struct timespec real_start_time;	/* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];

/* process credentials */
	const struct cred __rcu *real_cred; /* objective and real subjective task
					 * credentials (COW) */
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */

	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by setup_new_exec */
/* file system info */
	int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
	unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;
	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	uid_t loginuid;
	unsigned int sessionid;
#endif
	seccomp_t seccomp;

/* Thread group tracking */
   	u32 parent_exec_id;
   	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
	spinlock_t alloc_lock;

#ifdef CONFIG_GENERIC_HARDIRQS
	/* IRQ handler threads */
	struct irqaction *irqaction;
#endif

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct plist_head pi_waiters;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
	struct blk_plug *plug;
#endif

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	cputime_t acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	int mems_allowed_change_disable;
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short il_next;
	short pref_node_fork;
#endif
	atomic_t fs_excl;	/* holding fs exclusive resources */
	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;
#ifdef	CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	struct prop_local_single dirties;
#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	unsigned long timer_slack_ns;
	unsigned long default_timer_slack_ns;

	struct list_head	*scm_work_list;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack	*ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
	struct memcg_batch_info {
		int do_batch;	/* incremented when batch uncharge started */
		struct mem_cgroup *memcg; /* target memcg of uncharge */
		unsigned long nr_pages;	/* uncharged usage */
		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
	} memcg_batch;
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	atomic_t ptrace_bp_refcnt;
#endif
};

/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)

/*
 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
 * values are inverted: lower p->prio value means higher priority.
 *
 * The MAX_USER_RT_PRIO value allows the actual maximum
 * RT priority to be separate from the value exported to
 * user-space.  This allows kernel threads to set their
 * priority to a value higher than any user task. Note:
 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
 */

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO

#define MAX_PRIO		(MAX_RT_PRIO + 40)
#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
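
/*
 * Worked out: with MAX_RT_PRIO == 100 the RT range is prio 0-99,
 * MAX_PRIO == 140, and SCHED_NORMAL/SCHED_BATCH tasks occupy 100-139,
 * with nice -20..+19 mapping onto prio 100..139 and DEFAULT_PRIO == 120
 * corresponding to nice 0.
 */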

static inline int rt_prio(int prio)
{
	if (unlikely(prio < MAX_RT_PRIO))
		return 1;
	return 0;
}

static inline int rt_task(struct task_struct *p)
{
	return rt_prio(p->prio);
}

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

struct pid_namespace;

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}


static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
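
/*
 * Sketch of the global vs. virtual distinction (the function name is
 * illustrative only): for a task inside a pid namespace, the two tgid
 * views generally differ.
 */
static inline void log_tgid_views_example(struct task_struct *tsk)
{
	printk(KERN_DEBUG "%s: global tgid %d, tgid as seen by current %d\n",
	       tsk->comm, task_tgid_nr(tsk), task_tgid_vnr(tsk));
}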

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state).
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 */
static inline int pid_alive(struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_global_init - check if a task structure is init
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return tsk->pid == 1;
}

1693 /*
1694  * is_container_init:
1695  * check whether the task is init in its own pid namespace.
1696  */
1697 extern int is_container_init(struct task_struct *tsk);
1698 
1699 extern struct pid *cad_pid;
1700 
1701 extern void free_task(struct task_struct *tsk);
1702 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1703 
1704 extern void __put_task_struct(struct task_struct *t);
1705 
1706 static inline void put_task_struct(struct task_struct *t)
1707 {
1708 	if (atomic_dec_and_test(&t->usage))
1709 		__put_task_struct(t);
1710 }
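/*
 * Illustrative sketch (not part of the original header): pin a task over
 * an operation during which it might otherwise be freed, then drop the
 * reference again:
 *
 *	get_task_struct(tsk);
 *	...do work that may sleep while 'tsk' may exit...
 *	put_task_struct(tsk);
 */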
1711 
1712 extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
1713 extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
1714 
1715 /*
1716  * Per process flags
1717  */
1718 #define PF_STARTING	0x00000002	/* being created */
1719 #define PF_EXITING	0x00000004	/* getting shut down */
1720 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
1721 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
1722 #define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
1723 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
1724 #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
1725 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
1726 #define PF_DUMPCORE	0x00000200	/* dumped core */
1727 #define PF_SIGNALED	0x00000400	/* killed by a signal */
1728 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
1729 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
1730 #define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
1731 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
1732 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
1733 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
1734 #define PF_KSWAPD	0x00040000	/* I am kswapd */
1735 #define PF_OOM_ORIGIN	0x00080000	/* Allocating much memory to others */
1736 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1737 #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
1738 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1739 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
1740 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
1741 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
1742 #define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
1743 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1744 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
1745 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
1746 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
1747 #define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
1748 
1749 /*
1750  * Only the _current_ task can read/write to tsk->flags, but other
1751  * tasks can access tsk->flags in readonly mode, for example
1752  * with tsk_used_math() (as during threaded core dumping).
1753  * There is however an exception to this rule during ptrace
1754  * or during fork: the ptracer task is allowed to write to the
1755  * child->flags of its traced child (same goes for fork, the parent
1756  * can write to the child->flags), because we're guaranteed the
1757  * child is not running and in turn not changing child->flags
1758  * at the same time the parent does it.
1759  */
1760 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1761 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1762 #define clear_used_math() clear_stopped_child_used_math(current)
1763 #define set_used_math() set_stopped_child_used_math(current)
1764 #define conditional_stopped_child_used_math(condition, child) \
1765 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1766 #define conditional_used_math(condition) \
1767 	conditional_stopped_child_used_math(condition, current)
1768 #define copy_to_stopped_child_used_math(child) \
1769 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1770 /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1771 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1772 #define used_math() tsk_used_math(current)
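/*
 * Illustrative sketch (not part of the original header): per the rule
 * above, another task may only read these flags, e.g. to skip kernel
 * threads while walking the task list:
 *
 *	if (p->flags & PF_KTHREAD)
 *		continue;
 */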
1773 
1774 /*
1775  * task->group_stop flags
1776  */
1777 #define GROUP_STOP_SIGMASK	0xffff    /* signr of the last group stop */
1778 #define GROUP_STOP_PENDING	(1 << 16) /* task should stop for group stop */
1779 #define GROUP_STOP_CONSUME	(1 << 17) /* consume group stop count */
1780 #define GROUP_STOP_TRAPPING	(1 << 18) /* switching from STOPPED to TRACED */
1781 #define GROUP_STOP_DEQUEUED	(1 << 19) /* stop signal dequeued */
1782 
1783 extern void task_clear_group_stop_pending(struct task_struct *task);
1784 
1785 #ifdef CONFIG_PREEMPT_RCU
1786 
1787 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1788 #define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */
1789 #define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */
1790 
1791 static inline void rcu_copy_process(struct task_struct *p)
1792 {
1793 	p->rcu_read_lock_nesting = 0;
1794 	p->rcu_read_unlock_special = 0;
1795 #ifdef CONFIG_TREE_PREEMPT_RCU
1796 	p->rcu_blocked_node = NULL;
1797 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1798 #ifdef CONFIG_RCU_BOOST
1799 	p->rcu_boost_mutex = NULL;
1800 #endif /* #ifdef CONFIG_RCU_BOOST */
1801 	INIT_LIST_HEAD(&p->rcu_node_entry);
1802 }
1803 
1804 #else
1805 
1806 static inline void rcu_copy_process(struct task_struct *p)
1807 {
1808 }
1809 
1810 #endif
1811 
1812 #ifdef CONFIG_SMP
1813 extern int set_cpus_allowed_ptr(struct task_struct *p,
1814 				const struct cpumask *new_mask);
1815 #else
1816 static inline int set_cpus_allowed_ptr(struct task_struct *p,
1817 				       const struct cpumask *new_mask)
1818 {
1819 	if (!cpumask_test_cpu(0, new_mask))
1820 		return -EINVAL;
1821 	return 0;
1822 }
1823 #endif
1824 
1825 #ifndef CONFIG_CPUMASK_OFFSTACK
1826 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1827 {
1828 	return set_cpus_allowed_ptr(p, &new_mask);
1829 }
1830 #endif
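/*
 * Illustrative sketch (not part of the original header): restrict a task
 * to a single CPU (assuming that CPU is online):
 *
 *	set_cpus_allowed_ptr(tsk, cpumask_of(2));
 */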
1831 
1832 /*
1833  * Do not use outside of architecture code which knows its limitations.
1834  *
1835  * sched_clock() makes no promise of monotonicity or bounded drift between
1836  * CPUs; using it directly (which you should not) requires IRQs to be disabled.
1837  *
1838  * Please use one of the three interfaces below.
1839  */
1840 extern unsigned long long notrace sched_clock(void);
1841 /*
1842  * See the comment in kernel/sched_clock.c
1843  */
1844 extern u64 cpu_clock(int cpu);
1845 extern u64 local_clock(void);
1846 extern u64 sched_clock_cpu(int cpu);
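/*
 * Illustrative sketch (not part of the original header): time a short
 * interval with local_clock(), which may be used from any context;
 * do_work() is a hypothetical placeholder:
 *
 *	u64 t0, t1;
 *
 *	t0 = local_clock();
 *	do_work();
 *	t1 = local_clock();
 *	pr_debug("took %llu ns\n", (unsigned long long)(t1 - t0));
 */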
1847 
1848 
1849 extern void sched_clock_init(void);
1850 
1851 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1852 static inline void sched_clock_tick(void)
1853 {
1854 }
1855 
1856 static inline void sched_clock_idle_sleep_event(void)
1857 {
1858 }
1859 
1860 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
1861 {
1862 }
1863 #else
1864 /*
1865  * Architectures can set this to 1 if they have selected
1866  * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
1867  * but it then turns out during bootup that sched_clock()
1868  * is reliable after all:
1869  */
1870 extern int sched_clock_stable;
1871 
1872 extern void sched_clock_tick(void);
1873 extern void sched_clock_idle_sleep_event(void);
1874 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1875 #endif
1876 
1877 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1878 /*
1879  * An interface for runtime opt-in to irq time accounting based on sched_clock.
1880  * The reason for the explicit opt-in is to avoid a performance penalty with
1881  * slow sched_clock() implementations.
1882  */
1883 extern void enable_sched_clock_irqtime(void);
1884 extern void disable_sched_clock_irqtime(void);
1885 #else
1886 static inline void enable_sched_clock_irqtime(void) {}
1887 static inline void disable_sched_clock_irqtime(void) {}
1888 #endif
1889 
1890 extern unsigned long long
1891 task_sched_runtime(struct task_struct *task);
1892 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
1893 
1894 /* sched_exec is called by processes performing an exec */
1895 #ifdef CONFIG_SMP
1896 extern void sched_exec(void);
1897 #else
1898 #define sched_exec()   {}
1899 #endif
1900 
1901 extern void sched_clock_idle_sleep_event(void);
1902 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1903 
1904 #ifdef CONFIG_HOTPLUG_CPU
1905 extern void idle_task_exit(void);
1906 #else
1907 static inline void idle_task_exit(void) {}
1908 #endif
1909 
1910 #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
1911 extern void wake_up_idle_cpu(int cpu);
1912 #else
1913 static inline void wake_up_idle_cpu(int cpu) { }
1914 #endif
1915 
1916 extern unsigned int sysctl_sched_latency;
1917 extern unsigned int sysctl_sched_min_granularity;
1918 extern unsigned int sysctl_sched_wakeup_granularity;
1919 extern unsigned int sysctl_sched_child_runs_first;
1920 
1921 enum sched_tunable_scaling {
1922 	SCHED_TUNABLESCALING_NONE,
1923 	SCHED_TUNABLESCALING_LOG,
1924 	SCHED_TUNABLESCALING_LINEAR,
1925 	SCHED_TUNABLESCALING_END,
1926 };
1927 extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
1928 
1929 #ifdef CONFIG_SCHED_DEBUG
1930 extern unsigned int sysctl_sched_migration_cost;
1931 extern unsigned int sysctl_sched_nr_migrate;
1932 extern unsigned int sysctl_sched_time_avg;
1933 extern unsigned int sysctl_timer_migration;
1934 extern unsigned int sysctl_sched_shares_window;
1935 
1936 int sched_proc_update_handler(struct ctl_table *table, int write,
1937 		void __user *buffer, size_t *length,
1938 		loff_t *ppos);
1939 #endif
1940 #ifdef CONFIG_SCHED_DEBUG
1941 static inline unsigned int get_sysctl_timer_migration(void)
1942 {
1943 	return sysctl_timer_migration;
1944 }
1945 #else
1946 static inline unsigned int get_sysctl_timer_migration(void)
1947 {
1948 	return 1;
1949 }
1950 #endif
1951 extern unsigned int sysctl_sched_rt_period;
1952 extern int sysctl_sched_rt_runtime;
1953 
1954 int sched_rt_handler(struct ctl_table *table, int write,
1955 		void __user *buffer, size_t *lenp,
1956 		loff_t *ppos);
1957 
1958 #ifdef CONFIG_SCHED_AUTOGROUP
1959 extern unsigned int sysctl_sched_autogroup_enabled;
1960 
1961 extern void sched_autogroup_create_attach(struct task_struct *p);
1962 extern void sched_autogroup_detach(struct task_struct *p);
1963 extern void sched_autogroup_fork(struct signal_struct *sig);
1964 extern void sched_autogroup_exit(struct signal_struct *sig);
1965 #ifdef CONFIG_PROC_FS
1966 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
1967 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
1968 #endif
1969 #else
1970 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
1971 static inline void sched_autogroup_detach(struct task_struct *p) { }
1972 static inline void sched_autogroup_fork(struct signal_struct *sig) { }
1973 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
1974 #endif
1975 
1976 #ifdef CONFIG_RT_MUTEXES
1977 extern int rt_mutex_getprio(struct task_struct *p);
1978 extern void rt_mutex_setprio(struct task_struct *p, int prio);
1979 extern void rt_mutex_adjust_pi(struct task_struct *p);
1980 #else
1981 static inline int rt_mutex_getprio(struct task_struct *p)
1982 {
1983 	return p->normal_prio;
1984 }
1985 # define rt_mutex_adjust_pi(p)		do { } while (0)
1986 #endif
1987 
1988 extern bool yield_to(struct task_struct *p, bool preempt);
1989 extern void set_user_nice(struct task_struct *p, long nice);
1990 extern int task_prio(const struct task_struct *p);
1991 extern int task_nice(const struct task_struct *p);
1992 extern int can_nice(const struct task_struct *p, const int nice);
1993 extern int task_curr(const struct task_struct *p);
1994 extern int idle_cpu(int cpu);
1995 extern int sched_setscheduler(struct task_struct *, int,
1996 			      const struct sched_param *);
1997 extern int sched_setscheduler_nocheck(struct task_struct *, int,
1998 				      const struct sched_param *);
1999 extern struct task_struct *idle_task(int cpu);
2000 extern struct task_struct *curr_task(int cpu);
2001 extern void set_curr_task(int cpu, struct task_struct *p);
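/*
 * Illustrative sketch (not part of the original header): move a task to
 * SCHED_FIFO at priority 50 (kernel-internal callers that must bypass
 * permission checks would use sched_setscheduler_nocheck() instead):
 *
 *	struct sched_param param = { .sched_priority = 50 };
 *
 *	sched_setscheduler(tsk, SCHED_FIFO, &param);
 */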
2002 
2003 void yield(void);
2004 
2005 /*
2006  * The default (Linux) execution domain.
2007  */
2008 extern struct exec_domain	default_exec_domain;
2009 
2010 union thread_union {
2011 	struct thread_info thread_info;
2012 	unsigned long stack[THREAD_SIZE/sizeof(long)];
2013 };
2014 
2015 #ifndef __HAVE_ARCH_KSTACK_END
2016 static inline int kstack_end(void *addr)
2017 {
2018 	/* Reliable end of stack detection:
2019 	 * Some APM BIOS versions misalign the stack
2020 	 */
2021 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2022 }
2023 #endif
2024 
2025 extern union thread_union init_thread_union;
2026 extern struct task_struct init_task;
2027 
2028 extern struct   mm_struct init_mm;
2029 
2030 extern struct pid_namespace init_pid_ns;
2031 
2032 /*
2033  * find a task by one of its numerical ids
2034  *
2035  * find_task_by_pid_ns():
2036  *      finds a task by its pid in the specified namespace
2037  * find_task_by_vpid():
2038  *      finds a task by its virtual pid
2039  *
2040  * see also find_vpid() etc in include/linux/pid.h
2041  */
2042 
2043 extern struct task_struct *find_task_by_vpid(pid_t nr);
2044 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2045 		struct pid_namespace *ns);
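/*
 * Illustrative sketch (not part of the original header): look up a task by
 * the pid value seen in the caller's namespace; the lookup must happen
 * under rcu_read_lock(), and the task must be pinned before the lock is
 * dropped:
 *
 *	struct task_struct *p;
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(nr);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 */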
2046 
2047 extern void __set_special_pids(struct pid *pid);
2048 
2049 /* per-UID process charging. */
2050 extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
2051 static inline struct user_struct *get_uid(struct user_struct *u)
2052 {
2053 	atomic_inc(&u->__count);
2054 	return u;
2055 }
2056 extern void free_uid(struct user_struct *);
2057 extern void release_uids(struct user_namespace *ns);
2058 
2059 #include <asm/current.h>
2060 
2061 extern void xtime_update(unsigned long ticks);
2062 
2063 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2064 extern int wake_up_process(struct task_struct *tsk);
2065 extern void wake_up_new_task(struct task_struct *tsk);
2066 #ifdef CONFIG_SMP
2067  extern void kick_process(struct task_struct *tsk);
2068 #else
2069  static inline void kick_process(struct task_struct *tsk) { }
2070 #endif
2071 extern void sched_fork(struct task_struct *p);
2072 extern void sched_dead(struct task_struct *p);
2073 
2074 extern void proc_caches_init(void);
2075 extern void flush_signals(struct task_struct *);
2076 extern void __flush_signals(struct task_struct *);
2077 extern void ignore_signals(struct task_struct *);
2078 extern void flush_signal_handlers(struct task_struct *, int force_default);
2079 extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2080 
2081 static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2082 {
2083 	unsigned long flags;
2084 	int ret;
2085 
2086 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
2087 	ret = dequeue_signal(tsk, mask, info);
2088 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2089 
2090 	return ret;
2091 }
2092 
2093 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2094 			      sigset_t *mask);
2095 extern void unblock_all_signals(void);
2096 extern void release_task(struct task_struct * p);
2097 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2098 extern int force_sigsegv(int, struct task_struct *);
2099 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2100 extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2101 extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2102 extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
2103 extern int kill_pgrp(struct pid *pid, int sig, int priv);
2104 extern int kill_pid(struct pid *pid, int sig, int priv);
2105 extern int kill_proc_info(int, struct siginfo *, pid_t);
2106 extern int do_notify_parent(struct task_struct *, int);
2107 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2108 extern void force_sig(int, struct task_struct *);
2109 extern int send_sig(int, struct task_struct *, int);
2110 extern int zap_other_threads(struct task_struct *p);
2111 extern struct sigqueue *sigqueue_alloc(void);
2112 extern void sigqueue_free(struct sigqueue *);
2113 extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
2114 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2115 extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
2116 
2117 static inline int kill_cad_pid(int sig, int priv)
2118 {
2119 	return kill_pid(cad_pid, sig, priv);
2120 }
2121 
2122 /* These can be the second arg to send_sig_info/send_group_sig_info.  */
2123 #define SEND_SIG_NOINFO ((struct siginfo *) 0)
2124 #define SEND_SIG_PRIV	((struct siginfo *) 1)
2125 #define SEND_SIG_FORCED	((struct siginfo *) 2)
2126 
2127 /*
2128  * True if we are on the alternate signal stack.
2129  */
2130 static inline int on_sig_stack(unsigned long sp)
2131 {
2132 #ifdef CONFIG_STACK_GROWSUP
2133 	return sp >= current->sas_ss_sp &&
2134 		sp - current->sas_ss_sp < current->sas_ss_size;
2135 #else
2136 	return sp > current->sas_ss_sp &&
2137 		sp - current->sas_ss_sp <= current->sas_ss_size;
2138 #endif
2139 }
2140 
2141 static inline int sas_ss_flags(unsigned long sp)
2142 {
2143 	return (current->sas_ss_size == 0 ? SS_DISABLE
2144 		: on_sig_stack(sp) ? SS_ONSTACK : 0);
2145 }
2146 
2147 /*
2148  * Routines for handling mm_structs
2149  */
2150 extern struct mm_struct * mm_alloc(void);
2151 
2152 /* mmdrop drops the mm and the page tables */
2153 extern void __mmdrop(struct mm_struct *);
2154 static inline void mmdrop(struct mm_struct * mm)
2155 {
2156 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2157 		__mmdrop(mm);
2158 }
2159 
2160 /* mmput gets rid of the mappings and all user-space */
2161 extern void mmput(struct mm_struct *);
2162 /* Grab a reference to a task's mm, if it is not already going away */
2163 extern struct mm_struct *get_task_mm(struct task_struct *task);
2164 /* Remove the current task's stale references to the old mm_struct */
2165 extern void mm_release(struct task_struct *, struct mm_struct *);
2166 /* Allocate a new mm structure and copy contents from tsk->mm */
2167 extern struct mm_struct *dup_mm(struct task_struct *tsk);
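/*
 * Illustrative sketch (not part of the original header): peek at another
 * task's address space; get_task_mm() returns NULL if the mm is already
 * going away, and the reference must be dropped with mmput():
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		...inspect mm...
 *		mmput(mm);
 *	}
 */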
2168 
2169 extern int copy_thread(unsigned long, unsigned long, unsigned long,
2170 			struct task_struct *, struct pt_regs *);
2171 extern void flush_thread(void);
2172 extern void exit_thread(void);
2173 
2174 extern void exit_files(struct task_struct *);
2175 extern void __cleanup_sighand(struct sighand_struct *);
2176 
2177 extern void exit_itimers(struct signal_struct *);
2178 extern void flush_itimer_signals(void);
2179 
2180 extern NORET_TYPE void do_group_exit(int);
2181 
2182 extern void daemonize(const char *, ...);
2183 extern int allow_signal(int);
2184 extern int disallow_signal(int);
2185 
2186 extern int do_execve(const char *,
2187 		     const char __user * const __user *,
2188 		     const char __user * const __user *, struct pt_regs *);
2189 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
2190 struct task_struct *fork_idle(int);
2191 
2192 extern void set_task_comm(struct task_struct *tsk, char *from);
2193 extern char *get_task_comm(char *to, struct task_struct *tsk);
2194 
2195 #ifdef CONFIG_SMP
2196 void scheduler_ipi(void);
2197 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2198 #else
2199 static inline void scheduler_ipi(void) { }
2200 static inline unsigned long wait_task_inactive(struct task_struct *p,
2201 					       long match_state)
2202 {
2203 	return 1;
2204 }
2205 #endif
2206 
2207 #define next_task(p) \
2208 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2209 
2210 #define for_each_process(p) \
2211 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2212 
2213 extern bool current_is_single_threaded(void);
2214 
2215 /*
2216  * Careful: do_each_thread/while_each_thread is a double loop so
2217  *          'break' will not work as expected - use goto instead.
2218  */
2219 #define do_each_thread(g, t) \
2220 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2221 
2222 #define while_each_thread(g, t) \
2223 	while ((t = next_thread(t)) != g)
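/*
 * Illustrative sketch (not part of the original header): walk every thread
 * of every process under tasklist_lock; wants_attention() is hypothetical,
 * and goto is used because 'break' would only leave the inner loop:
 *
 *	struct task_struct *g, *t;
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		if (wants_attention(t))
 *			goto out;
 *	} while_each_thread(g, t);
 * out:
 *	read_unlock(&tasklist_lock);
 */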
2224 
2225 static inline int get_nr_threads(struct task_struct *tsk)
2226 {
2227 	return tsk->signal->nr_threads;
2228 }
2229 
2230 /* de_thread depends on thread_group_leader not being a pid based check */
2231 #define thread_group_leader(p)	(p == p->group_leader)
2232 
2233 /* Due to the insanities of de_thread it is possible for a process
2234  * to have the pid of the thread group leader without actually being
2235  * the thread group leader.  For iteration through the pids in proc
2236  * all we care about is that we have a task with the appropriate
2237  * pid, we don't actually care if we have the right task.
2238  */
2239 static inline int has_group_leader_pid(struct task_struct *p)
2240 {
2241 	return p->pid == p->tgid;
2242 }
2243 
2244 static inline
2245 int same_thread_group(struct task_struct *p1, struct task_struct *p2)
2246 {
2247 	return p1->tgid == p2->tgid;
2248 }
2249 
2250 static inline struct task_struct *next_thread(const struct task_struct *p)
2251 {
2252 	return list_entry_rcu(p->thread_group.next,
2253 			      struct task_struct, thread_group);
2254 }
2255 
2256 static inline int thread_group_empty(struct task_struct *p)
2257 {
2258 	return list_empty(&p->thread_group);
2259 }
2260 
2261 #define delay_group_leader(p) \
2262 		(thread_group_leader(p) && !thread_group_empty(p))
2263 
2264 static inline int task_detached(struct task_struct *p)
2265 {
2266 	return p->exit_signal == -1;
2267 }
2268 
2269 /*
2270  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2271  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2272  * pins the final release of task.io_context.  Also protects ->cpuset and
2273  * ->cgroup.subsys[].
2274  *
2275  * Nests both inside and outside of read_lock(&tasklist_lock).
2276  * It must not be nested with write_lock_irq(&tasklist_lock),
2277  * neither inside nor outside.
2278  */
2279 static inline void task_lock(struct task_struct *p)
2280 {
2281 	spin_lock(&p->alloc_lock);
2282 }
2283 
2284 static inline void task_unlock(struct task_struct *p)
2285 {
2286 	spin_unlock(&p->alloc_lock);
2287 }
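/*
 * Illustrative sketch (not part of the original header): hold task_lock()
 * across accesses to the fields listed above, e.g. to snapshot ->comm
 * (this is essentially what get_task_comm() does):
 *
 *	char buf[TASK_COMM_LEN];
 *
 *	task_lock(p);
 *	strncpy(buf, p->comm, sizeof(buf));
 *	task_unlock(p);
 */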
2288 
2289 extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2290 							unsigned long *flags);
2291 
2292 #define lock_task_sighand(tsk, flags)					\
2293 ({	struct sighand_struct *__ss;					\
2294 	__cond_lock(&(tsk)->sighand->siglock,				\
2295 		    (__ss = __lock_task_sighand(tsk, flags)));		\
2296 	__ss;								\
2297 })
2298 
2299 static inline void unlock_task_sighand(struct task_struct *tsk,
2300 						unsigned long *flags)
2301 {
2302 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2303 }
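/*
 * Illustrative sketch (not part of the original header): the lock attempt
 * can fail if the task has already released its signal handlers, so check
 * the return value:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		...inspect or modify p->signal under the lock...
 *		unlock_task_sighand(p, &flags);
 *	}
 */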
2304 
2305 #ifndef __HAVE_THREAD_FUNCTIONS
2306 
2307 #define task_thread_info(task)	((struct thread_info *)(task)->stack)
2308 #define task_stack_page(task)	((task)->stack)
2309 
2310 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2311 {
2312 	*task_thread_info(p) = *task_thread_info(org);
2313 	task_thread_info(p)->task = p;
2314 }
2315 
2316 static inline unsigned long *end_of_stack(struct task_struct *p)
2317 {
2318 	return (unsigned long *)(task_thread_info(p) + 1);
2319 }
2320 
2321 #endif
2322 
2323 static inline int object_is_on_stack(void *obj)
2324 {
2325 	void *stack = task_stack_page(current);
2326 
2327 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2328 }
2329 
2330 extern void thread_info_cache_init(void);
2331 
2332 #ifdef CONFIG_DEBUG_STACK_USAGE
2333 static inline unsigned long stack_not_used(struct task_struct *p)
2334 {
2335 	unsigned long *n = end_of_stack(p);
2336 
2337 	do { 	/* Skip over canary */
2338 		n++;
2339 	} while (!*n);
2340 
2341 	return (unsigned long)n - (unsigned long)end_of_stack(p);
2342 }
2343 #endif
2344 
2345 /* set thread flags in other task's structures
2346  * - see asm/thread_info.h for TIF_xxxx flags available
2347  */
2348 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2349 {
2350 	set_ti_thread_flag(task_thread_info(tsk), flag);
2351 }
2352 
2353 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2354 {
2355 	clear_ti_thread_flag(task_thread_info(tsk), flag);
2356 }
2357 
2358 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2359 {
2360 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2361 }
2362 
2363 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2364 {
2365 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2366 }
2367 
2368 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2369 {
2370 	return test_ti_thread_flag(task_thread_info(tsk), flag);
2371 }
2372 
2373 static inline void set_tsk_need_resched(struct task_struct *tsk)
2374 {
2375 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2376 }
2377 
2378 static inline void clear_tsk_need_resched(struct task_struct *tsk)
2379 {
2380 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2381 }
2382 
2383 static inline int test_tsk_need_resched(struct task_struct *tsk)
2384 {
2385 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2386 }
2387 
2388 static inline int restart_syscall(void)
2389 {
2390 	set_tsk_thread_flag(current, TIF_SIGPENDING);
2391 	return -ERESTARTNOINTR;
2392 }
2393 
2394 static inline int signal_pending(struct task_struct *p)
2395 {
2396 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2397 }
2398 
2399 static inline int __fatal_signal_pending(struct task_struct *p)
2400 {
2401 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2402 }
2403 
2404 static inline int fatal_signal_pending(struct task_struct *p)
2405 {
2406 	return signal_pending(p) && __fatal_signal_pending(p);
2407 }
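/*
 * Illustrative sketch (not part of the original header): abort a long
 * operation early when the caller has been sent SIGKILL:
 *
 *	if (fatal_signal_pending(current))
 *		return -EINTR;
 */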
2408 
2409 static inline int signal_pending_state(long state, struct task_struct *p)
2410 {
2411 	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2412 		return 0;
2413 	if (!signal_pending(p))
2414 		return 0;
2415 
2416 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2417 }
2418 
2419 static inline int need_resched(void)
2420 {
2421 	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
2422 }
2423 
2424 /*
2425  * cond_resched() and cond_resched_lock(): latency reduction via
2426  * explicit rescheduling in places that are safe. The return
2427  * value indicates whether a reschedule was in fact done.
2428  * cond_resched_lock() will drop the spinlock before scheduling,
2429  * cond_resched_softirq() will enable bhs before scheduling.
2430  */
2431 extern int _cond_resched(void);
2432 
2433 #define cond_resched() ({			\
2434 	__might_sleep(__FILE__, __LINE__, 0);	\
2435 	_cond_resched();			\
2436 })
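/*
 * Illustrative sketch (not part of the original header): break up a long
 * loop running in process context so other tasks can be scheduled;
 * process_item() and nr_items are hypothetical:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */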
2437 
2438 extern int __cond_resched_lock(spinlock_t *lock);
2439 
2440 #ifdef CONFIG_PREEMPT
2441 #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
2442 #else
2443 #define PREEMPT_LOCK_OFFSET	0
2444 #endif
2445 
2446 #define cond_resched_lock(lock) ({				\
2447 	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
2448 	__cond_resched_lock(lock);				\
2449 })
2450 
2451 extern int __cond_resched_softirq(void);
2452 
2453 #define cond_resched_softirq() ({					\
2454 	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
2455 	__cond_resched_softirq();					\
2456 })
2457 
2458 /*
2459  * Does a critical section need to be broken due to another
2460  * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
2461  * but follows from a general need for low latency.)
2462  */
2463 static inline int spin_needbreak(spinlock_t *lock)
2464 {
2465 #ifdef CONFIG_PREEMPT
2466 	return spin_is_contended(lock);
2467 #else
2468 	return 0;
2469 #endif
2470 }
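/*
 * Illustrative sketch (not part of the original header): use spin_needbreak()
 * with cond_resched_lock() to briefly drop a contended lock inside a long
 * loop; my_lock, more_work() and do_one_item() are hypothetical:
 *
 *	spin_lock(&my_lock);
 *	while (more_work()) {
 *		do_one_item();
 *		if (spin_needbreak(&my_lock))
 *			cond_resched_lock(&my_lock);
 *	}
 *	spin_unlock(&my_lock);
 */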
2471 
2472 /*
2473  * Thread group CPU time accounting.
2474  */
2475 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2476 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2477 
2478 static inline void thread_group_cputime_init(struct signal_struct *sig)
2479 {
2480 	spin_lock_init(&sig->cputimer.lock);
2481 }
2482 
2483 /*
2484  * Reevaluate whether the task has signals pending delivery.
2485  * Wake the task if so.
2486  * This is required every time the blocked sigset_t changes.
2487  * Callers must hold sighand->siglock.
2488  */
2489 extern void recalc_sigpending_and_wake(struct task_struct *t);
2490 extern void recalc_sigpending(void);
2491 
2492 extern void signal_wake_up(struct task_struct *t, int resume_stopped);
2493 
2494 /*
2495  * Wrappers for p->thread_info->cpu access. No-op on UP.
2496  */
2497 #ifdef CONFIG_SMP
2498 
2499 static inline unsigned int task_cpu(const struct task_struct *p)
2500 {
2501 	return task_thread_info(p)->cpu;
2502 }
2503 
2504 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2505 
2506 #else
2507 
2508 static inline unsigned int task_cpu(const struct task_struct *p)
2509 {
2510 	return 0;
2511 }
2512 
2513 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2514 {
2515 }
2516 
2517 #endif /* CONFIG_SMP */
2518 
2519 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2520 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2521 
2522 extern void normalize_rt_tasks(void);
2523 
2524 #ifdef CONFIG_CGROUP_SCHED
2525 
2526 extern struct task_group root_task_group;
2527 
2528 extern struct task_group *sched_create_group(struct task_group *parent);
2529 extern void sched_destroy_group(struct task_group *tg);
2530 extern void sched_move_task(struct task_struct *tsk);
2531 #ifdef CONFIG_FAIR_GROUP_SCHED
2532 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
2533 extern unsigned long sched_group_shares(struct task_group *tg);
2534 #endif
2535 #ifdef CONFIG_RT_GROUP_SCHED
2536 extern int sched_group_set_rt_runtime(struct task_group *tg,
2537 				      long rt_runtime_us);
2538 extern long sched_group_rt_runtime(struct task_group *tg);
2539 extern int sched_group_set_rt_period(struct task_group *tg,
2540 				      long rt_period_us);
2541 extern long sched_group_rt_period(struct task_group *tg);
2542 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
2543 #endif
2544 #endif
2545 
2546 extern int task_can_switch_user(struct user_struct *up,
2547 					struct task_struct *tsk);
2548 
2549 #ifdef CONFIG_TASK_XACCT
2550 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2551 {
2552 	tsk->ioac.rchar += amt;
2553 }
2554 
2555 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2556 {
2557 	tsk->ioac.wchar += amt;
2558 }
2559 
2560 static inline void inc_syscr(struct task_struct *tsk)
2561 {
2562 	tsk->ioac.syscr++;
2563 }
2564 
2565 static inline void inc_syscw(struct task_struct *tsk)
2566 {
2567 	tsk->ioac.syscw++;
2568 }
2569 #else
2570 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2571 {
2572 }
2573 
2574 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2575 {
2576 }
2577 
2578 static inline void inc_syscr(struct task_struct *tsk)
2579 {
2580 }
2581 
2582 static inline void inc_syscw(struct task_struct *tsk)
2583 {
2584 }
2585 #endif
2586 
2587 #ifndef TASK_SIZE_OF
2588 #define TASK_SIZE_OF(tsk)	TASK_SIZE
2589 #endif
2590 
2591 #ifdef CONFIG_MM_OWNER
2592 extern void mm_update_next_owner(struct mm_struct *mm);
2593 extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2594 #else
2595 static inline void mm_update_next_owner(struct mm_struct *mm)
2596 {
2597 }
2598 
2599 static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2600 {
2601 }
2602 #endif /* CONFIG_MM_OWNER */
2603 
2604 static inline unsigned long task_rlimit(const struct task_struct *tsk,
2605 		unsigned int limit)
2606 {
2607 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
2608 }
2609 
2610 static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
2611 		unsigned int limit)
2612 {
2613 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
2614 }
2615 
2616 static inline unsigned long rlimit(unsigned int limit)
2617 {
2618 	return task_rlimit(current, limit);
2619 }
2620 
2621 static inline unsigned long rlimit_max(unsigned int limit)
2622 {
2623 	return task_rlimit_max(current, limit);
2624 }
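/*
 * Illustrative sketch (not part of the original header): read the current
 * task's soft and hard limits on open files:
 *
 *	unsigned long soft = rlimit(RLIMIT_NOFILE);
 *	unsigned long hard = rlimit_max(RLIMIT_NOFILE);
 */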
2625 
2626 #endif /* __KERNEL__ */
2627 
2628 #endif
2629