1 #ifndef _LINUX_SCHED_H
2 #define _LINUX_SCHED_H
3 
4 /*
5  * cloning flags:
6  */
7 #define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
8 #define CLONE_VM	0x00000100	/* set if VM shared between processes */
9 #define CLONE_FS	0x00000200	/* set if fs info shared between processes */
10 #define CLONE_FILES	0x00000400	/* set if open files shared between processes */
11 #define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
12 #define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
13 #define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
14 #define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
15 #define CLONE_THREAD	0x00010000	/* Same thread group? */
16 #define CLONE_NEWNS	0x00020000	/* New namespace group? */
17 #define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
18 #define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
19 #define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
20 #define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
21 #define CLONE_DETACHED		0x00400000	/* Unused, ignored */
22 #define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
23 #define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
24 #define CLONE_STOPPED		0x02000000	/* Start in stopped state */
25 #define CLONE_NEWUTS		0x04000000	/* New utsname group? */
26 #define CLONE_NEWIPC		0x08000000	/* New ipcs */
27 #define CLONE_NEWUSER		0x10000000	/* New user namespace */
28 #define CLONE_NEWPID		0x20000000	/* New pid namespace */
29 #define CLONE_NEWNET		0x40000000	/* New network namespace */
30 #define CLONE_IO		0x80000000	/* Clone io context */
31 
32 /*
33  * Scheduling policies
34  */
35 #define SCHED_NORMAL		0
36 #define SCHED_FIFO		1
37 #define SCHED_RR		2
38 #define SCHED_BATCH		3
39 /* SCHED_ISO: reserved but not implemented yet */
40 #define SCHED_IDLE		5
41 /* Can be ORed in to make sure the process is reverted to SCHED_NORMAL on fork */
42 #define SCHED_RESET_ON_FORK     0x40000000
43 
44 #ifdef __KERNEL__
45 
46 struct sched_param {
47 	int sched_priority;
48 };
49 
50 #include <asm/param.h>	/* for HZ */
51 
52 #include <linux/capability.h>
53 #include <linux/threads.h>
54 #include <linux/kernel.h>
55 #include <linux/types.h>
56 #include <linux/timex.h>
57 #include <linux/jiffies.h>
58 #include <linux/rbtree.h>
59 #include <linux/thread_info.h>
60 #include <linux/cpumask.h>
61 #include <linux/errno.h>
62 #include <linux/nodemask.h>
63 #include <linux/mm_types.h>
64 
65 #include <asm/system.h>
66 #include <asm/page.h>
67 #include <asm/ptrace.h>
68 #include <asm/cputime.h>
69 
70 #include <linux/smp.h>
71 #include <linux/sem.h>
72 #include <linux/signal.h>
73 #include <linux/path.h>
74 #include <linux/compiler.h>
75 #include <linux/completion.h>
76 #include <linux/pid.h>
77 #include <linux/percpu.h>
78 #include <linux/topology.h>
79 #include <linux/proportions.h>
80 #include <linux/seccomp.h>
81 #include <linux/rcupdate.h>
82 #include <linux/rculist.h>
83 #include <linux/rtmutex.h>
84 
85 #include <linux/time.h>
86 #include <linux/param.h>
87 #include <linux/resource.h>
88 #include <linux/timer.h>
89 #include <linux/hrtimer.h>
90 #include <linux/task_io_accounting.h>
91 #include <linux/kobject.h>
92 #include <linux/latencytop.h>
93 #include <linux/cred.h>
94 
95 #include <asm/processor.h>
96 
97 struct exec_domain;
98 struct futex_pi_state;
99 struct robust_list_head;
100 struct bio;
101 struct fs_struct;
102 struct bts_context;
103 struct perf_event_context;
104 
105 /*
106  * List of flags we want to share for kernel threads,
107  * if only because they are not used by them anyway.
108  */
109 #define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
110 
111 /*
112  * These are the constants used to fake the fixed-point load-average
113  * counting. Some notes:
114  *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
115  *    a load-average precision of 10 bits integer + 11 bits fractional
116  *  - if you want to count load-averages more often, you need more
117  *    precision, or rounding will get you. With 2-second counting freq,
118  *    the EXP_n values would be 1981, 2034 and 2043 if still using only
119  *    11 bit fractions.
120  */
121 extern unsigned long avenrun[];		/* Load averages */
122 extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
123 
124 #define FSHIFT		11		/* nr of bits of precision */
125 #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
126 #define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
127 #define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
128 #define EXP_5		2014		/* 1/exp(5sec/5min) */
129 #define EXP_15		2037		/* 1/exp(5sec/15min) */
130 
131 #define CALC_LOAD(load,exp,n) \
132 	load *= exp; \
133 	load += n*(FIXED_1-exp); \
134 	load >>= FSHIFT;
135 
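/*
 * Worked example (illustrative note, not part of the original header):
 * EXP_1 is FIXED_1/exp(5sec/1min) = 2048 * exp(-1/12) ~= 1884.  Starting
 * from a load of 1.0 (load = FIXED_1 = 2048) with 3 runnable tasks
 * (n = 3*FIXED_1 = 6144), one CALC_LOAD step gives:
 *
 *	load = (2048*1884 + 6144*(2048-1884)) >> 11 = 2376	(~1.16)
 *
 * The scheduler core typically runs avenrun[0..2] through CALC_LOAD every
 * LOAD_FREQ ticks, with EXP_1, EXP_5 and EXP_15 respectively.
 */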
136 extern unsigned long total_forks;
137 extern int nr_threads;
138 DECLARE_PER_CPU(unsigned long, process_counts);
139 extern int nr_processes(void);
140 extern unsigned long nr_running(void);
141 extern unsigned long nr_uninterruptible(void);
142 extern unsigned long nr_iowait(void);
143 extern unsigned long nr_iowait_cpu(void);
144 extern unsigned long this_cpu_load(void);
145 
146 
147 extern void calc_global_load(void);
148 
149 extern unsigned long get_parent_ip(unsigned long addr);
150 
151 struct seq_file;
152 struct cfs_rq;
153 struct task_group;
154 #ifdef CONFIG_SCHED_DEBUG
155 extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
156 extern void proc_sched_set_task(struct task_struct *p);
157 extern void
158 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
159 #else
160 static inline void
161 proc_sched_show_task(struct task_struct *p, struct seq_file *m)
162 {
163 }
164 static inline void proc_sched_set_task(struct task_struct *p)
165 {
166 }
167 static inline void
168 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
169 {
170 }
171 #endif
172 
173 /*
174  * Task state bitmask. NOTE! These bits are also
175  * encoded in fs/proc/array.c: get_task_state().
176  *
177  * We have two separate sets of flags: task->state
178  * is about runnability, while task->exit_state are
179  * about the task exiting. Confusing, but this way
180  * modifying one set can't modify the other one by
181  * mistake.
182  */
183 #define TASK_RUNNING		0
184 #define TASK_INTERRUPTIBLE	1
185 #define TASK_UNINTERRUPTIBLE	2
186 #define __TASK_STOPPED		4
187 #define __TASK_TRACED		8
188 /* in tsk->exit_state */
189 #define EXIT_ZOMBIE		16
190 #define EXIT_DEAD		32
191 /* in tsk->state again */
192 #define TASK_DEAD		64
193 #define TASK_WAKEKILL		128
194 #define TASK_WAKING		256
195 #define TASK_STATE_MAX		512
196 
197 #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
198 
199 extern char ___assert_task_state[1 - 2*!!(
200 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
201 
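/*
 * Illustrative mapping (not part of the original header): each character in
 * TASK_STATE_TO_CHAR_STR corresponds to one state bit above, in increasing
 * bit order:
 *
 *	R=TASK_RUNNING  S=TASK_INTERRUPTIBLE  D=TASK_UNINTERRUPTIBLE
 *	T=__TASK_STOPPED  t=__TASK_TRACED  Z=EXIT_ZOMBIE  X=EXIT_DEAD
 *	x=TASK_DEAD  K=TASK_WAKEKILL  W=TASK_WAKING
 *
 * The build-time assertion above keeps the string in sync with
 * TASK_STATE_MAX.
 */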
202 /* Convenience macros for the sake of set_task_state */
203 #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
204 #define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
205 #define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
206 
207 /* Convenience macros for the sake of wake_up */
208 #define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
209 #define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
210 
211 /* get_task_state() */
212 #define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
213 				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
214 				 __TASK_TRACED)
215 
216 #define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
217 #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
218 #define task_is_stopped_or_traced(task)	\
219 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
220 #define task_contributes_to_load(task)	\
221 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
222 				 (task->flags & PF_FREEZING) == 0)
223 
224 #define __set_task_state(tsk, state_value)		\
225 	do { (tsk)->state = (state_value); } while (0)
226 #define set_task_state(tsk, state_value)		\
227 	set_mb((tsk)->state, (state_value))
228 
229 /*
230  * set_current_state() includes a barrier so that the write of current->state
231  * is correctly serialised wrt the caller's subsequent test of whether to
232  * actually sleep:
233  *
234  *	set_current_state(TASK_UNINTERRUPTIBLE);
235  *	if (do_i_need_to_sleep())
236  *		schedule();
237  *
238  * If the caller does not need such serialisation then use __set_current_state()
239  */
240 #define __set_current_state(state_value)			\
241 	do { current->state = (state_value); } while (0)
242 #define set_current_state(state_value)		\
243 	set_mb(current->state, (state_value))
244 
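/*
 * Illustrative pairing (a sketch, not part of the original header;
 * "condition" and "sleeper" are placeholders):
 *
 *	sleeper:
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (!condition)
 *			schedule();
 *		__set_current_state(TASK_RUNNING);
 *
 *	waker:
 *		condition = 1;
 *		wake_up_process(sleeper);
 *
 * If the wakeup slips in between set_current_state() and schedule(), the
 * task is already TASK_RUNNING again and schedule() returns promptly instead
 * of sleeping forever; the barrier is what makes that ordering reliable.
 */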
245 /* Task command name length */
246 #define TASK_COMM_LEN 16
247 
248 #include <linux/spinlock.h>
249 
250 /*
251  * This serializes "schedule()" and also protects
252  * the run-queue from deletions/modifications (but
253  * _adding_ to the beginning of the run-queue has
254  * a separate lock).
255  */
256 extern rwlock_t tasklist_lock;
257 extern spinlock_t mmlist_lock;
258 
259 struct task_struct;
260 
261 extern void sched_init(void);
262 extern void sched_init_smp(void);
263 extern asmlinkage void schedule_tail(struct task_struct *prev);
264 extern void init_idle(struct task_struct *idle, int cpu);
265 extern void init_idle_bootup_task(struct task_struct *idle);
266 
267 extern int runqueue_is_locked(int cpu);
268 extern void task_rq_unlock_wait(struct task_struct *p);
269 
270 extern cpumask_var_t nohz_cpu_mask;
271 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
272 extern int select_nohz_load_balancer(int cpu);
273 extern int get_nohz_load_balancer(void);
274 #else
275 static inline int select_nohz_load_balancer(int cpu)
276 {
277 	return 0;
278 }
279 #endif
280 
281 /*
282  * Only dump TASK_* tasks. (0 for all tasks)
283  */
284 extern void show_state_filter(unsigned long state_filter);
285 
286 static inline void show_state(void)
287 {
288 	show_state_filter(0);
289 }
290 
291 extern void show_regs(struct pt_regs *);
292 
293 /*
294  * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
295  * task), SP is the stack pointer of the first frame that should be shown in the back
296  * trace (or NULL if the entire call-chain of the task should be shown).
297  */
298 extern void show_stack(struct task_struct *task, unsigned long *sp);
299 
300 void io_schedule(void);
301 long io_schedule_timeout(long timeout);
302 
303 extern void cpu_init (void);
304 extern void trap_init(void);
305 extern void update_process_times(int user);
306 extern void scheduler_tick(void);
307 
308 extern void sched_show_task(struct task_struct *p);
309 
310 #ifdef CONFIG_DETECT_SOFTLOCKUP
311 extern void softlockup_tick(void);
312 extern void touch_softlockup_watchdog(void);
313 extern void touch_all_softlockup_watchdogs(void);
314 extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
315 				    void __user *buffer,
316 				    size_t *lenp, loff_t *ppos);
317 extern unsigned int  softlockup_panic;
318 extern int softlockup_thresh;
319 #else
320 static inline void softlockup_tick(void)
321 {
322 }
323 static inline void touch_softlockup_watchdog(void)
324 {
325 }
326 static inline void touch_all_softlockup_watchdogs(void)
327 {
328 }
329 #endif
330 
331 #ifdef CONFIG_DETECT_HUNG_TASK
332 extern unsigned int  sysctl_hung_task_panic;
333 extern unsigned long sysctl_hung_task_check_count;
334 extern unsigned long sysctl_hung_task_timeout_secs;
335 extern unsigned long sysctl_hung_task_warnings;
336 extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
337 					 void __user *buffer,
338 					 size_t *lenp, loff_t *ppos);
339 #endif
340 
341 /* Attach to any functions which should be ignored in wchan output. */
342 #define __sched		__attribute__((__section__(".sched.text")))
343 
344 /* Linker adds these: start and end of __sched functions */
345 extern char __sched_text_start[], __sched_text_end[];
346 
347 /* Is this address in the __sched functions? */
348 extern int in_sched_functions(unsigned long addr);
349 
350 #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
351 extern signed long schedule_timeout(signed long timeout);
352 extern signed long schedule_timeout_interruptible(signed long timeout);
353 extern signed long schedule_timeout_killable(signed long timeout);
354 extern signed long schedule_timeout_uninterruptible(signed long timeout);
355 asmlinkage void schedule(void);
356 extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
357 
358 struct nsproxy;
359 struct user_namespace;
360 
361 /*
362  * Default maximum number of active map areas; this limits the number of vmas
363  * per mm struct. Users can override this number via sysctl, but there is a
364  * problem.
365  *
366  * When a program's coredump is generated in ELF format, a section is created
367  * per vma. In ELF, the number of sections is represented as an unsigned short,
368  * so the number of sections must stay below 65535 at coredump time.
369  * Because the kernel adds some informative sections to the program image when
370  * generating a coredump, we need some margin. The number of extra sections is
371  * currently 1-3, depending on the arch. We use "5" as a safe margin here.
372  */
373 #define MAPCOUNT_ELF_CORE_MARGIN	(5)
374 #define DEFAULT_MAX_MAP_COUNT	(USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
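/* Illustrative: USHORT_MAX is 65535, so the default evaluates to 65530. */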
375 
376 extern int sysctl_max_map_count;
377 
378 #include <linux/aio.h>
379 
380 extern unsigned long
381 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
382 		       unsigned long, unsigned long);
383 extern unsigned long
384 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
385 			  unsigned long len, unsigned long pgoff,
386 			  unsigned long flags);
387 extern void arch_unmap_area(struct mm_struct *, unsigned long);
388 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
389 
390 #if USE_SPLIT_PTLOCKS
391 /*
392  * The mm counters are not protected by the mm's page_table_lock,
393  * so they must be incremented atomically.
394  */
395 #define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
396 #define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
397 #define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
398 #define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
399 #define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
400 
401 #else  /* !USE_SPLIT_PTLOCKS */
402 /*
403  * The mm counters are protected by the mm's page_table_lock,
404  * so they can be incremented directly.
405  */
406 #define set_mm_counter(mm, member, value) (mm)->_##member = (value)
407 #define get_mm_counter(mm, member) ((mm)->_##member)
408 #define add_mm_counter(mm, member, value) (mm)->_##member += (value)
409 #define inc_mm_counter(mm, member) (mm)->_##member++
410 #define dec_mm_counter(mm, member) (mm)->_##member--
411 
412 #endif /* !USE_SPLIT_PTLOCKS */
413 
414 #define get_mm_rss(mm)					\
415 	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
416 #define update_hiwater_rss(mm)	do {			\
417 	unsigned long _rss = get_mm_rss(mm);		\
418 	if ((mm)->hiwater_rss < _rss)			\
419 		(mm)->hiwater_rss = _rss;		\
420 } while (0)
421 #define update_hiwater_vm(mm)	do {			\
422 	if ((mm)->hiwater_vm < (mm)->total_vm)		\
423 		(mm)->hiwater_vm = (mm)->total_vm;	\
424 } while (0)
425 
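/*
 * Illustrative usage (a sketch, not part of the original header): a fault
 * handler that maps in an anonymous page would typically bump the counter
 * with
 *
 *	inc_mm_counter(mm, anon_rss);
 *
 * and reporting code sums anon_rss and file_rss via get_mm_rss(mm).  Whether
 * the helpers expand to atomic ops or plain arithmetic depends on
 * USE_SPLIT_PTLOCKS, as selected above.
 */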
426 static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
427 {
428 	return max(mm->hiwater_rss, get_mm_rss(mm));
429 }
430 
431 static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
432 					 struct mm_struct *mm)
433 {
434 	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
435 
436 	if (*maxrss < hiwater_rss)
437 		*maxrss = hiwater_rss;
438 }
439 
440 static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
441 {
442 	return max(mm->hiwater_vm, mm->total_vm);
443 }
444 
445 extern void set_dumpable(struct mm_struct *mm, int value);
446 extern int get_dumpable(struct mm_struct *mm);
447 
448 /* mm flags */
449 /* dumpable bits */
450 #define MMF_DUMPABLE      0  /* core dump is permitted */
451 #define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
452 
453 #define MMF_DUMPABLE_BITS 2
454 #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
455 
456 /* coredump filter bits */
457 #define MMF_DUMP_ANON_PRIVATE	2
458 #define MMF_DUMP_ANON_SHARED	3
459 #define MMF_DUMP_MAPPED_PRIVATE	4
460 #define MMF_DUMP_MAPPED_SHARED	5
461 #define MMF_DUMP_ELF_HEADERS	6
462 #define MMF_DUMP_HUGETLB_PRIVATE 7
463 #define MMF_DUMP_HUGETLB_SHARED  8
464 
465 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
466 #define MMF_DUMP_FILTER_BITS	7
467 #define MMF_DUMP_FILTER_MASK \
468 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
469 #define MMF_DUMP_FILTER_DEFAULT \
470 	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
471 	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
472 
473 #ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
474 # define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
475 #else
476 # define MMF_DUMP_MASK_DEFAULT_ELF	0
477 #endif
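/*
 * Illustrative (not part of the original header): with
 * CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y the default filter sets bits
 * 2, 3, 6 and 7, which /proc/<pid>/coredump_filter reports (shifted down by
 * MMF_DUMP_FILTER_SHIFT) as 0x33; without it, bit 6 is clear and the
 * reported value is 0x23.
 */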
478 					/* leave room for more dump flags */
479 #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
480 
481 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
482 
483 struct sighand_struct {
484 	atomic_t		count;
485 	struct k_sigaction	action[_NSIG];
486 	spinlock_t		siglock;
487 	wait_queue_head_t	signalfd_wqh;
488 };
489 
490 struct pacct_struct {
491 	int			ac_flag;
492 	long			ac_exitcode;
493 	unsigned long		ac_mem;
494 	cputime_t		ac_utime, ac_stime;
495 	unsigned long		ac_minflt, ac_majflt;
496 };
497 
498 struct cpu_itimer {
499 	cputime_t expires;
500 	cputime_t incr;
501 	u32 error;
502 	u32 incr_error;
503 };
504 
505 /**
506  * struct task_cputime - collected CPU time counts
507  * @utime:		time spent in user mode, in &cputime_t units
508  * @stime:		time spent in kernel mode, in &cputime_t units
509  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
510  *
511  * This structure groups together three kinds of CPU time that are
512  * tracked for threads and thread groups.  Most things considering
513  * CPU time want to group these counts together and treat all three
514  * of them in parallel.
515  */
516 struct task_cputime {
517 	cputime_t utime;
518 	cputime_t stime;
519 	unsigned long long sum_exec_runtime;
520 };
521 /* Alternate field names when used to cache expirations. */
522 #define prof_exp	stime
523 #define virt_exp	utime
524 #define sched_exp	sum_exec_runtime
525 
526 #define INIT_CPUTIME	\
527 	(struct task_cputime) {					\
528 		.utime = cputime_zero,				\
529 		.stime = cputime_zero,				\
530 		.sum_exec_runtime = 0,				\
531 	}
532 
533 /*
534  * Disable preemption until the scheduler is running.
535  * Reset by start_kernel()->sched_init()->init_idle().
536  *
537  * We include PREEMPT_ACTIVE to prevent cond_resched() from working
538  * before the scheduler is active -- see should_resched().
539  */
540 #define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)
541 
542 /**
543  * struct thread_group_cputimer - thread group interval timer counts
544  * @cputime:		thread group interval timers.
545  * @running:		non-zero when there are timers running and
546  * 			@cputime receives updates.
547  * @lock:		lock for fields in this struct.
548  *
549  * This structure contains the version of task_cputime, above, that is
550  * used for thread group CPU timer calculations.
551  */
552 struct thread_group_cputimer {
553 	struct task_cputime cputime;
554 	int running;
555 	spinlock_t lock;
556 };
557 
558 /*
559  * NOTE! "signal_struct" does not have its own
560  * locking, because a shared signal_struct always
561  * implies a shared sighand_struct, so locking
562  * sighand_struct is always a proper superset of
563  * the locking of signal_struct.
564  */
565 struct signal_struct {
566 	atomic_t		count;
567 	atomic_t		live;
568 
569 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
570 
571 	/* current thread group signal load-balancing target: */
572 	struct task_struct	*curr_target;
573 
574 	/* shared signal handling: */
575 	struct sigpending	shared_pending;
576 
577 	/* thread group exit support */
578 	int			group_exit_code;
579 	/* overloaded:
580 	 * - notify group_exit_task when ->count is equal to notify_count
581 	 * - everyone except group_exit_task is stopped during signal delivery
582 	 *   of fatal signals, group_exit_task processes the signal.
583 	 */
584 	int			notify_count;
585 	struct task_struct	*group_exit_task;
586 
587 	/* thread group stop support, overloads group_exit_code too */
588 	int			group_stop_count;
589 	unsigned int		flags; /* see SIGNAL_* flags below */
590 
591 	/* POSIX.1b Interval Timers */
592 	struct list_head posix_timers;
593 
594 	/* ITIMER_REAL timer for the process */
595 	struct hrtimer real_timer;
596 	struct pid *leader_pid;
597 	ktime_t it_real_incr;
598 
599 	/*
600 	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
601 	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as these
602 	 * values are defined to 0 and 1 respectively
603 	 */
604 	struct cpu_itimer it[2];
605 
606 	/*
607 	 * Thread group totals for process CPU timers.
608 	 * See thread_group_cputimer(), et al, for details.
609 	 */
610 	struct thread_group_cputimer cputimer;
611 
612 	/* Earliest-expiration cache. */
613 	struct task_cputime cputime_expires;
614 
615 	struct list_head cpu_timers[3];
616 
617 	struct pid *tty_old_pgrp;
618 
619 	/* boolean value for session group leader */
620 	int leader;
621 
622 	struct tty_struct *tty; /* NULL if no tty */
623 
624 	/*
625 	 * Cumulative resource counters for dead threads in the group,
626 	 * and for reaped dead child processes forked by this group.
627 	 * Live threads maintain their own counters and add to these
628 	 * in __exit_signal, except for the group leader.
629 	 */
630 	cputime_t utime, stime, cutime, cstime;
631 	cputime_t gtime;
632 	cputime_t cgtime;
633 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
634 	cputime_t prev_utime, prev_stime;
635 #endif
636 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
637 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
638 	unsigned long inblock, oublock, cinblock, coublock;
639 	unsigned long maxrss, cmaxrss;
640 	struct task_io_accounting ioac;
641 
642 	/*
643 	 * Cumulative ns of scheduled CPU time for dead threads in the
644 	 * group, not including a zombie group leader. (This only differs
645 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
646 	 * other than jiffies.)
647 	 */
648 	unsigned long long sum_sched_runtime;
649 
650 	/*
651 	 * We don't bother to synchronize most readers of this at all,
652 	 * because there is no reader checking a limit that actually needs
653 	 * to get both rlim_cur and rlim_max atomically, and either one
654 	 * alone is a single word that can safely be read normally.
655 	 * getrlimit/setrlimit use task_lock(current->group_leader) to
656 	 * protect this instead of the siglock, because they really
657 	 * have no need to disable irqs.
658 	 */
659 	struct rlimit rlim[RLIM_NLIMITS];
660 
661 #ifdef CONFIG_BSD_PROCESS_ACCT
662 	struct pacct_struct pacct;	/* per-process accounting information */
663 #endif
664 #ifdef CONFIG_TASKSTATS
665 	struct taskstats *stats;
666 #endif
667 #ifdef CONFIG_AUDIT
668 	unsigned audit_tty;
669 	struct tty_audit_buf *tty_audit_buf;
670 #endif
671 
672 	int oom_adj;	/* OOM kill score adjustment (bit shift) */
673 };
674 
675 /* Context switch must be unlocked if interrupts are to be enabled */
676 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
677 # define __ARCH_WANT_UNLOCKED_CTXSW
678 #endif
679 
680 /*
681  * Bits in flags field of signal_struct.
682  */
683 #define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
684 #define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
685 #define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
686 #define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */
687 /*
688  * Pending notifications to parent.
689  */
690 #define SIGNAL_CLD_STOPPED	0x00000010
691 #define SIGNAL_CLD_CONTINUED	0x00000020
692 #define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
693 
694 #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
695 
696 /* If true, all threads except ->group_exit_task have pending SIGKILL */
697 static inline int signal_group_exit(const struct signal_struct *sig)
698 {
699 	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
700 		(sig->group_exit_task != NULL);
701 }
702 
703 /*
704  * Some day this will be a full-fledged user tracking system...
705  */
706 struct user_struct {
707 	atomic_t __count;	/* reference count */
708 	atomic_t processes;	/* How many processes does this user have? */
709 	atomic_t files;		/* How many open files does this user have? */
710 	atomic_t sigpending;	/* How many pending signals does this user have? */
711 #ifdef CONFIG_INOTIFY_USER
712 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
713 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
714 #endif
715 #ifdef CONFIG_EPOLL
716 	atomic_t epoll_watches;	/* The number of file descriptors currently watched */
717 #endif
718 #ifdef CONFIG_POSIX_MQUEUE
719 	/* protected by mq_lock	*/
720 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
721 #endif
722 	unsigned long locked_shm; /* How many pages of mlocked shm? */
723 
724 #ifdef CONFIG_KEYS
725 	struct key *uid_keyring;	/* UID specific keyring */
726 	struct key *session_keyring;	/* UID's default session keyring */
727 #endif
728 
729 	/* Hash table maintenance information */
730 	struct hlist_node uidhash_node;
731 	uid_t uid;
732 	struct user_namespace *user_ns;
733 
734 #ifdef CONFIG_USER_SCHED
735 	struct task_group *tg;
736 #ifdef CONFIG_SYSFS
737 	struct kobject kobj;
738 	struct delayed_work work;
739 #endif
740 #endif
741 
742 #ifdef CONFIG_PERF_EVENTS
743 	atomic_long_t locked_vm;
744 #endif
745 };
746 
747 extern int uids_sysfs_init(void);
748 
749 extern struct user_struct *find_user(uid_t);
750 
751 extern struct user_struct root_user;
752 #define INIT_USER (&root_user)
753 
754 
755 struct backing_dev_info;
756 struct reclaim_state;
757 
758 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
759 struct sched_info {
760 	/* cumulative counters */
761 	unsigned long pcount;	      /* # of times run on this cpu */
762 	unsigned long long run_delay; /* time spent waiting on a runqueue */
763 
764 	/* timestamps */
765 	unsigned long long last_arrival,/* when we last ran on a cpu */
766 			   last_queued;	/* when we were last queued to run */
767 #ifdef CONFIG_SCHEDSTATS
768 	/* BKL stats */
769 	unsigned int bkl_count;
770 #endif
771 };
772 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
773 
774 #ifdef CONFIG_TASK_DELAY_ACCT
775 struct task_delay_info {
776 	spinlock_t	lock;
777 	unsigned int	flags;	/* Private per-task flags */
778 
779 	/* For each stat XXX, add following, aligned appropriately
780 	 *
781 	 * struct timespec XXX_start, XXX_end;
782 	 * u64 XXX_delay;
783 	 * u32 XXX_count;
784 	 *
785 	 * Atomicity of updates to XXX_delay, XXX_count protected by
786 	 * single lock above (split into XXX_lock if contention is an issue).
787 	 */
788 
789 	/*
790 	 * XXX_count is incremented on every XXX operation, the delay
791 	 * associated with the operation is added to XXX_delay.
792 	 * XXX_delay contains the accumulated delay time in nanoseconds.
793 	 */
794 	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
795 	u64 blkio_delay;	/* wait for sync block io completion */
796 	u64 swapin_delay;	/* wait for swapin block io completion */
797 	u32 blkio_count;	/* total count of the number of sync block */
798 				/* io operations performed */
799 	u32 swapin_count;	/* total count of the number of swapin block */
800 				/* io operations performed */
801 
802 	struct timespec freepages_start, freepages_end;
803 	u64 freepages_delay;	/* wait for memory reclaim */
804 	u32 freepages_count;	/* total count of memory reclaim */
805 };
806 #endif	/* CONFIG_TASK_DELAY_ACCT */
807 
808 static inline int sched_info_on(void)
809 {
810 #ifdef CONFIG_SCHEDSTATS
811 	return 1;
812 #elif defined(CONFIG_TASK_DELAY_ACCT)
813 	extern int delayacct_on;
814 	return delayacct_on;
815 #else
816 	return 0;
817 #endif
818 }
819 
820 enum cpu_idle_type {
821 	CPU_IDLE,
822 	CPU_NOT_IDLE,
823 	CPU_NEWLY_IDLE,
824 	CPU_MAX_IDLE_TYPES
825 };
826 
827 /*
828  * sched-domains (multiprocessor balancing) declarations:
829  */
830 
831 /*
832  * Increase resolution of nice-level calculations:
833  */
834 #define SCHED_LOAD_SHIFT	10
835 #define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
836 
837 #define SCHED_LOAD_SCALE_FUZZ	SCHED_LOAD_SCALE
838 
839 #ifdef CONFIG_SMP
840 #define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
841 #define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
842 #define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
843 #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
844 #define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
845 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
846 #define SD_PREFER_LOCAL		0x0040  /* Prefer to keep tasks local to this domain */
847 #define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
848 #define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
849 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
850 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
851 
852 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
853 
854 enum powersavings_balance_level {
855 	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
856 	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
857 					 * first for long running threads
858 					 */
859 	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
860 					 * cpu package for power savings
861 					 */
862 	MAX_POWERSAVINGS_BALANCE_LEVELS
863 };
864 
865 extern int sched_mc_power_savings, sched_smt_power_savings;
866 
867 static inline int sd_balance_for_mc_power(void)
868 {
869 	if (sched_smt_power_savings)
870 		return SD_POWERSAVINGS_BALANCE;
871 
872 	return SD_PREFER_SIBLING;
873 }
874 
875 static inline int sd_balance_for_package_power(void)
876 {
877 	if (sched_mc_power_savings | sched_smt_power_savings)
878 		return SD_POWERSAVINGS_BALANCE;
879 
880 	return SD_PREFER_SIBLING;
881 }
882 
883 /*
884  * Optimise SD flags for power savings:
885  * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
886  * Keep default SD flags if sched_{smt,mc}_power_saving=0
887  */
888 
889 static inline int sd_power_saving_flags(void)
890 {
891 	if (sched_mc_power_savings | sched_smt_power_savings)
892 		return SD_BALANCE_NEWIDLE;
893 
894 	return 0;
895 }
896 
897 struct sched_group {
898 	struct sched_group *next;	/* Must be a circular list */
899 
900 	/*
901 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
902 	 * single CPU.
903 	 */
904 	unsigned int cpu_power;
905 
906 	/*
907 	 * The CPUs this group covers.
908 	 *
909 	 * NOTE: this field is variable length. (Allocated dynamically
910 	 * by attaching extra space to the end of the structure,
911 	 * depending on how many CPUs the kernel has booted up with)
912 	 *
913 	 * It can also be embedded into static data structures at build
914 	 * time. (See 'struct static_sched_group' in kernel/sched.c)
915 	 */
916 	unsigned long cpumask[0];
917 };
918 
919 static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
920 {
921 	return to_cpumask(sg->cpumask);
922 }
923 
924 enum sched_domain_level {
925 	SD_LV_NONE = 0,
926 	SD_LV_SIBLING,
927 	SD_LV_MC,
928 	SD_LV_CPU,
929 	SD_LV_NODE,
930 	SD_LV_ALLNODES,
931 	SD_LV_MAX
932 };
933 
934 struct sched_domain_attr {
935 	int relax_domain_level;
936 };
937 
938 #define SD_ATTR_INIT	(struct sched_domain_attr) {	\
939 	.relax_domain_level = -1,			\
940 }
941 
942 struct sched_domain {
943 	/* These fields must be setup */
944 	struct sched_domain *parent;	/* top domain must be null terminated */
945 	struct sched_domain *child;	/* bottom domain must be null terminated */
946 	struct sched_group *groups;	/* the balancing groups of the domain */
947 	unsigned long min_interval;	/* Minimum balance interval ms */
948 	unsigned long max_interval;	/* Maximum balance interval ms */
949 	unsigned int busy_factor;	/* less balancing by factor if busy */
950 	unsigned int imbalance_pct;	/* No balance until over watermark */
951 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
952 	unsigned int busy_idx;
953 	unsigned int idle_idx;
954 	unsigned int newidle_idx;
955 	unsigned int wake_idx;
956 	unsigned int forkexec_idx;
957 	unsigned int smt_gain;
958 	int flags;			/* See SD_* */
959 	enum sched_domain_level level;
960 
961 	/* Runtime fields. */
962 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
963 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
964 	unsigned int nr_balance_failed; /* initialise to 0 */
965 
966 	u64 last_update;
967 
968 #ifdef CONFIG_SCHEDSTATS
969 	/* load_balance() stats */
970 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
971 	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
972 	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
973 	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
974 	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
975 	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
976 	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
977 	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
978 
979 	/* Active load balancing */
980 	unsigned int alb_count;
981 	unsigned int alb_failed;
982 	unsigned int alb_pushed;
983 
984 	/* SD_BALANCE_EXEC stats */
985 	unsigned int sbe_count;
986 	unsigned int sbe_balanced;
987 	unsigned int sbe_pushed;
988 
989 	/* SD_BALANCE_FORK stats */
990 	unsigned int sbf_count;
991 	unsigned int sbf_balanced;
992 	unsigned int sbf_pushed;
993 
994 	/* try_to_wake_up() stats */
995 	unsigned int ttwu_wake_remote;
996 	unsigned int ttwu_move_affine;
997 	unsigned int ttwu_move_balance;
998 #endif
999 #ifdef CONFIG_SCHED_DEBUG
1000 	char *name;
1001 #endif
1002 
1003 	/*
1004 	 * Span of all CPUs in this domain.
1005 	 *
1006 	 * NOTE: this field is variable length. (Allocated dynamically
1007 	 * by attaching extra space to the end of the structure,
1008 	 * depending on how many CPUs the kernel has booted up with)
1009 	 *
1010 	 * It can also be embedded into static data structures at build
1011 	 * time. (See 'struct static_sched_domain' in kernel/sched.c)
1012 	 */
1013 	unsigned long span[0];
1014 };
1015 
1016 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1017 {
1018 	return to_cpumask(sd->span);
1019 }
1020 
1021 extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1022 				    struct sched_domain_attr *dattr_new);
1023 
1024 /* Allocate an array of sched domains, for partition_sched_domains(). */
1025 cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1026 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1027 
1028 /* Test a flag in parent sched domain */
1029 static inline int test_sd_parent(struct sched_domain *sd, int flag)
1030 {
1031 	if (sd->parent && (sd->parent->flags & flag))
1032 		return 1;
1033 
1034 	return 0;
1035 }
1036 
1037 unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
1038 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
1039 
1040 #else /* CONFIG_SMP */
1041 
1042 struct sched_domain_attr;
1043 
1044 static inline void
1045 partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1046 			struct sched_domain_attr *dattr_new)
1047 {
1048 }
1049 #endif	/* !CONFIG_SMP */
1050 
1051 
1052 struct io_context;			/* See blkdev.h */
1053 
1054 
1055 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1056 extern void prefetch_stack(struct task_struct *t);
1057 #else
1058 static inline void prefetch_stack(struct task_struct *t) { }
1059 #endif
1060 
1061 struct audit_context;		/* See audit.c */
1062 struct mempolicy;
1063 struct pipe_inode_info;
1064 struct uts_namespace;
1065 
1066 struct rq;
1067 struct sched_domain;
1068 
1069 /*
1070  * wake flags
1071  */
1072 #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
1073 #define WF_FORK		0x02		/* child wakeup after fork */
1074 
1075 struct sched_class {
1076 	const struct sched_class *next;
1077 
1078 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
1079 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
1080 	void (*yield_task) (struct rq *rq);
1081 
1082 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1083 
1084 	struct task_struct * (*pick_next_task) (struct rq *rq);
1085 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1086 
1087 #ifdef CONFIG_SMP
1088 	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
1089 
1090 	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
1091 			struct rq *busiest, unsigned long max_load_move,
1092 			struct sched_domain *sd, enum cpu_idle_type idle,
1093 			int *all_pinned, int *this_best_prio);
1094 
1095 	int (*move_one_task) (struct rq *this_rq, int this_cpu,
1096 			      struct rq *busiest, struct sched_domain *sd,
1097 			      enum cpu_idle_type idle);
1098 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
1099 	void (*post_schedule) (struct rq *this_rq);
1100 	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
1101 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1102 
1103 	void (*set_cpus_allowed)(struct task_struct *p,
1104 				 const struct cpumask *newmask);
1105 
1106 	void (*rq_online)(struct rq *rq);
1107 	void (*rq_offline)(struct rq *rq);
1108 #endif
1109 
1110 	void (*set_curr_task) (struct rq *rq);
1111 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1112 	void (*task_fork) (struct task_struct *p);
1113 
1114 	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
1115 			       int running);
1116 	void (*switched_to) (struct rq *this_rq, struct task_struct *task,
1117 			     int running);
1118 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1119 			     int oldprio, int running);
1120 
1121 	unsigned int (*get_rr_interval) (struct rq *rq,
1122 					 struct task_struct *task);
1123 
1124 #ifdef CONFIG_FAIR_GROUP_SCHED
1125 	void (*moved_group) (struct task_struct *p, int on_rq);
1126 #endif
1127 };
1128 
1129 struct load_weight {
1130 	unsigned long weight, inv_weight;
1131 };
1132 
1133 /*
1134  * CFS stats for a schedulable entity (task, task-group etc)
1135  *
1136  * Current field usage histogram:
1137  *
1138  *     4 se->block_start
1139  *     4 se->run_node
1140  *     4 se->sleep_start
1141  *     6 se->load.weight
1142  */
1143 struct sched_entity {
1144 	struct load_weight	load;		/* for load-balancing */
1145 	struct rb_node		run_node;
1146 	struct list_head	group_node;
1147 	unsigned int		on_rq;
1148 
1149 	u64			exec_start;
1150 	u64			sum_exec_runtime;
1151 	u64			vruntime;
1152 	u64			prev_sum_exec_runtime;
1153 
1154 	u64			last_wakeup;
1155 	u64			avg_overlap;
1156 
1157 	u64			nr_migrations;
1158 
1159 	u64			start_runtime;
1160 	u64			avg_wakeup;
1161 
1162 #ifdef CONFIG_SCHEDSTATS
1163 	u64			wait_start;
1164 	u64			wait_max;
1165 	u64			wait_count;
1166 	u64			wait_sum;
1167 	u64			iowait_count;
1168 	u64			iowait_sum;
1169 
1170 	u64			sleep_start;
1171 	u64			sleep_max;
1172 	s64			sum_sleep_runtime;
1173 
1174 	u64			block_start;
1175 	u64			block_max;
1176 	u64			exec_max;
1177 	u64			slice_max;
1178 
1179 	u64			nr_migrations_cold;
1180 	u64			nr_failed_migrations_affine;
1181 	u64			nr_failed_migrations_running;
1182 	u64			nr_failed_migrations_hot;
1183 	u64			nr_forced_migrations;
1184 
1185 	u64			nr_wakeups;
1186 	u64			nr_wakeups_sync;
1187 	u64			nr_wakeups_migrate;
1188 	u64			nr_wakeups_local;
1189 	u64			nr_wakeups_remote;
1190 	u64			nr_wakeups_affine;
1191 	u64			nr_wakeups_affine_attempts;
1192 	u64			nr_wakeups_passive;
1193 	u64			nr_wakeups_idle;
1194 #endif
1195 
1196 #ifdef CONFIG_FAIR_GROUP_SCHED
1197 	struct sched_entity	*parent;
1198 	/* rq on which this entity is (to be) queued: */
1199 	struct cfs_rq		*cfs_rq;
1200 	/* rq "owned" by this entity/group: */
1201 	struct cfs_rq		*my_q;
1202 #endif
1203 };
1204 
1205 struct sched_rt_entity {
1206 	struct list_head run_list;
1207 	unsigned long timeout;
1208 	unsigned int time_slice;
1209 	int nr_cpus_allowed;
1210 
1211 	struct sched_rt_entity *back;
1212 #ifdef CONFIG_RT_GROUP_SCHED
1213 	struct sched_rt_entity	*parent;
1214 	/* rq on which this entity is (to be) queued: */
1215 	struct rt_rq		*rt_rq;
1216 	/* rq "owned" by this entity/group: */
1217 	struct rt_rq		*my_q;
1218 #endif
1219 };
1220 
1221 struct rcu_node;
1222 
1223 struct task_struct {
1224 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1225 	void *stack;
1226 	atomic_t usage;
1227 	unsigned int flags;	/* per process flags, defined below */
1228 	unsigned int ptrace;
1229 
1230 	int lock_depth;		/* BKL lock depth */
1231 
1232 #ifdef CONFIG_SMP
1233 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
1234 	int oncpu;
1235 #endif
1236 #endif
1237 
1238 	int prio, static_prio, normal_prio;
1239 	unsigned int rt_priority;
1240 	const struct sched_class *sched_class;
1241 	struct sched_entity se;
1242 	struct sched_rt_entity rt;
1243 
1244 #ifdef CONFIG_PREEMPT_NOTIFIERS
1245 	/* list of struct preempt_notifier: */
1246 	struct hlist_head preempt_notifiers;
1247 #endif
1248 
1249 	/*
1250 	 * fpu_counter contains the number of consecutive context switches
1251 	 * during which the FPU is used. If this is over a threshold, the lazy fpu
1252 	 * saving becomes unlazy to save the trap. This is an unsigned char
1253 	 * so that after 256 times the counter wraps and the behavior turns
1254 	 * lazy again; this is to deal with bursty apps that only use the FPU
1255 	 * for a short time.
1256 	 */
1257 	unsigned char fpu_counter;
1258 #ifdef CONFIG_BLK_DEV_IO_TRACE
1259 	unsigned int btrace_seq;
1260 #endif
1261 
1262 	unsigned int policy;
1263 	cpumask_t cpus_allowed;
1264 
1265 #ifdef CONFIG_TREE_PREEMPT_RCU
1266 	int rcu_read_lock_nesting;
1267 	char rcu_read_unlock_special;
1268 	struct rcu_node *rcu_blocked_node;
1269 	struct list_head rcu_node_entry;
1270 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1271 
1272 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1273 	struct sched_info sched_info;
1274 #endif
1275 
1276 	struct list_head tasks;
1277 	struct plist_node pushable_tasks;
1278 
1279 	struct mm_struct *mm, *active_mm;
1280 
1281 /* task state */
1282 	int exit_state;
1283 	int exit_code, exit_signal;
1284 	int pdeath_signal;  /*  The signal sent when the parent dies  */
1285 	/* ??? */
1286 	unsigned int personality;
1287 	unsigned did_exec:1;
1288 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
1289 				 * execve */
1290 	unsigned in_iowait:1;
1291 
1292 
1293 	/* Revert to default priority/policy when forking */
1294 	unsigned sched_reset_on_fork:1;
1295 
1296 	pid_t pid;
1297 	pid_t tgid;
1298 
1299 #ifdef CONFIG_CC_STACKPROTECTOR
1300 	/* Canary value for the -fstack-protector gcc feature */
1301 	unsigned long stack_canary;
1302 #endif
1303 
1304 	/*
1305 	 * pointers to (original) parent process, youngest child, younger sibling,
1306 	 * older sibling, respectively.  (p->father can be replaced with
1307 	 * p->real_parent->pid)
1308 	 */
1309 	struct task_struct *real_parent; /* real parent process */
1310 	struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
1311 	/*
1312 	 * children/sibling forms the list of my natural children
1313 	 */
1314 	struct list_head children;	/* list of my children */
1315 	struct list_head sibling;	/* linkage in my parent's children list */
1316 	struct task_struct *group_leader;	/* threadgroup leader */
1317 
1318 	/*
1319 	 * ptraced is the list of tasks this task is using ptrace on.
1320 	 * This includes both natural children and PTRACE_ATTACH targets.
1321 	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1322 	 */
1323 	struct list_head ptraced;
1324 	struct list_head ptrace_entry;
1325 
1326 	/*
1327 	 * This is the tracer handle for the ptrace BTS extension.
1328 	 * This field actually belongs to the ptracer task.
1329 	 */
1330 	struct bts_context *bts;
1331 
1332 	/* PID/PID hash table linkage. */
1333 	struct pid_link pids[PIDTYPE_MAX];
1334 	struct list_head thread_group;
1335 
1336 	struct completion *vfork_done;		/* for vfork() */
1337 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
1338 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
1339 
1340 	cputime_t utime, stime, utimescaled, stimescaled;
1341 	cputime_t gtime;
1342 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
1343 	cputime_t prev_utime, prev_stime;
1344 #endif
1345 	unsigned long nvcsw, nivcsw; /* context switch counts */
1346 	struct timespec start_time; 		/* monotonic time */
1347 	struct timespec real_start_time;	/* boot based time */
1348 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1349 	unsigned long min_flt, maj_flt;
1350 
1351 	struct task_cputime cputime_expires;
1352 	struct list_head cpu_timers[3];
1353 
1354 /* process credentials */
1355 	const struct cred *real_cred;	/* objective and real subjective task
1356 					 * credentials (COW) */
1357 	const struct cred *cred;	/* effective (overridable) subjective task
1358 					 * credentials (COW) */
1359 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
1360 					 * credential calculations
1361 					 * (notably ptrace) */
1362 	struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
1363 
1364 	char comm[TASK_COMM_LEN]; /* executable name excluding path
1365 				     - access with [gs]et_task_comm (which lock
1366 				       it with task_lock())
1367 				     - initialized normally by flush_old_exec */
1368 /* file system info */
1369 	int link_count, total_link_count;
1370 #ifdef CONFIG_SYSVIPC
1371 /* ipc stuff */
1372 	struct sysv_sem sysvsem;
1373 #endif
1374 #ifdef CONFIG_DETECT_HUNG_TASK
1375 /* hung task detection */
1376 	unsigned long last_switch_count;
1377 #endif
1378 /* CPU-specific state of this task */
1379 	struct thread_struct thread;
1380 /* filesystem information */
1381 	struct fs_struct *fs;
1382 /* open file information */
1383 	struct files_struct *files;
1384 /* namespaces */
1385 	struct nsproxy *nsproxy;
1386 /* signal handlers */
1387 	struct signal_struct *signal;
1388 	struct sighand_struct *sighand;
1389 
1390 	sigset_t blocked, real_blocked;
1391 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
1392 	struct sigpending pending;
1393 
1394 	unsigned long sas_ss_sp;
1395 	size_t sas_ss_size;
1396 	int (*notifier)(void *priv);
1397 	void *notifier_data;
1398 	sigset_t *notifier_mask;
1399 	struct audit_context *audit_context;
1400 #ifdef CONFIG_AUDITSYSCALL
1401 	uid_t loginuid;
1402 	unsigned int sessionid;
1403 #endif
1404 	seccomp_t seccomp;
1405 
1406 /* Thread group tracking */
1407    	u32 parent_exec_id;
1408    	u32 self_exec_id;
1409 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1410  * mempolicy */
1411 	spinlock_t alloc_lock;
1412 
1413 #ifdef CONFIG_GENERIC_HARDIRQS
1414 	/* IRQ handler threads */
1415 	struct irqaction *irqaction;
1416 #endif
1417 
1418 	/* Protection of the PI data structures: */
1419 	raw_spinlock_t pi_lock;
1420 
1421 #ifdef CONFIG_RT_MUTEXES
1422 	/* PI waiters blocked on a rt_mutex held by this task */
1423 	struct plist_head pi_waiters;
1424 	/* Deadlock detection and priority inheritance handling */
1425 	struct rt_mutex_waiter *pi_blocked_on;
1426 #endif
1427 
1428 #ifdef CONFIG_DEBUG_MUTEXES
1429 	/* mutex deadlock detection */
1430 	struct mutex_waiter *blocked_on;
1431 #endif
1432 #ifdef CONFIG_TRACE_IRQFLAGS
1433 	unsigned int irq_events;
1434 	unsigned long hardirq_enable_ip;
1435 	unsigned long hardirq_disable_ip;
1436 	unsigned int hardirq_enable_event;
1437 	unsigned int hardirq_disable_event;
1438 	int hardirqs_enabled;
1439 	int hardirq_context;
1440 	unsigned long softirq_disable_ip;
1441 	unsigned long softirq_enable_ip;
1442 	unsigned int softirq_disable_event;
1443 	unsigned int softirq_enable_event;
1444 	int softirqs_enabled;
1445 	int softirq_context;
1446 #endif
1447 #ifdef CONFIG_LOCKDEP
1448 # define MAX_LOCK_DEPTH 48UL
1449 	u64 curr_chain_key;
1450 	int lockdep_depth;
1451 	unsigned int lockdep_recursion;
1452 	struct held_lock held_locks[MAX_LOCK_DEPTH];
1453 	gfp_t lockdep_reclaim_gfp;
1454 #endif
1455 
1456 /* journalling filesystem info */
1457 	void *journal_info;
1458 
1459 /* stacked block device info */
1460 	struct bio *bio_list, **bio_tail;
1461 
1462 /* VM state */
1463 	struct reclaim_state *reclaim_state;
1464 
1465 	struct backing_dev_info *backing_dev_info;
1466 
1467 	struct io_context *io_context;
1468 
1469 	unsigned long ptrace_message;
1470 	siginfo_t *last_siginfo; /* For ptrace use.  */
1471 	struct task_io_accounting ioac;
1472 #if defined(CONFIG_TASK_XACCT)
1473 	u64 acct_rss_mem1;	/* accumulated rss usage */
1474 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
1475 	cputime_t acct_timexpd;	/* stime + utime since last update */
1476 #endif
1477 #ifdef CONFIG_CPUSETS
1478 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1479 	int cpuset_mem_spread_rotor;
1480 #endif
1481 #ifdef CONFIG_CGROUPS
1482 	/* Control Group info protected by css_set_lock */
1483 	struct css_set *cgroups;
1484 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1485 	struct list_head cg_list;
1486 #endif
1487 #ifdef CONFIG_FUTEX
1488 	struct robust_list_head __user *robust_list;
1489 #ifdef CONFIG_COMPAT
1490 	struct compat_robust_list_head __user *compat_robust_list;
1491 #endif
1492 	struct list_head pi_state_list;
1493 	struct futex_pi_state *pi_state_cache;
1494 #endif
1495 #ifdef CONFIG_PERF_EVENTS
1496 	struct perf_event_context *perf_event_ctxp;
1497 	struct mutex perf_event_mutex;
1498 	struct list_head perf_event_list;
1499 #endif
1500 #ifdef CONFIG_NUMA
1501 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1502 	short il_next;
1503 #endif
1504 	atomic_t fs_excl;	/* holding fs exclusive resources */
1505 	struct rcu_head rcu;
1506 
1507 	/*
1508 	 * cache last used pipe for splice
1509 	 */
1510 	struct pipe_inode_info *splice_pipe;
1511 #ifdef	CONFIG_TASK_DELAY_ACCT
1512 	struct task_delay_info *delays;
1513 #endif
1514 #ifdef CONFIG_FAULT_INJECTION
1515 	int make_it_fail;
1516 #endif
1517 	struct prop_local_single dirties;
1518 #ifdef CONFIG_LATENCYTOP
1519 	int latency_record_count;
1520 	struct latency_record latency_record[LT_SAVECOUNT];
1521 #endif
1522 	/*
1523 	 * time slack values; these are used to round up poll() and
1524 	 * select() etc timeout values. These are in nanoseconds.
1525 	 */
1526 	unsigned long timer_slack_ns;
1527 	unsigned long default_timer_slack_ns;
1528 
1529 	struct list_head	*scm_work_list;
1530 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1531 	/* Index of current stored address in ret_stack */
1532 	int curr_ret_stack;
1533 	/* Stack of return addresses for return function tracing */
1534 	struct ftrace_ret_stack	*ret_stack;
1535 	/* time stamp for last schedule */
1536 	unsigned long long ftrace_timestamp;
1537 	/*
1538 	 * Number of functions that haven't been traced
1539 	 * because of depth overrun.
1540 	 */
1541 	atomic_t trace_overrun;
1542 	/* Pause for the tracing */
1543 	atomic_t tracing_graph_pause;
1544 #endif
1545 #ifdef CONFIG_TRACING
1546 	/* state flags for use by tracers */
1547 	unsigned long trace;
1548 	/* bitmask of trace recursion */
1549 	unsigned long trace_recursion;
1550 #endif /* CONFIG_TRACING */
1551 	unsigned long stack_start;
1552 #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
1553 	struct memcg_batch_info {
1554 		int do_batch;	/* incremented when batch uncharge started */
1555 		struct mem_cgroup *memcg; /* target memcg of uncharge */
1556 		unsigned long bytes; 		/* uncharged usage */
1557 		unsigned long memsw_bytes; /* uncharged mem+swap usage */
1558 	} memcg_batch;
1559 #endif
1560 };
1561 
1562 /* Future-safe accessor for struct task_struct's cpus_allowed. */
1563 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1564 
1565 /*
1566  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
1567  * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
1568  * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
1569  * values are inverted: lower p->prio value means higher priority.
1570  *
1571  * The MAX_USER_RT_PRIO value allows the actual maximum
1572  * RT priority to be separate from the value exported to
1573  * user-space.  This allows kernel threads to set their
1574  * priority to a value higher than any user task. Note:
1575  * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
1576  */
1577 
1578 #define MAX_USER_RT_PRIO	100
1579 #define MAX_RT_PRIO		MAX_USER_RT_PRIO
1580 
1581 #define MAX_PRIO		(MAX_RT_PRIO + 40)
1582 #define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
1583 
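/*
 * Illustrative arithmetic (not part of the original header): with
 * MAX_RT_PRIO = 100, SCHED_NORMAL/SCHED_BATCH tasks occupy prio 100..139,
 * one level per nice value -20..19, so DEFAULT_PRIO (120) corresponds to
 * nice 0.  An RT priority of N maps to prio MAX_RT_PRIO-1-N, so higher RT
 * priority means a numerically lower (better) prio.
 */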
1584 static inline int rt_prio(int prio)
1585 {
1586 	if (unlikely(prio < MAX_RT_PRIO))
1587 		return 1;
1588 	return 0;
1589 }
1590 
1591 static inline int rt_task(struct task_struct *p)
1592 {
1593 	return rt_prio(p->prio);
1594 }
1595 
1596 static inline struct pid *task_pid(struct task_struct *task)
1597 {
1598 	return task->pids[PIDTYPE_PID].pid;
1599 }
1600 
1601 static inline struct pid *task_tgid(struct task_struct *task)
1602 {
1603 	return task->group_leader->pids[PIDTYPE_PID].pid;
1604 }
1605 
1606 /*
1607  * Without tasklist or rcu lock it is not safe to dereference
1608  * the result of task_pgrp/task_session even if task == current;
1609  * we can race with another thread doing sys_setsid/sys_setpgid.
1610  */
1611 static inline struct pid *task_pgrp(struct task_struct *task)
1612 {
1613 	return task->group_leader->pids[PIDTYPE_PGID].pid;
1614 }
1615 
1616 static inline struct pid *task_session(struct task_struct *task)
1617 {
1618 	return task->group_leader->pids[PIDTYPE_SID].pid;
1619 }
1620 
1621 struct pid_namespace;
1622 
1623 /*
1624  * the helpers to get the task's different pids as they are seen
1625  * from various namespaces
1626  *
1627  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1628  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1629  *                     current.
1630  * task_xid_nr_ns()  : id seen from the ns specified;
1631  *
1632  * set_task_vxid()   : assigns a virtual id to a task;
1633  *
1634  * see also pid_nr() etc in include/linux/pid.h
1635  */
1636 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1637 			struct pid_namespace *ns);
1638 
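/*
 * Illustrative (not part of the original header): for the init task of a
 * pid namespace nested inside the initial one, task_pid_vnr(task) returns 1
 * while task_pid_nr(task) returns whatever global pid was allocated for it.
 */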
1639 static inline pid_t task_pid_nr(struct task_struct *tsk)
1640 {
1641 	return tsk->pid;
1642 }
1643 
1644 static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1645 					struct pid_namespace *ns)
1646 {
1647 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1648 }
1649 
1650 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1651 {
1652 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1653 }
1654 
1655 
1656 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1657 {
1658 	return tsk->tgid;
1659 }
1660 
1661 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1662 
1663 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1664 {
1665 	return pid_vnr(task_tgid(tsk));
1666 }
1667 
1668 
1669 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1670 					struct pid_namespace *ns)
1671 {
1672 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1673 }
1674 
1675 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1676 {
1677 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1678 }
1679 
1680 
1681 static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1682 					struct pid_namespace *ns)
1683 {
1684 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1685 }
1686 
1687 static inline pid_t task_session_vnr(struct task_struct *tsk)
1688 {
1689 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1690 }
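
/*
 * Illustrative contrast of the two flavours above (values are made up):
 * for a task inside a pid namespace the _nr id is the one the init
 * namespace sees, while the _vnr id is the one seen from the pid
 * namespace of current.
 *
 *	pid_t global = task_pid_nr(current);	e.g. 4711 on the host
 *	pid_t local  = task_pid_vnr(current);	e.g. 3 inside the container
 */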
1691 
1692 /* obsolete, do not use */
1693 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1694 {
1695 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
1696 }
1697 
1698 /**
1699  * pid_alive - check that a task structure is not stale
1700  * @p: Task structure to be checked.
1701  *
1702  * Test if a process is not yet dead (at most zombie state).
1703  * If pid_alive fails, then pointers within the task structure
1704  * can be stale and must not be dereferenced.
1705  */
1706 static inline int pid_alive(struct task_struct *p)
1707 {
1708 	return p->pids[PIDTYPE_PID].pid != NULL;
1709 }
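
/*
 * Illustrative sketch (hypothetical caller): check pid_alive() under RCU
 * before following pointers out of a possibly-exiting task_struct.
 *
 *	rcu_read_lock();
 *	if (pid_alive(p))
 *		ppid = task_pid_nr(rcu_dereference(p->real_parent));
 *	rcu_read_unlock();
 */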
1710 
1711 /**
1712  * is_global_init - check if a task structure is init
1713  * @tsk: Task structure to be checked.
1714  *
1715  * Check if a task structure is the first user space task the kernel created.
1716  */
1717 static inline int is_global_init(struct task_struct *tsk)
1718 {
1719 	return tsk->pid == 1;
1720 }
1721 
1722 /*
1723  * is_container_init:
1724  * check whether the task is init in its own pid namespace.
1725  */
1726 extern int is_container_init(struct task_struct *tsk);
1727 
1728 extern struct pid *cad_pid;
1729 
1730 extern void free_task(struct task_struct *tsk);
1731 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1732 
1733 extern void __put_task_struct(struct task_struct *t);
1734 
1735 static inline void put_task_struct(struct task_struct *t)
1736 {
1737 	if (atomic_dec_and_test(&t->usage))
1738 		__put_task_struct(t);
1739 }
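
/*
 * Typical reference pattern (illustrative): take a reference while the
 * task pointer is kept beyond the RCU section or tasklist_lock it was
 * found under, and drop it when done.
 *
 *	get_task_struct(tsk);
 *	... use tsk after the locks are released ...
 *	put_task_struct(tsk);
 */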
1740 
1741 extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
1742 extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
1743 
1744 /*
1745  * Per process flags
1746  */
1747 #define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
1748 					/* Not implemented yet, only for 486*/
1749 #define PF_STARTING	0x00000002	/* being created */
1750 #define PF_EXITING	0x00000004	/* getting shut down */
1751 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
1752 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
1753 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
1754 #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
1755 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
1756 #define PF_DUMPCORE	0x00000200	/* dumped core */
1757 #define PF_SIGNALED	0x00000400	/* killed by a signal */
1758 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
1759 #define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
1760 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
1761 #define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
1762 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
1763 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
1764 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
1765 #define PF_KSWAPD	0x00040000	/* I am kswapd */
1766 #define PF_OOM_ORIGIN	0x00080000	/* Allocating much memory to others */
1767 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1768 #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
1769 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1770 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
1771 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
1772 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
1773 #define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
1774 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1775 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
1776 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
1777 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
1778 #define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
1779 
1780 /*
1781  * Only the _current_ task can read/write to tsk->flags, but other
1782  * tasks can access tsk->flags in read-only mode, for example
1783  * via tsk_used_math() (as during threaded core dumping).
1784  * There is however an exception to this rule during ptrace
1785  * or during fork: the ptracer task is allowed to write to the
1786  * child->flags of its traced child (same goes for fork, the parent
1787  * can write to the child->flags), because we're guaranteed the
1788  * child is not running and in turn not changing child->flags
1789  * at the same time the parent does it.
1790  */
1791 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1792 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1793 #define clear_used_math() clear_stopped_child_used_math(current)
1794 #define set_used_math() set_stopped_child_used_math(current)
1795 #define conditional_stopped_child_used_math(condition, child) \
1796 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1797 #define conditional_used_math(condition) \
1798 	conditional_stopped_child_used_math(condition, current)
1799 #define copy_to_stopped_child_used_math(child) \
1800 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1801 /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1802 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1803 #define used_math() tsk_used_math(current)
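
/*
 * Usage note with a small sketch: tsk_used_math()/used_math() return 0 or
 * PF_USED_MATH, never 1, so treat the result as a boolean instead of
 * comparing it against 1.  init_fpu() below is an arch-specific helper,
 * shown only for illustration.
 *
 *	if (!used_math())
 *		init_fpu(current);
 */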
1804 
1805 #ifdef CONFIG_TREE_PREEMPT_RCU
1806 
1807 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1808 #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1809 
1810 static inline void rcu_copy_process(struct task_struct *p)
1811 {
1812 	p->rcu_read_lock_nesting = 0;
1813 	p->rcu_read_unlock_special = 0;
1814 	p->rcu_blocked_node = NULL;
1815 	INIT_LIST_HEAD(&p->rcu_node_entry);
1816 }
1817 
1818 #else
1819 
1820 static inline void rcu_copy_process(struct task_struct *p)
1821 {
1822 }
1823 
1824 #endif
1825 
1826 #ifdef CONFIG_SMP
1827 extern int set_cpus_allowed_ptr(struct task_struct *p,
1828 				const struct cpumask *new_mask);
1829 #else
1830 static inline int set_cpus_allowed_ptr(struct task_struct *p,
1831 				       const struct cpumask *new_mask)
1832 {
1833 	if (!cpumask_test_cpu(0, new_mask))
1834 		return -EINVAL;
1835 	return 0;
1836 }
1837 #endif
1838 
1839 #ifndef CONFIG_CPUMASK_OFFSTACK
1840 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1841 {
1842 	return set_cpus_allowed_ptr(p, &new_mask);
1843 }
1844 #endif
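
/*
 * Minimal usage sketch (illustrative): pin a task to one CPU; cpumask_of()
 * is from include/linux/cpumask.h.
 *
 *	if (set_cpus_allowed_ptr(p, cpumask_of(cpu)) < 0)
 *		... handle the error, e.g. the CPU is not online ...
 */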
1845 
1846 /*
1847  * Architectures that select CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their
1848  * arch Kconfig can still set this to 1 if it turns out during bootup
1849  * that sched_clock() is in fact reliable on the running system after
1850  * all:
1851  */
1852 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1853 extern int sched_clock_stable;
1854 #endif
1855 
1856 /* ftrace calls sched_clock() directly */
1857 extern unsigned long long notrace sched_clock(void);
1858 
1859 extern void sched_clock_init(void);
1860 extern u64 sched_clock_cpu(int cpu);
1861 
1862 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1863 static inline void sched_clock_tick(void)
1864 {
1865 }
1866 
1867 static inline void sched_clock_idle_sleep_event(void)
1868 {
1869 }
1870 
1871 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
1872 {
1873 }
1874 #else
1875 extern void sched_clock_tick(void);
1876 extern void sched_clock_idle_sleep_event(void);
1877 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1878 #endif
1879 
1880 /*
1881  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
1882  * clock constructed from sched_clock():
1883  */
1884 extern unsigned long long cpu_clock(int cpu);
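
/*
 * Illustrative timing sketch: cpu_clock() returns nanoseconds that are
 * only guaranteed to be consistent per CPU, so compare timestamps taken
 * on the same CPU.
 *
 *	u64 t0 = cpu_clock(smp_processor_id());
 *	... do the work being measured ...
 *	u64 t1 = cpu_clock(smp_processor_id());
 *	pr_debug("took %llu ns\n", (unsigned long long)(t1 - t0));
 */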
1885 
1886 extern unsigned long long
1887 task_sched_runtime(struct task_struct *task);
1888 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
1889 
1890 /* sched_exec is called by processes performing an exec */
1891 #ifdef CONFIG_SMP
1892 extern void sched_exec(void);
1893 #else
1894 #define sched_exec()   {}
1895 #endif
1896 
1897 extern void sched_clock_idle_sleep_event(void);
1898 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1899 
1900 #ifdef CONFIG_HOTPLUG_CPU
1901 extern void idle_task_exit(void);
1902 #else
1903 static inline void idle_task_exit(void) {}
1904 #endif
1905 
1906 extern void sched_idle_next(void);
1907 
1908 #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
1909 extern void wake_up_idle_cpu(int cpu);
1910 #else
1911 static inline void wake_up_idle_cpu(int cpu) { }
1912 #endif
1913 
1914 extern unsigned int sysctl_sched_latency;
1915 extern unsigned int sysctl_sched_min_granularity;
1916 extern unsigned int sysctl_sched_wakeup_granularity;
1917 extern unsigned int sysctl_sched_shares_ratelimit;
1918 extern unsigned int sysctl_sched_shares_thresh;
1919 extern unsigned int sysctl_sched_child_runs_first;
1920 
1921 enum sched_tunable_scaling {
1922 	SCHED_TUNABLESCALING_NONE,
1923 	SCHED_TUNABLESCALING_LOG,
1924 	SCHED_TUNABLESCALING_LINEAR,
1925 	SCHED_TUNABLESCALING_END,
1926 };
1927 extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
1928 
1929 #ifdef CONFIG_SCHED_DEBUG
1930 extern unsigned int sysctl_sched_migration_cost;
1931 extern unsigned int sysctl_sched_nr_migrate;
1932 extern unsigned int sysctl_sched_time_avg;
1933 extern unsigned int sysctl_timer_migration;
1934 
1935 int sched_proc_update_handler(struct ctl_table *table, int write,
1936 		void __user *buffer, size_t *length,
1937 		loff_t *ppos);
1938 #endif
1939 #ifdef CONFIG_SCHED_DEBUG
1940 static inline unsigned int get_sysctl_timer_migration(void)
1941 {
1942 	return sysctl_timer_migration;
1943 }
1944 #else
1945 static inline unsigned int get_sysctl_timer_migration(void)
1946 {
1947 	return 1;
1948 }
1949 #endif
1950 extern unsigned int sysctl_sched_rt_period;
1951 extern int sysctl_sched_rt_runtime;
1952 
1953 int sched_rt_handler(struct ctl_table *table, int write,
1954 		void __user *buffer, size_t *lenp,
1955 		loff_t *ppos);
1956 
1957 extern unsigned int sysctl_sched_compat_yield;
1958 
1959 #ifdef CONFIG_RT_MUTEXES
1960 extern int rt_mutex_getprio(struct task_struct *p);
1961 extern void rt_mutex_setprio(struct task_struct *p, int prio);
1962 extern void rt_mutex_adjust_pi(struct task_struct *p);
1963 #else
1964 static inline int rt_mutex_getprio(struct task_struct *p)
1965 {
1966 	return p->normal_prio;
1967 }
1968 # define rt_mutex_adjust_pi(p)		do { } while (0)
1969 #endif
1970 
1971 extern void set_user_nice(struct task_struct *p, long nice);
1972 extern int task_prio(const struct task_struct *p);
1973 extern int task_nice(const struct task_struct *p);
1974 extern int can_nice(const struct task_struct *p, const int nice);
1975 extern int task_curr(const struct task_struct *p);
1976 extern int idle_cpu(int cpu);
1977 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
1978 extern int sched_setscheduler_nocheck(struct task_struct *, int,
1979 				      struct sched_param *);
1980 extern struct task_struct *idle_task(int cpu);
1981 extern struct task_struct *curr_task(int cpu);
1982 extern void set_curr_task(int cpu, struct task_struct *p);
1983 
1984 void yield(void);
1985 
1986 /*
1987  * The default (Linux) execution domain.
1988  */
1989 extern struct exec_domain	default_exec_domain;
1990 
1991 union thread_union {
1992 	struct thread_info thread_info;
1993 	unsigned long stack[THREAD_SIZE/sizeof(long)];
1994 };
1995 
1996 #ifndef __HAVE_ARCH_KSTACK_END
1997 static inline int kstack_end(void *addr)
1998 {
1999 	/* Reliable end of stack detection:
2000 	 * Some APM BIOS versions misalign the stack
2001 	 */
2002 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2003 }
2004 #endif
2005 
2006 extern union thread_union init_thread_union;
2007 extern struct task_struct init_task;
2008 
2009 extern struct   mm_struct init_mm;
2010 
2011 extern struct pid_namespace init_pid_ns;
2012 
2013 /*
2014  * find a task by one of its numerical ids
2015  *
2016  * find_task_by_pid_ns():
2017  *      finds a task by its pid in the specified namespace
2018  * find_task_by_vpid():
2019  *      finds a task by its virtual pid
2020  *
2021  * see also find_vpid() etc in include/linux/pid.h
2022  */
2023 
2024 extern struct task_struct *find_task_by_vpid(pid_t nr);
2025 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2026 		struct pid_namespace *ns);
2027 
2028 extern void __set_special_pids(struct pid *pid);
2029 
2030 /* per-UID process charging. */
2031 extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
2032 static inline struct user_struct *get_uid(struct user_struct *u)
2033 {
2034 	atomic_inc(&u->__count);
2035 	return u;
2036 }
2037 extern void free_uid(struct user_struct *);
2038 extern void release_uids(struct user_namespace *ns);
2039 
2040 #include <asm/current.h>
2041 
2042 extern void do_timer(unsigned long ticks);
2043 
2044 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2045 extern int wake_up_process(struct task_struct *tsk);
2046 extern void wake_up_new_task(struct task_struct *tsk,
2047 				unsigned long clone_flags);
2048 #ifdef CONFIG_SMP
2049  extern void kick_process(struct task_struct *tsk);
2050 #else
2051  static inline void kick_process(struct task_struct *tsk) { }
2052 #endif
2053 extern void sched_fork(struct task_struct *p, int clone_flags);
2054 extern void sched_dead(struct task_struct *p);
2055 
2056 extern void proc_caches_init(void);
2057 extern void flush_signals(struct task_struct *);
2058 extern void __flush_signals(struct task_struct *);
2059 extern void ignore_signals(struct task_struct *);
2060 extern void flush_signal_handlers(struct task_struct *, int force_default);
2061 extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2062 
2063 static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2064 {
2065 	unsigned long flags;
2066 	int ret;
2067 
2068 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
2069 	ret = dequeue_signal(tsk, mask, info);
2070 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2071 
2072 	return ret;
2073 }
2074 
2075 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2076 			      sigset_t *mask);
2077 extern void unblock_all_signals(void);
2078 extern void release_task(struct task_struct * p);
2079 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2080 extern int force_sigsegv(int, struct task_struct *);
2081 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2082 extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2083 extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2084 extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
2085 extern int kill_pgrp(struct pid *pid, int sig, int priv);
2086 extern int kill_pid(struct pid *pid, int sig, int priv);
2087 extern int kill_proc_info(int, struct siginfo *, pid_t);
2088 extern int do_notify_parent(struct task_struct *, int);
2089 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2090 extern void force_sig(int, struct task_struct *);
2091 extern int send_sig(int, struct task_struct *, int);
2092 extern void zap_other_threads(struct task_struct *p);
2093 extern struct sigqueue *sigqueue_alloc(void);
2094 extern void sigqueue_free(struct sigqueue *);
2095 extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
2096 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2097 extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
2098 
2099 static inline int kill_cad_pid(int sig, int priv)
2100 {
2101 	return kill_pid(cad_pid, sig, priv);
2102 }
2103 
2104 /* These can be the second arg to send_sig_info/send_group_sig_info.  */
2105 #define SEND_SIG_NOINFO ((struct siginfo *) 0)
2106 #define SEND_SIG_PRIV	((struct siginfo *) 1)
2107 #define SEND_SIG_FORCED	((struct siginfo *) 2)
2108 
2109 /*
2110  * True if we are on the alternate signal stack.
2111  */
2112 static inline int on_sig_stack(unsigned long sp)
2113 {
2114 #ifdef CONFIG_STACK_GROWSUP
2115 	return sp >= current->sas_ss_sp &&
2116 		sp - current->sas_ss_sp < current->sas_ss_size;
2117 #else
2118 	return sp > current->sas_ss_sp &&
2119 		sp - current->sas_ss_sp <= current->sas_ss_size;
2120 #endif
2121 }
2122 
2123 static inline int sas_ss_flags(unsigned long sp)
2124 {
2125 	return (current->sas_ss_size == 0 ? SS_DISABLE
2126 		: on_sig_stack(sp) ? SS_ONSTACK : 0);
2127 }
2128 
2129 /*
2130  * Routines for handling mm_structs
2131  */
2132 extern struct mm_struct * mm_alloc(void);
2133 
2134 /* mmdrop drops the mm and the page tables */
2135 extern void __mmdrop(struct mm_struct *);
2136 static inline void mmdrop(struct mm_struct * mm)
2137 {
2138 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2139 		__mmdrop(mm);
2140 }
2141 
2142 /* mmput gets rid of the mappings and all user-space */
2143 extern void mmput(struct mm_struct *);
2144 /* Grab a reference to a task's mm, if it is not already going away */
2145 extern struct mm_struct *get_task_mm(struct task_struct *task);
2146 /* Remove the current task's stale references to the old mm_struct */
2147 extern void mm_release(struct task_struct *, struct mm_struct *);
2148 /* Allocate a new mm structure and copy contents from tsk->mm */
2149 extern struct mm_struct *dup_mm(struct task_struct *tsk);
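
/*
 * Typical pattern (illustrative): get_task_mm() returns NULL for kernel
 * threads or when the mm is already going away, and its reference must be
 * dropped with mmput() rather than mmdrop().
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		... inspect mm, e.g. mm->total_vm ...
 *		mmput(mm);
 *	}
 */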
2150 
2151 extern int copy_thread(unsigned long, unsigned long, unsigned long,
2152 			struct task_struct *, struct pt_regs *);
2153 extern void flush_thread(void);
2154 extern void exit_thread(void);
2155 
2156 extern void exit_files(struct task_struct *);
2157 extern void __cleanup_signal(struct signal_struct *);
2158 extern void __cleanup_sighand(struct sighand_struct *);
2159 
2160 extern void exit_itimers(struct signal_struct *);
2161 extern void flush_itimer_signals(void);
2162 
2163 extern NORET_TYPE void do_group_exit(int);
2164 
2165 extern void daemonize(const char *, ...);
2166 extern int allow_signal(int);
2167 extern int disallow_signal(int);
2168 
2169 extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
2170 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
2171 struct task_struct *fork_idle(int);
2172 
2173 extern void set_task_comm(struct task_struct *tsk, char *from);
2174 extern char *get_task_comm(char *to, struct task_struct *tsk);
2175 
2176 #ifdef CONFIG_SMP
2177 extern void wait_task_context_switch(struct task_struct *p);
2178 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2179 #else
2180 static inline void wait_task_context_switch(struct task_struct *p) {}
2181 static inline unsigned long wait_task_inactive(struct task_struct *p,
2182 					       long match_state)
2183 {
2184 	return 1;
2185 }
2186 #endif
2187 
2188 #define next_task(p) \
2189 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2190 
2191 #define for_each_process(p) \
2192 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2193 
2194 extern bool current_is_single_threaded(void);
2195 
2196 /*
2197  * Careful: do_each_thread/while_each_thread is a double loop so
2198  *          'break' will not work as expected - use goto instead.
2199  */
2200 #define do_each_thread(g, t) \
2201 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2202 
2203 #define while_each_thread(g, t) \
2204 	while ((t = next_thread(t)) != g)
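
/*
 * Illustrative iteration sketch (some_condition() is a stand-in for the
 * caller's test): walk the double loop under read_lock(&tasklist_lock),
 * and, as noted above, leave it with goto rather than break.
 *
 *	struct task_struct *g, *t;
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		if (some_condition(t))
 *			goto out;
 *	} while_each_thread(g, t);
 * out:
 *	read_unlock(&tasklist_lock);
 */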
2205 
2206 /* de_thread depends on thread_group_leader not being a pid based check */
2207 #define thread_group_leader(p)	(p == p->group_leader)
2208 
2209 /* Due to the insanities of de_thread it is possible for a process
2210  * to have the pid of the thread group leader without actually being
2211  * the thread group leader.  For iteration through the pids in proc
2212  * all we care about is that we have a task with the appropriate
2213  * pid; we don't actually care if we have the right task.
2214  */
2215 static inline int has_group_leader_pid(struct task_struct *p)
2216 {
2217 	return p->pid == p->tgid;
2218 }
2219 
2220 static inline
2221 int same_thread_group(struct task_struct *p1, struct task_struct *p2)
2222 {
2223 	return p1->tgid == p2->tgid;
2224 }
2225 
2226 static inline struct task_struct *next_thread(const struct task_struct *p)
2227 {
2228 	return list_entry_rcu(p->thread_group.next,
2229 			      struct task_struct, thread_group);
2230 }
2231 
2232 static inline int thread_group_empty(struct task_struct *p)
2233 {
2234 	return list_empty(&p->thread_group);
2235 }
2236 
2237 #define delay_group_leader(p) \
2238 		(thread_group_leader(p) && !thread_group_empty(p))
2239 
2240 static inline int task_detached(struct task_struct *p)
2241 {
2242 	return p->exit_signal == -1;
2243 }
2244 
2245 /*
2246  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2247  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2248  * pins the final release of task.io_context.  Also protects ->cpuset and
2249  * ->cgroup.subsys[].
2250  *
2251  * Nests both inside and outside of read_lock(&tasklist_lock).
2252  * It must not be nested with write_lock_irq(&tasklist_lock),
2253  * neither inside nor outside.
2254  */
2255 static inline void task_lock(struct task_struct *p)
2256 {
2257 	spin_lock(&p->alloc_lock);
2258 }
2259 
2260 static inline void task_unlock(struct task_struct *p)
2261 {
2262 	spin_unlock(&p->alloc_lock);
2263 }
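
/*
 * Minimal usage sketch (illustrative): task_lock() is what makes reading
 * another task's ->comm stable; prefer get_task_comm(), declared earlier
 * in this file, when copying the name.
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	task_lock(p);
 *	strncpy(comm, p->comm, sizeof(comm));
 *	task_unlock(p);
 */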
2264 
2265 extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
2266 							unsigned long *flags);
2267 
2268 static inline void unlock_task_sighand(struct task_struct *tsk,
2269 						unsigned long *flags)
2270 {
2271 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2272 }
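
/*
 * Typical pattern (illustrative): lock_task_sighand() returns NULL when
 * the task no longer has a sighand (it is being released), so check the
 * result before the matching unlock.
 *
 *	unsigned long flags;
 *	struct sighand_struct *sighand;
 *
 *	sighand = lock_task_sighand(tsk, &flags);
 *	if (sighand) {
 *		... tsk->signal and pending queues are stable here ...
 *		unlock_task_sighand(tsk, &flags);
 *	}
 */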
2273 
2274 #ifndef __HAVE_THREAD_FUNCTIONS
2275 
2276 #define task_thread_info(task)	((struct thread_info *)(task)->stack)
2277 #define task_stack_page(task)	((task)->stack)
2278 
2279 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2280 {
2281 	*task_thread_info(p) = *task_thread_info(org);
2282 	task_thread_info(p)->task = p;
2283 }
2284 
2285 static inline unsigned long *end_of_stack(struct task_struct *p)
2286 {
2287 	return (unsigned long *)(task_thread_info(p) + 1);
2288 }
2289 
2290 #endif
2291 
2292 static inline int object_is_on_stack(void *obj)
2293 {
2294 	void *stack = task_stack_page(current);
2295 
2296 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2297 }
2298 
2299 extern void thread_info_cache_init(void);
2300 
2301 #ifdef CONFIG_DEBUG_STACK_USAGE
2302 static inline unsigned long stack_not_used(struct task_struct *p)
2303 {
2304 	unsigned long *n = end_of_stack(p);
2305 
2306 	do { 	/* Skip over canary */
2307 		n++;
2308 	} while (!*n);
2309 
2310 	return (unsigned long)n - (unsigned long)end_of_stack(p);
2311 }
2312 #endif
2313 
2314 /* set thread flags in another task's structure
2315  * - see asm/thread_info.h for TIF_xxxx flags available
2316  */
2317 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2318 {
2319 	set_ti_thread_flag(task_thread_info(tsk), flag);
2320 }
2321 
2322 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2323 {
2324 	clear_ti_thread_flag(task_thread_info(tsk), flag);
2325 }
2326 
2327 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2328 {
2329 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2330 }
2331 
2332 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2333 {
2334 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2335 }
2336 
2337 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2338 {
2339 	return test_ti_thread_flag(task_thread_info(tsk), flag);
2340 }
2341 
2342 static inline void set_tsk_need_resched(struct task_struct *tsk)
2343 {
2344 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2345 }
2346 
2347 static inline void clear_tsk_need_resched(struct task_struct *tsk)
2348 {
2349 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2350 }
2351 
2352 static inline int test_tsk_need_resched(struct task_struct *tsk)
2353 {
2354 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2355 }
2356 
2357 static inline int restart_syscall(void)
2358 {
2359 	set_tsk_thread_flag(current, TIF_SIGPENDING);
2360 	return -ERESTARTNOINTR;
2361 }
2362 
2363 static inline int signal_pending(struct task_struct *p)
2364 {
2365 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2366 }
2367 
2368 static inline int __fatal_signal_pending(struct task_struct *p)
2369 {
2370 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2371 }
2372 
2373 static inline int fatal_signal_pending(struct task_struct *p)
2374 {
2375 	return signal_pending(p) && __fatal_signal_pending(p);
2376 }
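
/*
 * Illustrative sleep loop (some_event_ready() is a stand-in for the
 * caller's condition): an interruptible wait re-checks signal_pending()
 * so a signal can break it out of the wait.
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (some_event_ready())
 *			break;
 *		if (signal_pending(current)) {
 *			__set_current_state(TASK_RUNNING);
 *			return -ERESTARTSYS;
 *		}
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */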
2377 
2378 static inline int signal_pending_state(long state, struct task_struct *p)
2379 {
2380 	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2381 		return 0;
2382 	if (!signal_pending(p))
2383 		return 0;
2384 
2385 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2386 }
2387 
2388 static inline int need_resched(void)
2389 {
2390 	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
2391 }
2392 
2393 /*
2394  * cond_resched() and cond_resched_lock(): latency reduction via
2395  * explicit rescheduling in places that are safe. The return
2396  * value indicates whether a reschedule was in fact done.
2397  * cond_resched_lock() will drop the spinlock before scheduling,
2398  * cond_resched_softirq() will enable bhs before scheduling.
2399  */
2400 extern int _cond_resched(void);
2401 
2402 #define cond_resched() ({			\
2403 	__might_sleep(__FILE__, __LINE__, 0);	\
2404 	_cond_resched();			\
2405 })
2406 
2407 extern int __cond_resched_lock(spinlock_t *lock);
2408 
2409 #ifdef CONFIG_PREEMPT
2410 #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
2411 #else
2412 #define PREEMPT_LOCK_OFFSET	0
2413 #endif
2414 
2415 #define cond_resched_lock(lock) ({				\
2416 	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
2417 	__cond_resched_lock(lock);				\
2418 })
2419 
2420 extern int __cond_resched_softirq(void);
2421 
2422 #define cond_resched_softirq() ({				\
2423 	__might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET);	\
2424 	__cond_resched_softirq();				\
2425 })
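
/*
 * Illustrative sketch (process_one() is a stand-in for real work): long
 * loops call cond_resched() so other tasks get to run on !CONFIG_PREEMPT
 * kernels; loops that hold a spinlock use cond_resched_lock() instead so
 * the lock is dropped across the reschedule.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_one(i);
 *		cond_resched();
 *	}
 */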
2426 
2427 /*
2428  * Does a critical section need to be broken due to another
2429  * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
2430  * but reflects a general need for low latency.)
2431  */
2432 static inline int spin_needbreak(spinlock_t *lock)
2433 {
2434 #ifdef CONFIG_PREEMPT
2435 	return spin_is_contended(lock);
2436 #else
2437 	return 0;
2438 #endif
2439 }
2440 
2441 /*
2442  * Thread group CPU time accounting.
2443  */
2444 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2445 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2446 
2447 static inline void thread_group_cputime_init(struct signal_struct *sig)
2448 {
2449 	sig->cputimer.cputime = INIT_CPUTIME;
2450 	spin_lock_init(&sig->cputimer.lock);
2451 	sig->cputimer.running = 0;
2452 }
2453 
2454 static inline void thread_group_cputime_free(struct signal_struct *sig)
2455 {
2456 }
2457 
2458 /*
2459  * Reevaluate whether the task has signals pending delivery.
2460  * Wake the task if so.
2461  * This is required every time the blocked sigset_t changes.
2462  * Callers must hold sighand->siglock.
2463  */
2464 extern void recalc_sigpending_and_wake(struct task_struct *t);
2465 extern void recalc_sigpending(void);
2466 
2467 extern void signal_wake_up(struct task_struct *t, int resume_stopped);
2468 
2469 /*
2470  * Wrappers for p->thread_info->cpu access. No-op on UP.
2471  */
2472 #ifdef CONFIG_SMP
2473 
2474 static inline unsigned int task_cpu(const struct task_struct *p)
2475 {
2476 	return task_thread_info(p)->cpu;
2477 }
2478 
2479 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2480 
2481 #else
2482 
2483 static inline unsigned int task_cpu(const struct task_struct *p)
2484 {
2485 	return 0;
2486 }
2487 
2488 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2489 {
2490 }
2491 
2492 #endif /* CONFIG_SMP */
2493 
2494 extern void arch_pick_mmap_layout(struct mm_struct *mm);
2495 
2496 #ifdef CONFIG_TRACING
2497 extern void
2498 __trace_special(void *__tr, void *__data,
2499 		unsigned long arg1, unsigned long arg2, unsigned long arg3);
2500 #else
2501 static inline void
2502 __trace_special(void *__tr, void *__data,
2503 		unsigned long arg1, unsigned long arg2, unsigned long arg3)
2504 {
2505 }
2506 #endif
2507 
2508 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2509 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2510 
2511 extern void normalize_rt_tasks(void);
2512 
2513 #ifdef CONFIG_GROUP_SCHED
2514 
2515 extern struct task_group init_task_group;
2516 #ifdef CONFIG_USER_SCHED
2517 extern struct task_group root_task_group;
2518 extern void set_tg_uid(struct user_struct *user);
2519 #endif
2520 
2521 extern struct task_group *sched_create_group(struct task_group *parent);
2522 extern void sched_destroy_group(struct task_group *tg);
2523 extern void sched_move_task(struct task_struct *tsk);
2524 #ifdef CONFIG_FAIR_GROUP_SCHED
2525 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
2526 extern unsigned long sched_group_shares(struct task_group *tg);
2527 #endif
2528 #ifdef CONFIG_RT_GROUP_SCHED
2529 extern int sched_group_set_rt_runtime(struct task_group *tg,
2530 				      long rt_runtime_us);
2531 extern long sched_group_rt_runtime(struct task_group *tg);
2532 extern int sched_group_set_rt_period(struct task_group *tg,
2533 				      long rt_period_us);
2534 extern long sched_group_rt_period(struct task_group *tg);
2535 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
2536 #endif
2537 #endif
2538 
2539 extern int task_can_switch_user(struct user_struct *up,
2540 					struct task_struct *tsk);
2541 
2542 #ifdef CONFIG_TASK_XACCT
2543 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2544 {
2545 	tsk->ioac.rchar += amt;
2546 }
2547 
2548 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2549 {
2550 	tsk->ioac.wchar += amt;
2551 }
2552 
2553 static inline void inc_syscr(struct task_struct *tsk)
2554 {
2555 	tsk->ioac.syscr++;
2556 }
2557 
2558 static inline void inc_syscw(struct task_struct *tsk)
2559 {
2560 	tsk->ioac.syscw++;
2561 }
2562 #else
2563 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2564 {
2565 }
2566 
2567 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2568 {
2569 }
2570 
2571 static inline void inc_syscr(struct task_struct *tsk)
2572 {
2573 }
2574 
2575 static inline void inc_syscw(struct task_struct *tsk)
2576 {
2577 }
2578 #endif
2579 
2580 #ifndef TASK_SIZE_OF
2581 #define TASK_SIZE_OF(tsk)	TASK_SIZE
2582 #endif
2583 
2584 /*
2585  * Call the function if the target task is executing on a CPU right now:
2586  */
2587 extern void task_oncpu_function_call(struct task_struct *p,
2588 				     void (*func) (void *info), void *info);
2589 
2590 
2591 #ifdef CONFIG_MM_OWNER
2592 extern void mm_update_next_owner(struct mm_struct *mm);
2593 extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2594 #else
2595 static inline void mm_update_next_owner(struct mm_struct *mm)
2596 {
2597 }
2598 
2599 static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2600 {
2601 }
2602 #endif /* CONFIG_MM_OWNER */
2603 
2604 static inline unsigned long task_rlimit(const struct task_struct *tsk,
2605 		unsigned int limit)
2606 {
2607 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
2608 }
2609 
2610 static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
2611 		unsigned int limit)
2612 {
2613 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
2614 }
2615 
2616 static inline unsigned long rlimit(unsigned int limit)
2617 {
2618 	return task_rlimit(current, limit);
2619 }
2620 
2621 static inline unsigned long rlimit_max(unsigned int limit)
2622 {
2623 	return task_rlimit_max(current, limit);
2624 }
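
/*
 * Minimal usage sketch (illustrative): compare a request against the
 * calling task's current soft limit.
 *
 *	if (size > rlimit(RLIMIT_STACK))
 *		... refuse or shrink the request ...
 */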
2625 
2626 #endif /* __KERNEL__ */
2627 
2628 #endif
2629