xref: /linux-6.15/include/linux/sched.h (revision 3f07c014)
1 #ifndef _LINUX_SCHED_H
2 #define _LINUX_SCHED_H
3 
4 #include <uapi/linux/sched.h>
5 
6 #include <linux/sched/prio.h>
7 
8 
9 struct sched_param {
10 	int sched_priority;
11 };
12 
13 #include <asm/param.h>	/* for HZ */
14 
15 #include <linux/capability.h>
16 #include <linux/threads.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/timex.h>
20 #include <linux/jiffies.h>
21 #include <linux/plist.h>
22 #include <linux/rbtree.h>
23 #include <linux/thread_info.h>
24 #include <linux/cpumask.h>
25 #include <linux/errno.h>
26 #include <linux/nodemask.h>
27 #include <linux/mm_types.h>
28 #include <linux/preempt.h>
29 
30 #include <asm/page.h>
31 #include <asm/ptrace.h>
32 
33 #include <linux/smp.h>
34 #include <linux/sem.h>
35 #include <linux/shm.h>
36 #include <linux/signal.h>
37 #include <linux/compiler.h>
38 #include <linux/completion.h>
39 #include <linux/pid.h>
40 #include <linux/percpu.h>
41 #include <linux/topology.h>
42 #include <linux/seccomp.h>
43 #include <linux/rcupdate.h>
44 #include <linux/rculist.h>
45 #include <linux/rtmutex.h>
46 
47 #include <linux/time.h>
48 #include <linux/param.h>
49 #include <linux/resource.h>
50 #include <linux/timer.h>
51 #include <linux/hrtimer.h>
52 #include <linux/kcov.h>
53 #include <linux/task_io_accounting.h>
54 #include <linux/latencytop.h>
55 #include <linux/cred.h>
56 #include <linux/llist.h>
57 #include <linux/uidgid.h>
58 #include <linux/gfp.h>
59 #include <linux/magic.h>
60 #include <linux/cgroup-defs.h>
61 
62 #include <asm/processor.h>
63 
64 #define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */
65 
66 /*
67  * Extended scheduling parameters data structure.
68  *
69  * This is needed because the original struct sched_param can not be
70  * altered without introducing ABI issues with legacy applications
71  * (e.g., in sched_getparam()).
72  *
73  * However, the possibility of specifying more than just a priority for
74  * the tasks may be useful for a wide variety of application fields, e.g.,
75  * multimedia, streaming, automation and control, and many others.
76  *
77  * This variant (sched_attr) is meant to describe a so-called
78  * sporadic time-constrained task. In such a model a task is specified by:
79  *  - the activation period or minimum instance inter-arrival time;
80  *  - the maximum (or average, depending on the actual scheduling
81  *    discipline) computation time of all instances, a.k.a. runtime;
82  *  - the deadline (relative to the actual activation time) of each
83  *    instance.
84  * Very briefly, a periodic (sporadic) task asks for the execution of
85  * some specific computation --which is typically called an instance--
86  * (at most) every period. Moreover, each instance typically lasts no more
87  * than the runtime and must be completed by time instant t equal to
88  * the instance activation time + the deadline.
89  *
90  * This is reflected by the actual fields of the sched_attr structure:
91  *
92  *  @size		size of the structure, for fwd/bwd compat.
93  *
94  *  @sched_policy	task's scheduling policy
95  *  @sched_flags	for customizing the scheduler behaviour
96  *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
97  *  @sched_priority	task's static priority (SCHED_FIFO/RR)
98  *  @sched_deadline	representative of the task's deadline
99  *  @sched_runtime	representative of the task's runtime
100  *  @sched_period	representative of the task's period
101  *
102  * Given this task model, there is a multiplicity of scheduling algorithms
103  * and policies that can be used to ensure all the tasks will meet their
104  * timing constraints.
105  *
106  * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
107  * only user of this new interface. More information about the algorithm is
108  * available in the scheduling class file or in Documentation/.
109  */
110 struct sched_attr {
111 	u32 size;
112 
113 	u32 sched_policy;
114 	u64 sched_flags;
115 
116 	/* SCHED_NORMAL, SCHED_BATCH */
117 	s32 sched_nice;
118 
119 	/* SCHED_FIFO, SCHED_RR */
120 	u32 sched_priority;
121 
122 	/* SCHED_DEADLINE */
123 	u64 sched_runtime;
124 	u64 sched_deadline;
125 	u64 sched_period;
126 };
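/*
 * Illustrative userspace sketch (not part of this header; a hedged example
 * only): filling in sched_attr to request SCHED_DEADLINE with a 10ms runtime,
 * 30ms relative deadline and 100ms period, all in nanoseconds. sched_setattr()
 * usually has no libc wrapper, so it is invoked via syscall(); the parameter
 * values below are arbitrary.
 *
 *	struct sched_attr attr = {
 *		.size           = sizeof(attr),
 *		.sched_policy   = SCHED_DEADLINE,
 *		.sched_runtime  =  10 * 1000 * 1000,
 *		.sched_deadline =  30 * 1000 * 1000,
 *		.sched_period   = 100 * 1000 * 1000,
 *	};
 *
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 */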
127 
128 struct futex_pi_state;
129 struct robust_list_head;
130 struct bio_list;
131 struct fs_struct;
132 struct perf_event_context;
133 struct blk_plug;
134 struct filename;
135 struct nameidata;
136 
137 /*
138  * These are the constants used to fake the fixed-point load-average
139  * counting. Some notes:
140  *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
141  *    a load-average precision of 10 bits integer + 11 bits fractional
142  *  - if you want to count load-averages more often, you need more
143  *    precision, or rounding will get you. With 2-second counting freq,
144  *    the EXP_n values would be 1981, 2034 and 2043 if still using only
145  *    11 bit fractions.
146  */
147 extern unsigned long avenrun[];		/* Load averages */
148 extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
149 
150 #define FSHIFT		11		/* nr of bits of precision */
151 #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
152 #define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
153 #define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
154 #define EXP_5		2014		/* 1/exp(5sec/5min) */
155 #define EXP_15		2037		/* 1/exp(5sec/15min) */
156 
157 #define CALC_LOAD(load,exp,n) \
158 	load *= exp; \
159 	load += n*(FIXED_1-exp); \
160 	load >>= FSHIFT;
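/*
 * Hedged worked example of the fixed-point update above (not from the kernel
 * sources): every LOAD_FREQ ticks the 1-minute average decays towards the
 * number of active tasks. With FIXED_1 = 2048 and EXP_1 = 1884, an idle
 * machine (n = 0) whose 1-minute load is currently 1.0 updates as:
 *
 *	unsigned long load = FIXED_1;	     load == 2048, i.e. 1.0
 *	CALC_LOAD(load, EXP_1, 0);	     load == (2048 * 1884) >> 11 == 1884
 *
 * i.e. the reported load drops to 1884/2048 ~= 0.92 after one 5-second
 * interval, matching exp(-5/60).
 */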
161 
162 extern unsigned long total_forks;
163 extern int nr_threads;
164 DECLARE_PER_CPU(unsigned long, process_counts);
165 extern int nr_processes(void);
166 extern unsigned long nr_running(void);
167 extern bool single_task_running(void);
168 extern unsigned long nr_iowait(void);
169 extern unsigned long nr_iowait_cpu(int cpu);
170 extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
171 
172 extern void calc_global_load(unsigned long ticks);
173 
174 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
175 extern void cpu_load_update_nohz_start(void);
176 extern void cpu_load_update_nohz_stop(void);
177 #else
178 static inline void cpu_load_update_nohz_start(void) { }
179 static inline void cpu_load_update_nohz_stop(void) { }
180 #endif
181 
182 extern void dump_cpu_task(int cpu);
183 
184 struct seq_file;
185 struct cfs_rq;
186 struct task_group;
187 #ifdef CONFIG_SCHED_DEBUG
188 extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
189 extern void proc_sched_set_task(struct task_struct *p);
190 #endif
191 
192 /*
193  * Task state bitmask. NOTE! These bits are also
194  * encoded in fs/proc/array.c: get_task_state().
195  *
196  * We have two separate sets of flags: task->state
197  * is about runnability, while task->exit_state are
198  * about the task exiting. Confusing, but this way
199  * modifying one set can't modify the other one by
200  * mistake.
201  */
202 #define TASK_RUNNING		0
203 #define TASK_INTERRUPTIBLE	1
204 #define TASK_UNINTERRUPTIBLE	2
205 #define __TASK_STOPPED		4
206 #define __TASK_TRACED		8
207 /* in tsk->exit_state */
208 #define EXIT_DEAD		16
209 #define EXIT_ZOMBIE		32
210 #define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
211 /* in tsk->state again */
212 #define TASK_DEAD		64
213 #define TASK_WAKEKILL		128
214 #define TASK_WAKING		256
215 #define TASK_PARKED		512
216 #define TASK_NOLOAD		1024
217 #define TASK_NEW		2048
218 #define TASK_STATE_MAX		4096
219 
220 #define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
221 
222 /* Convenience macros for the sake of set_current_state */
223 #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
224 #define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
225 #define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
226 
227 #define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
228 
229 /* Convenience macros for the sake of wake_up */
230 #define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
231 #define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
232 
233 /* get_task_state() */
234 #define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
235 				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
236 				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
237 
238 #define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
239 #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
240 #define task_is_stopped_or_traced(task)	\
241 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
242 #define task_contributes_to_load(task)	\
243 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
244 				 (task->flags & PF_FROZEN) == 0 && \
245 				 (task->state & TASK_NOLOAD) == 0)
246 
247 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
248 
249 #define __set_current_state(state_value)			\
250 	do {							\
251 		current->task_state_change = _THIS_IP_;		\
252 		current->state = (state_value);			\
253 	} while (0)
254 #define set_current_state(state_value)				\
255 	do {							\
256 		current->task_state_change = _THIS_IP_;		\
257 		smp_store_mb(current->state, (state_value));	\
258 	} while (0)
259 
260 #else
261 /*
262  * set_current_state() includes a barrier so that the write of current->state
263  * is correctly serialised wrt the caller's subsequent test of whether to
264  * actually sleep:
265  *
266  *   for (;;) {
267  *	set_current_state(TASK_UNINTERRUPTIBLE);
268  *	if (!need_sleep)
269  *		break;
270  *
271  *	schedule();
272  *   }
273  *   __set_current_state(TASK_RUNNING);
274  *
275  * If the caller does not need such serialisation (because, for instance, the
276  * condition test and condition change and wakeup are under the same lock) then
277  * use __set_current_state().
278  *
279  * The above is typically ordered against the wakeup, which does:
280  *
281  *	need_sleep = false;
282  *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
283  *
284  * Where wake_up_state() (and all other wakeup primitives) imply enough
285  * barriers to order the store of the variable against wakeup.
286  *
287  * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
288  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
289  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
290  *
291  * This is obviously fine, since they both store the exact same value.
292  *
293  * Also see the comments of try_to_wake_up().
294  */
295 #define __set_current_state(state_value)		\
296 	do { current->state = (state_value); } while (0)
297 #define set_current_state(state_value)			\
298 	smp_store_mb(current->state, (state_value))
299 
300 #endif
301 
302 /* Task command name length */
303 #define TASK_COMM_LEN 16
304 
305 #include <linux/spinlock.h>
306 
307 /*
308  * This serializes "schedule()" and also protects
309  * the run-queue from deletions/modifications (but
310  * _adding_ to the beginning of the run-queue has
311  * a separate lock).
312  */
313 extern rwlock_t tasklist_lock;
314 extern spinlock_t mmlist_lock;
315 
316 struct task_struct;
317 
318 #ifdef CONFIG_PROVE_RCU
319 extern int lockdep_tasklist_lock_is_held(void);
320 #endif /* #ifdef CONFIG_PROVE_RCU */
321 
322 extern void sched_init(void);
323 extern void sched_init_smp(void);
324 extern asmlinkage void schedule_tail(struct task_struct *prev);
325 extern void init_idle(struct task_struct *idle, int cpu);
326 extern void init_idle_bootup_task(struct task_struct *idle);
327 
328 extern cpumask_var_t cpu_isolated_map;
329 
330 extern int runqueue_is_locked(int cpu);
331 
332 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
333 extern void nohz_balance_enter_idle(int cpu);
334 extern void set_cpu_sd_state_idle(void);
335 extern int get_nohz_timer_target(void);
336 #else
337 static inline void nohz_balance_enter_idle(int cpu) { }
338 static inline void set_cpu_sd_state_idle(void) { }
339 #endif
340 
341 /*
342  * Only dump TASK_* tasks. (0 for all tasks)
343  */
344 extern void show_state_filter(unsigned long state_filter);
345 
346 static inline void show_state(void)
347 {
348 	show_state_filter(0);
349 }
350 
351 extern void show_regs(struct pt_regs *);
352 
353 /*
354  * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
355  * task), SP is the stack pointer of the first frame that should be shown in the back
356  * trace (or NULL if the entire call-chain of the task should be shown).
357  */
358 extern void show_stack(struct task_struct *task, unsigned long *sp);
359 
360 extern void cpu_init (void);
361 extern void trap_init(void);
362 extern void update_process_times(int user);
363 extern void scheduler_tick(void);
364 extern int sched_cpu_starting(unsigned int cpu);
365 extern int sched_cpu_activate(unsigned int cpu);
366 extern int sched_cpu_deactivate(unsigned int cpu);
367 
368 #ifdef CONFIG_HOTPLUG_CPU
369 extern int sched_cpu_dying(unsigned int cpu);
370 #else
371 # define sched_cpu_dying	NULL
372 #endif
373 
374 extern void sched_show_task(struct task_struct *p);
375 
376 #ifdef CONFIG_LOCKUP_DETECTOR
377 extern void touch_softlockup_watchdog_sched(void);
378 extern void touch_softlockup_watchdog(void);
379 extern void touch_softlockup_watchdog_sync(void);
380 extern void touch_all_softlockup_watchdogs(void);
381 extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
382 				  void __user *buffer,
383 				  size_t *lenp, loff_t *ppos);
384 extern unsigned int  softlockup_panic;
385 extern unsigned int  hardlockup_panic;
386 void lockup_detector_init(void);
387 #else
388 static inline void touch_softlockup_watchdog_sched(void)
389 {
390 }
391 static inline void touch_softlockup_watchdog(void)
392 {
393 }
394 static inline void touch_softlockup_watchdog_sync(void)
395 {
396 }
397 static inline void touch_all_softlockup_watchdogs(void)
398 {
399 }
400 static inline void lockup_detector_init(void)
401 {
402 }
403 #endif
404 
405 #ifdef CONFIG_DETECT_HUNG_TASK
406 void reset_hung_task_detector(void);
407 #else
408 static inline void reset_hung_task_detector(void)
409 {
410 }
411 #endif
412 
413 /* Attach to any functions which should be ignored in wchan output. */
414 #define __sched		__attribute__((__section__(".sched.text")))
415 
416 /* Linker adds these: start and end of __sched functions */
417 extern char __sched_text_start[], __sched_text_end[];
418 
419 /* Is this address in the __sched functions? */
420 extern int in_sched_functions(unsigned long addr);
421 
422 #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
423 extern signed long schedule_timeout(signed long timeout);
424 extern signed long schedule_timeout_interruptible(signed long timeout);
425 extern signed long schedule_timeout_killable(signed long timeout);
426 extern signed long schedule_timeout_uninterruptible(signed long timeout);
427 extern signed long schedule_timeout_idle(signed long timeout);
428 asmlinkage void schedule(void);
429 extern void schedule_preempt_disabled(void);
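/*
 * Hedged usage sketch (illustrative only, not part of this header): the
 * canonical way to sleep for a bounded time is to set the task state first
 * and then call schedule_timeout(), e.g. to wait up to one second:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *
 * The schedule_timeout_interruptible()/killable()/uninterruptible() variants
 * above are convenience wrappers that set the corresponding state themselves.
 */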
430 
431 extern int __must_check io_schedule_prepare(void);
432 extern void io_schedule_finish(int token);
433 extern long io_schedule_timeout(long timeout);
434 extern void io_schedule(void);
435 
436 void __noreturn do_task_dead(void);
437 
438 struct nsproxy;
439 struct user_namespace;
440 
441 #ifdef CONFIG_MMU
442 extern void arch_pick_mmap_layout(struct mm_struct *mm);
443 extern unsigned long
444 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
445 		       unsigned long, unsigned long);
446 extern unsigned long
447 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
448 			  unsigned long len, unsigned long pgoff,
449 			  unsigned long flags);
450 #else
451 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
452 #endif
453 
454 #define SUID_DUMP_DISABLE	0	/* No setuid dumping */
455 #define SUID_DUMP_USER		1	/* Dump as user of process */
456 #define SUID_DUMP_ROOT		2	/* Dump as root */
457 
458 /* mm flags */
459 
460 /* for SUID_DUMP_* above */
461 #define MMF_DUMPABLE_BITS 2
462 #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
463 
464 extern void set_dumpable(struct mm_struct *mm, int value);
465 /*
466  * This returns the actual value of the suid_dumpable flag. For things
467  * that are using this for checking for privilege transitions, it must
468  * test against SUID_DUMP_USER rather than treating it as a boolean
469  * value.
470  */
471 static inline int __get_dumpable(unsigned long mm_flags)
472 {
473 	return mm_flags & MMF_DUMPABLE_MASK;
474 }
475 
476 static inline int get_dumpable(struct mm_struct *mm)
477 {
478 	return __get_dumpable(mm->flags);
479 }
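/*
 * Hedged example (not from the kernel sources): as noted above, privilege
 * transition checks must compare against SUID_DUMP_USER rather than treating
 * the value as a boolean, e.g.
 *
 *	if (get_dumpable(mm) != SUID_DUMP_USER)
 *		return -EPERM;		(assumed error path, for illustration)
 */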
480 
481 /* coredump filter bits */
482 #define MMF_DUMP_ANON_PRIVATE	2
483 #define MMF_DUMP_ANON_SHARED	3
484 #define MMF_DUMP_MAPPED_PRIVATE	4
485 #define MMF_DUMP_MAPPED_SHARED	5
486 #define MMF_DUMP_ELF_HEADERS	6
487 #define MMF_DUMP_HUGETLB_PRIVATE 7
488 #define MMF_DUMP_HUGETLB_SHARED  8
489 #define MMF_DUMP_DAX_PRIVATE	9
490 #define MMF_DUMP_DAX_SHARED	10
491 
492 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
493 #define MMF_DUMP_FILTER_BITS	9
494 #define MMF_DUMP_FILTER_MASK \
495 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
496 #define MMF_DUMP_FILTER_DEFAULT \
497 	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
498 	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
499 
500 #ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
501 # define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
502 #else
503 # define MMF_DUMP_MASK_DEFAULT_ELF	0
504 #endif
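/*
 * Hedged illustration (assumes CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y): the
 * dump-filter bits above are exported, shifted down by MMF_DUMP_FILTER_SHIFT,
 * through /proc/<pid>/coredump_filter. The default filter then reads as
 *
 *	(1 << (MMF_DUMP_ANON_PRIVATE    - MMF_DUMP_FILTER_SHIFT)) |   bit 0
 *	(1 << (MMF_DUMP_ANON_SHARED     - MMF_DUMP_FILTER_SHIFT)) |   bit 1
 *	(1 << (MMF_DUMP_ELF_HEADERS     - MMF_DUMP_FILTER_SHIFT)) |   bit 4
 *	(1 << (MMF_DUMP_HUGETLB_PRIVATE - MMF_DUMP_FILTER_SHIFT))     bit 5
 *
 * i.e. the familiar default value of 0x33.
 */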
505 					/* leave room for more dump flags */
506 #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
507 #define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
508 /*
509  * This one-shot flag was dropped due to the necessity of changing exe once again
510  * on NFS restore
511  */
512 //#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */
513 
514 #define MMF_HAS_UPROBES		19	/* has uprobes */
515 #define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
516 #define MMF_OOM_SKIP		21	/* mm is of no interest for the OOM killer */
517 #define MMF_UNSTABLE		22	/* mm is unstable for copy_from_user */
518 #define MMF_HUGE_ZERO_PAGE	23      /* mm has ever used the global huge zero page */
519 
520 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
521 
522 struct sighand_struct {
523 	atomic_t		count;
524 	struct k_sigaction	action[_NSIG];
525 	spinlock_t		siglock;
526 	wait_queue_head_t	signalfd_wqh;
527 };
528 
529 struct pacct_struct {
530 	int			ac_flag;
531 	long			ac_exitcode;
532 	unsigned long		ac_mem;
533 	u64			ac_utime, ac_stime;
534 	unsigned long		ac_minflt, ac_majflt;
535 };
536 
537 struct cpu_itimer {
538 	u64 expires;
539 	u64 incr;
540 };
541 
542 /**
543  * struct prev_cputime - snapshot of system and user cputime
544  * @utime: time spent in user mode
545  * @stime: time spent in system mode
546  * @lock: protects the above two fields
547  *
548  * Stores previous user/system time values such that we can guarantee
549  * monotonicity.
550  */
551 struct prev_cputime {
552 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
553 	u64 utime;
554 	u64 stime;
555 	raw_spinlock_t lock;
556 #endif
557 };
558 
559 static inline void prev_cputime_init(struct prev_cputime *prev)
560 {
561 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
562 	prev->utime = prev->stime = 0;
563 	raw_spin_lock_init(&prev->lock);
564 #endif
565 }
566 
567 /**
568  * struct task_cputime - collected CPU time counts
569  * @utime:		time spent in user mode, in nanoseconds
570  * @stime:		time spent in kernel mode, in nanoseconds
571  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
572  *
573  * This structure groups together three kinds of CPU time that are tracked for
574  * threads and thread groups.  Most things considering CPU time want to group
575  * these counts together and treat all three of them in parallel.
576  */
577 struct task_cputime {
578 	u64 utime;
579 	u64 stime;
580 	unsigned long long sum_exec_runtime;
581 };
582 
583 /* Alternate field names when used to cache expirations. */
584 #define virt_exp	utime
585 #define prof_exp	stime
586 #define sched_exp	sum_exec_runtime
587 
588 /*
589  * This is the atomic variant of task_cputime, which can be used for
590  * storing and updating task_cputime statistics without locking.
591  */
592 struct task_cputime_atomic {
593 	atomic64_t utime;
594 	atomic64_t stime;
595 	atomic64_t sum_exec_runtime;
596 };
597 
598 #define INIT_CPUTIME_ATOMIC \
599 	(struct task_cputime_atomic) {				\
600 		.utime = ATOMIC64_INIT(0),			\
601 		.stime = ATOMIC64_INIT(0),			\
602 		.sum_exec_runtime = ATOMIC64_INIT(0),		\
603 	}
604 
605 #define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
606 
607 /*
608  * Disable preemption until the scheduler is running -- use an unconditional
609  * value so that it also works on !PREEMPT_COUNT kernels.
610  *
611  * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
612  */
613 #define INIT_PREEMPT_COUNT	PREEMPT_OFFSET
614 
615 /*
616  * Initial preempt_count value; reflects the preempt_count schedule invariant
617  * which states that during context switches:
618  *
619  *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
620  *
621  * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
622  * Note: See finish_task_switch().
623  */
624 #define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
625 
626 /**
627  * struct thread_group_cputimer - thread group interval timer counts
628  * @cputime_atomic:	atomic thread group interval timers.
629  * @running:		true when there are timers running and
630  *			@cputime_atomic receives updates.
631  * @checking_timer:	true when a thread in the group is in the
632  *			process of checking for thread group timers.
633  *
634  * This structure contains the version of task_cputime, above, that is
635  * used for thread group CPU timer calculations.
636  */
637 struct thread_group_cputimer {
638 	struct task_cputime_atomic cputime_atomic;
639 	bool running;
640 	bool checking_timer;
641 };
642 
643 #include <linux/rwsem.h>
644 struct autogroup;
645 
646 /*
647  * NOTE! "signal_struct" does not have its own
648  * locking, because a shared signal_struct always
649  * implies a shared sighand_struct, so locking
650  * sighand_struct is always a proper superset of
651  * the locking of signal_struct.
652  */
653 struct signal_struct {
654 	atomic_t		sigcnt;
655 	atomic_t		live;
656 	int			nr_threads;
657 	struct list_head	thread_head;
658 
659 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
660 
661 	/* current thread group signal load-balancing target: */
662 	struct task_struct	*curr_target;
663 
664 	/* shared signal handling: */
665 	struct sigpending	shared_pending;
666 
667 	/* thread group exit support */
668 	int			group_exit_code;
669 	/* overloaded:
670 	 * - notify group_exit_task when ->count is equal to notify_count
671 	 * - everyone except group_exit_task is stopped during signal delivery
672 	 *   of fatal signals, group_exit_task processes the signal.
673 	 */
674 	int			notify_count;
675 	struct task_struct	*group_exit_task;
676 
677 	/* thread group stop support, overloads group_exit_code too */
678 	int			group_stop_count;
679 	unsigned int		flags; /* see SIGNAL_* flags below */
680 
681 	/*
682 	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
683 	 * manager, to re-parent orphan (double-forking) child processes
684 	 * to this process instead of 'init'. The service manager is
685 	 * able to receive SIGCHLD signals and is able to investigate
686 	 * the process until it calls wait(). All children of this
687 	 * process will inherit a flag if they should look for a
688 	 * child_subreaper process at exit.
689 	 */
690 	unsigned int		is_child_subreaper:1;
691 	unsigned int		has_child_subreaper:1;
692 
693 #ifdef CONFIG_POSIX_TIMERS
694 
695 	/* POSIX.1b Interval Timers */
696 	int			posix_timer_id;
697 	struct list_head	posix_timers;
698 
699 	/* ITIMER_REAL timer for the process */
700 	struct hrtimer real_timer;
701 	ktime_t it_real_incr;
702 
703 	/*
704 	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
705 	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
706 	 * values are defined to 0 and 1 respectively
707 	 */
708 	struct cpu_itimer it[2];
709 
710 	/*
711 	 * Thread group totals for process CPU timers.
712 	 * See thread_group_cputimer(), et al, for details.
713 	 */
714 	struct thread_group_cputimer cputimer;
715 
716 	/* Earliest-expiration cache. */
717 	struct task_cputime cputime_expires;
718 
719 	struct list_head cpu_timers[3];
720 
721 #endif
722 
723 	struct pid *leader_pid;
724 
725 #ifdef CONFIG_NO_HZ_FULL
726 	atomic_t tick_dep_mask;
727 #endif
728 
729 	struct pid *tty_old_pgrp;
730 
731 	/* boolean value for session group leader */
732 	int leader;
733 
734 	struct tty_struct *tty; /* NULL if no tty */
735 
736 #ifdef CONFIG_SCHED_AUTOGROUP
737 	struct autogroup *autogroup;
738 #endif
739 	/*
740 	 * Cumulative resource counters for dead threads in the group,
741 	 * and for reaped dead child processes forked by this group.
742 	 * Live threads maintain their own counters and add to these
743 	 * in __exit_signal, except for the group leader.
744 	 */
745 	seqlock_t stats_lock;
746 	u64 utime, stime, cutime, cstime;
747 	u64 gtime;
748 	u64 cgtime;
749 	struct prev_cputime prev_cputime;
750 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
751 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
752 	unsigned long inblock, oublock, cinblock, coublock;
753 	unsigned long maxrss, cmaxrss;
754 	struct task_io_accounting ioac;
755 
756 	/*
757 	 * Cumulative ns of scheduled CPU time of dead threads in the
758 	 * group, not including a zombie group leader. (This only differs
759 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
760 	 * other than jiffies.)
761 	 */
762 	unsigned long long sum_sched_runtime;
763 
764 	/*
765 	 * We don't bother to synchronize most readers of this at all,
766 	 * because there is no reader checking a limit that actually needs
767 	 * to get both rlim_cur and rlim_max atomically, and either one
768 	 * alone is a single word that can safely be read normally.
769 	 * getrlimit/setrlimit use task_lock(current->group_leader) to
770 	 * protect this instead of the siglock, because they really
771 	 * have no need to disable irqs.
772 	 */
773 	struct rlimit rlim[RLIM_NLIMITS];
774 
775 #ifdef CONFIG_BSD_PROCESS_ACCT
776 	struct pacct_struct pacct;	/* per-process accounting information */
777 #endif
778 #ifdef CONFIG_TASKSTATS
779 	struct taskstats *stats;
780 #endif
781 #ifdef CONFIG_AUDIT
782 	unsigned audit_tty;
783 	struct tty_audit_buf *tty_audit_buf;
784 #endif
785 
786 	/*
787 	 * Thread is the potential origin of an oom condition; kill first on
788 	 * oom
789 	 */
790 	bool oom_flag_origin;
791 	short oom_score_adj;		/* OOM kill score adjustment */
792 	short oom_score_adj_min;	/* OOM kill score adjustment min value.
793 					 * Only settable by CAP_SYS_RESOURCE. */
794 	struct mm_struct *oom_mm;	/* recorded mm when the thread group got
795 					 * killed by the oom killer */
796 
797 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
798 					 * credential calculations
799 					 * (notably ptrace) */
800 };
801 
802 /*
803  * Bits in flags field of signal_struct.
804  */
805 #define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
806 #define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
807 #define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
808 #define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
809 /*
810  * Pending notifications to parent.
811  */
812 #define SIGNAL_CLD_STOPPED	0x00000010
813 #define SIGNAL_CLD_CONTINUED	0x00000020
814 #define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
815 
816 #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
817 
818 #define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
819 			  SIGNAL_STOP_CONTINUED)
820 
821 static inline void signal_set_stop_flags(struct signal_struct *sig,
822 					 unsigned int flags)
823 {
824 	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
825 	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
826 }
827 
828 /* If true, all threads except ->group_exit_task have pending SIGKILL */
829 static inline int signal_group_exit(const struct signal_struct *sig)
830 {
831 	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
832 		(sig->group_exit_task != NULL);
833 }
834 
835 /*
836  * Some day this will be a full-fledged user tracking system..
837  */
838 struct user_struct {
839 	atomic_t __count;	/* reference count */
840 	atomic_t processes;	/* How many processes does this user have? */
841 	atomic_t sigpending;	/* How many pending signals does this user have? */
842 #ifdef CONFIG_FANOTIFY
843 	atomic_t fanotify_listeners;
844 #endif
845 #ifdef CONFIG_EPOLL
846 	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
847 #endif
848 #ifdef CONFIG_POSIX_MQUEUE
849 	/* protected by mq_lock	*/
850 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
851 #endif
852 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
853 	unsigned long unix_inflight;	/* How many files in flight in unix sockets */
854 	atomic_long_t pipe_bufs;  /* how many pages are allocated in pipe buffers */
855 
856 #ifdef CONFIG_KEYS
857 	struct key *uid_keyring;	/* UID specific keyring */
858 	struct key *session_keyring;	/* UID's default session keyring */
859 #endif
860 
861 	/* Hash table maintenance information */
862 	struct hlist_node uidhash_node;
863 	kuid_t uid;
864 
865 #if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
866 	atomic_long_t locked_vm;
867 #endif
868 };
869 
870 extern int uids_sysfs_init(void);
871 
872 extern struct user_struct *find_user(kuid_t);
873 
874 extern struct user_struct root_user;
875 #define INIT_USER (&root_user)
876 
877 
878 struct backing_dev_info;
879 struct reclaim_state;
880 
881 #ifdef CONFIG_SCHED_INFO
882 struct sched_info {
883 	/* cumulative counters */
884 	unsigned long pcount;	      /* # of times run on this cpu */
885 	unsigned long long run_delay; /* time spent waiting on a runqueue */
886 
887 	/* timestamps */
888 	unsigned long long last_arrival,/* when we last ran on a cpu */
889 			   last_queued;	/* when we were last queued to run */
890 };
891 #endif /* CONFIG_SCHED_INFO */
892 
893 #ifdef CONFIG_TASK_DELAY_ACCT
894 struct task_delay_info {
895 	spinlock_t	lock;
896 	unsigned int	flags;	/* Private per-task flags */
897 
898 	/* For each stat XXX, add the following, aligned appropriately
899 	 *
900 	 * struct timespec XXX_start, XXX_end;
901 	 * u64 XXX_delay;
902 	 * u32 XXX_count;
903 	 *
904 	 * Atomicity of updates to XXX_delay, XXX_count protected by
905 	 * single lock above (split into XXX_lock if contention is an issue).
906 	 */
907 
908 	/*
909 	 * XXX_count is incremented on every XXX operation, the delay
910 	 * associated with the operation is added to XXX_delay.
911 	 * XXX_delay contains the accumulated delay time in nanoseconds.
912 	 */
913 	u64 blkio_start;	/* Shared by blkio, swapin */
914 	u64 blkio_delay;	/* wait for sync block io completion */
915 	u64 swapin_delay;	/* wait for swapin block io completion */
916 	u32 blkio_count;	/* total count of the number of sync block */
917 				/* io operations performed */
918 	u32 swapin_count;	/* total count of the number of swapin block */
919 				/* io operations performed */
920 
921 	u64 freepages_start;
922 	u64 freepages_delay;	/* wait for memory reclaim */
923 	u32 freepages_count;	/* total count of memory reclaim */
924 };
925 #endif	/* CONFIG_TASK_DELAY_ACCT */
926 
927 static inline int sched_info_on(void)
928 {
929 #ifdef CONFIG_SCHEDSTATS
930 	return 1;
931 #elif defined(CONFIG_TASK_DELAY_ACCT)
932 	extern int delayacct_on;
933 	return delayacct_on;
934 #else
935 	return 0;
936 #endif
937 }
938 
939 #ifdef CONFIG_SCHEDSTATS
940 void force_schedstat_enabled(void);
941 #endif
942 
943 enum cpu_idle_type {
944 	CPU_IDLE,
945 	CPU_NOT_IDLE,
946 	CPU_NEWLY_IDLE,
947 	CPU_MAX_IDLE_TYPES
948 };
949 
950 /*
951  * Integer metrics need fixed point arithmetic, e.g., sched/fair
952  * has a few: load, load_avg, util_avg, freq, and capacity.
953  *
954  * We define a basic fixed point arithmetic range, and then formalize
955  * all these metrics based on that basic range.
956  */
957 # define SCHED_FIXEDPOINT_SHIFT	10
958 # define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)
959 
960 /*
961  * Increase resolution of cpu_capacity calculations
962  */
963 #define SCHED_CAPACITY_SHIFT	SCHED_FIXEDPOINT_SHIFT
964 #define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)
965 
966 /*
967  * Wake-queues are lists of tasks with a pending wakeup, whose
968  * callers have already marked the task as woken internally,
969  * and can thus carry on. A common use case is being able to
970  * do the wakeups once the corresponding user lock has been
971  * released.
972  *
973  * We hold reference to each task in the list across the wakeup,
974  * thus guaranteeing that the memory is still valid by the time
975  * the actual wakeups are performed in wake_up_q().
976  *
977  * One per task suffices, because there's never a need for a task to be
978  * in two wake queues simultaneously; it is forbidden to abandon a task
979  * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
980  * already in a wake queue, the wakeup will happen soon and the second
981  * waker can just skip it.
982  *
983  * The DEFINE_WAKE_Q macro declares and initializes the list head.
984  * wake_up_q() does NOT reinitialize the list; it's expected to be
985  * called near the end of a function. Otherwise, the list can be
986  * re-initialized for later re-use by wake_q_init().
987  *
988  * Note that this can cause spurious wakeups. schedule() callers
989  * must ensure the call is done inside a loop, confirming that the
990  * wakeup condition has in fact occurred.
991  */
992 struct wake_q_node {
993 	struct wake_q_node *next;
994 };
995 
996 struct wake_q_head {
997 	struct wake_q_node *first;
998 	struct wake_q_node **lastp;
999 };
1000 
1001 #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
1002 
1003 #define DEFINE_WAKE_Q(name)				\
1004 	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
1005 
1006 static inline void wake_q_init(struct wake_q_head *head)
1007 {
1008 	head->first = WAKE_Q_TAIL;
1009 	head->lastp = &head->first;
1010 }
1011 
1012 extern void wake_q_add(struct wake_q_head *head,
1013 		       struct task_struct *task);
1014 extern void wake_up_q(struct wake_q_head *head);
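/*
 * Hedged usage sketch (illustrative only): a typical caller batches wakeups
 * while holding a lock and issues them after dropping it, e.g.
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&some_lock);
 *	...
 *	wake_q_add(&wake_q, task);
 *	...
 *	raw_spin_unlock(&some_lock);
 *	wake_up_q(&wake_q);
 *
 * "some_lock" and "task" are stand-ins; the point is only that wake_up_q()
 * runs after the lock is released, as described in the comment above.
 */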
1015 
1016 /*
1017  * sched-domains (multiprocessor balancing) declarations:
1018  */
1019 #ifdef CONFIG_SMP
1020 #define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
1021 #define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
1022 #define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
1023 #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
1024 #define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
1025 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
1026 #define SD_ASYM_CPUCAPACITY	0x0040  /* Groups have different max cpu capacities */
1027 #define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu capacity */
1028 #define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
1029 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
1030 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
1031 #define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
1032 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
1033 #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
1034 #define SD_NUMA			0x4000	/* cross-node balancing */
1035 
1036 #ifdef CONFIG_SCHED_SMT
1037 static inline int cpu_smt_flags(void)
1038 {
1039 	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
1040 }
1041 #endif
1042 
1043 #ifdef CONFIG_SCHED_MC
1044 static inline int cpu_core_flags(void)
1045 {
1046 	return SD_SHARE_PKG_RESOURCES;
1047 }
1048 #endif
1049 
1050 #ifdef CONFIG_NUMA
1051 static inline int cpu_numa_flags(void)
1052 {
1053 	return SD_NUMA;
1054 }
1055 #endif
1056 
1057 extern int arch_asym_cpu_priority(int cpu);
1058 
1059 struct sched_domain_attr {
1060 	int relax_domain_level;
1061 };
1062 
1063 #define SD_ATTR_INIT	(struct sched_domain_attr) {	\
1064 	.relax_domain_level = -1,			\
1065 }
1066 
1067 extern int sched_domain_level_max;
1068 
1069 struct sched_group;
1070 
1071 struct sched_domain_shared {
1072 	atomic_t	ref;
1073 	atomic_t	nr_busy_cpus;
1074 	int		has_idle_cores;
1075 };
1076 
1077 struct sched_domain {
1078 	/* These fields must be setup */
1079 	struct sched_domain *parent;	/* top domain must be null terminated */
1080 	struct sched_domain *child;	/* bottom domain must be null terminated */
1081 	struct sched_group *groups;	/* the balancing groups of the domain */
1082 	unsigned long min_interval;	/* Minimum balance interval ms */
1083 	unsigned long max_interval;	/* Maximum balance interval ms */
1084 	unsigned int busy_factor;	/* less balancing by factor if busy */
1085 	unsigned int imbalance_pct;	/* No balance until over watermark */
1086 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
1087 	unsigned int busy_idx;
1088 	unsigned int idle_idx;
1089 	unsigned int newidle_idx;
1090 	unsigned int wake_idx;
1091 	unsigned int forkexec_idx;
1092 	unsigned int smt_gain;
1093 
1094 	int nohz_idle;			/* NOHZ IDLE status */
1095 	int flags;			/* See SD_* */
1096 	int level;
1097 
1098 	/* Runtime fields. */
1099 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
1100 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
1101 	unsigned int nr_balance_failed; /* initialise to 0 */
1102 
1103 	/* idle_balance() stats */
1104 	u64 max_newidle_lb_cost;
1105 	unsigned long next_decay_max_lb_cost;
1106 
1107 	u64 avg_scan_cost;		/* select_idle_sibling */
1108 
1109 #ifdef CONFIG_SCHEDSTATS
1110 	/* load_balance() stats */
1111 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
1112 	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
1113 	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
1114 	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
1115 	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
1116 	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
1117 	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
1118 	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
1119 
1120 	/* Active load balancing */
1121 	unsigned int alb_count;
1122 	unsigned int alb_failed;
1123 	unsigned int alb_pushed;
1124 
1125 	/* SD_BALANCE_EXEC stats */
1126 	unsigned int sbe_count;
1127 	unsigned int sbe_balanced;
1128 	unsigned int sbe_pushed;
1129 
1130 	/* SD_BALANCE_FORK stats */
1131 	unsigned int sbf_count;
1132 	unsigned int sbf_balanced;
1133 	unsigned int sbf_pushed;
1134 
1135 	/* try_to_wake_up() stats */
1136 	unsigned int ttwu_wake_remote;
1137 	unsigned int ttwu_move_affine;
1138 	unsigned int ttwu_move_balance;
1139 #endif
1140 #ifdef CONFIG_SCHED_DEBUG
1141 	char *name;
1142 #endif
1143 	union {
1144 		void *private;		/* used during construction */
1145 		struct rcu_head rcu;	/* used during destruction */
1146 	};
1147 	struct sched_domain_shared *shared;
1148 
1149 	unsigned int span_weight;
1150 	/*
1151 	 * Span of all CPUs in this domain.
1152 	 *
1153 	 * NOTE: this field is variable length. (Allocated dynamically
1154 	 * by attaching extra space to the end of the structure,
1155 	 * depending on how many CPUs the kernel has booted up with)
1156 	 */
1157 	unsigned long span[0];
1158 };
1159 
1160 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1161 {
1162 	return to_cpumask(sd->span);
1163 }
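/*
 * Hedged example (not from this file): callers typically walk the span with
 * the regular cpumask iterators, e.g.
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, sched_domain_span(sd))
 *		...;
 */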
1164 
1165 extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1166 				    struct sched_domain_attr *dattr_new);
1167 
1168 /* Allocate an array of sched domains, for partition_sched_domains(). */
1169 cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1170 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1171 
1172 bool cpus_share_cache(int this_cpu, int that_cpu);
1173 
1174 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1175 typedef int (*sched_domain_flags_f)(void);
1176 
1177 #define SDTL_OVERLAP	0x01
1178 
1179 struct sd_data {
1180 	struct sched_domain **__percpu sd;
1181 	struct sched_domain_shared **__percpu sds;
1182 	struct sched_group **__percpu sg;
1183 	struct sched_group_capacity **__percpu sgc;
1184 };
1185 
1186 struct sched_domain_topology_level {
1187 	sched_domain_mask_f mask;
1188 	sched_domain_flags_f sd_flags;
1189 	int		    flags;
1190 	int		    numa_level;
1191 	struct sd_data      data;
1192 #ifdef CONFIG_SCHED_DEBUG
1193 	char                *name;
1194 #endif
1195 };
1196 
1197 extern void set_sched_topology(struct sched_domain_topology_level *tl);
1198 extern void wake_up_if_idle(int cpu);
1199 
1200 #ifdef CONFIG_SCHED_DEBUG
1201 # define SD_INIT_NAME(type)		.name = #type
1202 #else
1203 # define SD_INIT_NAME(type)
1204 #endif
1205 
1206 #else /* CONFIG_SMP */
1207 
1208 struct sched_domain_attr;
1209 
1210 static inline void
1211 partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1212 			struct sched_domain_attr *dattr_new)
1213 {
1214 }
1215 
1216 static inline bool cpus_share_cache(int this_cpu, int that_cpu)
1217 {
1218 	return true;
1219 }
1220 
1221 #endif	/* !CONFIG_SMP */
1222 
1223 
1224 struct io_context;			/* See blkdev.h */
1225 
1226 
1227 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1228 extern void prefetch_stack(struct task_struct *t);
1229 #else
1230 static inline void prefetch_stack(struct task_struct *t) { }
1231 #endif
1232 
1233 struct audit_context;		/* See audit.c */
1234 struct mempolicy;
1235 struct pipe_inode_info;
1236 struct uts_namespace;
1237 
1238 struct load_weight {
1239 	unsigned long weight;
1240 	u32 inv_weight;
1241 };
1242 
1243 /*
1244  * The load_avg/util_avg accumulates an infinite geometric series
1245  * (see __update_load_avg() in kernel/sched/fair.c).
1246  *
1247  * [load_avg definition]
1248  *
1249  *   load_avg = runnable% * scale_load_down(load)
1250  *
1251  * where runnable% is the time ratio that a sched_entity is runnable.
1252  * For cfs_rq, it is the aggregated load_avg of all runnable and
1253  * blocked sched_entities.
1254  *
1255  * load_avg may also take frequency scaling into account:
1256  *
1257  *   load_avg = runnable% * scale_load_down(load) * freq%
1258  *
1259  * where freq% is the CPU frequency normalized to the highest frequency.
1260  *
1261  * [util_avg definition]
1262  *
1263  *   util_avg = running% * SCHED_CAPACITY_SCALE
1264  *
1265  * where running% is the time ratio that a sched_entity is running on
1266  * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
1267  * and blocked sched_entities.
1268  *
1269  * util_avg may also factor frequency scaling and CPU capacity scaling:
1270  *
1271  *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
1272  *
1273  * where freq% is the same as above, and capacity% is the CPU capacity
1274  * normalized to the greatest capacity (due to uarch differences, etc).
1275  *
1276  * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
1277  * themselves are in the range of [0, 1]. To do fixed point arithmetic,
1278  * we therefore scale them to as large a range as necessary. This is for
1279  * example reflected by util_avg's SCHED_CAPACITY_SCALE.
1280  *
1281  * [Overflow issue]
1282  *
1283  * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
1284  * with the highest load (=88761), always runnable on a single cfs_rq,
1285  * and should not overflow, since that number already exceeds PID_MAX_LIMIT.
1286  *
1287  * For all other cases (including 32-bit kernels), struct load_weight's
1288  * weight will overflow first before we do, because:
1289  *
1290  *    Max(load_avg) <= Max(load.weight)
1291  *
1292  * Then it is the load_weight's responsibility to consider overflow
1293  * issues.
1294  */
1295 struct sched_avg {
1296 	u64 last_update_time, load_sum;
1297 	u32 util_sum, period_contrib;
1298 	unsigned long load_avg, util_avg;
1299 };
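/*
 * Hedged numeric illustration (not from the sources): with
 * SCHED_CAPACITY_SCALE = 1024, a task that runs roughly half the time on a
 * full-capacity, full-frequency CPU converges towards util_avg ~= 512, while
 * an always-running task converges towards ~1024.
 */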
1300 
1301 #ifdef CONFIG_SCHEDSTATS
1302 struct sched_statistics {
1303 	u64			wait_start;
1304 	u64			wait_max;
1305 	u64			wait_count;
1306 	u64			wait_sum;
1307 	u64			iowait_count;
1308 	u64			iowait_sum;
1309 
1310 	u64			sleep_start;
1311 	u64			sleep_max;
1312 	s64			sum_sleep_runtime;
1313 
1314 	u64			block_start;
1315 	u64			block_max;
1316 	u64			exec_max;
1317 	u64			slice_max;
1318 
1319 	u64			nr_migrations_cold;
1320 	u64			nr_failed_migrations_affine;
1321 	u64			nr_failed_migrations_running;
1322 	u64			nr_failed_migrations_hot;
1323 	u64			nr_forced_migrations;
1324 
1325 	u64			nr_wakeups;
1326 	u64			nr_wakeups_sync;
1327 	u64			nr_wakeups_migrate;
1328 	u64			nr_wakeups_local;
1329 	u64			nr_wakeups_remote;
1330 	u64			nr_wakeups_affine;
1331 	u64			nr_wakeups_affine_attempts;
1332 	u64			nr_wakeups_passive;
1333 	u64			nr_wakeups_idle;
1334 };
1335 #endif
1336 
1337 struct sched_entity {
1338 	struct load_weight	load;		/* for load-balancing */
1339 	struct rb_node		run_node;
1340 	struct list_head	group_node;
1341 	unsigned int		on_rq;
1342 
1343 	u64			exec_start;
1344 	u64			sum_exec_runtime;
1345 	u64			vruntime;
1346 	u64			prev_sum_exec_runtime;
1347 
1348 	u64			nr_migrations;
1349 
1350 #ifdef CONFIG_SCHEDSTATS
1351 	struct sched_statistics statistics;
1352 #endif
1353 
1354 #ifdef CONFIG_FAIR_GROUP_SCHED
1355 	int			depth;
1356 	struct sched_entity	*parent;
1357 	/* rq on which this entity is (to be) queued: */
1358 	struct cfs_rq		*cfs_rq;
1359 	/* rq "owned" by this entity/group: */
1360 	struct cfs_rq		*my_q;
1361 #endif
1362 
1363 #ifdef CONFIG_SMP
1364 	/*
1365 	 * Per entity load average tracking.
1366 	 *
1367 	 * Put into separate cache line so it does not
1368 	 * collide with read-mostly values above.
1369 	 */
1370 	struct sched_avg	avg ____cacheline_aligned_in_smp;
1371 #endif
1372 };
1373 
1374 struct sched_rt_entity {
1375 	struct list_head run_list;
1376 	unsigned long timeout;
1377 	unsigned long watchdog_stamp;
1378 	unsigned int time_slice;
1379 	unsigned short on_rq;
1380 	unsigned short on_list;
1381 
1382 	struct sched_rt_entity *back;
1383 #ifdef CONFIG_RT_GROUP_SCHED
1384 	struct sched_rt_entity	*parent;
1385 	/* rq on which this entity is (to be) queued: */
1386 	struct rt_rq		*rt_rq;
1387 	/* rq "owned" by this entity/group: */
1388 	struct rt_rq		*my_q;
1389 #endif
1390 };
1391 
1392 struct sched_dl_entity {
1393 	struct rb_node	rb_node;
1394 
1395 	/*
1396 	 * Original scheduling parameters. Copied here from sched_attr
1397 	 * during sched_setattr(), they will remain the same until
1398 	 * the next sched_setattr().
1399 	 */
1400 	u64 dl_runtime;		/* maximum runtime for each instance	*/
1401 	u64 dl_deadline;	/* relative deadline of each instance	*/
1402 	u64 dl_period;		/* separation of two instances (period) */
1403 	u64 dl_bw;		/* dl_runtime / dl_deadline		*/
1404 
1405 	/*
1406 	 * Actual scheduling parameters. Initialized with the values above,
1407 	 * they are continuously updated during task execution. Note that
1408 	 * the remaining runtime could be < 0 in case we are in overrun.
1409 	 */
1410 	s64 runtime;		/* remaining runtime for this instance	*/
1411 	u64 deadline;		/* absolute deadline for this instance	*/
1412 	unsigned int flags;	/* specifying the scheduler behaviour	*/
1413 
1414 	/*
1415 	 * Some bool flags:
1416 	 *
1417 	 * @dl_throttled tells if we exhausted the runtime. If so, the
1418 	 * task has to wait for a replenishment to be performed at the
1419 	 * next firing of dl_timer.
1420 	 *
1421 	 * @dl_boosted tells if we are boosted due to PI. If so we are
1422 	 * outside bandwidth enforcement mechanism (but only until we
1423 	 * exit the critical section);
1424 	 *
1425 	 * @dl_yielded tells if task gave up the cpu before consuming
1426 	 * all its available runtime during the last job.
1427 	 */
1428 	int dl_throttled, dl_boosted, dl_yielded;
1429 
1430 	/*
1431 	 * Bandwidth enforcement timer. Each -deadline task has its
1432 	 * own bandwidth to be enforced, thus we need one timer per task.
1433 	 */
1434 	struct hrtimer dl_timer;
1435 };
1436 
1437 union rcu_special {
1438 	struct {
1439 		u8 blocked;
1440 		u8 need_qs;
1441 		u8 exp_need_qs;
1442 		u8 pad;	/* Otherwise the compiler can store garbage here. */
1443 	} b; /* Bits. */
1444 	u32 s; /* Set of bits. */
1445 };
1446 struct rcu_node;
1447 
1448 enum perf_event_task_context {
1449 	perf_invalid_context = -1,
1450 	perf_hw_context = 0,
1451 	perf_sw_context,
1452 	perf_nr_task_contexts,
1453 };
1454 
1455 /* Track pages that require TLB flushes */
1456 struct tlbflush_unmap_batch {
1457 	/*
1458 	 * Each bit set is a CPU that potentially has a TLB entry for one of
1459 	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
1460 	 */
1461 	struct cpumask cpumask;
1462 
1463 	/* True if any bit in cpumask is set */
1464 	bool flush_required;
1465 
1466 	/*
1467 	 * If true then the PTE was dirty when unmapped. The entry must be
1468 	 * flushed before IO is initiated or a stale TLB entry potentially
1469 	 * allows an update without redirtying the page.
1470 	 */
1471 	bool writable;
1472 };
1473 
1474 struct task_struct {
1475 #ifdef CONFIG_THREAD_INFO_IN_TASK
1476 	/*
1477 	 * For reasons of header soup (see current_thread_info()), this
1478 	 * must be the first element of task_struct.
1479 	 */
1480 	struct thread_info thread_info;
1481 #endif
1482 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1483 	void *stack;
1484 	atomic_t usage;
1485 	unsigned int flags;	/* per process flags, defined below */
1486 	unsigned int ptrace;
1487 
1488 #ifdef CONFIG_SMP
1489 	struct llist_node wake_entry;
1490 	int on_cpu;
1491 #ifdef CONFIG_THREAD_INFO_IN_TASK
1492 	unsigned int cpu;	/* current CPU */
1493 #endif
1494 	unsigned int wakee_flips;
1495 	unsigned long wakee_flip_decay_ts;
1496 	struct task_struct *last_wakee;
1497 
1498 	int wake_cpu;
1499 #endif
1500 	int on_rq;
1501 
1502 	int prio, static_prio, normal_prio;
1503 	unsigned int rt_priority;
1504 	const struct sched_class *sched_class;
1505 	struct sched_entity se;
1506 	struct sched_rt_entity rt;
1507 #ifdef CONFIG_CGROUP_SCHED
1508 	struct task_group *sched_task_group;
1509 #endif
1510 	struct sched_dl_entity dl;
1511 
1512 #ifdef CONFIG_PREEMPT_NOTIFIERS
1513 	/* list of struct preempt_notifier: */
1514 	struct hlist_head preempt_notifiers;
1515 #endif
1516 
1517 #ifdef CONFIG_BLK_DEV_IO_TRACE
1518 	unsigned int btrace_seq;
1519 #endif
1520 
1521 	unsigned int policy;
1522 	int nr_cpus_allowed;
1523 	cpumask_t cpus_allowed;
1524 
1525 #ifdef CONFIG_PREEMPT_RCU
1526 	int rcu_read_lock_nesting;
1527 	union rcu_special rcu_read_unlock_special;
1528 	struct list_head rcu_node_entry;
1529 	struct rcu_node *rcu_blocked_node;
1530 #endif /* #ifdef CONFIG_PREEMPT_RCU */
1531 #ifdef CONFIG_TASKS_RCU
1532 	unsigned long rcu_tasks_nvcsw;
1533 	bool rcu_tasks_holdout;
1534 	struct list_head rcu_tasks_holdout_list;
1535 	int rcu_tasks_idle_cpu;
1536 #endif /* #ifdef CONFIG_TASKS_RCU */
1537 
1538 #ifdef CONFIG_SCHED_INFO
1539 	struct sched_info sched_info;
1540 #endif
1541 
1542 	struct list_head tasks;
1543 #ifdef CONFIG_SMP
1544 	struct plist_node pushable_tasks;
1545 	struct rb_node pushable_dl_tasks;
1546 #endif
1547 
1548 	struct mm_struct *mm, *active_mm;
1549 
1550 	/* Per-thread vma caching: */
1551 	struct vmacache vmacache;
1552 
1553 #if defined(SPLIT_RSS_COUNTING)
1554 	struct task_rss_stat	rss_stat;
1555 #endif
1556 /* task state */
1557 	int exit_state;
1558 	int exit_code, exit_signal;
1559 	int pdeath_signal;  /*  The signal sent when the parent dies  */
1560 	unsigned long jobctl;	/* JOBCTL_*, siglock protected */
1561 
1562 	/* Used for emulating ABI behavior of previous Linux versions */
1563 	unsigned int personality;
1564 
1565 	/* scheduler bits, serialized by scheduler locks */
1566 	unsigned sched_reset_on_fork:1;
1567 	unsigned sched_contributes_to_load:1;
1568 	unsigned sched_migrated:1;
1569 	unsigned sched_remote_wakeup:1;
1570 	unsigned :0; /* force alignment to the next boundary */
1571 
1572 	/* unserialized, strictly 'current' */
1573 	unsigned in_execve:1; /* bit to tell LSMs we're in execve */
1574 	unsigned in_iowait:1;
1575 #if !defined(TIF_RESTORE_SIGMASK)
1576 	unsigned restore_sigmask:1;
1577 #endif
1578 #ifdef CONFIG_MEMCG
1579 	unsigned memcg_may_oom:1;
1580 #ifndef CONFIG_SLOB
1581 	unsigned memcg_kmem_skip_account:1;
1582 #endif
1583 #endif
1584 #ifdef CONFIG_COMPAT_BRK
1585 	unsigned brk_randomized:1;
1586 #endif
1587 
1588 	unsigned long atomic_flags; /* Flags needing atomic access. */
1589 
1590 	struct restart_block restart_block;
1591 
1592 	pid_t pid;
1593 	pid_t tgid;
1594 
1595 #ifdef CONFIG_CC_STACKPROTECTOR
1596 	/* Canary value for the -fstack-protector gcc feature */
1597 	unsigned long stack_canary;
1598 #endif
1599 	/*
1600 	 * pointers to (original) parent process, youngest child, younger sibling,
1601 	 * older sibling, respectively.  (p->father can be replaced with
1602 	 * p->real_parent->pid)
1603 	 */
1604 	struct task_struct __rcu *real_parent; /* real parent process */
1605 	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
1606 	/*
1607 	 * children/sibling forms the list of my natural children
1608 	 */
1609 	struct list_head children;	/* list of my children */
1610 	struct list_head sibling;	/* linkage in my parent's children list */
1611 	struct task_struct *group_leader;	/* threadgroup leader */
1612 
1613 	/*
1614 	 * ptraced is the list of tasks this task is using ptrace on.
1615 	 * This includes both natural children and PTRACE_ATTACH targets.
1616 	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1617 	 */
1618 	struct list_head ptraced;
1619 	struct list_head ptrace_entry;
1620 
1621 	/* PID/PID hash table linkage. */
1622 	struct pid_link pids[PIDTYPE_MAX];
1623 	struct list_head thread_group;
1624 	struct list_head thread_node;
1625 
1626 	struct completion *vfork_done;		/* for vfork() */
1627 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
1628 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
1629 
1630 	u64 utime, stime;
1631 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1632 	u64 utimescaled, stimescaled;
1633 #endif
1634 	u64 gtime;
1635 	struct prev_cputime prev_cputime;
1636 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1637 	seqcount_t vtime_seqcount;
1638 	unsigned long long vtime_snap;
1639 	enum {
1640 		/* Task is sleeping or running in a CPU with VTIME inactive */
1641 		VTIME_INACTIVE = 0,
1642 		/* Task runs in userspace in a CPU with VTIME active */
1643 		VTIME_USER,
1644 		/* Task runs in kernelspace in a CPU with VTIME active */
1645 		VTIME_SYS,
1646 	} vtime_snap_whence;
1647 #endif
1648 
1649 #ifdef CONFIG_NO_HZ_FULL
1650 	atomic_t tick_dep_mask;
1651 #endif
1652 	unsigned long nvcsw, nivcsw; /* context switch counts */
1653 	u64 start_time;		/* monotonic time in nsec */
1654 	u64 real_start_time;	/* boot based time in nsec */
1655 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1656 	unsigned long min_flt, maj_flt;
1657 
1658 #ifdef CONFIG_POSIX_TIMERS
1659 	struct task_cputime cputime_expires;
1660 	struct list_head cpu_timers[3];
1661 #endif
1662 
1663 /* process credentials */
1664 	const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
1665 	const struct cred __rcu *real_cred; /* objective and real subjective task
1666 					 * credentials (COW) */
1667 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
1668 					 * credentials (COW) */
1669 	char comm[TASK_COMM_LEN]; /* executable name excluding path
1670 				     - access with [gs]et_task_comm (which lock
1671 				       it with task_lock())
1672 				     - initialized normally by setup_new_exec */
1673 /* file system info */
1674 	struct nameidata *nameidata;
1675 #ifdef CONFIG_SYSVIPC
1676 /* ipc stuff */
1677 	struct sysv_sem sysvsem;
1678 	struct sysv_shm sysvshm;
1679 #endif
1680 #ifdef CONFIG_DETECT_HUNG_TASK
1681 /* hung task detection */
1682 	unsigned long last_switch_count;
1683 #endif
1684 /* filesystem information */
1685 	struct fs_struct *fs;
1686 /* open file information */
1687 	struct files_struct *files;
1688 /* namespaces */
1689 	struct nsproxy *nsproxy;
1690 /* signal handlers */
1691 	struct signal_struct *signal;
1692 	struct sighand_struct *sighand;
1693 
1694 	sigset_t blocked, real_blocked;
1695 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
1696 	struct sigpending pending;
1697 
1698 	unsigned long sas_ss_sp;
1699 	size_t sas_ss_size;
1700 	unsigned sas_ss_flags;
1701 
1702 	struct callback_head *task_works;
1703 
1704 	struct audit_context *audit_context;
1705 #ifdef CONFIG_AUDITSYSCALL
1706 	kuid_t loginuid;
1707 	unsigned int sessionid;
1708 #endif
1709 	struct seccomp seccomp;
1710 
1711 /* Thread group tracking */
1712    	u32 parent_exec_id;
1713    	u32 self_exec_id;
1714 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1715  * mempolicy */
1716 	spinlock_t alloc_lock;
1717 
1718 	/* Protection of the PI data structures: */
1719 	raw_spinlock_t pi_lock;
1720 
1721 	struct wake_q_node wake_q;
1722 
1723 #ifdef CONFIG_RT_MUTEXES
1724 	/* PI waiters blocked on a rt_mutex held by this task */
1725 	struct rb_root pi_waiters;
1726 	struct rb_node *pi_waiters_leftmost;
1727 	/* Deadlock detection and priority inheritance handling */
1728 	struct rt_mutex_waiter *pi_blocked_on;
1729 #endif
1730 
1731 #ifdef CONFIG_DEBUG_MUTEXES
1732 	/* mutex deadlock detection */
1733 	struct mutex_waiter *blocked_on;
1734 #endif
1735 #ifdef CONFIG_TRACE_IRQFLAGS
1736 	unsigned int irq_events;
1737 	unsigned long hardirq_enable_ip;
1738 	unsigned long hardirq_disable_ip;
1739 	unsigned int hardirq_enable_event;
1740 	unsigned int hardirq_disable_event;
1741 	int hardirqs_enabled;
1742 	int hardirq_context;
1743 	unsigned long softirq_disable_ip;
1744 	unsigned long softirq_enable_ip;
1745 	unsigned int softirq_disable_event;
1746 	unsigned int softirq_enable_event;
1747 	int softirqs_enabled;
1748 	int softirq_context;
1749 #endif
1750 #ifdef CONFIG_LOCKDEP
1751 # define MAX_LOCK_DEPTH 48UL
1752 	u64 curr_chain_key;
1753 	int lockdep_depth;
1754 	unsigned int lockdep_recursion;
1755 	struct held_lock held_locks[MAX_LOCK_DEPTH];
1756 	gfp_t lockdep_reclaim_gfp;
1757 #endif
1758 #ifdef CONFIG_UBSAN
1759 	unsigned int in_ubsan;
1760 #endif
1761 
1762 /* journalling filesystem info */
1763 	void *journal_info;
1764 
1765 /* stacked block device info */
1766 	struct bio_list *bio_list;
1767 
1768 #ifdef CONFIG_BLOCK
1769 /* stack plugging */
1770 	struct blk_plug *plug;
1771 #endif
1772 
1773 /* VM state */
1774 	struct reclaim_state *reclaim_state;
1775 
1776 	struct backing_dev_info *backing_dev_info;
1777 
1778 	struct io_context *io_context;
1779 
1780 	unsigned long ptrace_message;
1781 	siginfo_t *last_siginfo; /* For ptrace use.  */
1782 	struct task_io_accounting ioac;
1783 #if defined(CONFIG_TASK_XACCT)
1784 	u64 acct_rss_mem1;	/* accumulated rss usage */
1785 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
1786 	u64 acct_timexpd;	/* stime + utime since last update */
1787 #endif
1788 #ifdef CONFIG_CPUSETS
1789 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1790 	seqcount_t mems_allowed_seq;	/* Sequence number to catch updates */
1791 	int cpuset_mem_spread_rotor;
1792 	int cpuset_slab_spread_rotor;
1793 #endif
1794 #ifdef CONFIG_CGROUPS
1795 	/* Control Group info protected by css_set_lock */
1796 	struct css_set __rcu *cgroups;
1797 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1798 	struct list_head cg_list;
1799 #endif
1800 #ifdef CONFIG_INTEL_RDT_A
1801 	int closid;
1802 #endif
1803 #ifdef CONFIG_FUTEX
1804 	struct robust_list_head __user *robust_list;
1805 #ifdef CONFIG_COMPAT
1806 	struct compat_robust_list_head __user *compat_robust_list;
1807 #endif
1808 	struct list_head pi_state_list;
1809 	struct futex_pi_state *pi_state_cache;
1810 #endif
1811 #ifdef CONFIG_PERF_EVENTS
1812 	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1813 	struct mutex perf_event_mutex;
1814 	struct list_head perf_event_list;
1815 #endif
1816 #ifdef CONFIG_DEBUG_PREEMPT
1817 	unsigned long preempt_disable_ip;
1818 #endif
1819 #ifdef CONFIG_NUMA
1820 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1821 	short il_next;
1822 	short pref_node_fork;
1823 #endif
1824 #ifdef CONFIG_NUMA_BALANCING
1825 	int numa_scan_seq;
1826 	unsigned int numa_scan_period;
1827 	unsigned int numa_scan_period_max;
1828 	int numa_preferred_nid;
1829 	unsigned long numa_migrate_retry;
1830 	u64 node_stamp;			/* migration stamp  */
1831 	u64 last_task_numa_placement;
1832 	u64 last_sum_exec_runtime;
1833 	struct callback_head numa_work;
1834 
1835 	struct list_head numa_entry;
1836 	struct numa_group *numa_group;
1837 
1838 	/*
1839 	 * numa_faults is an array split into four regions:
1840 	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1841 	 * in this precise order.
1842 	 *
1843 	 * faults_memory: Exponential decaying average of faults on a per-node
1844 	 * basis. Scheduling placement decisions are made based on these
1845 	 * counts. The values remain static for the duration of a PTE scan.
1846 	 * faults_cpu: Track the nodes the process was running on when a NUMA
1847 	 * hinting fault was incurred.
1848 	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1849 	 * during the current scan window. When the scan completes, the counts
1850 	 * in faults_memory and faults_cpu decay and these values are copied.
1851 	 */
1852 	unsigned long *numa_faults;
1853 	unsigned long total_numa_faults;
1854 
1855 	/*
1856 	 * numa_faults_locality tracks if faults recorded during the last
1857 	 * scan window were remote/local or failed to migrate. The task scan
1858 	 * period is adapted based on the locality of the faults with different
1859 	 * weights depending on whether they were shared or private faults
1860 	 */
1861 	unsigned long numa_faults_locality[3];
1862 
1863 	unsigned long numa_pages_migrated;
1864 #endif /* CONFIG_NUMA_BALANCING */
1865 
1866 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1867 	struct tlbflush_unmap_batch tlb_ubc;
1868 #endif
1869 
1870 	struct rcu_head rcu;
1871 
1872 	/*
1873 	 * cache last used pipe for splice
1874 	 */
1875 	struct pipe_inode_info *splice_pipe;
1876 
1877 	struct page_frag task_frag;
1878 
1879 #ifdef	CONFIG_TASK_DELAY_ACCT
1880 	struct task_delay_info *delays;
1881 #endif
1882 #ifdef CONFIG_FAULT_INJECTION
1883 	int make_it_fail;
1884 #endif
1885 	/*
1886 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1887 	 * balance_dirty_pages() for some dirty throttling pause
1888 	 */
1889 	int nr_dirtied;
1890 	int nr_dirtied_pause;
1891 	unsigned long dirty_paused_when; /* start of a write-and-pause period */
1892 
1893 #ifdef CONFIG_LATENCYTOP
1894 	int latency_record_count;
1895 	struct latency_record latency_record[LT_SAVECOUNT];
1896 #endif
1897 	/*
1898 	 * time slack values; these are used to round up poll() and
1899 	 * select() etc timeout values. These are in nanoseconds.
1900 	 */
1901 	u64 timer_slack_ns;
1902 	u64 default_timer_slack_ns;
1903 
1904 #ifdef CONFIG_KASAN
1905 	unsigned int kasan_depth;
1906 #endif
1907 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1908 	/* Index of current stored address in ret_stack */
1909 	int curr_ret_stack;
1910 	/* Stack of return addresses for return function tracing */
1911 	struct ftrace_ret_stack	*ret_stack;
1912 	/* time stamp for last schedule */
1913 	unsigned long long ftrace_timestamp;
1914 	/*
1915 	 * Number of functions that haven't been traced
1916 	 * because of depth overrun.
1917 	 */
1918 	atomic_t trace_overrun;
1919 	/* Pause for the tracing */
1920 	atomic_t tracing_graph_pause;
1921 #endif
1922 #ifdef CONFIG_TRACING
1923 	/* state flags for use by tracers */
1924 	unsigned long trace;
1925 	/* bitmask and counter of trace recursion */
1926 	unsigned long trace_recursion;
1927 #endif /* CONFIG_TRACING */
1928 #ifdef CONFIG_KCOV
1929 	/* Coverage collection mode enabled for this task (0 if disabled). */
1930 	enum kcov_mode kcov_mode;
1931 	/* Size of the kcov_area. */
1932 	unsigned	kcov_size;
1933 	/* Buffer for coverage collection. */
1934 	void		*kcov_area;
1935 	/* kcov descriptor wired with this task or NULL. */
1936 	struct kcov	*kcov;
1937 #endif
1938 #ifdef CONFIG_MEMCG
1939 	struct mem_cgroup *memcg_in_oom;
1940 	gfp_t memcg_oom_gfp_mask;
1941 	int memcg_oom_order;
1942 
1943 	/* number of pages to reclaim on returning to userland */
1944 	unsigned int memcg_nr_pages_over_high;
1945 #endif
1946 #ifdef CONFIG_UPROBES
1947 	struct uprobe_task *utask;
1948 #endif
1949 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1950 	unsigned int	sequential_io;
1951 	unsigned int	sequential_io_avg;
1952 #endif
1953 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1954 	unsigned long	task_state_change;
1955 #endif
1956 	int pagefault_disabled;
1957 #ifdef CONFIG_MMU
1958 	struct task_struct *oom_reaper_list;
1959 #endif
1960 #ifdef CONFIG_VMAP_STACK
1961 	struct vm_struct *stack_vm_area;
1962 #endif
1963 #ifdef CONFIG_THREAD_INFO_IN_TASK
1964 	/* A live task holds one reference. */
1965 	atomic_t stack_refcount;
1966 #endif
1967 /* CPU-specific state of this task */
1968 	struct thread_struct thread;
1969 /*
1970  * WARNING: on x86, 'thread_struct' contains a variable-sized
1971  * structure.  It *MUST* be at the end of 'task_struct'.
1972  *
1973  * Do not put anything below here!
1974  */
1975 };
1976 
1977 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
1978 extern int arch_task_struct_size __read_mostly;
1979 #else
1980 # define arch_task_struct_size (sizeof(struct task_struct))
1981 #endif
1982 
1983 #ifdef CONFIG_VMAP_STACK
1984 static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
1985 {
1986 	return t->stack_vm_area;
1987 }
1988 #else
1989 static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
1990 {
1991 	return NULL;
1992 }
1993 #endif
1994 
1995 #define TNF_MIGRATED	0x01
1996 #define TNF_NO_GROUP	0x02
1997 #define TNF_SHARED	0x04
1998 #define TNF_FAULT_LOCAL	0x08
1999 #define TNF_MIGRATE_FAIL 0x10
2000 
2001 static inline bool in_vfork(struct task_struct *tsk)
2002 {
2003 	bool ret;
2004 
2005 	/*
2006 	 * need RCU to access ->real_parent if CLONE_VM was used along with
2007 	 * CLONE_PARENT.
2008 	 *
2009 	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
2010 	 * imply CLONE_VM
2011 	 *
2012 	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
2013 	 * ->real_parent is not necessarily the task doing vfork(), so in
2014 	 * theory we can't rely on task_lock() if we want to dereference it.
2015 	 *
2016 	 * And in this case we can't trust the real_parent->mm == tsk->mm
2017 	 * check, it can be false negative. But we do not care, if init or
2018 	 * another oom-unkillable task does this it should blame itself.
2019 	 */
2020 	rcu_read_lock();
2021 	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
2022 	rcu_read_unlock();
2023 
2024 	return ret;
2025 }
2026 
2027 #ifdef CONFIG_NUMA_BALANCING
2028 extern void task_numa_fault(int last_node, int node, int pages, int flags);
2029 extern pid_t task_numa_group_id(struct task_struct *p);
2030 extern void set_numabalancing_state(bool enabled);
2031 extern void task_numa_free(struct task_struct *p);
2032 extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
2033 					int src_nid, int dst_cpu);
2034 #else
2035 static inline void task_numa_fault(int last_node, int node, int pages,
2036 				   int flags)
2037 {
2038 }
2039 static inline pid_t task_numa_group_id(struct task_struct *p)
2040 {
2041 	return 0;
2042 }
2043 static inline void set_numabalancing_state(bool enabled)
2044 {
2045 }
2046 static inline void task_numa_free(struct task_struct *p)
2047 {
2048 }
2049 static inline bool should_numa_migrate_memory(struct task_struct *p,
2050 				struct page *page, int src_nid, int dst_cpu)
2051 {
2052 	return true;
2053 }
2054 #endif
2055 
2056 static inline struct pid *task_pid(struct task_struct *task)
2057 {
2058 	return task->pids[PIDTYPE_PID].pid;
2059 }
2060 
2061 static inline struct pid *task_tgid(struct task_struct *task)
2062 {
2063 	return task->group_leader->pids[PIDTYPE_PID].pid;
2064 }
2065 
2066 /*
2067  * Without tasklist or rcu lock it is not safe to dereference
2068  * the result of task_pgrp/task_session even if task == current,
2069  * we can race with another thread doing sys_setsid/sys_setpgid.
2070  */
2071 static inline struct pid *task_pgrp(struct task_struct *task)
2072 {
2073 	return task->group_leader->pids[PIDTYPE_PGID].pid;
2074 }
2075 
2076 static inline struct pid *task_session(struct task_struct *task)
2077 {
2078 	return task->group_leader->pids[PIDTYPE_SID].pid;
2079 }
2080 
2081 struct pid_namespace;
2082 
2083 /*
2084  * the helpers to get the task's different pids as they are seen
2085  * from various namespaces
2086  *
2087  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
2088  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
2089  *                     current.
2090  * task_xid_nr_ns()  : id seen from the ns specified;
2091  *
2092  * set_task_vxid()   : assigns a virtual id to a task;
2093  *
2094  * see also pid_nr() etc in include/linux/pid.h
2095  */
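/*
 * Illustrative sketch only (not an addition to this API): for a task @p
 * that is visible both globally and inside a pid namespace @ns,
 *
 *	pid_t global = task_pid_nr(p);		(id in the init namespace)
 *	pid_t local  = task_pid_vnr(p);		(id as seen by current)
 *	pid_t in_ns  = task_pid_nr_ns(p, ns);	(id in the given namespace)
 *
 * The task_tgid_*(), task_pgrp_*() and task_session_*() helpers follow
 * the same naming convention.
 */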
2096 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
2097 			struct pid_namespace *ns);
2098 
2099 static inline pid_t task_pid_nr(struct task_struct *tsk)
2100 {
2101 	return tsk->pid;
2102 }
2103 
2104 static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
2105 					struct pid_namespace *ns)
2106 {
2107 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
2108 }
2109 
2110 static inline pid_t task_pid_vnr(struct task_struct *tsk)
2111 {
2112 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
2113 }
2114 
2115 
2116 static inline pid_t task_tgid_nr(struct task_struct *tsk)
2117 {
2118 	return tsk->tgid;
2119 }
2120 
2121 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
2122 
2123 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
2124 {
2125 	return pid_vnr(task_tgid(tsk));
2126 }
2127 
2128 
2129 static inline int pid_alive(const struct task_struct *p);
2130 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
2131 {
2132 	pid_t pid = 0;
2133 
2134 	rcu_read_lock();
2135 	if (pid_alive(tsk))
2136 		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
2137 	rcu_read_unlock();
2138 
2139 	return pid;
2140 }
2141 
2142 static inline pid_t task_ppid_nr(const struct task_struct *tsk)
2143 {
2144 	return task_ppid_nr_ns(tsk, &init_pid_ns);
2145 }
2146 
2147 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
2148 					struct pid_namespace *ns)
2149 {
2150 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
2151 }
2152 
2153 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
2154 {
2155 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
2156 }
2157 
2158 
2159 static inline pid_t task_session_nr_ns(struct task_struct *tsk,
2160 					struct pid_namespace *ns)
2161 {
2162 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
2163 }
2164 
2165 static inline pid_t task_session_vnr(struct task_struct *tsk)
2166 {
2167 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
2168 }
2169 
2170 /* obsolete, do not use */
2171 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
2172 {
2173 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
2174 }
2175 
2176 /**
2177  * pid_alive - check that a task structure is not stale
2178  * @p: Task structure to be checked.
2179  *
2180  * Test if a process is not yet dead (at most zombie state).
2181  * If pid_alive fails, then pointers within the task structure
2182  * can be stale and must not be dereferenced.
2183  *
2184  * Return: 1 if the process is alive. 0 otherwise.
2185  */
2186 static inline int pid_alive(const struct task_struct *p)
2187 {
2188 	return p->pids[PIDTYPE_PID].pid != NULL;
2189 }
2190 
2191 /**
2192  * is_global_init - check if a task structure is init. Since init
2193  * is free to have sub-threads, we need to check tgid.
2194  * @tsk: Task structure to be checked.
2195  *
2196  * Check if a task structure is the first user space task the kernel created.
2197  *
2198  * Return: 1 if the task structure is init. 0 otherwise.
2199  */
2200 static inline int is_global_init(struct task_struct *tsk)
2201 {
2202 	return task_tgid_nr(tsk) == 1;
2203 }
2204 
2205 extern struct pid *cad_pid;
2206 
2207 extern void free_task(struct task_struct *tsk);
2208 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
2209 
2210 extern void __put_task_struct(struct task_struct *t);
2211 
2212 static inline void put_task_struct(struct task_struct *t)
2213 {
2214 	if (atomic_dec_and_test(&t->usage))
2215 		__put_task_struct(t);
2216 }
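/*
 * Illustrative refcount pattern (sketch, assuming a hypothetical caller
 * that obtained a valid @p under tasklist_lock or RCU):
 *
 *	get_task_struct(p);
 *	... drop the lock, use p for as long as needed ...
 *	put_task_struct(p);
 */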
2217 
2218 struct task_struct *task_rcu_dereference(struct task_struct **ptask);
2219 struct task_struct *try_get_task_struct(struct task_struct **ptask);
2220 
2221 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
2222 extern void task_cputime(struct task_struct *t,
2223 			 u64 *utime, u64 *stime);
2224 extern u64 task_gtime(struct task_struct *t);
2225 #else
2226 static inline void task_cputime(struct task_struct *t,
2227 				u64 *utime, u64 *stime)
2228 {
2229 	*utime = t->utime;
2230 	*stime = t->stime;
2231 }
2232 
2233 static inline u64 task_gtime(struct task_struct *t)
2234 {
2235 	return t->gtime;
2236 }
2237 #endif
2238 
2239 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
2240 static inline void task_cputime_scaled(struct task_struct *t,
2241 				       u64 *utimescaled,
2242 				       u64 *stimescaled)
2243 {
2244 	*utimescaled = t->utimescaled;
2245 	*stimescaled = t->stimescaled;
2246 }
2247 #else
2248 static inline void task_cputime_scaled(struct task_struct *t,
2249 				       u64 *utimescaled,
2250 				       u64 *stimescaled)
2251 {
2252 	task_cputime(t, utimescaled, stimescaled);
2253 }
2254 #endif
2255 
2256 extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
2257 extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
2258 
2259 /*
2260  * Per process flags
2261  */
2262 #define PF_IDLE		0x00000002	/* I am an IDLE thread */
2263 #define PF_EXITING	0x00000004	/* getting shut down */
2264 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
2265 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
2266 #define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
2267 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
2268 #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
2269 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
2270 #define PF_DUMPCORE	0x00000200	/* dumped core */
2271 #define PF_SIGNALED	0x00000400	/* killed by a signal */
2272 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
2273 #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
2274 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
2275 #define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
2276 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
2277 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
2278 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
2279 #define PF_KSWAPD	0x00040000	/* I am kswapd */
2280 #define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
2281 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
2282 #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
2283 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
2284 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
2285 #define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
2286 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
2287 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
2288 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
2289 #define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */
2290 
2291 /*
2292  * Only the _current_ task can read/write to tsk->flags, but other
2293  * tasks can access tsk->flags in readonly mode for example
2294  * with tsk_used_math (like during threaded core dumping).
2295  * There is however an exception to this rule during ptrace
2296  * or during fork: the ptracer task is allowed to write to the
2297  * child->flags of its traced child (same goes for fork, the parent
2298  * can write to the child->flags), because we're guaranteed the
2299  * child is not running and in turn not changing child->flags
2300  * at the same time the parent does it.
2301  */
2302 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
2303 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
2304 #define clear_used_math() clear_stopped_child_used_math(current)
2305 #define set_used_math() set_stopped_child_used_math(current)
2306 #define conditional_stopped_child_used_math(condition, child) \
2307 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
2308 #define conditional_used_math(condition) \
2309 	conditional_stopped_child_used_math(condition, current)
2310 #define copy_to_stopped_child_used_math(child) \
2311 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
2312 /* NOTE: this will return 0 or PF_USED_MATH; it will never return 1 */
2313 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
2314 #define used_math() tsk_used_math(current)
2315 
2316 /* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
2317  * __GFP_FS is also cleared as it implies __GFP_IO.
2318  */
2319 static inline gfp_t memalloc_noio_flags(gfp_t flags)
2320 {
2321 	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
2322 		flags &= ~(__GFP_IO | __GFP_FS);
2323 	return flags;
2324 }
2325 
2326 static inline unsigned int memalloc_noio_save(void)
2327 {
2328 	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
2329 	current->flags |= PF_MEMALLOC_NOIO;
2330 	return flags;
2331 }
2332 
2333 static inline void memalloc_noio_restore(unsigned int flags)
2334 {
2335 	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
2336 }
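/*
 * Typical usage sketch (illustrative only): bracket a region in which
 * allocations must not recurse into I/O or the filesystem, e.g. a
 * hypothetical block-driver reclaim path.
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *	... allocations here implicitly lose __GFP_IO/__GFP_FS ...
 *	memalloc_noio_restore(noio_flags);
 */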
2337 
2338 /* Per-process atomic flags. */
2339 #define PFA_NO_NEW_PRIVS 0	/* May not gain new privileges. */
2340 #define PFA_SPREAD_PAGE  1      /* Spread page cache over cpuset */
2341 #define PFA_SPREAD_SLAB  2      /* Spread some slab caches over cpuset */
2342 #define PFA_LMK_WAITING  3      /* Lowmemorykiller is waiting */
2343 
2344 
2345 #define TASK_PFA_TEST(name, func)					\
2346 	static inline bool task_##func(struct task_struct *p)		\
2347 	{ return test_bit(PFA_##name, &p->atomic_flags); }
2348 #define TASK_PFA_SET(name, func)					\
2349 	static inline void task_set_##func(struct task_struct *p)	\
2350 	{ set_bit(PFA_##name, &p->atomic_flags); }
2351 #define TASK_PFA_CLEAR(name, func)					\
2352 	static inline void task_clear_##func(struct task_struct *p)	\
2353 	{ clear_bit(PFA_##name, &p->atomic_flags); }
2354 
2355 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
2356 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
2357 
2358 TASK_PFA_TEST(SPREAD_PAGE, spread_page)
2359 TASK_PFA_SET(SPREAD_PAGE, spread_page)
2360 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
2361 
2362 TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
2363 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
2364 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
2365 
2366 TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
2367 TASK_PFA_SET(LMK_WAITING, lmk_waiting)
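/*
 * The TASK_PFA_* macros above expand to trivial bit helpers; e.g. for
 * PFA_NO_NEW_PRIVS the generated accessors are used as follows
 * (illustrative sketch; the -EPERM policy is hypothetical):
 *
 *	if (task_no_new_privs(current))
 *		return -EPERM;
 *	task_set_no_new_privs(current);
 *
 * Note that NO_NEW_PRIVS and LMK_WAITING define no clear helper above;
 * NO_NEW_PRIVS in particular is one-way by design.
 */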
2368 
2369 /*
2370  * task->jobctl flags
2371  */
2372 #define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
2373 
2374 #define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
2375 #define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
2376 #define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
2377 #define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
2378 #define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
2379 #define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
2380 #define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
2381 
2382 #define JOBCTL_STOP_DEQUEUED	(1UL << JOBCTL_STOP_DEQUEUED_BIT)
2383 #define JOBCTL_STOP_PENDING	(1UL << JOBCTL_STOP_PENDING_BIT)
2384 #define JOBCTL_STOP_CONSUME	(1UL << JOBCTL_STOP_CONSUME_BIT)
2385 #define JOBCTL_TRAP_STOP	(1UL << JOBCTL_TRAP_STOP_BIT)
2386 #define JOBCTL_TRAP_NOTIFY	(1UL << JOBCTL_TRAP_NOTIFY_BIT)
2387 #define JOBCTL_TRAPPING		(1UL << JOBCTL_TRAPPING_BIT)
2388 #define JOBCTL_LISTENING	(1UL << JOBCTL_LISTENING_BIT)
2389 
2390 #define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
2391 #define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
2392 
2393 extern bool task_set_jobctl_pending(struct task_struct *task,
2394 				    unsigned long mask);
2395 extern void task_clear_jobctl_trapping(struct task_struct *task);
2396 extern void task_clear_jobctl_pending(struct task_struct *task,
2397 				      unsigned long mask);
2398 
2399 static inline void rcu_copy_process(struct task_struct *p)
2400 {
2401 #ifdef CONFIG_PREEMPT_RCU
2402 	p->rcu_read_lock_nesting = 0;
2403 	p->rcu_read_unlock_special.s = 0;
2404 	p->rcu_blocked_node = NULL;
2405 	INIT_LIST_HEAD(&p->rcu_node_entry);
2406 #endif /* #ifdef CONFIG_PREEMPT_RCU */
2407 #ifdef CONFIG_TASKS_RCU
2408 	p->rcu_tasks_holdout = false;
2409 	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
2410 	p->rcu_tasks_idle_cpu = -1;
2411 #endif /* #ifdef CONFIG_TASKS_RCU */
2412 }
2413 
2414 static inline void tsk_restore_flags(struct task_struct *task,
2415 				unsigned long orig_flags, unsigned long flags)
2416 {
2417 	task->flags &= ~flags;
2418 	task->flags |= orig_flags & flags;
2419 }
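/*
 * Sketch of the intended save/restore use of tsk_restore_flags()
 * (illustrative; PF_MEMALLOC_NOIO is just an example flag):
 *
 *	unsigned long pflags = current->flags & PF_MEMALLOC_NOIO;
 *	current->flags |= PF_MEMALLOC_NOIO;
 *	... do work ...
 *	tsk_restore_flags(current, pflags, PF_MEMALLOC_NOIO);
 */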
2420 
2421 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
2422 				     const struct cpumask *trial);
2423 extern int task_can_attach(struct task_struct *p,
2424 			   const struct cpumask *cs_cpus_allowed);
2425 #ifdef CONFIG_SMP
2426 extern void do_set_cpus_allowed(struct task_struct *p,
2427 			       const struct cpumask *new_mask);
2428 
2429 extern int set_cpus_allowed_ptr(struct task_struct *p,
2430 				const struct cpumask *new_mask);
2431 #else
2432 static inline void do_set_cpus_allowed(struct task_struct *p,
2433 				      const struct cpumask *new_mask)
2434 {
2435 }
2436 static inline int set_cpus_allowed_ptr(struct task_struct *p,
2437 				       const struct cpumask *new_mask)
2438 {
2439 	if (!cpumask_test_cpu(0, new_mask))
2440 		return -EINVAL;
2441 	return 0;
2442 }
2443 #endif
2444 
2445 #ifdef CONFIG_NO_HZ_COMMON
2446 void calc_load_enter_idle(void);
2447 void calc_load_exit_idle(void);
2448 #else
2449 static inline void calc_load_enter_idle(void) { }
2450 static inline void calc_load_exit_idle(void) { }
2451 #endif /* CONFIG_NO_HZ_COMMON */
2452 
2453 #ifndef cpu_relax_yield
2454 #define cpu_relax_yield() cpu_relax()
2455 #endif
2456 
2457 /*
2458  * Do not use outside of architecture code which knows its limitations.
2459  *
2460  * sched_clock() has no promise of monotonicity or bounded drift between
2461  * CPUs; direct use (which you should not attempt) requires disabling IRQs.
2462  *
2463  * Please use one of the three interfaces below.
2464  */
2465 extern unsigned long long notrace sched_clock(void);
2466 /*
2467  * See the comment in kernel/sched/clock.c
2468  */
2469 extern u64 running_clock(void);
2470 extern u64 sched_clock_cpu(int cpu);
2471 
2472 
2473 extern void sched_clock_init(void);
2474 
2475 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2476 static inline void sched_clock_init_late(void)
2477 {
2478 }
2479 
2480 static inline void sched_clock_tick(void)
2481 {
2482 }
2483 
2484 static inline void clear_sched_clock_stable(void)
2485 {
2486 }
2487 
2488 static inline void sched_clock_idle_sleep_event(void)
2489 {
2490 }
2491 
2492 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2493 {
2494 }
2495 
2496 static inline u64 cpu_clock(int cpu)
2497 {
2498 	return sched_clock();
2499 }
2500 
2501 static inline u64 local_clock(void)
2502 {
2503 	return sched_clock();
2504 }
2505 #else
2506 extern void sched_clock_init_late(void);
2507 /*
2508  * Architectures can set this to 1 if they have specified
2509  * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
2510  * but then during bootup it turns out that sched_clock()
2511  * is reliable after all:
2512  */
2513 extern int sched_clock_stable(void);
2514 extern void clear_sched_clock_stable(void);
2515 
2516 extern void sched_clock_tick(void);
2517 extern void sched_clock_idle_sleep_event(void);
2518 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2519 
2520 /*
2521  * As outlined in clock.c, provides a fast, high resolution, nanosecond
2522  * time source that is monotonic per cpu argument and has bounded drift
2523  * between cpus.
2524  *
2525  * ######################### BIG FAT WARNING ##########################
2526  * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
2527  * # go backwards !!                                                  #
2528  * ####################################################################
2529  */
2530 static inline u64 cpu_clock(int cpu)
2531 {
2532 	return sched_clock_cpu(cpu);
2533 }
2534 
2535 static inline u64 local_clock(void)
2536 {
2537 	return sched_clock_cpu(raw_smp_processor_id());
2538 }
2539 #endif
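/*
 * Illustrative sketch: measuring a short duration with local_clock().
 * The values are in nanoseconds and only safely comparable on the same
 * CPU (see the warning above about cross-CPU comparisons).
 *
 *	u64 t0 = local_clock();
 *	... work ...
 *	u64 delta_ns = local_clock() - t0;
 */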
2540 
2541 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2542 /*
2543  * An interface for runtime opt-in to IRQ time accounting based on sched_clock.
2544  * The opt-in is explicit so that platforms with slow sched_clock()
2545  * implementations do not pay the performance penalty by default.
2546  */
2547 extern void enable_sched_clock_irqtime(void);
2548 extern void disable_sched_clock_irqtime(void);
2549 #else
2550 static inline void enable_sched_clock_irqtime(void) {}
2551 static inline void disable_sched_clock_irqtime(void) {}
2552 #endif
2553 
2554 extern unsigned long long
2555 task_sched_runtime(struct task_struct *task);
2556 
2557 /* sched_exec is called by processes performing an exec */
2558 #ifdef CONFIG_SMP
2559 extern void sched_exec(void);
2560 #else
2561 #define sched_exec()   {}
2562 #endif
2563 
2564 extern void sched_clock_idle_sleep_event(void);
2565 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2566 
2567 #ifdef CONFIG_HOTPLUG_CPU
2568 extern void idle_task_exit(void);
2569 #else
2570 static inline void idle_task_exit(void) {}
2571 #endif
2572 
2573 #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
2574 extern void wake_up_nohz_cpu(int cpu);
2575 #else
2576 static inline void wake_up_nohz_cpu(int cpu) { }
2577 #endif
2578 
2579 #ifdef CONFIG_NO_HZ_FULL
2580 extern u64 scheduler_tick_max_deferment(void);
2581 #endif
2582 
2583 #ifdef CONFIG_SCHED_AUTOGROUP
2584 extern void sched_autogroup_create_attach(struct task_struct *p);
2585 extern void sched_autogroup_detach(struct task_struct *p);
2586 extern void sched_autogroup_fork(struct signal_struct *sig);
2587 extern void sched_autogroup_exit(struct signal_struct *sig);
2588 extern void sched_autogroup_exit_task(struct task_struct *p);
2589 #ifdef CONFIG_PROC_FS
2590 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2591 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2592 #endif
2593 #else
2594 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2595 static inline void sched_autogroup_detach(struct task_struct *p) { }
2596 static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2597 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2598 static inline void sched_autogroup_exit_task(struct task_struct *p) { }
2599 #endif
2600 
2601 extern int yield_to(struct task_struct *p, bool preempt);
2602 extern void set_user_nice(struct task_struct *p, long nice);
2603 extern int task_prio(const struct task_struct *p);
2604 /**
2605  * task_nice - return the nice value of a given task.
2606  * @p: the task in question.
2607  *
2608  * Return: The nice value [ -20 ... 0 ... 19 ].
2609  */
2610 static inline int task_nice(const struct task_struct *p)
2611 {
2612 	return PRIO_TO_NICE((p)->static_prio);
2613 }
2614 extern int can_nice(const struct task_struct *p, const int nice);
2615 extern int task_curr(const struct task_struct *p);
2616 extern int idle_cpu(int cpu);
2617 extern int sched_setscheduler(struct task_struct *, int,
2618 			      const struct sched_param *);
2619 extern int sched_setscheduler_nocheck(struct task_struct *, int,
2620 				      const struct sched_param *);
2621 extern int sched_setattr(struct task_struct *,
2622 			 const struct sched_attr *);
2623 extern struct task_struct *idle_task(int cpu);
2624 /**
2625  * is_idle_task - is the specified task an idle task?
2626  * @p: the task in question.
2627  *
2628  * Return: 1 if @p is an idle task. 0 otherwise.
2629  */
2630 static inline bool is_idle_task(const struct task_struct *p)
2631 {
2632 	return !!(p->flags & PF_IDLE);
2633 }
2634 extern struct task_struct *curr_task(int cpu);
2635 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
2636 
2637 void yield(void);
2638 
2639 union thread_union {
2640 #ifndef CONFIG_THREAD_INFO_IN_TASK
2641 	struct thread_info thread_info;
2642 #endif
2643 	unsigned long stack[THREAD_SIZE/sizeof(long)];
2644 };
2645 
2646 #ifndef __HAVE_ARCH_KSTACK_END
2647 static inline int kstack_end(void *addr)
2648 {
2649 	/* Reliable end of stack detection:
2650 	 * Some APM bios versions misalign the stack
2651 	 * Some APM BIOS versions misalign the stack
2652 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2653 }
2654 #endif
2655 
2656 extern union thread_union init_thread_union;
2657 extern struct task_struct init_task;
2658 
2659 extern struct   mm_struct init_mm;
2660 
2661 extern struct pid_namespace init_pid_ns;
2662 
2663 /*
2664  * find a task by one of its numerical ids
2665  *
2666  * find_task_by_pid_ns():
2667  *      finds a task by its pid in the specified namespace
2668  * find_task_by_vpid():
2669  *      finds a task by its virtual pid
2670  *
2671  * see also find_vpid() etc in include/linux/pid.h
2672  */
2673 
2674 extern struct task_struct *find_task_by_vpid(pid_t nr);
2675 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2676 		struct pid_namespace *ns);
2677 
2678 /* per-UID process charging. */
2679 extern struct user_struct * alloc_uid(kuid_t);
2680 static inline struct user_struct *get_uid(struct user_struct *u)
2681 {
2682 	atomic_inc(&u->__count);
2683 	return u;
2684 }
2685 extern void free_uid(struct user_struct *);
2686 
2687 #include <asm/current.h>
2688 
2689 extern void xtime_update(unsigned long ticks);
2690 
2691 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2692 extern int wake_up_process(struct task_struct *tsk);
2693 extern void wake_up_new_task(struct task_struct *tsk);
2694 #ifdef CONFIG_SMP
2695  extern void kick_process(struct task_struct *tsk);
2696 #else
2697  static inline void kick_process(struct task_struct *tsk) { }
2698 #endif
2699 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2700 extern void sched_dead(struct task_struct *p);
2701 
2702 extern void proc_caches_init(void);
2703 extern void flush_signals(struct task_struct *);
2704 extern void ignore_signals(struct task_struct *);
2705 extern void flush_signal_handlers(struct task_struct *, int force_default);
2706 extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2707 
2708 static inline int kernel_dequeue_signal(siginfo_t *info)
2709 {
2710 	struct task_struct *tsk = current;
2711 	siginfo_t __info;
2712 	int ret;
2713 
2714 	spin_lock_irq(&tsk->sighand->siglock);
2715 	ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
2716 	spin_unlock_irq(&tsk->sighand->siglock);
2717 
2718 	return ret;
2719 }
2720 
2721 static inline void kernel_signal_stop(void)
2722 {
2723 	spin_lock_irq(&current->sighand->siglock);
2724 	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
2725 		__set_current_state(TASK_STOPPED);
2726 	spin_unlock_irq(&current->sighand->siglock);
2727 
2728 	schedule();
2729 }
2730 
2731 extern void release_task(struct task_struct * p);
2732 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2733 extern int force_sigsegv(int, struct task_struct *);
2734 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2735 extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2736 extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2737 extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2738 				const struct cred *, u32);
2739 extern int kill_pgrp(struct pid *pid, int sig, int priv);
2740 extern int kill_pid(struct pid *pid, int sig, int priv);
2741 extern int kill_proc_info(int, struct siginfo *, pid_t);
2742 extern __must_check bool do_notify_parent(struct task_struct *, int);
2743 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2744 extern void force_sig(int, struct task_struct *);
2745 extern int send_sig(int, struct task_struct *, int);
2746 extern int zap_other_threads(struct task_struct *p);
2747 extern struct sigqueue *sigqueue_alloc(void);
2748 extern void sigqueue_free(struct sigqueue *);
2749 extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
2750 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2751 
2752 #ifdef TIF_RESTORE_SIGMASK
2753 /*
2754  * Legacy restore_sigmask accessors.  These are inefficient on
2755  * SMP architectures because they require atomic operations.
2756  */
2757 
2758 /**
2759  * set_restore_sigmask() - make sure saved_sigmask processing gets done
2760  *
2761  * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
2762  * will run before returning to user mode, to process the flag.  For
2763  * all callers, TIF_SIGPENDING is already set or it's no harm to set
2764  * it.  TIF_RESTORE_SIGMASK need not be in the set of bits that the
2765  * arch code will notice on return to user mode, in case those bits
2766  * are scarce.  We set TIF_SIGPENDING here to ensure that the arch
2767  * signal code always gets run when TIF_RESTORE_SIGMASK is set.
2768  */
2769 static inline void set_restore_sigmask(void)
2770 {
2771 	set_thread_flag(TIF_RESTORE_SIGMASK);
2772 	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
2773 }
2774 static inline void clear_restore_sigmask(void)
2775 {
2776 	clear_thread_flag(TIF_RESTORE_SIGMASK);
2777 }
2778 static inline bool test_restore_sigmask(void)
2779 {
2780 	return test_thread_flag(TIF_RESTORE_SIGMASK);
2781 }
2782 static inline bool test_and_clear_restore_sigmask(void)
2783 {
2784 	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
2785 }
2786 
2787 #else	/* TIF_RESTORE_SIGMASK */
2788 
2789 /* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
2790 static inline void set_restore_sigmask(void)
2791 {
2792 	current->restore_sigmask = true;
2793 	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
2794 }
2795 static inline void clear_restore_sigmask(void)
2796 {
2797 	current->restore_sigmask = false;
2798 }
2799 static inline bool test_restore_sigmask(void)
2800 {
2801 	return current->restore_sigmask;
2802 }
2803 static inline bool test_and_clear_restore_sigmask(void)
2804 {
2805 	if (!current->restore_sigmask)
2806 		return false;
2807 	current->restore_sigmask = false;
2808 	return true;
2809 }
2810 #endif
2811 
2812 static inline void restore_saved_sigmask(void)
2813 {
2814 	if (test_and_clear_restore_sigmask())
2815 		__set_current_blocked(&current->saved_sigmask);
2816 }
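/*
 * Simplified sketch of how a ppoll()/pselect()-style syscall uses the
 * saved_sigmask machinery (illustrative, error handling elided;
 * do_the_wait() is a hypothetical wait helper):
 *
 *	sigset_t saved = current->blocked;
 *	set_current_blocked(&tmpmask);
 *	ret = do_the_wait();
 *	if (ret == -EINTR) {
 *		current->saved_sigmask = saved;
 *		set_restore_sigmask();
 *		ret = -ERESTARTNOHAND;
 *	} else {
 *		set_current_blocked(&saved);
 *	}
 */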
2817 
2818 static inline sigset_t *sigmask_to_save(void)
2819 {
2820 	sigset_t *res = &current->blocked;
2821 	if (unlikely(test_restore_sigmask()))
2822 		res = &current->saved_sigmask;
2823 	return res;
2824 }
2825 
2826 static inline int kill_cad_pid(int sig, int priv)
2827 {
2828 	return kill_pid(cad_pid, sig, priv);
2829 }
2830 
2831 /* These can be the second arg to send_sig_info/send_group_sig_info.  */
2832 #define SEND_SIG_NOINFO ((struct siginfo *) 0)
2833 #define SEND_SIG_PRIV	((struct siginfo *) 1)
2834 #define SEND_SIG_FORCED	((struct siginfo *) 2)
2835 
2836 /*
2837  * True if we are on the alternate signal stack.
2838  */
2839 static inline int on_sig_stack(unsigned long sp)
2840 {
2841 	/*
2842 	 * If the signal stack is SS_AUTODISARM then, by construction, we
2843 	 * can't be on the signal stack unless user code deliberately set
2844 	 * SS_AUTODISARM when we were already on it.
2845 	 *
2846 	 * This improves reliability: if user state gets corrupted such that
2847 	 * the stack pointer points very close to the end of the signal stack,
2848 	 * then this check will enable the signal to be handled anyway.
2849 	 */
2850 	if (current->sas_ss_flags & SS_AUTODISARM)
2851 		return 0;
2852 
2853 #ifdef CONFIG_STACK_GROWSUP
2854 	return sp >= current->sas_ss_sp &&
2855 		sp - current->sas_ss_sp < current->sas_ss_size;
2856 #else
2857 	return sp > current->sas_ss_sp &&
2858 		sp - current->sas_ss_sp <= current->sas_ss_size;
2859 #endif
2860 }
2861 
2862 static inline int sas_ss_flags(unsigned long sp)
2863 {
2864 	if (!current->sas_ss_size)
2865 		return SS_DISABLE;
2866 
2867 	return on_sig_stack(sp) ? SS_ONSTACK : 0;
2868 }
2869 
2870 static inline void sas_ss_reset(struct task_struct *p)
2871 {
2872 	p->sas_ss_sp = 0;
2873 	p->sas_ss_size = 0;
2874 	p->sas_ss_flags = SS_DISABLE;
2875 }
2876 
2877 static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2878 {
2879 	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2880 #ifdef CONFIG_STACK_GROWSUP
2881 		return current->sas_ss_sp;
2882 #else
2883 		return current->sas_ss_sp + current->sas_ss_size;
2884 #endif
2885 	return sp;
2886 }
2887 
2888 /*
2889  * Routines for handling mm_structs
2890  */
2891 extern struct mm_struct * mm_alloc(void);
2892 
2893 /**
2894  * mmgrab() - Pin a &struct mm_struct.
2895  * @mm: The &struct mm_struct to pin.
2896  *
2897  * Make sure that @mm will not get freed even after the owning task
2898  * exits. This doesn't guarantee that the associated address space
2899  * will still exist later on and mmget_not_zero() has to be used before
2900  * accessing it.
2901  *
2902  * This is the preferred way to pin @mm for a longer/unbounded amount
2903  * of time.
2904  *
2905  * Use mmdrop() to release the reference acquired by mmgrab().
2906  *
2907  * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
2908  * of &mm_struct.mm_count vs &mm_struct.mm_users.
2909  */
2910 static inline void mmgrab(struct mm_struct *mm)
2911 {
2912 	atomic_inc(&mm->mm_count);
2913 }
2914 
2915 /* mmdrop drops the mm and the page tables */
2916 extern void __mmdrop(struct mm_struct *);
2917 static inline void mmdrop(struct mm_struct *mm)
2918 {
2919 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2920 		__mmdrop(mm);
2921 }
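/*
 * Illustrative pinning sketch (not a new interface): mmgrab()/mmdrop()
 * keep the mm_struct itself allocated, while mmget_not_zero()/mmput()
 * (declared further down) pin the address space contents.
 *
 *	mmgrab(mm);
 *	...
 *	if (mmget_not_zero(mm)) {
 *		... the mappings are still valid here ...
 *		mmput(mm);
 *	}
 *	mmdrop(mm);
 */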
2922 
2923 static inline void mmdrop_async_fn(struct work_struct *work)
2924 {
2925 	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
2926 	__mmdrop(mm);
2927 }
2928 
2929 static inline void mmdrop_async(struct mm_struct *mm)
2930 {
2931 	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
2932 		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
2933 		schedule_work(&mm->async_put_work);
2934 	}
2935 }
2936 
2937 /**
2938  * mmget() - Pin the address space associated with a &struct mm_struct.
2939  * @mm: The address space to pin.
2940  *
2941  * Make sure that the address space of the given &struct mm_struct doesn't
2942  * go away. This does not protect against parts of the address space being
2943  * modified or freed, however.
2944  *
2945  * Never use this function to pin this address space for an
2946  * unbounded/indefinite amount of time.
2947  *
2948  * Use mmput() to release the reference acquired by mmget().
2949  *
2950  * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
2951  * of &mm_struct.mm_count vs &mm_struct.mm_users.
2952  */
2953 static inline void mmget(struct mm_struct *mm)
2954 {
2955 	atomic_inc(&mm->mm_users);
2956 }
2957 
2958 static inline bool mmget_not_zero(struct mm_struct *mm)
2959 {
2960 	return atomic_inc_not_zero(&mm->mm_users);
2961 }
2962 
2963 /* mmput gets rid of the mappings and all user-space */
2964 extern void mmput(struct mm_struct *);
2965 #ifdef CONFIG_MMU
2966 /* same as above but performs the slow path from the async context. Can
2967  * be called from the atomic context as well
2968  */
2969 extern void mmput_async(struct mm_struct *);
2970 #endif
2971 
2972 /* Grab a reference to a task's mm, if it is not already going away */
2973 extern struct mm_struct *get_task_mm(struct task_struct *task);
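/*
 * Common pattern (sketch only), e.g. from a hypothetical /proc handler
 * looking at another task's address space:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		... inspect mm under the appropriate locks ...
 *		mmput(mm);
 *	}
 */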
2974 /*
2975  * Grab a reference to a task's mm, if it is not already going away
2976  * and ptrace_may_access with the mode parameter passed to it
2977  * succeeds.
2978  */
2979 extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2980 /* Remove the current task's stale references to the old mm_struct */
2981 extern void mm_release(struct task_struct *, struct mm_struct *);
2982 
2983 #ifdef CONFIG_HAVE_COPY_THREAD_TLS
2984 extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
2985 			struct task_struct *, unsigned long);
2986 #else
2987 extern int copy_thread(unsigned long, unsigned long, unsigned long,
2988 			struct task_struct *);
2989 
2990 /* Architectures that haven't opted into copy_thread_tls get the tls argument
2991  * via pt_regs, so ignore the tls argument passed via C. */
2992 static inline int copy_thread_tls(
2993 		unsigned long clone_flags, unsigned long sp, unsigned long arg,
2994 		struct task_struct *p, unsigned long tls)
2995 {
2996 	return copy_thread(clone_flags, sp, arg, p);
2997 }
2998 #endif
2999 extern void flush_thread(void);
3000 
3001 #ifdef CONFIG_HAVE_EXIT_THREAD
3002 extern void exit_thread(struct task_struct *tsk);
3003 #else
3004 static inline void exit_thread(struct task_struct *tsk)
3005 {
3006 }
3007 #endif
3008 
3009 extern void exit_files(struct task_struct *);
3010 extern void __cleanup_sighand(struct sighand_struct *);
3011 
3012 extern void exit_itimers(struct signal_struct *);
3013 extern void flush_itimer_signals(void);
3014 
3015 extern void do_group_exit(int);
3016 
3017 extern int do_execve(struct filename *,
3018 		     const char __user * const __user *,
3019 		     const char __user * const __user *);
3020 extern int do_execveat(int, struct filename *,
3021 		       const char __user * const __user *,
3022 		       const char __user * const __user *,
3023 		       int);
3024 extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
3025 extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
3026 struct task_struct *fork_idle(int);
3027 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
3028 
3029 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
3030 static inline void set_task_comm(struct task_struct *tsk, const char *from)
3031 {
3032 	__set_task_comm(tsk, from, false);
3033 }
3034 extern char *get_task_comm(char *to, struct task_struct *tsk);
3035 
3036 #ifdef CONFIG_SMP
3037 void scheduler_ipi(void);
3038 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
3039 #else
3040 static inline void scheduler_ipi(void) { }
3041 static inline unsigned long wait_task_inactive(struct task_struct *p,
3042 					       long match_state)
3043 {
3044 	return 1;
3045 }
3046 #endif
3047 
3048 #define tasklist_empty() \
3049 	list_empty(&init_task.tasks)
3050 
3051 #define next_task(p) \
3052 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
3053 
3054 #define for_each_process(p) \
3055 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
3056 
3057 extern bool current_is_single_threaded(void);
3058 
3059 /*
3060  * Careful: do_each_thread/while_each_thread is a double loop so
3061  *          'break' will not work as expected - use goto instead.
3062  */
3063 #define do_each_thread(g, t) \
3064 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
3065 
3066 #define while_each_thread(g, t) \
3067 	while ((t = next_thread(t)) != g)
3068 
3069 #define __for_each_thread(signal, t)	\
3070 	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
3071 
3072 #define for_each_thread(p, t)		\
3073 	__for_each_thread((p)->signal, t)
3074 
3075 /* Careful: this is a double loop, 'break' won't work as expected. */
3076 #define for_each_process_thread(p, t)	\
3077 	for_each_process(p) for_each_thread(p, t)
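/*
 * Iteration sketch (illustrative): walk every thread in the system.
 * The caller must hold rcu_read_lock() or read_lock(&tasklist_lock);
 * do_something_with() is a hypothetical per-thread hook.
 *
 *	struct task_struct *p, *t;
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		do_something_with(t);
 *	rcu_read_unlock();
 */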
3078 
3079 typedef int (*proc_visitor)(struct task_struct *p, void *data);
3080 void walk_process_tree(struct task_struct *top, proc_visitor, void *);
3081 
3082 static inline int get_nr_threads(struct task_struct *tsk)
3083 {
3084 	return tsk->signal->nr_threads;
3085 }
3086 
3087 static inline bool thread_group_leader(struct task_struct *p)
3088 {
3089 	return p->exit_signal >= 0;
3090 }
3091 
3092 /* Due to the insanities of de_thread it is possible for a process
3093  * to have the pid of the thread group leader without actually being
3094  * the thread group leader.  For iteration through the pids in proc
3095  * all we care about is that we have a task with the appropriate
3096  * pid, we don't actually care if we have the right task.
3097  */
3098 static inline bool has_group_leader_pid(struct task_struct *p)
3099 {
3100 	return task_pid(p) == p->signal->leader_pid;
3101 }
3102 
3103 static inline
3104 bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
3105 {
3106 	return p1->signal == p2->signal;
3107 }
3108 
3109 static inline struct task_struct *next_thread(const struct task_struct *p)
3110 {
3111 	return list_entry_rcu(p->thread_group.next,
3112 			      struct task_struct, thread_group);
3113 }
3114 
3115 static inline int thread_group_empty(struct task_struct *p)
3116 {
3117 	return list_empty(&p->thread_group);
3118 }
3119 
3120 #define delay_group_leader(p) \
3121 		(thread_group_leader(p) && !thread_group_empty(p))
3122 
3123 /*
3124  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
3125  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
3126  * pins the final release of task.io_context.  Also protects ->cpuset and
3127  * ->cgroup.subsys[]. And ->vfork_done.
3128  *
3129  * Nests both inside and outside of read_lock(&tasklist_lock).
3130  * It must not be nested with write_lock_irq(&tasklist_lock),
3131  * neither inside nor outside.
3132  */
3133 static inline void task_lock(struct task_struct *p)
3134 {
3135 	spin_lock(&p->alloc_lock);
3136 }
3137 
3138 static inline void task_unlock(struct task_struct *p)
3139 {
3140 	spin_unlock(&p->alloc_lock);
3141 }
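/*
 * Sketch of the task_lock() pattern protecting ->comm (illustrative):
 *
 *	char name[TASK_COMM_LEN];
 *
 *	task_lock(p);
 *	strncpy(name, p->comm, TASK_COMM_LEN);
 *	task_unlock(p);
 *
 * get_task_comm() wraps exactly this.
 */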
3142 
3143 extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
3144 							unsigned long *flags);
3145 
3146 static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
3147 						       unsigned long *flags)
3148 {
3149 	struct sighand_struct *ret;
3150 
3151 	ret = __lock_task_sighand(tsk, flags);
3152 	(void)__cond_lock(&tsk->sighand->siglock, ret);
3153 	return ret;
3154 }
3155 
3156 static inline void unlock_task_sighand(struct task_struct *tsk,
3157 						unsigned long *flags)
3158 {
3159 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
3160 }
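/*
 * Usage sketch (illustrative): ->sighand can be freed while we look, so
 * the lock helper may fail and its result must be checked.
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		... p->sighand and p->signal are stable here ...
 *		unlock_task_sighand(p, &flags);
 *	}
 */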
3161 
3162 #ifdef CONFIG_THREAD_INFO_IN_TASK
3163 
3164 static inline struct thread_info *task_thread_info(struct task_struct *task)
3165 {
3166 	return &task->thread_info;
3167 }
3168 
3169 /*
3170  * When accessing the stack of a non-current task that might exit, use
3171  * try_get_task_stack() instead.  task_stack_page will return a pointer
3172  * that could get freed out from under you.
3173  */
3174 static inline void *task_stack_page(const struct task_struct *task)
3175 {
3176 	return task->stack;
3177 }
3178 
3179 #define setup_thread_stack(new,old)	do { } while(0)
3180 
3181 static inline unsigned long *end_of_stack(const struct task_struct *task)
3182 {
3183 	return task->stack;
3184 }
3185 
3186 #elif !defined(__HAVE_THREAD_FUNCTIONS)
3187 
3188 #define task_thread_info(task)	((struct thread_info *)(task)->stack)
3189 #define task_stack_page(task)	((void *)(task)->stack)
3190 
3191 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
3192 {
3193 	*task_thread_info(p) = *task_thread_info(org);
3194 	task_thread_info(p)->task = p;
3195 }
3196 
3197 /*
3198  * Return the address of the last usable long on the stack.
3199  *
3200  * When the stack grows down, this is just above the thread
3201  * info struct. Going any lower will corrupt the threadinfo.
3202  *
3203  * When the stack grows up, this is the highest address.
3204  * Beyond that position, we corrupt data on the next page.
3205  */
3206 static inline unsigned long *end_of_stack(struct task_struct *p)
3207 {
3208 #ifdef CONFIG_STACK_GROWSUP
3209 	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
3210 #else
3211 	return (unsigned long *)(task_thread_info(p) + 1);
3212 #endif
3213 }
3214 
3215 #endif
3216 
3217 #ifdef CONFIG_THREAD_INFO_IN_TASK
3218 static inline void *try_get_task_stack(struct task_struct *tsk)
3219 {
3220 	return atomic_inc_not_zero(&tsk->stack_refcount) ?
3221 		task_stack_page(tsk) : NULL;
3222 }
3223 
3224 extern void put_task_stack(struct task_struct *tsk);
3225 #else
3226 static inline void *try_get_task_stack(struct task_struct *tsk)
3227 {
3228 	return task_stack_page(tsk);
3229 }
3230 
3231 static inline void put_task_stack(struct task_struct *tsk) {}
3232 #endif
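/*
 * Illustrative sketch: inspect another task's stack without racing with
 * the task exiting and freeing it.
 *
 *	void *stack = try_get_task_stack(tsk);
 *	if (stack) {
 *		... read the stack contents ...
 *		put_task_stack(tsk);
 *	}
 */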
3233 
3234 #define task_stack_end_corrupted(task) \
3235 		(*(end_of_stack(task)) != STACK_END_MAGIC)
3236 
3237 static inline int object_is_on_stack(void *obj)
3238 {
3239 	void *stack = task_stack_page(current);
3240 
3241 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
3242 }
3243 
3244 extern void thread_stack_cache_init(void);
3245 
3246 #ifdef CONFIG_DEBUG_STACK_USAGE
3247 static inline unsigned long stack_not_used(struct task_struct *p)
3248 {
3249 	unsigned long *n = end_of_stack(p);
3250 
3251 	do { 	/* Skip over canary */
3252 # ifdef CONFIG_STACK_GROWSUP
3253 		n--;
3254 # else
3255 		n++;
3256 # endif
3257 	} while (!*n);
3258 
3259 # ifdef CONFIG_STACK_GROWSUP
3260 	return (unsigned long)end_of_stack(p) - (unsigned long)n;
3261 # else
3262 	return (unsigned long)n - (unsigned long)end_of_stack(p);
3263 # endif
3264 }
3265 #endif
3266 extern void set_task_stack_end_magic(struct task_struct *tsk);
3267 
3268 /* set thread flags in other task's structures
3269  * - see asm/thread_info.h for TIF_xxxx flags available
3270  */
3271 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
3272 {
3273 	set_ti_thread_flag(task_thread_info(tsk), flag);
3274 }
3275 
3276 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
3277 {
3278 	clear_ti_thread_flag(task_thread_info(tsk), flag);
3279 }
3280 
3281 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
3282 {
3283 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
3284 }
3285 
3286 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
3287 {
3288 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
3289 }
3290 
3291 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
3292 {
3293 	return test_ti_thread_flag(task_thread_info(tsk), flag);
3294 }
3295 
3296 static inline void set_tsk_need_resched(struct task_struct *tsk)
3297 {
3298 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
3299 }
3300 
3301 static inline void clear_tsk_need_resched(struct task_struct *tsk)
3302 {
3303 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
3304 }
3305 
3306 static inline int test_tsk_need_resched(struct task_struct *tsk)
3307 {
3308 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
3309 }
3310 
3311 static inline int restart_syscall(void)
3312 {
3313 	set_tsk_thread_flag(current, TIF_SIGPENDING);
3314 	return -ERESTARTNOINTR;
3315 }
3316 
3317 static inline int signal_pending(struct task_struct *p)
3318 {
3319 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
3320 }
3321 
3322 static inline int __fatal_signal_pending(struct task_struct *p)
3323 {
3324 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
3325 }
3326 
3327 static inline int fatal_signal_pending(struct task_struct *p)
3328 {
3329 	return signal_pending(p) && __fatal_signal_pending(p);
3330 }
3331 
3332 static inline int signal_pending_state(long state, struct task_struct *p)
3333 {
3334 	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
3335 		return 0;
3336 	if (!signal_pending(p))
3337 		return 0;
3338 
3339 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
3340 }
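
/*
 * Illustrative sketch (not part of this header): the classic interruptible
 * wait loop that backs out when a signal is pending ("condition" stands in
 * for whatever the caller is waiting on):
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (signal_pending(current)) {
 *			__set_current_state(TASK_RUNNING);
 *			return -ERESTARTSYS;
 *		}
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * A killable sleep checks fatal_signal_pending() instead, so that only
 * SIGKILL interrupts the wait.
 */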
3341 
3342 /*
3343  * cond_resched() and cond_resched_lock(): latency reduction via
3344  * explicit rescheduling in places where it is safe. The return
3345  * value indicates whether a reschedule actually took place.
3346  * cond_resched_lock() drops the spinlock before scheduling;
3347  * cond_resched_softirq() re-enables softirqs (bottom halves) first.
3348  */
3349 #ifndef CONFIG_PREEMPT
3350 extern int _cond_resched(void);
3351 #else
3352 static inline int _cond_resched(void) { return 0; }
3353 #endif
3354 
3355 #define cond_resched() ({			\
3356 	___might_sleep(__FILE__, __LINE__, 0);	\
3357 	_cond_resched();			\
3358 })
3359 
3360 extern int __cond_resched_lock(spinlock_t *lock);
3361 
3362 #define cond_resched_lock(lock) ({				\
3363 	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
3364 	__cond_resched_lock(lock);				\
3365 })
3366 
3367 extern int __cond_resched_softirq(void);
3368 
3369 #define cond_resched_softirq() ({					\
3370 	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
3371 	__cond_resched_softirq();					\
3372 })
3373 
3374 static inline void cond_resched_rcu(void)
3375 {
3376 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
3377 	rcu_read_unlock();
3378 	cond_resched();
3379 	rcu_read_lock();
3380 #endif
3381 }
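
/*
 * Illustrative sketch (not part of this header): breaking up a long,
 * non-atomic loop so a !CONFIG_PREEMPT kernel does not hog the CPU, and
 * the equivalent under a spinlock (process_one(), more_work() and
 * do_one_chunk() are hypothetical):
 *
 *	list_for_each_entry(obj, &big_list, node) {
 *		process_one(obj);
 *		cond_resched();
 *	}
 *
 *	spin_lock(&lock);
 *	while (more_work()) {
 *		do_one_chunk();
 *		cond_resched_lock(&lock);	// may drop, reschedule, re-take
 *	}
 *	spin_unlock(&lock);
 */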
3382 
3383 /*
3384  * Does a critical section need to be broken because another
3385  * task is waiting? (Technically this does not depend on CONFIG_PREEMPT,
3386  * but it reflects a general need for low latency.)
3387  */
3388 static inline int spin_needbreak(spinlock_t *lock)
3389 {
3390 #ifdef CONFIG_PREEMPT
3391 	return spin_is_contended(lock);
3392 #else
3393 	return 0;
3394 #endif
3395 }
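
/*
 * Illustrative sketch (not part of this header): a lock-break pattern that
 * combines spin_needbreak() with need_resched(), similar to what the page
 * table walkers do (process_one_entry() is hypothetical):
 *
 *	spin_lock(&ptl);
 *	for (i = 0; i < nr; i++) {
 *		process_one_entry(i);
 *		if (spin_needbreak(&ptl) || need_resched()) {
 *			spin_unlock(&ptl);
 *			cond_resched();
 *			spin_lock(&ptl);
 *		}
 *	}
 *	spin_unlock(&ptl);
 */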
3396 
3397 /*
3398  * Idle-thread-specific functions for managing the need_resched
3399  * polling state.
3400  */
3401 #ifdef TIF_POLLING_NRFLAG
3402 static inline int tsk_is_polling(struct task_struct *p)
3403 {
3404 	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
3405 }
3406 
3407 static inline void __current_set_polling(void)
3408 {
3409 	set_thread_flag(TIF_POLLING_NRFLAG);
3410 }
3411 
3412 static inline bool __must_check current_set_polling_and_test(void)
3413 {
3414 	__current_set_polling();
3415 
3416 	/*
3417 	 * Polling state must be visible before we test NEED_RESCHED;
3418 	 * this pairs with the barrier in resched_curr().
3419 	 */
3420 	smp_mb__after_atomic();
3421 
3422 	return unlikely(tif_need_resched());
3423 }
3424 
3425 static inline void __current_clr_polling(void)
3426 {
3427 	clear_thread_flag(TIF_POLLING_NRFLAG);
3428 }
3429 
3430 static inline bool __must_check current_clr_polling_and_test(void)
3431 {
3432 	__current_clr_polling();
3433 
3434 	/*
3435 	 * Polling state must be visible before we test NEED_RESCHED;
3436 	 * this pairs with the barrier in resched_curr().
3437 	 */
3438 	smp_mb__after_atomic();
3439 
3440 	return unlikely(tif_need_resched());
3441 }
3442 
3443 #else
3444 static inline int tsk_is_polling(struct task_struct *p) { return 0; }
3445 static inline void __current_set_polling(void) { }
3446 static inline void __current_clr_polling(void) { }
3447 
3448 static inline bool __must_check current_set_polling_and_test(void)
3449 {
3450 	return unlikely(tif_need_resched());
3451 }
3452 static inline bool __must_check current_clr_polling_and_test(void)
3453 {
3454 	return unlikely(tif_need_resched());
3455 }
3456 #endif
3457 
3458 static inline void current_clr_polling(void)
3459 {
3460 	__current_clr_polling();
3461 
3462 	/*
3463 	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
3464 	 * Once the bit is cleared, we'll get IPIs with every new
3465 	 * TIF_NEED_RESCHED, and the IPI handler, scheduler_ipi(), will also
3466 	 * fold it for us.
3467 	 */
3468 	smp_mb(); /* paired with resched_curr() */
3469 
3470 	preempt_fold_need_resched();
3471 }
3472 
3473 static __always_inline bool need_resched(void)
3474 {
3475 	return unlikely(tif_need_resched());
3476 }
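
/*
 * Illustrative sketch (not part of this header): a polling idle routine
 * advertises TIF_POLLING_NRFLAG so that resched_curr() can skip the IPI,
 * and the *_and_test() variants close the race with a TIF_NEED_RESCHED
 * that was set concurrently (my_poll_idle() is hypothetical):
 *
 *	static void my_poll_idle(void)
 *	{
 *		if (!current_set_polling_and_test()) {
 *			while (!need_resched())
 *				cpu_relax();
 *		}
 *		current_clr_polling();
 *	}
 */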
3477 
3478 /*
3479  * Thread group CPU time accounting.
3480  */
3481 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
3482 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
3483 
3484 /*
3485  * Reevaluate whether the task has signals pending delivery.
3486  * Wake the task if so.
3487  * This is required every time the blocked sigset_t changes.
3488  * Callers must hold sighand->siglock.
3489  */
3490 extern void recalc_sigpending_and_wake(struct task_struct *t);
3491 extern void recalc_sigpending(void);
3492 
3493 extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
3494 
3495 static inline void signal_wake_up(struct task_struct *t, bool resume)
3496 {
3497 	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
3498 }
3499 static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
3500 {
3501 	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
3502 }
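
/*
 * Illustrative sketch (not part of this header): changing the blocked mask
 * and re-evaluating TIF_SIGPENDING under the required lock, roughly as
 * sigprocmask() ends up doing (newset is a hypothetical sigset_t):
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	current->blocked = newset;
 *	recalc_sigpending();
 *	spin_unlock_irq(&current->sighand->siglock);
 */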
3503 
3504 /*
3505  * Wrappers for p->thread_info->cpu (or p->cpu) access. No-op on UP.
3506  */
3507 #ifdef CONFIG_SMP
3508 
3509 static inline unsigned int task_cpu(const struct task_struct *p)
3510 {
3511 #ifdef CONFIG_THREAD_INFO_IN_TASK
3512 	return p->cpu;
3513 #else
3514 	return task_thread_info(p)->cpu;
3515 #endif
3516 }
3517 
3518 static inline int task_node(const struct task_struct *p)
3519 {
3520 	return cpu_to_node(task_cpu(p));
3521 }
3522 
3523 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
3524 
3525 #else
3526 
3527 static inline unsigned int task_cpu(const struct task_struct *p)
3528 {
3529 	return 0;
3530 }
3531 
3532 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
3533 {
3534 }
3535 
3536 #endif /* CONFIG_SMP */
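
/*
 * Illustrative sketch (not part of this header): task_cpu()/task_node()
 * report where a task last ran, which can be used to keep allocations made
 * on its behalf node-local (p and order are hypothetical):
 *
 *	page = alloc_pages_node(task_node(p), GFP_KERNEL, order);
 */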
3537 
3538 /*
3539  * In order to reduce various lock holder preemption latencies provide an
3540  * interface to see if a vCPU is currently running or not.
3541  *
3542  * This allows us to terminate optimistic spin loops and block, analogous to
3543  * the native optimistic spin heuristic of testing if the lock owner task is
3544  * running or not.
3545  */
3546 #ifndef vcpu_is_preempted
3547 # define vcpu_is_preempted(cpu)	false
3548 #endif
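
/*
 * Illustrative sketch (not part of this header): an optimistic owner-spin
 * loop that stops spinning once the lock owner's vCPU has been preempted,
 * in the spirit of the mutex/rwsem spinning code (owner_running() is
 * hypothetical):
 *
 *	while (owner_running(lock, owner)) {
 *		if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
 *			break;		// stop spinning and block instead
 *		cpu_relax();
 *	}
 */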
3549 
3550 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
3551 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
3552 
3553 #ifdef CONFIG_CGROUP_SCHED
3554 extern struct task_group root_task_group;
3555 #endif /* CONFIG_CGROUP_SCHED */
3556 
3557 extern int task_can_switch_user(struct user_struct *up,
3558 					struct task_struct *tsk);
3559 
3560 #ifdef CONFIG_TASK_XACCT
3561 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3562 {
3563 	tsk->ioac.rchar += amt;
3564 }
3565 
3566 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3567 {
3568 	tsk->ioac.wchar += amt;
3569 }
3570 
3571 static inline void inc_syscr(struct task_struct *tsk)
3572 {
3573 	tsk->ioac.syscr++;
3574 }
3575 
3576 static inline void inc_syscw(struct task_struct *tsk)
3577 {
3578 	tsk->ioac.syscw++;
3579 }
3580 #else
3581 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3582 {
3583 }
3584 
3585 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3586 {
3587 }
3588 
3589 static inline void inc_syscr(struct task_struct *tsk)
3590 {
3591 }
3592 
3593 static inline void inc_syscw(struct task_struct *tsk)
3594 {
3595 }
3596 #endif
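
/*
 * Illustrative sketch (not part of this header): the read(2) path accounts
 * I/O with these helpers when CONFIG_TASK_XACCT is enabled (they compile
 * away otherwise); ret, buf, count and pos are hypothetical locals:
 *
 *	ret = vfs_read(file, buf, count, &pos);
 *	if (ret >= 0)
 *		add_rchar(current, ret);
 *	inc_syscr(current);
 */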
3597 
3598 #ifndef TASK_SIZE_OF
3599 #define TASK_SIZE_OF(tsk)	TASK_SIZE
3600 #endif
3601 
3602 #ifdef CONFIG_MEMCG
3603 extern void mm_update_next_owner(struct mm_struct *mm);
3604 #else
3605 static inline void mm_update_next_owner(struct mm_struct *mm)
3606 {
3607 }
3608 #endif /* CONFIG_MEMCG */
3609 
3610 static inline unsigned long task_rlimit(const struct task_struct *tsk,
3611 		unsigned int limit)
3612 {
3613 	return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
3614 }
3615 
3616 static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
3617 		unsigned int limit)
3618 {
3619 	return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
3620 }
3621 
3622 static inline unsigned long rlimit(unsigned int limit)
3623 {
3624 	return task_rlimit(current, limit);
3625 }
3626 
3627 static inline unsigned long rlimit_max(unsigned int limit)
3628 {
3629 	return task_rlimit_max(current, limit);
3630 }
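
/*
 * Illustrative sketch (not part of this header): checking a resource limit
 * for the current task, in the spirit of the mlock() path (locked_pages is
 * a hypothetical local):
 *
 *	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 *
 *	if (locked_pages > lock_limit && !capable(CAP_IPC_LOCK))
 *		return -ENOMEM;
 */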
3631 
3632 #define SCHED_CPUFREQ_RT	(1U << 0)
3633 #define SCHED_CPUFREQ_DL	(1U << 1)
3634 #define SCHED_CPUFREQ_IOWAIT	(1U << 2)
3635 
3636 #define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
3637 
3638 #ifdef CONFIG_CPU_FREQ
3639 struct update_util_data {
3640 	void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
3641 };
3642 
3643 void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
3644                        void (*func)(struct update_util_data *data, u64 time,
3645 				    unsigned int flags));
3646 void cpufreq_remove_update_util_hook(int cpu);
3647 #endif /* CONFIG_CPU_FREQ */
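
/*
 * Illustrative sketch (not part of this header): a cpufreq governor
 * registers a per-CPU callback that the scheduler invokes on utilization
 * updates, in the spirit of schedutil (all names hypothetical):
 *
 *	static DEFINE_PER_CPU(struct update_util_data, my_update_util);
 *
 *	static void my_util_hook(struct update_util_data *data, u64 time,
 *				 unsigned int flags)
 *	{
 *		if (flags & SCHED_CPUFREQ_IOWAIT)
 *			;	// e.g. boost the frequency after iowait wakeups
 *	}
 *
 *	// for each CPU in the policy, on governor start:
 *	cpufreq_add_update_util_hook(cpu, &per_cpu(my_update_util, cpu),
 *				     my_util_hook);
 *	// and on governor stop:
 *	cpufreq_remove_update_util_hook(cpu);
 */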
3648 
3649 #endif
3650