#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * cloning flags:
 */
#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
#define CLONE_THREAD	0x00010000	/* Same thread group? */
#define CLONE_NEWNS	0x00020000	/* New namespace group? */
#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
#define CLONE_STOPPED		0x02000000	/* Start in stopped state */
#define CLONE_NEWUTS		0x04000000	/* New utsname group? */
#define CLONE_NEWIPC		0x08000000	/* New ipcs */
#define CLONE_NEWUSER		0x10000000	/* New user namespace */
#define CLONE_NEWPID		0x20000000	/* New pid namespace */
#define CLONE_NEWNET		0x40000000	/* New network namespace */

/*
 * Scheduling policies
 */
#define SCHED_NORMAL		0
#define SCHED_FIFO		1
#define SCHED_RR		2
#define SCHED_BATCH		3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE		5

#ifdef __KERNEL__

struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>

#include <asm/system.h>
#include <asm/semaphore.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/securebits.h>
#include <linux/fs_struct.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/futex.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/kobject.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;
struct bio;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
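/*
 * Illustrative sketch (not part of this header): the periodic load-average
 * update applies CALC_LOAD once per LOAD_FREQ ticks to each of the three
 * averages. The count_active_tasks() helper below is an assumption for the
 * example; it is taken to return the number of runnable plus uninterruptible
 * tasks scaled by FIXED_1.
 *
 *	static void example_calc_load(void)
 *	{
 *		unsigned long active = count_active_tasks();	// nr tasks * FIXED_1
 *
 *		CALC_LOAD(avenrun[0], EXP_1, active);	// 1-minute average
 *		CALC_LOAD(avenrun[1], EXP_5, active);	// 5-minute average
 *		CALC_LOAD(avenrun[2], EXP_15, active);	// 15-minute average
 *	}
 */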
extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);
extern unsigned long weighted_cpuload(const int cpu);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#else
static inline void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
}
static inline void proc_sched_set_task(struct task_struct *p)
{
}
static inline void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define TASK_STOPPED		4
#define TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_DEAD		64

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	set_mb(current->state, (state_value))

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

extern void sched_init(void);
extern void sched_init_smp(void);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern cpumask_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
#else
static inline int select_nohz_load_balancer(int cpu)
{
	return 0;
}
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void account_process_tick(struct task_struct *task, int user);
extern void update_process_times(int user);
extern void scheduler_tick(void);

#ifdef CONFIG_DETECT_SOFTLOCKUP
extern void softlockup_tick(void);
extern void spawn_softlockup_task(void);
extern void touch_softlockup_watchdog(void);
extern void touch_all_softlockup_watchdogs(void);
extern int softlockup_thresh;
#else
static inline void softlockup_tick(void)
{
}
static inline void spawn_softlockup_task(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
#endif


/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long FASTCALL(schedule_timeout(signed long timeout));
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);

struct nsproxy;
struct user_namespace;

/* Maximum number of active map areas.. This is a random (large) number */
#define DEFAULT_MAX_MAP_COUNT	65536

extern int sysctl_max_map_count;

#include <linux/aio.h>

extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
/*
 * The mm counters are not protected by its page_table_lock,
 * so must be incremented atomically.
 */
#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)

#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
/*
 * The mm counters are protected by its page_table_lock,
 * so can be incremented directly.
 */
#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
#define get_mm_counter(mm, member) ((mm)->_##member)
#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
#define inc_mm_counter(mm, member) (mm)->_##member++
#define dec_mm_counter(mm, member) (mm)->_##member--

#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */

#define get_mm_rss(mm)					\
	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
#define update_hiwater_rss(mm)	do {			\
	unsigned long _rss = get_mm_rss(mm);		\
	if ((mm)->hiwater_rss < _rss)			\
		(mm)->hiwater_rss = _rss;		\
} while (0)
#define update_hiwater_vm(mm)	do {			\
	if ((mm)->hiwater_vm < (mm)->total_vm)		\
		(mm)->hiwater_vm = (mm)->total_vm;	\
} while (0)
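/*
 * Illustrative sketch (not part of this header): a page-fault path that has
 * just mapped a new anonymous page into @mm could account for it and refresh
 * the RSS high-water mark like this. The surrounding function name is made
 * up for the example.
 *
 *	static void example_account_anon_page(struct mm_struct *mm)
 *	{
 *		inc_mm_counter(mm, anon_rss);	// one more anonymous page
 *		update_hiwater_rss(mm);		// remember the peak RSS
 *		// total RSS in pages = file-backed + anonymous:
 *		// unsigned long rss = get_mm_rss(mm);
 *	}
 */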
extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE      0  /* core dump is permitted */
#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
#define MMF_DUMPABLE_BITS 2

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	5
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED))

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		count;
	atomic_t		live;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	struct task_struct	*group_exit_task;
	int			notify_count;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct task_struct *tsk;
	ktime_t it_real_incr;

	/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
	cputime_t it_prof_expires, it_virt_expires;
	cputime_t it_prof_incr, it_virt_incr;

	/* job control IDs */

	/*
	 * pgrp and session fields are deprecated.
	 * use the task_session_Xnr and task_pgrp_Xnr routines below
	 */

	union {
		pid_t pgrp __deprecated;
		pid_t __pgrp;
	};

	struct pid *tty_old_pgrp;

	union {
		pid_t session __deprecated;
		pid_t __session;
	};

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;

	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader.  (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

	struct list_head cpu_timers[3];

	/* keep the process-shared keyrings here so that they do the right
	 * thing in threads created with CLONE_THREAD */
#ifdef CONFIG_KEYS
	struct key *session_keyring;	/* keyring inherited over fork */
	struct key *process_keyring;	/* keyring private to this process */
#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif
};

/* Context switch must be unlocked if interrupts are to be enabled */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
# define __ARCH_WANT_UNLOCKED_CTXSW
#endif

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	uid_t uid;

#ifdef CONFIG_FAIR_USER_SCHED
	struct task_group *tg;
#ifdef CONFIG_SYSFS
	struct kset kset;
	struct subsys_attribute user_attr;
	struct work_struct work;
#endif
#endif
};

#ifdef CONFIG_FAIR_USER_SCHED
extern int uids_kobject_init(void);
#else
static inline int uids_kobject_init(void) { return 0; }
#endif

extern struct user_struct *find_user(uid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)

struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long cpu_time,  /* time spent on the cpu */
			   run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
#ifdef CONFIG_SCHEDSTATS
	/* BKL stats */
	unsigned int bkl_count;
#endif
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_SCHEDSTATS
extern const struct file_operations proc_schedstat_operations;
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * sched-domains (multiprocessor balancing) declarations:
 */

/*
 * Increase resolution of nice-level calculations:
 */
#define SCHED_LOAD_SHIFT	10
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

#define SCHED_LOAD_SCALE_FUZZ	SCHED_LOAD_SCALE

#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		4	/* Balance on exec */
#define SD_BALANCE_FORK		8	/* Balance on fork, clone */
#define SD_WAKE_IDLE		16	/* Wake to idle CPU on task wakeup */
#define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
#define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
#define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */
#define SD_POWERSAVINGS_BALANCE	256	/* Balance for power savings */
#define SD_SHARE_PKG_RESOURCES	512	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		1024	/* Only a single load balancing instance */

#define BALANCE_FOR_MC_POWER	\
	(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)

#define BALANCE_FOR_PKG_POWER	\
	((sched_mc_power_savings || sched_smt_power_savings) ?	\
	 SD_POWERSAVINGS_BALANCE : 0)

#define test_sd_parent(sd, flag)	((sd->parent &&		\
					 (sd->parent->flags & flag)) ? 1 : 0)


struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	cpumask_t cpumask;

	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU. This is read only (except for setup, hotplug CPU).
	 * Note: never change cpu_power without recomputing its reciprocal.
	 */
	unsigned int __cpu_power;
	/*
	 * reciprocal value of cpu_power to avoid expensive divides
	 * (see include/linux/reciprocal_div.h)
	 */
	u32 reciprocal_cpu_power;
};

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	cpumask_t span;			/* span of all CPUs in this domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	int flags;			/* See SD_* */

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
};

extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new);

#endif	/* CONFIG_SMP */

/*
 * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
 * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
 * task of nice 0 or enough lower priority tasks to bring up the
 * weighted_cpuload
 */
static inline int above_background_load(void)
{
	unsigned long cpu;

	for_each_online_cpu(cpu) {
		if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE)
			return 1;
	}
	return 0;
}

struct io_context;			/* See blkdev.h */
#define NGROUPS_SMALL		32
#define NGROUPS_PER_BLOCK	((int)(PAGE_SIZE / sizeof(gid_t)))
struct group_info {
	int ngroups;
	atomic_t usage;
	gid_t small_block[NGROUPS_SMALL];
	int nblocks;
	gid_t *blocks[0];
};

/*
 * get_group_info() must be called with the owning task locked (via task_lock())
 * when task != current.  The reason being that the vast majority of callers are
 * looking at current->group_info, which can not be changed except by the
 * current task.  Changing current->group_info requires the task lock, too.
 */
#define get_group_info(group_info) do { \
	atomic_inc(&(group_info)->usage); \
} while (0)

#define put_group_info(group_info) do { \
	if (atomic_dec_and_test(&(group_info)->usage)) \
		groups_free(group_info); \
} while (0)

extern struct group_info *groups_alloc(int gidsetsize);
extern void groups_free(struct group_info *group_info);
extern int set_current_groups(struct group_info *group_info);
extern int groups_search(struct group_info *group_info, gid_t grp);
/* access the groups "array" with this macro */
#define GROUP_AT(gi, i) \
	((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
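/*
 * Illustrative sketch (not part of this header): GROUP_AT() hides the
 * two-level blocks[] layout, so supplementary groups can be walked as if
 * they were a flat array. The function name below is made up.
 *
 *	static int example_in_groups(struct group_info *gi, gid_t grp)
 *	{
 *		int i;
 *
 *		for (i = 0; i < gi->ngroups; i++)
 *			if (GROUP_AT(gi, i) == grp)
 *				return 1;	// found among supplementary groups
 *		return 0;
 *	}
 */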
#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct rq;
struct sched_domain;

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
	void (*yield_task) (struct rq *rq);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);

	struct task_struct * (*pick_next_task) (struct rq *rq);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
			struct rq *busiest, unsigned long max_load_move,
			struct sched_domain *sd, enum cpu_idle_type idle,
			int *all_pinned, int *this_best_prio);

	int (*move_one_task) (struct rq *this_rq, int this_cpu,
			      struct rq *busiest, struct sched_domain *sd,
			      enum cpu_idle_type idle);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p);
	void (*task_new) (struct rq *rq, struct task_struct *p);
};

struct load_weight {
	unsigned long weight, inv_weight;
};

/*
 * CFS stats for a schedulable entity (task, task-group etc)
 *
 * Current field usage histogram:
 *
 *     4 se->block_start
 *     4 se->run_node
 *     4 se->sleep_start
 *     6 se->load.weight
 */
struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

#ifdef CONFIG_SCHEDSTATS
	u64			wait_start;
	u64			wait_max;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations;
	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;
	u64			nr_forced2_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif
};
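/*
 * Illustrative sketch (not part of this header): the scheduler classes above
 * form a singly linked, priority-ordered list via ->next, and the core picks
 * the next task by asking each class in turn. The list head name
 * (sched_class_highest) is an assumption for the example.
 *
 *	static struct task_struct *example_pick_next_task(struct rq *rq)
 *	{
 *		const struct sched_class *class;
 *		struct task_struct *p;
 *
 *		for (class = sched_class_highest; class; class = class->next) {
 *			p = class->pick_next_task(rq);
 *			if (p)
 *				return p;	// highest-priority class wins
 *		}
 *		return NULL;	// not reached: the idle class always has a task
 *	}
 */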
struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

	int lock_depth;		/* BKL lock depth */

#ifdef CONFIG_SMP
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
	int oncpu;
#endif
#endif

	int prio, static_prio, normal_prio;
	struct list_head run_list;
	const struct sched_class *sched_class;
	struct sched_entity se;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

	unsigned short ioprio;
	/*
	 * fpu_counter contains the number of consecutive context switches
	 * in which the FPU was used. If this exceeds a threshold, the lazy
	 * FPU saving becomes unlazy to avoid the trap. This is an unsigned
	 * char so that after 256 times the counter wraps and the behavior
	 * turns lazy again; this is to deal with bursty apps that only use
	 * the FPU for a short time.
	 */
	unsigned char fpu_counter;
	s8 oomkilladj; /* OOM kill score adjustment (bit shift). */
#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	cpumask_t cpus_allowed;
	unsigned int time_slice;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
#endif

	struct list_head tasks;
	/*
	 * ptrace_list/ptrace_children forms the list of my children
	 * that were stolen by a ptracer.
	 */
	struct list_head ptrace_children;
	struct list_head ptrace_list;

	struct mm_struct *mm, *active_mm;

/* task state */
	struct linux_binfmt *binfmt;
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;  /* The signal sent when the parent dies */
	/* ??? */
	unsigned int personality;
	unsigned did_exec:1;
	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->parent->pid)
	 */
	struct task_struct *real_parent; /* real parent process (when being debugged) */
	struct task_struct *parent;	/* parent process */
	/*
	 * children/sibling forms the list of my children plus the
	 * tasks I'm ptracing.
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	unsigned int rt_priority;
	cputime_t utime, stime, utimescaled, stimescaled;
	cputime_t gtime;
	cputime_t prev_utime, prev_stime;
	unsigned long nvcsw, nivcsw; /* context switch counts */
	struct timespec start_time;		/* monotonic time */
	struct timespec real_start_time;	/* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

	cputime_t it_prof_expires, it_virt_expires;
	unsigned long long it_sched_expires;
	struct list_head cpu_timers[3];

/* process credentials */
	uid_t uid,euid,suid,fsuid;
	gid_t gid,egid,sgid,fsgid;
	struct group_info *group_info;
	kernel_cap_t cap_effective, cap_inheritable, cap_permitted;
	unsigned keep_capabilities:1;
	struct user_struct *user;
#ifdef CONFIG_KEYS
	struct key *request_key_auth;	/* assumed request_key authority */
	struct key *thread_keyring;	/* keyring private to this thread */
	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
#endif
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by flush_old_exec */
/* file system info */
	int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;		/* To be restored with TIF_RESTORE_SIGMASK */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct audit_context *audit_context;
	seccomp_t seccomp;

/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct plist_head pi_waiters;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	int hardirqs_enabled;
	unsigned long hardirq_enable_ip;
	unsigned int hardirq_enable_event;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_disable_event;
	int softirqs_enabled;
	unsigned long softirq_disable_ip;
	unsigned int softirq_disable_event;
	unsigned long softirq_enable_ip;
	unsigned int softirq_enable_event;
	int hardirq_context;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 30UL
	u64 curr_chain_key;
	int lockdep_depth;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	unsigned int lockdep_recursion;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio *bio_list, **bio_tail;

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
#ifdef CONFIG_TASK_XACCT
/* i/o counters (bytes read/written, #syscalls) */
	u64 rchar, wchar, syscr, syscw;
#endif
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	cputime_t acct_stimexpd;/* stime since last update */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;
	short il_next;
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;
	int cpuset_mems_generation;
	int cpuset_mem_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
	atomic_t fs_excl;	/* holding fs exclusive resources */
	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;
#ifdef	CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	struct prop_local_single dirties;
};

/*
 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
 * values are inverted: lower p->prio value means higher priority.
 *
 * The MAX_USER_RT_PRIO value allows the actual maximum
 * RT priority to be separate from the value exported to
 * user-space.  This allows kernel threads to set their
 * priority to a value higher than any user task. Note:
 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
 */

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO

#define MAX_PRIO		(MAX_RT_PRIO + 40)
#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
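/*
 * Worked example of the ranges above (illustrative comment): with
 * MAX_USER_RT_PRIO == 100, MAX_RT_PRIO is 100, MAX_PRIO is 140 and
 * DEFAULT_PRIO is 120. RT tasks occupy p->prio 0..99 (a user sched_priority
 * of p maps to prio MAX_RT_PRIO-1 - p, so a higher sched_priority means a
 * lower, i.e. better, prio value), while SCHED_NORMAL/SCHED_BATCH tasks
 * occupy 100..139, with nice 0 at DEFAULT_PRIO (120) and nice -20..19
 * mapping to 100..139.
 */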
static inline int rt_prio(int prio)
{
	if (unlikely(prio < MAX_RT_PRIO))
		return 1;
	return 0;
}

static inline int rt_task(struct task_struct *p)
{
	return rt_prio(p->prio);
}

static inline void set_task_session(struct task_struct *tsk, pid_t session)
{
	tsk->signal->__session = session;
}

static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
{
	tsk->signal->__pgrp = pgrp;
}

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

struct pid_namespace;

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the namespace the task
 *                     belongs to. this only makes sense when called in the
 *                     context of the task that belongs to the same namespace;
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * task_ppid_nr_ns() : the parent's id as seen from the namespace specified.
 *                     the result depends on the namespace and whether the
 *                     task in question is the namespace's init. e.g. for the
 *                     namespace's init this will return 0 when called from
 *                     the namespace of this init, or appropriate id otherwise.
 *
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
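/*
 * Illustrative sketch (not part of this header): for a task living in a
 * child pid namespace, the same task yields different numbers depending on
 * the viewpoint. The variable names are made up for the example.
 *
 *	pid_t global_pid  = task_pid_nr(tsk);		// as seen from the init namespace
 *	pid_t virtual_pid = task_pid_vnr(tsk);		// as seen inside tsk's own namespace
 *	pid_t ns_pid      = task_pid_nr_ns(tsk, ns);	// as seen from a given namespace
 *
 * A container's init typically has global_pid > 1 but virtual_pid == 1.
 */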
static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_pid(tsk));
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}


static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return tsk->signal->__pgrp;
}

pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_pgrp(tsk));
}


static inline pid_t task_session_nr(struct task_struct *tsk)
{
	return tsk->signal->__session;
}

pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_session(tsk));
}


static inline pid_t task_ppid_nr_ns(struct task_struct *tsk,
		struct pid_namespace *ns)
{
	return pid_nr_ns(task_pid(rcu_dereference(tsk->real_parent)), ns);
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 */
static inline int pid_alive(struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_global_init - check if a task structure is init
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return tsk->pid == 1;
}

/*
 * is_container_init:
 * check whether the task is init in its own pid namespace.
 */
extern int is_container_init(struct task_struct *tsk);

extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}

/*
 * Per process flags
 */
#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
					/* Not implemented yet, only for 486*/
#define PF_STARTING	0x00000002	/* being created */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_SWAPOFF	0x00080000	/* I am in swapoff */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_BORROWED_MM	0x00200000	/* I am a kthread doing use_mm */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
#define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? \
		PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

#ifdef CONFIG_SMP
extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
#else
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
	if (!cpu_isset(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

extern unsigned long long sched_clock(void);

/*
 * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
 * clock constructed from sched_clock():
 */
extern unsigned long long cpu_clock(int cpu);

extern unsigned long long
task_sched_runtime(struct task_struct *task);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec() {}
#endif

extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

extern void sched_idle_next(void);

#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_batch_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;

int sched_nr_latency_handler(struct ctl_table *table, int write,
		struct file *file, void __user *buffer, size_t *length,
		loff_t *ppos);
#endif

extern unsigned int sysctl_sched_compat_yield;

#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern void rt_mutex_adjust_pi(struct task_struct *p);
#else
static inline int rt_mutex_getprio(struct task_struct *p)
{
	return p->normal_prio;
}
# define rt_mutex_adjust_pi(p)		do { } while (0)
#endif

extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
extern int task_nice(const struct task_struct *p);
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
extern struct task_struct *idle_task(int cpu);
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);

void yield(void);

/*
 * The default (Linux) execution domain.
 */
extern struct exec_domain	default_exec_domain;

union thread_union {
	struct thread_info thread_info;
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct mm_struct init_mm;

extern struct pid_namespace init_pid_ns;

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_type_ns():
 *      it is the most generic call - it finds a task by all id,
 *      type and namespace specified
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 * find_task_by_pid():
 *      finds a task by its global pid
 *
 * see also find_pid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
		struct pid_namespace *ns);

extern struct task_struct *find_task_by_pid(pid_t nr);
extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
		struct pid_namespace *ns);

extern void __set_special_pids(pid_t session, pid_t pgrp);

/* per-UID process charging. */
extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
	atomic_inc(&u->__count);
	return u;
}
extern void free_uid(struct user_struct *);
extern void switch_uid(struct user_struct *);
extern void release_uids(struct user_namespace *ns);

#include <asm/current.h>

extern void do_timer(unsigned long ticks);

extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
extern int FASTCALL(wake_up_process(struct task_struct * tsk));
extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
						unsigned long clone_flags));
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
#else
 static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void sched_fork(struct task_struct *p, int clone_flags);
extern void sched_dead(struct task_struct *p);

extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	ret = dequeue_signal(tsk, mask, info);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);

	return ret;
}

extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
extern void unblock_all_signals(void);
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int send_group_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern void zap_other_threads(struct task_struct *p);
extern int kill_proc(pid_t, int, int);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(int, struct sigqueue *, struct task_struct *);
extern int send_group_sigqueue(int, struct sigqueue *, struct task_struct *);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

/* True if we are on the alternate signal stack.  */

static inline int on_sig_stack(unsigned long sp)
{
	return (sp - current->sas_ss_sp < current->sas_ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}
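/*
 * Illustrative sketch (not part of this header): signal-frame setup code
 * typically uses sas_ss_flags() to decide whether to switch to the alternate
 * stack configured with sigaltstack(). The function name is made up for the
 * example.
 *
 *	static unsigned long example_sigframe_sp(struct k_sigaction *ka,
 *						 unsigned long sp)
 *	{
 *		if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(sp) == 0)
 *			sp = current->sas_ss_sp + current->sas_ss_size;
 *		return sp;	// the frame will be built below this address
 *	}
 */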
/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void FASTCALL(__mmdrop(struct mm_struct *));
static inline void mmdrop(struct mm_struct * mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_signal(struct signal_struct *);
extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);

extern NORET_TYPE void do_group_exit(int);

extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);

extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern void get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void wait_task_inactive(struct task_struct * p);
#else
#define wait_task_inactive(p)	do { } while (0)
#endif

#define remove_parent(p)	list_del_init(&(p)->sibling)
#define add_parent(p)		list_add_tail(&(p)->sibling,&(p)->parent->children)

#define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 * 'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)
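/*
 * Illustrative sketch (not part of this header): walking every thread of
 * every process. Because the construct is a double loop, bail out with a
 * goto rather than 'break'. The predicate name is made up.
 *
 *	struct task_struct *g, *t;
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		if (example_match(t))
 *			goto out;	// 'break' would only leave the inner loop
 *	} while_each_thread(g, t);
 * out:
 *	read_unlock(&tasklist_lock);
 */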

extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_signal(struct signal_struct *);
extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);

extern NORET_TYPE void do_group_exit(int);

extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);

extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern void get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void wait_task_inactive(struct task_struct * p);
#else
#define wait_task_inactive(p)	do { } while (0)
#endif

#define remove_parent(p)	list_del_init(&(p)->sibling)
#define add_parent(p)		list_add_tail(&(p)->sibling,&(p)->parent->children)

#define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 * 'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

/* de_thread depends on thread_group_leader not being a pid based check */
#define thread_group_leader(p)	(p == p->group_leader)

/* Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid; we don't actually care if we have the right task.
 */
static inline int has_group_leader_pid(struct task_struct *p)
{
	return p->pid == p->tgid;
}

static inline
int same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->tgid == p2->tgid;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry(rcu_dereference(p->thread_group.next),
			  struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))
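
/*
 * Illustrative sketch, not part of the original header: walking every
 * thread in the system with do_each_thread()/while_each_thread().  As
 * the comment above the macros warns, this is a double loop, so the
 * example bails out with a goto rather than 'break'.  The name
 * example_find_task_by_mm() is made up; the caller is assumed to hold
 * read_lock(&tasklist_lock) (or otherwise keep the task list stable).
 */
static inline struct task_struct *example_find_task_by_mm(struct mm_struct *mm)
{
	struct task_struct *g, *t;

	do_each_thread(g, t) {
		if (t->mm == mm)
			goto found;
	} while_each_thread(g, t);
	return NULL;

found:
	return t;
}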

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[].
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}

extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

static inline unsigned long *end_of_stack(struct task_struct *p)
{
	return (unsigned long *)(task_thread_info(p) + 1);
}

#endif

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}

static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe.  The return
 * value indicates whether a reschedule was in fact done.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
extern int cond_resched(void);
extern int cond_resched_lock(spinlock_t * lock);
extern int cond_resched_softirq(void);

/*
 * Does a critical section need to be broken due to another
 * task waiting?:
 */
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
# define need_lockbreak(lock) ((lock)->break_lock)
#else
# define need_lockbreak(lock) 0
#endif

/*
 * Does a critical section need to be broken due to another
 * task waiting or preemption being signalled:
 */
static inline int lock_need_resched(spinlock_t *lock)
{
	if (need_lockbreak(lock) || need_resched())
		return 1;
	return 0;
}
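
/*
 * Illustrative sketch, not part of the original header: a long-running
 * loop that keeps latency down by rescheduling voluntarily.  The name
 * example_scan_items() and its nr_items parameter are made up for this
 * example.
 */
static inline void example_scan_items(int nr_items)
{
	int i;

	for (i = 0; i < nr_items; i++) {
		/* ... do one unit of work here ... */

		/*
		 * Give other tasks a chance to run; cond_resched() only
		 * reschedules when TIF_NEED_RESCHED is set, so this is
		 * cheap when nothing is waiting.
		 */
		cond_resched();
	}
}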

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up(struct task_struct *t, int resume_stopped);

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
extern void arch_pick_mmap_layout(struct mm_struct *mm);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}
#endif

extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
extern long sched_getaffinity(pid_t pid, cpumask_t *mask);

extern int sched_mc_power_savings, sched_smt_power_savings;

extern void normalize_rt_tasks(void);

#ifdef CONFIG_FAIR_GROUP_SCHED

extern struct task_group init_task_group;

extern struct task_group *sched_create_group(void);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_move_task(struct task_struct *tsk);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
extern unsigned long sched_group_shares(struct task_group *tg);

#endif

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif

#ifdef CONFIG_SMP
void migration_init(void);
#else
static inline void migration_init(void)
{
}
#endif

#endif /* __KERNEL__ */

#endif