#ifndef _LINUX__INIT_TASK_H
#define _LINUX__INIT_TASK_H

#include <linux/rcupdate.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/lockdep.h>
#include <linux/ftrace.h>
#include <linux/ipc.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/securebits.h>
#include <net/net_namespace.h>

extern struct files_struct init_files;
extern struct fs_struct init_fs;

#define INIT_SIGNALS(sig) {						\
	.nr_threads	= 1,						\
	.wait_chldexit	= __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
	.shared_pending	= {						\
		.list = LIST_HEAD_INIT(sig.shared_pending.list),	\
		.signal = {{0}}},					\
	.posix_timers	= LIST_HEAD_INIT(sig.posix_timers),		\
	.cpu_timers	= INIT_CPU_TIMERS(sig.cpu_timers),		\
	.rlim		= INIT_RLIMITS,					\
	.cputimer	= {						\
		.cputime = INIT_CPUTIME,				\
		.running = 0,						\
		.lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock),	\
	},								\
}

extern struct nsproxy init_nsproxy;

#define INIT_SIGHAND(sighand) {						\
	.count		= ATOMIC_INIT(1),				\
	.action		= { { { .sa_handler = SIG_DFL, } }, },		\
	.siglock	= __SPIN_LOCK_UNLOCKED(sighand.siglock),	\
	.signalfd_wqh	= __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh),	\
}

extern struct group_info init_groups;

#define INIT_STRUCT_PID {						\
	.count		= ATOMIC_INIT(1),				\
	.tasks		= {						\
		{ .first = NULL },					\
		{ .first = NULL },					\
		{ .first = NULL },					\
	},								\
	.level		= 0,						\
	.numbers	= { {						\
		.nr		= 0,					\
		.ns		= &init_pid_ns,				\
		.pid_chain	= { .next = NULL, .pprev = NULL },	\
	}, }								\
}

#define INIT_PID_LINK(type)					\
{								\
	.node = {						\
		.next = NULL,					\
		.pprev = NULL,					\
	},							\
	.pid = &init_struct_pid,				\
}

#ifdef CONFIG_AUDITSYSCALL
#define INIT_IDS \
	.loginuid = -1, \
	.sessionid = -1,
#else
#define INIT_IDS
#endif

/*
 * Because of the reduced scope of CAP_SETPCAP when filesystem
 * capabilities are in effect, it is safe to allow CAP_SETPCAP to
 * be available in the default configuration.
 */
# define CAP_INIT_BSET	CAP_FULL_SET
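/*
 * Illustrative sketch only (not part of this header): init_cred, declared
 * below as an extern, is defined outside this file (typically in
 * kernel/cred.c) and picks up its capability bounding set from
 * CAP_INIT_BSET, roughly along these lines:
 *
 *	struct cred init_cred = {
 *		.cap_permitted	= CAP_FULL_SET,
 *		.cap_bset	= CAP_INIT_BSET,
 *		...
 *	};
 *
 * The exact field list lives with the real definition; this sketch is
 * only meant to show where CAP_INIT_BSET ends up.
 */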
#ifdef CONFIG_TREE_PREEMPT_RCU
#define INIT_TASK_RCU_PREEMPT(tsk)					\
	.rcu_read_lock_nesting = 0,					\
	.rcu_read_unlock_special = 0,					\
	.rcu_blocked_node = NULL,					\
	.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),
#else
#define INIT_TASK_RCU_PREEMPT(tsk)
#endif

extern struct cred init_cred;

#ifdef CONFIG_PERF_EVENTS
# define INIT_PERF_EVENTS(tsk)					\
	.perf_event_mutex =					\
		__MUTEX_INITIALIZER(tsk.perf_event_mutex),	\
	.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
#else
# define INIT_PERF_EVENTS(tsk)
#endif

/*
 *  INIT_TASK is used to set up the first task table, touch at
 * your own risk!. Base=0, limit=0x1fffff (=2MB)
 */
#define INIT_TASK(tsk)	\
{									\
	.state		= 0,						\
	.stack		= &init_thread_info,				\
	.usage		= ATOMIC_INIT(2),				\
	.flags		= PF_KTHREAD,					\
	.lock_depth	= -1,						\
	.prio		= MAX_PRIO-20,					\
	.static_prio	= MAX_PRIO-20,					\
	.normal_prio	= MAX_PRIO-20,					\
	.policy		= SCHED_NORMAL,					\
	.cpus_allowed	= CPU_MASK_ALL,					\
	.mm		= NULL,						\
	.active_mm	= &init_mm,					\
	.se		= {						\
		.group_node	= LIST_HEAD_INIT(tsk.se.group_node),	\
	},								\
	.rt		= {						\
		.run_list	= LIST_HEAD_INIT(tsk.rt.run_list),	\
		.time_slice	= HZ,					\
		.nr_cpus_allowed = NR_CPUS,				\
	},								\
	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
	.pushable_tasks	= PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
	.ptraced	= LIST_HEAD_INIT(tsk.ptraced),			\
	.ptrace_entry	= LIST_HEAD_INIT(tsk.ptrace_entry),		\
	.real_parent	= &tsk,						\
	.parent		= &tsk,						\
	.children	= LIST_HEAD_INIT(tsk.children),			\
	.sibling	= LIST_HEAD_INIT(tsk.sibling),			\
	.group_leader	= &tsk,						\
	.real_cred	= &init_cred,					\
	.cred		= &init_cred,					\
	.cred_guard_mutex =						\
		__MUTEX_INITIALIZER(tsk.cred_guard_mutex),		\
	.comm		= "swapper",					\
	.thread		= INIT_THREAD,					\
	.fs		= &init_fs,					\
	.files		= &init_files,					\
	.signal		= &init_signals,				\
	.sighand	= &init_sighand,				\
	.nsproxy	= &init_nsproxy,				\
	.pending	= {						\
		.list = LIST_HEAD_INIT(tsk.pending.list),		\
		.signal = {{0}}},					\
	.blocked	= {{0}},					\
	.alloc_lock	= __SPIN_LOCK_UNLOCKED(tsk.alloc_lock),	\
	.journal_info	= NULL,						\
	.cpu_timers	= INIT_CPU_TIMERS(tsk.cpu_timers),		\
	.fs_excl	= ATOMIC_INIT(0),				\
	.pi_lock	= __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock),	\
	.timer_slack_ns = 50000, /* 50 usec default slack */		\
	.pids = {							\
		[PIDTYPE_PID]  = INIT_PID_LINK(PIDTYPE_PID),		\
		[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),		\
		[PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID),		\
	},								\
	.thread_group	= LIST_HEAD_INIT(tsk.thread_group),		\
	.dirties	= INIT_PROP_LOCAL_SINGLE(dirties),		\
	INIT_IDS							\
	INIT_PERF_EVENTS(tsk)						\
	INIT_TRACE_IRQFLAGS						\
	INIT_LOCKDEP							\
	INIT_FTRACE_GRAPH						\
	INIT_TRACE_RECURSION						\
	INIT_TASK_RCU_PREEMPT(tsk)					\
}


#define INIT_CPU_TIMERS(cpu_timers)					\
{									\
	LIST_HEAD_INIT(cpu_timers[0]),					\
	LIST_HEAD_INIT(cpu_timers[1]),					\
	LIST_HEAD_INIT(cpu_timers[2]),					\
}

/* Attach to the init_task data structure for proper alignment */
#define __init_task_data __attribute__((__section__(".data..init_task")))


#endif
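/*
 * Illustrative sketch only (not part of this header): the first task is
 * normally instantiated by arch or init code, with INIT_TASK() supplying
 * the static initializer and __init_task_data placing the init stack and
 * thread_info in the .data..init_task section, roughly as follows:
 *
 *	struct task_struct init_task = INIT_TASK(init_task);
 *	EXPORT_SYMBOL(init_task);
 *
 *	union thread_union init_thread_union __init_task_data =
 *		{ INIT_THREAD_INFO(init_task) };
 *
 * File names and exact layout vary by architecture (e.g.
 * arch/x86/kernel/init_task.c); the sketch is only meant to show how the
 * macros defined above are consumed.
 */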