1 /*-
2 * Copyright (c) 2017 Hans Petter Selasky
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29
30 #include <linux/compat.h>
31 #include <linux/completion.h>
32 #include <linux/mm.h>
33 #include <linux/kthread.h>
34
35 #include <sys/kernel.h>
36 #include <sys/eventhandler.h>
37 #include <sys/malloc.h>
38
39 static eventhandler_tag linuxkpi_thread_dtor_tag;
40
41 static MALLOC_DEFINE(M_LINUX_CURRENT, "linuxcurrent", "LinuxKPI task structure");
42
/*
 * Allocate a LinuxKPI task_struct for the given FreeBSD thread and
 * attach it via td->td_lkpi_task.  The task's mm_struct is shared with
 * another thread of the same process when one already exists and still
 * has users; otherwise the freshly allocated mm_struct is used.
 *
 * "flags" is passed through to malloc(9) (e.g. M_WAITOK or M_NOWAIT).
 * Returns 0 on success or ENOMEM on allocation failure.
 */
int
linux_alloc_current(struct thread *td, int flags)
{
	struct proc *proc;
	struct thread *td_other;
	struct task_struct *ts;
	struct task_struct *ts_other;
	struct mm_struct *mm;
	struct mm_struct *mm_other;

	/* the thread must not already carry a LinuxKPI task */
	MPASS(td->td_lkpi_task == NULL);

	ts = malloc(sizeof(*ts), M_LINUX_CURRENT, flags | M_ZERO);
	if (ts == NULL)
		return (ENOMEM);

	/*
	 * Always allocate an mm_struct up front, even though it may end
	 * up unused, so that no allocation happens under PROC_LOCK below.
	 */
	mm = malloc(sizeof(*mm), M_LINUX_CURRENT, flags | M_ZERO);
	if (mm == NULL) {
		free(ts, M_LINUX_CURRENT);
		return (ENOMEM);
	}

	/* setup new task structure */
	atomic_set(&ts->kthread_flags, 0);
	ts->task_thread = td;
	ts->comm = td->td_name;
	/* NOTE: pid holds the FreeBSD thread ID, not the process ID */
	ts->pid = td->td_tid;
	ts->group_leader = ts;
	atomic_set(&ts->usage, 1);
	atomic_set(&ts->state, TASK_RUNNING);
	init_completion(&ts->parked);
	init_completion(&ts->exited);

	proc = td->td_proc;

	/* check if another thread already has a mm_struct */
	PROC_LOCK(proc);
	FOREACH_THREAD_IN_PROC(proc, td_other) {
		ts_other = td_other->td_lkpi_task;
		if (ts_other == NULL)
			continue;

		mm_other = ts_other->mm;
		if (mm_other == NULL)
			continue;

		/*
		 * try to share other mm_struct; the conditional increment
		 * avoids resurrecting an mm whose last user is going away
		 */
		if (atomic_inc_not_zero(&mm_other->mm_users)) {
			/* set mm_struct pointer */
			ts->mm = mm_other;
			break;
		}
	}

	/* use allocated mm_struct as a fallback */
	if (ts->mm == NULL) {
		/* setup new mm_struct */
		init_rwsem(&mm->mmap_sem);
		atomic_set(&mm->mm_count, 1);
		atomic_set(&mm->mm_users, 1);
		/* set mm_struct pointer */
		ts->mm = mm;
		/* clear pointer so the free() below becomes a no-op */
		mm = NULL;
	}

	/* store pointer to task struct */
	td->td_lkpi_task = ts;
	PROC_UNLOCK(proc);

	/* free the spare mm_struct, if it was not consumed above */
	free(mm, M_LINUX_CURRENT);

	return (0);
}
118
119 struct mm_struct *
linux_get_task_mm(struct task_struct * task)120 linux_get_task_mm(struct task_struct *task)
121 {
122 struct mm_struct *mm;
123
124 mm = task->mm;
125 if (mm != NULL) {
126 atomic_inc(&mm->mm_users);
127 return (mm);
128 }
129 return (NULL);
130 }
131
132 void
linux_mm_dtor(struct mm_struct * mm)133 linux_mm_dtor(struct mm_struct *mm)
134 {
135 free(mm, M_LINUX_CURRENT);
136 }
137
138 void
linux_free_current(struct task_struct * ts)139 linux_free_current(struct task_struct *ts)
140 {
141 mmput(ts->mm);
142 free(ts, M_LINUX_CURRENT);
143 }
144
145 static void
linuxkpi_thread_dtor(void * arg __unused,struct thread * td)146 linuxkpi_thread_dtor(void *arg __unused, struct thread *td)
147 {
148 struct task_struct *ts;
149
150 ts = td->td_lkpi_task;
151 if (ts == NULL)
152 return;
153
154 td->td_lkpi_task = NULL;
155 put_task_struct(ts);
156 }
157
158 struct task_struct *
linux_pid_task(pid_t pid)159 linux_pid_task(pid_t pid)
160 {
161 struct thread *td;
162 struct proc *p;
163
164 /* try to find corresponding thread */
165 td = tdfind(pid, -1);
166 if (td != NULL) {
167 struct task_struct *ts = td->td_lkpi_task;
168 PROC_UNLOCK(td->td_proc);
169 return (ts);
170 }
171
172 /* try to find corresponding procedure */
173 p = pfind(pid);
174 if (p != NULL) {
175 FOREACH_THREAD_IN_PROC(p, td) {
176 struct task_struct *ts = td->td_lkpi_task;
177 if (ts != NULL) {
178 PROC_UNLOCK(p);
179 return (ts);
180 }
181 }
182 PROC_UNLOCK(p);
183 }
184 return (NULL);
185 }
186
187 struct task_struct *
linux_get_pid_task(pid_t pid)188 linux_get_pid_task(pid_t pid)
189 {
190 struct thread *td;
191 struct proc *p;
192
193 /* try to find corresponding thread */
194 td = tdfind(pid, -1);
195 if (td != NULL) {
196 struct task_struct *ts = td->td_lkpi_task;
197 if (ts != NULL)
198 get_task_struct(ts);
199 PROC_UNLOCK(td->td_proc);
200 return (ts);
201 }
202
203 /* try to find corresponding procedure */
204 p = pfind(pid);
205 if (p != NULL) {
206 FOREACH_THREAD_IN_PROC(p, td) {
207 struct task_struct *ts = td->td_lkpi_task;
208 if (ts != NULL) {
209 get_task_struct(ts);
210 PROC_UNLOCK(p);
211 return (ts);
212 }
213 }
214 PROC_UNLOCK(p);
215 }
216 return (NULL);
217 }
218
219 bool
linux_task_exiting(struct task_struct * task)220 linux_task_exiting(struct task_struct *task)
221 {
222 struct proc *p;
223 bool ret;
224
225 ret = false;
226 p = pfind(task->pid);
227 if (p != NULL) {
228 if ((p->p_flag & P_WEXIT) != 0)
229 ret = true;
230 PROC_UNLOCK(p);
231 }
232 return (ret);
233 }
234
/*
 * Module initialization: hook the thread-destructor event so that a
 * thread's LinuxKPI task_struct is released when the thread exits.
 */
static void
linux_current_init(void *arg __unused)
{
	linuxkpi_thread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor,
	    linuxkpi_thread_dtor, NULL, EVENTHANDLER_PRI_ANY);
}
SYSINIT(linux_current, SI_SUB_EVENTHANDLER, SI_ORDER_SECOND, linux_current_init, NULL);
242
243 static void
linux_current_uninit(void * arg __unused)244 linux_current_uninit(void *arg __unused)
245 {
246 struct proc *p;
247 struct task_struct *ts;
248 struct thread *td;
249
250 sx_slock(&allproc_lock);
251 FOREACH_PROC_IN_SYSTEM(p) {
252 PROC_LOCK(p);
253 FOREACH_THREAD_IN_PROC(p, td) {
254 if ((ts = td->td_lkpi_task) != NULL) {
255 td->td_lkpi_task = NULL;
256 put_task_struct(ts);
257 }
258 }
259 PROC_UNLOCK(p);
260 }
261 sx_sunlock(&allproc_lock);
262
263 EVENTHANDLER_DEREGISTER(thread_dtor, linuxkpi_thread_dtor_tag);
264 }
265 SYSUNINIT(linux_current, SI_SUB_EVENTHANDLER, SI_ORDER_SECOND, linux_current_uninit, NULL);
266