xref: /linux-6.15/kernel/kthread.c (revision d1a89197)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Kernel thread helper functions.
3  *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
4  *   Copyright (C) 2009 Red Hat, Inc.
5  *
6  * Creation is done via kthreadd, so that we get a clean environment
7  * even if we're invoked from userspace (think modprobe, hotplug cpu,
8  * etc.).
9  */
10 #include <uapi/linux/sched/types.h>
11 #include <linux/mm.h>
12 #include <linux/mmu_context.h>
13 #include <linux/sched.h>
14 #include <linux/sched/mm.h>
15 #include <linux/sched/task.h>
16 #include <linux/kthread.h>
17 #include <linux/completion.h>
18 #include <linux/err.h>
19 #include <linux/cgroup.h>
20 #include <linux/cpuset.h>
21 #include <linux/unistd.h>
22 #include <linux/file.h>
23 #include <linux/export.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <linux/freezer.h>
27 #include <linux/ptrace.h>
28 #include <linux/uaccess.h>
29 #include <linux/numa.h>
30 #include <linux/sched/isolation.h>
31 #include <trace/events/sched.h>
32 
33 
34 static DEFINE_SPINLOCK(kthread_create_lock);
35 static LIST_HEAD(kthread_create_list);
36 struct task_struct *kthreadd_task;
37 
38 static LIST_HEAD(kthreads_hotplug);
39 static DEFINE_MUTEX(kthreads_hotplug_lock);
40 
41 struct kthread_create_info
42 {
43 	/* Information passed to kthread() from kthreadd. */
44 	char *full_name;
45 	int (*threadfn)(void *data);
46 	void *data;
47 	int node;
48 
49 	/* Result passed back to kthread_create() from kthreadd. */
50 	struct task_struct *result;
51 	struct completion *done;
52 
53 	struct list_head list;
54 };
55 
56 struct kthread {
57 	unsigned long flags;
58 	unsigned int cpu;
59 	unsigned int node;
60 	int started;
61 	int result;
62 	int (*threadfn)(void *);
63 	void *data;
64 	struct completion parked;
65 	struct completion exited;
66 #ifdef CONFIG_BLK_CGROUP
67 	struct cgroup_subsys_state *blkcg_css;
68 #endif
69 	/* To store the full name if task comm is truncated. */
70 	char *full_name;
71 	struct task_struct *task;
72 	struct list_head hotplug_node;
73 };
74 
75 enum KTHREAD_BITS {
76 	KTHREAD_IS_PER_CPU = 0,
77 	KTHREAD_SHOULD_STOP,
78 	KTHREAD_SHOULD_PARK,
79 };
80 
81 static inline struct kthread *to_kthread(struct task_struct *k)
82 {
83 	WARN_ON(!(k->flags & PF_KTHREAD));
84 	return k->worker_private;
85 }
86 
87 /*
88  * Variant of to_kthread() that doesn't assume @p is a kthread.
89  *
90  * Per construction, when:
91  *
92  *   (p->flags & PF_KTHREAD) && p->worker_private
93  *
94  * the task is both a kthread and struct kthread is persistent. However,
95  * PF_KTHREAD on its own is not: kernel_thread() can exec() (see umh.c and
96  * begin_new_exec()).
97  */
98 static inline struct kthread *__to_kthread(struct task_struct *p)
99 {
100 	void *kthread = p->worker_private;
101 	if (kthread && !(p->flags & PF_KTHREAD))
102 		kthread = NULL;
103 	return kthread;
104 }
105 
106 void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
107 {
108 	struct kthread *kthread = to_kthread(tsk);
109 
110 	if (!kthread || !kthread->full_name) {
111 		strscpy(buf, tsk->comm, buf_size);
112 		return;
113 	}
114 
115 	strscpy_pad(buf, kthread->full_name, buf_size);
116 }
117 
118 bool set_kthread_struct(struct task_struct *p)
119 {
120 	struct kthread *kthread;
121 
122 	if (WARN_ON_ONCE(to_kthread(p)))
123 		return false;
124 
125 	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
126 	if (!kthread)
127 		return false;
128 
129 	init_completion(&kthread->exited);
130 	init_completion(&kthread->parked);
131 	INIT_LIST_HEAD(&kthread->hotplug_node);
132 	p->vfork_done = &kthread->exited;
133 
134 	kthread->task = p;
135 	kthread->node = tsk_fork_get_node(current);
136 	p->worker_private = kthread;
137 	return true;
138 }
139 
140 void free_kthread_struct(struct task_struct *k)
141 {
142 	struct kthread *kthread;
143 
144 	/*
145 	 * Can be NULL if kzalloc() in set_kthread_struct() failed.
146 	 */
147 	kthread = to_kthread(k);
148 	if (!kthread)
149 		return;
150 
151 #ifdef CONFIG_BLK_CGROUP
152 	WARN_ON_ONCE(kthread->blkcg_css);
153 #endif
154 	k->worker_private = NULL;
155 	kfree(kthread->full_name);
156 	kfree(kthread);
157 }
158 
159 /**
160  * kthread_should_stop - should this kthread return now?
161  *
162  * When someone calls kthread_stop() on your kthread, it will be woken
163  * and this will return true.  You should then return, and your return
164  * value will be passed through to kthread_stop().
165  */
166 bool kthread_should_stop(void)
167 {
168 	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
169 }
170 EXPORT_SYMBOL(kthread_should_stop);
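/*
 * Editorial usage sketch, not part of the original file: a minimal kthread
 * that loops until kthread_stop() is called; kthread_stop() then returns
 * the value returned by the thread function. example_fn, example_task and
 * do_some_work() are hypothetical names.
 *
 *	static int example_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			do_some_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 *
 *	example_task = kthread_run(example_fn, NULL, "example");
 *	if (IS_ERR(example_task))
 *		return PTR_ERR(example_task);
 *	...
 *	ret = kthread_stop(example_task);
 */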
171 
172 static bool __kthread_should_park(struct task_struct *k)
173 {
174 	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
175 }
176 
177 /**
178  * kthread_should_park - should this kthread park now?
179  *
180  * When someone calls kthread_park() on your kthread, it will be woken
181  * and this will return true.  You should then do the necessary
182  * cleanup and call kthread_parkme()
183  *
184  * Similar to kthread_should_stop(), but this keeps the thread alive
185  * and in a park position. kthread_unpark() "restarts" the thread and
186  * calls the thread function again.
187  */
188 bool kthread_should_park(void)
189 {
190 	return __kthread_should_park(current);
191 }
192 EXPORT_SYMBOL_GPL(kthread_should_park);
193 
194 bool kthread_should_stop_or_park(void)
195 {
196 	struct kthread *kthread = __to_kthread(current);
197 
198 	if (!kthread)
199 		return false;
200 
201 	return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));
202 }
203 
204 /**
205  * kthread_freezable_should_stop - should this freezable kthread return now?
206  * @was_frozen: optional out parameter, indicates whether %current was frozen
207  *
208  * kthread_should_stop() for freezable kthreads, which will enter
209  * refrigerator if necessary.  This function is safe from kthread_stop() /
210  * freezer deadlock and freezable kthreads should use this function instead
211  * of calling try_to_freeze() directly.
212  */
213 bool kthread_freezable_should_stop(bool *was_frozen)
214 {
215 	bool frozen = false;
216 
217 	might_sleep();
218 
219 	if (unlikely(freezing(current)))
220 		frozen = __refrigerator(true);
221 
222 	if (was_frozen)
223 		*was_frozen = frozen;
224 
225 	return kthread_should_stop();
226 }
227 EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
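/*
 * Editorial usage sketch, not part of the original file: a freezable kthread
 * marks itself with set_freezable() and then uses
 * kthread_freezable_should_stop() as its combined exit and freeze point.
 * example_freezable_fn() and do_some_work() are hypothetical names.
 *
 *	static int example_freezable_fn(void *data)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen)) {
 *			do_some_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */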
228 
229 /**
230  * kthread_func - return the function specified on kthread creation
231  * @task: kthread task in question
232  *
233  * Returns NULL if the task is not a kthread.
234  */
235 void *kthread_func(struct task_struct *task)
236 {
237 	struct kthread *kthread = __to_kthread(task);
238 	if (kthread)
239 		return kthread->threadfn;
240 	return NULL;
241 }
242 EXPORT_SYMBOL_GPL(kthread_func);
243 
244 /**
245  * kthread_data - return data value specified on kthread creation
246  * @task: kthread task in question
247  *
248  * Return the data value specified when kthread @task was created.
249  * The caller is responsible for ensuring the validity of @task when
250  * calling this function.
251  */
252 void *kthread_data(struct task_struct *task)
253 {
254 	return to_kthread(task)->data;
255 }
256 EXPORT_SYMBOL_GPL(kthread_data);
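/*
 * Editorial usage sketch, not part of the original file: kthread_data() lets
 * code that only has a task_struct (often current) recover the pointer that
 * was passed at creation time, similar to how workqueue workers find their
 * context. struct example_ctx and example_current_ctx() are hypothetical,
 * assuming the thread was started with kthread_run(example_fn, ctx, "example")
 * so @ctx is its data pointer.
 *
 *	static struct example_ctx *example_current_ctx(void)
 *	{
 *		return kthread_data(current);
 *	}
 */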
257 
258 /**
259  * kthread_probe_data - speculative version of kthread_data()
260  * @task: possible kthread task in question
261  *
262  * @task could be a kthread task.  Return the data value specified when it
263  * was created if accessible.  If @task isn't a kthread task or its data is
264  * inaccessible for any reason, %NULL is returned.  This function requires
265  * that @task itself is safe to dereference.
266  */
267 void *kthread_probe_data(struct task_struct *task)
268 {
269 	struct kthread *kthread = __to_kthread(task);
270 	void *data = NULL;
271 
272 	if (kthread)
273 		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
274 	return data;
275 }
276 
277 static void __kthread_parkme(struct kthread *self)
278 {
279 	for (;;) {
280 		/*
281 		 * TASK_PARKED is a special state; we must serialize against
282 		 * possible pending wakeups to avoid store-store collisions on
283 		 * task->state.
284 		 *
285 		 * Such a collision might possibly result in the task state
286 		 * changing from TASK_PARKED and us failing the
287 		 * wait_task_inactive() in kthread_park().
288 		 */
289 		set_special_state(TASK_PARKED);
290 		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
291 			break;
292 
293 		/*
294 		 * Thread is going to call schedule(), do not preempt it,
295 		 * or the caller of kthread_park() may spend more time in
296 		 * wait_task_inactive().
297 		 */
298 		preempt_disable();
299 		complete(&self->parked);
300 		schedule_preempt_disabled();
301 		preempt_enable();
302 	}
303 	__set_current_state(TASK_RUNNING);
304 }
305 
306 void kthread_parkme(void)
307 {
308 	__kthread_parkme(to_kthread(current));
309 }
310 EXPORT_SYMBOL_GPL(kthread_parkme);
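/*
 * Editorial usage sketch, not part of the original file: a thread function
 * that honours both stop and park requests, in the spirit of smpboot
 * threads. Names are hypothetical.
 *
 *	static int example_parkable_fn(void *data)
 *	{
 *		for (;;) {
 *			if (kthread_should_stop())
 *				break;
 *			if (kthread_should_park()) {
 *				kthread_parkme();
 *				continue;
 *			}
 *			do_some_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */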
311 
312 /**
313  * kthread_exit - Cause the current kthread to return @result to kthread_stop().
314  * @result: The integer value to return to kthread_stop().
315  *
316  * While kthread_exit() can be called directly, it exists so that
317  * functions which do some additional work in non-modular code, such as
318  * module_put_and_kthread_exit(), can be implemented.
319  *
320  * Does not return.
321  */
322 void __noreturn kthread_exit(long result)
323 {
324 	struct kthread *kthread = to_kthread(current);
325 	kthread->result = result;
326 	if (!list_empty(&kthread->hotplug_node)) {
327 		mutex_lock(&kthreads_hotplug_lock);
328 		list_del(&kthread->hotplug_node);
329 		mutex_unlock(&kthreads_hotplug_lock);
330 	}
331 	do_exit(0);
332 }
333 EXPORT_SYMBOL(kthread_exit);
334 
335 /**
336  * kthread_complete_and_exit - Exit the current kthread.
337  * @comp: Completion to complete
338  * @code: The integer value to return to kthread_stop().
339  *
340  * If present, complete @comp and then return code to kthread_stop().
341  *
342  * A kernel thread whose module may be removed after the completion of
343  * @comp can use this function to exit safely.
344  *
345  * Does not return.
346  */
347 void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
348 {
349 	if (comp)
350 		complete(comp);
351 
352 	kthread_exit(code);
353 }
354 EXPORT_SYMBOL(kthread_complete_and_exit);
355 
356 static void kthread_fetch_affinity(struct kthread *kthread, struct cpumask *cpumask)
357 {
358 	cpumask_and(cpumask, cpumask_of_node(kthread->node),
359 		    housekeeping_cpumask(HK_TYPE_KTHREAD));
360 
361 	if (cpumask_empty(cpumask))
362 		cpumask_copy(cpumask, housekeeping_cpumask(HK_TYPE_KTHREAD));
363 }
364 
365 static void kthread_affine_node(void)
366 {
367 	struct kthread *kthread = to_kthread(current);
368 	cpumask_var_t affinity;
369 
370 	WARN_ON_ONCE(kthread_is_per_cpu(current));
371 
372 	if (kthread->node == NUMA_NO_NODE) {
373 		housekeeping_affine(current, HK_TYPE_KTHREAD);
374 	} else {
375 		if (!zalloc_cpumask_var(&affinity, GFP_KERNEL)) {
376 			WARN_ON_ONCE(1);
377 			return;
378 		}
379 
380 		mutex_lock(&kthreads_hotplug_lock);
381 		WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
382 		list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
383 		/*
384 		 * The node cpumask is racy when read from kthread() but:
385 		 * - a racing CPU going down will either fail on the subsequent
386 		 *   call to set_cpus_allowed_ptr() or be migrated to housekeepers
387 		 *   afterwards by the scheduler.
388 		 * - a racing CPU going up will be handled by kthreads_online_cpu()
389 		 */
390 		kthread_fetch_affinity(kthread, affinity);
391 		set_cpus_allowed_ptr(current, affinity);
392 		mutex_unlock(&kthreads_hotplug_lock);
393 
394 		free_cpumask_var(affinity);
395 	}
396 }
397 
398 static int kthread(void *_create)
399 {
400 	static const struct sched_param param = { .sched_priority = 0 };
401 	/* Copy data: it's on kthread's stack */
402 	struct kthread_create_info *create = _create;
403 	int (*threadfn)(void *data) = create->threadfn;
404 	void *data = create->data;
405 	struct completion *done;
406 	struct kthread *self;
407 	int ret;
408 
409 	self = to_kthread(current);
410 
411 	/* Release the structure when the caller was killed by a fatal signal. */
412 	done = xchg(&create->done, NULL);
413 	if (!done) {
414 		kfree(create->full_name);
415 		kfree(create);
416 		kthread_exit(-EINTR);
417 	}
418 
419 	self->full_name = create->full_name;
420 	self->threadfn = threadfn;
421 	self->data = data;
422 
423 	/*
424 	 * The new thread inherited kthreadd's priority and CPU mask. Reset
425 	 * back to default in case they have been changed.
426 	 */
427 	sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
428 
429 	/* OK, tell user we're spawned, wait for stop or wakeup */
430 	__set_current_state(TASK_UNINTERRUPTIBLE);
431 	create->result = current;
432 	/*
433 	 * Thread is going to call schedule(), do not preempt it,
434 	 * or the creator may spend more time in wait_task_inactive().
435 	 */
436 	preempt_disable();
437 	complete(done);
438 	schedule_preempt_disabled();
439 	preempt_enable();
440 
441 	self->started = 1;
442 
443 	if (!(current->flags & PF_NO_SETAFFINITY))
444 		kthread_affine_node();
445 
446 	ret = -EINTR;
447 	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
448 		cgroup_kthread_ready();
449 		__kthread_parkme(self);
450 		ret = threadfn(data);
451 	}
452 	kthread_exit(ret);
453 }
454 
455 /* Called from kernel_clone() to get node information for the task about to be created. */
456 int tsk_fork_get_node(struct task_struct *tsk)
457 {
458 #ifdef CONFIG_NUMA
459 	if (tsk == kthreadd_task)
460 		return tsk->pref_node_fork;
461 #endif
462 	return NUMA_NO_NODE;
463 }
464 
465 static void create_kthread(struct kthread_create_info *create)
466 {
467 	int pid;
468 
469 #ifdef CONFIG_NUMA
470 	current->pref_node_fork = create->node;
471 #endif
472 	/* We want our own signal handler (we take no signals by default). */
473 	pid = kernel_thread(kthread, create, create->full_name,
474 			    CLONE_FS | CLONE_FILES | SIGCHLD);
475 	if (pid < 0) {
476 		/* Release the structure when the caller was killed by a fatal signal. */
477 		struct completion *done = xchg(&create->done, NULL);
478 
479 		kfree(create->full_name);
480 		if (!done) {
481 			kfree(create);
482 			return;
483 		}
484 		create->result = ERR_PTR(pid);
485 		complete(done);
486 	}
487 }
488 
489 static __printf(4, 0)
490 struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
491 						    void *data, int node,
492 						    const char namefmt[],
493 						    va_list args)
494 {
495 	DECLARE_COMPLETION_ONSTACK(done);
496 	struct task_struct *task;
497 	struct kthread_create_info *create = kmalloc(sizeof(*create),
498 						     GFP_KERNEL);
499 
500 	if (!create)
501 		return ERR_PTR(-ENOMEM);
502 	create->threadfn = threadfn;
503 	create->data = data;
504 	create->node = node;
505 	create->done = &done;
506 	create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
507 	if (!create->full_name) {
508 		task = ERR_PTR(-ENOMEM);
509 		goto free_create;
510 	}
511 
512 	spin_lock(&kthread_create_lock);
513 	list_add_tail(&create->list, &kthread_create_list);
514 	spin_unlock(&kthread_create_lock);
515 
516 	wake_up_process(kthreadd_task);
517 	/*
518 	 * Wait for completion in killable state, for I might be chosen by
519 	 * the OOM killer while kthreadd is trying to allocate memory for
520 	 * new kernel thread.
521 	 */
522 	if (unlikely(wait_for_completion_killable(&done))) {
523 		/*
524 		 * If I was killed by a fatal signal before kthreadd (or new
525 		 * kernel thread) calls complete(), leave the cleanup of this
526 		 * structure to that thread.
527 		 */
528 		if (xchg(&create->done, NULL))
529 			return ERR_PTR(-EINTR);
530 		/*
531 		 * kthreadd (or new kernel thread) will call complete()
532 		 * shortly.
533 		 */
534 		wait_for_completion(&done);
535 	}
536 	task = create->result;
537 free_create:
538 	kfree(create);
539 	return task;
540 }
541 
542 /**
543  * kthread_create_on_node - create a kthread.
544  * @threadfn: the function to run until signal_pending(current).
545  * @data: data ptr for @threadfn.
546  * @node: task and thread structures for the thread are allocated on this node
547  * @namefmt: printf-style name for the thread.
548  *
549  * Description: This helper function creates and names a kernel
550  * thread.  The thread will be stopped: use wake_up_process() to start
551  * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
552  * is affine to all CPUs.
553  *
554  * If the thread is going to be bound to a particular cpu, give its node
555  * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
556  * When woken, the thread will run @threadfn() with @data as its
557  * argument. @threadfn() can either return directly if it is a
558  * standalone thread for which no one will call kthread_stop(), or
559  * return when 'kthread_should_stop()' is true (which means
560  * kthread_stop() has been called).  The return value should be zero
561  * or a negative error number; it will be passed to kthread_stop().
562  *
563  * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
564  */
565 struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
566 					   void *data, int node,
567 					   const char namefmt[],
568 					   ...)
569 {
570 	struct task_struct *task;
571 	va_list args;
572 
573 	va_start(args, namefmt);
574 	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
575 	va_end(args);
576 
577 	return task;
578 }
579 EXPORT_SYMBOL(kthread_create_on_node);
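/*
 * Editorial usage sketch, not part of the original file: create a thread
 * without starting it, adjust it while it is still stopped, then wake it.
 * The kthread_create() wrapper simply passes NUMA_NO_NODE as @node.
 * Names are hypothetical.
 *
 *	t = kthread_create(example_fn, data, "example/%d", id);
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *	kthread_bind(t, cpu);
 *	wake_up_process(t);
 */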
580 
581 static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
582 {
583 	unsigned long flags;
584 
585 	if (!wait_task_inactive(p, state)) {
586 		WARN_ON(1);
587 		return;
588 	}
589 
590 	/* It's safe because the task is inactive. */
591 	raw_spin_lock_irqsave(&p->pi_lock, flags);
592 	do_set_cpus_allowed(p, mask);
593 	p->flags |= PF_NO_SETAFFINITY;
594 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
595 }
596 
597 static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
598 {
599 	__kthread_bind_mask(p, cpumask_of(cpu), state);
600 }
601 
602 void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
603 {
604 	struct kthread *kthread = to_kthread(p);
605 	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
606 	WARN_ON_ONCE(kthread->started);
607 }
608 
609 /**
610  * kthread_bind - bind a just-created kthread to a cpu.
611  * @p: thread created by kthread_create().
612  * @cpu: cpu (might not be online, must be possible) for @p to run on.
613  *
614  * Description: This function is equivalent to set_cpus_allowed(),
615  * except that @cpu doesn't need to be online, and the thread must be
616  * stopped (i.e., just returned from kthread_create()).
617  */
618 void kthread_bind(struct task_struct *p, unsigned int cpu)
619 {
620 	struct kthread *kthread = to_kthread(p);
621 	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
622 	WARN_ON_ONCE(kthread->started);
623 }
624 EXPORT_SYMBOL(kthread_bind);
625 
626 /**
627  * kthread_create_on_cpu - Create a cpu bound kthread
628  * @threadfn: the function to run until signal_pending(current).
629  * @data: data ptr for @threadfn.
630  * @cpu: The cpu on which the thread should be bound.
631  * @namefmt: printf-style name for the thread. Format is restricted
632  *	     to "name.*%u". Code fills in cpu number.
633  *
634  * Description: This helper function creates and names a kernel thread
635  */
636 struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
637 					  void *data, unsigned int cpu,
638 					  const char *namefmt)
639 {
640 	struct task_struct *p;
641 
642 	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
643 				   cpu);
644 	if (IS_ERR(p))
645 		return p;
646 	kthread_bind(p, cpu);
647 	/* CPU hotplug needs to bind it once again when unparking the thread. */
648 	to_kthread(p)->cpu = cpu;
649 	return p;
650 }
651 EXPORT_SYMBOL(kthread_create_on_cpu);
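/*
 * Editorial usage sketch, not part of the original file: the name format
 * passed to kthread_create_on_cpu() must contain a single "%u", which is
 * filled in with the CPU number. Names are hypothetical.
 *
 *	p = kthread_create_on_cpu(example_fn, data, cpu, "example/%u");
 *	if (!IS_ERR(p))
 *		wake_up_process(p);
 */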
652 
653 void kthread_set_per_cpu(struct task_struct *k, int cpu)
654 {
655 	struct kthread *kthread = to_kthread(k);
656 	if (!kthread)
657 		return;
658 
659 	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
660 
661 	if (cpu < 0) {
662 		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
663 		return;
664 	}
665 
666 	kthread->cpu = cpu;
667 	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
668 }
669 
670 bool kthread_is_per_cpu(struct task_struct *p)
671 {
672 	struct kthread *kthread = __to_kthread(p);
673 	if (!kthread)
674 		return false;
675 
676 	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
677 }
678 
679 /**
680  * kthread_unpark - unpark a thread created by kthread_create().
681  * @k:		thread created by kthread_create().
682  *
683  * Sets kthread_should_park() for @k to return false, wakes it, and
684  * waits for it to return. If the thread is marked percpu then its
685  * bound to the cpu again.
686  */
687 void kthread_unpark(struct task_struct *k)
688 {
689 	struct kthread *kthread = to_kthread(k);
690 
691 	if (!test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))
692 		return;
693 	/*
694 	 * Newly created kthread was parked when the CPU was offline.
695 	 * The binding was lost and we need to set it again.
696 	 */
697 	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
698 		__kthread_bind(k, kthread->cpu, TASK_PARKED);
699 
700 	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
701 	/*
702 	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
703 	 */
704 	wake_up_state(k, TASK_PARKED);
705 }
706 EXPORT_SYMBOL_GPL(kthread_unpark);
707 
708 /**
709  * kthread_park - park a thread created by kthread_create().
710  * @k: thread created by kthread_create().
711  *
712  * Sets kthread_should_park() for @k to return true, wakes it, and
713  * waits for it to return. This can also be called after kthread_create()
714  * instead of calling wake_up_process(): the thread will park without
715  * calling threadfn().
716  *
717  * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
718  * If called by the kthread itself just the park bit is set.
719  */
720 int kthread_park(struct task_struct *k)
721 {
722 	struct kthread *kthread = to_kthread(k);
723 
724 	if (WARN_ON(k->flags & PF_EXITING))
725 		return -ENOSYS;
726 
727 	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
728 		return -EBUSY;
729 
730 	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
731 	if (k != current) {
732 		wake_up_process(k);
733 		/*
734 		 * Wait for __kthread_parkme() to complete(), this means we
735 		 * _will_ have TASK_PARKED and are about to call schedule().
736 		 */
737 		wait_for_completion(&kthread->parked);
738 		/*
739 		 * Now wait for that schedule() to complete and the task to
740 		 * get scheduled out.
741 		 */
742 		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
743 	}
744 
745 	return 0;
746 }
747 EXPORT_SYMBOL_GPL(kthread_park);
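/*
 * Editorial usage sketch, not part of the original file: the controller side
 * of parking, e.g. to quiesce a thread around a reconfiguration. @t is a
 * thread whose function calls kthread_parkme() when kthread_should_park()
 * becomes true; reconfigure_hardware() is a hypothetical name.
 *
 *	ret = kthread_park(t);
 *	if (!ret) {
 *		reconfigure_hardware();
 *		kthread_unpark(t);
 *	}
 */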
748 
749 /**
750  * kthread_stop - stop a thread created by kthread_create().
751  * @k: thread created by kthread_create().
752  *
753  * Sets kthread_should_stop() for @k to return true, wakes it, and
754  * waits for it to exit. This can also be called after kthread_create()
755  * instead of calling wake_up_process(): the thread will exit without
756  * calling threadfn().
757  *
758  * If threadfn() may call kthread_exit() itself, the caller must ensure
759  * task_struct can't go away.
760  *
761  * Returns the result of threadfn(), or %-EINTR if wake_up_process()
762  * was never called.
763  */
764 int kthread_stop(struct task_struct *k)
765 {
766 	struct kthread *kthread;
767 	int ret;
768 
769 	trace_sched_kthread_stop(k);
770 
771 	get_task_struct(k);
772 	kthread = to_kthread(k);
773 	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
774 	kthread_unpark(k);
775 	set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
776 	wake_up_process(k);
777 	wait_for_completion(&kthread->exited);
778 	ret = kthread->result;
779 	put_task_struct(k);
780 
781 	trace_sched_kthread_stop_ret(ret);
782 	return ret;
783 }
784 EXPORT_SYMBOL(kthread_stop);
785 
786 /**
787  * kthread_stop_put - stop a thread and put its task struct
788  * @k: thread created by kthread_create().
789  *
790  * Stops a thread created by kthread_create() and puts its task_struct.
791  * Only use when holding an extra task struct reference obtained by
792  * calling get_task_struct().
793  */
794 int kthread_stop_put(struct task_struct *k)
795 {
796 	int ret;
797 
798 	ret = kthread_stop(k);
799 	put_task_struct(k);
800 	return ret;
801 }
802 EXPORT_SYMBOL(kthread_stop_put);
803 
804 int kthreadd(void *unused)
805 {
806 	struct task_struct *tsk = current;
807 
808 	/* Setup a clean context for our children to inherit. */
809 	set_task_comm(tsk, "kthreadd");
810 	ignore_signals(tsk);
811 	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
812 	set_mems_allowed(node_states[N_MEMORY]);
813 
814 	current->flags |= PF_NOFREEZE;
815 	cgroup_init_kthreadd();
816 
817 	for (;;) {
818 		set_current_state(TASK_INTERRUPTIBLE);
819 		if (list_empty(&kthread_create_list))
820 			schedule();
821 		__set_current_state(TASK_RUNNING);
822 
823 		spin_lock(&kthread_create_lock);
824 		while (!list_empty(&kthread_create_list)) {
825 			struct kthread_create_info *create;
826 
827 			create = list_entry(kthread_create_list.next,
828 					    struct kthread_create_info, list);
829 			list_del_init(&create->list);
830 			spin_unlock(&kthread_create_lock);
831 
832 			create_kthread(create);
833 
834 			spin_lock(&kthread_create_lock);
835 		}
836 		spin_unlock(&kthread_create_lock);
837 	}
838 
839 	return 0;
840 }
841 
842 /*
843  * Re-affine kthreads according to their preferences
844  * and the newly online CPU. The CPU down part is handled
845  * by select_fallback_rq() which by default re-affines to
846  * housekeepers in case the preferred affinity doesn't
847  * apply anymore.
848  */
849 static int kthreads_online_cpu(unsigned int cpu)
850 {
851 	cpumask_var_t affinity;
852 	struct kthread *k;
853 	int ret;
854 
855 	guard(mutex)(&kthreads_hotplug_lock);
856 
857 	if (list_empty(&kthreads_hotplug))
858 		return 0;
859 
860 	if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
861 		return -ENOMEM;
862 
863 	ret = 0;
864 
865 	list_for_each_entry(k, &kthreads_hotplug, hotplug_node) {
866 		if (WARN_ON_ONCE((k->task->flags & PF_NO_SETAFFINITY) ||
867 				 kthread_is_per_cpu(k->task) ||
868 				 k->node == NUMA_NO_NODE)) {
869 			ret = -EINVAL;
870 			continue;
871 		}
872 		kthread_fetch_affinity(k, affinity);
873 		set_cpus_allowed_ptr(k->task, affinity);
874 	}
875 
876 	free_cpumask_var(affinity);
877 
878 	return ret;
879 }
880 
881 static int kthreads_init(void)
882 {
883 	return cpuhp_setup_state(CPUHP_AP_KTHREADS_ONLINE, "kthreads:online",
884 				kthreads_online_cpu, NULL);
885 }
886 early_initcall(kthreads_init);
887 
888 void __kthread_init_worker(struct kthread_worker *worker,
889 				const char *name,
890 				struct lock_class_key *key)
891 {
892 	memset(worker, 0, sizeof(struct kthread_worker));
893 	raw_spin_lock_init(&worker->lock);
894 	lockdep_set_class_and_name(&worker->lock, key, name);
895 	INIT_LIST_HEAD(&worker->work_list);
896 	INIT_LIST_HEAD(&worker->delayed_work_list);
897 }
898 EXPORT_SYMBOL_GPL(__kthread_init_worker);
899 
900 /**
901  * kthread_worker_fn - kthread function to process kthread_worker
902  * @worker_ptr: pointer to initialized kthread_worker
903  *
904  * This function implements the main cycle of a kthread worker. It processes
905  * the work_list until the worker is stopped with kthread_stop(). It sleeps
906  * when the queue is empty.
907  *
908  * The works must not hold any locks or keep preemption or interrupts disabled
909  * when they finish. A safe point for freezing is provided after one work
910  * finishes and before a new one is started.
911  *
912  * Also, a work must not be handled by more than one worker at the same time;
913  * see also kthread_queue_work().
914  */
915 int kthread_worker_fn(void *worker_ptr)
916 {
917 	struct kthread_worker *worker = worker_ptr;
918 	struct kthread_work *work;
919 
920 	/*
921 	 * FIXME: Update the check and remove the assignment when all kthread
922 	 * worker users are created using kthread_create_worker*() functions.
923 	 */
924 	WARN_ON(worker->task && worker->task != current);
925 	worker->task = current;
926 
927 	if (worker->flags & KTW_FREEZABLE)
928 		set_freezable();
929 
930 repeat:
931 	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
932 
933 	if (kthread_should_stop()) {
934 		__set_current_state(TASK_RUNNING);
935 		raw_spin_lock_irq(&worker->lock);
936 		worker->task = NULL;
937 		raw_spin_unlock_irq(&worker->lock);
938 		return 0;
939 	}
940 
941 	work = NULL;
942 	raw_spin_lock_irq(&worker->lock);
943 	if (!list_empty(&worker->work_list)) {
944 		work = list_first_entry(&worker->work_list,
945 					struct kthread_work, node);
946 		list_del_init(&work->node);
947 	}
948 	worker->current_work = work;
949 	raw_spin_unlock_irq(&worker->lock);
950 
951 	if (work) {
952 		kthread_work_func_t func = work->func;
953 		__set_current_state(TASK_RUNNING);
954 		trace_sched_kthread_work_execute_start(work);
955 		work->func(work);
956 		/*
957 		 * Avoid dereferencing work after this point.  The trace
958 		 * event only cares about the address.
959 		 */
960 		trace_sched_kthread_work_execute_end(work, func);
961 	} else if (!freezing(current)) {
962 		schedule();
963 	} else {
964 		/*
965 		 * Handle the case where the current remains
966 		 * TASK_INTERRUPTIBLE. try_to_freeze() expects
967 		 * the current to be TASK_RUNNING.
968 		 */
969 		__set_current_state(TASK_RUNNING);
970 	}
971 
972 	try_to_freeze();
973 	cond_resched();
974 	goto repeat;
975 }
976 EXPORT_SYMBOL_GPL(kthread_worker_fn);
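/*
 * Editorial usage sketch, not part of the original file: the legacy pattern
 * the FIXME above refers to, where the caller runs kthread_worker_fn() in a
 * thread it creates itself instead of using kthread_create_worker().
 * example_worker is a hypothetical name.
 *
 *	DEFINE_KTHREAD_WORKER(example_worker);
 *
 *	t = kthread_run(kthread_worker_fn, &example_worker, "example_worker");
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *
 * Such a worker is later torn down with kthread_stop(t).
 */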
977 
978 static __printf(3, 0) struct kthread_worker *
979 __kthread_create_worker(int cpu, unsigned int flags,
980 			const char namefmt[], va_list args)
981 {
982 	struct kthread_worker *worker;
983 	struct task_struct *task;
984 	int node = NUMA_NO_NODE;
985 
986 	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
987 	if (!worker)
988 		return ERR_PTR(-ENOMEM);
989 
990 	kthread_init_worker(worker);
991 
992 	if (cpu >= 0)
993 		node = cpu_to_node(cpu);
994 
995 	task = __kthread_create_on_node(kthread_worker_fn, worker,
996 						node, namefmt, args);
997 	if (IS_ERR(task))
998 		goto fail_task;
999 
1000 	if (cpu >= 0)
1001 		kthread_bind(task, cpu);
1002 
1003 	worker->flags = flags;
1004 	worker->task = task;
1005 	wake_up_process(task);
1006 	return worker;
1007 
1008 fail_task:
1009 	kfree(worker);
1010 	return ERR_CAST(task);
1011 }
1012 
1013 /**
1014  * kthread_create_worker - create a kthread worker
1015  * @flags: flags modifying the default behavior of the worker
1016  * @namefmt: printf-style name for the kthread worker (task).
1017  *
1018  * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
1019  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
1020  * when the caller was killed by a fatal signal.
1021  */
1022 struct kthread_worker *
1023 kthread_create_worker(unsigned int flags, const char namefmt[], ...)
1024 {
1025 	struct kthread_worker *worker;
1026 	va_list args;
1027 
1028 	va_start(args, namefmt);
1029 	worker = __kthread_create_worker(-1, flags, namefmt, args);
1030 	va_end(args);
1031 
1032 	return worker;
1033 }
1034 EXPORT_SYMBOL(kthread_create_worker);
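/*
 * Editorial usage sketch, not part of the original file: the preferred
 * worker lifecycle. Pass KTW_FREEZABLE instead of 0 for a freezable worker.
 * Names are hypothetical.
 *
 *	struct kthread_worker *w;
 *
 *	w = kthread_create_worker(0, "example_worker");
 *	if (IS_ERR(w))
 *		return PTR_ERR(w);
 *	...queue, flush and cancel work items...
 *	kthread_destroy_worker(w);
 */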
1035 
1036 /**
1037  * kthread_create_worker_on_cpu - create a kthread worker and bind it
1038  *	to a given CPU and the associated NUMA node.
1039  * @cpu: CPU number
1040  * @flags: flags modifying the default behavior of the worker
1041  * @namefmt: printf-style name for the kthread worker (task).
1042  *
1043  * Use a valid CPU number if you want to bind the kthread worker
1044  * to the given CPU and the associated NUMA node.
1045  *
1046  * A good practice is to add the cpu number also into the worker name.
1047  * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
1048  *
1049  * CPU hotplug:
1050  * The kthread worker API is simple and generic. It just provides a way
1051  * to create, use, and destroy workers.
1052  *
1053  * It is up to the API user how to handle CPU hotplug. They have to decide
1054  * how to handle pending work items, prevent queuing new ones, and
1055  * restore the functionality when the CPU goes off and on. There are a
1056  * few catches:
1057  *
1058  *    - CPU affinity gets lost when it is scheduled on an offline CPU.
1059  *
1060  *    - The worker might not exist if the CPU was offline when the user
1061  *      created the workers.
1062  *
1063  * Good practice is to implement two CPU hotplug callbacks and to
1064  * destroy/create the worker when the CPU goes down/up.
1065  *
1066  * Return:
1067  * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
1068  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
1069  * when the caller was killed by a fatal signal.
1070  */
1071 struct kthread_worker *
1072 kthread_create_worker_on_cpu(int cpu, unsigned int flags,
1073 			     const char namefmt[], ...)
1074 {
1075 	struct kthread_worker *worker;
1076 	va_list args;
1077 
1078 	va_start(args, namefmt);
1079 	worker = __kthread_create_worker(cpu, flags, namefmt, args);
1080 	va_end(args);
1081 
1082 	return worker;
1083 }
1084 EXPORT_SYMBOL(kthread_create_worker_on_cpu);
1085 
1086 /*
1087  * Returns true when the work could not be queued at the moment.
1088  * It happens when it is already pending in a worker list
1089  * or when it is being cancelled.
1090  */
1091 static inline bool queuing_blocked(struct kthread_worker *worker,
1092 				   struct kthread_work *work)
1093 {
1094 	lockdep_assert_held(&worker->lock);
1095 
1096 	return !list_empty(&work->node) || work->canceling;
1097 }
1098 
1099 static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
1100 					     struct kthread_work *work)
1101 {
1102 	lockdep_assert_held(&worker->lock);
1103 	WARN_ON_ONCE(!list_empty(&work->node));
1104 	/* Do not use a work with >1 worker, see kthread_queue_work() */
1105 	WARN_ON_ONCE(work->worker && work->worker != worker);
1106 }
1107 
1108 /* insert @work before @pos in @worker */
1109 static void kthread_insert_work(struct kthread_worker *worker,
1110 				struct kthread_work *work,
1111 				struct list_head *pos)
1112 {
1113 	kthread_insert_work_sanity_check(worker, work);
1114 
1115 	trace_sched_kthread_work_queue_work(worker, work);
1116 
1117 	list_add_tail(&work->node, pos);
1118 	work->worker = worker;
1119 	if (!worker->current_work && likely(worker->task))
1120 		wake_up_process(worker->task);
1121 }
1122 
1123 /**
1124  * kthread_queue_work - queue a kthread_work
1125  * @worker: target kthread_worker
1126  * @work: kthread_work to queue
1127  *
1128  * Queue @work for async execution on @worker.  @worker must have been
1129  * created with kthread_create_worker().  Returns %true
1130  * if @work was successfully queued, %false if it was already pending.
1131  *
1132  * Reinitialize the work if it needs to be used by another worker.
1133  * For example, when the worker was stopped and started again.
1134  */
1135 bool kthread_queue_work(struct kthread_worker *worker,
1136 			struct kthread_work *work)
1137 {
1138 	bool ret = false;
1139 	unsigned long flags;
1140 
1141 	raw_spin_lock_irqsave(&worker->lock, flags);
1142 	if (!queuing_blocked(worker, work)) {
1143 		kthread_insert_work(worker, work, &worker->work_list);
1144 		ret = true;
1145 	}
1146 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1147 	return ret;
1148 }
1149 EXPORT_SYMBOL_GPL(kthread_queue_work);
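/*
 * Editorial usage sketch, not part of the original file: a work item is
 * initialized once and queued to a worker; the callback gets the embedded
 * kthread_work and uses container_of() to reach the surrounding object.
 * struct example_dev, example_work_fn() and handle_event() are hypothetical.
 *
 *	struct example_dev {
 *		struct kthread_work work;
 *	};
 *
 *	static void example_work_fn(struct kthread_work *work)
 *	{
 *		struct example_dev *dev = container_of(work, struct example_dev, work);
 *
 *		handle_event(dev);
 *	}
 *
 *	kthread_init_work(&dev->work, example_work_fn);
 *	kthread_queue_work(w, &dev->work);
 */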
1150 
1151 /**
1152  * kthread_delayed_work_timer_fn - callback that queues the associated kthread
1153  *	delayed work when the timer expires.
1154  * @t: pointer to the expired timer
1155  *
1156  * The prototype of the function is defined by struct timer_list.
1157  * It is expected to be called from an irq-safe timer with interrupts already off.
1158  */
1159 void kthread_delayed_work_timer_fn(struct timer_list *t)
1160 {
1161 	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
1162 	struct kthread_work *work = &dwork->work;
1163 	struct kthread_worker *worker = work->worker;
1164 	unsigned long flags;
1165 
1166 	/*
1167 	 * This might happen when a pending work is reinitialized.
1168 	 * It means that the work is being used in a wrong way.
1169 	 */
1170 	if (WARN_ON_ONCE(!worker))
1171 		return;
1172 
1173 	raw_spin_lock_irqsave(&worker->lock, flags);
1174 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1175 	WARN_ON_ONCE(work->worker != worker);
1176 
1177 	/* Move the work from worker->delayed_work_list. */
1178 	WARN_ON_ONCE(list_empty(&work->node));
1179 	list_del_init(&work->node);
1180 	if (!work->canceling)
1181 		kthread_insert_work(worker, work, &worker->work_list);
1182 
1183 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1184 }
1185 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
1186 
1187 static void __kthread_queue_delayed_work(struct kthread_worker *worker,
1188 					 struct kthread_delayed_work *dwork,
1189 					 unsigned long delay)
1190 {
1191 	struct timer_list *timer = &dwork->timer;
1192 	struct kthread_work *work = &dwork->work;
1193 
1194 	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
1195 
1196 	/*
1197 	 * If @delay is 0, queue @dwork->work immediately.  This is for
1198 	 * both optimization and correctness.  The earliest @timer can
1199 	 * expire is on the closest next tick, and delayed_work users depend
1200 	 * on there being no such delay when @delay is 0.
1201 	 */
1202 	if (!delay) {
1203 		kthread_insert_work(worker, work, &worker->work_list);
1204 		return;
1205 	}
1206 
1207 	/* Be paranoid and try to detect possible races already now. */
1208 	kthread_insert_work_sanity_check(worker, work);
1209 
1210 	list_add(&work->node, &worker->delayed_work_list);
1211 	work->worker = worker;
1212 	timer->expires = jiffies + delay;
1213 	add_timer(timer);
1214 }
1215 
1216 /**
1217  * kthread_queue_delayed_work - queue the associated kthread work
1218  *	after a delay.
1219  * @worker: target kthread_worker
1220  * @dwork: kthread_delayed_work to queue
1221  * @delay: number of jiffies to wait before queuing
1222  *
1223  * If the work has not been pending it starts a timer that will queue
1224  * the work after the given @delay. If @delay is zero, it queues the
1225  * work immediately.
1226  *
1227  * Return: %false if @work was already pending, meaning that
1228  * either the timer was running or the work was queued. It returns %true
1229  * otherwise.
1230  */
1231 bool kthread_queue_delayed_work(struct kthread_worker *worker,
1232 				struct kthread_delayed_work *dwork,
1233 				unsigned long delay)
1234 {
1235 	struct kthread_work *work = &dwork->work;
1236 	unsigned long flags;
1237 	bool ret = false;
1238 
1239 	raw_spin_lock_irqsave(&worker->lock, flags);
1240 
1241 	if (!queuing_blocked(worker, work)) {
1242 		__kthread_queue_delayed_work(worker, dwork, delay);
1243 		ret = true;
1244 	}
1245 
1246 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1247 	return ret;
1248 }
1249 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
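/*
 * Editorial usage sketch, not part of the original file: a delayed work item
 * that runs on the worker roughly one second after being queued.
 * example_dwork reuses the hypothetical example_work_fn() from above.
 *
 *	struct kthread_delayed_work example_dwork;
 *
 *	kthread_init_delayed_work(&example_dwork, example_work_fn);
 *	kthread_queue_delayed_work(w, &example_dwork, msecs_to_jiffies(1000));
 *
 * A still-pending delayed work is torn down with
 * kthread_cancel_delayed_work_sync(&example_dwork).
 */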
1250 
1251 struct kthread_flush_work {
1252 	struct kthread_work	work;
1253 	struct completion	done;
1254 };
1255 
1256 static void kthread_flush_work_fn(struct kthread_work *work)
1257 {
1258 	struct kthread_flush_work *fwork =
1259 		container_of(work, struct kthread_flush_work, work);
1260 	complete(&fwork->done);
1261 }
1262 
1263 /**
1264  * kthread_flush_work - flush a kthread_work
1265  * @work: work to flush
1266  *
1267  * If @work is queued or executing, wait for it to finish execution.
1268  */
1269 void kthread_flush_work(struct kthread_work *work)
1270 {
1271 	struct kthread_flush_work fwork = {
1272 		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1273 		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1274 	};
1275 	struct kthread_worker *worker;
1276 	bool noop = false;
1277 
1278 	worker = work->worker;
1279 	if (!worker)
1280 		return;
1281 
1282 	raw_spin_lock_irq(&worker->lock);
1283 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1284 	WARN_ON_ONCE(work->worker != worker);
1285 
1286 	if (!list_empty(&work->node))
1287 		kthread_insert_work(worker, &fwork.work, work->node.next);
1288 	else if (worker->current_work == work)
1289 		kthread_insert_work(worker, &fwork.work,
1290 				    worker->work_list.next);
1291 	else
1292 		noop = true;
1293 
1294 	raw_spin_unlock_irq(&worker->lock);
1295 
1296 	if (!noop)
1297 		wait_for_completion(&fwork.done);
1298 }
1299 EXPORT_SYMBOL_GPL(kthread_flush_work);
1300 
1301 /*
1302  * Make sure that the timer is neither set nor running and can no
1303  * longer manipulate the work list_head.
1304  *
1305  * The function is called under worker->lock. The lock is temporarily
1306  * released but the timer can't be set again in the meantime.
1307  */
1308 static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
1309 					      unsigned long *flags)
1310 {
1311 	struct kthread_delayed_work *dwork =
1312 		container_of(work, struct kthread_delayed_work, work);
1313 	struct kthread_worker *worker = work->worker;
1314 
1315 	/*
1316 	 * del_timer_sync() must be called to make sure that the timer
1317 	 * callback is not running. The lock must be temporarily released
1318 	 * to avoid a deadlock with the callback. In the meantime,
1319 	 * any queuing is blocked by setting the canceling counter.
1320 	 */
1321 	work->canceling++;
1322 	raw_spin_unlock_irqrestore(&worker->lock, *flags);
1323 	del_timer_sync(&dwork->timer);
1324 	raw_spin_lock_irqsave(&worker->lock, *flags);
1325 	work->canceling--;
1326 }
1327 
1328 /*
1329  * This function removes the work from the worker queue.
1330  *
1331  * It is called under worker->lock. The caller must make sure that
1332  * the timer used by delayed work is not running, e.g. by calling
1333  * kthread_cancel_delayed_work_timer().
1334  *
1335  * The work might still be in use when this function finishes. See the
1336  * current_work being processed by the worker.
1337  *
1338  * Return: %true if @work was pending and successfully canceled,
1339  *	%false if @work was not pending
1340  */
1341 static bool __kthread_cancel_work(struct kthread_work *work)
1342 {
1343 	/*
1344 	 * Try to remove the work from a worker list. It might either
1345 	 * be from worker->work_list or from worker->delayed_work_list.
1346 	 */
1347 	if (!list_empty(&work->node)) {
1348 		list_del_init(&work->node);
1349 		return true;
1350 	}
1351 
1352 	return false;
1353 }
1354 
1355 /**
1356  * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
1357  * @worker: kthread worker to use
1358  * @dwork: kthread delayed work to queue
1359  * @delay: number of jiffies to wait before queuing
1360  *
1361  * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
1362  * modify @dwork's timer so that it expires after @delay. If @delay is zero,
1363  * @work is guaranteed to be queued immediately.
1364  *
1365  * Return: %false if @dwork was idle and queued, %true otherwise.
1366  *
1367  * A special case is when the work is being canceled in parallel.
1368  * It might be caused either by the real kthread_cancel_delayed_work_sync()
1369  * or yet another kthread_mod_delayed_work() call. We let the other command
1370  * win and return %true here. The return value can be used for reference
1371  * counting and the number of queued works stays the same. Anyway, the caller
1372  * is supposed to synchronize these operations in a reasonable way.
1373  *
1374  * This function is safe to call from any context including IRQ handler.
1375  * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1376  * for details.
1377  */
1378 bool kthread_mod_delayed_work(struct kthread_worker *worker,
1379 			      struct kthread_delayed_work *dwork,
1380 			      unsigned long delay)
1381 {
1382 	struct kthread_work *work = &dwork->work;
1383 	unsigned long flags;
1384 	int ret;
1385 
1386 	raw_spin_lock_irqsave(&worker->lock, flags);
1387 
1388 	/* Do not bother with canceling when never queued. */
1389 	if (!work->worker) {
1390 		ret = false;
1391 		goto fast_queue;
1392 	}
1393 
1394 	/* Work must not be used with >1 worker, see kthread_queue_work() */
1395 	WARN_ON_ONCE(work->worker != worker);
1396 
1397 	/*
1398 	 * Temporarily cancel the work but do not fight with another command
1399 	 * that is canceling the work as well.
1400 	 *
1401 	 * It is a bit tricky because of possible races with another
1402 	 * mod_delayed_work() and cancel_delayed_work() callers.
1403 	 *
1404 	 * The timer must be canceled first because worker->lock is released
1405 	 * when doing so. But the work can be removed from the queue (list)
1406 	 * only when it can be queued again so that the return value can
1407 	 * be used for reference counting.
1408 	 */
1409 	kthread_cancel_delayed_work_timer(work, &flags);
1410 	if (work->canceling) {
1411 		/* The number of works in the queue does not change. */
1412 		ret = true;
1413 		goto out;
1414 	}
1415 	ret = __kthread_cancel_work(work);
1416 
1417 fast_queue:
1418 	__kthread_queue_delayed_work(worker, dwork, delay);
1419 out:
1420 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1421 	return ret;
1422 }
1423 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
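/*
 * Editorial usage sketch, not part of the original file: a watchdog-style
 * timeout that is pushed back every time activity is seen, reusing the
 * hypothetical example_dwork from above.
 *
 *	kthread_mod_delayed_work(w, &example_dwork, msecs_to_jiffies(5000));
 *
 * Unlike kthread_queue_delayed_work(), this re-arms the timer even when the
 * work is already pending.
 */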
1424 
1425 static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1426 {
1427 	struct kthread_worker *worker = work->worker;
1428 	unsigned long flags;
1429 	int ret = false;
1430 
1431 	if (!worker)
1432 		goto out;
1433 
1434 	raw_spin_lock_irqsave(&worker->lock, flags);
1435 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
1436 	WARN_ON_ONCE(work->worker != worker);
1437 
1438 	if (is_dwork)
1439 		kthread_cancel_delayed_work_timer(work, &flags);
1440 
1441 	ret = __kthread_cancel_work(work);
1442 
1443 	if (worker->current_work != work)
1444 		goto out_fast;
1445 
1446 	/*
1447 	 * The work is in progress and we need to wait with the lock released.
1448 	 * In the meantime, block any queuing by setting the canceling counter.
1449 	 */
1450 	work->canceling++;
1451 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1452 	kthread_flush_work(work);
1453 	raw_spin_lock_irqsave(&worker->lock, flags);
1454 	work->canceling--;
1455 
1456 out_fast:
1457 	raw_spin_unlock_irqrestore(&worker->lock, flags);
1458 out:
1459 	return ret;
1460 }
1461 
1462 /**
1463  * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1464  * @work: the kthread work to cancel
1465  *
1466  * Cancel @work and wait for its execution to finish.  This function
1467  * can be used even if the work re-queues itself. On return from this
1468  * function, @work is guaranteed to be not pending or executing on any CPU.
1469  *
1470  * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1471  * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
1472  *
1473  * The caller must ensure that the worker on which @work was last
1474  * queued can't be destroyed before this function returns.
1475  *
1476  * Return: %true if @work was pending, %false otherwise.
1477  */
1478 bool kthread_cancel_work_sync(struct kthread_work *work)
1479 {
1480 	return __kthread_cancel_work_sync(work, false);
1481 }
1482 EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1483 
1484 /**
1485  * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1486  *	wait for it to finish.
1487  * @dwork: the kthread delayed work to cancel
1488  *
1489  * This is kthread_cancel_work_sync() for delayed works.
1490  *
1491  * Return: %true if @dwork was pending, %false otherwise.
1492  */
1493 bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1494 {
1495 	return __kthread_cancel_work_sync(&dwork->work, true);
1496 }
1497 EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
1498 
1499 /**
1500  * kthread_flush_worker - flush all current works on a kthread_worker
1501  * @worker: worker to flush
1502  *
1503  * Wait until all currently executing or pending works on @worker are
1504  * finished.
1505  */
1506 void kthread_flush_worker(struct kthread_worker *worker)
1507 {
1508 	struct kthread_flush_work fwork = {
1509 		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1510 		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1511 	};
1512 
1513 	kthread_queue_work(worker, &fwork.work);
1514 	wait_for_completion(&fwork.done);
1515 }
1516 EXPORT_SYMBOL_GPL(kthread_flush_worker);
1517 
1518 /**
1519  * kthread_destroy_worker - destroy a kthread worker
1520  * @worker: worker to be destroyed
1521  *
1522  * Flush and destroy @worker.  The simple flush is enough because the kthread
1523  * worker API is used only in trivial scenarios.  There are no multi-step state
1524  * machines needed.
1525  *
1526  * Note that this function is not responsible for handling delayed work, so
1527  * the caller is responsible for queuing or canceling all delayed work items
1528  * before invoking this function.
1529  */
1530 void kthread_destroy_worker(struct kthread_worker *worker)
1531 {
1532 	struct task_struct *task;
1533 
1534 	task = worker->task;
1535 	if (WARN_ON(!task))
1536 		return;
1537 
1538 	kthread_flush_worker(worker);
1539 	kthread_stop(task);
1540 	WARN_ON(!list_empty(&worker->delayed_work_list));
1541 	WARN_ON(!list_empty(&worker->work_list));
1542 	kfree(worker);
1543 }
1544 EXPORT_SYMBOL(kthread_destroy_worker);
1545 
1546 /**
1547  * kthread_use_mm - make the calling kthread operate on an address space
1548  * @mm: address space to operate on
1549  */
1550 void kthread_use_mm(struct mm_struct *mm)
1551 {
1552 	struct mm_struct *active_mm;
1553 	struct task_struct *tsk = current;
1554 
1555 	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1556 	WARN_ON_ONCE(tsk->mm);
1557 
1558 	/*
1559 	 * It is possible for mm to be the same as tsk->active_mm, but
1560 	 * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
1561 	 * because these references are not equivalent.
1562 	 */
1563 	mmgrab(mm);
1564 
1565 	task_lock(tsk);
1566 	/* Hold off tlb flush IPIs while switching mm's */
1567 	local_irq_disable();
1568 	active_mm = tsk->active_mm;
1569 	tsk->active_mm = mm;
1570 	tsk->mm = mm;
1571 	membarrier_update_current_mm(mm);
1572 	switch_mm_irqs_off(active_mm, mm, tsk);
1573 	local_irq_enable();
1574 	task_unlock(tsk);
1575 #ifdef finish_arch_post_lock_switch
1576 	finish_arch_post_lock_switch();
1577 #endif
1578 
1579 	/*
1580 	 * When a kthread starts operating on an address space, the loop
1581 	 * in membarrier_{private,global}_expedited() may not observe
1582 	 * that tsk->mm is set, and thus not issue an IPI. Membarrier requires a
1583 	 * memory barrier after storing to tsk->mm, before accessing
1584 	 * user-space memory. A full memory barrier for membarrier
1585 	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
1586 	 * mmdrop_lazy_tlb().
1587 	 */
1588 	mmdrop_lazy_tlb(active_mm);
1589 }
1590 EXPORT_SYMBOL_GPL(kthread_use_mm);
1591 
1592 /**
1593  * kthread_unuse_mm - reverse the effect of kthread_use_mm()
1594  * @mm: address space to operate on
1595  */
1596 void kthread_unuse_mm(struct mm_struct *mm)
1597 {
1598 	struct task_struct *tsk = current;
1599 
1600 	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1601 	WARN_ON_ONCE(!tsk->mm);
1602 
1603 	task_lock(tsk);
1604 	/*
1605 	 * When a kthread stops operating on an address space, the loop
1606 	 * in membarrier_{private,global}_expedited() may not observe
1607 	 * tsk->mm any longer, and thus not issue an IPI. Membarrier requires a
1608 	 * memory barrier after accessing user-space memory, before
1609 	 * clearing tsk->mm.
1610 	 */
1611 	smp_mb__after_spinlock();
1612 	local_irq_disable();
1613 	tsk->mm = NULL;
1614 	membarrier_update_current_mm(NULL);
1615 	mmgrab_lazy_tlb(mm);
1616 	/* active_mm is still 'mm' */
1617 	enter_lazy_tlb(mm, tsk);
1618 	local_irq_enable();
1619 	task_unlock(tsk);
1620 
1621 	mmdrop(mm);
1622 }
1623 EXPORT_SYMBOL_GPL(kthread_unuse_mm);
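/*
 * Editorial usage sketch, not part of the original file: a kthread
 * temporarily adopting a user address space so it can use
 * copy_{from,to}_user(), in the spirit of vhost workers. The caller is
 * assumed to hold a reference on @mm (e.g. from mmget()); buf, ubuf and
 * len are hypothetical.
 *
 *	kthread_use_mm(mm);
 *	ret = copy_from_user(buf, ubuf, len);
 *	kthread_unuse_mm(mm);
 */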
1624 
1625 #ifdef CONFIG_BLK_CGROUP
1626 /**
1627  * kthread_associate_blkcg - associate blkcg to current kthread
1628  * @css: the cgroup info
1629  *
1630  * Current thread must be a kthread. The thread is running jobs on behalf of
1631  * other threads. In some cases, we expect the jobs to attach the cgroup info
1632  * of the original threads instead of that of the current thread. This function
1633  * stores the original thread's cgroup info in the current kthread's context for
1634  * retrieval.
1635  */
1636 void kthread_associate_blkcg(struct cgroup_subsys_state *css)
1637 {
1638 	struct kthread *kthread;
1639 
1640 	if (!(current->flags & PF_KTHREAD))
1641 		return;
1642 	kthread = to_kthread(current);
1643 	if (!kthread)
1644 		return;
1645 
1646 	if (kthread->blkcg_css) {
1647 		css_put(kthread->blkcg_css);
1648 		kthread->blkcg_css = NULL;
1649 	}
1650 	if (css) {
1651 		css_get(css);
1652 		kthread->blkcg_css = css;
1653 	}
1654 }
1655 EXPORT_SYMBOL(kthread_associate_blkcg);
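/*
 * Editorial usage sketch, not part of the original file: a kthread that
 * submits I/O on behalf of another task can charge it to that task's blkcg
 * by associating the stashed css around the submission, in the spirit of
 * the loop driver. cmd, submit_io() and the blkcg_css field are hypothetical.
 *
 *	kthread_associate_blkcg(cmd->blkcg_css);
 *	submit_io(cmd);
 *	kthread_associate_blkcg(NULL);
 */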
1656 
1657 /**
1658  * kthread_blkcg - get associated blkcg css of current kthread
1659  *
1660  * Current thread must be a kthread.
1661  */
1662 struct cgroup_subsys_state *kthread_blkcg(void)
1663 {
1664 	struct kthread *kthread;
1665 
1666 	if (current->flags & PF_KTHREAD) {
1667 		kthread = to_kthread(current);
1668 		if (kthread)
1669 			return kthread->blkcg_css;
1670 	}
1671 	return NULL;
1672 }
1673 #endif
1674