/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KTHREAD_H
#define _LINUX_KTHREAD_H
/* Simple interface for creating and stopping kernel threads without mess. */
#include <linux/err.h>
#include <linux/sched.h>

struct mm_struct;

__printf(4, 5)
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[], ...);

/**
 * kthread_create - create a kthread on the current node
 * @threadfn: the function to run in the thread
 * @data: data pointer for @threadfn()
 * @namefmt: printf-style format string for the thread name
 * @arg: arguments for @namefmt.
 *
 * This macro will create a kthread on the current node, leaving it in
 * the stopped state.  This is just a helper for kthread_create_on_node();
 * see the documentation there for more details.
 */
#define kthread_create(threadfn, data, namefmt, arg...) \
	kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)


struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data,
					  unsigned int cpu,
					  const char *namefmt);

void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk);
bool set_kthread_struct(struct task_struct *p);

void kthread_set_per_cpu(struct task_struct *k, int cpu);
bool kthread_is_per_cpu(struct task_struct *k);

/**
 * kthread_run - create and wake a thread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: Convenient wrapper for kthread_create() followed by
 * wake_up_process().  Returns the kthread or ERR_PTR(-ENOMEM).
 */
#define kthread_run(threadfn, data, namefmt, ...)			   \
({									   \
	struct task_struct *__k						   \
		= kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
	if (!IS_ERR(__k))						   \
		wake_up_process(__k);					   \
	__k;								   \
})
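
/*
 * Example (illustrative sketch only, not part of the API above): a
 * minimal thread function paired with kthread_run()/kthread_stop().
 * The names my_thread_fn, my_dev, dev and id are hypothetical.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		while (!kthread_should_stop()) {
 *			// handle one unit of work for dev, then sleep
 *			// until woken or the timeout expires
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 *
 *	struct task_struct *t;
 *
 *	t = kthread_run(my_thread_fn, dev, "my_dev/%d", id);
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *	...
 *	kthread_stop(t);	// wakes the thread, waits for it to exit
 */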

/**
 * kthread_run_on_cpu - create and wake a cpu bound thread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: Convenient wrapper for kthread_create_on_cpu()
 * followed by wake_up_process().  Returns the kthread or
 * ERR_PTR(-ENOMEM).
 */
static inline struct task_struct *
kthread_run_on_cpu(int (*threadfn)(void *data), void *data,
			unsigned int cpu, const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_cpu(threadfn, data, cpu, namefmt);
	if (!IS_ERR(p))
		wake_up_process(p);

	return p;
}

void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
bool kthread_should_stop(void);
bool kthread_should_park(void);
bool __kthread_should_park(struct task_struct *k);
bool kthread_freezable_should_stop(bool *was_frozen);
void *kthread_func(struct task_struct *k);
void *kthread_data(struct task_struct *k);
void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
void kthread_exit(long result) __noreturn;
void kthread_complete_and_exit(struct completion *, long) __noreturn;

int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
extern int tsk_fork_get_node(struct task_struct *tsk);

/*
 * Simple work processor based on kthread.
 *
 * This provides an easier way to make use of kthreads.  A kthread_work
 * can be queued and flushed using kthread_queue_work() and
 * kthread_flush_work() respectively.  Queued kthread_works are processed
 * by a kthread running kthread_worker_fn().
 */
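
/*
 * Example (illustrative sketch only): one way to drive the worker API
 * declared below.  The names my_work_fn, my_ctx and ctx are hypothetical;
 * everything else used here is declared later in this header.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
 *		// process ctx here; runs in the worker's kthread
 *	}
 *
 *	struct kthread_worker *worker;
 *
 *	worker = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *
 *	kthread_init_work(&ctx->work, my_work_fn);
 *	kthread_queue_work(worker, &ctx->work);	// false if already queued
 *	...
 *	kthread_flush_work(&ctx->work);		// wait for this work item
 *	kthread_destroy_worker(worker);		// flush all work, stop the kthread
 */
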
struct kthread_work;
typedef void (*kthread_work_func_t)(struct kthread_work *work);
void kthread_delayed_work_timer_fn(struct timer_list *t);

enum {
	KTW_FREEZABLE		= 1 << 0,	/* freeze during suspend */
};

struct kthread_worker {
	unsigned int		flags;
	raw_spinlock_t		lock;
	struct list_head	work_list;
	struct list_head	delayed_work_list;
	struct task_struct	*task;
	struct kthread_work	*current_work;
};

struct kthread_work {
	struct list_head	node;
	kthread_work_func_t	func;
	struct kthread_worker	*worker;
	/* Number of canceling calls that are running at the moment. */
	int			canceling;
};

struct kthread_delayed_work {
	struct kthread_work work;
	struct timer_list timer;
};

#define KTHREAD_WORK_INIT(work, fn)	{				\
	.node = LIST_HEAD_INIT((work).node),				\
	.func = (fn),							\
	}

#define KTHREAD_DELAYED_WORK_INIT(dwork, fn) {				\
	.work = KTHREAD_WORK_INIT((dwork).work, (fn)),			\
	.timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,	\
				     TIMER_IRQSAFE),			\
	}

#define DEFINE_KTHREAD_WORK(work, fn)					\
	struct kthread_work work = KTHREAD_WORK_INIT(work, fn)

#define DEFINE_KTHREAD_DELAYED_WORK(dwork, fn)				\
	struct kthread_delayed_work dwork =				\
		KTHREAD_DELAYED_WORK_INIT(dwork, fn)

extern void __kthread_init_worker(struct kthread_worker *worker,
			const char *name, struct lock_class_key *key);

#define kthread_init_worker(worker)					\
	do {								\
		static struct lock_class_key __key;			\
		__kthread_init_worker((worker), "("#worker")->lock", &__key); \
	} while (0)

#define kthread_init_work(work, fn)					\
	do {								\
		memset((work), 0, sizeof(struct kthread_work));		\
		INIT_LIST_HEAD(&(work)->node);				\
		(work)->func = (fn);					\
	} while (0)

#define kthread_init_delayed_work(dwork, fn)				\
	do {								\
		kthread_init_work(&(dwork)->work, (fn));		\
		timer_setup(&(dwork)->timer,				\
			    kthread_delayed_work_timer_fn,		\
			    TIMER_IRQSAFE);				\
	} while (0)

int kthread_worker_fn(void *worker_ptr);

__printf(2, 3)
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...);

__printf(3, 4) struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...);

bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work);

bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay);

bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay);

void kthread_flush_work(struct kthread_work *work);
void kthread_flush_worker(struct kthread_worker *worker);

bool kthread_cancel_work_sync(struct kthread_work *work);
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);

void kthread_destroy_worker(struct kthread_worker *worker);

void kthread_use_mm(struct mm_struct *mm);
void kthread_unuse_mm(struct mm_struct *mm);

struct cgroup_subsys_state;

#ifdef CONFIG_BLK_CGROUP
void kthread_associate_blkcg(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *kthread_blkcg(void);
#else
static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { }
#endif
#endif /* _LINUX_KTHREAD_H */