/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KTHREAD_H
#define _LINUX_KTHREAD_H
/* Simple interface for creating and stopping kernel threads without mess. */
#include <linux/err.h>
#include <linux/sched.h>

struct mm_struct;

/*
 * Create (but do not start) a kthread on the given NUMA node; the
 * namefmt/... pair is a printf-style thread name (checked by __printf).
 */
__printf(4, 5)
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[], ...);

/**
 * kthread_create - create a kthread on the current node
 * @threadfn: the function to run in the thread
 * @data: data pointer for @threadfn()
 * @namefmt: printf-style format string for the thread name
 * @arg: arguments for @namefmt.
 *
 * This macro will create a kthread on the current node, leaving it in
 * the stopped state.  This is just a helper for kthread_create_on_node();
 * see the documentation there for more details.
 */
#define kthread_create(threadfn, data, namefmt, arg...) \
	kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)


/* Create (but do not start) a kthread bound to @cpu. */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data,
					  unsigned int cpu,
					  const char *namefmt);

void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk);
bool set_kthread_struct(struct task_struct *p);

/* Mark/query a kthread as per-cpu (used by the scheduler/hotplug paths). */
void kthread_set_per_cpu(struct task_struct *k, int cpu);
bool kthread_is_per_cpu(struct task_struct *k);

/**
 * kthread_run - create and wake a thread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: Convenient wrapper for kthread_create() followed by
 * wake_up_process().  Returns the kthread or ERR_PTR(-ENOMEM).
 */
#define kthread_run(threadfn, data, namefmt, ...)			   \
({									   \
	struct task_struct *__k						   \
		= kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
	if (!IS_ERR(__k))						   \
		wake_up_process(__k);					   \
	__k;								   \
})

/**
 * kthread_run_on_cpu - create and wake a cpu bound thread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound,
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: Convenient wrapper for kthread_create_on_cpu()
 * followed by wake_up_process().  Returns the kthread or
 * ERR_PTR(-ENOMEM).
 */
static inline struct task_struct *
kthread_run_on_cpu(int (*threadfn)(void *data), void *data,
			unsigned int cpu, const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_cpu(threadfn, data, cpu, namefmt);
	/* Only wake the thread if creation actually succeeded. */
	if (!IS_ERR(p))
		wake_up_process(p);

	return p;
}

void free_kthread_struct(struct task_struct *k);

/* Bind an already-created (not yet started) kthread to a cpu/cpumask. */
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);

/*
 * Thread lifecycle: stop/park requests are delivered to the kthread,
 * which polls for them via the kthread_should_*() helpers below.
 */
int kthread_stop(struct task_struct *k);
bool kthread_should_stop(void);
bool kthread_should_park(void);
bool __kthread_should_park(struct task_struct *k);
bool kthread_freezable_should_stop(bool *was_frozen);
void *kthread_func(struct task_struct *k);
void *kthread_data(struct task_struct *k);
void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
void kthread_exit(long result) __noreturn;
void kthread_complete_and_exit(struct completion *, long) __noreturn;

/* The kthreadd daemon: spawner of all other kthreads. */
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
extern int tsk_fork_get_node(struct task_struct *tsk);

/*
 * Simple work processor based on kthread.
 */
/*
 * This provides easier way to make use of kthreads.  A kthread_work
 * can be queued and flushed using queue/kthread_flush_work()
 * respectively.  Queued kthread_works are processed by a kthread
 * running kthread_worker_fn().
 */
struct kthread_work;
typedef void (*kthread_work_func_t)(struct kthread_work *work);
void kthread_delayed_work_timer_fn(struct timer_list *t);

enum {
	KTW_FREEZABLE		= 1 << 0,	/* freeze during suspend */
};

struct kthread_worker {
	unsigned int		flags;		/* KTW_* flags above */
	raw_spinlock_t		lock;		/* protects the two lists below */
	struct list_head	work_list;	/* works ready to run */
	struct list_head	delayed_work_list; /* works waiting on their timer */
	struct task_struct	*task;		/* kthread running kthread_worker_fn() */
	struct kthread_work	*current_work;	/* work currently executing, if any */
};

struct kthread_work {
	struct list_head	node;		/* entry in a worker's list */
	kthread_work_func_t	func;		/* callback invoked by the worker */
	struct kthread_worker	*worker;	/* worker this work is queued on */
	/* Number of canceling calls that are running at the moment. */
	int			canceling;
};

struct kthread_delayed_work {
	struct kthread_work work;
	struct timer_list timer;	/* fires kthread_delayed_work_timer_fn() */
};

/* Static initializers for worker/work objects defined at file scope. */
#define KTHREAD_WORKER_INIT(worker)	{				\
	.lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock),		\
	.work_list = LIST_HEAD_INIT((worker).work_list),		\
	.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
	}

#define KTHREAD_WORK_INIT(work, fn)	{				\
	.node = LIST_HEAD_INIT((work).node),				\
	.func = (fn),							\
	}

#define KTHREAD_DELAYED_WORK_INIT(dwork, fn) {				\
	.work = KTHREAD_WORK_INIT((dwork).work, (fn)),			\
	.timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,\
				     TIMER_IRQSAFE),			\
	}

#define DEFINE_KTHREAD_WORKER(worker)					\
	struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)

#define DEFINE_KTHREAD_WORK(work, fn)					\
	struct kthread_work work = KTHREAD_WORK_INIT(work, fn)

#define DEFINE_KTHREAD_DELAYED_WORK(dwork, fn)				\
	struct kthread_delayed_work dwork =				\
		KTHREAD_DELAYED_WORK_INIT(dwork, fn)

/*
 * kthread_worker.lock needs its own lockdep class key when defined on
 * stack with lockdep enabled.  Use the following macros in such cases.
 */
#ifdef CONFIG_LOCKDEP
# define KTHREAD_WORKER_INIT_ONSTACK(worker)				\
	({ kthread_init_worker(&worker); worker; })
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker)				\
	struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
#else
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
#endif

extern void __kthread_init_worker(struct kthread_worker *worker,
			const char *name, struct lock_class_key *key);

/* Runtime initializer; the per-callsite static key gives lockdep a
 * distinct class for each worker definition. */
#define kthread_init_worker(worker)					\
	do {								\
		static struct lock_class_key __key;			\
		__kthread_init_worker((worker), "("#worker")->lock", &__key); \
	} while (0)

#define kthread_init_work(work, fn)					\
	do {								\
		memset((work), 0, sizeof(struct kthread_work));		\
		INIT_LIST_HEAD(&(work)->node);				\
		(work)->func = (fn);					\
	} while (0)

#define kthread_init_delayed_work(dwork, fn)				\
	do {								\
		kthread_init_work(&(dwork)->work, (fn));		\
		timer_setup(&(dwork)->timer,				\
			     kthread_delayed_work_timer_fn,		\
			     TIMER_IRQSAFE);				\
	} while (0)

/* Main loop executed by a worker's kthread; @worker_ptr is the worker. */
int kthread_worker_fn(void *worker_ptr);

__printf(2, 3)
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...);

__printf(3, 4) struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...);

/* Queue, delay-queue and re-arm works on a worker. */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work);

bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay);

bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay);

/* Wait for a single work, or for everything queued on a worker. */
void kthread_flush_work(struct kthread_work *work);
void kthread_flush_worker(struct kthread_worker *worker);

bool kthread_cancel_work_sync(struct kthread_work *work);
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);

void kthread_destroy_worker(struct kthread_worker *worker);

/* Borrow/return a user mm for the current kthread. */
void kthread_use_mm(struct mm_struct *mm);
void kthread_unuse_mm(struct mm_struct *mm);

struct cgroup_subsys_state;

#ifdef CONFIG_BLK_CGROUP
/* Associate the current kthread with a block cgroup (NULL to clear). */
void kthread_associate_blkcg(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *kthread_blkcg(void);
#else
/* !CONFIG_BLK_CGROUP: no-op stubs so callers need no ifdefs. */
static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { }
static inline struct cgroup_subsys_state *kthread_blkcg(void)
{
	return NULL;
}
#endif
#endif /* _LINUX_KTHREAD_H */