1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Kernel thread helper functions. 3 * Copyright (C) 2004 IBM Corporation, Rusty Russell. 4 * Copyright (C) 2009 Red Hat, Inc. 5 * 6 * Creation is done via kthreadd, so that we get a clean environment 7 * even if we're invoked from userspace (think modprobe, hotplug cpu, 8 * etc.). 9 */ 10 #include <uapi/linux/sched/types.h> 11 #include <linux/mm.h> 12 #include <linux/mmu_context.h> 13 #include <linux/sched.h> 14 #include <linux/sched/mm.h> 15 #include <linux/sched/task.h> 16 #include <linux/kthread.h> 17 #include <linux/completion.h> 18 #include <linux/err.h> 19 #include <linux/cgroup.h> 20 #include <linux/cpuset.h> 21 #include <linux/unistd.h> 22 #include <linux/file.h> 23 #include <linux/export.h> 24 #include <linux/mutex.h> 25 #include <linux/slab.h> 26 #include <linux/freezer.h> 27 #include <linux/ptrace.h> 28 #include <linux/uaccess.h> 29 #include <linux/numa.h> 30 #include <linux/sched/isolation.h> 31 #include <trace/events/sched.h> 32 33 34 static DEFINE_SPINLOCK(kthread_create_lock); 35 static LIST_HEAD(kthread_create_list); 36 struct task_struct *kthreadd_task; 37 38 struct kthread_create_info 39 { 40 /* Information passed to kthread() from kthreadd. */ 41 int (*threadfn)(void *data); 42 void *data; 43 int node; 44 45 /* Result passed back to kthread_create() from kthreadd. */ 46 struct task_struct *result; 47 struct completion *done; 48 49 struct list_head list; 50 }; 51 52 struct kthread { 53 unsigned long flags; 54 unsigned int cpu; 55 int (*threadfn)(void *); 56 void *data; 57 mm_segment_t oldfs; 58 struct completion parked; 59 struct completion exited; 60 #ifdef CONFIG_BLK_CGROUP 61 struct cgroup_subsys_state *blkcg_css; 62 #endif 63 }; 64 65 enum KTHREAD_BITS { 66 KTHREAD_IS_PER_CPU = 0, 67 KTHREAD_SHOULD_STOP, 68 KTHREAD_SHOULD_PARK, 69 }; 70 71 static inline struct kthread *to_kthread(struct task_struct *k) 72 { 73 WARN_ON(!(k->flags & PF_KTHREAD)); 74 return (__force void *)k->set_child_tid; 75 } 76 77 /* 78 * Variant of to_kthread() that doesn't assume @p is a kthread. 79 * 80 * Per construction; when: 81 * 82 * (p->flags & PF_KTHREAD) && p->set_child_tid 83 * 84 * the task is both a kthread and struct kthread is persistent. However 85 * PF_KTHREAD on it's own is not, kernel_thread() can exec() (See umh.c and 86 * begin_new_exec()). 87 */ 88 static inline struct kthread *__to_kthread(struct task_struct *p) 89 { 90 void *kthread = (__force void *)p->set_child_tid; 91 if (kthread && !(p->flags & PF_KTHREAD)) 92 kthread = NULL; 93 return kthread; 94 } 95 96 void set_kthread_struct(struct task_struct *p) 97 { 98 struct kthread *kthread; 99 100 if (__to_kthread(p)) 101 return; 102 103 kthread = kzalloc(sizeof(*kthread), GFP_KERNEL); 104 /* 105 * We abuse ->set_child_tid to avoid the new member and because it 106 * can't be wrongly copied by copy_process(). We also rely on fact 107 * that the caller can't exec, so PF_KTHREAD can't be cleared. 108 */ 109 p->set_child_tid = (__force void __user *)kthread; 110 } 111 112 void free_kthread_struct(struct task_struct *k) 113 { 114 struct kthread *kthread; 115 116 /* 117 * Can be NULL if this kthread was created by kernel_thread() 118 * or if kmalloc() in kthread() failed. 119 */ 120 kthread = to_kthread(k); 121 #ifdef CONFIG_BLK_CGROUP 122 WARN_ON_ONCE(kthread && kthread->blkcg_css); 123 #endif 124 kfree(kthread); 125 } 126 127 /** 128 * kthread_should_stop - should this kthread return now? 
129 * 130 * When someone calls kthread_stop() on your kthread, it will be woken 131 * and this will return true. You should then return, and your return 132 * value will be passed through to kthread_stop(). 133 */ 134 bool kthread_should_stop(void) 135 { 136 return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags); 137 } 138 EXPORT_SYMBOL(kthread_should_stop); 139 140 bool __kthread_should_park(struct task_struct *k) 141 { 142 return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags); 143 } 144 EXPORT_SYMBOL_GPL(__kthread_should_park); 145 146 /** 147 * kthread_should_park - should this kthread park now? 148 * 149 * When someone calls kthread_park() on your kthread, it will be woken 150 * and this will return true. You should then do the necessary 151 * cleanup and call kthread_parkme() 152 * 153 * Similar to kthread_should_stop(), but this keeps the thread alive 154 * and in a park position. kthread_unpark() "restarts" the thread and 155 * calls the thread function again. 156 */ 157 bool kthread_should_park(void) 158 { 159 return __kthread_should_park(current); 160 } 161 EXPORT_SYMBOL_GPL(kthread_should_park); 162 163 /** 164 * kthread_freezable_should_stop - should this freezable kthread return now? 165 * @was_frozen: optional out parameter, indicates whether %current was frozen 166 * 167 * kthread_should_stop() for freezable kthreads, which will enter 168 * refrigerator if necessary. This function is safe from kthread_stop() / 169 * freezer deadlock and freezable kthreads should use this function instead 170 * of calling try_to_freeze() directly. 171 */ 172 bool kthread_freezable_should_stop(bool *was_frozen) 173 { 174 bool frozen = false; 175 176 might_sleep(); 177 178 if (unlikely(freezing(current))) 179 frozen = __refrigerator(true); 180 181 if (was_frozen) 182 *was_frozen = frozen; 183 184 return kthread_should_stop(); 185 } 186 EXPORT_SYMBOL_GPL(kthread_freezable_should_stop); 187 188 /** 189 * kthread_func - return the function specified on kthread creation 190 * @task: kthread task in question 191 * 192 * Returns NULL if the task is not a kthread. 193 */ 194 void *kthread_func(struct task_struct *task) 195 { 196 struct kthread *kthread = __to_kthread(task); 197 if (kthread) 198 return kthread->threadfn; 199 return NULL; 200 } 201 EXPORT_SYMBOL_GPL(kthread_func); 202 203 /** 204 * kthread_data - return data value specified on kthread creation 205 * @task: kthread task in question 206 * 207 * Return the data value specified when kthread @task was created. 208 * The caller is responsible for ensuring the validity of @task when 209 * calling this function. 210 */ 211 void *kthread_data(struct task_struct *task) 212 { 213 return to_kthread(task)->data; 214 } 215 EXPORT_SYMBOL_GPL(kthread_data); 216 217 /** 218 * kthread_probe_data - speculative version of kthread_data() 219 * @task: possible kthread task in question 220 * 221 * @task could be a kthread task. Return the data value specified when it 222 * was created if accessible. If @task isn't a kthread task or its data is 223 * inaccessible for any reason, %NULL is returned. This function requires 224 * that @task itself is safe to dereference. 
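 *
 * Illustrative sketch, added for clarity and not part of the original
 * source; @suspect_task is a hypothetical task pointer the caller already
 * holds a reference on:
 *
 *	void *payload = kthread_probe_data(suspect_task);
 *
 *	if (payload)
 *		pr_debug("kthread payload: %p\n", payload);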
225 */ 226 void *kthread_probe_data(struct task_struct *task) 227 { 228 struct kthread *kthread = __to_kthread(task); 229 void *data = NULL; 230 231 if (kthread) 232 copy_from_kernel_nofault(&data, &kthread->data, sizeof(data)); 233 return data; 234 } 235 236 static void __kthread_parkme(struct kthread *self) 237 { 238 for (;;) { 239 /* 240 * TASK_PARKED is a special state; we must serialize against 241 * possible pending wakeups to avoid store-store collisions on 242 * task->state. 243 * 244 * Such a collision might possibly result in the task state 245 * changin from TASK_PARKED and us failing the 246 * wait_task_inactive() in kthread_park(). 247 */ 248 set_special_state(TASK_PARKED); 249 if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags)) 250 break; 251 252 /* 253 * Thread is going to call schedule(), do not preempt it, 254 * or the caller of kthread_park() may spend more time in 255 * wait_task_inactive(). 256 */ 257 preempt_disable(); 258 complete(&self->parked); 259 schedule_preempt_disabled(); 260 preempt_enable(); 261 } 262 __set_current_state(TASK_RUNNING); 263 } 264 265 void kthread_parkme(void) 266 { 267 __kthread_parkme(to_kthread(current)); 268 } 269 EXPORT_SYMBOL_GPL(kthread_parkme); 270 271 static int kthread(void *_create) 272 { 273 static const struct sched_param param = { .sched_priority = 0 }; 274 /* Copy data: it's on kthread's stack */ 275 struct kthread_create_info *create = _create; 276 int (*threadfn)(void *data) = create->threadfn; 277 void *data = create->data; 278 struct completion *done; 279 struct kthread *self; 280 int ret; 281 282 set_kthread_struct(current); 283 self = to_kthread(current); 284 285 /* If user was SIGKILLed, I release the structure. */ 286 done = xchg(&create->done, NULL); 287 if (!done) { 288 kfree(create); 289 do_exit(-EINTR); 290 } 291 292 if (!self) { 293 create->result = ERR_PTR(-ENOMEM); 294 complete(done); 295 do_exit(-ENOMEM); 296 } 297 298 self->threadfn = threadfn; 299 self->data = data; 300 init_completion(&self->exited); 301 init_completion(&self->parked); 302 current->vfork_done = &self->exited; 303 304 /* 305 * The new thread inherited kthreadd's priority and CPU mask. Reset 306 * back to default in case they have been changed. 307 */ 308 sched_setscheduler_nocheck(current, SCHED_NORMAL, ¶m); 309 set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_KTHREAD)); 310 311 /* OK, tell user we're spawned, wait for stop or wakeup */ 312 __set_current_state(TASK_UNINTERRUPTIBLE); 313 create->result = current; 314 /* 315 * Thread is going to call schedule(), do not preempt it, 316 * or the creator may spend more time in wait_task_inactive(). 317 */ 318 preempt_disable(); 319 complete(done); 320 schedule_preempt_disabled(); 321 preempt_enable(); 322 323 ret = -EINTR; 324 if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) { 325 cgroup_kthread_ready(); 326 __kthread_parkme(self); 327 ret = threadfn(data); 328 } 329 do_exit(ret); 330 } 331 332 /* called from kernel_clone() to get node information for about to be created task */ 333 int tsk_fork_get_node(struct task_struct *tsk) 334 { 335 #ifdef CONFIG_NUMA 336 if (tsk == kthreadd_task) 337 return tsk->pref_node_fork; 338 #endif 339 return NUMA_NO_NODE; 340 } 341 342 static void create_kthread(struct kthread_create_info *create) 343 { 344 int pid; 345 346 #ifdef CONFIG_NUMA 347 current->pref_node_fork = create->node; 348 #endif 349 /* We want our own signal handler (we take no signals by default). 
*/ 350 pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD); 351 if (pid < 0) { 352 /* If user was SIGKILLed, I release the structure. */ 353 struct completion *done = xchg(&create->done, NULL); 354 355 if (!done) { 356 kfree(create); 357 return; 358 } 359 create->result = ERR_PTR(pid); 360 complete(done); 361 } 362 } 363 364 static __printf(4, 0) 365 struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data), 366 void *data, int node, 367 const char namefmt[], 368 va_list args) 369 { 370 DECLARE_COMPLETION_ONSTACK(done); 371 struct task_struct *task; 372 struct kthread_create_info *create = kmalloc(sizeof(*create), 373 GFP_KERNEL); 374 375 if (!create) 376 return ERR_PTR(-ENOMEM); 377 create->threadfn = threadfn; 378 create->data = data; 379 create->node = node; 380 create->done = &done; 381 382 spin_lock(&kthread_create_lock); 383 list_add_tail(&create->list, &kthread_create_list); 384 spin_unlock(&kthread_create_lock); 385 386 wake_up_process(kthreadd_task); 387 /* 388 * Wait for completion in killable state, for I might be chosen by 389 * the OOM killer while kthreadd is trying to allocate memory for 390 * new kernel thread. 391 */ 392 if (unlikely(wait_for_completion_killable(&done))) { 393 /* 394 * If I was SIGKILLed before kthreadd (or new kernel thread) 395 * calls complete(), leave the cleanup of this structure to 396 * that thread. 397 */ 398 if (xchg(&create->done, NULL)) 399 return ERR_PTR(-EINTR); 400 /* 401 * kthreadd (or new kernel thread) will call complete() 402 * shortly. 403 */ 404 wait_for_completion(&done); 405 } 406 task = create->result; 407 if (!IS_ERR(task)) { 408 char name[TASK_COMM_LEN]; 409 410 /* 411 * task is already visible to other tasks, so updating 412 * COMM must be protected. 413 */ 414 vsnprintf(name, sizeof(name), namefmt, args); 415 set_task_comm(task, name); 416 } 417 kfree(create); 418 return task; 419 } 420 421 /** 422 * kthread_create_on_node - create a kthread. 423 * @threadfn: the function to run until signal_pending(current). 424 * @data: data ptr for @threadfn. 425 * @node: task and thread structures for the thread are allocated on this node 426 * @namefmt: printf-style name for the thread. 427 * 428 * Description: This helper function creates and names a kernel 429 * thread. The thread will be stopped: use wake_up_process() to start 430 * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and 431 * is affine to all CPUs. 432 * 433 * If thread is going to be bound on a particular cpu, give its node 434 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE. 435 * When woken, the thread will run @threadfn() with @data as its 436 * argument. @threadfn() can either return directly if it is a 437 * standalone thread for which no one will call kthread_stop(), or 438 * return when 'kthread_should_stop()' is true (which means 439 * kthread_stop() has been called). The return value should be zero 440 * or a negative error number; it will be passed to kthread_stop(). 441 * 442 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR). 443 */ 444 struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), 445 void *data, int node, 446 const char namefmt[], 447 ...) 
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	to_kthread(p)->cpu = cpu;
	return p;
}
EXPORT_SYMBOL(kthread_create_on_cpu);

void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
	struct kthread *kthread = to_kthread(k);
	if (!kthread)
		return;

	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));

	if (cpu < 0) {
		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
		return;
	}

	kthread->cpu = cpu;
	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

bool kthread_is_per_cpu(struct task_struct *p)
{
	struct kthread *kthread = __to_kthread(p);
	if (!kthread)
		return false;

	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it's
 * bound to the cpu again.
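 *
 * Minimal park/unpark sketch, not from the original source; my_task and
 * quiesce_shared_state() are hypothetical:
 *
 *	kthread_park(my_task);
 *	quiesce_shared_state();		(safe: my_task sits in TASK_PARKED)
 *	kthread_unpark(my_task);	(my_task resumes its main loop)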
561 */ 562 void kthread_unpark(struct task_struct *k) 563 { 564 struct kthread *kthread = to_kthread(k); 565 566 /* 567 * Newly created kthread was parked when the CPU was offline. 568 * The binding was lost and we need to set it again. 569 */ 570 if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) 571 __kthread_bind(k, kthread->cpu, TASK_PARKED); 572 573 clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); 574 /* 575 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup. 576 */ 577 wake_up_state(k, TASK_PARKED); 578 } 579 EXPORT_SYMBOL_GPL(kthread_unpark); 580 581 /** 582 * kthread_park - park a thread created by kthread_create(). 583 * @k: thread created by kthread_create(). 584 * 585 * Sets kthread_should_park() for @k to return true, wakes it, and 586 * waits for it to return. This can also be called after kthread_create() 587 * instead of calling wake_up_process(): the thread will park without 588 * calling threadfn(). 589 * 590 * Returns 0 if the thread is parked, -ENOSYS if the thread exited. 591 * If called by the kthread itself just the park bit is set. 592 */ 593 int kthread_park(struct task_struct *k) 594 { 595 struct kthread *kthread = to_kthread(k); 596 597 if (WARN_ON(k->flags & PF_EXITING)) 598 return -ENOSYS; 599 600 if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))) 601 return -EBUSY; 602 603 set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); 604 if (k != current) { 605 wake_up_process(k); 606 /* 607 * Wait for __kthread_parkme() to complete(), this means we 608 * _will_ have TASK_PARKED and are about to call schedule(). 609 */ 610 wait_for_completion(&kthread->parked); 611 /* 612 * Now wait for that schedule() to complete and the task to 613 * get scheduled out. 614 */ 615 WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED)); 616 } 617 618 return 0; 619 } 620 EXPORT_SYMBOL_GPL(kthread_park); 621 622 /** 623 * kthread_stop - stop a thread created by kthread_create(). 624 * @k: thread created by kthread_create(). 625 * 626 * Sets kthread_should_stop() for @k to return true, wakes it, and 627 * waits for it to exit. This can also be called after kthread_create() 628 * instead of calling wake_up_process(): the thread will exit without 629 * calling threadfn(). 630 * 631 * If threadfn() may call do_exit() itself, the caller must ensure 632 * task_struct can't go away. 633 * 634 * Returns the result of threadfn(), or %-EINTR if wake_up_process() 635 * was never called. 636 */ 637 int kthread_stop(struct task_struct *k) 638 { 639 struct kthread *kthread; 640 int ret; 641 642 trace_sched_kthread_stop(k); 643 644 get_task_struct(k); 645 kthread = to_kthread(k); 646 set_bit(KTHREAD_SHOULD_STOP, &kthread->flags); 647 kthread_unpark(k); 648 wake_up_process(k); 649 wait_for_completion(&kthread->exited); 650 ret = k->exit_code; 651 put_task_struct(k); 652 653 trace_sched_kthread_stop_ret(ret); 654 return ret; 655 } 656 EXPORT_SYMBOL(kthread_stop); 657 658 int kthreadd(void *unused) 659 { 660 struct task_struct *tsk = current; 661 662 /* Setup a clean context for our children to inherit. 
*/ 663 set_task_comm(tsk, "kthreadd"); 664 ignore_signals(tsk); 665 set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD)); 666 set_mems_allowed(node_states[N_MEMORY]); 667 668 current->flags |= PF_NOFREEZE; 669 cgroup_init_kthreadd(); 670 671 for (;;) { 672 set_current_state(TASK_INTERRUPTIBLE); 673 if (list_empty(&kthread_create_list)) 674 schedule(); 675 __set_current_state(TASK_RUNNING); 676 677 spin_lock(&kthread_create_lock); 678 while (!list_empty(&kthread_create_list)) { 679 struct kthread_create_info *create; 680 681 create = list_entry(kthread_create_list.next, 682 struct kthread_create_info, list); 683 list_del_init(&create->list); 684 spin_unlock(&kthread_create_lock); 685 686 create_kthread(create); 687 688 spin_lock(&kthread_create_lock); 689 } 690 spin_unlock(&kthread_create_lock); 691 } 692 693 return 0; 694 } 695 696 void __kthread_init_worker(struct kthread_worker *worker, 697 const char *name, 698 struct lock_class_key *key) 699 { 700 memset(worker, 0, sizeof(struct kthread_worker)); 701 raw_spin_lock_init(&worker->lock); 702 lockdep_set_class_and_name(&worker->lock, key, name); 703 INIT_LIST_HEAD(&worker->work_list); 704 INIT_LIST_HEAD(&worker->delayed_work_list); 705 } 706 EXPORT_SYMBOL_GPL(__kthread_init_worker); 707 708 /** 709 * kthread_worker_fn - kthread function to process kthread_worker 710 * @worker_ptr: pointer to initialized kthread_worker 711 * 712 * This function implements the main cycle of kthread worker. It processes 713 * work_list until it is stopped with kthread_stop(). It sleeps when the queue 714 * is empty. 715 * 716 * The works are not allowed to keep any locks, disable preemption or interrupts 717 * when they finish. There is defined a safe point for freezing when one work 718 * finishes and before a new one is started. 719 * 720 * Also the works must not be handled by more than one worker at the same time, 721 * see also kthread_queue_work(). 722 */ 723 int kthread_worker_fn(void *worker_ptr) 724 { 725 struct kthread_worker *worker = worker_ptr; 726 struct kthread_work *work; 727 728 /* 729 * FIXME: Update the check and remove the assignment when all kthread 730 * worker users are created using kthread_create_worker*() functions. 731 */ 732 WARN_ON(worker->task && worker->task != current); 733 worker->task = current; 734 735 if (worker->flags & KTW_FREEZABLE) 736 set_freezable(); 737 738 repeat: 739 set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */ 740 741 if (kthread_should_stop()) { 742 __set_current_state(TASK_RUNNING); 743 raw_spin_lock_irq(&worker->lock); 744 worker->task = NULL; 745 raw_spin_unlock_irq(&worker->lock); 746 return 0; 747 } 748 749 work = NULL; 750 raw_spin_lock_irq(&worker->lock); 751 if (!list_empty(&worker->work_list)) { 752 work = list_first_entry(&worker->work_list, 753 struct kthread_work, node); 754 list_del_init(&work->node); 755 } 756 worker->current_work = work; 757 raw_spin_unlock_irq(&worker->lock); 758 759 if (work) { 760 kthread_work_func_t func = work->func; 761 __set_current_state(TASK_RUNNING); 762 trace_sched_kthread_work_execute_start(work); 763 work->func(work); 764 /* 765 * Avoid dereferencing work after this point. The trace 766 * event only cares about the address. 
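 * func was saved in a local variable before work->func(work) ran for
 * exactly this reason: the execute_end event below can report it without
 * touching *work again.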
767 */ 768 trace_sched_kthread_work_execute_end(work, func); 769 } else if (!freezing(current)) 770 schedule(); 771 772 try_to_freeze(); 773 cond_resched(); 774 goto repeat; 775 } 776 EXPORT_SYMBOL_GPL(kthread_worker_fn); 777 778 static __printf(3, 0) struct kthread_worker * 779 __kthread_create_worker(int cpu, unsigned int flags, 780 const char namefmt[], va_list args) 781 { 782 struct kthread_worker *worker; 783 struct task_struct *task; 784 int node = NUMA_NO_NODE; 785 786 worker = kzalloc(sizeof(*worker), GFP_KERNEL); 787 if (!worker) 788 return ERR_PTR(-ENOMEM); 789 790 kthread_init_worker(worker); 791 792 if (cpu >= 0) 793 node = cpu_to_node(cpu); 794 795 task = __kthread_create_on_node(kthread_worker_fn, worker, 796 node, namefmt, args); 797 if (IS_ERR(task)) 798 goto fail_task; 799 800 if (cpu >= 0) 801 kthread_bind(task, cpu); 802 803 worker->flags = flags; 804 worker->task = task; 805 wake_up_process(task); 806 return worker; 807 808 fail_task: 809 kfree(worker); 810 return ERR_CAST(task); 811 } 812 813 /** 814 * kthread_create_worker - create a kthread worker 815 * @flags: flags modifying the default behavior of the worker 816 * @namefmt: printf-style name for the kthread worker (task). 817 * 818 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM) 819 * when the needed structures could not get allocated, and ERR_PTR(-EINTR) 820 * when the worker was SIGKILLed. 821 */ 822 struct kthread_worker * 823 kthread_create_worker(unsigned int flags, const char namefmt[], ...) 824 { 825 struct kthread_worker *worker; 826 va_list args; 827 828 va_start(args, namefmt); 829 worker = __kthread_create_worker(-1, flags, namefmt, args); 830 va_end(args); 831 832 return worker; 833 } 834 EXPORT_SYMBOL(kthread_create_worker); 835 836 /** 837 * kthread_create_worker_on_cpu - create a kthread worker and bind it 838 * to a given CPU and the associated NUMA node. 839 * @cpu: CPU number 840 * @flags: flags modifying the default behavior of the worker 841 * @namefmt: printf-style name for the kthread worker (task). 842 * 843 * Use a valid CPU number if you want to bind the kthread worker 844 * to the given CPU and the associated NUMA node. 845 * 846 * A good practice is to add the cpu number also into the worker name. 847 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu). 848 * 849 * CPU hotplug: 850 * The kthread worker API is simple and generic. It just provides a way 851 * to create, use, and destroy workers. 852 * 853 * It is up to the API user how to handle CPU hotplug. They have to decide 854 * how to handle pending work items, prevent queuing new ones, and 855 * restore the functionality when the CPU goes off and on. There are a 856 * few catches: 857 * 858 * - CPU affinity gets lost when it is scheduled on an offline CPU. 859 * 860 * - The worker might not exist when the CPU was off when the user 861 * created the workers. 862 * 863 * Good practice is to implement two CPU hotplug callbacks and to 864 * destroy/create the worker when the CPU goes down/up. 865 * 866 * Return: 867 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM) 868 * when the needed structures could not get allocated, and ERR_PTR(-EINTR) 869 * when the worker was SIGKILLed. 870 */ 871 struct kthread_worker * 872 kthread_create_worker_on_cpu(int cpu, unsigned int flags, 873 const char namefmt[], ...) 
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	trace_sched_kthread_work_queue_work(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to @worker for async execution. @worker must have been
 * created with kthread_create_worker(). Returns %true if @work was
 * successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It is called from an irq-safe timer, so interrupts are already disabled.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that the work is being used in a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list.
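	 * The timer has fired, so the work belongs on the regular work_list
	 * now, unless a concurrent cancel is in progress; in that case it is
	 * only removed from the delayed list and left unqueued.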
*/ 978 WARN_ON_ONCE(list_empty(&work->node)); 979 list_del_init(&work->node); 980 if (!work->canceling) 981 kthread_insert_work(worker, work, &worker->work_list); 982 983 raw_spin_unlock_irqrestore(&worker->lock, flags); 984 } 985 EXPORT_SYMBOL(kthread_delayed_work_timer_fn); 986 987 static void __kthread_queue_delayed_work(struct kthread_worker *worker, 988 struct kthread_delayed_work *dwork, 989 unsigned long delay) 990 { 991 struct timer_list *timer = &dwork->timer; 992 struct kthread_work *work = &dwork->work; 993 994 WARN_ON_FUNCTION_MISMATCH(timer->function, 995 kthread_delayed_work_timer_fn); 996 997 /* 998 * If @delay is 0, queue @dwork->work immediately. This is for 999 * both optimization and correctness. The earliest @timer can 1000 * expire is on the closest next tick and delayed_work users depend 1001 * on that there's no such delay when @delay is 0. 1002 */ 1003 if (!delay) { 1004 kthread_insert_work(worker, work, &worker->work_list); 1005 return; 1006 } 1007 1008 /* Be paranoid and try to detect possible races already now. */ 1009 kthread_insert_work_sanity_check(worker, work); 1010 1011 list_add(&work->node, &worker->delayed_work_list); 1012 work->worker = worker; 1013 timer->expires = jiffies + delay; 1014 add_timer(timer); 1015 } 1016 1017 /** 1018 * kthread_queue_delayed_work - queue the associated kthread work 1019 * after a delay. 1020 * @worker: target kthread_worker 1021 * @dwork: kthread_delayed_work to queue 1022 * @delay: number of jiffies to wait before queuing 1023 * 1024 * If the work has not been pending it starts a timer that will queue 1025 * the work after the given @delay. If @delay is zero, it queues the 1026 * work immediately. 1027 * 1028 * Return: %false if the @work has already been pending. It means that 1029 * either the timer was running or the work was queued. It returns %true 1030 * otherwise. 1031 */ 1032 bool kthread_queue_delayed_work(struct kthread_worker *worker, 1033 struct kthread_delayed_work *dwork, 1034 unsigned long delay) 1035 { 1036 struct kthread_work *work = &dwork->work; 1037 unsigned long flags; 1038 bool ret = false; 1039 1040 raw_spin_lock_irqsave(&worker->lock, flags); 1041 1042 if (!queuing_blocked(worker, work)) { 1043 __kthread_queue_delayed_work(worker, dwork, delay); 1044 ret = true; 1045 } 1046 1047 raw_spin_unlock_irqrestore(&worker->lock, flags); 1048 return ret; 1049 } 1050 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work); 1051 1052 struct kthread_flush_work { 1053 struct kthread_work work; 1054 struct completion done; 1055 }; 1056 1057 static void kthread_flush_work_fn(struct kthread_work *work) 1058 { 1059 struct kthread_flush_work *fwork = 1060 container_of(work, struct kthread_flush_work, work); 1061 complete(&fwork->done); 1062 } 1063 1064 /** 1065 * kthread_flush_work - flush a kthread_work 1066 * @work: work to flush 1067 * 1068 * If @work is queued or executing, wait for it to finish execution. 1069 */ 1070 void kthread_flush_work(struct kthread_work *work) 1071 { 1072 struct kthread_flush_work fwork = { 1073 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), 1074 COMPLETION_INITIALIZER_ONSTACK(fwork.done), 1075 }; 1076 struct kthread_worker *worker; 1077 bool noop = false; 1078 1079 worker = work->worker; 1080 if (!worker) 1081 return; 1082 1083 raw_spin_lock_irq(&worker->lock); 1084 /* Work must not be used with >1 worker, see kthread_queue_work(). 
 */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	raw_spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * Make sure that the timer is neither set nor running and could
 * not manipulate the work list_head any longer.
 *
 * The function is called under worker->lock. The lock is temporarily
 * released but the timer can't be set again in the meantime.
 */
static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
					      unsigned long *flags)
{
	struct kthread_delayed_work *dwork =
		container_of(work, struct kthread_delayed_work, work);
	struct kthread_worker *worker = work->worker;

	/*
	 * del_timer_sync() must be called to make sure that the timer
	 * callback is not running. The lock must be temporarily released
	 * to avoid a deadlock with the callback. In the meantime,
	 * any queuing is blocked by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, *flags);
	del_timer_sync(&dwork->timer);
	raw_spin_lock_irqsave(&worker->lock, *flags);
	work->canceling--;
}

/*
 * This function removes the work from the worker queue.
 *
 * It is called under worker->lock. The caller must make sure that
 * the timer used by delayed work is not running, e.g. by calling
 * kthread_cancel_delayed_work_timer().
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work)
{
	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %false if @dwork was idle and queued, %true otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %true here. The return value can be used for reference
 * counting and the number of queued works stays the same. Anyway, the caller
 * is supposed to synchronize these operations a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
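 *
 * Illustrative sketch, not part of the original source; my_worker, my_dwork
 * and the timeout value are hypothetical:
 *
 *	kthread_mod_delayed_work(my_worker, &my_dwork, msecs_to_jiffies(500));
 *
 * Each such call pushes the pending timeout back by another 500ms.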
1178 */ 1179 bool kthread_mod_delayed_work(struct kthread_worker *worker, 1180 struct kthread_delayed_work *dwork, 1181 unsigned long delay) 1182 { 1183 struct kthread_work *work = &dwork->work; 1184 unsigned long flags; 1185 int ret; 1186 1187 raw_spin_lock_irqsave(&worker->lock, flags); 1188 1189 /* Do not bother with canceling when never queued. */ 1190 if (!work->worker) { 1191 ret = false; 1192 goto fast_queue; 1193 } 1194 1195 /* Work must not be used with >1 worker, see kthread_queue_work() */ 1196 WARN_ON_ONCE(work->worker != worker); 1197 1198 /* 1199 * Temporary cancel the work but do not fight with another command 1200 * that is canceling the work as well. 1201 * 1202 * It is a bit tricky because of possible races with another 1203 * mod_delayed_work() and cancel_delayed_work() callers. 1204 * 1205 * The timer must be canceled first because worker->lock is released 1206 * when doing so. But the work can be removed from the queue (list) 1207 * only when it can be queued again so that the return value can 1208 * be used for reference counting. 1209 */ 1210 kthread_cancel_delayed_work_timer(work, &flags); 1211 if (work->canceling) { 1212 /* The number of works in the queue does not change. */ 1213 ret = true; 1214 goto out; 1215 } 1216 ret = __kthread_cancel_work(work); 1217 1218 fast_queue: 1219 __kthread_queue_delayed_work(worker, dwork, delay); 1220 out: 1221 raw_spin_unlock_irqrestore(&worker->lock, flags); 1222 return ret; 1223 } 1224 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work); 1225 1226 static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork) 1227 { 1228 struct kthread_worker *worker = work->worker; 1229 unsigned long flags; 1230 int ret = false; 1231 1232 if (!worker) 1233 goto out; 1234 1235 raw_spin_lock_irqsave(&worker->lock, flags); 1236 /* Work must not be used with >1 worker, see kthread_queue_work(). */ 1237 WARN_ON_ONCE(work->worker != worker); 1238 1239 if (is_dwork) 1240 kthread_cancel_delayed_work_timer(work, &flags); 1241 1242 ret = __kthread_cancel_work(work); 1243 1244 if (worker->current_work != work) 1245 goto out_fast; 1246 1247 /* 1248 * The work is in progress and we need to wait with the lock released. 1249 * In the meantime, block any queuing by setting the canceling counter. 1250 */ 1251 work->canceling++; 1252 raw_spin_unlock_irqrestore(&worker->lock, flags); 1253 kthread_flush_work(work); 1254 raw_spin_lock_irqsave(&worker->lock, flags); 1255 work->canceling--; 1256 1257 out_fast: 1258 raw_spin_unlock_irqrestore(&worker->lock, flags); 1259 out: 1260 return ret; 1261 } 1262 1263 /** 1264 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish 1265 * @work: the kthread work to cancel 1266 * 1267 * Cancel @work and wait for its execution to finish. This function 1268 * can be used even if the work re-queues itself. On return from this 1269 * function, @work is guaranteed to be not pending or executing on any CPU. 1270 * 1271 * kthread_cancel_work_sync(&delayed_work->work) must not be used for 1272 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead. 1273 * 1274 * The caller must ensure that the worker on which @work was last 1275 * queued can't be destroyed before this function returns. 1276 * 1277 * Return: %true if @work was pending, %false otherwise. 
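 *
 * Typical teardown sketch, not from the original source; my_work and
 * my_work_data are hypothetical:
 *
 *	kthread_cancel_work_sync(&my_work);
 *	kfree(my_work_data);	(safe: nothing is running or queued anymore)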
1278 */ 1279 bool kthread_cancel_work_sync(struct kthread_work *work) 1280 { 1281 return __kthread_cancel_work_sync(work, false); 1282 } 1283 EXPORT_SYMBOL_GPL(kthread_cancel_work_sync); 1284 1285 /** 1286 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and 1287 * wait for it to finish. 1288 * @dwork: the kthread delayed work to cancel 1289 * 1290 * This is kthread_cancel_work_sync() for delayed works. 1291 * 1292 * Return: %true if @dwork was pending, %false otherwise. 1293 */ 1294 bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork) 1295 { 1296 return __kthread_cancel_work_sync(&dwork->work, true); 1297 } 1298 EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync); 1299 1300 /** 1301 * kthread_flush_worker - flush all current works on a kthread_worker 1302 * @worker: worker to flush 1303 * 1304 * Wait until all currently executing or pending works on @worker are 1305 * finished. 1306 */ 1307 void kthread_flush_worker(struct kthread_worker *worker) 1308 { 1309 struct kthread_flush_work fwork = { 1310 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), 1311 COMPLETION_INITIALIZER_ONSTACK(fwork.done), 1312 }; 1313 1314 kthread_queue_work(worker, &fwork.work); 1315 wait_for_completion(&fwork.done); 1316 } 1317 EXPORT_SYMBOL_GPL(kthread_flush_worker); 1318 1319 /** 1320 * kthread_destroy_worker - destroy a kthread worker 1321 * @worker: worker to be destroyed 1322 * 1323 * Flush and destroy @worker. The simple flush is enough because the kthread 1324 * worker API is used only in trivial scenarios. There are no multi-step state 1325 * machines needed. 1326 */ 1327 void kthread_destroy_worker(struct kthread_worker *worker) 1328 { 1329 struct task_struct *task; 1330 1331 task = worker->task; 1332 if (WARN_ON(!task)) 1333 return; 1334 1335 kthread_flush_worker(worker); 1336 kthread_stop(task); 1337 WARN_ON(!list_empty(&worker->work_list)); 1338 kfree(worker); 1339 } 1340 EXPORT_SYMBOL(kthread_destroy_worker); 1341 1342 /** 1343 * kthread_use_mm - make the calling kthread operate on an address space 1344 * @mm: address space to operate on 1345 */ 1346 void kthread_use_mm(struct mm_struct *mm) 1347 { 1348 struct mm_struct *active_mm; 1349 struct task_struct *tsk = current; 1350 1351 WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD)); 1352 WARN_ON_ONCE(tsk->mm); 1353 1354 task_lock(tsk); 1355 /* Hold off tlb flush IPIs while switching mm's */ 1356 local_irq_disable(); 1357 active_mm = tsk->active_mm; 1358 if (active_mm != mm) { 1359 mmgrab(mm); 1360 tsk->active_mm = mm; 1361 } 1362 tsk->mm = mm; 1363 membarrier_update_current_mm(mm); 1364 switch_mm_irqs_off(active_mm, mm, tsk); 1365 local_irq_enable(); 1366 task_unlock(tsk); 1367 #ifdef finish_arch_post_lock_switch 1368 finish_arch_post_lock_switch(); 1369 #endif 1370 1371 /* 1372 * When a kthread starts operating on an address space, the loop 1373 * in membarrier_{private,global}_expedited() may not observe 1374 * that tsk->mm, and not issue an IPI. Membarrier requires a 1375 * memory barrier after storing to tsk->mm, before accessing 1376 * user-space memory. A full memory barrier for membarrier 1377 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by 1378 * mmdrop(), or explicitly with smp_mb(). 
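	 * The branch below provides exactly that barrier: mmdrop() when the
	 * active mm really changed, an explicit smp_mb() when it did not.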
1379 */ 1380 if (active_mm != mm) 1381 mmdrop(active_mm); 1382 else 1383 smp_mb(); 1384 1385 to_kthread(tsk)->oldfs = force_uaccess_begin(); 1386 } 1387 EXPORT_SYMBOL_GPL(kthread_use_mm); 1388 1389 /** 1390 * kthread_unuse_mm - reverse the effect of kthread_use_mm() 1391 * @mm: address space to operate on 1392 */ 1393 void kthread_unuse_mm(struct mm_struct *mm) 1394 { 1395 struct task_struct *tsk = current; 1396 1397 WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD)); 1398 WARN_ON_ONCE(!tsk->mm); 1399 1400 force_uaccess_end(to_kthread(tsk)->oldfs); 1401 1402 task_lock(tsk); 1403 /* 1404 * When a kthread stops operating on an address space, the loop 1405 * in membarrier_{private,global}_expedited() may not observe 1406 * that tsk->mm, and not issue an IPI. Membarrier requires a 1407 * memory barrier after accessing user-space memory, before 1408 * clearing tsk->mm. 1409 */ 1410 smp_mb__after_spinlock(); 1411 sync_mm_rss(mm); 1412 local_irq_disable(); 1413 tsk->mm = NULL; 1414 membarrier_update_current_mm(NULL); 1415 /* active_mm is still 'mm' */ 1416 enter_lazy_tlb(mm, tsk); 1417 local_irq_enable(); 1418 task_unlock(tsk); 1419 } 1420 EXPORT_SYMBOL_GPL(kthread_unuse_mm); 1421 1422 #ifdef CONFIG_BLK_CGROUP 1423 /** 1424 * kthread_associate_blkcg - associate blkcg to current kthread 1425 * @css: the cgroup info 1426 * 1427 * Current thread must be a kthread. The thread is running jobs on behalf of 1428 * other threads. In some cases, we expect the jobs attach cgroup info of 1429 * original threads instead of that of current thread. This function stores 1430 * original thread's cgroup info in current kthread context for later 1431 * retrieval. 1432 */ 1433 void kthread_associate_blkcg(struct cgroup_subsys_state *css) 1434 { 1435 struct kthread *kthread; 1436 1437 if (!(current->flags & PF_KTHREAD)) 1438 return; 1439 kthread = to_kthread(current); 1440 if (!kthread) 1441 return; 1442 1443 if (kthread->blkcg_css) { 1444 css_put(kthread->blkcg_css); 1445 kthread->blkcg_css = NULL; 1446 } 1447 if (css) { 1448 css_get(css); 1449 kthread->blkcg_css = css; 1450 } 1451 } 1452 EXPORT_SYMBOL(kthread_associate_blkcg); 1453 1454 /** 1455 * kthread_blkcg - get associated blkcg css of current kthread 1456 * 1457 * Current thread must be a kthread. 1458 */ 1459 struct cgroup_subsys_state *kthread_blkcg(void) 1460 { 1461 struct kthread *kthread; 1462 1463 if (current->flags & PF_KTHREAD) { 1464 kthread = to_kthread(current); 1465 if (kthread) 1466 return kthread->blkcg_css; 1467 } 1468 return NULL; 1469 } 1470 EXPORT_SYMBOL(kthread_blkcg); 1471 #endif 1472