// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

/*
 * TWA_SIGNAL signaling - use TIF_NOTIFY_SIGNAL, if available, as it's faster
 * than TIF_SIGPENDING since there's no dependency on ->sighand. The latter
 * is shared between threads and can cause contention on sighand->lock. Even
 * for the non-threaded case TIF_NOTIFY_SIGNAL is more efficient, as no
 * locking or IRQ disabling is involved for notification (or running)
 * purposes.
 */
static void task_work_notify_signal(struct task_struct *task)
{
#if defined(TIF_NOTIFY_SIGNAL)
	set_notify_signal(task);
#else
	unsigned long flags;

	/*
	 * Only grab the sighand lock if we don't already have some
	 * task_work pending. This pairs with the smp_store_mb()
	 * in get_signal(), see comment there.
	 */
	if (!(READ_ONCE(task->jobctl) & JOBCTL_TASK_WORK) &&
	    lock_task_sighand(task, &flags)) {
		task->jobctl |= JOBCTL_TASK_WORK;
		signal_wake_up(task, 0);
		unlock_task_sighand(task, &flags);
	}
#endif
}

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the targeted task (TWA_RESUME or TWA_SIGNAL)
 *
 * Queue @work for task_work_run() below and notify the @task if @notify
 * is TWA_RESUME or TWA_SIGNAL. Fails if the @task is exiting/exited and
 * thus can't process this @work. Otherwise @work->func() will be called
 * when the @task returns from kernel mode or exits.
 *
 * This is like the signal handler which runs in kernel mode, but it doesn't
 * try to wake up the @task.
 *
 * Note: there is no ordering guarantee on works queued here.
 *
 * RETURNS:
 * 0 on success, or -ESRCH if the @task is exiting/exited.
 */
int
task_work_add(struct task_struct *task, struct callback_head *work, int notify)
{
	struct callback_head *head;

	do {
		head = READ_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	switch (notify) {
	case TWA_RESUME:
		set_notify_resume(task);
		break;
	case TWA_SIGNAL:
		task_work_notify_signal(task);
		break;
	}

	return 0;
}

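/*
 * Illustrative sketch (not part of the upstream file, not compiled): how a
 * caller holding a reference to a task might queue a callback with
 * task_work_add(). The names example_ctx/example_func/example_queue are
 * hypothetical, and a real user would also need <linux/slab.h> for
 * kzalloc()/kfree().
 */
#if 0
struct example_ctx {
	struct callback_head cb;
	/* ... caller-private state ... */
};

static void example_func(struct callback_head *cb)
{
	struct example_ctx *ctx = container_of(cb, struct example_ctx, cb);

	/* Runs in the target task's context, in kernel mode. */
	kfree(ctx);
}

static int example_queue(struct task_struct *task)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;

	init_task_work(&ctx->cb, example_func);
	/* TWA_SIGNAL wakes the task as if a signal had arrived. */
	if (task_work_add(task, &ctx->cb, TWA_SIGNAL)) {
		kfree(ctx);	/* the task is exiting, work won't run */
		return -ESRCH;
	}
	return 0;
}
#endif
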
/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from the queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	if (likely(!task->task_works))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, in which case we will find
	 * it again, or we raced with task_work_run() and
	 * *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = READ_ONCE(*pprev))) {
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}

/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user-mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			head = NULL;
			work = READ_ONCE(task->task_works);
			if (!work) {
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
		} while (cmpxchg(&task->task_works, work, head) != work);

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It cannot remove
		 * the first entry == work, cmpxchg(task_works) must fail.
		 * But it can remove another entry from the ->next list.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
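
/*
 * Illustrative sketch (not part of the upstream file, not compiled): pairing
 * task_work_cancel() with the hypothetical example_queue() above. If the
 * owner tears down before the callback has run, it must cancel the entry,
 * otherwise the target task would later touch freed memory.
 */
#if 0
static void example_teardown(struct task_struct *task)
{
	struct callback_head *cb = task_work_cancel(task, example_func);

	if (cb) {
		/* Still pending: it has been unlinked and will never run. */
		kfree(container_of(cb, struct example_ctx, cb));
	}
	/*
	 * NULL means the callback already ran (or is about to run) in the
	 * target task, so example_func() does the kfree() itself.
	 */
}
#endif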