/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

/*

Goals and Theory of Operation

The primary goal of this feature is to reduce kernel boot time, by running
various independent hardware delay and discovery operations decoupled from
one another rather than strictly serialized.

More specifically, the asynchronous function call concept allows certain
operations (primarily during system boot) to happen asynchronously and out
of order, while the externally visible parts of these operations still
happen sequentially and in order (not unlike how out-of-order CPUs retire
their instructions in order).

Key to the asynchronous function call implementation is the concept of a
"sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core assigns each scheduled event such a sequence cookie and
passes it to the called function.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function makes sure that all asynchronous
operations that were scheduled before the operation corresponding to the
cookie have completed.

Subsystem/driver initialization code that has scheduled asynchronous probe
functions, but shares global resources with other drivers/subsystems that
do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before
returning from its init function. This is to maintain strict ordering
between the asynchronous and synchronous parts of the kernel.

*/
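
/*
 * A minimal usage sketch, as a hedged illustration only: my_probe,
 * my_slow_hardware_init, my_register_device and my_dev are made-up
 * names, not part of this file or of any real driver.
 *
 *	static void my_probe(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *dev = data;
 *
 *		my_slow_hardware_init(dev);		(may run out of order)
 *		async_synchronize_cookie(cookie);	(wait for earlier calls)
 *		my_register_device(dev);		(globally visible, in order)
 *	}
 *
 *	async_schedule(my_probe, dev);
 *
 * The slow hardware work runs concurrently with other asynchronous
 * calls, while the cookie synchronization keeps the externally visible
 * registration step in schedule order.
 */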

#include <linux/async.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <asm/atomic.h>

static async_cookie_t next_cookie = 1;

#define MAX_THREADS	256
#define MAX_WORK	32768

static LIST_HEAD(async_pending);
static LIST_HEAD(async_running);
static DEFINE_SPINLOCK(async_lock);

static int async_enabled = 0;

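/*
 * One queued asynchronous call: the function to invoke and its
 * argument, the sequence cookie assigned at schedule time, and the
 * running list this entry is moved to while it executes.
 */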
struct async_entry {
	struct list_head list;
	async_cookie_t   cookie;
	async_func_ptr	 *func;
	void             *data;
	struct list_head *running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);
static DECLARE_WAIT_QUEUE_HEAD(async_new);

static atomic_t entry_count;
static atomic_t thread_count;

extern int initcall_debug;

/*
 * MUST be called with the lock held!
 */
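/*
 * Returns the cookie of the oldest entry on the given running list
 * (or, failing that, on the shared pending list); if nothing is in
 * flight at all, returns next_cookie, which waiters can treat as
 * "infinity".
 */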
static async_cookie_t __lowest_in_progress(struct list_head *running)
{
	struct async_entry *entry;

	if (!list_empty(running)) {
		entry = list_first_entry(running,
			struct async_entry, list);
		return entry->cookie;
	} else if (!list_empty(&async_pending)) {
		entry = list_first_entry(&async_pending,
			struct async_entry, list);
		return entry->cookie;
	} else {
		/* nothing in progress... next_cookie is "infinity" */
		return next_cookie;
	}
}

static async_cookie_t lowest_in_progress(struct list_head *running)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(running);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * pick the first pending entry and run it
 */
static void run_one_entry(void)
{
	unsigned long flags;
	struct async_entry *entry;
	ktime_t calltime, delta, rettime;

	/* 1) pick one task from the pending queue */

	spin_lock_irqsave(&async_lock, flags);
	if (list_empty(&async_pending))
		goto out;
	entry = list_first_entry(&async_pending, struct async_entry, list);

	/* 2) move it to its running queue */
	list_del(&entry->list);
	list_add_tail(&entry->list, entry->running);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 3) run it (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk("calling  %lli_%pF @ %i\n", (long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie, entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 4) remove it from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);

	/* 5) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 6) wake up any waiters. */
	wake_up(&async_done);
	return;

out:
	spin_unlock_irqrestore(&async_lock, flags);
}

static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);
	wake_up(&async_new);
	return newcookie;
}

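/*
 * Schedule ptr(data) for asynchronous execution on the shared running
 * list; the returned cookie can later be passed to
 * async_synchronize_cookie(). The entry is allocated with GFP_ATOMIC,
 * so irq-off callers are allowed; when async is disabled, allocation
 * fails or too much work is pending, the call simply runs
 * synchronously.
 */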
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);

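/*
 * Variant that tracks the running entry on a caller-supplied list, so
 * a subsystem can later synchronize against only its own asynchronous
 * calls (via the _special synchronization functions below) instead of
 * against everything in the system.
 */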
async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *running)
{
	return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_special);

void async_synchronize_full(void)
{
	do {
		async_synchronize_cookie(next_cookie);
	} while (!list_empty(&async_running) || !list_empty(&async_pending));
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
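
/*
 * Hedged example of the rule from the comment at the top of this file
 * (my_subsys_init and my_probe are illustrative names only): init code
 * that scheduled async work but shares global resources with
 * non-async code waits for everything before returning.
 *
 *	static int __init my_subsys_init(void)
 *	{
 *		async_schedule(my_probe, NULL);
 *		async_synchronize_full();
 *		return 0;
 *	}
 */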

void async_synchronize_full_special(struct list_head *list)
{
	async_synchronize_cookie_special(next_cookie, list);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_special);
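
/*
 * Sketch of a hypothetical user of the _special variants (names are
 * illustrative): a private running list keeps this subsystem's calls
 * in their own synchronization domain.
 *
 *	static LIST_HEAD(my_async_running);
 *
 *	async_schedule_special(my_probe, NULL, &my_async_running);
 *	...
 *	async_synchronize_full_special(&my_async_running);
 */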

void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *running)
{
	ktime_t starttime, delta, endtime;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk("async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(running) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk("async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_special);

void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_special(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

static int async_thread(void *unused)
{
	DECLARE_WAITQUEUE(wq, current);
	add_wait_queue(&async_new, &wq);

	while (!kthread_should_stop()) {
		int ret = HZ;
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * check the list head without lock.. false positives
		 * are dealt with inside run_one_entry() while holding
		 * the lock.
		 */
		rmb();
		if (!list_empty(&async_pending))
			run_one_entry();
		else
			ret = schedule_timeout(HZ);

		if (ret == 0) {
			/*
			 * we timed out; this means we as thread are redundant.
			 * we sign off and die, but to avoid any races there
			 * is a last-straw check to see if work snuck in.
			 */
			atomic_dec(&thread_count);
			wmb(); /* manager must see our departure first */
			if (list_empty(&async_pending))
				break;
			/*
			 * whoops, work came in between us timing out and us
			 * signing off; we need to stay alive and keep working.
			 */
			atomic_inc(&thread_count);
		}
	}
	remove_wait_queue(&async_new, &wq);

	return 0;
}

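/*
 * The manager sizes the worker pool to the backlog: one worker per
 * pending entry, capped at MAX_THREADS. Workers that find no work for
 * a second (see schedule_timeout(HZ) above) retire themselves.
 */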
static int async_manager_thread(void *unused)
{
	DECLARE_WAITQUEUE(wq, current);
	add_wait_queue(&async_new, &wq);

	while (!kthread_should_stop()) {
		int tc, ec;

		set_current_state(TASK_INTERRUPTIBLE);

		tc = atomic_read(&thread_count);
		rmb();
		ec = atomic_read(&entry_count);

		while (tc < ec && tc < MAX_THREADS) {
			kthread_run(async_thread, NULL, "async/%i", tc);
			atomic_inc(&thread_count);
			tc++;
		}

		schedule();
	}
	remove_wait_queue(&async_new, &wq);

	return 0;
}

static int __init async_init(void)
{
	if (async_enabled)
		kthread_run(async_manager_thread, NULL, "async/mgr");
	return 0;
}

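/*
 * Asynchronous calls are opt-in: they stay disabled unless "fastboot"
 * is passed on the kernel command line.
 */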
static int __init setup_async(char *str)
{
	async_enabled = 1;
	return 1;
}

__setup("fastboot", setup_async);

core_initcall(async_init);
347