xref: /xnu-11215/libkern/os/log_queue.c (revision 94d3b452)
1 /*
2  * Copyright (c) 2020-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. Please obtain a copy of the License at
10  * http://www.opensource.apple.com/apsl/ and read it before using this
11  * file.
12  *
13  * The Original Code and all software distributed under the License are
14  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18  * Please see the License for the specific language governing rights and
19  * limitations under the License.
20  *           log_queue_failed_intr);
21  *
22  * @APPLE_LICENSE_HEADER_END@
23  */
24 
25 #include <kern/assert.h>
26 #include <kern/counter.h>
27 #include <kern/cpu_data.h>
28 #include <kern/percpu.h>
29 #include <kern/kalloc.h>
30 #include <kern/thread_call.h>
31 #include <libkern/libkern.h>
32 #include <sys/queue.h>
33 #include <vm/vm_kern.h>
34 
35 #include "log_queue.h"
36 #include "log_mem.h"
37 
38 #define LQ_DEFAULT_SZ_ORDER 15 // 32K per slot
39 #define LQ_DEFAULT_FREE_AFTER_CNT 15000 // Deallocate log queue after N logs
40 #define LQ_MAX_SZ_ORDER 20 // 1MB per slot should really be enough and is a hard cap
41 #define LQ_MIN_LOG_SZ_ORDER 5
42 #define LQ_MAX_LOG_SZ_ORDER 11
43 #define LQ_BATCH_SIZE 24
44 #define LQ_MAX_LM_SLOTS 8
45 #define LQ_LOW_MEM_SCALE 3
46 
47 #define LQ_MEM_ENABLE(q, i) ((q)->lq_mem_set |= (1 << (i)))
48 #define LQ_MEM_ENABLED(q, i) ((q)->lq_mem_set & (1 << (i)))
49 #define LQ_MEM_DISABLE(q, i) ((q)->lq_mem_set &= ~(1 << (i)))
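/*
 * lq_mem_set is a bitmask with one bit per memory slot; a set bit means the
 * corresponding lq_mem[] slot currently holds usable log queue memory.
 */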
50 
51 OS_ENUM(log_queue_entry_state, uint8_t,
52     LOG_QUEUE_ENTRY_STATE_INVALID = 0,
53     LOG_QUEUE_ENTRY_STATE_STORED,
54     LOG_QUEUE_ENTRY_STATE_DISPATCHED,
55     LOG_QUEUE_ENTRY_STATE_SENT,
56     LOG_QUEUE_ENTRY_STATE_FAILED
57     );
58 
59 OS_ENUM(lq_mem_state, uint8_t,
60     LQ_MEM_STATE_READY = 0,
61     LQ_MEM_STATE_ALLOCATING,
62     LQ_MEM_STATE_RELEASING
63     );
64 
65 OS_ENUM(lq_req_state, uint8_t,
66     LQ_REQ_STATE_INVALID = 0,
67     LQ_REQ_STATE_ALLOCATING,
68     LQ_REQ_STATE_RELEASING,
69     LQ_REQ_STATE_READY
70     );
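/*
 * Memory resize request lifecycle: the per-CPU queue sets lq_mem_state to
 * LQ_MEM_STATE_ALLOCATING or LQ_MEM_STATE_RELEASING, publishes the matching
 * lq_req_state and fires lq_mem_handler (a thread call). The handler performs
 * the allocation or free and publishes LQ_REQ_STATE_READY. A later call to
 * log_queue_mem_reconfigure() consumes the result, returning lq_mem_state to
 * LQ_MEM_STATE_READY and lq_req_state to LQ_REQ_STATE_INVALID.
 */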
71 
72 typedef struct log_queue_entry {
73 	STAILQ_ENTRY(log_queue_entry)   lqe_link;
74 	uint16_t                        lqe_size;
75 	uint16_t                        lqe_lm_id;
76 	_Atomic log_queue_entry_state_t lqe_state;
77 	log_payload_s                   lqe_payload;
78 } log_queue_entry_s, *log_queue_entry_t;
79 
80 typedef STAILQ_HEAD(, log_queue_entry) log_queue_list_s, *log_queue_list_t;
81 
82 typedef struct {
83 	log_queue_list_s        lq_log_list;
84 	log_queue_list_s        lq_dispatch_list;
85 	logmem_t                lq_mem[LQ_MAX_LM_SLOTS];
86 	size_t                  lq_mem_set;
87 	size_t                  lq_mem_size;
88 	size_t                  lq_mem_size_order;
89 	lq_mem_state_t          lq_mem_state;
90 	thread_call_t           lq_mem_handler;
91 	size_t                  lq_cnt_mem_active;
92 	size_t                  lq_cnt_mem_avail;
93 	_Atomic lq_req_state_t  lq_req_state;
94 	void                    *lq_req_mem;
95 	uint32_t                lq_ready : 1;
96 	uint32_t                lq_suspend : 1;
97 } log_queue_s, *log_queue_t;
98 
99 extern bool os_log_disabled(void);
100 
101 /*
102  * Log Queue
103  *
104  * Log queues are allocated and set up per CPU. When the firehose memory is
105  * full, logs are stored in a log queue and sent into the firehose once it
106  * has free space again. Each log queue (memory) can grow and shrink on
107  * demand by adding/removing memory to/from its memory slots. There are
108  * LQ_MAX_LM_SLOTS memory slots available to every log queue. Memory slots
109  * are released when not needed, with one slot always kept allocated per
110  * queue as a minimum.
111  *
112  * Boot args:
113  *
114  * lq_size_order: Per slot memory size defined as a power of 2 exponent
115  *                (i.e. 2^lq_bootarg_size_order). Zero disables queues.
116  *
117  * lq_nslots: Number of allocated slots to boot with for each log queue.
118  *            Once initial log traffic decreases, log queues release
119  *            slots as needed.
120  *
121  * If an extensive number of logs is expected, tuning the aforementioned
122  * boot-args accordingly captures the vast majority of logs and avoids drops.
123  */
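/*
 * For example (hypothetical values): booting with "lq_size_order=16 lq_nslots=4"
 * would start every per-CPU queue with four slots of 2^16 = 64K each, i.e.
 * roughly 256K of queue memory per CPU before any dynamic growth or shrink.
 */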
124 TUNABLE(size_t, lq_bootarg_size_order, "lq_size_order", LQ_DEFAULT_SZ_ORDER);
125 TUNABLE(size_t, lq_bootarg_nslots, "lq_nslots", LQ_MAX_LM_SLOTS);
126 
127 SCALABLE_COUNTER_DEFINE(log_queue_cnt_received);
128 SCALABLE_COUNTER_DEFINE(log_queue_cnt_rejected_fh);
129 SCALABLE_COUNTER_DEFINE(log_queue_cnt_queued);
130 SCALABLE_COUNTER_DEFINE(log_queue_cnt_sent);
131 SCALABLE_COUNTER_DEFINE(log_queue_cnt_dropped_nomem);
132 SCALABLE_COUNTER_DEFINE(log_queue_cnt_dropped_off);
133 SCALABLE_COUNTER_DEFINE(log_queue_cnt_mem_allocated);
134 SCALABLE_COUNTER_DEFINE(log_queue_cnt_mem_released);
135 SCALABLE_COUNTER_DEFINE(log_queue_cnt_mem_failed);
136 
137 static log_queue_s PERCPU_DATA(oslog_queue);
138 static size_t lq_low_mem_limit;
139 
140 static void *
141 log_queue_buffer_alloc(size_t amount)
142 {
143 	return kalloc_data_tag(amount, Z_WAITOK_ZERO, VM_KERN_MEMORY_LOG);
144 }
145 
146 static void
147 log_queue_buffer_free(void *addr, size_t amount)
148 {
149 	kfree_data(addr, amount);
150 }
151 
152 #define log_queue_entry_size(p) (sizeof(log_queue_entry_s) + (p)->lp_data_size)
153 
154 #define publish(a, v) os_atomic_store((a), (v), release)
155 #define read_dependency(v) os_atomic_load((v), dependency)
156 #define read_dependent(v, t) os_atomic_load_with_dependency_on((v), (uintptr_t)(t))
157 #define read_dependent_w(v, t) ({ \
158 	__auto_type _v = os_atomic_inject_dependency((v), (uintptr_t)(t)); \
159 	os_atomic_load_wide(_v, dependency); \
160 })
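/*
 * Publisher/consumer ordering: writers fill in an entry or request and then
 * publish() its state with release semantics; readers load the state with
 * read_dependency() and use the returned value to order their dependent
 * loads of the published fields via read_dependent()/read_dependent_w().
 */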
161 
162 static log_queue_entry_state_t
163 log_queue_entry_state(const log_queue_entry_t lqe)
164 {
165 	log_queue_entry_state_t state = read_dependency(&lqe->lqe_state);
166 	assert(state != LOG_QUEUE_ENTRY_STATE_INVALID);
167 	return state;
168 }
169 
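/*
 * Allocate an entry from the first enabled memory slot that can satisfy the
 * request; logmem_alloc() updates lqe_size to the size actually allocated,
 * which is then charged against the queue's available memory count.
 */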
170 static log_queue_entry_t
171 log_queue_entry_alloc(log_queue_t lq, size_t lqe_size)
172 {
173 	for (short i = 0; i < LQ_MAX_LM_SLOTS; i++) {
174 		if (!LQ_MEM_ENABLED(lq, i)) {
175 			continue;
176 		}
177 		log_queue_entry_t lqe = logmem_alloc(&lq->lq_mem[i], &lqe_size);
178 		if (lqe) {
179 			assert(lqe_size <= lq->lq_cnt_mem_avail);
180 			lq->lq_cnt_mem_avail -= lqe_size;
181 			assert(lqe_size <= UINT16_MAX);
182 			lqe->lqe_size = (uint16_t)lqe_size;
183 			lqe->lqe_lm_id = i;
184 			return lqe;
185 		}
186 	}
187 
188 	return NULL;
189 }
190 
191 static void
192 log_queue_entry_free(log_queue_t lq, log_queue_entry_t lqe)
193 {
194 	const size_t lqe_size = lqe->lqe_size;
195 	const uint16_t lqe_lm_id = lqe->lqe_lm_id;
196 
197 	bzero(lqe, lqe_size);
198 	logmem_free(&lq->lq_mem[lqe_lm_id], lqe, lqe_size);
199 	lq->lq_cnt_mem_avail += lqe_size;
200 }
201 
202 static bool
203 log_queue_add_entry(log_queue_t lq, log_payload_t lp, const uint8_t *lp_data)
204 {
205 	log_queue_entry_t lqe = log_queue_entry_alloc(lq, log_queue_entry_size(lp));
206 	if (!lqe) {
207 		counter_inc_preemption_disabled(&log_queue_cnt_dropped_nomem);
208 		return false;
209 	}
210 	assert(lqe->lqe_size >= lp->lp_data_size);
211 
212 	lqe->lqe_payload = *lp;
213 	(void) memcpy((uint8_t *)lqe + sizeof(*lqe), lp_data, lqe->lqe_payload.lp_data_size);
214 	STAILQ_INSERT_TAIL(&lq->lq_log_list, lqe, lqe_link);
215 	publish(&lqe->lqe_state, LOG_QUEUE_ENTRY_STATE_STORED);
216 
217 	counter_inc_preemption_disabled(&log_queue_cnt_queued);
218 
219 	return true;
220 }
221 
222 /*
223  * Remove successfully sent logs from a dispatch list and free them.
224  */
225 static size_t
226 dispatch_list_cleanup(log_queue_t lq)
227 {
228 	log_queue_entry_t lqe, lqe_tmp;
229 	size_t freed = 0;
230 
231 	STAILQ_FOREACH_SAFE(lqe, &lq->lq_dispatch_list, lqe_link, lqe_tmp) {
232 		log_queue_entry_state_t lqe_state = log_queue_entry_state(lqe);
233 		assert(lqe_state != LOG_QUEUE_ENTRY_STATE_STORED);
234 
235 		if (lqe_state == LOG_QUEUE_ENTRY_STATE_SENT) {
236 			STAILQ_REMOVE(&lq->lq_dispatch_list, lqe, log_queue_entry, lqe_link);
237 			publish(&lqe->lqe_state, LOG_QUEUE_ENTRY_STATE_INVALID);
238 			log_queue_entry_free(lq, lqe);
239 			counter_dec_preemption_disabled(&log_queue_cnt_queued);
240 			freed++;
241 		}
242 	}
243 
244 	return freed;
245 }
246 
247 /*
248  * Walk and collect logs stored in the log queue suitable for dispatching.
249  * First, collect previously failed logs, then (if still enough space) grab new
250  * logs.
251  */
252 static size_t
253 log_dispatch_prepare(log_queue_t lq, size_t requested, log_queue_entry_t *buf)
254 {
255 	log_queue_entry_t lqe, lqe_tmp;
256 	size_t collected = 0;
257 
258 	STAILQ_FOREACH(lqe, &lq->lq_dispatch_list, lqe_link) {
259 		log_queue_entry_state_t lqe_state = log_queue_entry_state(lqe);
260 		assert(lqe_state != LOG_QUEUE_ENTRY_STATE_STORED);
261 
262 		if (lqe_state == LOG_QUEUE_ENTRY_STATE_FAILED) {
263 			publish(&lqe->lqe_state, LOG_QUEUE_ENTRY_STATE_DISPATCHED);
264 			buf[collected++] = lqe;
265 		}
266 
267 		if (collected == requested) {
268 			return collected;
269 		}
270 	}
271 	assert(collected < requested);
272 
273 	STAILQ_FOREACH_SAFE(lqe, &lq->lq_log_list, lqe_link, lqe_tmp) {
274 		assert(log_queue_entry_state(lqe) == LOG_QUEUE_ENTRY_STATE_STORED);
275 
276 		STAILQ_REMOVE(&lq->lq_log_list, lqe, log_queue_entry, lqe_link);
277 		STAILQ_INSERT_TAIL(&lq->lq_dispatch_list, lqe, lqe_link);
278 		publish(&lqe->lqe_state, LOG_QUEUE_ENTRY_STATE_DISPATCHED);
279 
280 		buf[collected++] = lqe;
281 		if (collected == requested) {
282 			break;
283 		}
284 	}
285 
286 	return collected;
287 }
288 
289 /*
290  * Send dispatched logs to the firehose. Skip streaming when replaying.
291  * Streaming does not process timestamps and would therefore show logs out of
292  * order.
293  */
294 static void
295 log_queue_dispatch_logs(size_t logs_count, log_queue_entry_t *logs)
296 {
297 	for (size_t i = 0; i < logs_count; i++) {
298 		const log_queue_entry_t lqe = logs[i];
299 		log_queue_entry_state_t lqe_state = log_queue_entry_state(lqe);
300 
301 		if (lqe_state == LOG_QUEUE_ENTRY_STATE_DISPATCHED) {
302 			const log_payload_t lqe_lp = &lqe->lqe_payload;
303 
304 			log_payload_s lp = {
305 				.lp_ftid = read_dependent_w(&lqe_lp->lp_ftid, lqe_state),
306 				.lp_timestamp = read_dependent_w(&lqe_lp->lp_timestamp, lqe_state),
307 				.lp_stream = read_dependent(&lqe_lp->lp_stream, lqe_state),
308 				.lp_pub_data_size = read_dependent(&lqe_lp->lp_pub_data_size, lqe_state),
309 				.lp_data_size = read_dependent(&lqe_lp->lp_data_size, lqe_state)
310 			};
311 			const void *lp_data = (uint8_t *)lqe + sizeof(*lqe);
312 
313 			/*
314 			 * The log queue mechanism expects only the state to be
315 			 * modified here since we are likely running on a
316 			 * different cpu. Queue cleanup will be done safely
317 			 * later in dispatch_list_cleanup().
318 			 */
319 			if (log_payload_send(&lp, lp_data, false)) {
320 				publish(&lqe->lqe_state, LOG_QUEUE_ENTRY_STATE_SENT);
321 				counter_inc(&log_queue_cnt_sent);
322 			} else {
323 				publish(&lqe->lqe_state, LOG_QUEUE_ENTRY_STATE_FAILED);
324 			}
325 		}
326 	}
327 }
328 
329 static bool
330 log_queue_empty(const log_queue_t lq)
331 {
332 	return STAILQ_EMPTY(&lq->lq_log_list) && STAILQ_EMPTY(&lq->lq_dispatch_list);
333 }
334 
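/*
 * A queue is considered low on memory when the amount of memory it still has
 * available drops below lq_low_mem_limit per active memory slot.
 */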
335 static boolean_t
336 log_queue_low_mem(const log_queue_t lq)
337 {
338 	return lq->lq_cnt_mem_avail < (lq->lq_cnt_mem_active * lq_low_mem_limit);
339 }
340 
341 static lq_req_state_t
342 log_queue_request_state(log_queue_t lq)
343 {
344 	lq_req_state_t req_state = read_dependency(&lq->lq_req_state);
345 	return req_state;
346 }
347 
348 static void
349 log_queue_mem_init(log_queue_t lq, size_t idx, void *buf, size_t buflen)
350 {
351 	assert(buf);
352 	assert(buflen > 0);
353 	assert(idx < LQ_MAX_LM_SLOTS);
354 	assert(!LQ_MEM_ENABLED(lq, idx));
355 
356 	logmem_init(&lq->lq_mem[idx], buf, buflen, lq->lq_mem_size_order,
357 	    LQ_MIN_LOG_SZ_ORDER, LQ_MAX_LOG_SZ_ORDER);
358 }
359 
360 static int
361 log_queue_mem_free_slot(log_queue_t lq)
362 {
363 	assert(LQ_MEM_ENABLED(lq, 0));
364 
365 	for (int i = 1; i < LQ_MAX_LM_SLOTS; i++) {
366 		if (!LQ_MEM_ENABLED(lq, i)) {
367 			return i;
368 		}
369 	}
370 	return -1;
371 }
372 
373 static void
374 log_queue_memory_handler(thread_call_param_t a0, __unused thread_call_param_t a1)
375 {
376 	log_queue_t lq = (log_queue_t)a0;
377 	lq_req_state_t req_state = log_queue_request_state(lq);
378 
379 	assert(req_state != LQ_REQ_STATE_INVALID);
380 
381 	if (req_state == LQ_REQ_STATE_ALLOCATING) {
382 		lq->lq_req_mem = log_queue_buffer_alloc(lq->lq_mem_size);
383 		publish(&lq->lq_req_state, LQ_REQ_STATE_READY);
384 
385 		if (lq->lq_req_mem) {
386 			counter_inc(&log_queue_cnt_mem_allocated);
387 		} else {
388 			counter_inc(&log_queue_cnt_mem_failed);
389 		}
390 	} else if (req_state == LQ_REQ_STATE_RELEASING) {
391 		void *buf = read_dependent(&lq->lq_req_mem, req_state);
392 
393 		log_queue_buffer_free(buf, lq->lq_mem_size);
394 		lq->lq_req_mem = NULL;
395 		publish(&lq->lq_req_state, LQ_REQ_STATE_READY);
396 
397 		counter_inc(&log_queue_cnt_mem_released);
398 	}
399 }
400 
401 static void
402 log_queue_order_memory(log_queue_t lq)
403 {
404 	boolean_t __assert_only running;
405 
406 	lq->lq_req_mem = NULL;
407 	publish(&lq->lq_req_state, LQ_REQ_STATE_ALLOCATING);
408 
409 	running = thread_call_enter(lq->lq_mem_handler);
410 	assert(!running);
411 }
412 
413 static void
414 log_queue_release_memory(log_queue_t lq, void *buf)
415 {
416 	boolean_t __assert_only running;
417 
418 	assert(buf);
419 	lq->lq_req_mem = buf;
420 	publish(&lq->lq_req_state, LQ_REQ_STATE_RELEASING);
421 
422 	running = thread_call_enter(lq->lq_mem_handler);
423 	assert(!running);
424 }
425 
426 static void
427 log_queue_mem_enable(log_queue_t lq, size_t i)
428 {
429 	logmem_t *lm = &lq->lq_mem[i];
430 	assert(!LQ_MEM_ENABLED(lq, i));
431 
432 	LQ_MEM_ENABLE(lq, i);
433 	lq->lq_cnt_mem_active++;
434 	lq->lq_cnt_mem_avail += lm->lm_cnt_free;
435 }
436 
437 static void
438 log_queue_mem_disable(log_queue_t lq, size_t i)
439 {
440 	logmem_t *lm = &lq->lq_mem[i];
441 	assert(LQ_MEM_ENABLED(lq, i));
442 
443 	LQ_MEM_DISABLE(lq, i);
444 	lq->lq_cnt_mem_active--;
445 	lq->lq_cnt_mem_avail -= lm->lm_cnt_free;
446 }
447 
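/*
 * Find an enabled, completely unused slot (other than slot 0), disable it and
 * return its buffer so it can be freed outside the per-CPU critical section.
 */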
448 static void *
449 log_queue_mem_reclaim(log_queue_t lq)
450 {
451 	for (int i = 1; i < LQ_MAX_LM_SLOTS; i++) {
452 		logmem_t *lm = &lq->lq_mem[i];
453 		if (LQ_MEM_ENABLED(lq, i) && logmem_empty(lm)) {
454 			assert(lm->lm_mem_size == lq->lq_mem_size);
455 			void *reclaimed = lm->lm_mem;
456 			log_queue_mem_disable(lq, i);
457 			/* Do not use bzero here, see rdar://116922009 */
458 			*lm = (logmem_t){ };
459 			return reclaimed;
460 		}
461 	}
462 	return NULL;
463 }
464 
465 static void
466 log_queue_mem_reconfigure(log_queue_t lq)
467 {
468 	assert(lq->lq_mem_state == LQ_MEM_STATE_ALLOCATING ||
469 	    lq->lq_mem_state == LQ_MEM_STATE_RELEASING);
470 
471 	lq_req_state_t req_state = log_queue_request_state(lq);
472 
473 	if (req_state == LQ_REQ_STATE_READY) {
474 		if (lq->lq_mem_state == LQ_MEM_STATE_ALLOCATING) {
475 			void *buf = read_dependent(&lq->lq_req_mem, req_state);
476 			if (buf) {
477 				const int i = log_queue_mem_free_slot(lq);
478 				assert(i > 0);
479 				log_queue_mem_init(lq, i, buf, lq->lq_mem_size);
480 				log_queue_mem_enable(lq, i);
481 			}
482 		}
483 		lq->lq_mem_state = LQ_MEM_STATE_READY;
484 		publish(&lq->lq_req_state, LQ_REQ_STATE_INVALID);
485 	}
486 }
487 
488 static boolean_t
489 log_queue_needs_memory(log_queue_t lq, boolean_t new_suspend)
490 {
491 	if (new_suspend || log_queue_low_mem(lq)) {
492 		return lq->lq_cnt_mem_active < LQ_MAX_LM_SLOTS;
493 	}
494 	return false;
495 }
496 
497 static boolean_t
498 log_queue_can_release_memory(log_queue_t lq)
499 {
500 	assert(lq->lq_mem_state == LQ_MEM_STATE_READY);
501 
502 	if (lq->lq_cnt_mem_active > 1 && log_queue_empty(lq) && !lq->lq_suspend) {
503 		const uint64_t total_log_cnt = counter_load(&log_queue_cnt_received);
504 		return total_log_cnt > LQ_DEFAULT_FREE_AFTER_CNT;
505 	}
506 	return false;
507 }
508 
509 extern boolean_t tasks_suspend_state;
510 
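/*
 * Returns true exactly once per suspension period, i.e. when this queue first
 * observes tasks_suspend_state set. Callers use it to proactively order more
 * queue memory (see log_queue_needs_memory()).
 */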
511 static boolean_t
512 detect_new_suspend(log_queue_t lq)
513 {
514 	if (!tasks_suspend_state) {
515 		lq->lq_suspend = false;
516 		return false;
517 	}
518 
519 	if (!lq->lq_suspend) {
520 		lq->lq_suspend = true;
521 		return true;
522 	}
523 
524 	return false;
525 }
526 
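/*
 * Drain the current CPU's log queue. With preemption disabled, free entries
 * that were already sent, collect up to LQ_BATCH_SIZE entries to dispatch and
 * decide whether queue memory needs to grow or can shrink. With preemption
 * re-enabled, issue any memory request and push the collected batch to the
 * firehose.
 */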
527 static void
528 log_queue_dispatch(void)
529 {
530 	lq_mem_state_t new_mem_state = LQ_MEM_STATE_READY;
531 	void *reclaimed_memory = NULL;
532 
533 	disable_preemption();
534 
535 	log_queue_t lq = PERCPU_GET(oslog_queue);
536 	if (__improbable(!lq->lq_ready)) {
537 		enable_preemption();
538 		return;
539 	}
540 
541 	dispatch_list_cleanup(lq);
542 
543 	log_queue_entry_t logs[LQ_BATCH_SIZE];
544 	size_t logs_count = log_dispatch_prepare(lq, LQ_BATCH_SIZE, (log_queue_entry_t *)&logs);
545 
546 	boolean_t new_suspend = detect_new_suspend(lq);
547 
548 	if (__improbable(lq->lq_mem_state != LQ_MEM_STATE_READY)) {
549 		log_queue_mem_reconfigure(lq);
550 	} else if (logs_count == 0 && log_queue_can_release_memory(lq)) {
551 		reclaimed_memory = log_queue_mem_reclaim(lq);
552 		if (reclaimed_memory) {
553 			lq->lq_mem_state = LQ_MEM_STATE_RELEASING;
554 			new_mem_state = lq->lq_mem_state;
555 		}
556 	} else if (log_queue_needs_memory(lq, new_suspend)) {
557 		lq->lq_mem_state = LQ_MEM_STATE_ALLOCATING;
558 		new_mem_state = lq->lq_mem_state;
559 	}
560 
561 	enable_preemption();
562 
563 	switch (new_mem_state) {
564 	case LQ_MEM_STATE_RELEASING:
565 		assert(logs_count == 0);
566 		log_queue_release_memory(lq, reclaimed_memory);
567 		break;
568 	case LQ_MEM_STATE_ALLOCATING:
569 		log_queue_order_memory(lq);
570 	/* FALLTHROUGH */
571 	case LQ_MEM_STATE_READY:
572 		log_queue_dispatch_logs(logs_count, logs);
573 		break;
574 	default:
575 		panic("Invalid log memory state %u", new_mem_state);
576 		break;
577 	}
578 }
579 
580 static bool
581 log_queue_add(log_payload_t lp, const uint8_t *lp_data)
582 {
583 	boolean_t order_memory = false;
584 
585 	disable_preemption();
586 
587 	log_queue_t lq = PERCPU_GET(oslog_queue);
588 	if (__improbable(!lq->lq_ready)) {
589 		enable_preemption();
590 		counter_inc(&log_queue_cnt_dropped_off);
591 		return false;
592 	}
593 
594 	boolean_t new_suspend = detect_new_suspend(lq);
595 
596 	if (__improbable(lq->lq_mem_state != LQ_MEM_STATE_READY)) {
597 		log_queue_mem_reconfigure(lq);
598 	} else if (log_queue_needs_memory(lq, new_suspend)) {
599 		lq->lq_mem_state = LQ_MEM_STATE_ALLOCATING;
600 		order_memory = true;
601 	}
602 
603 	bool added = log_queue_add_entry(lq, lp, lp_data);
604 	enable_preemption();
605 
606 	if (order_memory) {
607 		log_queue_order_memory(lq);
608 	}
609 
610 	return added;
611 }
612 
613 __startup_func
614 static size_t
615 log_queue_init_memory(log_queue_t lq, size_t lm_count)
616 {
617 	assert(lm_count <= LQ_MAX_LM_SLOTS);
618 
619 	for (size_t i = 0; i < lm_count; i++) {
620 		void *buf = log_queue_buffer_alloc(lq->lq_mem_size);
621 		if (!buf) {
622 			return i;
623 		}
624 		counter_inc(&log_queue_cnt_mem_allocated);
625 		log_queue_mem_init(lq, i, buf, lq->lq_mem_size);
626 		log_queue_mem_enable(lq, i);
627 	}
628 
629 	return lm_count;
630 }
631 
632 __startup_func
633 static void
634 oslog_init_log_queues(void)
635 {
636 	if (os_log_disabled()) {
637 		printf("Log queues disabled: Logging disabled by ATM\n");
638 		return;
639 	}
640 
641 	if (lq_bootarg_size_order == 0) {
642 		printf("Log queues disabled: Zero lq_size_order boot argument\n");
643 		return;
644 	}
645 
646 	lq_bootarg_size_order = MAX(lq_bootarg_size_order, PAGE_SHIFT);
647 	lq_bootarg_size_order = MIN(lq_bootarg_size_order, LQ_MAX_SZ_ORDER);
648 
649 	lq_bootarg_nslots = MAX(lq_bootarg_nslots, 1);
650 	lq_bootarg_nslots = MIN(lq_bootarg_nslots, LQ_MAX_LM_SLOTS);
651 
652 	lq_low_mem_limit = MAX(1 << (lq_bootarg_size_order - LQ_LOW_MEM_SCALE), 1024);
653 
654 	unsigned int slot_count = 0;
655 
656 	percpu_foreach(lq, oslog_queue) {
657 		lq->lq_mem_size_order = lq_bootarg_size_order;
658 		lq->lq_mem_size = round_page(logmem_required_size(lq->lq_mem_size_order, LQ_MIN_LOG_SZ_ORDER));
659 		lq->lq_mem_handler = thread_call_allocate(log_queue_memory_handler, (thread_call_param_t)lq);
660 		slot_count += log_queue_init_memory(lq, lq_bootarg_nslots);
661 		STAILQ_INIT(&lq->lq_log_list);
662 		STAILQ_INIT(&lq->lq_dispatch_list);
663 		lq->lq_ready = true;
664 	}
665 
666 	printf("Log queues configured: slot count: %u, per-slot size: %u, total size: %u\n",
667 	    slot_count, (1 << lq_bootarg_size_order),
668 	    slot_count * (1 << lq_bootarg_size_order));
669 }
670 STARTUP(OSLOG, STARTUP_RANK_SECOND, oslog_init_log_queues);
671 
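/*
 * Try to send the payload straight to the firehose and, on success, also
 * drain this CPU's queue. If the firehose rejects the payload, queue it for a
 * later dispatch attempt. Returns false only when the log was dropped, i.e.
 * it could neither be sent nor queued.
 */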
672 bool
673 log_queue_log(log_payload_t lp, const void *lp_data, bool stream)
674 {
675 	assert(lp);
676 	assert(oslog_is_safe() || startup_phase < STARTUP_SUB_EARLY_BOOT);
677 
678 	counter_inc(&log_queue_cnt_received);
679 
680 	if (log_payload_send(lp, lp_data, stream)) {
681 		counter_inc(&log_queue_cnt_sent);
682 		log_queue_dispatch();
683 		return true;
684 	}
685 	counter_inc(&log_queue_cnt_rejected_fh);
686 
687 	if (!log_queue_add(lp, lp_data)) {
688 		return false;
689 	}
690 
691 	return true;
692 }
693