xref: /linux-6.15/kernel/events/ring_buffer.c (revision 3f9fbe9b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Performance events ring-buffer code:
4  *
5  *  Copyright (C) 2008 Thomas Gleixner <[email protected]>
6  *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
7  *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
8  *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <[email protected]>
9  */
10 
11 #include <linux/perf_event.h>
12 #include <linux/vmalloc.h>
13 #include <linux/slab.h>
14 #include <linux/circ_buf.h>
15 #include <linux/poll.h>
16 #include <linux/nospec.h>
17 
18 #include "internal.h"
19 
20 static void perf_output_wakeup(struct perf_output_handle *handle)
21 {
22 	atomic_set(&handle->rb->poll, EPOLLIN);
23 
24 	handle->event->pending_wakeup = 1;
25 	irq_work_queue(&handle->event->pending);
26 }
27 
28 /*
29  * We need to ensure a later event_id doesn't publish a head when a former
30  * event isn't done writing. However, since we need to deal with NMIs, we
31  * cannot fully serialize things.
32  *
33  * We only publish the head (and generate a wakeup) when the outer-most
34  * event completes.
35  */
36 static void perf_output_get_handle(struct perf_output_handle *handle)
37 {
38 	struct ring_buffer *rb = handle->rb;
39 
40 	preempt_disable();
41 	local_inc(&rb->nest);
42 	handle->wakeup = local_read(&rb->wakeup);
43 }
44 
45 static void perf_output_put_handle(struct perf_output_handle *handle)
46 {
47 	struct ring_buffer *rb = handle->rb;
48 	unsigned long head;
49 
50 again:
51 	/*
52 	 * In order to avoid publishing a head value that goes backwards,
53 	 * we must ensure the load of @rb->head happens after we've
54 	 * incremented @rb->nest.
55 	 *
56 	 * Otherwise we could observe a @rb->head value older than one already
57 	 * published by an IRQ/NMI happening between the load and the increment.
58 	 */
59 	barrier();
60 	head = local_read(&rb->head);
61 
62 	/*
63 	 * IRQ/NMI can happen here and advance @rb->head, causing our
64 	 * load above to be stale.
65 	 */
66 
67 	/*
68 	 * If this isn't the outermost nesting, we don't have to update
69 	 * @rb->user_page->data_head.
70 	 */
71 	if (local_read(&rb->nest) > 1) {
72 		local_dec(&rb->nest);
73 		goto out;
74 	}
75 
76 	/*
77 	 * Since the mmap() consumer (userspace) can run on a different CPU:
78 	 *
79 	 *   kernel				user
80 	 *
81 	 *   if (LOAD ->data_tail) {		LOAD ->data_head
82 	 *			(A)		smp_rmb()	(C)
83 	 *	STORE $data			LOAD $data
84 	 *	smp_wmb()	(B)		smp_mb()	(D)
85 	 *	STORE ->data_head		STORE ->data_tail
86 	 *   }
87 	 *
88 	 * Where A pairs with D, and B pairs with C.
89 	 *
90 	 * In our case (A) is a control dependency that separates the load of
91 	 * the ->data_tail and the stores of $data. In case ->data_tail
92 	 * indicates there is no room in the buffer to store $data, we do not store it.
93 	 *
94 	 * D needs to be a full barrier since it separates the data READ
95 	 * from the tail WRITE.
96 	 *
97 	 * For B a WMB is sufficient since it separates two WRITEs, and for C
98 	 * an RMB is sufficient since it separates two READs.
99 	 *
100 	 * See perf_output_begin().
101 	 */
102 	smp_wmb(); /* B, matches C */
103 	rb->user_page->data_head = head;
104 
105 	/*
106 	 * We must publish the head before decrementing the nest count,
107 	 * otherwise an IRQ/NMI can publish a more recent head value and our
108 	 * write will (temporarily) publish a stale value.
109 	 */
110 	barrier();
111 	local_set(&rb->nest, 0);
112 
113 	/*
114 	 * Ensure we decrement @rb->nest before we validate the @rb->head.
115 	 * Otherwise we cannot be sure we caught the 'last' nested update.
116 	 */
117 	barrier();
118 	if (unlikely(head != local_read(&rb->head))) {
119 		local_inc(&rb->nest);
120 		goto again;
121 	}
122 
123 	if (handle->wakeup != local_read(&rb->wakeup))
124 		perf_output_wakeup(handle);
125 
126 out:
127 	preempt_enable();
128 }
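
/*
 * For illustration only: a minimal userspace consumer matching the (C)/(D)
 * side of the diagram above might look roughly like this, with "base"
 * assumed to be the start of the mmap()ed area, "mask" the data size minus
 * one, and smp_rmb()/smp_mb() standing in for whatever barriers the
 * userspace tool actually uses:
 *
 *	struct perf_event_mmap_page *up = base;
 *	__u64 head, tail = up->data_tail;
 *
 *	head = READ_ONCE(up->data_head);
 *	smp_rmb();				// C, pairs with B above
 *	while (tail != head) {
 *		struct perf_event_header *hdr;
 *
 *		hdr = base + page_size + (tail & mask);
 *		// consume hdr->size bytes, handling wrap-around
 *		tail += hdr->size;
 *	}
 *	smp_mb();				// D, pairs with A above
 *	WRITE_ONCE(up->data_tail, tail);
 */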
129 
130 static __always_inline bool
131 ring_buffer_has_space(unsigned long head, unsigned long tail,
132 		      unsigned long data_size, unsigned int size,
133 		      bool backward)
134 {
135 	if (!backward)
136 		return CIRC_SPACE(head, tail, data_size) >= size;
137 	else
138 		return CIRC_SPACE(tail, head, data_size) >= size;
139 }
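
/*
 * A worked example of the CIRC_SPACE() arithmetic (values chosen for
 * illustration): with a power-of-two data size, <linux/circ_buf.h> defines
 * CIRC_SPACE(head, tail, size) as (tail - head - 1) & (size - 1), so for
 * size = 16, head = 10, tail = 4 we get (4 - 10 - 1) & 15 = 9 free bytes;
 * a 9-byte record fits, a 10-byte one does not (one byte always stays free
 * so that a full buffer is distinguishable from an empty one). For
 * backward-writing buffers the head and tail arguments simply swap roles,
 * as ring_buffer_has_space() shows above.
 */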
140 
141 static __always_inline int
142 __perf_output_begin(struct perf_output_handle *handle,
143 		    struct perf_event *event, unsigned int size,
144 		    bool backward)
145 {
146 	struct ring_buffer *rb;
147 	unsigned long tail, offset, head;
148 	int have_lost, page_shift;
149 	struct {
150 		struct perf_event_header header;
151 		u64			 id;
152 		u64			 lost;
153 	} lost_event;
154 
155 	rcu_read_lock();
156 	/*
157 	 * For inherited events we send all the output towards the parent.
158 	 */
159 	if (event->parent)
160 		event = event->parent;
161 
162 	rb = rcu_dereference(event->rb);
163 	if (unlikely(!rb))
164 		goto out;
165 
166 	if (unlikely(rb->paused)) {
167 		if (rb->nr_pages)
168 			local_inc(&rb->lost);
169 		goto out;
170 	}
171 
172 	handle->rb    = rb;
173 	handle->event = event;
174 
175 	have_lost = local_read(&rb->lost);
176 	if (unlikely(have_lost)) {
177 		size += sizeof(lost_event);
178 		if (event->attr.sample_id_all)
179 			size += event->id_header_size;
180 	}
181 
182 	perf_output_get_handle(handle);
183 
184 	do {
185 		tail = READ_ONCE(rb->user_page->data_tail);
186 		offset = head = local_read(&rb->head);
187 		if (!rb->overwrite) {
188 			if (unlikely(!ring_buffer_has_space(head, tail,
189 							    perf_data_size(rb),
190 							    size, backward)))
191 				goto fail;
192 		}
193 
194 		/*
195 		 * The above forms a control dependency barrier separating the
196 		 * @tail load above from the data stores below, since the @tail
197 		 * load is required to compute the branch to the fail label below.
198 		 *
199 		 * A, matches D; the full memory barrier userspace SHOULD issue
200 		 * after reading the data and before storing the new tail
201 		 * position.
202 		 *
203 		 * See perf_output_put_handle().
204 		 */
205 
206 		if (!backward)
207 			head += size;
208 		else
209 			head -= size;
210 	} while (local_cmpxchg(&rb->head, offset, head) != offset);
211 
212 	if (backward) {
213 		offset = head;
214 		head = (u64)(-head);
215 	}
216 
217 	/*
218 	 * We rely on the barrier() implied by local_cmpxchg() to ensure
219 	 * none of the data stores below can be lifted up by the compiler.
220 	 */
221 
222 	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
223 		local_add(rb->watermark, &rb->wakeup);
224 
225 	page_shift = PAGE_SHIFT + page_order(rb);
226 
227 	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
228 	offset &= (1UL << page_shift) - 1;
229 	handle->addr = rb->data_pages[handle->page] + offset;
230 	handle->size = (1UL << page_shift) - offset;
231 
232 	if (unlikely(have_lost)) {
233 		struct perf_sample_data sample_data;
234 
235 		lost_event.header.size = sizeof(lost_event);
236 		lost_event.header.type = PERF_RECORD_LOST;
237 		lost_event.header.misc = 0;
238 		lost_event.id          = event->id;
239 		lost_event.lost        = local_xchg(&rb->lost, 0);
240 
241 		perf_event_header__init_id(&lost_event.header,
242 					   &sample_data, event);
243 		perf_output_put(handle, lost_event);
244 		perf_event__output_id_sample(event, handle, &sample_data);
245 	}
246 
247 	return 0;
248 
249 fail:
250 	local_inc(&rb->lost);
251 	perf_output_put_handle(handle);
252 out:
253 	rcu_read_unlock();
254 
255 	return -ENOSPC;
256 }
257 
258 int perf_output_begin_forward(struct perf_output_handle *handle,
259 			     struct perf_event *event, unsigned int size)
260 {
261 	return __perf_output_begin(handle, event, size, false);
262 }
263 
264 int perf_output_begin_backward(struct perf_output_handle *handle,
265 			       struct perf_event *event, unsigned int size)
266 {
267 	return __perf_output_begin(handle, event, size, true);
268 }
269 
270 int perf_output_begin(struct perf_output_handle *handle,
271 		      struct perf_event *event, unsigned int size)
272 {
273 
274 	return __perf_output_begin(handle, event, size,
275 				   unlikely(is_write_backward(event)));
276 }
277 
278 unsigned int perf_output_copy(struct perf_output_handle *handle,
279 		      const void *buf, unsigned int len)
280 {
281 	return __output_copy(handle, buf, len);
282 }
283 
284 unsigned int perf_output_skip(struct perf_output_handle *handle,
285 			      unsigned int len)
286 {
287 	return __output_skip(handle, NULL, len);
288 }
289 
290 void perf_output_end(struct perf_output_handle *handle)
291 {
292 	perf_output_put_handle(handle);
293 	rcu_read_unlock();
294 }
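
/*
 * For reference, a typical in-kernel producer drives the API above roughly
 * as follows (a sketch; the record layout is hypothetical and error paths
 * are elided; perf_output_put() is the sizeof() wrapper around
 * perf_output_copy() from internal.h):
 *
 *	struct perf_output_handle handle;
 *	struct perf_event_header header = {
 *		.type = PERF_RECORD_SAMPLE,	// or any other record type
 *		.size = sizeof(header) + payload_size,
 *	};
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;			// no buffer, paused, or -ENOSPC
 *
 *	perf_output_put(&handle, header);
 *	perf_output_copy(&handle, payload, payload_size);
 *	perf_output_end(&handle);
 */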
295 
296 static void
297 ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
298 {
299 	long max_size = perf_data_size(rb);
300 
301 	if (watermark)
302 		rb->watermark = min(max_size, watermark);
303 
304 	if (!rb->watermark)
305 		rb->watermark = max_size / 2;
306 
307 	if (flags & RING_BUFFER_WRITABLE)
308 		rb->overwrite = 0;
309 	else
310 		rb->overwrite = 1;
311 
312 	refcount_set(&rb->refcount, 1);
313 
314 	INIT_LIST_HEAD(&rb->event_list);
315 	spin_lock_init(&rb->event_lock);
316 
317 	/*
318 	 * perf_output_begin() only checks rb->paused, therefore
319 	 * rb->paused must be true if we have no pages for output.
320 	 */
321 	if (!rb->nr_pages)
322 		rb->paused = 1;
323 }
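
/*
 * For example (sizes chosen for illustration): with a 512 KiB data area and
 * no wakeup watermark requested, rb->watermark defaults to 256 KiB, so
 * perf_output_put_handle() signals the consumer roughly once per 256 KiB of
 * newly written data, i.e. whenever rb->wakeup has been advanced past the
 * value sampled in perf_output_get_handle().
 */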
324 
325 void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
326 {
327 	/*
328 	 * OVERWRITE is determined by perf_aux_output_end() and can't
329 	 * be passed in directly.
330 	 */
331 	if (WARN_ON_ONCE(flags & PERF_AUX_FLAG_OVERWRITE))
332 		return;
333 
334 	handle->aux_flags |= flags;
335 }
336 EXPORT_SYMBOL_GPL(perf_aux_output_flag);
337 
338 /*
339  * This is called before hardware starts writing to the AUX area to
340  * obtain an output handle and make sure there's room in the buffer.
341  * When the capture completes, call perf_aux_output_end() to commit
342  * the recorded data to the buffer.
343  *
344  * The ordering is similar to that of perf_output_{begin,end}, with
345  * the exception of (B), which should be taken care of by the pmu
346  * driver, since ordering rules will differ depending on hardware.
347  *
348  * Call this from pmu::start(); see the comment in perf_aux_output_end()
349  * about its use in pmu callbacks. Both can also be called from the PMI
350  * handler if needed.
351  */
352 void *perf_aux_output_begin(struct perf_output_handle *handle,
353 			    struct perf_event *event)
354 {
355 	struct perf_event *output_event = event;
356 	unsigned long aux_head, aux_tail;
357 	struct ring_buffer *rb;
358 
359 	if (output_event->parent)
360 		output_event = output_event->parent;
361 
362 	/*
363 	 * Since this will typically be open across pmu::add/pmu::del, we
364 	 * grab the ring_buffer's refcount instead of holding the rcu read lock
365 	 * to make sure it doesn't disappear under us.
366 	 */
367 	rb = ring_buffer_get(output_event);
368 	if (!rb)
369 		return NULL;
370 
371 	if (!rb_has_aux(rb))
372 		goto err;
373 
374 	/*
375 	 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
376 	 * about to get freed, so we leave immediately.
377 	 *
378 	 * Checking rb::aux_mmap_count and rb::refcount has to be done in
379 	 * the same order, see perf_mmap_close. Otherwise we end up freeing
380 	 * aux pages in this path, which is a bug, because this path runs in atomic context (in_atomic()).
381 	 */
382 	if (!atomic_read(&rb->aux_mmap_count))
383 		goto err;
384 
385 	if (!refcount_inc_not_zero(&rb->aux_refcount))
386 		goto err;
387 
388 	/*
389 	 * Nesting is not supported for the AUX area; make sure nested
390 	 * writers are caught early.
391 	 */
392 	if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
393 		goto err_put;
394 
395 	aux_head = rb->aux_head;
396 
397 	handle->rb = rb;
398 	handle->event = event;
399 	handle->head = aux_head;
400 	handle->size = 0;
401 	handle->aux_flags = 0;
402 
403 	/*
404 	 * In overwrite mode, AUX data stores do not depend on aux_tail,
405 	 * therefore (A) control dependency barrier does not exist. The
406 	 * (B) <-> (C) ordering is still observed by the pmu driver.
407 	 */
408 	if (!rb->aux_overwrite) {
409 		aux_tail = READ_ONCE(rb->user_page->aux_tail);
410 		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
411 		if (aux_head - aux_tail < perf_aux_size(rb))
412 			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));
413 
414 		/*
415 		 * The handle->size computation depends on the aux_tail load; this
416 		 * forms a control dependency barrier separating the aux_tail load
417 		 * from the aux data stores that will be enabled on successful return.
418 		 */
419 		if (!handle->size) { /* A, matches D */
420 			event->pending_disable = smp_processor_id();
421 			perf_output_wakeup(handle);
422 			local_set(&rb->aux_nest, 0);
423 			goto err_put;
424 		}
425 	}
426 
427 	return handle->rb->aux_priv;
428 
429 err_put:
430 	/* can't be last */
431 	rb_free_aux(rb);
432 
433 err:
434 	ring_buffer_put(rb);
435 	handle->event = NULL;
436 
437 	return NULL;
438 }
439 EXPORT_SYMBOL_GPL(perf_aux_output_begin);
440 
441 static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
442 {
443 	if (rb->aux_overwrite)
444 		return false;
445 
446 	if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
447 		rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
448 		return true;
449 	}
450 
451 	return false;
452 }
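
/*
 * Worked example (values chosen for illustration): with aux_watermark =
 * 0x4000 and aux_wakeup = 0, advancing aux_head to 0x5000 satisfies
 * 0x5000 - 0 >= 0x4000, so aux_wakeup becomes rounddown(0x5000, 0x4000) =
 * 0x4000 and a wakeup is due; the next one is signalled once aux_head
 * reaches 0x8000.
 */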
453 
454 /*
455  * Commit the data written by hardware into the ring buffer by adjusting
456  * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
457  * pmu driver's responsibility to observe ordering rules of the hardware,
458  * so that all the data is externally visible before this is called.
459  *
460  * Note: this has to be called from the pmu::stop() callback, as the AUX
461  * buffer management code assumes that after pmu::stop() the AUX
462  * transaction has been stopped and the AUX reference count is dropped.
463  */
464 void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
465 {
466 	bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
467 	struct ring_buffer *rb = handle->rb;
468 	unsigned long aux_head;
469 
470 	/* in overwrite mode, driver provides aux_head via handle */
471 	if (rb->aux_overwrite) {
472 		handle->aux_flags |= PERF_AUX_FLAG_OVERWRITE;
473 
474 		aux_head = handle->head;
475 		rb->aux_head = aux_head;
476 	} else {
477 		handle->aux_flags &= ~PERF_AUX_FLAG_OVERWRITE;
478 
479 		aux_head = rb->aux_head;
480 		rb->aux_head += size;
481 	}
482 
483 	/*
484 	 * Only send RECORD_AUX if we have something useful to communicate
485 	 *
486 	 * Note: the OVERWRITE records by themselves are not considered
487 	 * useful, as they don't communicate any *new* information
488 	 * aside from the short-lived offset, which becomes history at
489 	 * the next event sched-in and therefore isn't useful.
490 	 * Userspace that needs to copy out AUX data in overwrite
491 	 * mode should know to use user_page::aux_head for the actual
492 	 * offset. So, from now on we don't output AUX records that
493 	 * have *only* the OVERWRITE flag set.
494 	 */
495 	if (size || (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE))
496 		perf_event_aux_event(handle->event, aux_head, size,
497 				     handle->aux_flags);
498 
499 	rb->user_page->aux_head = rb->aux_head;
500 	if (rb_need_aux_wakeup(rb))
501 		wakeup = true;
502 
503 	if (wakeup) {
504 		if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
505 			handle->event->pending_disable = smp_processor_id();
506 		perf_output_wakeup(handle);
507 	}
508 
509 	handle->event = NULL;
510 
511 	local_set(&rb->aux_nest, 0);
512 	/* can't be last */
513 	rb_free_aux(rb);
514 	ring_buffer_put(rb);
515 }
516 EXPORT_SYMBOL_GPL(perf_aux_output_end);
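
/*
 * For illustration, an AUX-capable PMU driver pairs the two calls above
 * roughly as follows (a sketch with a hypothetical driver; the handle would
 * normally live in per-cpu driver state, and error handling and the actual
 * hardware programming are elided):
 *
 *	static void foo_pmu_start(struct perf_event *event, int flags)
 *	{
 *		void *buf = perf_aux_output_begin(&foo_handle, event);
 *
 *		if (!buf)
 *			return;
 *		// point the hardware at buf and start tracing
 *	}
 *
 *	static void foo_pmu_stop(struct perf_event *event, int flags)
 *	{
 *		// stop the hardware, read how much it wrote into "size"
 *		perf_aux_output_end(&foo_handle, size);
 *	}
 */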
517 
518 /*
519  * Skip over a given number of bytes in the AUX buffer, for example due to
520  * the hardware's alignment constraints.
521  */
522 int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
523 {
524 	struct ring_buffer *rb = handle->rb;
525 
526 	if (size > handle->size)
527 		return -ENOSPC;
528 
529 	rb->aux_head += size;
530 
531 	rb->user_page->aux_head = rb->aux_head;
532 	if (rb_need_aux_wakeup(rb)) {
533 		perf_output_wakeup(handle);
534 		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
535 	}
536 
537 	handle->head = rb->aux_head;
538 	handle->size -= size;
539 
540 	return 0;
541 }
542 EXPORT_SYMBOL_GPL(perf_aux_output_skip);
543 
544 void *perf_get_aux(struct perf_output_handle *handle)
545 {
546 	/* this is only valid between perf_aux_output_begin and *_end */
547 	if (!handle->event)
548 		return NULL;
549 
550 	return handle->rb->aux_priv;
551 }
552 EXPORT_SYMBOL_GPL(perf_get_aux);
553 
554 #define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
555 
556 static struct page *rb_alloc_aux_page(int node, int order)
557 {
558 	struct page *page;
559 
560 	if (order > MAX_ORDER)
561 		order = MAX_ORDER;
562 
563 	do {
564 		page = alloc_pages_node(node, PERF_AUX_GFP, order);
565 	} while (!page && order--);
566 
567 	if (page && order) {
568 		/*
569 		 * Communicate the allocation size to the driver:
570 		 * if we managed to secure a high-order allocation,
571 		 * set its first page's private to this order;
572 		 * !PagePrivate(page) means it's just a normal page.
573 		 */
574 		split_page(page, order);
575 		SetPagePrivate(page);
576 		set_page_private(page, order);
577 	}
578 
579 	return page;
580 }
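
/*
 * For example, a request for an order-4 chunk tries orders 4, 3, 2, 1 and 0
 * in turn and only gives up if even the order-0 allocation fails. When a
 * high-order allocation succeeds, split_page() turns it into individual
 * pages, while page_private() of the first page tells rb_alloc_aux() how
 * many pages the chunk really covers (1 << order).
 */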
581 
582 static void rb_free_aux_page(struct ring_buffer *rb, int idx)
583 {
584 	struct page *page = virt_to_page(rb->aux_pages[idx]);
585 
586 	ClearPagePrivate(page);
587 	page->mapping = NULL;
588 	__free_page(page);
589 }
590 
591 static void __rb_free_aux(struct ring_buffer *rb)
592 {
593 	int pg;
594 
595 	/*
596 	 * Should never happen: the last reference should be dropped from
597 	 * perf_mmap_close() path, which first stops aux transactions (which
598 	 * in turn are the atomic holders of aux_refcount) and then does the
599 	 * last rb_free_aux().
600 	 */
601 	WARN_ON_ONCE(in_atomic());
602 
603 	if (rb->aux_priv) {
604 		rb->free_aux(rb->aux_priv);
605 		rb->free_aux = NULL;
606 		rb->aux_priv = NULL;
607 	}
608 
609 	if (rb->aux_nr_pages) {
610 		for (pg = 0; pg < rb->aux_nr_pages; pg++)
611 			rb_free_aux_page(rb, pg);
612 
613 		kfree(rb->aux_pages);
614 		rb->aux_nr_pages = 0;
615 	}
616 }
617 
618 int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
619 		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
620 {
621 	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
622 	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
623 	int ret = -ENOMEM, max_order;
624 
625 	if (!has_aux(event))
626 		return -EOPNOTSUPP;
627 
628 	/*
629 	 * We need to start with the max_order that fits in nr_pages,
630 	 * not the other way around, hence ilog2() and not get_order.
631 	 */
632 	max_order = ilog2(nr_pages);
633 
634 	/*
635 	 * The PMU requests more than one contiguous chunk of memory
636 	 * for SW double buffering.
637 	 */
638 	if (!overwrite) {
639 		if (!max_order)
640 			return -EINVAL;
641 
642 		max_order--;
643 	}
644 
645 	rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
646 				     node);
647 	if (!rb->aux_pages)
648 		return -ENOMEM;
649 
650 	rb->free_aux = event->pmu->free_aux;
651 	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
652 		struct page *page;
653 		int last, order;
654 
655 		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
656 		page = rb_alloc_aux_page(node, order);
657 		if (!page)
658 			goto out;
659 
660 		for (last = rb->aux_nr_pages + (1 << page_private(page));
661 		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
662 			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
663 	}
664 
665 	/*
666 	 * In overwrite mode, PMUs that don't support SG may not handle more
667 	 * than one contiguous allocation, since they rely on PMI to do double
668 	 * buffering. In this case, the entire buffer has to be one contiguous
669 	 * chunk.
670 	 */
671 	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
672 	    overwrite) {
673 		struct page *page = virt_to_page(rb->aux_pages[0]);
674 
675 		if (page_private(page) != max_order)
676 			goto out;
677 	}
678 
679 	rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
680 					     overwrite);
681 	if (!rb->aux_priv)
682 		goto out;
683 
684 	ret = 0;
685 
686 	/*
687 	 * aux_pages (and pmu driver's private data, aux_priv) will be
688 	 * referenced in both producer's and consumer's contexts, thus
689 	 * we keep a refcount here to make sure either of the two can
690 	 * reference them safely.
691 	 */
692 	refcount_set(&rb->aux_refcount, 1);
693 
694 	rb->aux_overwrite = overwrite;
695 	rb->aux_watermark = watermark;
696 
697 	if (!rb->aux_watermark && !rb->aux_overwrite)
698 		rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);
699 
700 out:
701 	if (!ret)
702 		rb->aux_pgoff = pgoff;
703 	else
704 		__rb_free_aux(rb);
705 
706 	return ret;
707 }
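
/*
 * Worked example (page counts chosen for illustration): for a 48-page AUX
 * area in overwrite mode, max_order = ilog2(48) = 5, so the loop above
 * first takes an order-5 chunk (32 pages) and then an order-4 chunk (16
 * pages); rb_alloc_aux_page() may silently fall back to smaller orders if
 * high-order allocations fail, in which case more iterations are needed.
 * In the non-overwrite (software double buffering) case max_order is
 * lowered to 4, so the same area would be built from three order-4 chunks.
 */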
708 
709 void rb_free_aux(struct ring_buffer *rb)
710 {
711 	if (refcount_dec_and_test(&rb->aux_refcount))
712 		__rb_free_aux(rb);
713 }
714 
715 #ifndef CONFIG_PERF_USE_VMALLOC
716 
717 /*
718  * Back perf_mmap() with regular GFP_KERNEL-0 pages.
719  */
720 
721 static struct page *
722 __perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
723 {
724 	if (pgoff > rb->nr_pages)
725 		return NULL;
726 
727 	if (pgoff == 0)
728 		return virt_to_page(rb->user_page);
729 
730 	return virt_to_page(rb->data_pages[pgoff - 1]);
731 }
732 
733 static void *perf_mmap_alloc_page(int cpu)
734 {
735 	struct page *page;
736 	int node;
737 
738 	node = (cpu == -1) ? cpu : cpu_to_node(cpu);	/* -1 == NUMA_NO_NODE */
739 	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
740 	if (!page)
741 		return NULL;
742 
743 	return page_address(page);
744 }
745 
746 struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
747 {
748 	struct ring_buffer *rb;
749 	unsigned long size;
750 	int i;
751 
752 	size = sizeof(struct ring_buffer);
753 	size += nr_pages * sizeof(void *);
754 
755 	if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
756 		goto fail;
757 
758 	rb = kzalloc(size, GFP_KERNEL);
759 	if (!rb)
760 		goto fail;
761 
762 	rb->user_page = perf_mmap_alloc_page(cpu);
763 	if (!rb->user_page)
764 		goto fail_user_page;
765 
766 	for (i = 0; i < nr_pages; i++) {
767 		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
768 		if (!rb->data_pages[i])
769 			goto fail_data_pages;
770 	}
771 
772 	rb->nr_pages = nr_pages;
773 
774 	ring_buffer_init(rb, watermark, flags);
775 
776 	return rb;
777 
778 fail_data_pages:
779 	for (i--; i >= 0; i--)
780 		free_page((unsigned long)rb->data_pages[i]);
781 
782 	free_page((unsigned long)rb->user_page);
783 
784 fail_user_page:
785 	kfree(rb);
786 
787 fail:
788 	return NULL;
789 }
790 
791 static void perf_mmap_free_page(unsigned long addr)
792 {
793 	struct page *page = virt_to_page((void *)addr);
794 
795 	page->mapping = NULL;
796 	__free_page(page);
797 }
798 
799 void rb_free(struct ring_buffer *rb)
800 {
801 	int i;
802 
803 	perf_mmap_free_page((unsigned long)rb->user_page);
804 	for (i = 0; i < rb->nr_pages; i++)
805 		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
806 	kfree(rb);
807 }
808 
809 #else
810 static int data_page_nr(struct ring_buffer *rb)
811 {
812 	return rb->nr_pages << page_order(rb);
813 }
814 
815 static struct page *
816 __perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
817 {
818 	/* The '>' counts in the user page. */
819 	if (pgoff > data_page_nr(rb))
820 		return NULL;
821 
822 	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
823 }
824 
825 static void perf_mmap_unmark_page(void *addr)
826 {
827 	struct page *page = vmalloc_to_page(addr);
828 
829 	page->mapping = NULL;
830 }
831 
832 static void rb_free_work(struct work_struct *work)
833 {
834 	struct ring_buffer *rb;
835 	void *base;
836 	int i, nr;
837 
838 	rb = container_of(work, struct ring_buffer, work);
839 	nr = data_page_nr(rb);
840 
841 	base = rb->user_page;
842 	/* The '<=' counts in the user page. */
843 	for (i = 0; i <= nr; i++)
844 		perf_mmap_unmark_page(base + (i * PAGE_SIZE));
845 
846 	vfree(base);
847 	kfree(rb);
848 }
849 
850 void rb_free(struct ring_buffer *rb)
851 {
852 	schedule_work(&rb->work);
853 }
854 
855 struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
856 {
857 	struct ring_buffer *rb;
858 	unsigned long size;
859 	void *all_buf;
860 
861 	size = sizeof(struct ring_buffer);
862 	size += sizeof(void *);
863 
864 	rb = kzalloc(size, GFP_KERNEL);
865 	if (!rb)
866 		goto fail;
867 
868 	INIT_WORK(&rb->work, rb_free_work);
869 
870 	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
871 	if (!all_buf)
872 		goto fail_all_buf;
873 
874 	rb->user_page = all_buf;
875 	rb->data_pages[0] = all_buf + PAGE_SIZE;
876 	if (nr_pages) {
877 		rb->nr_pages = 1;
878 		rb->page_order = ilog2(nr_pages);
879 	}
880 
881 	ring_buffer_init(rb, watermark, flags);
882 
883 	return rb;
884 
885 fail_all_buf:
886 	kfree(rb);
887 
888 fail:
889 	return NULL;
890 }
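
/*
 * To illustrate the CONFIG_PERF_USE_VMALLOC variant above: for an 8-page
 * data area, rb_alloc() vmalloc()s nine contiguous pages (user page plus
 * data) and records the data area as a single "page" of order ilog2(8) = 3,
 * i.e. rb->nr_pages = 1 and rb->page_order = 3. data_page_nr() then yields
 * 1 << 3 = 8, and __perf_output_begin() folds the order into its offset
 * arithmetic via page_shift = PAGE_SHIFT + page_order(rb).
 */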
891 
892 #endif
893 
894 struct page *
895 perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
896 {
897 	if (rb->aux_nr_pages) {
898 		/* above AUX space */
899 		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
900 			return NULL;
901 
902 		/* AUX space */
903 		if (pgoff >= rb->aux_pgoff) {
904 			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
905 			return virt_to_page(rb->aux_pages[aux_pgoff]);
906 		}
907 	}
908 
909 	return __perf_mmap_to_page(rb, pgoff);
910 }
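
/*
 * Worked example of the mmap page-offset layout resolved above (numbers
 * chosen for illustration): with nr_pages = 8 and an AUX area set up at
 * aux_pgoff = 9 with aux_nr_pages = 16, pgoff 0 is the user control page,
 * pgoff 1..8 are the data pages, pgoff 9..24 map to aux_pages[0..15], and
 * offsets above the AUX area are rejected by the first check.
 */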
911