// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <[email protected]>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <[email protected]>
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>
#include <linux/nospec.h>

#include "internal.h"

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, EPOLLIN | EPOLLRDNORM);

	handle->event->pending_wakeup = 1;

	if (*perf_event_fasync(handle->event) && !handle->event->pending_kill)
		handle->event->pending_kill = POLL_IN;

	irq_work_queue(&handle->event->pending_irq);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct perf_buffer *rb = handle->rb;

	preempt_disable();

	/*
	 * Avoid an explicit LOAD/STORE such that architectures with memops
	 * can use them.
	 */
	(*(volatile unsigned int *)&rb->nest)++;
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct perf_buffer *rb = handle->rb;
	unsigned long head;
	unsigned int nest;

	/*
	 * If this isn't the outermost nesting, we don't have to update
	 * @rb->user_page->data_head.
	 */
	nest = READ_ONCE(rb->nest);
	if (nest > 1) {
		WRITE_ONCE(rb->nest, nest - 1);
		goto out;
	}

again:
	/*
	 * In order to avoid publishing a head value that goes backwards,
	 * we must ensure the load of @rb->head happens after we've
	 * incremented @rb->nest.
	 *
	 * Otherwise we can observe a @rb->head value before one published
	 * by an IRQ/NMI happening between the load and the increment.
	 */
	barrier();
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here and advance @rb->head, causing our
	 * load above to be stale.
	 */

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load
	 * of ->data_tail from the stores of $data: if ->data_tail indicates
	 * there is no room in the buffer, we do not store $data.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	WRITE_ONCE(rb->user_page->data_head, head);

	/*
	 * We must publish the head before decrementing the nest count,
	 * otherwise an IRQ/NMI can publish a more recent head value and our
	 * write will (temporarily) publish a stale value.
	 */
	barrier();
	WRITE_ONCE(rb->nest, 0);

	/*
	 * Ensure we decrement @rb->nest before we validate the @rb->head.
	 * Otherwise we cannot be sure we caught the 'last' nested update.
	 */
	barrier();
	if (unlikely(head != local_read(&rb->head))) {
		WRITE_ONCE(rb->nest, 1);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
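
/*
 * For reference, the matching consumer side (C and D above) lives in
 * userspace. A minimal read-loop sketch (illustrative only; "base",
 * "mask", "buf" and "rec_size" are assumptions of the example, and
 * userspace would use its own barrier primitives rather than the
 * kernel macros spelled out here):
 *
 *	struct perf_event_mmap_page *up = mapping;
 *	u64 head = READ_ONCE(up->data_head);
 *	u64 tail = up->data_tail;
 *
 *	smp_rmb();					// C, pairs with B
 *	while (tail != head) {
 *		memcpy(buf, base + (tail & mask), rec_size); // LOAD $data
 *		tail += rec_size;
 *	}
 *	smp_mb();					// D, pairs with A
 *	WRITE_ONCE(up->data_tail, tail);
 */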

static __always_inline bool
ring_buffer_has_space(unsigned long head, unsigned long tail,
		      unsigned long data_size, unsigned int size,
		      bool backward)
{
	if (!backward)
		return CIRC_SPACE(head, tail, data_size) >= size;
	else
		return CIRC_SPACE(tail, head, data_size) >= size;
}
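
/*
 * CIRC_SPACE() keeps one byte free so that a full buffer remains
 * distinguishable from an empty one. Worked example (illustrative
 * numbers): with data_size = 8, head = 6 and tail = 2, four bytes are
 * in flight and CIRC_SPACE(6, 2, 8) = (2 - 7) & 7 = 3 bytes remain
 * writable.
 */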

static __always_inline int
__perf_output_begin(struct perf_output_handle *handle,
		    struct perf_sample_data *data,
		    struct perf_event *event, unsigned int size,
		    bool backward)
{
	struct perf_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64 id;
		u64 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(rb->paused)) {
		if (rb->nr_pages) {
			local_inc(&rb->lost);
			atomic64_inc(&event->lost_samples);
		}
		goto out;
	}

	handle->rb    = rb;
	handle->event = event;
	handle->flags = 0;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	offset = local_read(&rb->head);
	do {
		head = offset;
		tail = READ_ONCE(rb->user_page->data_tail);
		if (!rb->overwrite) {
			if (unlikely(!ring_buffer_has_space(head, tail,
							    perf_data_size(rb),
							    size, backward)))
				goto fail;
		}

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below, since the
		 * @tail load is required to compute the branch to the fail
		 * label below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * position.
		 *
		 * See perf_output_put_handle().
		 */

		if (!backward)
			head += size;
		else
			head -= size;
	} while (!local_try_cmpxchg(&rb->head, &offset, head));

	if (backward) {
		offset = head;
		head = (u64)(-head);
	}

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		/* XXX mostly redundant; @data is already fully initialized */
		perf_event_header__init_id(&lost_event.header, data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	atomic64_inc(&event->lost_samples);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

int perf_output_begin_forward(struct perf_output_handle *handle,
			      struct perf_sample_data *data,
			      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, data, event, size, false);
}

int perf_output_begin_backward(struct perf_output_handle *handle,
			       struct perf_sample_data *data,
			       struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, data, event, size, true);
}

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_sample_data *data,
		      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, data, event, size,
				   unlikely(is_write_backward(event)));
}

unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}
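
/*
 * Typical producer-side usage of the handle API (a sketch; the
 * bare-header record and the early-return policy are assumptions of
 * the example, modeled on the simplest record emitters):
 *
 *	struct perf_output_handle handle;
 *	struct perf_sample_data sample;
 *	struct perf_event_header header = {
 *		.type = PERF_RECORD_THROTTLE,
 *		.size = sizeof(header),
 *	};
 *
 *	perf_event_header__init_id(&header, &sample, event);
 *	if (perf_output_begin(&handle, &sample, event, header.size))
 *		return;			// no space; rb->lost was bumped
 *	perf_output_put(&handle, header);
 *	perf_event__output_id_sample(event, &handle, &sample);
 *	perf_output_end(&handle);
 */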

static void
ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	refcount_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);

	/*
	 * perf_output_begin() only checks rb->paused, therefore
	 * rb->paused must be true if we have no pages for output.
	 */
	if (!rb->nr_pages)
		rb->paused = 1;

	mutex_init(&rb->aux_mutex);
}

void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
{
	/*
	 * OVERWRITE is determined by perf_aux_output_end() and can't
	 * be passed in directly.
	 */
	if (WARN_ON_ONCE(flags & PERF_AUX_FLAG_OVERWRITE))
		return;

	handle->aux_flags |= flags;
}
EXPORT_SYMBOL_GPL(perf_aux_output_flag);

/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 *
 * Call this from pmu::start(); see the comment in perf_aux_output_end()
 * about its use in pmu callbacks. Both can also be called from the PMI
 * handler if needed.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *output_event = event;
	unsigned long aux_head, aux_tail;
	struct perf_buffer *rb;
	unsigned int nest;

	if (output_event->parent)
		output_event = output_event->parent;

	/*
	 * Since this will typically be open across pmu::add/pmu::del, we
	 * grab ring_buffer's refcount instead of holding rcu read lock
	 * to make sure it doesn't disappear under us.
	 */
	rb = ring_buffer_get(output_event);
	if (!rb)
		return NULL;

	if (!rb_has_aux(rb))
		goto err;

	/*
	 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
	 * about to get freed, so we leave immediately.
	 *
	 * Checking rb::aux_mmap_count and rb::refcount has to be done in
	 * the same order, see perf_mmap_close. Otherwise we could end up
	 * freeing aux pages in this path, which would be a bug because we
	 * may be in_atomic().
	 */
	if (!atomic_read(&rb->aux_mmap_count))
		goto err;

	if (!refcount_inc_not_zero(&rb->aux_refcount))
		goto err;

	nest = READ_ONCE(rb->aux_nest);
	/*
	 * Nesting is not supported for the AUX area, make sure nested
	 * writers are caught early.
	 */
	if (WARN_ON_ONCE(nest))
		goto err_put;

	WRITE_ONCE(rb->aux_nest, nest + 1);

	aux_head = rb->aux_head;

	handle->rb = rb;
	handle->event = event;
	handle->head = aux_head;
	handle->size = 0;
	handle->aux_flags = 0;

	/*
	 * In overwrite mode, AUX data stores do not depend on aux_tail,
	 * therefore (A) control dependency barrier does not exist. The
	 * (B) <-> (C) ordering is still observed by the pmu driver.
	 */
	if (!rb->aux_overwrite) {
		aux_tail = READ_ONCE(rb->user_page->aux_tail);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
		if (aux_head - aux_tail < perf_aux_size(rb))
			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

		/*
		 * handle->size computation depends on aux_tail load; this forms a
		 * control dependency barrier separating aux_tail load from aux data
		 * store that will be enabled on successful return.
		 */
		if (!handle->size) { /* A, matches D */
			event->pending_disable = smp_processor_id();
			perf_output_wakeup(handle);
			WRITE_ONCE(rb->aux_nest, 0);
			goto err_put;
		}
	}

	return handle->rb->aux_priv;

err_put:
	/* can't be last */
	rb_free_aux(rb);

err:
	ring_buffer_put(rb);
	handle->event = NULL;

	return NULL;
}
EXPORT_SYMBOL_GPL(perf_aux_output_begin);

static __always_inline bool rb_need_aux_wakeup(struct perf_buffer *rb)
{
	if (rb->aux_overwrite)
		return false;

	if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
		rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
		return true;
	}

	return false;
}
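
/*
 * Worked example (illustrative numbers): with aux_watermark = 128,
 * aux_wakeup = 128 and aux_head advanced to 300, 300 - 128 >= 128, so a
 * wakeup is due and aux_wakeup becomes rounddown(300, 128) = 256, i.e.
 * the start of the watermark window containing the new head.
 */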

/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 *
 * Note: this has to be called from pmu::stop() callback, as the assumption
 * of the AUX buffer management code is that after pmu::stop(), the AUX
 * transaction must be stopped and therefore drop the AUX reference count.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
{
	bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
	struct perf_buffer *rb = handle->rb;
	unsigned long aux_head;

	/* in overwrite mode, driver provides aux_head via handle */
	if (rb->aux_overwrite) {
		handle->aux_flags |= PERF_AUX_FLAG_OVERWRITE;

		aux_head = handle->head;
		rb->aux_head = aux_head;
	} else {
		handle->aux_flags &= ~PERF_AUX_FLAG_OVERWRITE;

		aux_head = rb->aux_head;
		rb->aux_head += size;
	}

	/*
	 * Only send RECORD_AUX if we have something useful to communicate
	 *
	 * Note: the OVERWRITE records by themselves are not considered
	 * useful, as they don't communicate any *new* information,
	 * aside from the short-lived offset, which becomes history at
	 * the next event sched-in and therefore isn't useful.
	 * Userspace that needs to copy out AUX data in overwrite
	 * mode should know to use user_page::aux_head for the actual
	 * offset. So, from now on we don't output AUX records that
	 * have *only* the OVERWRITE flag set.
	 */
	if (size || (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE))
		perf_event_aux_event(handle->event, aux_head, size,
				     handle->aux_flags);

	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
	if (rb_need_aux_wakeup(rb))
		wakeup = true;

	if (wakeup) {
		if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
			handle->event->pending_disable = smp_processor_id();
		perf_output_wakeup(handle);
	}

	handle->event = NULL;

	WRITE_ONCE(rb->aux_nest, 0);
	/* can't be last */
	rb_free_aux(rb);
	ring_buffer_put(rb);
}
EXPORT_SYMBOL_GPL(perf_aux_output_end);
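
/*
 * Shape of a PMU driver's start/stop path around the AUX handle API (a
 * sketch; "my_handle", "my_pmu_*" and the hardware programming steps
 * are assumptions of the example, not a real driver):
 *
 *	static DEFINE_PER_CPU(struct perf_output_handle, my_handle);
 *
 *	static void my_pmu_start(struct perf_event *event, int flags)
 *	{
 *		struct perf_output_handle *h = this_cpu_ptr(&my_handle);
 *		void *buf = perf_aux_output_begin(h, event);
 *
 *		if (!buf)
 *			return;	// no AUX buffer, or no space left
 *		// program hardware to write into buf, at most h->size bytes
 *	}
 *
 *	static void my_pmu_stop(struct perf_event *event, int flags)
 *	{
 *		struct perf_output_handle *h = this_cpu_ptr(&my_handle);
 *
 *		// quiesce hardware, then commit what it wrote:
 *		perf_aux_output_end(h, bytes_written);
 *	}
 */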

/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
	struct perf_buffer *rb = handle->rb;

	if (size > handle->size)
		return -ENOSPC;

	rb->aux_head += size;

	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
	if (rb_need_aux_wakeup(rb)) {
		perf_output_wakeup(handle);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
	}

	handle->head = rb->aux_head;
	handle->size -= size;

	return 0;
}
EXPORT_SYMBOL_GPL(perf_aux_output_skip);

void *perf_get_aux(struct perf_output_handle *handle)
{
	/* this is only valid between perf_aux_output_begin and *_end */
	if (!handle->event)
		return NULL;

	return handle->rb->aux_priv;
}
EXPORT_SYMBOL_GPL(perf_get_aux);

/*
 * Copy out AUX data from an AUX handle.
 */
long perf_output_copy_aux(struct perf_output_handle *aux_handle,
			  struct perf_output_handle *handle,
			  unsigned long from, unsigned long to)
{
	struct perf_buffer *rb = aux_handle->rb;
	unsigned long tocopy, remainder, len = 0;
	void *addr;

	from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
	to &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;

	do {
		tocopy = PAGE_SIZE - offset_in_page(from);
		if (to > from)
			tocopy = min(tocopy, to - from);
		if (!tocopy)
			break;

		addr = rb->aux_pages[from >> PAGE_SHIFT];
		addr += offset_in_page(from);

		remainder = perf_output_copy(handle, addr, tocopy);
		if (remainder)
			return -EFAULT;

		len += tocopy;
		from += tocopy;
		from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
	} while (to != from);

	return len;
}

#define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
	struct page *page;

	if (order > MAX_PAGE_ORDER)
		order = MAX_PAGE_ORDER;

	do {
		page = alloc_pages_node(node, PERF_AUX_GFP, order);
	} while (!page && order--);

	if (page && order) {
		/*
		 * Communicate the allocation size to the driver:
		 * if we managed to secure a high-order allocation,
		 * set its first page's private to this order;
		 * !PagePrivate(page) means it's just a normal page.
		 */
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);
	}

	return page;
}
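
/*
 * For example (illustrative numbers): a request for order 3 that only
 * succeeds at order 1 yields a split 2-page chunk whose first page has
 * page_private set to 1, so the allocation loop in rb_alloc_aux() below
 * records both pages in one step, while rb_free_aux_page() still frees
 * the split pages one at a time.
 */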

static void rb_free_aux_page(struct perf_buffer *rb, int idx)
{
	struct page *page = virt_to_page(rb->aux_pages[idx]);

	ClearPagePrivate(page);
	__free_page(page);
}

static void __rb_free_aux(struct perf_buffer *rb)
{
	int pg;

	/*
	 * Should never happen, the last reference should be dropped from
	 * perf_mmap_close() path, which first stops aux transactions (which
	 * in turn are the atomic holders of aux_refcount) and then does the
	 * last rb_free_aux().
	 */
	WARN_ON_ONCE(in_atomic());

	if (rb->aux_priv) {
		rb->free_aux(rb->aux_priv);
		rb->free_aux = NULL;
		rb->aux_priv = NULL;
	}

	if (rb->aux_nr_pages) {
		for (pg = 0; pg < rb->aux_nr_pages; pg++)
			rb_free_aux_page(rb, pg);

		kfree(rb->aux_pages);
		rb->aux_nr_pages = 0;
	}
}

int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
	int ret = -ENOMEM, max_order;

	if (!has_aux(event))
		return -EOPNOTSUPP;

	if (nr_pages <= 0)
		return -EINVAL;

	if (!overwrite) {
		/*
		 * Watermark defaults to half the buffer, and so does the
		 * max_order, to aid PMU drivers in double buffering.
		 */
		if (!watermark)
			watermark = min_t(unsigned long,
					  U32_MAX,
					  (unsigned long)nr_pages << (PAGE_SHIFT - 1));

		/*
		 * Use aux_watermark as the basis for chunking to
		 * help PMU drivers honor the watermark.
		 */
		max_order = get_order(watermark);
	} else {
		/*
		 * We need to start with the max_order that fits in nr_pages,
		 * not the other way around, hence ilog2() and not get_order.
		 */
		max_order = ilog2(nr_pages);
		watermark = 0;
	}

	/*
	 * kcalloc_node() cannot allocate a buffer larger than
	 * PAGE_SIZE << MAX_PAGE_ORDER; bail out directly in that case.
	 */
	if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_PAGE_ORDER)
		return -ENOMEM;
	rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
				     node);
	if (!rb->aux_pages)
		return -ENOMEM;

	rb->free_aux = event->pmu->free_aux;
	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
		struct page *page;
		int last, order;

		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
		page = rb_alloc_aux_page(node, order);
		if (!page)
			goto out;

		for (last = rb->aux_nr_pages + (1 << page_private(page));
		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
	}

	/*
	 * In overwrite mode, PMUs that don't support SG may not handle more
	 * than one contiguous allocation, since they rely on PMI to do double
	 * buffering. In this case, the entire buffer has to be one contiguous
	 * chunk.
	 */
	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
	    overwrite) {
		struct page *page = virt_to_page(rb->aux_pages[0]);

		if (page_private(page) != max_order)
			goto out;
	}

	rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
					     overwrite);
	if (!rb->aux_priv)
		goto out;

	ret = 0;

	/*
	 * aux_pages (and pmu driver's private data, aux_priv) will be
	 * referenced in both producer's and consumer's contexts, thus
	 * we keep a refcount here to make sure either of the two can
	 * reference them safely.
	 */
	refcount_set(&rb->aux_refcount, 1);

	rb->aux_overwrite = overwrite;
	rb->aux_watermark = watermark;

out:
	if (!ret)
		rb->aux_pgoff = pgoff;
	else
		__rb_free_aux(rb);

	return ret;
}

void rb_free_aux(struct perf_buffer *rb)
{
	if (refcount_dec_and_test(&rb->aux_refcount))
		__rb_free_aux(rb);
}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

static void perf_mmap_free_page(void *addr)
{
	struct page *page = virt_to_page(addr);

	__free_page(page);
}

struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer *rb;
	unsigned long size;
	int i, node;

	size = sizeof(struct perf_buffer);
	size += nr_pages * sizeof(void *);

	if (order_base_2(size) > PAGE_SHIFT+MAX_PAGE_ORDER)
		goto fail;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	rb = kzalloc_node(size, GFP_KERNEL, node);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		perf_mmap_free_page(rb->data_pages[i]);

	perf_mmap_free_page(rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

void rb_free(struct perf_buffer *rb)
{
	int i;

	perf_mmap_free_page(rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page(rb->data_pages[i]);
	kfree(rb);
}

#else
static struct page *
__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
	/* The '>' (rather than '>=') accounts for the user page at pgoff 0. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void rb_free_work(struct work_struct *work)
{
	struct perf_buffer *rb;

	rb = container_of(work, struct perf_buffer, work);

	vfree(rb->user_page);
	kfree(rb);
}

void rb_free(struct perf_buffer *rb)
{
	schedule_work(&rb->work);
}

struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer *rb;
	unsigned long size;
	void *all_buf;
	int node;

	size = sizeof(struct perf_buffer);
	size += sizeof(void *);

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	rb = kzalloc_node(size, GFP_KERNEL, node);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	if (nr_pages) {
		rb->nr_pages = 1;
		rb->page_order = ilog2(nr_pages);
	}

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif

struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
	if (rb->aux_nr_pages) {
		/* above AUX space */
		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
			return NULL;

		/* AUX space */
		if (pgoff >= rb->aux_pgoff) {
			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
			return virt_to_page(rb->aux_pages[aux_pgoff]);
		}
	}

	return __perf_mmap_to_page(rb, pgoff);
}