1bcea3f96SSteven Rostedt (VMware) // SPDX-License-Identifier: GPL-2.0
25092dbc9SSteven Rostedt /*
35092dbc9SSteven Rostedt * ring buffer tester and benchmark
45092dbc9SSteven Rostedt *
55092dbc9SSteven Rostedt * Copyright (C) 2009 Steven Rostedt <[email protected]>
65092dbc9SSteven Rostedt */
75092dbc9SSteven Rostedt #include <linux/ring_buffer.h>
85092dbc9SSteven Rostedt #include <linux/completion.h>
95092dbc9SSteven Rostedt #include <linux/kthread.h>
10ae7e81c0SIngo Molnar #include <uapi/linux/sched/types.h>
115092dbc9SSteven Rostedt #include <linux/module.h>
12da194930STina Ruchandani #include <linux/ktime.h>
1379615760SChristoph Lameter #include <asm/local.h>
145092dbc9SSteven Rostedt
/*
 * Local mirror of a ring buffer sub-buffer (page) so that raw data
 * obtained via ring_buffer_read_page() can be parsed in read_page().
 * NOTE(review): layout must track the ring buffer's buffer_data_page
 * (ts + commit header followed by data) — confirm against ring_buffer.c
 * if the header ever changes.
 */
struct rb_page {
	u64 ts;			/* timestamp of the first event on the page */
	local_t commit;		/* number of data bytes committed to the page */
	char data[4080];	/* raw event payload area */
};
205092dbc9SSteven Rostedt
/* run time and sleep time in seconds */
#define RUN_TIME	10ULL
#define SLEEP_TIME	10

/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

/* set by the producer to ask the consumer to stop and complete read_done */
static int reader_finish;
static DECLARE_COMPLETION(read_start);	/* consumer signals it is running */
static DECLARE_COMPLETION(read_done);	/* consumer signals it has finished */

static struct trace_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
/* events successfully read by the consumer during the current run */
static unsigned long read;

static unsigned int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

/* writes performed between each timestamp reading in the producer loop */
static unsigned int write_iteration = 50;
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");

/* default both threads to the lowest priority (nice 19) */
static int producer_nice = MAX_NICE;
static int consumer_nice = MAX_NICE;

static int producer_fifo;
static int consumer_fifo;

module_param(producer_nice, int, 0644);
MODULE_PARM_DESC(producer_nice, "nice prio for producer");

module_param(consumer_nice, int, 0644);
MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");

module_param(producer_fifo, int, 0644);
MODULE_PARM_DESC(producer_fifo, "use fifo for producer: 0 - disabled, 1 - low prio, 2 - fifo");

module_param(consumer_fifo, int, 0644);
MODULE_PARM_DESC(consumer_fifo, "use fifo for consumer: 0 - disabled, 1 - low prio, 2 - fifo");

/* toggled each run: read via events (ring_buffer_consume) or via pages */
static int read_events;

/* sticky error flag; once set the whole test winds down */
static int test_error;
/*
 * Record a test failure. Only the first failure triggers the WARN_ON()
 * backtrace; subsequent calls are no-ops so the log is not flooded.
 */
#define TEST_ERROR()				\
	do {					\
		if (!test_error) {		\
			test_error = 1;		\
			WARN_ON(1);		\
		}				\
	} while (0)
745092dbc9SSteven Rostedt
/* Result of a single read attempt by the consumer. */
enum event_status {
	EVENT_FOUND,	/* a valid event was consumed */
	EVENT_DROPPED,	/* nothing available, or the event failed validation */
};
795092dbc9SSteven Rostedt
break_test(void)80f47cb66dSPetr Mladek static bool break_test(void)
81f47cb66dSPetr Mladek {
82f47cb66dSPetr Mladek return test_error || kthread_should_stop();
83f47cb66dSPetr Mladek }
84f47cb66dSPetr Mladek
read_event(int cpu)855092dbc9SSteven Rostedt static enum event_status read_event(int cpu)
865092dbc9SSteven Rostedt {
875092dbc9SSteven Rostedt struct ring_buffer_event *event;
885092dbc9SSteven Rostedt int *entry;
895092dbc9SSteven Rostedt u64 ts;
905092dbc9SSteven Rostedt
9166a8cb95SSteven Rostedt event = ring_buffer_consume(buffer, cpu, &ts, NULL);
925092dbc9SSteven Rostedt if (!event)
935092dbc9SSteven Rostedt return EVENT_DROPPED;
945092dbc9SSteven Rostedt
955092dbc9SSteven Rostedt entry = ring_buffer_event_data(event);
965092dbc9SSteven Rostedt if (*entry != cpu) {
97f47cb66dSPetr Mladek TEST_ERROR();
985092dbc9SSteven Rostedt return EVENT_DROPPED;
995092dbc9SSteven Rostedt }
1005092dbc9SSteven Rostedt
1015092dbc9SSteven Rostedt read++;
1025092dbc9SSteven Rostedt return EVENT_FOUND;
1035092dbc9SSteven Rostedt }
1045092dbc9SSteven Rostedt
/*
 * Read a full sub-buffer (page) from @cpu's buffer and walk the raw
 * event stream on it, validating each event's payload and advancing
 * by each event's encoded length.  Counts valid events in 'read'.
 * Returns EVENT_DROPPED if the page read failed, EVENT_FOUND otherwise.
 */
static enum event_status read_page(int cpu)
{
	struct buffer_data_read_page *bpage;
	struct ring_buffer_event *event;
	struct rb_page *rpage;
	unsigned long commit;
	int page_size;
	int *entry;
	int ret;
	int inc;	/* byte offset to the next event; -1 until decoded */
	int i;

	bpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(bpage))
		return EVENT_DROPPED;

	page_size = ring_buffer_subbuf_size_get(buffer);
	/* full_page=1: only succeed when a whole page can be swapped out */
	ret = ring_buffer_read_page(buffer, bpage, page_size, cpu, 1);
	if (ret >= 0) {
		rpage = ring_buffer_read_page_data(bpage);
		/* The commit may have missed event flags set, clear them */
		commit = local_read(&rpage->commit) & 0xfffff;
		for (i = 0; i < commit && !test_error ; i += inc) {

			/* offset must stay within the page's data area */
			if (i >= (page_size - offsetof(struct rb_page, data))) {
				TEST_ERROR();
				break;
			}

			inc = -1;
			event = (void *)&rpage->data[i];
			switch (event->type_len) {
			case RINGBUF_TYPE_PADDING:
				/* failed writes may be discarded events */
				if (!event->time_delta)
					TEST_ERROR();
				/* array[0] holds the padding length */
				inc = event->array[0] + 4;
				break;
			case RINGBUF_TYPE_TIME_EXTEND:
				/* time extends are a fixed 8 bytes */
				inc = 8;
				break;
			case 0:
				/* type_len 0: length stored in array[0] */
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					TEST_ERROR();
					break;
				}
				read++;
				if (!event->array[0]) {
					/* zero length would loop forever */
					TEST_ERROR();
					break;
				}
				inc = event->array[0] + 4;
				break;
			default:
				/* small event: length encoded in type_len */
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					TEST_ERROR();
					break;
				}
				read++;
				inc = ((event->type_len + 1) * 4);
			}
			if (test_error)
				break;

			/* a non-positive advance means a decoding bug */
			if (inc <= 0) {
				TEST_ERROR();
				break;
			}
		}
	}
	ring_buffer_free_read_page(buffer, cpu, bpage);

	if (ret < 0)
		return EVENT_DROPPED;
	return EVENT_FOUND;
}
1835092dbc9SSteven Rostedt
/*
 * Consumer main loop for one benchmark run: drain events from every
 * online CPU until the producer sets reader_finish, then acknowledge
 * via the read_done completion.  Alternates between event-based and
 * page-based reads on successive runs.
 */
static void ring_buffer_consumer(void)
{
	/* toggle between reading pages and events */
	read_events ^= 1;

	read = 0;
	/*
	 * Continue running until the producer specifically asks to stop
	 * and is ready for the completion.
	 */
	while (!READ_ONCE(reader_finish)) {
		int found = 1;

		/* keep sweeping the CPUs while any of them had data */
		while (found && !test_error) {
			int cpu;

			found = 0;
			for_each_online_cpu(cpu) {
				enum event_status stat;

				if (read_events)
					stat = read_event(cpu);
				else
					stat = read_page(cpu);

				if (test_error)
					break;

				if (stat == EVENT_FOUND)
					found = 1;

			}
		}

		/* Wait till the producer wakes us up when there is more data
		 * available or when the producer wants us to finish reading.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (reader_finish)
			break;

		schedule();
	}
	__set_current_state(TASK_RUNNING);
	reader_finish = 0;
	complete(&read_done);
}
2315092dbc9SSteven Rostedt
/*
 * Producer main loop for one benchmark run: hammer the ring buffer with
 * writes for RUN_TIME seconds, periodically waking the consumer, then
 * stop the consumer and report statistics via trace_printk().
 */
static void ring_buffer_producer(void)
{
	ktime_t start_time, end_time, timeout;
	unsigned long long time;
	unsigned long long entries;
	unsigned long long overruns;
	unsigned long missed = 0;	/* failed reserves */
	unsigned long hit = 0;		/* successful writes */
	unsigned long avg;
	int cnt = 0;

	/*
	 * Hammer the buffer for 10 secs (this may
	 * make the system stall)
	 */
	trace_printk("Starting ring buffer hammer\n");
	start_time = ktime_get();
	timeout = ktime_add_ns(start_time, RUN_TIME * NSEC_PER_SEC);
	do {
		struct ring_buffer_event *event;
		int *entry;
		int i;

		/* batch writes between the (relatively expensive) ktime reads */
		for (i = 0; i < write_iteration; i++) {
			event = ring_buffer_lock_reserve(buffer, 10);
			if (!event) {
				missed++;
			} else {
				hit++;
				entry = ring_buffer_event_data(event);
				/* payload is the writer's CPU id; readers check it */
				*entry = smp_processor_id();
				ring_buffer_unlock_commit(buffer);
			}
		}
		end_time = ktime_get();

		cnt++;
		if (consumer && !(cnt % wakeup_interval))
			wake_up_process(consumer);

#ifndef CONFIG_PREEMPTION
		/*
		 * If we are a non preempt kernel, the 10 seconds run will
		 * stop everything while it runs. Instead, we will call
		 * cond_resched and also add any time that was lost by a
		 * reschedule.
		 *
		 * Do a cond resched at the same frequency we would wake up
		 * the reader.
		 */
		if (cnt % wakeup_interval)
			cond_resched();
#endif
	} while (ktime_before(end_time, timeout) && !break_test());
	trace_printk("End ring buffer hammer\n");

	if (consumer) {
		/* Init both completions here to avoid races */
		init_completion(&read_start);
		init_completion(&read_done);
		/* the completions must be visible before the finish var */
		smp_wmb();
		reader_finish = 1;
		wake_up_process(consumer);
		wait_for_completion(&read_done);
	}

	time = ktime_us_delta(end_time, start_time);

	entries = ring_buffer_entries(buffer);
	overruns = ring_buffer_overruns(buffer);

	if (test_error)
		trace_printk("ERROR!\n");

	if (!disable_reader) {
		if (consumer_fifo)
			trace_printk("Running Consumer at SCHED_FIFO %s\n",
				     str_low_high(consumer_fifo == 1));
		else
			trace_printk("Running Consumer at nice: %d\n",
				     consumer_nice);
	}
	if (producer_fifo)
		trace_printk("Running Producer at SCHED_FIFO %s\n",
			     str_low_high(producer_fifo == 1));
	else
		trace_printk("Running Producer at nice: %d\n",
			     producer_nice);

	/* Let the user know that the test is running at low priority */
	if (!producer_fifo && !consumer_fifo &&
	    producer_nice == MAX_NICE && consumer_nice == MAX_NICE)
		trace_printk("WARNING!!! This test is running at lowest priority.\n");

	/*
	 * NOTE(review): several specifiers below print unsigned values with
	 * signed conversions (%lld for unsigned long long, %ld for unsigned
	 * long) — harmless at realistic magnitudes, but worth confirming.
	 */
	trace_printk("Time: %lld (usecs)\n", time);
	trace_printk("Overruns: %lld\n", overruns);
	if (disable_reader)
		trace_printk("Read: (reader disabled)\n");
	else
		trace_printk("Read: %ld (by %s)\n", read,
			read_events ? "events" : "pages");
	trace_printk("Entries: %lld\n", entries);
	trace_printk("Total: %lld\n", entries + overruns + read);
	trace_printk("Missed: %ld\n", missed);
	trace_printk("Hit: %ld\n", hit);

	/* Convert time from usecs to millisecs */
	do_div(time, USEC_PER_MSEC);
	if (time)
		hit /= (long)time;
	else
		trace_printk("TIME IS ZERO??\n");

	trace_printk("Entries per millisec: %ld\n", hit);

	if (hit) {
		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / hit;
		trace_printk("%ld ns per entry\n", avg);
	}

	if (missed) {
		if (time)
			missed /= (long)time;

		trace_printk("Total iterations per millisec: %ld\n",
			     hit + missed);

		/* it is possible that hit + missed will overflow and be zero */
		if (!(hit + missed)) {
			trace_printk("hit + missed overflowed and totalled zero!\n");
			hit--; /* make it non zero */
		}

		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / (hit + missed);
		trace_printk("%ld ns per entry\n", avg);
	}
}
3725092dbc9SSteven Rostedt
wait_to_die(void)3735092dbc9SSteven Rostedt static void wait_to_die(void)
3745092dbc9SSteven Rostedt {
3755092dbc9SSteven Rostedt set_current_state(TASK_INTERRUPTIBLE);
3765092dbc9SSteven Rostedt while (!kthread_should_stop()) {
3775092dbc9SSteven Rostedt schedule();
3785092dbc9SSteven Rostedt set_current_state(TASK_INTERRUPTIBLE);
3795092dbc9SSteven Rostedt }
3805092dbc9SSteven Rostedt __set_current_state(TASK_RUNNING);
3815092dbc9SSteven Rostedt }
3825092dbc9SSteven Rostedt
/*
 * Consumer kthread: announce readiness via read_start, run one consumer
 * pass, then sleep until the producer wakes us for the next run.  On a
 * test error, park in wait_to_die() until module unload stops us.
 */
static int ring_buffer_consumer_thread(void *arg)
{
	while (!break_test()) {
		/* tell the producer we are up and consuming */
		complete(&read_start);

		ring_buffer_consumer();

		/* state is set before the check to avoid missing a wakeup */
		set_current_state(TASK_INTERRUPTIBLE);
		if (break_test())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!kthread_should_stop())
		wait_to_die();

	return 0;
}
4025092dbc9SSteven Rostedt
/*
 * Producer kthread: reset the buffer, hand-shake with the consumer (if
 * enabled), run one producer pass, then sleep SLEEP_TIME seconds before
 * the next run.  On a test error, park in wait_to_die() until unload.
 */
static int ring_buffer_producer_thread(void *arg)
{
	while (!break_test()) {
		/* start each run from an empty buffer */
		ring_buffer_reset(buffer);

		if (consumer) {
			wake_up_process(consumer);
			/* wait until the consumer is actually running */
			wait_for_completion(&read_start);
		}

		ring_buffer_producer();
		if (break_test())
			goto out_kill;

		trace_printk("Sleeping for 10 secs\n");
		/* state is set before the check to avoid missing a stop */
		set_current_state(TASK_INTERRUPTIBLE);
		if (break_test())
			goto out_kill;
		schedule_timeout(HZ * SLEEP_TIME);
	}

out_kill:
	__set_current_state(TASK_RUNNING);
	if (!kthread_should_stop())
		wait_to_die();

	return 0;
}
4315092dbc9SSteven Rostedt
/*
 * Module init: allocate the benchmark ring buffer, spawn the consumer
 * (unless disabled) and producer kthreads, and apply the configured
 * scheduling policy (SCHED_FIFO level or nice value) to each.
 * Returns 0 on success or a negative errno, freeing the buffer on failure.
 */
static int __init ring_buffer_benchmark_init(void)
{
	int ret;

	/* make a one meg buffer in overwrite mode */
	buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	if (!disable_reader) {
		/* created stopped; the producer wakes it when ready */
		consumer = kthread_create(ring_buffer_consumer_thread,
					  NULL, "rb_consumer");
		ret = PTR_ERR(consumer);
		if (IS_ERR(consumer))
			goto out_fail;
	}

	producer = kthread_run(ring_buffer_producer_thread,
			       NULL, "rb_producer");
	ret = PTR_ERR(producer);

	if (IS_ERR(producer))
		goto out_kill;

	/*
	 * Run them as low-prio background tasks by default:
	 */
	if (!disable_reader) {
		if (consumer_fifo >= 2)
			sched_set_fifo(consumer);
		else if (consumer_fifo == 1)
			sched_set_fifo_low(consumer);
		else
			set_user_nice(consumer, consumer_nice);
	}

	if (producer_fifo >= 2)
		sched_set_fifo(producer);
	else if (producer_fifo == 1)
		sched_set_fifo_low(producer);
	else
		set_user_nice(producer, producer_nice);

	return 0;

 out_kill:
	if (consumer)
		kthread_stop(consumer);

 out_fail:
	ring_buffer_free(buffer);
	return ret;
}
4855092dbc9SSteven Rostedt
/*
 * Module exit: stop both kthreads before freeing the buffer.  The
 * producer is stopped first since it is the one that wakes and
 * coordinates the consumer.
 */
static void __exit ring_buffer_benchmark_exit(void)
{
	kthread_stop(producer);
	if (consumer)
		kthread_stop(consumer);
	ring_buffer_free(buffer);
}
4935092dbc9SSteven Rostedt
/* standard module entry/exit registration and metadata */
module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");
500