xref: /linux-6.15/kernel/trace/trace.c (revision 421d9d1b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * ring buffer based function tracer
4  *
5  * Copyright (C) 2007-2012 Steven Rostedt <[email protected]>
6  * Copyright (C) 2008 Ingo Molnar <[email protected]>
7  *
8  * Originally taken from the RT patch by:
9  *    Arnaldo Carvalho de Melo <[email protected]>
10  *
11  * Based on code from the latency_tracer, that is:
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
44 #include <linux/fs.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
51 
52 #include "trace.h"
53 #include "trace_output.h"
54 
55 /*
56  * On boot up, the ring buffer is set to the minimum size, so that
57  * we do not waste memory on systems that are not using tracing.
58  */
59 bool ring_buffer_expanded;
60 
61 /*
62  * We need to change this state when a selftest is running.
63  * A selftest will look into the ring buffer to count the
64  * entries inserted during the selftest, although some concurrent
65  * insertions into the ring buffer, such as trace_printk(), could occur
66  * at the same time, giving false positive or negative results.
67  */
68 static bool __read_mostly tracing_selftest_running;
69 
70 /*
71  * If boot-time tracing including tracers/events via kernel cmdline
72  * is running, we do not want to run SELFTEST.
73  */
74 bool __read_mostly tracing_selftest_disabled;
75 
76 #ifdef CONFIG_FTRACE_STARTUP_TEST
77 void __init disable_tracing_selftest(const char *reason)
78 {
79 	if (!tracing_selftest_disabled) {
80 		tracing_selftest_disabled = true;
81 		pr_info("Ftrace startup test is disabled due to %s\n", reason);
82 	}
83 }
84 #endif
85 
86 /* Pipe tracepoints to printk */
87 struct trace_iterator *tracepoint_print_iter;
88 int tracepoint_printk;
89 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
90 
91 /* For tracers that don't implement custom flags */
92 static struct tracer_opt dummy_tracer_opt[] = {
93 	{ }
94 };
95 
96 static int
97 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
98 {
99 	return 0;
100 }
101 
102 /*
103  * To prevent the comm cache from being overwritten when no
104  * tracing is active, only save the comm when a trace event
105  * occurs.
106  */
107 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
108 
109 /*
110  * Kill all tracing for good (never come back).
111  * It is initialized to 1 but will turn to zero if the initialization
112  * of the tracer is successful. But that is the only place that sets
113  * this back to zero.
114  */
115 static int tracing_disabled = 1;
116 
117 cpumask_var_t __read_mostly	tracing_buffer_mask;
118 
119 /*
120  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
121  *
122  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
123  * is set, then ftrace_dump is called. This will output the contents
124  * of the ftrace buffers to the console.  This is very useful for
125  * capturing traces that lead to crashes and outputting them to a
126  * serial console.
127  *
128  * It is off by default, but you can enable it either by specifying
129  * "ftrace_dump_on_oops" on the kernel command line, or by setting
130  * /proc/sys/kernel/ftrace_dump_on_oops.
131  * Set it to 1 to dump the buffers of all CPUs.
132  * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
133  */
134 
135 enum ftrace_dump_mode ftrace_dump_on_oops;
136 
137 /* When set, tracing will stop when a WARN*() is hit */
138 int __disable_trace_on_warning;
139 
140 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
141 /* Map of enums to their values, for "eval_map" file */
142 struct trace_eval_map_head {
143 	struct module			*mod;
144 	unsigned long			length;
145 };
146 
147 union trace_eval_map_item;
148 
149 struct trace_eval_map_tail {
150 	/*
151 	 * "end" is first and points to NULL as it must be different
152 	 * from "mod" or "eval_string"
153 	 */
154 	union trace_eval_map_item	*next;
155 	const char			*end;	/* points to NULL */
156 };
157 
158 static DEFINE_MUTEX(trace_eval_mutex);
159 
160 /*
161  * The trace_eval_maps are saved in an array with two extra elements,
162  * one at the beginning, and one at the end. The beginning item contains
163  * the count of the saved maps (head.length), and the module they
164  * belong to if not built in (head.mod). The ending item contains a
165  * pointer to the next array of saved eval_map items.
166  */
167 union trace_eval_map_item {
168 	struct trace_eval_map		map;
169 	struct trace_eval_map_head	head;
170 	struct trace_eval_map_tail	tail;
171 };
172 
173 static union trace_eval_map_item *trace_eval_maps;
174 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
175 
176 int tracing_set_tracer(struct trace_array *tr, const char *buf);
177 static void ftrace_trace_userstack(struct trace_array *tr,
178 				   struct trace_buffer *buffer,
179 				   unsigned int trace_ctx);
180 
181 #define MAX_TRACER_SIZE		100
182 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
183 static char *default_bootup_tracer;
184 
185 static bool allocate_snapshot;
186 
187 static int __init set_cmdline_ftrace(char *str)
188 {
189 	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
190 	default_bootup_tracer = bootup_tracer_buf;
191 	/* We are using ftrace early, expand it */
192 	ring_buffer_expanded = true;
193 	return 1;
194 }
195 __setup("ftrace=", set_cmdline_ftrace);
196 
197 static int __init set_ftrace_dump_on_oops(char *str)
198 {
199 	if (*str++ != '=' || !*str) {
200 		ftrace_dump_on_oops = DUMP_ALL;
201 		return 1;
202 	}
203 
204 	if (!strcmp("orig_cpu", str)) {
205 		ftrace_dump_on_oops = DUMP_ORIG;
206 		return 1;
207 	}
208 
209 	return 0;
210 }
211 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
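/*
 * A usage note for the handler above: on the kernel command line it
 * accepts either the bare flag or an "orig_cpu" argument, e.g.:
 *
 *	ftrace_dump_on_oops
 *	ftrace_dump_on_oops=orig_cpu
 */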
212 
213 static int __init stop_trace_on_warning(char *str)
214 {
215 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
216 		__disable_trace_on_warning = 1;
217 	return 1;
218 }
219 __setup("traceoff_on_warning", stop_trace_on_warning);
220 
221 static int __init boot_alloc_snapshot(char *str)
222 {
223 	allocate_snapshot = true;
224 	/* We also need the main ring buffer expanded */
225 	ring_buffer_expanded = true;
226 	return 1;
227 }
228 __setup("alloc_snapshot", boot_alloc_snapshot);
229 
230 
231 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
232 
233 static int __init set_trace_boot_options(char *str)
234 {
235 	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
236 	return 0;
237 }
238 __setup("trace_options=", set_trace_boot_options);
239 
240 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
241 static char *trace_boot_clock __initdata;
242 
243 static int __init set_trace_boot_clock(char *str)
244 {
245 	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
246 	trace_boot_clock = trace_boot_clock_buf;
247 	return 0;
248 }
249 __setup("trace_clock=", set_trace_boot_clock);
250 
251 static int __init set_tracepoint_printk(char *str)
252 {
253 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
254 		tracepoint_printk = 1;
255 	return 1;
256 }
257 __setup("tp_printk", set_tracepoint_printk);
258 
259 unsigned long long ns2usecs(u64 nsec)
260 {
261 	nsec += 500;
262 	do_div(nsec, 1000);
263 	return nsec;
264 }
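/*
 * A quick worked example of the rounding above: ns2usecs(1499) == 1,
 * while ns2usecs(1500) == 2, since 500 is added before dividing by 1000.
 */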
265 
266 static void
267 trace_process_export(struct trace_export *export,
268 	       struct ring_buffer_event *event, int flag)
269 {
270 	struct trace_entry *entry;
271 	unsigned int size = 0;
272 
273 	if (export->flags & flag) {
274 		entry = ring_buffer_event_data(event);
275 		size = ring_buffer_event_length(event);
276 		export->write(export, entry, size);
277 	}
278 }
279 
280 static DEFINE_MUTEX(ftrace_export_lock);
281 
282 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
283 
284 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
285 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
286 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
287 
288 static inline void ftrace_exports_enable(struct trace_export *export)
289 {
290 	if (export->flags & TRACE_EXPORT_FUNCTION)
291 		static_branch_inc(&trace_function_exports_enabled);
292 
293 	if (export->flags & TRACE_EXPORT_EVENT)
294 		static_branch_inc(&trace_event_exports_enabled);
295 
296 	if (export->flags & TRACE_EXPORT_MARKER)
297 		static_branch_inc(&trace_marker_exports_enabled);
298 }
299 
300 static inline void ftrace_exports_disable(struct trace_export *export)
301 {
302 	if (export->flags & TRACE_EXPORT_FUNCTION)
303 		static_branch_dec(&trace_function_exports_enabled);
304 
305 	if (export->flags & TRACE_EXPORT_EVENT)
306 		static_branch_dec(&trace_event_exports_enabled);
307 
308 	if (export->flags & TRACE_EXPORT_MARKER)
309 		static_branch_dec(&trace_marker_exports_enabled);
310 }
311 
312 static void ftrace_exports(struct ring_buffer_event *event, int flag)
313 {
314 	struct trace_export *export;
315 
316 	preempt_disable_notrace();
317 
318 	export = rcu_dereference_raw_check(ftrace_exports_list);
319 	while (export) {
320 		trace_process_export(export, event, flag);
321 		export = rcu_dereference_raw_check(export->next);
322 	}
323 
324 	preempt_enable_notrace();
325 }
326 
327 static inline void
328 add_trace_export(struct trace_export **list, struct trace_export *export)
329 {
330 	rcu_assign_pointer(export->next, *list);
331 	/*
332 	 * We are entering export into the list but another
333 	 * CPU might be walking that list. We need to make sure
334 	 * the export->next pointer is valid before another CPU sees
335 	 * the export pointer included into the list.
336 	 */
337 	rcu_assign_pointer(*list, export);
338 }
339 
340 static inline int
341 rm_trace_export(struct trace_export **list, struct trace_export *export)
342 {
343 	struct trace_export **p;
344 
345 	for (p = list; *p != NULL; p = &(*p)->next)
346 		if (*p == export)
347 			break;
348 
349 	if (*p != export)
350 		return -1;
351 
352 	rcu_assign_pointer(*p, (*p)->next);
353 
354 	return 0;
355 }
356 
357 static inline void
358 add_ftrace_export(struct trace_export **list, struct trace_export *export)
359 {
360 	ftrace_exports_enable(export);
361 
362 	add_trace_export(list, export);
363 }
364 
365 static inline int
366 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
367 {
368 	int ret;
369 
370 	ret = rm_trace_export(list, export);
371 	ftrace_exports_disable(export);
372 
373 	return ret;
374 }
375 
376 int register_ftrace_export(struct trace_export *export)
377 {
378 	if (WARN_ON_ONCE(!export->write))
379 		return -1;
380 
381 	mutex_lock(&ftrace_export_lock);
382 
383 	add_ftrace_export(&ftrace_exports_list, export);
384 
385 	mutex_unlock(&ftrace_export_lock);
386 
387 	return 0;
388 }
389 EXPORT_SYMBOL_GPL(register_ftrace_export);
390 
391 int unregister_ftrace_export(struct trace_export *export)
392 {
393 	int ret;
394 
395 	mutex_lock(&ftrace_export_lock);
396 
397 	ret = rm_ftrace_export(&ftrace_exports_list, export);
398 
399 	mutex_unlock(&ftrace_export_lock);
400 
401 	return ret;
402 }
403 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
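/*
 * A minimal sketch of how an exporter might use the two registration
 * functions above, assuming a hypothetical module; "my_export" and
 * "my_write" are illustrative names, and the callback signature
 * follows struct trace_export in include/linux/trace.h:
 *
 *	static void my_write(struct trace_export *export,
 *			     const void *entry, unsigned int size)
 *	{
 *		pr_debug("exporting %u bytes\n", size);
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_write,
 *		.flags	= TRACE_EXPORT_FUNCTION | TRACE_EXPORT_EVENT,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */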
404 
405 /* trace_flags holds trace_options default values */
406 #define TRACE_DEFAULT_FLAGS						\
407 	(FUNCTION_DEFAULT_FLAGS |					\
408 	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
409 	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
410 	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
411 	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
412 	 TRACE_ITER_HASH_PTR)
413 
414 /* trace_options that are only supported by global_trace */
415 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
416 	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
417 
418 /* trace_flags that are default zero for instances */
419 #define ZEROED_TRACE_FLAGS \
420 	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
421 
422 /*
423  * The global_trace is the descriptor that holds the top-level tracing
424  * buffers for the live tracing.
425  */
426 static struct trace_array global_trace = {
427 	.trace_flags = TRACE_DEFAULT_FLAGS,
428 };
429 
430 LIST_HEAD(ftrace_trace_arrays);
431 
432 int trace_array_get(struct trace_array *this_tr)
433 {
434 	struct trace_array *tr;
435 	int ret = -ENODEV;
436 
437 	mutex_lock(&trace_types_lock);
438 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
439 		if (tr == this_tr) {
440 			tr->ref++;
441 			ret = 0;
442 			break;
443 		}
444 	}
445 	mutex_unlock(&trace_types_lock);
446 
447 	return ret;
448 }
449 
450 static void __trace_array_put(struct trace_array *this_tr)
451 {
452 	WARN_ON(!this_tr->ref);
453 	this_tr->ref--;
454 }
455 
456 /**
457  * trace_array_put - Decrement the reference counter for this trace array.
458  * @this_tr : pointer to the trace array
459  *
460  * NOTE: Use this when we no longer need the trace array returned by
461  * trace_array_get_by_name(). This ensures the trace array can be later
462  * destroyed.
463  *
464  */
465 void trace_array_put(struct trace_array *this_tr)
466 {
467 	if (!this_tr)
468 		return;
469 
470 	mutex_lock(&trace_types_lock);
471 	__trace_array_put(this_tr);
472 	mutex_unlock(&trace_types_lock);
473 }
474 EXPORT_SYMBOL_GPL(trace_array_put);
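/*
 * A hedged usage sketch of the get/put pairing described above,
 * assuming the single-argument trace_array_get_by_name() defined
 * later in this file:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENODEV;
 *	(use the instance: enable events, write markers, etc.)
 *	trace_array_put(tr);
 */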
475 
476 int tracing_check_open_get_tr(struct trace_array *tr)
477 {
478 	int ret;
479 
480 	ret = security_locked_down(LOCKDOWN_TRACEFS);
481 	if (ret)
482 		return ret;
483 
484 	if (tracing_disabled)
485 		return -ENODEV;
486 
487 	if (tr && trace_array_get(tr) < 0)
488 		return -ENODEV;
489 
490 	return 0;
491 }
492 
493 int call_filter_check_discard(struct trace_event_call *call, void *rec,
494 			      struct trace_buffer *buffer,
495 			      struct ring_buffer_event *event)
496 {
497 	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
498 	    !filter_match_preds(call->filter, rec)) {
499 		__trace_event_discard_commit(buffer, event);
500 		return 1;
501 	}
502 
503 	return 0;
504 }
505 
506 void trace_free_pid_list(struct trace_pid_list *pid_list)
507 {
508 	vfree(pid_list->pids);
509 	kfree(pid_list);
510 }
511 
512 /**
513  * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
514  * @filtered_pids: The list of pids to check
515  * @search_pid: The PID to find in @filtered_pids
516  *
517  * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
518  */
519 bool
520 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
521 {
522 	/*
523 	 * If pid_max changed after filtered_pids was created, we
524 	 * by default ignore all pids greater than the previous pid_max.
525 	 */
526 	if (search_pid >= filtered_pids->pid_max)
527 		return false;
528 
529 	return test_bit(search_pid, filtered_pids->pids);
530 }
531 
532 /**
533  * trace_ignore_this_task - should a task be ignored for tracing
534  * @filtered_pids: The list of pids to check
535  * @filtered_no_pids: The list of pids not to be traced
536  * @task: The task that should be ignored if not filtered
537  *
538  * Checks if @task should be traced or not from @filtered_pids.
539  * Returns true if @task should *NOT* be traced.
540  * Returns false if @task should be traced.
541  */
542 bool
543 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
544 		       struct trace_pid_list *filtered_no_pids,
545 		       struct task_struct *task)
546 {
547 	/*
548 	 * If filtered_no_pids is not empty, and the task's pid is listed
549 	 * in filtered_no_pids, then return true.
550 	 * Otherwise, if filtered_pids is empty, that means we can
551 	 * trace all tasks. If it has content, then only trace pids
552 	 * within filtered_pids.
553 	 */
554 
555 	return (filtered_pids &&
556 		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
557 		(filtered_no_pids &&
558 		 trace_find_filtered_pid(filtered_no_pids, task->pid));
559 }
560 
561 /**
562  * trace_filter_add_remove_task - Add or remove a task from a pid_list
563  * @pid_list: The list to modify
564  * @self: The current task for fork or NULL for exit
565  * @task: The task to add or remove
566  *
567  * If adding a task, if @self is defined, the task is only added if @self
568  * is also included in @pid_list. This happens on fork and tasks should
569  * only be added when the parent is listed. If @self is NULL, then the
570  * @task pid will be removed from the list, which would happen on exit
571  * of a task.
572  */
573 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
574 				  struct task_struct *self,
575 				  struct task_struct *task)
576 {
577 	if (!pid_list)
578 		return;
579 
580 	/* For forks, we only add if the forking task is listed */
581 	if (self) {
582 		if (!trace_find_filtered_pid(pid_list, self->pid))
583 			return;
584 	}
585 
586 	/* Sorry, but we don't support pid_max changing after setting */
587 	if (task->pid >= pid_list->pid_max)
588 		return;
589 
590 	/* "self" is set for forks, and NULL for exits */
591 	if (self)
592 		set_bit(task->pid, pid_list->pids);
593 	else
594 		clear_bit(task->pid, pid_list->pids);
595 }
596 
597 /**
598  * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
599  * @pid_list: The pid list to show
600  * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
601  * @pos: The position of the file
602  *
603  * This is used by the seq_file "next" operation to iterate the pids
604  * listed in a trace_pid_list structure.
605  *
606  * Returns the pid+1 as we want to display pid of zero, but NULL would
607  * stop the iteration.
608  */
609 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
610 {
611 	unsigned long pid = (unsigned long)v;
612 
613 	(*pos)++;
614 
615 	/* pid is already +1 of the actual previous bit */
616 	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
617 
618 	/* Return pid + 1 to allow zero to be represented */
619 	if (pid < pid_list->pid_max)
620 		return (void *)(pid + 1);
621 
622 	return NULL;
623 }
624 
625 /**
626  * trace_pid_start - Used for seq_file to start reading pid lists
627  * @pid_list: The pid list to show
628  * @pos: The position of the file
629  *
630  * This is used by seq_file "start" operation to start the iteration
631  * of listing pids.
632  *
633  * Returns the pid+1 as we want to display pid of zero, but NULL would
634  * stop the iteration.
635  */
636 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
637 {
638 	unsigned long pid;
639 	loff_t l = 0;
640 
641 	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
642 	if (pid >= pid_list->pid_max)
643 		return NULL;
644 
645 	/* Return pid + 1 so that zero can be the exit value */
646 	for (pid++; pid && l < *pos;
647 	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
648 		;
649 	return (void *)pid;
650 }
651 
652 /**
653  * trace_pid_show - show the current pid in seq_file processing
654  * @m: The seq_file structure to write into
655  * @v: A void pointer of the pid (+1) value to display
656  *
657  * Can be directly used by seq_file operations to display the current
658  * pid value.
659  */
660 int trace_pid_show(struct seq_file *m, void *v)
661 {
662 	unsigned long pid = (unsigned long)v - 1;
663 
664 	seq_printf(m, "%lu\n", pid);
665 	return 0;
666 }
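/*
 * The three helpers above are meant to back a seq_file iterator. A
 * minimal wiring sketch, assuming a hypothetical file exposing
 * "my_pid_list" (locking around start/stop is the caller's concern
 * and is omitted here):
 *
 *	static void *my_seq_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list, pos);
 *	}
 *
 *	static void *my_seq_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list, v, pos);
 *	}
 *
 *	static void my_seq_stop(struct seq_file *m, void *v)
 *	{
 *	}
 *
 *	static const struct seq_operations my_seq_ops = {
 *		.start	= my_seq_start,
 *		.next	= my_seq_next,
 *		.stop	= my_seq_stop,
 *		.show	= trace_pid_show,
 *	};
 */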
667 
668 /* 128 should be much more than enough */
669 #define PID_BUF_SIZE		127
670 
671 int trace_pid_write(struct trace_pid_list *filtered_pids,
672 		    struct trace_pid_list **new_pid_list,
673 		    const char __user *ubuf, size_t cnt)
674 {
675 	struct trace_pid_list *pid_list;
676 	struct trace_parser parser;
677 	unsigned long val;
678 	int nr_pids = 0;
679 	ssize_t read = 0;
680 	ssize_t ret = 0;
681 	loff_t pos;
682 	pid_t pid;
683 
684 	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
685 		return -ENOMEM;
686 
687 	/*
688 	 * Always recreate a new array. The write is an all or nothing
689 	 * operation. Always create a new array when adding new pids by
690 	 * the user. If the operation fails, then the current list is
691 	 * not modified.
692 	 */
693 	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
694 	if (!pid_list) {
695 		trace_parser_put(&parser);
696 		return -ENOMEM;
697 	}
698 
699 	pid_list->pid_max = READ_ONCE(pid_max);
700 
701 	/* Only truncating will shrink pid_max */
702 	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
703 		pid_list->pid_max = filtered_pids->pid_max;
704 
705 	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
706 	if (!pid_list->pids) {
707 		trace_parser_put(&parser);
708 		kfree(pid_list);
709 		return -ENOMEM;
710 	}
711 
712 	if (filtered_pids) {
713 		/* copy the current bits to the new max */
714 		for_each_set_bit(pid, filtered_pids->pids,
715 				 filtered_pids->pid_max) {
716 			set_bit(pid, pid_list->pids);
717 			nr_pids++;
718 		}
719 	}
720 
721 	while (cnt > 0) {
722 
723 		pos = 0;
724 
725 		ret = trace_get_user(&parser, ubuf, cnt, &pos);
726 		if (ret < 0 || !trace_parser_loaded(&parser))
727 			break;
728 
729 		read += ret;
730 		ubuf += ret;
731 		cnt -= ret;
732 
733 		ret = -EINVAL;
734 		if (kstrtoul(parser.buffer, 0, &val))
735 			break;
736 		if (val >= pid_list->pid_max)
737 			break;
738 
739 		pid = (pid_t)val;
740 
741 		set_bit(pid, pid_list->pids);
742 		nr_pids++;
743 
744 		trace_parser_clear(&parser);
745 		ret = 0;
746 	}
747 	trace_parser_put(&parser);
748 
749 	if (ret < 0) {
750 		trace_free_pid_list(pid_list);
751 		return ret;
752 	}
753 
754 	if (!nr_pids) {
755 		/* Cleared the list of pids */
756 		trace_free_pid_list(pid_list);
757 		read = ret;
758 		pid_list = NULL;
759 	}
760 
761 	*new_pid_list = pid_list;
762 
763 	return read;
764 }
765 
766 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
767 {
768 	u64 ts;
769 
770 	/* Early boot up does not have a buffer yet */
771 	if (!buf->buffer)
772 		return trace_clock_local();
773 
774 	ts = ring_buffer_time_stamp(buf->buffer, cpu);
775 	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
776 
777 	return ts;
778 }
779 
780 u64 ftrace_now(int cpu)
781 {
782 	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
783 }
784 
785 /**
786  * tracing_is_enabled - Show if global_trace has been enabled
787  *
788  * Shows if the global trace has been enabled or not. It uses the
789  * mirror flag "buffer_disabled" to be used in fast paths such as for
790  * the irqsoff tracer. But it may be inaccurate due to races. If you
791  * need to know the accurate state, use tracing_is_on() which is a little
792  * slower, but accurate.
793  */
794 int tracing_is_enabled(void)
795 {
796 	/*
797 	 * For quick access (irqsoff uses this in fast path), just
798 	 * return the mirror variable of the state of the ring buffer.
799 	 * It's a little racy, but we don't really care.
800 	 */
801 	smp_rmb();
802 	return !global_trace.buffer_disabled;
803 }
804 
805 /*
806  * trace_buf_size is the size in bytes that is allocated
807  * for a buffer. Note, the number of bytes is always rounded
808  * to page size.
809  *
810  * This number is purposely set to a low number of 16384.
811  * If a dump on oops happens, it is much appreciated not to have
812  * to wait for all that output. In any case, this is configurable
813  * at both boot time and run time.
814  */
815 #define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
816 
817 static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
818 
819 /* trace_types holds a link list of available tracers. */
820 static struct tracer		*trace_types __read_mostly;
821 
822 /*
823  * trace_types_lock is used to protect the trace_types list.
824  */
825 DEFINE_MUTEX(trace_types_lock);
826 
827 /*
828  * Serialize access to the ring buffer
829  *
830  * The ring buffer serializes readers, but that is only low-level protection.
831  * The validity of the events (returned by ring_buffer_peek() and friends)
832  * is not protected by the ring buffer.
833  *
834  * The content of events may become garbage if we allow other processes to
835  * consume these events concurrently:
836  *   A) the page of the consumed events may become a normal page
837  *      (not a reader page) in the ring buffer, and this page will be
838  *      rewritten by the events producer.
839  *   B) the page of the consumed events may become a page for splice_read,
840  *      and this page will be returned to the system.
841  *
842  * These primitives allow multiple processes to access different cpu ring
843  * buffers concurrently.
844  *
845  * These primitives don't distinguish read-only and read-consume access.
846  * Multiple read-only accesses are also serialized.
847  */
848 
849 #ifdef CONFIG_SMP
850 static DECLARE_RWSEM(all_cpu_access_lock);
851 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
852 
853 static inline void trace_access_lock(int cpu)
854 {
855 	if (cpu == RING_BUFFER_ALL_CPUS) {
856 		/* gain it for accessing the whole ring buffer. */
857 		down_write(&all_cpu_access_lock);
858 	} else {
859 		/* gain it for accessing a cpu ring buffer. */
860 
861 		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
862 		down_read(&all_cpu_access_lock);
863 
864 		/* Secondly block other access to this @cpu ring buffer. */
865 		mutex_lock(&per_cpu(cpu_access_lock, cpu));
866 	}
867 }
868 
869 static inline void trace_access_unlock(int cpu)
870 {
871 	if (cpu == RING_BUFFER_ALL_CPUS) {
872 		up_write(&all_cpu_access_lock);
873 	} else {
874 		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
875 		up_read(&all_cpu_access_lock);
876 	}
877 }
878 
879 static inline void trace_access_lock_init(void)
880 {
881 	int cpu;
882 
883 	for_each_possible_cpu(cpu)
884 		mutex_init(&per_cpu(cpu_access_lock, cpu));
885 }
886 
887 #else
888 
889 static DEFINE_MUTEX(access_lock);
890 
891 static inline void trace_access_lock(int cpu)
892 {
893 	(void)cpu;
894 	mutex_lock(&access_lock);
895 }
896 
897 static inline void trace_access_unlock(int cpu)
898 {
899 	(void)cpu;
900 	mutex_unlock(&access_lock);
901 }
902 
903 static inline void trace_access_lock_init(void)
904 {
905 }
906 
907 #endif
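/*
 * A sketch of the pattern readers of a per-cpu buffer follow with the
 * helpers above (the pipe and splice read paths later in this file do
 * exactly this); "cpu" is whichever cpu_file the reader holds:
 *
 *	trace_access_lock(cpu);
 *	(peek at or consume events from that cpu's ring buffer)
 *	trace_access_unlock(cpu);
 *
 * Passing RING_BUFFER_ALL_CPUS instead takes the access lock
 * exclusively across every per-cpu buffer.
 */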
908 
909 #ifdef CONFIG_STACKTRACE
910 static void __ftrace_trace_stack(struct trace_buffer *buffer,
911 				 unsigned int trace_ctx,
912 				 int skip, struct pt_regs *regs);
913 static inline void ftrace_trace_stack(struct trace_array *tr,
914 				      struct trace_buffer *buffer,
915 				      unsigned int trace_ctx,
916 				      int skip, struct pt_regs *regs);
917 
918 #else
919 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
920 					unsigned int trace_ctx,
921 					int skip, struct pt_regs *regs)
922 {
923 }
924 static inline void ftrace_trace_stack(struct trace_array *tr,
925 				      struct trace_buffer *buffer,
926 				      unsigned long trace_ctx,
927 				      int skip, struct pt_regs *regs)
928 {
929 }
930 
931 #endif
932 
933 static __always_inline void
934 trace_event_setup(struct ring_buffer_event *event,
935 		  int type, unsigned int trace_ctx)
936 {
937 	struct trace_entry *ent = ring_buffer_event_data(event);
938 
939 	tracing_generic_entry_update(ent, type, trace_ctx);
940 }
941 
942 static __always_inline struct ring_buffer_event *
943 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
944 			  int type,
945 			  unsigned long len,
946 			  unsigned int trace_ctx)
947 {
948 	struct ring_buffer_event *event;
949 
950 	event = ring_buffer_lock_reserve(buffer, len);
951 	if (event != NULL)
952 		trace_event_setup(event, type, trace_ctx);
953 
954 	return event;
955 }
956 
957 void tracer_tracing_on(struct trace_array *tr)
958 {
959 	if (tr->array_buffer.buffer)
960 		ring_buffer_record_on(tr->array_buffer.buffer);
961 	/*
962 	 * This flag is looked at when buffers haven't been allocated
963 	 * yet, or by some tracers (like irqsoff), that just want to
964 	 * know if the ring buffer has been disabled, but it can handle
965 	 * races of where it gets disabled but we still do a record.
966 	 * As the check is in the fast path of the tracers, it is more
967 	 * important to be fast than accurate.
968 	 */
969 	tr->buffer_disabled = 0;
970 	/* Make the flag seen by readers */
971 	smp_wmb();
972 }
973 
974 /**
975  * tracing_on - enable tracing buffers
976  *
977  * This function enables tracing buffers that may have been
978  * disabled with tracing_off.
979  */
980 void tracing_on(void)
981 {
982 	tracer_tracing_on(&global_trace);
983 }
984 EXPORT_SYMBOL_GPL(tracing_on);
985 
986 
987 static __always_inline void
988 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
989 {
990 	__this_cpu_write(trace_taskinfo_save, true);
991 
992 	/* If this is the temp buffer, we need to commit fully */
993 	if (this_cpu_read(trace_buffered_event) == event) {
994 		/* Length is in event->array[0] */
995 		ring_buffer_write(buffer, event->array[0], &event->array[1]);
996 		/* Release the temp buffer */
997 		this_cpu_dec(trace_buffered_event_cnt);
998 	} else
999 		ring_buffer_unlock_commit(buffer, event);
1000 }
1001 
1002 /**
1003  * __trace_puts - write a constant string into the trace buffer.
1004  * @ip:	   The address of the caller
1005  * @str:   The constant string to write
1006  * @size:  The size of the string.
1007  */
1008 int __trace_puts(unsigned long ip, const char *str, int size)
1009 {
1010 	struct ring_buffer_event *event;
1011 	struct trace_buffer *buffer;
1012 	struct print_entry *entry;
1013 	unsigned int trace_ctx;
1014 	int alloc;
1015 
1016 	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1017 		return 0;
1018 
1019 	if (unlikely(tracing_selftest_running || tracing_disabled))
1020 		return 0;
1021 
1022 	alloc = sizeof(*entry) + size + 2; /* possible \n added */
1023 
1024 	trace_ctx = tracing_gen_ctx();
1025 	buffer = global_trace.array_buffer.buffer;
1026 	ring_buffer_nest_start(buffer);
1027 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1028 					    trace_ctx);
1029 	if (!event) {
1030 		size = 0;
1031 		goto out;
1032 	}
1033 
1034 	entry = ring_buffer_event_data(event);
1035 	entry->ip = ip;
1036 
1037 	memcpy(&entry->buf, str, size);
1038 
1039 	/* Add a newline if necessary */
1040 	if (entry->buf[size - 1] != '\n') {
1041 		entry->buf[size] = '\n';
1042 		entry->buf[size + 1] = '\0';
1043 	} else
1044 		entry->buf[size] = '\0';
1045 
1046 	__buffer_unlock_commit(buffer, event);
1047 	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1048  out:
1049 	ring_buffer_nest_end(buffer);
1050 	return size;
1051 }
1052 EXPORT_SYMBOL_GPL(__trace_puts);
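/*
 * Callers normally reach __trace_puts() through the trace_puts()
 * wrapper macro (declared with the other trace_printk() helpers)
 * rather than calling it directly, e.g. for quick debug markers:
 *
 *	trace_puts("hit the slow path\n");
 *
 * which writes the string straight into the top-level trace buffer.
 */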
1053 
1054 /**
1055  * __trace_bputs - write the pointer to a constant string into trace buffer
1056  * @ip:	   The address of the caller
1057  * @str:   The constant string to write to the buffer to
1058  */
1059 int __trace_bputs(unsigned long ip, const char *str)
1060 {
1061 	struct ring_buffer_event *event;
1062 	struct trace_buffer *buffer;
1063 	struct bputs_entry *entry;
1064 	unsigned int trace_ctx;
1065 	int size = sizeof(struct bputs_entry);
1066 	int ret = 0;
1067 
1068 	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1069 		return 0;
1070 
1071 	if (unlikely(tracing_selftest_running || tracing_disabled))
1072 		return 0;
1073 
1074 	trace_ctx = tracing_gen_ctx();
1075 	buffer = global_trace.array_buffer.buffer;
1076 
1077 	ring_buffer_nest_start(buffer);
1078 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1079 					    trace_ctx);
1080 	if (!event)
1081 		goto out;
1082 
1083 	entry = ring_buffer_event_data(event);
1084 	entry->ip			= ip;
1085 	entry->str			= str;
1086 
1087 	__buffer_unlock_commit(buffer, event);
1088 	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1089 
1090 	ret = 1;
1091  out:
1092 	ring_buffer_nest_end(buffer);
1093 	return ret;
1094 }
1095 EXPORT_SYMBOL_GPL(__trace_bputs);
1096 
1097 #ifdef CONFIG_TRACER_SNAPSHOT
1098 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1099 					   void *cond_data)
1100 {
1101 	struct tracer *tracer = tr->current_trace;
1102 	unsigned long flags;
1103 
1104 	if (in_nmi()) {
1105 		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1106 		internal_trace_puts("*** snapshot is being ignored        ***\n");
1107 		return;
1108 	}
1109 
1110 	if (!tr->allocated_snapshot) {
1111 		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
1112 		internal_trace_puts("*** stopping trace here!   ***\n");
1113 		tracing_off();
1114 		return;
1115 	}
1116 
1117 	/* Note, snapshot can not be used when the tracer uses it */
1118 	if (tracer->use_max_tr) {
1119 		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
1120 		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
1121 		return;
1122 	}
1123 
1124 	local_irq_save(flags);
1125 	update_max_tr(tr, current, smp_processor_id(), cond_data);
1126 	local_irq_restore(flags);
1127 }
1128 
1129 void tracing_snapshot_instance(struct trace_array *tr)
1130 {
1131 	tracing_snapshot_instance_cond(tr, NULL);
1132 }
1133 
1134 /**
1135  * tracing_snapshot - take a snapshot of the current buffer.
1136  *
1137  * This causes a swap between the snapshot buffer and the current live
1138  * tracing buffer. You can use this to take snapshots of the live
1139  * trace when some condition is triggered, but continue to trace.
1140  *
1141  * Note, make sure to allocate the snapshot with either
1142  * a tracing_snapshot_alloc(), or by doing it manually
1143  * with: echo 1 > /sys/kernel/debug/tracing/snapshot
1144  *
1145  * If the snapshot buffer is not allocated, it will stop tracing.
1146  * Basically making a permanent snapshot.
1147  */
1148 void tracing_snapshot(void)
1149 {
1150 	struct trace_array *tr = &global_trace;
1151 
1152 	tracing_snapshot_instance(tr);
1153 }
1154 EXPORT_SYMBOL_GPL(tracing_snapshot);
1155 
1156 /**
1157  * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1158  * @tr:		The tracing instance to snapshot
1159  * @cond_data:	The data to be tested conditionally, and possibly saved
1160  *
1161  * This is the same as tracing_snapshot() except that the snapshot is
1162  * conditional - the snapshot will only happen if the
1163  * cond_snapshot.update() implementation receiving the cond_data
1164  * returns true, which means that the trace array's cond_snapshot
1165  * update() operation used the cond_data to determine whether the
1166  * snapshot should be taken, and if it was, presumably saved it along
1167  * with the snapshot.
1168  */
1169 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1170 {
1171 	tracing_snapshot_instance_cond(tr, cond_data);
1172 }
1173 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1174 
1175 /**
1176  * tracing_snapshot_cond_data - get the user data associated with a snapshot
1177  * @tr:		The tracing instance
1178  *
1179  * When the user enables a conditional snapshot using
1180  * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1181  * with the snapshot.  This accessor is used to retrieve it.
1182  *
1183  * Should not be called from cond_snapshot.update(), since it takes
1184  * the tr->max_lock lock, which the code calling
1185  * cond_snapshot.update() has already taken.
1186  *
1187  * Returns the cond_data associated with the trace array's snapshot.
1188  */
1189 void *tracing_cond_snapshot_data(struct trace_array *tr)
1190 {
1191 	void *cond_data = NULL;
1192 
1193 	arch_spin_lock(&tr->max_lock);
1194 
1195 	if (tr->cond_snapshot)
1196 		cond_data = tr->cond_snapshot->cond_data;
1197 
1198 	arch_spin_unlock(&tr->max_lock);
1199 
1200 	return cond_data;
1201 }
1202 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1203 
1204 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1205 					struct array_buffer *size_buf, int cpu_id);
1206 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1207 
1208 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1209 {
1210 	int ret;
1211 
1212 	if (!tr->allocated_snapshot) {
1213 
1214 		/* allocate spare buffer */
1215 		ret = resize_buffer_duplicate_size(&tr->max_buffer,
1216 				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1217 		if (ret < 0)
1218 			return ret;
1219 
1220 		tr->allocated_snapshot = true;
1221 	}
1222 
1223 	return 0;
1224 }
1225 
1226 static void free_snapshot(struct trace_array *tr)
1227 {
1228 	/*
1229 	 * We don't free the ring buffer. Instead, we resize it because
1230 	 * the max_tr ring buffer has some state (e.g. ring->clock) and
1231 	 * we want to preserve it.
1232 	 */
1233 	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1234 	set_buffer_entries(&tr->max_buffer, 1);
1235 	tracing_reset_online_cpus(&tr->max_buffer);
1236 	tr->allocated_snapshot = false;
1237 }
1238 
1239 /**
1240  * tracing_alloc_snapshot - allocate snapshot buffer.
1241  *
1242  * This only allocates the snapshot buffer if it isn't already
1243  * allocated - it doesn't also take a snapshot.
1244  *
1245  * This is meant to be used in cases where the snapshot buffer needs
1246  * to be set up for events that can't sleep but need to be able to
1247  * trigger a snapshot.
1248  */
1249 int tracing_alloc_snapshot(void)
1250 {
1251 	struct trace_array *tr = &global_trace;
1252 	int ret;
1253 
1254 	ret = tracing_alloc_snapshot_instance(tr);
1255 	WARN_ON(ret < 0);
1256 
1257 	return ret;
1258 }
1259 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1260 
1261 /**
1262  * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1263  *
1264  * This is similar to tracing_snapshot(), but it will allocate the
1265  * snapshot buffer if it isn't already allocated. Use this only
1266  * where it is safe to sleep, as the allocation may sleep.
1267  *
1268  * This causes a swap between the snapshot buffer and the current live
1269  * tracing buffer. You can use this to take snapshots of the live
1270  * trace when some condition is triggered, but continue to trace.
1271  */
1272 void tracing_snapshot_alloc(void)
1273 {
1274 	int ret;
1275 
1276 	ret = tracing_alloc_snapshot();
1277 	if (ret < 0)
1278 		return;
1279 
1280 	tracing_snapshot();
1281 }
1282 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
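/*
 * A hedged sketch of how the two snapshot calls above combine: allocate
 * once from a context that may sleep, then trigger snapshots at the
 * point of interest ("something_suspicious" is illustrative):
 *
 *	(during setup, process context)
 *	if (tracing_alloc_snapshot() < 0)
 *		return;
 *
 *	(later, at the point of interest)
 *	if (something_suspicious)
 *		tracing_snapshot();
 */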
1283 
1284 /**
1285  * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1286  * @tr:		The tracing instance
1287  * @cond_data:	User data to associate with the snapshot
1288  * @update:	Implementation of the cond_snapshot update function
1289  *
1290  * Check whether the conditional snapshot for the given instance has
1291  * already been enabled, or if the current tracer is already using a
1292  * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1293  * save the cond_data and update function inside.
1294  *
1295  * Returns 0 if successful, error otherwise.
1296  */
1297 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1298 				 cond_update_fn_t update)
1299 {
1300 	struct cond_snapshot *cond_snapshot;
1301 	int ret = 0;
1302 
1303 	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1304 	if (!cond_snapshot)
1305 		return -ENOMEM;
1306 
1307 	cond_snapshot->cond_data = cond_data;
1308 	cond_snapshot->update = update;
1309 
1310 	mutex_lock(&trace_types_lock);
1311 
1312 	ret = tracing_alloc_snapshot_instance(tr);
1313 	if (ret)
1314 		goto fail_unlock;
1315 
1316 	if (tr->current_trace->use_max_tr) {
1317 		ret = -EBUSY;
1318 		goto fail_unlock;
1319 	}
1320 
1321 	/*
1322 	 * The cond_snapshot can only change to NULL without the
1323 	 * trace_types_lock. We don't care if we race with it going
1324 	 * to NULL, but we want to make sure that it's not set to
1325 	 * something other than NULL when we get here, which we can
1326 	 * do safely with only holding the trace_types_lock and not
1327 	 * having to take the max_lock.
1328 	 */
1329 	if (tr->cond_snapshot) {
1330 		ret = -EBUSY;
1331 		goto fail_unlock;
1332 	}
1333 
1334 	arch_spin_lock(&tr->max_lock);
1335 	tr->cond_snapshot = cond_snapshot;
1336 	arch_spin_unlock(&tr->max_lock);
1337 
1338 	mutex_unlock(&trace_types_lock);
1339 
1340 	return ret;
1341 
1342  fail_unlock:
1343 	mutex_unlock(&trace_types_lock);
1344 	kfree(cond_snapshot);
1345 	return ret;
1346 }
1347 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
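/*
 * A minimal sketch of the conditional snapshot flow, with an
 * illustrative update callback that only allows the swap once a
 * caller-owned counter passes a threshold ("my_update", "my_count"
 * and "MY_THRESHOLD" are assumptions, not part of this file):
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		unsigned long *count = cond_data;
 *
 *		return *count > MY_THRESHOLD;
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_count, my_update);
 *	...
 *	my_count++;
 *	tracing_snapshot_cond(tr, &my_count);
 *	...
 *	tracing_snapshot_cond_disable(tr);
 */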
1348 
1349 /**
1350  * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1351  * @tr:		The tracing instance
1352  *
1353  * Check whether the conditional snapshot for the given instance is
1354  * enabled; if so, free the cond_snapshot associated with it,
1355  * otherwise return -EINVAL.
1356  *
1357  * Returns 0 if successful, error otherwise.
1358  */
1359 int tracing_snapshot_cond_disable(struct trace_array *tr)
1360 {
1361 	int ret = 0;
1362 
1363 	arch_spin_lock(&tr->max_lock);
1364 
1365 	if (!tr->cond_snapshot)
1366 		ret = -EINVAL;
1367 	else {
1368 		kfree(tr->cond_snapshot);
1369 		tr->cond_snapshot = NULL;
1370 	}
1371 
1372 	arch_spin_unlock(&tr->max_lock);
1373 
1374 	return ret;
1375 }
1376 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1377 #else
1378 void tracing_snapshot(void)
1379 {
1380 	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1381 }
1382 EXPORT_SYMBOL_GPL(tracing_snapshot);
1383 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1384 {
1385 	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1386 }
1387 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1388 int tracing_alloc_snapshot(void)
1389 {
1390 	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1391 	return -ENODEV;
1392 }
1393 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1394 void tracing_snapshot_alloc(void)
1395 {
1396 	/* Give warning */
1397 	tracing_snapshot();
1398 }
1399 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1400 void *tracing_cond_snapshot_data(struct trace_array *tr)
1401 {
1402 	return NULL;
1403 }
1404 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1405 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1406 {
1407 	return -ENODEV;
1408 }
1409 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1410 int tracing_snapshot_cond_disable(struct trace_array *tr)
1411 {
1412 	return false;
1413 }
1414 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1415 #endif /* CONFIG_TRACER_SNAPSHOT */
1416 
1417 void tracer_tracing_off(struct trace_array *tr)
1418 {
1419 	if (tr->array_buffer.buffer)
1420 		ring_buffer_record_off(tr->array_buffer.buffer);
1421 	/*
1422 	 * This flag is looked at when buffers haven't been allocated
1423 	 * yet, or by some tracers (like irqsoff), that just want to
1424 	 * know if the ring buffer has been disabled, but it can handle
1425 	 * races of where it gets disabled but we still do a record.
1426 	 * As the check is in the fast path of the tracers, it is more
1427 	 * important to be fast than accurate.
1428 	 */
1429 	tr->buffer_disabled = 1;
1430 	/* Make the flag seen by readers */
1431 	smp_wmb();
1432 }
1433 
1434 /**
1435  * tracing_off - turn off tracing buffers
1436  *
1437  * This function stops the tracing buffers from recording data.
1438  * It does not disable any overhead the tracers themselves may
1439  * be causing. This function simply causes all recording to
1440  * the ring buffers to fail.
1441  */
1442 void tracing_off(void)
1443 {
1444 	tracer_tracing_off(&global_trace);
1445 }
1446 EXPORT_SYMBOL_GPL(tracing_off);
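/*
 * A common debugging pattern with tracing_off() is to freeze the ring
 * buffer the moment a suspect condition is seen, so the events leading
 * up to it are preserved for later inspection:
 *
 *	if (WARN_ON(broken_invariant))
 *		tracing_off();
 *
 * Tracing can be re-enabled with tracing_on() or by writing 1 to the
 * "tracing_on" file in tracefs.
 */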
1447 
1448 void disable_trace_on_warning(void)
1449 {
1450 	if (__disable_trace_on_warning) {
1451 		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1452 			"Disabling tracing due to warning\n");
1453 		tracing_off();
1454 	}
1455 }
1456 
1457 /**
1458  * tracer_tracing_is_on - show real state of ring buffer enabled
1459  * @tr : the trace array to know if ring buffer is enabled
1460  *
1461  * Shows real state of the ring buffer if it is enabled or not.
1462  */
1463 bool tracer_tracing_is_on(struct trace_array *tr)
1464 {
1465 	if (tr->array_buffer.buffer)
1466 		return ring_buffer_record_is_on(tr->array_buffer.buffer);
1467 	return !tr->buffer_disabled;
1468 }
1469 
1470 /**
1471  * tracing_is_on - show state of ring buffers enabled
1472  */
1473 int tracing_is_on(void)
1474 {
1475 	return tracer_tracing_is_on(&global_trace);
1476 }
1477 EXPORT_SYMBOL_GPL(tracing_is_on);
1478 
1479 static int __init set_buf_size(char *str)
1480 {
1481 	unsigned long buf_size;
1482 
1483 	if (!str)
1484 		return 0;
1485 	buf_size = memparse(str, &str);
1486 	/* nr_entries can not be zero */
1487 	if (buf_size == 0)
1488 		return 0;
1489 	trace_buf_size = buf_size;
1490 	return 1;
1491 }
1492 __setup("trace_buf_size=", set_buf_size);
1493 
1494 static int __init set_tracing_thresh(char *str)
1495 {
1496 	unsigned long threshold;
1497 	int ret;
1498 
1499 	if (!str)
1500 		return 0;
1501 	ret = kstrtoul(str, 0, &threshold);
1502 	if (ret < 0)
1503 		return 0;
1504 	tracing_thresh = threshold * 1000;
1505 	return 1;
1506 }
1507 __setup("tracing_thresh=", set_tracing_thresh);
1508 
1509 unsigned long nsecs_to_usecs(unsigned long nsecs)
1510 {
1511 	return nsecs / 1000;
1512 }
1513 
1514 /*
1515  * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1516  * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1517  * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1518  * of strings in the order that the evals (enum) were defined.
1519  */
1520 #undef C
1521 #define C(a, b) b
1522 
1523 /* These must match the bit positions in trace_iterator_flags */
1524 static const char *trace_options[] = {
1525 	TRACE_FLAGS
1526 	NULL
1527 };
1528 
1529 static struct {
1530 	u64 (*func)(void);
1531 	const char *name;
1532 	int in_ns;		/* is this clock in nanoseconds? */
1533 } trace_clocks[] = {
1534 	{ trace_clock_local,		"local",	1 },
1535 	{ trace_clock_global,		"global",	1 },
1536 	{ trace_clock_counter,		"counter",	0 },
1537 	{ trace_clock_jiffies,		"uptime",	0 },
1538 	{ trace_clock,			"perf",		1 },
1539 	{ ktime_get_mono_fast_ns,	"mono",		1 },
1540 	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
1541 	{ ktime_get_boot_fast_ns,	"boot",		1 },
1542 	ARCH_TRACE_CLOCKS
1543 };
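/*
 * Any of the clock names above can be selected at boot with the
 * "trace_clock=" parameter handled earlier in this file, or at run
 * time through tracefs, e.g.:
 *
 *	echo mono > /sys/kernel/tracing/trace_clock
 */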
1544 
1545 bool trace_clock_in_ns(struct trace_array *tr)
1546 {
1547 	if (trace_clocks[tr->clock_id].in_ns)
1548 		return true;
1549 
1550 	return false;
1551 }
1552 
1553 /*
1554  * trace_parser_get_init - gets the buffer for trace parser
1555  */
1556 int trace_parser_get_init(struct trace_parser *parser, int size)
1557 {
1558 	memset(parser, 0, sizeof(*parser));
1559 
1560 	parser->buffer = kmalloc(size, GFP_KERNEL);
1561 	if (!parser->buffer)
1562 		return 1;
1563 
1564 	parser->size = size;
1565 	return 0;
1566 }
1567 
1568 /*
1569  * trace_parser_put - frees the buffer for trace parser
1570  */
1571 void trace_parser_put(struct trace_parser *parser)
1572 {
1573 	kfree(parser->buffer);
1574 	parser->buffer = NULL;
1575 }
1576 
1577 /*
1578  * trace_get_user - reads the user input string separated by space
1579  * (matched by isspace(ch))
1580  *
1581  * For each string found the 'struct trace_parser' is updated,
1582  * and the function returns.
1583  *
1584  * Returns number of bytes read.
1585  *
1586  * See kernel/trace/trace.h for 'struct trace_parser' details.
1587  */
1588 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1589 	size_t cnt, loff_t *ppos)
1590 {
1591 	char ch;
1592 	size_t read = 0;
1593 	ssize_t ret;
1594 
1595 	if (!*ppos)
1596 		trace_parser_clear(parser);
1597 
1598 	ret = get_user(ch, ubuf++);
1599 	if (ret)
1600 		goto out;
1601 
1602 	read++;
1603 	cnt--;
1604 
1605 	/*
1606 	 * The parser is not finished with the last write,
1607 	 * continue reading the user input without skipping spaces.
1608 	 */
1609 	if (!parser->cont) {
1610 		/* skip white space */
1611 		while (cnt && isspace(ch)) {
1612 			ret = get_user(ch, ubuf++);
1613 			if (ret)
1614 				goto out;
1615 			read++;
1616 			cnt--;
1617 		}
1618 
1619 		parser->idx = 0;
1620 
1621 		/* only spaces were written */
1622 		if (isspace(ch) || !ch) {
1623 			*ppos += read;
1624 			ret = read;
1625 			goto out;
1626 		}
1627 	}
1628 
1629 	/* read the non-space input */
1630 	while (cnt && !isspace(ch) && ch) {
1631 		if (parser->idx < parser->size - 1)
1632 			parser->buffer[parser->idx++] = ch;
1633 		else {
1634 			ret = -EINVAL;
1635 			goto out;
1636 		}
1637 		ret = get_user(ch, ubuf++);
1638 		if (ret)
1639 			goto out;
1640 		read++;
1641 		cnt--;
1642 	}
1643 
1644 	/* We either got finished input or we have to wait for another call. */
1645 	if (isspace(ch) || !ch) {
1646 		parser->buffer[parser->idx] = 0;
1647 		parser->cont = false;
1648 	} else if (parser->idx < parser->size - 1) {
1649 		parser->cont = true;
1650 		parser->buffer[parser->idx++] = ch;
1651 		/* Make sure the parsed string always terminates with '\0'. */
1652 		parser->buffer[parser->idx] = 0;
1653 	} else {
1654 		ret = -EINVAL;
1655 		goto out;
1656 	}
1657 
1658 	*ppos += read;
1659 	ret = read;
1660 
1661 out:
1662 	return ret;
1663 }
1664 
1665 /* TODO add a seq_buf_to_buffer() */
1666 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1667 {
1668 	int len;
1669 
1670 	if (trace_seq_used(s) <= s->seq.readpos)
1671 		return -EBUSY;
1672 
1673 	len = trace_seq_used(s) - s->seq.readpos;
1674 	if (cnt > len)
1675 		cnt = len;
1676 	memcpy(buf, s->buffer + s->seq.readpos, cnt);
1677 
1678 	s->seq.readpos += cnt;
1679 	return cnt;
1680 }
1681 
1682 unsigned long __read_mostly	tracing_thresh;
1683 static const struct file_operations tracing_max_lat_fops;
1684 
1685 #if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1686 	defined(CONFIG_FSNOTIFY)
1687 
1688 static struct workqueue_struct *fsnotify_wq;
1689 
1690 static void latency_fsnotify_workfn(struct work_struct *work)
1691 {
1692 	struct trace_array *tr = container_of(work, struct trace_array,
1693 					      fsnotify_work);
1694 	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1695 }
1696 
1697 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1698 {
1699 	struct trace_array *tr = container_of(iwork, struct trace_array,
1700 					      fsnotify_irqwork);
1701 	queue_work(fsnotify_wq, &tr->fsnotify_work);
1702 }
1703 
1704 static void trace_create_maxlat_file(struct trace_array *tr,
1705 				     struct dentry *d_tracer)
1706 {
1707 	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1708 	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1709 	tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1710 					      d_tracer, &tr->max_latency,
1711 					      &tracing_max_lat_fops);
1712 }
1713 
1714 __init static int latency_fsnotify_init(void)
1715 {
1716 	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1717 				      WQ_UNBOUND | WQ_HIGHPRI, 0);
1718 	if (!fsnotify_wq) {
1719 		pr_err("Unable to allocate tr_max_lat_wq\n");
1720 		return -ENOMEM;
1721 	}
1722 	return 0;
1723 }
1724 
1725 late_initcall_sync(latency_fsnotify_init);
1726 
1727 void latency_fsnotify(struct trace_array *tr)
1728 {
1729 	if (!fsnotify_wq)
1730 		return;
1731 	/*
1732 	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1733 	 * possible that we are called from __schedule() or do_idle(), which
1734 	 * could cause a deadlock.
1735 	 */
1736 	irq_work_queue(&tr->fsnotify_irqwork);
1737 }
1738 
1739 /*
1740  * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1741  *  defined(CONFIG_FSNOTIFY)
1742  */
1743 #else
1744 
1745 #define trace_create_maxlat_file(tr, d_tracer)				\
1746 	trace_create_file("tracing_max_latency", 0644, d_tracer,	\
1747 			  &tr->max_latency, &tracing_max_lat_fops)
1748 
1749 #endif
1750 
1751 #ifdef CONFIG_TRACER_MAX_TRACE
1752 /*
1753  * Copy the new maximum trace into the separate maximum-trace
1754  * structure. (this way the maximum trace is permanently saved,
1755  * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1756  */
1757 static void
1758 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1759 {
1760 	struct array_buffer *trace_buf = &tr->array_buffer;
1761 	struct array_buffer *max_buf = &tr->max_buffer;
1762 	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1763 	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1764 
1765 	max_buf->cpu = cpu;
1766 	max_buf->time_start = data->preempt_timestamp;
1767 
1768 	max_data->saved_latency = tr->max_latency;
1769 	max_data->critical_start = data->critical_start;
1770 	max_data->critical_end = data->critical_end;
1771 
1772 	strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1773 	max_data->pid = tsk->pid;
1774 	/*
1775 	 * If tsk == current, then use current_uid(), as that does not use
1776 	 * RCU. The irq tracer can be called out of RCU scope.
1777 	 */
1778 	if (tsk == current)
1779 		max_data->uid = current_uid();
1780 	else
1781 		max_data->uid = task_uid(tsk);
1782 
1783 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1784 	max_data->policy = tsk->policy;
1785 	max_data->rt_priority = tsk->rt_priority;
1786 
1787 	/* record this tasks comm */
1788 	tracing_record_cmdline(tsk);
1789 	latency_fsnotify(tr);
1790 }
1791 
1792 /**
1793  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1794  * @tr: tracer
1795  * @tsk: the task with the latency
1796  * @cpu: The cpu that initiated the trace.
1797  * @cond_data: User data associated with a conditional snapshot
1798  *
1799  * Flip the buffers between the @tr and the max_tr and record information
1800  * about which task was the cause of this latency.
1801  */
1802 void
1803 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1804 	      void *cond_data)
1805 {
1806 	if (tr->stop_count)
1807 		return;
1808 
1809 	WARN_ON_ONCE(!irqs_disabled());
1810 
1811 	if (!tr->allocated_snapshot) {
1812 		/* Only the nop tracer should hit this when disabling */
1813 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1814 		return;
1815 	}
1816 
1817 	arch_spin_lock(&tr->max_lock);
1818 
1819 	/* Inherit the recordable setting from array_buffer */
1820 	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1821 		ring_buffer_record_on(tr->max_buffer.buffer);
1822 	else
1823 		ring_buffer_record_off(tr->max_buffer.buffer);
1824 
1825 #ifdef CONFIG_TRACER_SNAPSHOT
1826 	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1827 		goto out_unlock;
1828 #endif
1829 	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1830 
1831 	__update_max_tr(tr, tsk, cpu);
1832 
1833  out_unlock:
1834 	arch_spin_unlock(&tr->max_lock);
1835 }
1836 
1837 /**
1838  * update_max_tr_single - only copy one trace over, and reset the rest
1839  * @tr: tracer
1840  * @tsk: task with the latency
1841  * @cpu: the cpu of the buffer to copy.
1842  *
1843  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1844  */
1845 void
1846 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1847 {
1848 	int ret;
1849 
1850 	if (tr->stop_count)
1851 		return;
1852 
1853 	WARN_ON_ONCE(!irqs_disabled());
1854 	if (!tr->allocated_snapshot) {
1855 		/* Only the nop tracer should hit this when disabling */
1856 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1857 		return;
1858 	}
1859 
1860 	arch_spin_lock(&tr->max_lock);
1861 
1862 	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1863 
1864 	if (ret == -EBUSY) {
1865 		/*
1866 		 * We failed to swap the buffer due to a commit taking
1867 		 * place on this CPU. We fail to record, but we reset
1868 		 * the max trace buffer (no one writes directly to it)
1869 		 * and flag that it failed.
1870 		 */
1871 		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1872 			"Failed to swap buffers due to commit in progress\n");
1873 	}
1874 
1875 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1876 
1877 	__update_max_tr(tr, tsk, cpu);
1878 	arch_spin_unlock(&tr->max_lock);
1879 }
1880 #endif /* CONFIG_TRACER_MAX_TRACE */
1881 
1882 static int wait_on_pipe(struct trace_iterator *iter, int full)
1883 {
1884 	/* Iterators are static; they should be filled or empty */
1885 	if (trace_buffer_iter(iter, iter->cpu_file))
1886 		return 0;
1887 
1888 	return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1889 				full);
1890 }
1891 
1892 #ifdef CONFIG_FTRACE_STARTUP_TEST
1893 static bool selftests_can_run;
1894 
1895 struct trace_selftests {
1896 	struct list_head		list;
1897 	struct tracer			*type;
1898 };
1899 
1900 static LIST_HEAD(postponed_selftests);
1901 
1902 static int save_selftest(struct tracer *type)
1903 {
1904 	struct trace_selftests *selftest;
1905 
1906 	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1907 	if (!selftest)
1908 		return -ENOMEM;
1909 
1910 	selftest->type = type;
1911 	list_add(&selftest->list, &postponed_selftests);
1912 	return 0;
1913 }
1914 
1915 static int run_tracer_selftest(struct tracer *type)
1916 {
1917 	struct trace_array *tr = &global_trace;
1918 	struct tracer *saved_tracer = tr->current_trace;
1919 	int ret;
1920 
1921 	if (!type->selftest || tracing_selftest_disabled)
1922 		return 0;
1923 
1924 	/*
1925 	 * If a tracer registers early in boot up (before scheduling is
1926 	 * initialized and such), then do not run its selftests yet.
1927 	 * Instead, run it a little later in the boot process.
1928 	 */
1929 	if (!selftests_can_run)
1930 		return save_selftest(type);
1931 
1932 	if (!tracing_is_on()) {
1933 		pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1934 			type->name);
1935 		return 0;
1936 	}
1937 
1938 	/*
1939 	 * Run a selftest on this tracer.
1940 	 * Here we reset the trace buffer, and set the current
1941 	 * tracer to be this tracer. The tracer can then run some
1942 	 * internal tracing to verify that everything is in order.
1943 	 * If we fail, we do not register this tracer.
1944 	 */
1945 	tracing_reset_online_cpus(&tr->array_buffer);
1946 
1947 	tr->current_trace = type;
1948 
1949 #ifdef CONFIG_TRACER_MAX_TRACE
1950 	if (type->use_max_tr) {
1951 		/* If we expanded the buffers, make sure the max is expanded too */
1952 		if (ring_buffer_expanded)
1953 			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1954 					   RING_BUFFER_ALL_CPUS);
1955 		tr->allocated_snapshot = true;
1956 	}
1957 #endif
1958 
1959 	/* the test is responsible for initializing and enabling */
1960 	pr_info("Testing tracer %s: ", type->name);
1961 	ret = type->selftest(type, tr);
1962 	/* the test is responsible for resetting too */
1963 	tr->current_trace = saved_tracer;
1964 	if (ret) {
1965 		printk(KERN_CONT "FAILED!\n");
1966 		/* Add the warning after printing 'FAILED' */
1967 		WARN_ON(1);
1968 		return -1;
1969 	}
1970 	/* Only reset on passing, to avoid touching corrupted buffers */
1971 	tracing_reset_online_cpus(&tr->array_buffer);
1972 
1973 #ifdef CONFIG_TRACER_MAX_TRACE
1974 	if (type->use_max_tr) {
1975 		tr->allocated_snapshot = false;
1976 
1977 		/* Shrink the max buffer again */
1978 		if (ring_buffer_expanded)
1979 			ring_buffer_resize(tr->max_buffer.buffer, 1,
1980 					   RING_BUFFER_ALL_CPUS);
1981 	}
1982 #endif
1983 
1984 	printk(KERN_CONT "PASSED\n");
1985 	return 0;
1986 }
1987 
1988 static __init int init_trace_selftests(void)
1989 {
1990 	struct trace_selftests *p, *n;
1991 	struct tracer *t, **last;
1992 	int ret;
1993 
1994 	selftests_can_run = true;
1995 
1996 	mutex_lock(&trace_types_lock);
1997 
1998 	if (list_empty(&postponed_selftests))
1999 		goto out;
2000 
2001 	pr_info("Running postponed tracer tests:\n");
2002 
2003 	tracing_selftest_running = true;
2004 	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2005 		/* This loop can take minutes when sanitizers are enabled, so
2006 		 * let's make sure we allow RCU processing.
2007 		 */
2008 		cond_resched();
2009 		ret = run_tracer_selftest(p->type);
2010 		/* If the test fails, then warn and remove from available_tracers */
2011 		if (ret < 0) {
2012 			WARN(1, "tracer: %s failed selftest, disabling\n",
2013 			     p->type->name);
2014 			last = &trace_types;
2015 			for (t = trace_types; t; t = t->next) {
2016 				if (t == p->type) {
2017 					*last = t->next;
2018 					break;
2019 				}
2020 				last = &t->next;
2021 			}
2022 		}
2023 		list_del(&p->list);
2024 		kfree(p);
2025 	}
2026 	tracing_selftest_running = false;
2027 
2028  out:
2029 	mutex_unlock(&trace_types_lock);
2030 
2031 	return 0;
2032 }
2033 core_initcall(init_trace_selftests);
2034 #else
2035 static inline int run_tracer_selftest(struct tracer *type)
2036 {
2037 	return 0;
2038 }
2039 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2040 
2041 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2042 
2043 static void __init apply_trace_boot_options(void);
2044 
2045 /**
2046  * register_tracer - register a tracer with the ftrace system.
2047  * @type: the plugin for the tracer
2048  *
2049  * Register a new plugin tracer.
2050  */
2051 int __init register_tracer(struct tracer *type)
2052 {
2053 	struct tracer *t;
2054 	int ret = 0;
2055 
2056 	if (!type->name) {
2057 		pr_info("Tracer must have a name\n");
2058 		return -1;
2059 	}
2060 
2061 	if (strlen(type->name) >= MAX_TRACER_SIZE) {
2062 		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2063 		return -1;
2064 	}
2065 
2066 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
2067 		pr_warn("Can not register tracer %s due to lockdown\n",
2068 			   type->name);
2069 		return -EPERM;
2070 	}
2071 
2072 	mutex_lock(&trace_types_lock);
2073 
2074 	tracing_selftest_running = true;
2075 
2076 	for (t = trace_types; t; t = t->next) {
2077 		if (strcmp(type->name, t->name) == 0) {
2078 			/* already found */
2079 			pr_info("Tracer %s already registered\n",
2080 				type->name);
2081 			ret = -1;
2082 			goto out;
2083 		}
2084 	}
2085 
2086 	if (!type->set_flag)
2087 		type->set_flag = &dummy_set_flag;
2088 	if (!type->flags) {
2089 		/* Allocate a dummy tracer_flags */
2090 		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2091 		if (!type->flags) {
2092 			ret = -ENOMEM;
2093 			goto out;
2094 		}
2095 		type->flags->val = 0;
2096 		type->flags->opts = dummy_tracer_opt;
2097 	} else
2098 		if (!type->flags->opts)
2099 			type->flags->opts = dummy_tracer_opt;
2100 
2101 	/* store the tracer for __set_tracer_option */
2102 	type->flags->trace = type;
2103 
2104 	ret = run_tracer_selftest(type);
2105 	if (ret < 0)
2106 		goto out;
2107 
2108 	type->next = trace_types;
2109 	trace_types = type;
2110 	add_tracer_options(&global_trace, type);
2111 
2112  out:
2113 	tracing_selftest_running = false;
2114 	mutex_unlock(&trace_types_lock);
2115 
2116 	if (ret || !default_bootup_tracer)
2117 		goto out_unlock;
2118 
2119 	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2120 		goto out_unlock;
2121 
2122 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2123 	/* Do we want this tracer to start on bootup? */
2124 	tracing_set_tracer(&global_trace, type->name);
2125 	default_bootup_tracer = NULL;
2126 
2127 	apply_trace_boot_options();
2128 
2129 	/* Disable other selftests, since running this tracer will break them. */
2130 	disable_tracing_selftest("running a tracer");
2131 
2132  out_unlock:
2133 	return ret;
2134 }
2135 
2136 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2137 {
2138 	struct trace_buffer *buffer = buf->buffer;
2139 
2140 	if (!buffer)
2141 		return;
2142 
2143 	ring_buffer_record_disable(buffer);
2144 
2145 	/* Make sure all commits have finished */
2146 	synchronize_rcu();
2147 	ring_buffer_reset_cpu(buffer, cpu);
2148 
2149 	ring_buffer_record_enable(buffer);
2150 }
2151 
2152 void tracing_reset_online_cpus(struct array_buffer *buf)
2153 {
2154 	struct trace_buffer *buffer = buf->buffer;
2155 
2156 	if (!buffer)
2157 		return;
2158 
2159 	ring_buffer_record_disable(buffer);
2160 
2161 	/* Make sure all commits have finished */
2162 	synchronize_rcu();
2163 
2164 	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2165 
2166 	ring_buffer_reset_online_cpus(buffer);
2167 
2168 	ring_buffer_record_enable(buffer);
2169 }
2170 
2171 /* Must have trace_types_lock held */
2172 void tracing_reset_all_online_cpus(void)
2173 {
2174 	struct trace_array *tr;
2175 
2176 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2177 		if (!tr->clear_trace)
2178 			continue;
2179 		tr->clear_trace = false;
2180 		tracing_reset_online_cpus(&tr->array_buffer);
2181 #ifdef CONFIG_TRACER_MAX_TRACE
2182 		tracing_reset_online_cpus(&tr->max_buffer);
2183 #endif
2184 	}
2185 }
2186 
2187 static int *tgid_map;
2188 
2189 #define SAVED_CMDLINES_DEFAULT 128
2190 #define NO_CMDLINE_MAP UINT_MAX
2191 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2192 struct saved_cmdlines_buffer {
2193 	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2194 	unsigned *map_cmdline_to_pid;
2195 	unsigned cmdline_num;
2196 	int cmdline_idx;
2197 	char *saved_cmdlines;
2198 };
2199 static struct saved_cmdlines_buffer *savedcmd;
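
/*
 * Illustrative sketch (not part of the original code): for a task with
 * pid 1234 whose comm was saved at slot idx, the maps above relate as
 *
 *	savedcmd->map_pid_to_cmdline[1234] == idx
 *	savedcmd->map_cmdline_to_pid[idx]  == 1234
 *
 * and the comm string itself lives at
 * &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN].
 */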
2200 
2201 /* temporarily disable recording */
2202 static atomic_t trace_record_taskinfo_disabled __read_mostly;
2203 
2204 static inline char *get_saved_cmdlines(int idx)
2205 {
2206 	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2207 }
2208 
2209 static inline void set_cmdline(int idx, const char *cmdline)
2210 {
2211 	strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2212 }
2213 
2214 static int allocate_cmdlines_buffer(unsigned int val,
2215 				    struct saved_cmdlines_buffer *s)
2216 {
2217 	s->map_cmdline_to_pid = kmalloc_array(val,
2218 					      sizeof(*s->map_cmdline_to_pid),
2219 					      GFP_KERNEL);
2220 	if (!s->map_cmdline_to_pid)
2221 		return -ENOMEM;
2222 
2223 	s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2224 	if (!s->saved_cmdlines) {
2225 		kfree(s->map_cmdline_to_pid);
2226 		return -ENOMEM;
2227 	}
2228 
2229 	s->cmdline_idx = 0;
2230 	s->cmdline_num = val;
2231 	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2232 	       sizeof(s->map_pid_to_cmdline));
2233 	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2234 	       val * sizeof(*s->map_cmdline_to_pid));
2235 
2236 	return 0;
2237 }
2238 
2239 static int trace_create_savedcmd(void)
2240 {
2241 	int ret;
2242 
2243 	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2244 	if (!savedcmd)
2245 		return -ENOMEM;
2246 
2247 	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2248 	if (ret < 0) {
2249 		kfree(savedcmd);
2250 		savedcmd = NULL;
2251 		return -ENOMEM;
2252 	}
2253 
2254 	return 0;
2255 }
2256 
2257 int is_tracing_stopped(void)
2258 {
2259 	return global_trace.stop_count;
2260 }
2261 
2262 /**
2263  * tracing_start - quick start of the tracer
2264  *
2265  * If tracing is enabled but was stopped by tracing_stop,
2266  * this will start the tracer back up.
2267  */
2268 void tracing_start(void)
2269 {
2270 	struct trace_buffer *buffer;
2271 	unsigned long flags;
2272 
2273 	if (tracing_disabled)
2274 		return;
2275 
2276 	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2277 	if (--global_trace.stop_count) {
2278 		if (global_trace.stop_count < 0) {
2279 			/* Someone screwed up their debugging */
2280 			WARN_ON_ONCE(1);
2281 			global_trace.stop_count = 0;
2282 		}
2283 		goto out;
2284 	}
2285 
2286 	/* Prevent the buffers from switching */
2287 	arch_spin_lock(&global_trace.max_lock);
2288 
2289 	buffer = global_trace.array_buffer.buffer;
2290 	if (buffer)
2291 		ring_buffer_record_enable(buffer);
2292 
2293 #ifdef CONFIG_TRACER_MAX_TRACE
2294 	buffer = global_trace.max_buffer.buffer;
2295 	if (buffer)
2296 		ring_buffer_record_enable(buffer);
2297 #endif
2298 
2299 	arch_spin_unlock(&global_trace.max_lock);
2300 
2301  out:
2302 	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2303 }
2304 
2305 static void tracing_start_tr(struct trace_array *tr)
2306 {
2307 	struct trace_buffer *buffer;
2308 	unsigned long flags;
2309 
2310 	if (tracing_disabled)
2311 		return;
2312 
2313 	/* If global, we need to also start the max tracer */
2314 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2315 		return tracing_start();
2316 
2317 	raw_spin_lock_irqsave(&tr->start_lock, flags);
2318 
2319 	if (--tr->stop_count) {
2320 		if (tr->stop_count < 0) {
2321 			/* Someone screwed up their debugging */
2322 			WARN_ON_ONCE(1);
2323 			tr->stop_count = 0;
2324 		}
2325 		goto out;
2326 	}
2327 
2328 	buffer = tr->array_buffer.buffer;
2329 	if (buffer)
2330 		ring_buffer_record_enable(buffer);
2331 
2332  out:
2333 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2334 }
2335 
2336 /**
2337  * tracing_stop - quick stop of the tracer
2338  *
2339  * Light weight way to stop tracing. Use in conjunction with
2340  * tracing_start.
2341  */
2342 void tracing_stop(void)
2343 {
2344 	struct trace_buffer *buffer;
2345 	unsigned long flags;
2346 
2347 	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2348 	if (global_trace.stop_count++)
2349 		goto out;
2350 
2351 	/* Prevent the buffers from switching */
2352 	arch_spin_lock(&global_trace.max_lock);
2353 
2354 	buffer = global_trace.array_buffer.buffer;
2355 	if (buffer)
2356 		ring_buffer_record_disable(buffer);
2357 
2358 #ifdef CONFIG_TRACER_MAX_TRACE
2359 	buffer = global_trace.max_buffer.buffer;
2360 	if (buffer)
2361 		ring_buffer_record_disable(buffer);
2362 #endif
2363 
2364 	arch_spin_unlock(&global_trace.max_lock);
2365 
2366  out:
2367 	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2368 }
2369 
2370 static void tracing_stop_tr(struct trace_array *tr)
2371 {
2372 	struct trace_buffer *buffer;
2373 	unsigned long flags;
2374 
2375 	/* If global, we need to also stop the max tracer */
2376 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2377 		return tracing_stop();
2378 
2379 	raw_spin_lock_irqsave(&tr->start_lock, flags);
2380 	if (tr->stop_count++)
2381 		goto out;
2382 
2383 	buffer = tr->array_buffer.buffer;
2384 	if (buffer)
2385 		ring_buffer_record_disable(buffer);
2386 
2387  out:
2388 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2389 }
2390 
2391 static int trace_save_cmdline(struct task_struct *tsk)
2392 {
2393 	unsigned pid, idx;
2394 
2395 	/* treat recording of idle task as a success */
2396 	if (!tsk->pid)
2397 		return 1;
2398 
2399 	if (unlikely(tsk->pid > PID_MAX_DEFAULT))
2400 		return 0;
2401 
2402 	/*
2403 	 * It's not the end of the world if we don't get
2404 	 * the lock, but we also don't want to spin
2405 	 * nor do we want to disable interrupts,
2406 	 * so if we miss here, then better luck next time.
2407 	 */
2408 	if (!arch_spin_trylock(&trace_cmdline_lock))
2409 		return 0;
2410 
2411 	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2412 	if (idx == NO_CMDLINE_MAP) {
2413 		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2414 
2415 		/*
2416 		 * Check whether the cmdline buffer at idx has a pid
2417 		 * mapped. We are going to overwrite that entry so we
2418 		 * need to clear the map_pid_to_cmdline. Otherwise we
2419 		 * would read the new comm for the old pid.
2420 		 */
2421 		pid = savedcmd->map_cmdline_to_pid[idx];
2422 		if (pid != NO_CMDLINE_MAP)
2423 			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
2424 
2425 		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2426 		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
2427 
2428 		savedcmd->cmdline_idx = idx;
2429 	}
2430 
2431 	set_cmdline(idx, tsk->comm);
2432 
2433 	arch_spin_unlock(&trace_cmdline_lock);
2434 
2435 	return 1;
2436 }
2437 
2438 static void __trace_find_cmdline(int pid, char comm[])
2439 {
2440 	unsigned map;
2441 
2442 	if (!pid) {
2443 		strcpy(comm, "<idle>");
2444 		return;
2445 	}
2446 
2447 	if (WARN_ON_ONCE(pid < 0)) {
2448 		strcpy(comm, "<XXX>");
2449 		return;
2450 	}
2451 
2452 	if (pid > PID_MAX_DEFAULT) {
2453 		strcpy(comm, "<...>");
2454 		return;
2455 	}
2456 
2457 	map = savedcmd->map_pid_to_cmdline[pid];
2458 	if (map != NO_CMDLINE_MAP)
2459 		strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2460 	else
2461 		strcpy(comm, "<...>");
2462 }
2463 
2464 void trace_find_cmdline(int pid, char comm[])
2465 {
2466 	preempt_disable();
2467 	arch_spin_lock(&trace_cmdline_lock);
2468 
2469 	__trace_find_cmdline(pid, comm);
2470 
2471 	arch_spin_unlock(&trace_cmdline_lock);
2472 	preempt_enable();
2473 }
2474 
2475 int trace_find_tgid(int pid)
2476 {
2477 	if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2478 		return 0;
2479 
2480 	return tgid_map[pid];
2481 }
2482 
2483 static int trace_save_tgid(struct task_struct *tsk)
2484 {
2485 	/* treat recording of idle task as a success */
2486 	if (!tsk->pid)
2487 		return 1;
2488 
2489 	if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2490 		return 0;
2491 
2492 	tgid_map[tsk->pid] = tsk->tgid;
2493 	return 1;
2494 }
2495 
2496 static bool tracing_record_taskinfo_skip(int flags)
2497 {
2498 	if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2499 		return true;
2500 	if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2501 		return true;
2502 	if (!__this_cpu_read(trace_taskinfo_save))
2503 		return true;
2504 	return false;
2505 }
2506 
2507 /**
2508  * tracing_record_taskinfo - record the task info of a task
2509  *
2510  * @task:  task to record
2511  * @flags: TRACE_RECORD_CMDLINE for recording comm
2512  *         TRACE_RECORD_TGID for recording tgid
2513  */
2514 void tracing_record_taskinfo(struct task_struct *task, int flags)
2515 {
2516 	bool done;
2517 
2518 	if (tracing_record_taskinfo_skip(flags))
2519 		return;
2520 
2521 	/*
2522 	 * Record as much task information as possible. If some fail, continue
2523 	 * to try to record the others.
2524 	 */
2525 	done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2526 	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2527 
2528 	/* If recording any information failed, retry again soon. */
2529 	if (!done)
2530 		return;
2531 
2532 	__this_cpu_write(trace_taskinfo_save, false);
2533 }
2534 
2535 /**
2536  * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2537  *
2538  * @prev: previous task during sched_switch
2539  * @next: next task during sched_switch
2540  * @flags: TRACE_RECORD_CMDLINE for recording comm
2541  *         TRACE_RECORD_TGID for recording tgid
2542  */
2543 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2544 					  struct task_struct *next, int flags)
2545 {
2546 	bool done;
2547 
2548 	if (tracing_record_taskinfo_skip(flags))
2549 		return;
2550 
2551 	/*
2552 	 * Record as much task information as possible. If some fail, continue
2553 	 * to try to record the others.
2554 	 */
2555 	done  = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2556 	done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2557 	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2558 	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2559 
2560 	/* If recording any information failed, retry again soon. */
2561 	if (!done)
2562 		return;
2563 
2564 	__this_cpu_write(trace_taskinfo_save, false);
2565 }
2566 
2567 /* Helpers to record a specific task information */
2568 void tracing_record_cmdline(struct task_struct *task)
2569 {
2570 	tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2571 }
2572 
2573 void tracing_record_tgid(struct task_struct *task)
2574 {
2575 	tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2576 }
2577 
2578 /*
2579  * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2580  * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2581  * simplifies those functions and keeps them in sync.
2582  */
2583 enum print_line_t trace_handle_return(struct trace_seq *s)
2584 {
2585 	return trace_seq_has_overflowed(s) ?
2586 		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2587 }
2588 EXPORT_SYMBOL_GPL(trace_handle_return);
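
/*
 * Usage sketch (hypothetical callback, not part of this file): an event
 * output handler typically ends with the helper above so that a
 * trace_seq overflow is reported as TRACE_TYPE_PARTIAL_LINE:
 *
 *	static enum print_line_t my_event_trace(struct trace_iterator *iter,
 *						int flags, struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "my event\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 */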
2589 
2590 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2591 {
2592 	unsigned int trace_flags = irqs_status;
2593 	unsigned int pc;
2594 
2595 	pc = preempt_count();
2596 
2597 	if (pc & NMI_MASK)
2598 		trace_flags |= TRACE_FLAG_NMI;
2599 	if (pc & HARDIRQ_MASK)
2600 		trace_flags |= TRACE_FLAG_HARDIRQ;
2601 	if (in_serving_softirq())
2602 		trace_flags |= TRACE_FLAG_SOFTIRQ;
2603 
2604 	if (tif_need_resched())
2605 		trace_flags |= TRACE_FLAG_NEED_RESCHED;
2606 	if (test_preempt_need_resched())
2607 		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2608 	return (trace_flags << 16) | (pc & 0xff);
2609 }
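
/*
 * Layout sketch of the packed value returned above (derived from the
 * shift and mask used, not an additional ABI guarantee): bits 0-7 carry
 * the preempt count and bits 16 and up carry the TRACE_FLAG_* bits, so
 * a hard-interrupt context with a preempt count of 1 packs roughly as
 * (TRACE_FLAG_HARDIRQ << 16) | 1.
 */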
2610 
2611 struct ring_buffer_event *
2612 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2613 			  int type,
2614 			  unsigned long len,
2615 			  unsigned int trace_ctx)
2616 {
2617 	return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2618 }
2619 
2620 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2621 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2622 static int trace_buffered_event_ref;
2623 
2624 /**
2625  * trace_buffered_event_enable - enable buffering events
2626  *
2627  * When events are being filtered, it is quicker to use a temporary
2628  * buffer to write the event data into if there's a likely chance
2629  * that it will not be committed. The discard of the ring buffer
2630  * is not as fast as committing, and is much slower than copying
2631  * a commit.
2632  *
2633  * When an event is to be filtered, per-CPU buffers are allocated to
2634  * write the event data into. If the event is then filtered and
2635  * discarded, it is simply dropped; otherwise, the entire data is
2636  * committed in one shot.
2637  */
2638 void trace_buffered_event_enable(void)
2639 {
2640 	struct ring_buffer_event *event;
2641 	struct page *page;
2642 	int cpu;
2643 
2644 	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2645 
2646 	if (trace_buffered_event_ref++)
2647 		return;
2648 
2649 	for_each_tracing_cpu(cpu) {
2650 		page = alloc_pages_node(cpu_to_node(cpu),
2651 					GFP_KERNEL | __GFP_NORETRY, 0);
2652 		if (!page)
2653 			goto failed;
2654 
2655 		event = page_address(page);
2656 		memset(event, 0, sizeof(*event));
2657 
2658 		per_cpu(trace_buffered_event, cpu) = event;
2659 
2660 		preempt_disable();
2661 		if (cpu == smp_processor_id() &&
2662 		    __this_cpu_read(trace_buffered_event) !=
2663 		    per_cpu(trace_buffered_event, cpu))
2664 			WARN_ON_ONCE(1);
2665 		preempt_enable();
2666 	}
2667 
2668 	return;
2669  failed:
2670 	trace_buffered_event_disable();
2671 }
2672 
2673 static void enable_trace_buffered_event(void *data)
2674 {
2675 	/* Probably not needed, but do it anyway */
2676 	smp_rmb();
2677 	this_cpu_dec(trace_buffered_event_cnt);
2678 }
2679 
2680 static void disable_trace_buffered_event(void *data)
2681 {
2682 	this_cpu_inc(trace_buffered_event_cnt);
2683 }
2684 
2685 /**
2686  * trace_buffered_event_disable - disable buffering events
2687  *
2688  * When a filter is removed, it is faster to not use the buffered
2689  * events, and to commit directly into the ring buffer. Free up
2690  * the temp buffers when there are no more users. This requires
2691  * special synchronization with current events.
2692  */
2693 void trace_buffered_event_disable(void)
2694 {
2695 	int cpu;
2696 
2697 	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2698 
2699 	if (WARN_ON_ONCE(!trace_buffered_event_ref))
2700 		return;
2701 
2702 	if (--trace_buffered_event_ref)
2703 		return;
2704 
2705 	preempt_disable();
2706 	/* For each CPU, set the buffer as used. */
2707 	smp_call_function_many(tracing_buffer_mask,
2708 			       disable_trace_buffered_event, NULL, 1);
2709 	preempt_enable();
2710 
2711 	/* Wait for all current users to finish */
2712 	synchronize_rcu();
2713 
2714 	for_each_tracing_cpu(cpu) {
2715 		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2716 		per_cpu(trace_buffered_event, cpu) = NULL;
2717 	}
2718 	/*
2719 	 * Make sure trace_buffered_event is NULL before clearing
2720 	 * trace_buffered_event_cnt.
2721 	 */
2722 	smp_wmb();
2723 
2724 	preempt_disable();
2725 	/* Do the work on each CPU */
2726 	smp_call_function_many(tracing_buffer_mask,
2727 			       enable_trace_buffered_event, NULL, 1);
2728 	preempt_enable();
2729 }
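
/*
 * Pairing sketch (illustrative; both functions above assert that
 * event_mutex is held): the event filter code is expected to call
 *
 *	trace_buffered_event_enable();
 *
 * when a filter is attached to an event file, and
 *
 *	trace_buffered_event_disable();
 *
 * when the filter is removed, so the per-CPU temp pages only exist
 * while at least one filter user remains.
 */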
2730 
2731 static struct trace_buffer *temp_buffer;
2732 
2733 struct ring_buffer_event *
2734 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2735 			  struct trace_event_file *trace_file,
2736 			  int type, unsigned long len,
2737 			  unsigned int trace_ctx)
2738 {
2739 	struct ring_buffer_event *entry;
2740 	struct trace_array *tr = trace_file->tr;
2741 	int val;
2742 
2743 	*current_rb = tr->array_buffer.buffer;
2744 
2745 	if (!tr->no_filter_buffering_ref &&
2746 	    (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2747 	    (entry = this_cpu_read(trace_buffered_event))) {
2748 		/* Try to use the per cpu buffer first */
2749 		val = this_cpu_inc_return(trace_buffered_event_cnt);
2750 		if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
2751 			trace_event_setup(entry, type, trace_ctx);
2752 			entry->array[0] = len;
2753 			return entry;
2754 		}
2755 		this_cpu_dec(trace_buffered_event_cnt);
2756 	}
2757 
2758 	entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2759 					    trace_ctx);
2760 	/*
2761 	 * If tracing is off, but we have triggers enabled,
2762 	 * we still need to look at the event data. Use the temp_buffer
2763 	 * to store the trace event for the trigger to use. It's recursion
2764 	 * safe and will not be recorded anywhere.
2765 	 */
2766 	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2767 		*current_rb = temp_buffer;
2768 		entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2769 						    trace_ctx);
2770 	}
2771 	return entry;
2772 }
2773 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2774 
2775 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2776 static DEFINE_MUTEX(tracepoint_printk_mutex);
2777 
2778 static void output_printk(struct trace_event_buffer *fbuffer)
2779 {
2780 	struct trace_event_call *event_call;
2781 	struct trace_event_file *file;
2782 	struct trace_event *event;
2783 	unsigned long flags;
2784 	struct trace_iterator *iter = tracepoint_print_iter;
2785 
2786 	/* We should never get here if iter is NULL */
2787 	if (WARN_ON_ONCE(!iter))
2788 		return;
2789 
2790 	event_call = fbuffer->trace_file->event_call;
2791 	if (!event_call || !event_call->event.funcs ||
2792 	    !event_call->event.funcs->trace)
2793 		return;
2794 
2795 	file = fbuffer->trace_file;
2796 	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2797 	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2798 	     !filter_match_preds(file->filter, fbuffer->entry)))
2799 		return;
2800 
2801 	event = &fbuffer->trace_file->event_call->event;
2802 
2803 	spin_lock_irqsave(&tracepoint_iter_lock, flags);
2804 	trace_seq_init(&iter->seq);
2805 	iter->ent = fbuffer->entry;
2806 	event_call->event.funcs->trace(iter, 0, event);
2807 	trace_seq_putc(&iter->seq, 0);
2808 	printk("%s", iter->seq.buffer);
2809 
2810 	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2811 }
2812 
2813 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2814 			     void *buffer, size_t *lenp,
2815 			     loff_t *ppos)
2816 {
2817 	int save_tracepoint_printk;
2818 	int ret;
2819 
2820 	mutex_lock(&tracepoint_printk_mutex);
2821 	save_tracepoint_printk = tracepoint_printk;
2822 
2823 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2824 
2825 	/*
2826 	 * This will force an early exit, as tracepoint_printk
2827 	 * is always zero when tracepoint_print_iter is not allocated.
2828 	 */
2829 	if (!tracepoint_print_iter)
2830 		tracepoint_printk = 0;
2831 
2832 	if (save_tracepoint_printk == tracepoint_printk)
2833 		goto out;
2834 
2835 	if (tracepoint_printk)
2836 		static_key_enable(&tracepoint_printk_key.key);
2837 	else
2838 		static_key_disable(&tracepoint_printk_key.key);
2839 
2840  out:
2841 	mutex_unlock(&tracepoint_printk_mutex);
2842 
2843 	return ret;
2844 }
2845 
2846 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2847 {
2848 	if (static_key_false(&tracepoint_printk_key.key))
2849 		output_printk(fbuffer);
2850 
2851 	if (static_branch_unlikely(&trace_event_exports_enabled))
2852 		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2853 	event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
2854 				    fbuffer->event, fbuffer->entry,
2855 				    fbuffer->trace_ctx, fbuffer->regs);
2856 }
2857 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2858 
2859 /*
2860  * Skip 3:
2861  *
2862  *   trace_buffer_unlock_commit_regs()
2863  *   trace_event_buffer_commit()
2864  *   trace_event_raw_event_xxx()
2865  */
2866 # define STACK_SKIP 3
2867 
2868 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2869 				     struct trace_buffer *buffer,
2870 				     struct ring_buffer_event *event,
2871 				     unsigned int trace_ctx,
2872 				     struct pt_regs *regs)
2873 {
2874 	__buffer_unlock_commit(buffer, event);
2875 
2876 	/*
2877 	 * If regs is not set, then skip the necessary functions.
2878 	 * Note, we can still get here via blktrace, wakeup tracer
2879 	 * and mmiotrace, but that's ok if they lose a function or
2880 	 * two. They are not that meaningful.
2881 	 */
2882 	ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2883 	ftrace_trace_userstack(tr, buffer, trace_ctx);
2884 }
2885 
2886 /*
2887  * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2888  */
2889 void
2890 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2891 				   struct ring_buffer_event *event)
2892 {
2893 	__buffer_unlock_commit(buffer, event);
2894 }
2895 
2896 void
2897 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2898 	       parent_ip, unsigned int trace_ctx)
2899 {
2900 	struct trace_event_call *call = &event_function;
2901 	struct trace_buffer *buffer = tr->array_buffer.buffer;
2902 	struct ring_buffer_event *event;
2903 	struct ftrace_entry *entry;
2904 
2905 	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2906 					    trace_ctx);
2907 	if (!event)
2908 		return;
2909 	entry	= ring_buffer_event_data(event);
2910 	entry->ip			= ip;
2911 	entry->parent_ip		= parent_ip;
2912 
2913 	if (!call_filter_check_discard(call, entry, buffer, event)) {
2914 		if (static_branch_unlikely(&trace_function_exports_enabled))
2915 			ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2916 		__buffer_unlock_commit(buffer, event);
2917 	}
2918 }
2919 
2920 #ifdef CONFIG_STACKTRACE
2921 
2922 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2923 #define FTRACE_KSTACK_NESTING	4
2924 
2925 #define FTRACE_KSTACK_ENTRIES	(PAGE_SIZE / FTRACE_KSTACK_NESTING)
2926 
2927 struct ftrace_stack {
2928 	unsigned long		calls[FTRACE_KSTACK_ENTRIES];
2929 };
2930 
2931 
2932 struct ftrace_stacks {
2933 	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];
2934 };
2935 
2936 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2937 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2938 
2939 static void __ftrace_trace_stack(struct trace_buffer *buffer,
2940 				 unsigned int trace_ctx,
2941 				 int skip, struct pt_regs *regs)
2942 {
2943 	struct trace_event_call *call = &event_kernel_stack;
2944 	struct ring_buffer_event *event;
2945 	unsigned int size, nr_entries;
2946 	struct ftrace_stack *fstack;
2947 	struct stack_entry *entry;
2948 	int stackidx;
2949 
2950 	/*
2951 	 * Add one, for this function and the call to save_stack_trace().
2952 	 * If regs is set, then these functions will not be in the way.
2953 	 */
2954 #ifndef CONFIG_UNWINDER_ORC
2955 	if (!regs)
2956 		skip++;
2957 #endif
2958 
2959 	preempt_disable_notrace();
2960 
2961 	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2962 
2963 	/* This should never happen. If it does, yell once and skip */
2964 	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2965 		goto out;
2966 
2967 	/*
2968 	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2969 	 * interrupt will either see the value pre increment or post
2970 	 * increment. If the interrupt happens pre increment it will have
2971 	 * restored the counter when it returns.  We just need a barrier to
2972 	 * keep gcc from moving things around.
2973 	 */
2974 	barrier();
2975 
2976 	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2977 	size = ARRAY_SIZE(fstack->calls);
2978 
2979 	if (regs) {
2980 		nr_entries = stack_trace_save_regs(regs, fstack->calls,
2981 						   size, skip);
2982 	} else {
2983 		nr_entries = stack_trace_save(fstack->calls, size, skip);
2984 	}
2985 
2986 	size = nr_entries * sizeof(unsigned long);
2987 	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2988 					    sizeof(*entry) + size, trace_ctx);
2989 	if (!event)
2990 		goto out;
2991 	entry = ring_buffer_event_data(event);
2992 
2993 	memcpy(&entry->caller, fstack->calls, size);
2994 	entry->size = nr_entries;
2995 
2996 	if (!call_filter_check_discard(call, entry, buffer, event))
2997 		__buffer_unlock_commit(buffer, event);
2998 
2999  out:
3000 	/* Again, don't let gcc optimize things here */
3001 	barrier();
3002 	__this_cpu_dec(ftrace_stack_reserve);
3003 	preempt_enable_notrace();
3004 
3005 }
3006 
3007 static inline void ftrace_trace_stack(struct trace_array *tr,
3008 				      struct trace_buffer *buffer,
3009 				      unsigned int trace_ctx,
3010 				      int skip, struct pt_regs *regs)
3011 {
3012 	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3013 		return;
3014 
3015 	__ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3016 }
3017 
3018 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3019 		   int skip)
3020 {
3021 	struct trace_buffer *buffer = tr->array_buffer.buffer;
3022 
3023 	if (rcu_is_watching()) {
3024 		__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3025 		return;
3026 	}
3027 
3028 	/*
3029 	 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3030 	 * but if the above rcu_is_watching() failed, then the NMI
3031 	 * triggered someplace critical, and rcu_irq_enter() should
3032 	 * not be called from NMI.
3033 	 */
3034 	if (unlikely(in_nmi()))
3035 		return;
3036 
3037 	rcu_irq_enter_irqson();
3038 	__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3039 	rcu_irq_exit_irqson();
3040 }
3041 
3042 /**
3043  * trace_dump_stack - record a stack back trace in the trace buffer
3044  * @skip: Number of functions to skip (helper handlers)
3045  */
3046 void trace_dump_stack(int skip)
3047 {
3048 	if (tracing_disabled || tracing_selftest_running)
3049 		return;
3050 
3051 #ifndef CONFIG_UNWINDER_ORC
3052 	/* Skip 1 to skip this function. */
3053 	skip++;
3054 #endif
3055 	__ftrace_trace_stack(global_trace.array_buffer.buffer,
3056 			     tracing_gen_ctx(), skip, NULL);
3057 }
3058 EXPORT_SYMBOL_GPL(trace_dump_stack);
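
/*
 * Usage sketch: a debugging call site can simply do
 *
 *	trace_dump_stack(0);
 *
 * to record its own backtrace in the top-level trace buffer; a positive
 * @skip drops that many additional callers from the top of the trace.
 */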
3059 
3060 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3061 static DEFINE_PER_CPU(int, user_stack_count);
3062 
3063 static void
3064 ftrace_trace_userstack(struct trace_array *tr,
3065 		       struct trace_buffer *buffer, unsigned int trace_ctx)
3066 {
3067 	struct trace_event_call *call = &event_user_stack;
3068 	struct ring_buffer_event *event;
3069 	struct userstack_entry *entry;
3070 
3071 	if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3072 		return;
3073 
3074 	/*
3075 	 * NMIs can not handle page faults, even with fixups.
3076 	 * Saving the user stack can (and often does) fault.
3077 	 */
3078 	if (unlikely(in_nmi()))
3079 		return;
3080 
3081 	/*
3082 	 * Prevent recursion, since the user stack tracing may
3083 	 * trigger other kernel events.
3084 	 */
3085 	preempt_disable();
3086 	if (__this_cpu_read(user_stack_count))
3087 		goto out;
3088 
3089 	__this_cpu_inc(user_stack_count);
3090 
3091 	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3092 					    sizeof(*entry), trace_ctx);
3093 	if (!event)
3094 		goto out_drop_count;
3095 	entry	= ring_buffer_event_data(event);
3096 
3097 	entry->tgid		= current->tgid;
3098 	memset(&entry->caller, 0, sizeof(entry->caller));
3099 
3100 	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3101 	if (!call_filter_check_discard(call, entry, buffer, event))
3102 		__buffer_unlock_commit(buffer, event);
3103 
3104  out_drop_count:
3105 	__this_cpu_dec(user_stack_count);
3106  out:
3107 	preempt_enable();
3108 }
3109 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3110 static void ftrace_trace_userstack(struct trace_array *tr,
3111 				   struct trace_buffer *buffer,
3112 				   unsigned int trace_ctx)
3113 {
3114 }
3115 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3116 
3117 #endif /* CONFIG_STACKTRACE */
3118 
3119 /* created for use with alloc_percpu */
3120 struct trace_buffer_struct {
3121 	int nesting;
3122 	char buffer[4][TRACE_BUF_SIZE];
3123 };
3124 
3125 static struct trace_buffer_struct *trace_percpu_buffer;
3126 
3127 /*
3128  * This allows for lockless recording.  If we're nested too deeply, then
3129  * this returns NULL.
3130  */
3131 static char *get_trace_buf(void)
3132 {
3133 	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3134 
3135 	if (!buffer || buffer->nesting >= 4)
3136 		return NULL;
3137 
3138 	buffer->nesting++;
3139 
3140 	/* Interrupts must see nesting incremented before we use the buffer */
3141 	barrier();
3142 	return &buffer->buffer[buffer->nesting - 1][0];
3143 }
3144 
3145 static void put_trace_buf(void)
3146 {
3147 	/* Don't let the decrement of nesting leak before this */
3148 	barrier();
3149 	this_cpu_dec(trace_percpu_buffer->nesting);
3150 }
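
/*
 * Usage sketch (mirrors what trace_vbprintk() below does with the two
 * helpers above):
 *
 *	preempt_disable_notrace();
 *	buf = get_trace_buf();
 *	if (buf) {
 *		... format the message into buf ...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 *
 * Preemption must stay disabled between get and put so the nesting
 * counter and the buffer refer to the same CPU.
 */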
3151 
3152 static int alloc_percpu_trace_buffer(void)
3153 {
3154 	struct trace_buffer_struct *buffers;
3155 
3156 	if (trace_percpu_buffer)
3157 		return 0;
3158 
3159 	buffers = alloc_percpu(struct trace_buffer_struct);
3160 	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3161 		return -ENOMEM;
3162 
3163 	trace_percpu_buffer = buffers;
3164 	return 0;
3165 }
3166 
3167 static int buffers_allocated;
3168 
3169 void trace_printk_init_buffers(void)
3170 {
3171 	if (buffers_allocated)
3172 		return;
3173 
3174 	if (alloc_percpu_trace_buffer())
3175 		return;
3176 
3177 	/* trace_printk() is for debug use only. Don't use it in production. */
3178 
3179 	pr_warn("\n");
3180 	pr_warn("**********************************************************\n");
3181 	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3182 	pr_warn("**                                                      **\n");
3183 	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
3184 	pr_warn("**                                                      **\n");
3185 	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
3186 	pr_warn("** unsafe for production use.                           **\n");
3187 	pr_warn("**                                                      **\n");
3188 	pr_warn("** If you see this message and you are not debugging    **\n");
3189 	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
3190 	pr_warn("**                                                      **\n");
3191 	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3192 	pr_warn("**********************************************************\n");
3193 
3194 	/* Expand the buffers to set size */
3195 	tracing_update_buffers();
3196 
3197 	buffers_allocated = 1;
3198 
3199 	/*
3200 	 * trace_printk_init_buffers() can be called by modules.
3201 	 * If that happens, then we need to start cmdline recording
3202 	 * directly here. If the global_trace.buffer is already
3203 	 * allocated here, then this was called by module code.
3204 	 */
3205 	if (global_trace.array_buffer.buffer)
3206 		tracing_start_cmdline_record();
3207 }
3208 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3209 
3210 void trace_printk_start_comm(void)
3211 {
3212 	/* Start tracing comms if trace printk is set */
3213 	if (!buffers_allocated)
3214 		return;
3215 	tracing_start_cmdline_record();
3216 }
3217 
3218 static void trace_printk_start_stop_comm(int enabled)
3219 {
3220 	if (!buffers_allocated)
3221 		return;
3222 
3223 	if (enabled)
3224 		tracing_start_cmdline_record();
3225 	else
3226 		tracing_stop_cmdline_record();
3227 }
3228 
3229 /**
3230  * trace_vbprintk - write binary msg to tracing buffer
3231  * @ip:    The address of the caller
3232  * @fmt:   The string format to write to the buffer
3233  * @args:  Arguments for @fmt
3234  */
3235 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3236 {
3237 	struct trace_event_call *call = &event_bprint;
3238 	struct ring_buffer_event *event;
3239 	struct trace_buffer *buffer;
3240 	struct trace_array *tr = &global_trace;
3241 	struct bprint_entry *entry;
3242 	unsigned int trace_ctx;
3243 	char *tbuffer;
3244 	int len = 0, size;
3245 
3246 	if (unlikely(tracing_selftest_running || tracing_disabled))
3247 		return 0;
3248 
3249 	/* Don't pollute graph traces with trace_vprintk internals */
3250 	pause_graph_tracing();
3251 
3252 	trace_ctx = tracing_gen_ctx();
3253 	preempt_disable_notrace();
3254 
3255 	tbuffer = get_trace_buf();
3256 	if (!tbuffer) {
3257 		len = 0;
3258 		goto out_nobuffer;
3259 	}
3260 
3261 	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3262 
3263 	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3264 		goto out_put;
3265 
3266 	size = sizeof(*entry) + sizeof(u32) * len;
3267 	buffer = tr->array_buffer.buffer;
3268 	ring_buffer_nest_start(buffer);
3269 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3270 					    trace_ctx);
3271 	if (!event)
3272 		goto out;
3273 	entry = ring_buffer_event_data(event);
3274 	entry->ip			= ip;
3275 	entry->fmt			= fmt;
3276 
3277 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3278 	if (!call_filter_check_discard(call, entry, buffer, event)) {
3279 		__buffer_unlock_commit(buffer, event);
3280 		ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3281 	}
3282 
3283 out:
3284 	ring_buffer_nest_end(buffer);
3285 out_put:
3286 	put_trace_buf();
3287 
3288 out_nobuffer:
3289 	preempt_enable_notrace();
3290 	unpause_graph_tracing();
3291 
3292 	return len;
3293 }
3294 EXPORT_SYMBOL_GPL(trace_vbprintk);
3295 
3296 __printf(3, 0)
3297 static int
3298 __trace_array_vprintk(struct trace_buffer *buffer,
3299 		      unsigned long ip, const char *fmt, va_list args)
3300 {
3301 	struct trace_event_call *call = &event_print;
3302 	struct ring_buffer_event *event;
3303 	int len = 0, size;
3304 	struct print_entry *entry;
3305 	unsigned int trace_ctx;
3306 	char *tbuffer;
3307 
3308 	if (tracing_disabled || tracing_selftest_running)
3309 		return 0;
3310 
3311 	/* Don't pollute graph traces with trace_vprintk internals */
3312 	pause_graph_tracing();
3313 
3314 	trace_ctx = tracing_gen_ctx();
3315 	preempt_disable_notrace();
3316 
3317 
3318 	tbuffer = get_trace_buf();
3319 	if (!tbuffer) {
3320 		len = 0;
3321 		goto out_nobuffer;
3322 	}
3323 
3324 	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3325 
3326 	size = sizeof(*entry) + len + 1;
3327 	ring_buffer_nest_start(buffer);
3328 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3329 					    trace_ctx);
3330 	if (!event)
3331 		goto out;
3332 	entry = ring_buffer_event_data(event);
3333 	entry->ip = ip;
3334 
3335 	memcpy(&entry->buf, tbuffer, len + 1);
3336 	if (!call_filter_check_discard(call, entry, buffer, event)) {
3337 		__buffer_unlock_commit(buffer, event);
3338 		ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3339 	}
3340 
3341 out:
3342 	ring_buffer_nest_end(buffer);
3343 	put_trace_buf();
3344 
3345 out_nobuffer:
3346 	preempt_enable_notrace();
3347 	unpause_graph_tracing();
3348 
3349 	return len;
3350 }
3351 
3352 __printf(3, 0)
3353 int trace_array_vprintk(struct trace_array *tr,
3354 			unsigned long ip, const char *fmt, va_list args)
3355 {
3356 	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3357 }
3358 
3359 /**
3360  * trace_array_printk - Print a message to a specific instance
3361  * @tr: The instance trace_array descriptor
3362  * @ip: The instruction pointer that this is called from.
3363  * @fmt: The format to print (printf format)
3364  *
3365  * If a subsystem sets up its own instance, it has the right to
3366  * printk strings into its tracing instance buffer using this
3367  * function. Note, this function will not write into the top level
3368  * buffer (use trace_printk() for that), as the top level buffer
3369  * should only contain events that can be individually disabled.
3370  * trace_printk() is only used for debugging a kernel, and should
3371  * never be incorporated into normal use.
3372  *
3373  * trace_array_printk() can be used, as it will not add noise to the
3374  * top level tracing buffer.
3375  *
3376  * Note, trace_array_init_printk() must be called on @tr before this
3377  * can be used.
3378  */
3379 __printf(3, 0)
3380 int trace_array_printk(struct trace_array *tr,
3381 		       unsigned long ip, const char *fmt, ...)
3382 {
3383 	int ret;
3384 	va_list ap;
3385 
3386 	if (!tr)
3387 		return -ENOENT;
3388 
3389 	/* This is only allowed for created instances */
3390 	if (tr == &global_trace)
3391 		return 0;
3392 
3393 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3394 		return 0;
3395 
3396 	va_start(ap, fmt);
3397 	ret = trace_array_vprintk(tr, ip, fmt, ap);
3398 	va_end(ap);
3399 	return ret;
3400 }
3401 EXPORT_SYMBOL_GPL(trace_array_printk);
3402 
3403 /**
3404  * trace_array_init_printk - Initialize buffers for trace_array_printk()
3405  * @tr: The trace array to initialize the buffers for
3406  *
3407  * As trace_array_printk() only writes into instances, they are OK to
3408  * have in the kernel (unlike trace_printk()). This needs to be called
3409  * before trace_array_printk() can be used on a trace_array.
3410  */
3411 int trace_array_init_printk(struct trace_array *tr)
3412 {
3413 	if (!tr)
3414 		return -ENOENT;
3415 
3416 	/* This is only allowed for created instances */
3417 	if (tr == &global_trace)
3418 		return -EINVAL;
3419 
3420 	return alloc_percpu_trace_buffer();
3421 }
3422 EXPORT_SYMBOL_GPL(trace_array_init_printk);
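
/*
 * Usage sketch for the two functions above (hypothetical instance name,
 * not part of this file):
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_instance");
 *
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "value=%d\n", 42);
 */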
3423 
3424 __printf(3, 4)
3425 int trace_array_printk_buf(struct trace_buffer *buffer,
3426 			   unsigned long ip, const char *fmt, ...)
3427 {
3428 	int ret;
3429 	va_list ap;
3430 
3431 	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3432 		return 0;
3433 
3434 	va_start(ap, fmt);
3435 	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3436 	va_end(ap);
3437 	return ret;
3438 }
3439 
3440 __printf(2, 0)
3441 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3442 {
3443 	return trace_array_vprintk(&global_trace, ip, fmt, args);
3444 }
3445 EXPORT_SYMBOL_GPL(trace_vprintk);
3446 
3447 static void trace_iterator_increment(struct trace_iterator *iter)
3448 {
3449 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3450 
3451 	iter->idx++;
3452 	if (buf_iter)
3453 		ring_buffer_iter_advance(buf_iter);
3454 }
3455 
3456 static struct trace_entry *
3457 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3458 		unsigned long *lost_events)
3459 {
3460 	struct ring_buffer_event *event;
3461 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3462 
3463 	if (buf_iter) {
3464 		event = ring_buffer_iter_peek(buf_iter, ts);
3465 		if (lost_events)
3466 			*lost_events = ring_buffer_iter_dropped(buf_iter) ?
3467 				(unsigned long)-1 : 0;
3468 	} else {
3469 		event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3470 					 lost_events);
3471 	}
3472 
3473 	if (event) {
3474 		iter->ent_size = ring_buffer_event_length(event);
3475 		return ring_buffer_event_data(event);
3476 	}
3477 	iter->ent_size = 0;
3478 	return NULL;
3479 }
3480 
3481 static struct trace_entry *
3482 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3483 		  unsigned long *missing_events, u64 *ent_ts)
3484 {
3485 	struct trace_buffer *buffer = iter->array_buffer->buffer;
3486 	struct trace_entry *ent, *next = NULL;
3487 	unsigned long lost_events = 0, next_lost = 0;
3488 	int cpu_file = iter->cpu_file;
3489 	u64 next_ts = 0, ts;
3490 	int next_cpu = -1;
3491 	int next_size = 0;
3492 	int cpu;
3493 
3494 	/*
3495 	 * If we are in a per_cpu trace file, don't bother iterating over
3496 	 * all CPUs; peek at that CPU directly.
3497 	 */
3498 	if (cpu_file > RING_BUFFER_ALL_CPUS) {
3499 		if (ring_buffer_empty_cpu(buffer, cpu_file))
3500 			return NULL;
3501 		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3502 		if (ent_cpu)
3503 			*ent_cpu = cpu_file;
3504 
3505 		return ent;
3506 	}
3507 
3508 	for_each_tracing_cpu(cpu) {
3509 
3510 		if (ring_buffer_empty_cpu(buffer, cpu))
3511 			continue;
3512 
3513 		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3514 
3515 		/*
3516 		 * Pick the entry with the smallest timestamp:
3517 		 */
3518 		if (ent && (!next || ts < next_ts)) {
3519 			next = ent;
3520 			next_cpu = cpu;
3521 			next_ts = ts;
3522 			next_lost = lost_events;
3523 			next_size = iter->ent_size;
3524 		}
3525 	}
3526 
3527 	iter->ent_size = next_size;
3528 
3529 	if (ent_cpu)
3530 		*ent_cpu = next_cpu;
3531 
3532 	if (ent_ts)
3533 		*ent_ts = next_ts;
3534 
3535 	if (missing_events)
3536 		*missing_events = next_lost;
3537 
3538 	return next;
3539 }
3540 
3541 #define STATIC_FMT_BUF_SIZE	128
3542 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3543 
3544 static char *trace_iter_expand_format(struct trace_iterator *iter)
3545 {
3546 	char *tmp;
3547 
3548 	if (iter->fmt == static_fmt_buf)
3549 		return NULL;
3550 
3551 	tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3552 		       GFP_KERNEL);
3553 	if (tmp) {
3554 		iter->fmt_size += STATIC_FMT_BUF_SIZE;
3555 		iter->fmt = tmp;
3556 	}
3557 
3558 	return tmp;
3559 }
3560 
3561 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3562 {
3563 	const char *p, *new_fmt;
3564 	char *q;
3565 
3566 	if (WARN_ON_ONCE(!fmt))
3567 		return fmt;
3568 
3569 	if (iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3570 		return fmt;
3571 
3572 	p = fmt;
3573 	new_fmt = q = iter->fmt;
3574 	while (*p) {
3575 		if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3576 			if (!trace_iter_expand_format(iter))
3577 				return fmt;
3578 
3579 			q += iter->fmt - new_fmt;
3580 			new_fmt = iter->fmt;
3581 		}
3582 
3583 		*q++ = *p++;
3584 
3585 		/* Replace %p with %px */
3586 		if (p[-1] == '%') {
3587 			if (p[0] == '%') {
3588 				*q++ = *p++;
3589 			} else if (p[0] == 'p' && !isalnum(p[1])) {
3590 				*q++ = *p++;
3591 				*q++ = 'x';
3592 			}
3593 		}
3594 	}
3595 	*q = '\0';
3596 
3597 	return new_fmt;
3598 }
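
/*
 * Example of the rewrite performed above: with TRACE_ITER_HASH_PTR
 * cleared, a format string such as "ptr=%p comm=%s" is expanded into
 * "ptr=%px comm=%s" so the raw pointer value is printed instead of the
 * hashed one; "%%" and extended specifiers such as "%pS" are left
 * untouched.
 */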
3599 
3600 #define STATIC_TEMP_BUF_SIZE	128
3601 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3602 
3603 /* Find the next real entry, without updating the iterator itself */
3604 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3605 					  int *ent_cpu, u64 *ent_ts)
3606 {
3607 	/* __find_next_entry will reset ent_size */
3608 	int ent_size = iter->ent_size;
3609 	struct trace_entry *entry;
3610 
3611 	/*
3612 	 * If called from ftrace_dump(), then the iter->temp buffer
3613 	 * will be the static_temp_buf and not created from kmalloc.
3614 	 * If the entry size is greater than the buffer, we can
3615 	 * not save it. Just return NULL in that case. This is only
3616 	 * used to add markers when two consecutive events' time
3617 	 * stamps have a large delta. See trace_print_lat_context().
3618 	 */
3619 	if (iter->temp == static_temp_buf &&
3620 	    STATIC_TEMP_BUF_SIZE < ent_size)
3621 		return NULL;
3622 
3623 	/*
3624 	 * The __find_next_entry() may call peek_next_entry(), which may
3625 	 * call ring_buffer_peek() that may make the contents of iter->ent
3626 	 * undefined. Need to copy iter->ent now.
3627 	 */
3628 	if (iter->ent && iter->ent != iter->temp) {
3629 		if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3630 		    !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3631 			void *temp;
3632 			temp = kmalloc(iter->ent_size, GFP_KERNEL);
3633 			if (!temp)
3634 				return NULL;
3635 			kfree(iter->temp);
3636 			iter->temp = temp;
3637 			iter->temp_size = iter->ent_size;
3638 		}
3639 		memcpy(iter->temp, iter->ent, iter->ent_size);
3640 		iter->ent = iter->temp;
3641 	}
3642 	entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3643 	/* Put back the original ent_size */
3644 	iter->ent_size = ent_size;
3645 
3646 	return entry;
3647 }
3648 
3649 /* Find the next real entry, and increment the iterator to the next entry */
3650 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3651 {
3652 	iter->ent = __find_next_entry(iter, &iter->cpu,
3653 				      &iter->lost_events, &iter->ts);
3654 
3655 	if (iter->ent)
3656 		trace_iterator_increment(iter);
3657 
3658 	return iter->ent ? iter : NULL;
3659 }
3660 
3661 static void trace_consume(struct trace_iterator *iter)
3662 {
3663 	ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3664 			    &iter->lost_events);
3665 }
3666 
3667 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3668 {
3669 	struct trace_iterator *iter = m->private;
3670 	int i = (int)*pos;
3671 	void *ent;
3672 
3673 	WARN_ON_ONCE(iter->leftover);
3674 
3675 	(*pos)++;
3676 
3677 	/* can't go backwards */
3678 	if (iter->idx > i)
3679 		return NULL;
3680 
3681 	if (iter->idx < 0)
3682 		ent = trace_find_next_entry_inc(iter);
3683 	else
3684 		ent = iter;
3685 
3686 	while (ent && iter->idx < i)
3687 		ent = trace_find_next_entry_inc(iter);
3688 
3689 	iter->pos = *pos;
3690 
3691 	return ent;
3692 }
3693 
3694 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3695 {
3696 	struct ring_buffer_iter *buf_iter;
3697 	unsigned long entries = 0;
3698 	u64 ts;
3699 
3700 	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3701 
3702 	buf_iter = trace_buffer_iter(iter, cpu);
3703 	if (!buf_iter)
3704 		return;
3705 
3706 	ring_buffer_iter_reset(buf_iter);
3707 
3708 	/*
3709 	 * With the max latency tracers, we could have the case
3710 	 * that a reset never took place on a CPU. This is evident
3711 	 * from the timestamp being before the start of the buffer.
3712 	 */
3713 	while (ring_buffer_iter_peek(buf_iter, &ts)) {
3714 		if (ts >= iter->array_buffer->time_start)
3715 			break;
3716 		entries++;
3717 		ring_buffer_iter_advance(buf_iter);
3718 	}
3719 
3720 	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3721 }
3722 
3723 /*
3724  * The current tracer is copied to avoid using a global lock
3725  * all around.
3726  */
3727 static void *s_start(struct seq_file *m, loff_t *pos)
3728 {
3729 	struct trace_iterator *iter = m->private;
3730 	struct trace_array *tr = iter->tr;
3731 	int cpu_file = iter->cpu_file;
3732 	void *p = NULL;
3733 	loff_t l = 0;
3734 	int cpu;
3735 
3736 	/*
3737 	 * Copy the tracer to avoid using a global lock all around.
3738 	 * iter->trace is a copy of current_trace; the pointer to the
3739 	 * name may be compared instead of using strcmp(), as iter->trace->name
3740 	 * will point to the same string as current_trace->name.
3741 	 */
3742 	mutex_lock(&trace_types_lock);
3743 	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3744 		*iter->trace = *tr->current_trace;
3745 	mutex_unlock(&trace_types_lock);
3746 
3747 #ifdef CONFIG_TRACER_MAX_TRACE
3748 	if (iter->snapshot && iter->trace->use_max_tr)
3749 		return ERR_PTR(-EBUSY);
3750 #endif
3751 
3752 	if (!iter->snapshot)
3753 		atomic_inc(&trace_record_taskinfo_disabled);
3754 
3755 	if (*pos != iter->pos) {
3756 		iter->ent = NULL;
3757 		iter->cpu = 0;
3758 		iter->idx = -1;
3759 
3760 		if (cpu_file == RING_BUFFER_ALL_CPUS) {
3761 			for_each_tracing_cpu(cpu)
3762 				tracing_iter_reset(iter, cpu);
3763 		} else
3764 			tracing_iter_reset(iter, cpu_file);
3765 
3766 		iter->leftover = 0;
3767 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3768 			;
3769 
3770 	} else {
3771 		/*
3772 		 * If we overflowed the seq_file before, then we want
3773 		 * to just reuse the trace_seq buffer again.
3774 		 */
3775 		if (iter->leftover)
3776 			p = iter;
3777 		else {
3778 			l = *pos - 1;
3779 			p = s_next(m, p, &l);
3780 		}
3781 	}
3782 
3783 	trace_event_read_lock();
3784 	trace_access_lock(cpu_file);
3785 	return p;
3786 }
3787 
3788 static void s_stop(struct seq_file *m, void *p)
3789 {
3790 	struct trace_iterator *iter = m->private;
3791 
3792 #ifdef CONFIG_TRACER_MAX_TRACE
3793 	if (iter->snapshot && iter->trace->use_max_tr)
3794 		return;
3795 #endif
3796 
3797 	if (!iter->snapshot)
3798 		atomic_dec(&trace_record_taskinfo_disabled);
3799 
3800 	trace_access_unlock(iter->cpu_file);
3801 	trace_event_read_unlock();
3802 }
3803 
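/*
 * Entry accounting: "entries" is what can still be read from the buffer,
 * minus anything tracing_iter_reset() marked as skipped, while "total"
 * additionally counts events that were lost to ring buffer overruns.
 */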
3804 static void
3805 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
3806 		      unsigned long *entries, int cpu)
3807 {
3808 	unsigned long count;
3809 
3810 	count = ring_buffer_entries_cpu(buf->buffer, cpu);
3811 	/*
3812 	 * If this buffer has skipped entries, then we hold all
3813 	 * entries for the trace and we need to ignore the
3814 	 * ones before the time stamp.
3815 	 */
3816 	if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3817 		count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3818 		/* total is the same as the entries */
3819 		*total = count;
3820 	} else
3821 		*total = count +
3822 			ring_buffer_overrun_cpu(buf->buffer, cpu);
3823 	*entries = count;
3824 }
3825 
3826 static void
3827 get_total_entries(struct array_buffer *buf,
3828 		  unsigned long *total, unsigned long *entries)
3829 {
3830 	unsigned long t, e;
3831 	int cpu;
3832 
3833 	*total = 0;
3834 	*entries = 0;
3835 
3836 	for_each_tracing_cpu(cpu) {
3837 		get_total_entries_cpu(buf, &t, &e, cpu);
3838 		*total += t;
3839 		*entries += e;
3840 	}
3841 }
3842 
3843 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3844 {
3845 	unsigned long total, entries;
3846 
3847 	if (!tr)
3848 		tr = &global_trace;
3849 
3850 	get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
3851 
3852 	return entries;
3853 }
3854 
3855 unsigned long trace_total_entries(struct trace_array *tr)
3856 {
3857 	unsigned long total, entries;
3858 
3859 	if (!tr)
3860 		tr = &global_trace;
3861 
3862 	get_total_entries(&tr->array_buffer, &total, &entries);
3863 
3864 	return entries;
3865 }
3866 
3867 static void print_lat_help_header(struct seq_file *m)
3868 {
3869 	seq_puts(m, "#                    _------=> CPU#            \n"
3870 		    "#                   / _-----=> irqs-off        \n"
3871 		    "#                  | / _----=> need-resched    \n"
3872 		    "#                  || / _---=> hardirq/softirq \n"
3873 		    "#                  ||| / _--=> preempt-depth   \n"
3874 		    "#                  |||| /     delay            \n"
3875 		    "#  cmd     pid     ||||| time  |   caller      \n"
3876 		    "#     \\   /        |||||  \\    |   /         \n");
3877 }
3878 
3879 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
3880 {
3881 	unsigned long total;
3882 	unsigned long entries;
3883 
3884 	get_total_entries(buf, &total, &entries);
3885 	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
3886 		   entries, total, num_online_cpus());
3887 	seq_puts(m, "#\n");
3888 }
3889 
3890 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
3891 				   unsigned int flags)
3892 {
3893 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
3894 
3895 	print_event_info(buf, m);
3896 
3897 	seq_printf(m, "#           TASK-PID    %s CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : "");
3898 	seq_printf(m, "#              | |      %s   |         |         |\n",      tgid ? "     |    " : "");
3899 }
3900 
3901 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
3902 				       unsigned int flags)
3903 {
3904 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
3905 	const char *space = "            ";
3906 	int prec = tgid ? 12 : 2;
3907 
3908 	print_event_info(buf, m);
3909 
3910 	seq_printf(m, "#                            %.*s  _-----=> irqs-off\n", prec, space);
3911 	seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
3912 	seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
3913 	seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
3914 	seq_printf(m, "#                            %.*s||| /     delay\n", prec, space);
3915 	seq_printf(m, "#           TASK-PID  %.*s CPU#  ||||   TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
3916 	seq_printf(m, "#              | |    %.*s   |   ||||      |         |\n", prec, "       |    ");
3917 }
3918 
3919 void
3920 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3921 {
3922 	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3923 	struct array_buffer *buf = iter->array_buffer;
3924 	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3925 	struct tracer *type = iter->trace;
3926 	unsigned long entries;
3927 	unsigned long total;
3928 	const char *name;
3929 
3930 	name = type->name;
3931 
3932 	get_total_entries(buf, &total, &entries);
3933 
3934 	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3935 		   name, UTS_RELEASE);
3936 	seq_puts(m, "# -----------------------------------"
3937 		 "---------------------------------\n");
3938 	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3939 		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3940 		   nsecs_to_usecs(data->saved_latency),
3941 		   entries,
3942 		   total,
3943 		   buf->cpu,
3944 #if defined(CONFIG_PREEMPT_NONE)
3945 		   "server",
3946 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3947 		   "desktop",
3948 #elif defined(CONFIG_PREEMPT)
3949 		   "preempt",
3950 #elif defined(CONFIG_PREEMPT_RT)
3951 		   "preempt_rt",
3952 #else
3953 		   "unknown",
3954 #endif
3955 		   /* These are reserved for later use */
3956 		   0, 0, 0, 0);
3957 #ifdef CONFIG_SMP
3958 	seq_printf(m, " #P:%d)\n", num_online_cpus());
3959 #else
3960 	seq_puts(m, ")\n");
3961 #endif
3962 	seq_puts(m, "#    -----------------\n");
3963 	seq_printf(m, "#    | task: %.16s-%d "
3964 		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3965 		   data->comm, data->pid,
3966 		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3967 		   data->policy, data->rt_priority);
3968 	seq_puts(m, "#    -----------------\n");
3969 
3970 	if (data->critical_start) {
3971 		seq_puts(m, "#  => started at: ");
3972 		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3973 		trace_print_seq(m, &iter->seq);
3974 		seq_puts(m, "\n#  => ended at:   ");
3975 		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3976 		trace_print_seq(m, &iter->seq);
3977 		seq_puts(m, "\n#\n");
3978 	}
3979 
3980 	seq_puts(m, "#\n");
3981 }
3982 
3983 static void test_cpu_buff_start(struct trace_iterator *iter)
3984 {
3985 	struct trace_seq *s = &iter->seq;
3986 	struct trace_array *tr = iter->tr;
3987 
3988 	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3989 		return;
3990 
3991 	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3992 		return;
3993 
3994 	if (cpumask_available(iter->started) &&
3995 	    cpumask_test_cpu(iter->cpu, iter->started))
3996 		return;
3997 
3998 	if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
3999 		return;
4000 
4001 	if (cpumask_available(iter->started))
4002 		cpumask_set_cpu(iter->cpu, iter->started);
4003 
4004 	/* Don't print started cpu buffer for the first entry of the trace */
4005 	if (iter->idx > 1)
4006 		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4007 				iter->cpu);
4008 }
4009 
4010 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4011 {
4012 	struct trace_array *tr = iter->tr;
4013 	struct trace_seq *s = &iter->seq;
4014 	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4015 	struct trace_entry *entry;
4016 	struct trace_event *event;
4017 
4018 	entry = iter->ent;
4019 
4020 	test_cpu_buff_start(iter);
4021 
4022 	event = ftrace_find_event(entry->type);
4023 
4024 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4025 		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4026 			trace_print_lat_context(iter);
4027 		else
4028 			trace_print_context(iter);
4029 	}
4030 
4031 	if (trace_seq_has_overflowed(s))
4032 		return TRACE_TYPE_PARTIAL_LINE;
4033 
4034 	if (event)
4035 		return event->funcs->trace(iter, sym_flags, event);
4036 
4037 	trace_seq_printf(s, "Unknown type %d\n", entry->type);
4038 
4039 	return trace_handle_return(s);
4040 }
4041 
4042 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4043 {
4044 	struct trace_array *tr = iter->tr;
4045 	struct trace_seq *s = &iter->seq;
4046 	struct trace_entry *entry;
4047 	struct trace_event *event;
4048 
4049 	entry = iter->ent;
4050 
4051 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4052 		trace_seq_printf(s, "%d %d %llu ",
4053 				 entry->pid, iter->cpu, iter->ts);
4054 
4055 	if (trace_seq_has_overflowed(s))
4056 		return TRACE_TYPE_PARTIAL_LINE;
4057 
4058 	event = ftrace_find_event(entry->type);
4059 	if (event)
4060 		return event->funcs->raw(iter, 0, event);
4061 
4062 	trace_seq_printf(s, "%d ?\n", entry->type);
4063 
4064 	return trace_handle_return(s);
4065 }
4066 
4067 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4068 {
4069 	struct trace_array *tr = iter->tr;
4070 	struct trace_seq *s = &iter->seq;
4071 	unsigned char newline = '\n';
4072 	struct trace_entry *entry;
4073 	struct trace_event *event;
4074 
4075 	entry = iter->ent;
4076 
4077 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4078 		SEQ_PUT_HEX_FIELD(s, entry->pid);
4079 		SEQ_PUT_HEX_FIELD(s, iter->cpu);
4080 		SEQ_PUT_HEX_FIELD(s, iter->ts);
4081 		if (trace_seq_has_overflowed(s))
4082 			return TRACE_TYPE_PARTIAL_LINE;
4083 	}
4084 
4085 	event = ftrace_find_event(entry->type);
4086 	if (event) {
4087 		enum print_line_t ret = event->funcs->hex(iter, 0, event);
4088 		if (ret != TRACE_TYPE_HANDLED)
4089 			return ret;
4090 	}
4091 
4092 	SEQ_PUT_FIELD(s, newline);
4093 
4094 	return trace_handle_return(s);
4095 }
4096 
4097 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4098 {
4099 	struct trace_array *tr = iter->tr;
4100 	struct trace_seq *s = &iter->seq;
4101 	struct trace_entry *entry;
4102 	struct trace_event *event;
4103 
4104 	entry = iter->ent;
4105 
4106 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4107 		SEQ_PUT_FIELD(s, entry->pid);
4108 		SEQ_PUT_FIELD(s, iter->cpu);
4109 		SEQ_PUT_FIELD(s, iter->ts);
4110 		if (trace_seq_has_overflowed(s))
4111 			return TRACE_TYPE_PARTIAL_LINE;
4112 	}
4113 
4114 	event = ftrace_find_event(entry->type);
4115 	return event ? event->funcs->binary(iter, 0, event) :
4116 		TRACE_TYPE_HANDLED;
4117 }
4118 
4119 int trace_empty(struct trace_iterator *iter)
4120 {
4121 	struct ring_buffer_iter *buf_iter;
4122 	int cpu;
4123 
4124 	/* If we are looking at one CPU buffer, only check that one */
4125 	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4126 		cpu = iter->cpu_file;
4127 		buf_iter = trace_buffer_iter(iter, cpu);
4128 		if (buf_iter) {
4129 			if (!ring_buffer_iter_empty(buf_iter))
4130 				return 0;
4131 		} else {
4132 			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4133 				return 0;
4134 		}
4135 		return 1;
4136 	}
4137 
4138 	for_each_tracing_cpu(cpu) {
4139 		buf_iter = trace_buffer_iter(iter, cpu);
4140 		if (buf_iter) {
4141 			if (!ring_buffer_iter_empty(buf_iter))
4142 				return 0;
4143 		} else {
4144 			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4145 				return 0;
4146 		}
4147 	}
4148 
4149 	return 1;
4150 }
4151 
4152 /*  Called with trace_event_read_lock() held. */
4153 enum print_line_t print_trace_line(struct trace_iterator *iter)
4154 {
4155 	struct trace_array *tr = iter->tr;
4156 	unsigned long trace_flags = tr->trace_flags;
4157 	enum print_line_t ret;
4158 
4159 	if (iter->lost_events) {
4160 		if (iter->lost_events == (unsigned long)-1)
4161 			trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4162 					 iter->cpu);
4163 		else
4164 			trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4165 					 iter->cpu, iter->lost_events);
4166 		if (trace_seq_has_overflowed(&iter->seq))
4167 			return TRACE_TYPE_PARTIAL_LINE;
4168 	}
4169 
4170 	if (iter->trace && iter->trace->print_line) {
4171 		ret = iter->trace->print_line(iter);
4172 		if (ret != TRACE_TYPE_UNHANDLED)
4173 			return ret;
4174 	}
4175 
4176 	if (iter->ent->type == TRACE_BPUTS &&
4177 			trace_flags & TRACE_ITER_PRINTK &&
4178 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4179 		return trace_print_bputs_msg_only(iter);
4180 
4181 	if (iter->ent->type == TRACE_BPRINT &&
4182 			trace_flags & TRACE_ITER_PRINTK &&
4183 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4184 		return trace_print_bprintk_msg_only(iter);
4185 
4186 	if (iter->ent->type == TRACE_PRINT &&
4187 			trace_flags & TRACE_ITER_PRINTK &&
4188 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4189 		return trace_print_printk_msg_only(iter);
4190 
4191 	if (trace_flags & TRACE_ITER_BIN)
4192 		return print_bin_fmt(iter);
4193 
4194 	if (trace_flags & TRACE_ITER_HEX)
4195 		return print_hex_fmt(iter);
4196 
4197 	if (trace_flags & TRACE_ITER_RAW)
4198 		return print_raw_fmt(iter);
4199 
4200 	return print_trace_fmt(iter);
4201 }
4202 
4203 void trace_latency_header(struct seq_file *m)
4204 {
4205 	struct trace_iterator *iter = m->private;
4206 	struct trace_array *tr = iter->tr;
4207 
4208 	/* print nothing if the buffers are empty */
4209 	if (trace_empty(iter))
4210 		return;
4211 
4212 	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4213 		print_trace_header(m, iter);
4214 
4215 	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4216 		print_lat_help_header(m);
4217 }
4218 
4219 void trace_default_header(struct seq_file *m)
4220 {
4221 	struct trace_iterator *iter = m->private;
4222 	struct trace_array *tr = iter->tr;
4223 	unsigned long trace_flags = tr->trace_flags;
4224 
4225 	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4226 		return;
4227 
4228 	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4229 		/* print nothing if the buffers are empty */
4230 		if (trace_empty(iter))
4231 			return;
4232 		print_trace_header(m, iter);
4233 		if (!(trace_flags & TRACE_ITER_VERBOSE))
4234 			print_lat_help_header(m);
4235 	} else {
4236 		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4237 			if (trace_flags & TRACE_ITER_IRQ_INFO)
4238 				print_func_help_header_irq(iter->array_buffer,
4239 							   m, trace_flags);
4240 			else
4241 				print_func_help_header(iter->array_buffer, m,
4242 						       trace_flags);
4243 		}
4244 	}
4245 }
4246 
4247 static void test_ftrace_alive(struct seq_file *m)
4248 {
4249 	if (!ftrace_is_dead())
4250 		return;
4251 	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4252 		    "#          MAY BE MISSING FUNCTION EVENTS\n");
4253 }
4254 
4255 #ifdef CONFIG_TRACER_MAX_TRACE
4256 static void show_snapshot_main_help(struct seq_file *m)
4257 {
4258 	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4259 		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4260 		    "#                      Takes a snapshot of the main buffer.\n"
4261 		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4262 		    "#                      (Doesn't have to be '2'; works with any number that\n"
4263 		    "#                       is not a '0' or '1')\n");
4264 }
4265 
4266 static void show_snapshot_percpu_help(struct seq_file *m)
4267 {
4268 	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4269 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4270 	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4271 		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
4272 #else
4273 	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4274 		    "#                     Must use main snapshot file to allocate.\n");
4275 #endif
4276 	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4277 		    "#                      (Doesn't have to be '2'; works with any number that\n"
4278 		    "#                       is not a '0' or '1')\n");
4279 }
4280 
4281 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4282 {
4283 	if (iter->tr->allocated_snapshot)
4284 		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4285 	else
4286 		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4287 
4288 	seq_puts(m, "# Snapshot commands:\n");
4289 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4290 		show_snapshot_main_help(m);
4291 	else
4292 		show_snapshot_percpu_help(m);
4293 }
4294 #else
4295 /* Should never be called */
4296 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4297 #endif
4298 
4299 static int s_show(struct seq_file *m, void *v)
4300 {
4301 	struct trace_iterator *iter = v;
4302 	int ret;
4303 
4304 	if (iter->ent == NULL) {
4305 		if (iter->tr) {
4306 			seq_printf(m, "# tracer: %s\n", iter->trace->name);
4307 			seq_puts(m, "#\n");
4308 			test_ftrace_alive(m);
4309 		}
4310 		if (iter->snapshot && trace_empty(iter))
4311 			print_snapshot_help(m, iter);
4312 		else if (iter->trace && iter->trace->print_header)
4313 			iter->trace->print_header(m);
4314 		else
4315 			trace_default_header(m);
4316 
4317 	} else if (iter->leftover) {
4318 		/*
4319 		 * If we filled the seq_file buffer earlier, we
4320 		 * want to just show it now.
4321 		 */
4322 		ret = trace_print_seq(m, &iter->seq);
4323 
4324 		/* ret should this time be zero, but you never know */
4325 		iter->leftover = ret;
4326 
4327 	} else {
4328 		print_trace_line(iter);
4329 		ret = trace_print_seq(m, &iter->seq);
4330 		/*
4331 		 * If we overflow the seq_file buffer, then it will
4332 		 * ask us for this data again at start up.
4333 		 * Use that instead.
4334 		 *  ret is 0 if seq_file write succeeded.
4335 		 *        -1 otherwise.
4336 		 */
4337 		iter->leftover = ret;
4338 	}
4339 
4340 	return 0;
4341 }
4342 
4343 /*
4344  * Should be used after trace_array_get(); trace_types_lock
4345  * ensures that i_cdev was already initialized.
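 * Per-CPU trace files store (cpu + 1) in i_cdev (see trace_create_cpu_file()),
 * so a non-NULL i_cdev maps back to that cpu and a NULL one means the file
 * covers all CPUs.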
4346  */
4347 static inline int tracing_get_cpu(struct inode *inode)
4348 {
4349 	if (inode->i_cdev) /* See trace_create_cpu_file() */
4350 		return (long)inode->i_cdev - 1;
4351 	return RING_BUFFER_ALL_CPUS;
4352 }
4353 
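/*
 * seq_file callbacks for the "trace" file.  For each read the core calls
 * s_start() once, then alternates s_show()/s_next() until the seq_file
 * buffer fills or the trace runs out, and finally calls s_stop().
 * s_start()/s_stop() also take the locks that print_trace_line() expects
 * to be held.
 */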
4354 static const struct seq_operations tracer_seq_ops = {
4355 	.start		= s_start,
4356 	.next		= s_next,
4357 	.stop		= s_stop,
4358 	.show		= s_show,
4359 };
4360 
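/*
 * Set up a trace_iterator for reading the "trace" (or "snapshot") file:
 * allocate the per-cpu ring buffer iterators, copy the current tracer,
 * and, unless this is the snapshot file, optionally stop tracing while
 * the file is open (pause-on-trace).
 */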
4361 static struct trace_iterator *
4362 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4363 {
4364 	struct trace_array *tr = inode->i_private;
4365 	struct trace_iterator *iter;
4366 	int cpu;
4367 
4368 	if (tracing_disabled)
4369 		return ERR_PTR(-ENODEV);
4370 
4371 	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4372 	if (!iter)
4373 		return ERR_PTR(-ENOMEM);
4374 
4375 	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4376 				    GFP_KERNEL);
4377 	if (!iter->buffer_iter)
4378 		goto release;
4379 
4380 	/*
4381 	 * trace_find_next_entry() may need to save off iter->ent.
4382 	 * It will place it into the iter->temp buffer. As most
4383 	 * events are less than 128 bytes, allocate a buffer of that size.
4384 	 * If one is greater, then trace_find_next_entry() will
4385 	 * allocate a new buffer to adjust for the bigger iter->ent.
4386 	 * It's not critical if it fails to get allocated here.
4387 	 */
4388 	iter->temp = kmalloc(128, GFP_KERNEL);
4389 	if (iter->temp)
4390 		iter->temp_size = 128;
4391 
4392 	/*
4393 	 * trace_event_printf() may need to modify given format
4394 	 * string to replace %p with %px so that it shows real address
4395 	 * instead of a hash value. However, that is only needed for event
4396 	 * tracing; other tracers may not need it. Defer the allocation
4397 	 * until it is needed.
4398 	 */
4399 	iter->fmt = NULL;
4400 	iter->fmt_size = 0;
4401 
4402 	/*
4403 	 * We make a copy of the current tracer to avoid concurrent
4404 	 * changes to it while we are reading.
4405 	 */
4406 	mutex_lock(&trace_types_lock);
4407 	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4408 	if (!iter->trace)
4409 		goto fail;
4410 
4411 	*iter->trace = *tr->current_trace;
4412 
4413 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4414 		goto fail;
4415 
4416 	iter->tr = tr;
4417 
4418 #ifdef CONFIG_TRACER_MAX_TRACE
4419 	/* Currently only the top directory has a snapshot */
4420 	if (tr->current_trace->print_max || snapshot)
4421 		iter->array_buffer = &tr->max_buffer;
4422 	else
4423 #endif
4424 		iter->array_buffer = &tr->array_buffer;
4425 	iter->snapshot = snapshot;
4426 	iter->pos = -1;
4427 	iter->cpu_file = tracing_get_cpu(inode);
4428 	mutex_init(&iter->mutex);
4429 
4430 	/* Notify the tracer early; before we stop tracing. */
4431 	if (iter->trace->open)
4432 		iter->trace->open(iter);
4433 
4434 	/* Annotate start of buffers if we had overruns */
4435 	if (ring_buffer_overruns(iter->array_buffer->buffer))
4436 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
4437 
4438 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
4439 	if (trace_clocks[tr->clock_id].in_ns)
4440 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4441 
4442 	/*
4443 	 * If pause-on-trace is enabled, then stop the trace while
4444 	 * dumping, unless this is the "snapshot" file.
4445 	 */
4446 	if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4447 		tracing_stop_tr(tr);
4448 
4449 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4450 		for_each_tracing_cpu(cpu) {
4451 			iter->buffer_iter[cpu] =
4452 				ring_buffer_read_prepare(iter->array_buffer->buffer,
4453 							 cpu, GFP_KERNEL);
4454 		}
4455 		ring_buffer_read_prepare_sync();
4456 		for_each_tracing_cpu(cpu) {
4457 			ring_buffer_read_start(iter->buffer_iter[cpu]);
4458 			tracing_iter_reset(iter, cpu);
4459 		}
4460 	} else {
4461 		cpu = iter->cpu_file;
4462 		iter->buffer_iter[cpu] =
4463 			ring_buffer_read_prepare(iter->array_buffer->buffer,
4464 						 cpu, GFP_KERNEL);
4465 		ring_buffer_read_prepare_sync();
4466 		ring_buffer_read_start(iter->buffer_iter[cpu]);
4467 		tracing_iter_reset(iter, cpu);
4468 	}
4469 
4470 	mutex_unlock(&trace_types_lock);
4471 
4472 	return iter;
4473 
4474  fail:
4475 	mutex_unlock(&trace_types_lock);
4476 	kfree(iter->trace);
4477 	kfree(iter->temp);
4478 	kfree(iter->buffer_iter);
4479 release:
4480 	seq_release_private(inode, file);
4481 	return ERR_PTR(-ENOMEM);
4482 }
4483 
4484 int tracing_open_generic(struct inode *inode, struct file *filp)
4485 {
4486 	int ret;
4487 
4488 	ret = tracing_check_open_get_tr(NULL);
4489 	if (ret)
4490 		return ret;
4491 
4492 	filp->private_data = inode->i_private;
4493 	return 0;
4494 }
4495 
4496 bool tracing_is_disabled(void)
4497 {
4498 	return tracing_disabled ? true : false;
4499 }
4500 
4501 /*
4502  * Open and update trace_array ref count.
4503  * Must have the current trace_array passed to it.
4504  */
4505 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4506 {
4507 	struct trace_array *tr = inode->i_private;
4508 	int ret;
4509 
4510 	ret = tracing_check_open_get_tr(tr);
4511 	if (ret)
4512 		return ret;
4513 
4514 	filp->private_data = inode->i_private;
4515 
4516 	return 0;
4517 }
4518 
4519 static int tracing_release(struct inode *inode, struct file *file)
4520 {
4521 	struct trace_array *tr = inode->i_private;
4522 	struct seq_file *m = file->private_data;
4523 	struct trace_iterator *iter;
4524 	int cpu;
4525 
4526 	if (!(file->f_mode & FMODE_READ)) {
4527 		trace_array_put(tr);
4528 		return 0;
4529 	}
4530 
4531 	/* Writes do not use seq_file */
4532 	iter = m->private;
4533 	mutex_lock(&trace_types_lock);
4534 
4535 	for_each_tracing_cpu(cpu) {
4536 		if (iter->buffer_iter[cpu])
4537 			ring_buffer_read_finish(iter->buffer_iter[cpu]);
4538 	}
4539 
4540 	if (iter->trace && iter->trace->close)
4541 		iter->trace->close(iter);
4542 
4543 	if (!iter->snapshot && tr->stop_count)
4544 		/* reenable tracing if it was previously enabled */
4545 		tracing_start_tr(tr);
4546 
4547 	__trace_array_put(tr);
4548 
4549 	mutex_unlock(&trace_types_lock);
4550 
4551 	mutex_destroy(&iter->mutex);
4552 	free_cpumask_var(iter->started);
4553 	kfree(iter->fmt);
4554 	kfree(iter->temp);
4555 	kfree(iter->trace);
4556 	kfree(iter->buffer_iter);
4557 	seq_release_private(inode, file);
4558 
4559 	return 0;
4560 }
4561 
4562 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4563 {
4564 	struct trace_array *tr = inode->i_private;
4565 
4566 	trace_array_put(tr);
4567 	return 0;
4568 }
4569 
4570 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4571 {
4572 	struct trace_array *tr = inode->i_private;
4573 
4574 	trace_array_put(tr);
4575 
4576 	return single_release(inode, file);
4577 }
4578 
4579 static int tracing_open(struct inode *inode, struct file *file)
4580 {
4581 	struct trace_array *tr = inode->i_private;
4582 	struct trace_iterator *iter;
4583 	int ret;
4584 
4585 	ret = tracing_check_open_get_tr(tr);
4586 	if (ret)
4587 		return ret;
4588 
4589 	/* If this file was open for write, then erase contents */
4590 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4591 		int cpu = tracing_get_cpu(inode);
4592 		struct array_buffer *trace_buf = &tr->array_buffer;
4593 
4594 #ifdef CONFIG_TRACER_MAX_TRACE
4595 		if (tr->current_trace->print_max)
4596 			trace_buf = &tr->max_buffer;
4597 #endif
4598 
4599 		if (cpu == RING_BUFFER_ALL_CPUS)
4600 			tracing_reset_online_cpus(trace_buf);
4601 		else
4602 			tracing_reset_cpu(trace_buf, cpu);
4603 	}
4604 
4605 	if (file->f_mode & FMODE_READ) {
4606 		iter = __tracing_open(inode, file, false);
4607 		if (IS_ERR(iter))
4608 			ret = PTR_ERR(iter);
4609 		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4610 			iter->iter_flags |= TRACE_FILE_LAT_FMT;
4611 	}
4612 
4613 	if (ret < 0)
4614 		trace_array_put(tr);
4615 
4616 	return ret;
4617 }
4618 
4619 /*
4620  * Some tracers are not suitable for instance buffers.
4621  * A tracer is always available for the global array (toplevel)
4622  * or if it explicitly states that it is.
4623  */
4624 static bool
4625 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4626 {
4627 	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4628 }
4629 
4630 /* Find the next tracer that this trace array may use */
4631 static struct tracer *
4632 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4633 {
4634 	while (t && !trace_ok_for_array(t, tr))
4635 		t = t->next;
4636 
4637 	return t;
4638 }
4639 
4640 static void *
4641 t_next(struct seq_file *m, void *v, loff_t *pos)
4642 {
4643 	struct trace_array *tr = m->private;
4644 	struct tracer *t = v;
4645 
4646 	(*pos)++;
4647 
4648 	if (t)
4649 		t = get_tracer_for_array(tr, t->next);
4650 
4651 	return t;
4652 }
4653 
4654 static void *t_start(struct seq_file *m, loff_t *pos)
4655 {
4656 	struct trace_array *tr = m->private;
4657 	struct tracer *t;
4658 	loff_t l = 0;
4659 
4660 	mutex_lock(&trace_types_lock);
4661 
4662 	t = get_tracer_for_array(tr, trace_types);
4663 	for (; t && l < *pos; t = t_next(m, t, &l))
4664 			;
4665 
4666 	return t;
4667 }
4668 
4669 static void t_stop(struct seq_file *m, void *p)
4670 {
4671 	mutex_unlock(&trace_types_lock);
4672 }
4673 
4674 static int t_show(struct seq_file *m, void *v)
4675 {
4676 	struct tracer *t = v;
4677 
4678 	if (!t)
4679 		return 0;
4680 
4681 	seq_puts(m, t->name);
4682 	if (t->next)
4683 		seq_putc(m, ' ');
4684 	else
4685 		seq_putc(m, '\n');
4686 
4687 	return 0;
4688 }
4689 
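/*
 * These seq_file operations back the "available_tracers" file: a single
 * pass over trace_types printing the name of every tracer this instance
 * may use, separated by spaces.
 */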
4690 static const struct seq_operations show_traces_seq_ops = {
4691 	.start		= t_start,
4692 	.next		= t_next,
4693 	.stop		= t_stop,
4694 	.show		= t_show,
4695 };
4696 
4697 static int show_traces_open(struct inode *inode, struct file *file)
4698 {
4699 	struct trace_array *tr = inode->i_private;
4700 	struct seq_file *m;
4701 	int ret;
4702 
4703 	ret = tracing_check_open_get_tr(tr);
4704 	if (ret)
4705 		return ret;
4706 
4707 	ret = seq_open(file, &show_traces_seq_ops);
4708 	if (ret) {
4709 		trace_array_put(tr);
4710 		return ret;
4711 	}
4712 
4713 	m = file->private_data;
4714 	m->private = tr;
4715 
4716 	return 0;
4717 }
4718 
4719 static int show_traces_release(struct inode *inode, struct file *file)
4720 {
4721 	struct trace_array *tr = inode->i_private;
4722 
4723 	trace_array_put(tr);
4724 	return seq_release(inode, file);
4725 }
4726 
4727 static ssize_t
4728 tracing_write_stub(struct file *filp, const char __user *ubuf,
4729 		   size_t count, loff_t *ppos)
4730 {
4731 	return count;
4732 }
4733 
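/*
 * Only readers go through seq_file and thus have a position worth
 * seeking; a file opened write-only simply gets its position reset
 * to zero.
 */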
4734 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4735 {
4736 	int ret;
4737 
4738 	if (file->f_mode & FMODE_READ)
4739 		ret = seq_lseek(file, offset, whence);
4740 	else
4741 		file->f_pos = ret = 0;
4742 
4743 	return ret;
4744 }
4745 
4746 static const struct file_operations tracing_fops = {
4747 	.open		= tracing_open,
4748 	.read		= seq_read,
4749 	.write		= tracing_write_stub,
4750 	.llseek		= tracing_lseek,
4751 	.release	= tracing_release,
4752 };
4753 
4754 static const struct file_operations show_traces_fops = {
4755 	.open		= show_traces_open,
4756 	.read		= seq_read,
4757 	.llseek		= seq_lseek,
4758 	.release	= show_traces_release,
4759 };
4760 
4761 static ssize_t
4762 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4763 		     size_t count, loff_t *ppos)
4764 {
4765 	struct trace_array *tr = file_inode(filp)->i_private;
4766 	char *mask_str;
4767 	int len;
4768 
4769 	len = snprintf(NULL, 0, "%*pb\n",
4770 		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
4771 	mask_str = kmalloc(len, GFP_KERNEL);
4772 	if (!mask_str)
4773 		return -ENOMEM;
4774 
4775 	len = snprintf(mask_str, len, "%*pb\n",
4776 		       cpumask_pr_args(tr->tracing_cpumask));
4777 	if (len >= count) {
4778 		count = -EINVAL;
4779 		goto out_err;
4780 	}
4781 	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4782 
4783 out_err:
4784 	kfree(mask_str);
4785 
4786 	return count;
4787 }
4788 
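/*
 * Apply a new tracing cpumask.  CPUs leaving the mask have ring buffer
 * recording disabled, CPUs entering it have it re-enabled.  From user
 * space this is driven by the "tracing_cpumask" file, e.g.:
 *
 *	# echo 3 > tracing_cpumask	(trace only CPUs 0 and 1)
 */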
4789 int tracing_set_cpumask(struct trace_array *tr,
4790 			cpumask_var_t tracing_cpumask_new)
4791 {
4792 	int cpu;
4793 
4794 	if (!tr)
4795 		return -EINVAL;
4796 
4797 	local_irq_disable();
4798 	arch_spin_lock(&tr->max_lock);
4799 	for_each_tracing_cpu(cpu) {
4800 		/*
4801 		 * Increase/decrease the disabled counter if we are
4802 		 * about to flip a bit in the cpumask:
4803 		 */
4804 		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4805 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4806 			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4807 			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
4808 		}
4809 		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4810 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4811 			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4812 			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
4813 		}
4814 	}
4815 	arch_spin_unlock(&tr->max_lock);
4816 	local_irq_enable();
4817 
4818 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4819 
4820 	return 0;
4821 }
4822 
4823 static ssize_t
4824 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4825 		      size_t count, loff_t *ppos)
4826 {
4827 	struct trace_array *tr = file_inode(filp)->i_private;
4828 	cpumask_var_t tracing_cpumask_new;
4829 	int err;
4830 
4831 	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4832 		return -ENOMEM;
4833 
4834 	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4835 	if (err)
4836 		goto err_free;
4837 
4838 	err = tracing_set_cpumask(tr, tracing_cpumask_new);
4839 	if (err)
4840 		goto err_free;
4841 
4842 	free_cpumask_var(tracing_cpumask_new);
4843 
4844 	return count;
4845 
4846 err_free:
4847 	free_cpumask_var(tracing_cpumask_new);
4848 
4849 	return err;
4850 }
4851 
4852 static const struct file_operations tracing_cpumask_fops = {
4853 	.open		= tracing_open_generic_tr,
4854 	.read		= tracing_cpumask_read,
4855 	.write		= tracing_cpumask_write,
4856 	.release	= tracing_release_generic_tr,
4857 	.llseek		= generic_file_llseek,
4858 };
4859 
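/*
 * The "trace_options" file: reading lists every option, prefixed with
 * "no" when it is clear; writing an option name sets it and writing the
 * "no"-prefixed name clears it.
 */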
4860 static int tracing_trace_options_show(struct seq_file *m, void *v)
4861 {
4862 	struct tracer_opt *trace_opts;
4863 	struct trace_array *tr = m->private;
4864 	u32 tracer_flags;
4865 	int i;
4866 
4867 	mutex_lock(&trace_types_lock);
4868 	tracer_flags = tr->current_trace->flags->val;
4869 	trace_opts = tr->current_trace->flags->opts;
4870 
4871 	for (i = 0; trace_options[i]; i++) {
4872 		if (tr->trace_flags & (1 << i))
4873 			seq_printf(m, "%s\n", trace_options[i]);
4874 		else
4875 			seq_printf(m, "no%s\n", trace_options[i]);
4876 	}
4877 
4878 	for (i = 0; trace_opts[i].name; i++) {
4879 		if (tracer_flags & trace_opts[i].bit)
4880 			seq_printf(m, "%s\n", trace_opts[i].name);
4881 		else
4882 			seq_printf(m, "no%s\n", trace_opts[i].name);
4883 	}
4884 	mutex_unlock(&trace_types_lock);
4885 
4886 	return 0;
4887 }
4888 
4889 static int __set_tracer_option(struct trace_array *tr,
4890 			       struct tracer_flags *tracer_flags,
4891 			       struct tracer_opt *opts, int neg)
4892 {
4893 	struct tracer *trace = tracer_flags->trace;
4894 	int ret;
4895 
4896 	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4897 	if (ret)
4898 		return ret;
4899 
4900 	if (neg)
4901 		tracer_flags->val &= ~opts->bit;
4902 	else
4903 		tracer_flags->val |= opts->bit;
4904 	return 0;
4905 }
4906 
4907 /* Try to assign a tracer specific option */
4908 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4909 {
4910 	struct tracer *trace = tr->current_trace;
4911 	struct tracer_flags *tracer_flags = trace->flags;
4912 	struct tracer_opt *opts = NULL;
4913 	int i;
4914 
4915 	for (i = 0; tracer_flags->opts[i].name; i++) {
4916 		opts = &tracer_flags->opts[i];
4917 
4918 		if (strcmp(cmp, opts->name) == 0)
4919 			return __set_tracer_option(tr, trace->flags, opts, neg);
4920 	}
4921 
4922 	return -EINVAL;
4923 }
4924 
4925 /* Some tracers require overwrite to stay enabled */
4926 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4927 {
4928 	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4929 		return -1;
4930 
4931 	return 0;
4932 }
4933 
4934 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4935 {
4936 	if ((mask == TRACE_ITER_RECORD_TGID) ||
4937 	    (mask == TRACE_ITER_RECORD_CMD))
4938 		lockdep_assert_held(&event_mutex);
4939 
4940 	/* do nothing if flag is already set */
4941 	if (!!(tr->trace_flags & mask) == !!enabled)
4942 		return 0;
4943 
4944 	/* Give the tracer a chance to approve the change */
4945 	if (tr->current_trace->flag_changed)
4946 		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4947 			return -EINVAL;
4948 
4949 	if (enabled)
4950 		tr->trace_flags |= mask;
4951 	else
4952 		tr->trace_flags &= ~mask;
4953 
4954 	if (mask == TRACE_ITER_RECORD_CMD)
4955 		trace_event_enable_cmd_record(enabled);
4956 
4957 	if (mask == TRACE_ITER_RECORD_TGID) {
4958 		if (!tgid_map)
4959 			tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
4960 					   sizeof(*tgid_map),
4961 					   GFP_KERNEL);
4962 		if (!tgid_map) {
4963 			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4964 			return -ENOMEM;
4965 		}
4966 
4967 		trace_event_enable_tgid_record(enabled);
4968 	}
4969 
4970 	if (mask == TRACE_ITER_EVENT_FORK)
4971 		trace_event_follow_fork(tr, enabled);
4972 
4973 	if (mask == TRACE_ITER_FUNC_FORK)
4974 		ftrace_pid_follow_fork(tr, enabled);
4975 
4976 	if (mask == TRACE_ITER_OVERWRITE) {
4977 		ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
4978 #ifdef CONFIG_TRACER_MAX_TRACE
4979 		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4980 #endif
4981 	}
4982 
4983 	if (mask == TRACE_ITER_PRINTK) {
4984 		trace_printk_start_stop_comm(enabled);
4985 		trace_printk_control(enabled);
4986 	}
4987 
4988 	return 0;
4989 }
4990 
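/*
 * Set a single option from its textual name.  A leading "no" negates it,
 * e.g. "sym-offset" sets TRACE_ITER_SYM_OFFSET while "nosym-offset"
 * clears it.  Names that match no core option are handed to the current
 * tracer's private flags via set_tracer_option().
 */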
4991 int trace_set_options(struct trace_array *tr, char *option)
4992 {
4993 	char *cmp;
4994 	int neg = 0;
4995 	int ret;
4996 	size_t orig_len = strlen(option);
4997 	int len;
4998 
4999 	cmp = strstrip(option);
5000 
5001 	len = str_has_prefix(cmp, "no");
5002 	if (len)
5003 		neg = 1;
5004 
5005 	cmp += len;
5006 
5007 	mutex_lock(&event_mutex);
5008 	mutex_lock(&trace_types_lock);
5009 
5010 	ret = match_string(trace_options, -1, cmp);
5011 	/* If no option could be set, test the specific tracer options */
5012 	if (ret < 0)
5013 		ret = set_tracer_option(tr, cmp, neg);
5014 	else
5015 		ret = set_tracer_flag(tr, 1 << ret, !neg);
5016 
5017 	mutex_unlock(&trace_types_lock);
5018 	mutex_unlock(&event_mutex);
5019 
5020 	/*
5021 	 * If the first trailing whitespace is replaced with '\0' by strstrip,
5022 	 * turn it back into a space.
5023 	 */
5024 	if (orig_len > strlen(option))
5025 		option[strlen(option)] = ' ';
5026 
5027 	return ret;
5028 }
5029 
5030 static void __init apply_trace_boot_options(void)
5031 {
5032 	char *buf = trace_boot_options_buf;
5033 	char *option;
5034 
5035 	while (true) {
5036 		option = strsep(&buf, ",");
5037 
5038 		if (!option)
5039 			break;
5040 
5041 		if (*option)
5042 			trace_set_options(&global_trace, option);
5043 
5044 		/* Put back the comma to allow this to be called again */
5045 		if (buf)
5046 			*(buf - 1) = ',';
5047 	}
5048 }
5049 
5050 static ssize_t
5051 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5052 			size_t cnt, loff_t *ppos)
5053 {
5054 	struct seq_file *m = filp->private_data;
5055 	struct trace_array *tr = m->private;
5056 	char buf[64];
5057 	int ret;
5058 
5059 	if (cnt >= sizeof(buf))
5060 		return -EINVAL;
5061 
5062 	if (copy_from_user(buf, ubuf, cnt))
5063 		return -EFAULT;
5064 
5065 	buf[cnt] = 0;
5066 
5067 	ret = trace_set_options(tr, buf);
5068 	if (ret < 0)
5069 		return ret;
5070 
5071 	*ppos += cnt;
5072 
5073 	return cnt;
5074 }
5075 
5076 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5077 {
5078 	struct trace_array *tr = inode->i_private;
5079 	int ret;
5080 
5081 	ret = tracing_check_open_get_tr(tr);
5082 	if (ret)
5083 		return ret;
5084 
5085 	ret = single_open(file, tracing_trace_options_show, inode->i_private);
5086 	if (ret < 0)
5087 		trace_array_put(tr);
5088 
5089 	return ret;
5090 }
5091 
5092 static const struct file_operations tracing_iter_fops = {
5093 	.open		= tracing_trace_options_open,
5094 	.read		= seq_read,
5095 	.llseek		= seq_lseek,
5096 	.release	= tracing_single_release_tr,
5097 	.write		= tracing_trace_options_write,
5098 };
5099 
5100 static const char readme_msg[] =
5101 	"tracing mini-HOWTO:\n\n"
5102 	"# echo 0 > tracing_on : quick way to disable tracing\n"
5103 	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5104 	" Important files:\n"
5105 	"  trace\t\t\t- The static contents of the buffer\n"
5106 	"\t\t\t  To clear the buffer, write into this file: echo > trace\n"
5107 	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5108 	"  current_tracer\t- function and latency tracers\n"
5109 	"  available_tracers\t- list of configured tracers for current_tracer\n"
5110 	"  error_log\t- error log for failed commands (that support it)\n"
5111 	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
5112 	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
5113 	"  trace_clock\t\t- change the clock used to order events\n"
5114 	"       local:   Per cpu clock but may not be synced across CPUs\n"
5115 	"      global:   Synced across CPUs but slows tracing down.\n"
5116 	"     counter:   Not a clock, but just an increment\n"
5117 	"      uptime:   Jiffy counter from time of boot\n"
5118 	"        perf:   Same clock that perf events use\n"
5119 #ifdef CONFIG_X86_64
5120 	"     x86-tsc:   TSC cycle counter\n"
5121 #endif
5122 	"\n  timestamp_mode\t- view the mode used to timestamp events\n"
5123 	"       delta:   Delta difference against a buffer-wide timestamp\n"
5124 	"    absolute:   Absolute (standalone) timestamp\n"
5125 	"\n  trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
5126 	"\n  trace_marker_raw\t\t- Writes into this file are written as binary data into the kernel buffer\n"
5127 	"  tracing_cpumask\t- Limit which CPUs to trace\n"
5128 	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5129 	"\t\t\t  Remove sub-buffer with rmdir\n"
5130 	"  trace_options\t\t- Set format or modify how tracing happens\n"
5131 	"\t\t\t  Disable an option by prefixing 'no' to the\n"
5132 	"\t\t\t  option name\n"
5133 	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5134 #ifdef CONFIG_DYNAMIC_FTRACE
5135 	"\n  available_filter_functions - list of functions that can be filtered on\n"
5136 	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
5137 	"\t\t\t  functions\n"
5138 	"\t     accepts: func_full_name or glob-matching-pattern\n"
5139 	"\t     modules: Can select a group via module\n"
5140 	"\t      Format: :mod:<module-name>\n"
5141 	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
5142 	"\t    triggers: a command to perform when function is hit\n"
5143 	"\t      Format: <function>:<trigger>[:count]\n"
5144 	"\t     trigger: traceon, traceoff\n"
5145 	"\t\t      enable_event:<system>:<event>\n"
5146 	"\t\t      disable_event:<system>:<event>\n"
5147 #ifdef CONFIG_STACKTRACE
5148 	"\t\t      stacktrace\n"
5149 #endif
5150 #ifdef CONFIG_TRACER_SNAPSHOT
5151 	"\t\t      snapshot\n"
5152 #endif
5153 	"\t\t      dump\n"
5154 	"\t\t      cpudump\n"
5155 	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
5156 	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
5157 	"\t     The first one will disable tracing every time do_fault is hit\n"
5158 	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
5159 	"\t       The first time do_trap is hit and it disables tracing, the\n"
5160 	"\t       counter will decrement to 2. If tracing is already disabled,\n"
5161 	"\t       the counter will not decrement. It only decrements when the\n"
5162 	"\t       trigger did work\n"
5163 	"\t     To remove trigger without count:\n"
5164 	"\t       echo '!<function>:<trigger>' > set_ftrace_filter\n"
5165 	"\t     To remove trigger with a count:\n"
5166 	"\t       echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
5167 	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
5168 	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5169 	"\t    modules: Can select a group via module command :mod:\n"
5170 	"\t    Does not accept triggers\n"
5171 #endif /* CONFIG_DYNAMIC_FTRACE */
5172 #ifdef CONFIG_FUNCTION_TRACER
5173 	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5174 	"\t\t    (function)\n"
5175 	"  set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5176 	"\t\t    (function)\n"
5177 #endif
5178 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5179 	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5180 	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5181 	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5182 #endif
5183 #ifdef CONFIG_TRACER_SNAPSHOT
5184 	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
5185 	"\t\t\t  snapshot buffer. Read the contents for more\n"
5186 	"\t\t\t  information\n"
5187 #endif
5188 #ifdef CONFIG_STACK_TRACER
5189 	"  stack_trace\t\t- Shows the max stack trace when active\n"
5190 	"  stack_max_size\t- Shows current max stack size that was traced\n"
5191 	"\t\t\t  Write into this file to reset the max size (trigger a\n"
5192 	"\t\t\t  new trace)\n"
5193 #ifdef CONFIG_DYNAMIC_FTRACE
5194 	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5195 	"\t\t\t  traces\n"
5196 #endif
5197 #endif /* CONFIG_STACK_TRACER */
5198 #ifdef CONFIG_DYNAMIC_EVENTS
5199 	"  dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5200 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5201 #endif
5202 #ifdef CONFIG_KPROBE_EVENTS
5203 	"  kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5204 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5205 #endif
5206 #ifdef CONFIG_UPROBE_EVENTS
5207 	"  uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5208 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5209 #endif
5210 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5211 	"\t  accepts: event-definitions (one definition per line)\n"
5212 	"\t   Format: p[:[<group>/]<event>] <place> [<args>]\n"
5213 	"\t           r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5214 #ifdef CONFIG_HIST_TRIGGERS
5215 	"\t           s:[synthetic/]<event> <field> [<field>]\n"
5216 #endif
5217 	"\t           -:[<group>/]<event>\n"
5218 #ifdef CONFIG_KPROBE_EVENTS
5219 	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5220 	"\t    place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5221 #endif
5222 #ifdef CONFIG_UPROBE_EVENTS
5223 	"\t    place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5224 #endif
5225 	"\t     args: <name>=fetcharg[:type]\n"
5226 	"\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
5227 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5228 	"\t           $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5229 #else
5230 	"\t           $stack<index>, $stack, $retval, $comm,\n"
5231 #endif
5232 	"\t           +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5233 	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5234 	"\t           b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5235 	"\t           <type>\\[<array-size>\\]\n"
5236 #ifdef CONFIG_HIST_TRIGGERS
5237 	"\t    field: <stype> <name>;\n"
5238 	"\t    stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5239 	"\t           [unsigned] char/int/long\n"
5240 #endif
5241 #endif
5242 	"  events/\t\t- Directory containing all trace event subsystems:\n"
5243 	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5244 	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
5245 	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5246 	"\t\t\t  events\n"
5247 	"      filter\t\t- If set, only events passing filter are traced\n"
5248 	"  events/<system>/<event>/\t- Directory containing control files for\n"
5249 	"\t\t\t  <event>:\n"
5250 	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5251 	"      filter\t\t- If set, only events passing filter are traced\n"
5252 	"      trigger\t\t- If set, a command to perform when event is hit\n"
5253 	"\t    Format: <trigger>[:count][if <filter>]\n"
5254 	"\t   trigger: traceon, traceoff\n"
5255 	"\t            enable_event:<system>:<event>\n"
5256 	"\t            disable_event:<system>:<event>\n"
5257 #ifdef CONFIG_HIST_TRIGGERS
5258 	"\t            enable_hist:<system>:<event>\n"
5259 	"\t            disable_hist:<system>:<event>\n"
5260 #endif
5261 #ifdef CONFIG_STACKTRACE
5262 	"\t\t    stacktrace\n"
5263 #endif
5264 #ifdef CONFIG_TRACER_SNAPSHOT
5265 	"\t\t    snapshot\n"
5266 #endif
5267 #ifdef CONFIG_HIST_TRIGGERS
5268 	"\t\t    hist (see below)\n"
5269 #endif
5270 	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
5271 	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
5272 	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5273 	"\t                  events/block/block_unplug/trigger\n"
5274 	"\t   The first disables tracing every time block_unplug is hit.\n"
5275 	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
5276 	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
5277 	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5278 	"\t   Like function triggers, the counter is only decremented if it\n"
5279 	"\t    enabled or disabled tracing.\n"
5280 	"\t   To remove a trigger without a count:\n"
5281 	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
5282 	"\t   To remove a trigger with a count:\n"
5283 	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
5284 	"\t   Filters can be ignored when removing a trigger.\n"
5285 #ifdef CONFIG_HIST_TRIGGERS
5286 	"      hist trigger\t- If set, event hits are aggregated into a hash table\n"
5287 	"\t    Format: hist:keys=<field1[,field2,...]>\n"
5288 	"\t            [:values=<field1[,field2,...]>]\n"
5289 	"\t            [:sort=<field1[,field2,...]>]\n"
5290 	"\t            [:size=#entries]\n"
5291 	"\t            [:pause][:continue][:clear]\n"
5292 	"\t            [:name=histname1]\n"
5293 	"\t            [:<handler>.<action>]\n"
5294 	"\t            [if <filter>]\n\n"
5295 	"\t    When a matching event is hit, an entry is added to a hash\n"
5296 	"\t    table using the key(s) and value(s) named, and the value of a\n"
5297 	"\t    sum called 'hitcount' is incremented.  Keys and values\n"
5298 	"\t    correspond to fields in the event's format description.  Keys\n"
5299 	"\t    can be any field, or the special string 'stacktrace'.\n"
5300 	"\t    Compound keys consisting of up to two fields can be specified\n"
5301 	"\t    by the 'keys' keyword.  Values must correspond to numeric\n"
5302 	"\t    fields.  Sort keys consisting of up to two fields can be\n"
5303 	"\t    specified using the 'sort' keyword.  The sort direction can\n"
5304 	"\t    be modified by appending '.descending' or '.ascending' to a\n"
5305 	"\t    sort field.  The 'size' parameter can be used to specify more\n"
5306 	"\t    or fewer than the default 2048 entries for the hashtable size.\n"
5307 	"\t    If a hist trigger is given a name using the 'name' parameter,\n"
5308 	"\t    its histogram data will be shared with other triggers of the\n"
5309 	"\t    same name, and trigger hits will update this common data.\n\n"
5310 	"\t    Reading the 'hist' file for the event will dump the hash\n"
5311 	"\t    table in its entirety to stdout.  If there are multiple hist\n"
5312 	"\t    triggers attached to an event, there will be a table for each\n"
5313 	"\t    trigger in the output.  The table displayed for a named\n"
5314 	"\t    trigger will be the same as any other instance having the\n"
5315 	"\t    same name.  The default format used to display a given field\n"
5316 	"\t    can be modified by appending any of the following modifiers\n"
5317 	"\t    to the field name, as applicable:\n\n"
5318 	"\t            .hex        display a number as a hex value\n"
5319 	"\t            .sym        display an address as a symbol\n"
5320 	"\t            .sym-offset display an address as a symbol and offset\n"
5321 	"\t            .execname   display a common_pid as a program name\n"
5322 	"\t            .syscall    display a syscall id as a syscall name\n"
5323 	"\t            .log2       display log2 value rather than raw number\n"
5324 	"\t            .usecs      display a common_timestamp in microseconds\n\n"
5325 	"\t    The 'pause' parameter can be used to pause an existing hist\n"
5326 	"\t    trigger or to start a hist trigger but not log any events\n"
5327 	"\t    until told to do so.  'continue' can be used to start or\n"
5328 	"\t    restart a paused hist trigger.\n\n"
5329 	"\t    The 'clear' parameter will clear the contents of a running\n"
5330 	"\t    hist trigger and leave its current paused/active state\n"
5331 	"\t    unchanged.\n\n"
5332 	"\t    The enable_hist and disable_hist triggers can be used to\n"
5333 	"\t    have one event conditionally start and stop another event's\n"
5334 	"\t    already-attached hist trigger.  The syntax is analogous to\n"
5335 	"\t    the enable_event and disable_event triggers.\n\n"
5336 	"\t    Hist trigger handlers and actions are executed whenever a\n"
5337 	"\t    histogram entry is added or updated.  They take the form:\n\n"
5338 	"\t        <handler>.<action>\n\n"
5339 	"\t    The available handlers are:\n\n"
5340 	"\t        onmatch(matching.event)  - invoke on addition or update\n"
5341 	"\t        onmax(var)               - invoke if var exceeds current max\n"
5342 	"\t        onchange(var)            - invoke action if var changes\n\n"
5343 	"\t    The available actions are:\n\n"
5344 	"\t        trace(<synthetic_event>,param list)  - generate synthetic event\n"
5345 	"\t        save(field,...)                      - save current event fields\n"
5346 #ifdef CONFIG_TRACER_SNAPSHOT
5347 	"\t        snapshot()                           - snapshot the trace buffer\n\n"
5348 #endif
5349 #ifdef CONFIG_SYNTH_EVENTS
5350 	"  events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5351 	"\t  Write into this file to define/undefine new synthetic events.\n"
5352 	"\t     example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5353 #endif
5354 #endif
5355 ;
5356 
5357 static ssize_t
5358 tracing_readme_read(struct file *filp, char __user *ubuf,
5359 		       size_t cnt, loff_t *ppos)
5360 {
5361 	return simple_read_from_buffer(ubuf, cnt, ppos,
5362 					readme_msg, strlen(readme_msg));
5363 }
5364 
5365 static const struct file_operations tracing_readme_fops = {
5366 	.open		= tracing_open_generic,
5367 	.read		= tracing_readme_read,
5368 	.llseek		= generic_file_llseek,
5369 };
5370 
5371 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5372 {
5373 	int *ptr = v;
5374 
5375 	if (*pos || m->count)
5376 		ptr++;
5377 
5378 	(*pos)++;
5379 
5380 	for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5381 		if (trace_find_tgid(*ptr))
5382 			return ptr;
5383 	}
5384 
5385 	return NULL;
5386 }
5387 
5388 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5389 {
5390 	void *v;
5391 	loff_t l = 0;
5392 
5393 	if (!tgid_map)
5394 		return NULL;
5395 
5396 	v = &tgid_map[0];
5397 	while (l <= *pos) {
5398 		v = saved_tgids_next(m, v, &l);
5399 		if (!v)
5400 			return NULL;
5401 	}
5402 
5403 	return v;
5404 }
5405 
5406 static void saved_tgids_stop(struct seq_file *m, void *v)
5407 {
5408 }
5409 
5410 static int saved_tgids_show(struct seq_file *m, void *v)
5411 {
5412 	int pid = (int *)v - tgid_map;
5413 
5414 	seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5415 	return 0;
5416 }
5417 
5418 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5419 	.start		= saved_tgids_start,
5420 	.stop		= saved_tgids_stop,
5421 	.next		= saved_tgids_next,
5422 	.show		= saved_tgids_show,
5423 };
5424 
5425 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5426 {
5427 	int ret;
5428 
5429 	ret = tracing_check_open_get_tr(NULL);
5430 	if (ret)
5431 		return ret;
5432 
5433 	return seq_open(filp, &tracing_saved_tgids_seq_ops);
5434 }
5435 
5436 
5437 static const struct file_operations tracing_saved_tgids_fops = {
5438 	.open		= tracing_saved_tgids_open,
5439 	.read		= seq_read,
5440 	.llseek		= seq_lseek,
5441 	.release	= seq_release,
5442 };
5443 
5444 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5445 {
5446 	unsigned int *ptr = v;
5447 
5448 	if (*pos || m->count)
5449 		ptr++;
5450 
5451 	(*pos)++;
5452 
5453 	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5454 	     ptr++) {
5455 		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5456 			continue;
5457 
5458 		return ptr;
5459 	}
5460 
5461 	return NULL;
5462 }
5463 
5464 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5465 {
5466 	void *v;
5467 	loff_t l = 0;
5468 
5469 	preempt_disable();
5470 	arch_spin_lock(&trace_cmdline_lock);
5471 
5472 	v = &savedcmd->map_cmdline_to_pid[0];
5473 	while (l <= *pos) {
5474 		v = saved_cmdlines_next(m, v, &l);
5475 		if (!v)
5476 			return NULL;
5477 	}
5478 
5479 	return v;
5480 }
5481 
5482 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5483 {
5484 	arch_spin_unlock(&trace_cmdline_lock);
5485 	preempt_enable();
5486 }
5487 
5488 static int saved_cmdlines_show(struct seq_file *m, void *v)
5489 {
5490 	char buf[TASK_COMM_LEN];
5491 	unsigned int *pid = v;
5492 
5493 	__trace_find_cmdline(*pid, buf);
5494 	seq_printf(m, "%d %s\n", *pid, buf);
5495 	return 0;
5496 }
5497 
5498 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5499 	.start		= saved_cmdlines_start,
5500 	.next		= saved_cmdlines_next,
5501 	.stop		= saved_cmdlines_stop,
5502 	.show		= saved_cmdlines_show,
5503 };
5504 
5505 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5506 {
5507 	int ret;
5508 
5509 	ret = tracing_check_open_get_tr(NULL);
5510 	if (ret)
5511 		return ret;
5512 
5513 	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5514 }
5515 
5516 static const struct file_operations tracing_saved_cmdlines_fops = {
5517 	.open		= tracing_saved_cmdlines_open,
5518 	.read		= seq_read,
5519 	.llseek		= seq_lseek,
5520 	.release	= seq_release,
5521 };
5522 
5523 static ssize_t
5524 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5525 				 size_t cnt, loff_t *ppos)
5526 {
5527 	char buf[64];
5528 	int r;
5529 
5530 	arch_spin_lock(&trace_cmdline_lock);
5531 	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5532 	arch_spin_unlock(&trace_cmdline_lock);
5533 
5534 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5535 }
5536 
5537 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5538 {
5539 	kfree(s->saved_cmdlines);
5540 	kfree(s->map_cmdline_to_pid);
5541 	kfree(s);
5542 }
5543 
5544 static int tracing_resize_saved_cmdlines(unsigned int val)
5545 {
5546 	struct saved_cmdlines_buffer *s, *savedcmd_temp;
5547 
5548 	s = kmalloc(sizeof(*s), GFP_KERNEL);
5549 	if (!s)
5550 		return -ENOMEM;
5551 
5552 	if (allocate_cmdlines_buffer(val, s) < 0) {
5553 		kfree(s);
5554 		return -ENOMEM;
5555 	}
5556 
5557 	arch_spin_lock(&trace_cmdline_lock);
5558 	savedcmd_temp = savedcmd;
5559 	savedcmd = s;
5560 	arch_spin_unlock(&trace_cmdline_lock);
5561 	free_saved_cmdlines_buffer(savedcmd_temp);
5562 
5563 	return 0;
5564 }
5565 
5566 static ssize_t
5567 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5568 				  size_t cnt, loff_t *ppos)
5569 {
5570 	unsigned long val;
5571 	int ret;
5572 
5573 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5574 	if (ret)
5575 		return ret;
5576 
5577 	/* must have at least 1 entry and at most PID_MAX_DEFAULT */
5578 	if (!val || val > PID_MAX_DEFAULT)
5579 		return -EINVAL;
5580 
5581 	ret = tracing_resize_saved_cmdlines((unsigned int)val);
5582 	if (ret < 0)
5583 		return ret;
5584 
5585 	*ppos += cnt;
5586 
5587 	return cnt;
5588 }
5589 
5590 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5591 	.open		= tracing_open_generic,
5592 	.read		= tracing_saved_cmdlines_size_read,
5593 	.write		= tracing_saved_cmdlines_size_write,
5594 };
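
/*
 * Example (illustrative): the saved cmdlines cache size can be changed
 * from user space through this file, assuming tracefs is mounted at the
 * default /sys/kernel/tracing location:
 *
 *	echo 4096 > /sys/kernel/tracing/saved_cmdlines_size
 *
 * The value must be between 1 and PID_MAX_DEFAULT, as checked in
 * tracing_saved_cmdlines_size_write() above.
 */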
5595 
5596 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5597 static union trace_eval_map_item *
5598 update_eval_map(union trace_eval_map_item *ptr)
5599 {
5600 	if (!ptr->map.eval_string) {
5601 		if (ptr->tail.next) {
5602 			ptr = ptr->tail.next;
5603 			/* Set ptr to the next real item (skip head) */
5604 			ptr++;
5605 		} else
5606 			return NULL;
5607 	}
5608 	return ptr;
5609 }
5610 
5611 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5612 {
5613 	union trace_eval_map_item *ptr = v;
5614 
5615 	/*
5616 	 * Paranoid! If ptr points to end, we don't want to increment past it.
5617 	 * This really should never happen.
5618 	 */
5619 	(*pos)++;
5620 	ptr = update_eval_map(ptr);
5621 	if (WARN_ON_ONCE(!ptr))
5622 		return NULL;
5623 
5624 	ptr++;
5625 	ptr = update_eval_map(ptr);
5626 
5627 	return ptr;
5628 }
5629 
5630 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5631 {
5632 	union trace_eval_map_item *v;
5633 	loff_t l = 0;
5634 
5635 	mutex_lock(&trace_eval_mutex);
5636 
5637 	v = trace_eval_maps;
5638 	if (v)
5639 		v++;
5640 
5641 	while (v && l < *pos) {
5642 		v = eval_map_next(m, v, &l);
5643 	}
5644 
5645 	return v;
5646 }
5647 
5648 static void eval_map_stop(struct seq_file *m, void *v)
5649 {
5650 	mutex_unlock(&trace_eval_mutex);
5651 }
5652 
5653 static int eval_map_show(struct seq_file *m, void *v)
5654 {
5655 	union trace_eval_map_item *ptr = v;
5656 
5657 	seq_printf(m, "%s %ld (%s)\n",
5658 		   ptr->map.eval_string, ptr->map.eval_value,
5659 		   ptr->map.system);
5660 
5661 	return 0;
5662 }
5663 
5664 static const struct seq_operations tracing_eval_map_seq_ops = {
5665 	.start		= eval_map_start,
5666 	.next		= eval_map_next,
5667 	.stop		= eval_map_stop,
5668 	.show		= eval_map_show,
5669 };
5670 
5671 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5672 {
5673 	int ret;
5674 
5675 	ret = tracing_check_open_get_tr(NULL);
5676 	if (ret)
5677 		return ret;
5678 
5679 	return seq_open(filp, &tracing_eval_map_seq_ops);
5680 }
5681 
5682 static const struct file_operations tracing_eval_map_fops = {
5683 	.open		= tracing_eval_map_open,
5684 	.read		= seq_read,
5685 	.llseek		= seq_lseek,
5686 	.release	= seq_release,
5687 };
5688 
5689 static inline union trace_eval_map_item *
5690 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5691 {
5692 	/* Return tail of array given the head */
5693 	return ptr + ptr->head.length + 1;
5694 }
5695 
5696 static void
5697 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5698 			   int len)
5699 {
5700 	struct trace_eval_map **stop;
5701 	struct trace_eval_map **map;
5702 	union trace_eval_map_item *map_array;
5703 	union trace_eval_map_item *ptr;
5704 
5705 	stop = start + len;
5706 
5707 	/*
5708 	 * The trace_eval_maps contains the map plus a head and tail item,
5709 	 * where the head holds the module and length of array, and the
5710 	 * tail holds a pointer to the next list.
5711 	 */
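	/*
	 * Illustrative layout of one such allocation (len map entries),
	 * matching the code below:
	 *
	 *	map_array[0]         head  (mod, length = len)
	 *	map_array[1..len]    copies of the trace_eval_map entries
	 *	map_array[len + 1]   tail  (zeroed; tail.next chains the next array)
	 */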
5712 	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5713 	if (!map_array) {
5714 		pr_warn("Unable to allocate trace eval mapping\n");
5715 		return;
5716 	}
5717 
5718 	mutex_lock(&trace_eval_mutex);
5719 
5720 	if (!trace_eval_maps)
5721 		trace_eval_maps = map_array;
5722 	else {
5723 		ptr = trace_eval_maps;
5724 		for (;;) {
5725 			ptr = trace_eval_jmp_to_tail(ptr);
5726 			if (!ptr->tail.next)
5727 				break;
5728 			ptr = ptr->tail.next;
5729 
5730 		}
5731 		ptr->tail.next = map_array;
5732 	}
5733 	map_array->head.mod = mod;
5734 	map_array->head.length = len;
5735 	map_array++;
5736 
5737 	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5738 		map_array->map = **map;
5739 		map_array++;
5740 	}
5741 	memset(map_array, 0, sizeof(*map_array));
5742 
5743 	mutex_unlock(&trace_eval_mutex);
5744 }
5745 
5746 static void trace_create_eval_file(struct dentry *d_tracer)
5747 {
5748 	trace_create_file("eval_map", 0444, d_tracer,
5749 			  NULL, &tracing_eval_map_fops);
5750 }
5751 
5752 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5753 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5754 static inline void trace_insert_eval_map_file(struct module *mod,
5755 			      struct trace_eval_map **start, int len) { }
5756 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5757 
5758 static void trace_insert_eval_map(struct module *mod,
5759 				  struct trace_eval_map **start, int len)
5760 {
5761 	struct trace_eval_map **map;
5762 
5763 	if (len <= 0)
5764 		return;
5765 
5766 	map = start;
5767 
5768 	trace_event_eval_update(map, len);
5769 
5770 	trace_insert_eval_map_file(mod, start, len);
5771 }
5772 
5773 static ssize_t
5774 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5775 		       size_t cnt, loff_t *ppos)
5776 {
5777 	struct trace_array *tr = filp->private_data;
5778 	char buf[MAX_TRACER_SIZE+2];
5779 	int r;
5780 
5781 	mutex_lock(&trace_types_lock);
5782 	r = sprintf(buf, "%s\n", tr->current_trace->name);
5783 	mutex_unlock(&trace_types_lock);
5784 
5785 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5786 }
5787 
5788 int tracer_init(struct tracer *t, struct trace_array *tr)
5789 {
5790 	tracing_reset_online_cpus(&tr->array_buffer);
5791 	return t->init(tr);
5792 }
5793 
5794 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
5795 {
5796 	int cpu;
5797 
5798 	for_each_tracing_cpu(cpu)
5799 		per_cpu_ptr(buf->data, cpu)->entries = val;
5800 }
5801 
5802 #ifdef CONFIG_TRACER_MAX_TRACE
5803 /* resize @trace_buf's buffer to the size of @size_buf's entries */
5804 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5805 					struct array_buffer *size_buf, int cpu_id)
5806 {
5807 	int cpu, ret = 0;
5808 
5809 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
5810 		for_each_tracing_cpu(cpu) {
5811 			ret = ring_buffer_resize(trace_buf->buffer,
5812 				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5813 			if (ret < 0)
5814 				break;
5815 			per_cpu_ptr(trace_buf->data, cpu)->entries =
5816 				per_cpu_ptr(size_buf->data, cpu)->entries;
5817 		}
5818 	} else {
5819 		ret = ring_buffer_resize(trace_buf->buffer,
5820 				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5821 		if (ret == 0)
5822 			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5823 				per_cpu_ptr(size_buf->data, cpu_id)->entries;
5824 	}
5825 
5826 	return ret;
5827 }
5828 #endif /* CONFIG_TRACER_MAX_TRACE */
5829 
5830 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5831 					unsigned long size, int cpu)
5832 {
5833 	int ret;
5834 
5835 	/*
5836 	 * If kernel or user changes the size of the ring buffer
5837 	 * we use the size that was given, and we can forget about
5838 	 * expanding it later.
5839 	 */
5840 	ring_buffer_expanded = true;
5841 
5842 	/* May be called before buffers are initialized */
5843 	if (!tr->array_buffer.buffer)
5844 		return 0;
5845 
5846 	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5847 	if (ret < 0)
5848 		return ret;
5849 
5850 #ifdef CONFIG_TRACER_MAX_TRACE
5851 	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5852 	    !tr->current_trace->use_max_tr)
5853 		goto out;
5854 
5855 	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5856 	if (ret < 0) {
5857 		int r = resize_buffer_duplicate_size(&tr->array_buffer,
5858 						     &tr->array_buffer, cpu);
5859 		if (r < 0) {
5860 			/*
5861 			 * AARGH! We are left with a different
5862 			 * sized max buffer!
5863 			 * The max buffer is our "snapshot" buffer.
5864 			 * When a tracer needs a snapshot (one of the
5865 			 * latency tracers), it swaps the max buffer
5866 			 * with the saved snapshot. We succeeded in
5867 			 * updating the size of the main buffer, but
5868 			 * failed to update the size of the max buffer.
5869 			 * When we then tried to reset the main buffer
5870 			 * to its original size, that failed too. This
5871 			 * is very unlikely to happen, but if it does,
5872 			 * warn and kill all tracing.
5873 			 */
5874 			WARN_ON(1);
5875 			tracing_disabled = 1;
5876 		}
5877 		return ret;
5878 	}
5879 
5880 	if (cpu == RING_BUFFER_ALL_CPUS)
5881 		set_buffer_entries(&tr->max_buffer, size);
5882 	else
5883 		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5884 
5885  out:
5886 #endif /* CONFIG_TRACER_MAX_TRACE */
5887 
5888 	if (cpu == RING_BUFFER_ALL_CPUS)
5889 		set_buffer_entries(&tr->array_buffer, size);
5890 	else
5891 		per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
5892 
5893 	return ret;
5894 }
5895 
5896 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5897 				  unsigned long size, int cpu_id)
5898 {
5899 	int ret = size;
5900 
5901 	mutex_lock(&trace_types_lock);
5902 
5903 	if (cpu_id != RING_BUFFER_ALL_CPUS) {
5904 		/* make sure this cpu is enabled in the mask */
5905 		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5906 			ret = -EINVAL;
5907 			goto out;
5908 		}
5909 	}
5910 
5911 	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5912 	if (ret < 0)
5913 		ret = -ENOMEM;
5914 
5915 out:
5916 	mutex_unlock(&trace_types_lock);
5917 
5918 	return ret;
5919 }
5920 
5921 
5922 /**
5923  * tracing_update_buffers - used by tracing facility to expand ring buffers
5924  *
5925  * To save memory on systems where tracing is compiled in but never
5926  * used, the ring buffers are set to a minimum size. Once a user
5927  * starts to use the tracing facility, the buffers need to grow to
5928  * their default size.
5929  *
5930  * This function is to be called when a tracer is about to be used.
5931  */
5932 int tracing_update_buffers(void)
5933 {
5934 	int ret = 0;
5935 
5936 	mutex_lock(&trace_types_lock);
5937 	if (!ring_buffer_expanded)
5938 		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5939 						RING_BUFFER_ALL_CPUS);
5940 	mutex_unlock(&trace_types_lock);
5941 
5942 	return ret;
5943 }
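
/*
 * A minimal usage sketch (illustrative, not tied to a specific caller):
 * code that is about to enable tracing expands the buffers first and
 * propagates any failure:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 */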
5944 
5945 struct trace_option_dentry;
5946 
5947 static void
5948 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5949 
5950 /*
5951  * Used to clear out the tracer before deletion of an instance.
5952  * Must have trace_types_lock held.
5953  */
5954 static void tracing_set_nop(struct trace_array *tr)
5955 {
5956 	if (tr->current_trace == &nop_trace)
5957 		return;
5958 
5959 	tr->current_trace->enabled--;
5960 
5961 	if (tr->current_trace->reset)
5962 		tr->current_trace->reset(tr);
5963 
5964 	tr->current_trace = &nop_trace;
5965 }
5966 
5967 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5968 {
5969 	/* Only enable if the directory has been created already. */
5970 	if (!tr->dir)
5971 		return;
5972 
5973 	create_trace_option_files(tr, t);
5974 }
5975 
5976 int tracing_set_tracer(struct trace_array *tr, const char *buf)
5977 {
5978 	struct tracer *t;
5979 #ifdef CONFIG_TRACER_MAX_TRACE
5980 	bool had_max_tr;
5981 #endif
5982 	int ret = 0;
5983 
5984 	mutex_lock(&trace_types_lock);
5985 
5986 	if (!ring_buffer_expanded) {
5987 		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5988 						RING_BUFFER_ALL_CPUS);
5989 		if (ret < 0)
5990 			goto out;
5991 		ret = 0;
5992 	}
5993 
5994 	for (t = trace_types; t; t = t->next) {
5995 		if (strcmp(t->name, buf) == 0)
5996 			break;
5997 	}
5998 	if (!t) {
5999 		ret = -EINVAL;
6000 		goto out;
6001 	}
6002 	if (t == tr->current_trace)
6003 		goto out;
6004 
6005 #ifdef CONFIG_TRACER_SNAPSHOT
6006 	if (t->use_max_tr) {
6007 		arch_spin_lock(&tr->max_lock);
6008 		if (tr->cond_snapshot)
6009 			ret = -EBUSY;
6010 		arch_spin_unlock(&tr->max_lock);
6011 		if (ret)
6012 			goto out;
6013 	}
6014 #endif
6015 	/* Some tracers won't work on kernel command line */
6016 	if (system_state < SYSTEM_RUNNING && t->noboot) {
6017 		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6018 			t->name);
6019 		goto out;
6020 	}
6021 
6022 	/* Some tracers are only allowed for the top level buffer */
6023 	if (!trace_ok_for_array(t, tr)) {
6024 		ret = -EINVAL;
6025 		goto out;
6026 	}
6027 
6028 	/* If trace pipe files are being read, we can't change the tracer */
6029 	if (tr->trace_ref) {
6030 		ret = -EBUSY;
6031 		goto out;
6032 	}
6033 
6034 	trace_branch_disable();
6035 
6036 	tr->current_trace->enabled--;
6037 
6038 	if (tr->current_trace->reset)
6039 		tr->current_trace->reset(tr);
6040 
6041 	/* Current trace needs to be nop_trace before synchronize_rcu */
6042 	tr->current_trace = &nop_trace;
6043 
6044 #ifdef CONFIG_TRACER_MAX_TRACE
6045 	had_max_tr = tr->allocated_snapshot;
6046 
6047 	if (had_max_tr && !t->use_max_tr) {
6048 		/*
6049 		 * We need to make sure that the update_max_tr sees that
6050 		 * current_trace changed to nop_trace to keep it from
6051 		 * swapping the buffers after we resize it.
6052 		 * The update_max_tr is called with interrupts disabled,
6053 		 * so a synchronize_rcu() is sufficient.
6054 		 */
6055 		synchronize_rcu();
6056 		free_snapshot(tr);
6057 	}
6058 #endif
6059 
6060 #ifdef CONFIG_TRACER_MAX_TRACE
6061 	if (t->use_max_tr && !had_max_tr) {
6062 		ret = tracing_alloc_snapshot_instance(tr);
6063 		if (ret < 0)
6064 			goto out;
6065 	}
6066 #endif
6067 
6068 	if (t->init) {
6069 		ret = tracer_init(t, tr);
6070 		if (ret)
6071 			goto out;
6072 	}
6073 
6074 	tr->current_trace = t;
6075 	tr->current_trace->enabled++;
6076 	trace_branch_enable(tr);
6077  out:
6078 	mutex_unlock(&trace_types_lock);
6079 
6080 	return ret;
6081 }
6082 
6083 static ssize_t
6084 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6085 			size_t cnt, loff_t *ppos)
6086 {
6087 	struct trace_array *tr = filp->private_data;
6088 	char buf[MAX_TRACER_SIZE+1];
6089 	int i;
6090 	size_t ret;
6091 	int err;
6092 
6093 	ret = cnt;
6094 
6095 	if (cnt > MAX_TRACER_SIZE)
6096 		cnt = MAX_TRACER_SIZE;
6097 
6098 	if (copy_from_user(buf, ubuf, cnt))
6099 		return -EFAULT;
6100 
6101 	buf[cnt] = 0;
6102 
6103 	/* strip trailing whitespace. */
6104 	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6105 		buf[i] = 0;
6106 
6107 	err = tracing_set_tracer(tr, buf);
6108 	if (err)
6109 		return err;
6110 
6111 	*ppos += ret;
6112 
6113 	return ret;
6114 }
6115 
6116 static ssize_t
6117 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6118 		   size_t cnt, loff_t *ppos)
6119 {
6120 	char buf[64];
6121 	int r;
6122 
6123 	r = snprintf(buf, sizeof(buf), "%ld\n",
6124 		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6125 	if (r > sizeof(buf))
6126 		r = sizeof(buf);
6127 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6128 }
6129 
6130 static ssize_t
6131 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6132 		    size_t cnt, loff_t *ppos)
6133 {
6134 	unsigned long val;
6135 	int ret;
6136 
6137 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6138 	if (ret)
6139 		return ret;
6140 
6141 	*ptr = val * 1000;
6142 
6143 	return cnt;
6144 }
6145 
6146 static ssize_t
6147 tracing_thresh_read(struct file *filp, char __user *ubuf,
6148 		    size_t cnt, loff_t *ppos)
6149 {
6150 	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6151 }
6152 
6153 static ssize_t
6154 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6155 		     size_t cnt, loff_t *ppos)
6156 {
6157 	struct trace_array *tr = filp->private_data;
6158 	int ret;
6159 
6160 	mutex_lock(&trace_types_lock);
6161 	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6162 	if (ret < 0)
6163 		goto out;
6164 
6165 	if (tr->current_trace->update_thresh) {
6166 		ret = tr->current_trace->update_thresh(tr);
6167 		if (ret < 0)
6168 			goto out;
6169 	}
6170 
6171 	ret = cnt;
6172 out:
6173 	mutex_unlock(&trace_types_lock);
6174 
6175 	return ret;
6176 }
6177 
6178 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6179 
6180 static ssize_t
6181 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6182 		     size_t cnt, loff_t *ppos)
6183 {
6184 	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6185 }
6186 
6187 static ssize_t
6188 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6189 		      size_t cnt, loff_t *ppos)
6190 {
6191 	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6192 }
6193 
6194 #endif
6195 
6196 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6197 {
6198 	struct trace_array *tr = inode->i_private;
6199 	struct trace_iterator *iter;
6200 	int ret;
6201 
6202 	ret = tracing_check_open_get_tr(tr);
6203 	if (ret)
6204 		return ret;
6205 
6206 	mutex_lock(&trace_types_lock);
6207 
6208 	/* create a buffer to store the information to pass to userspace */
6209 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6210 	if (!iter) {
6211 		ret = -ENOMEM;
6212 		__trace_array_put(tr);
6213 		goto out;
6214 	}
6215 
6216 	trace_seq_init(&iter->seq);
6217 	iter->trace = tr->current_trace;
6218 
6219 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6220 		ret = -ENOMEM;
6221 		goto fail;
6222 	}
6223 
6224 	/* trace pipe does not show start of buffer */
6225 	cpumask_setall(iter->started);
6226 
6227 	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6228 		iter->iter_flags |= TRACE_FILE_LAT_FMT;
6229 
6230 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
6231 	if (trace_clocks[tr->clock_id].in_ns)
6232 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6233 
6234 	iter->tr = tr;
6235 	iter->array_buffer = &tr->array_buffer;
6236 	iter->cpu_file = tracing_get_cpu(inode);
6237 	mutex_init(&iter->mutex);
6238 	filp->private_data = iter;
6239 
6240 	if (iter->trace->pipe_open)
6241 		iter->trace->pipe_open(iter);
6242 
6243 	nonseekable_open(inode, filp);
6244 
6245 	tr->trace_ref++;
6246 out:
6247 	mutex_unlock(&trace_types_lock);
6248 	return ret;
6249 
6250 fail:
6251 	kfree(iter);
6252 	__trace_array_put(tr);
6253 	mutex_unlock(&trace_types_lock);
6254 	return ret;
6255 }
6256 
6257 static int tracing_release_pipe(struct inode *inode, struct file *file)
6258 {
6259 	struct trace_iterator *iter = file->private_data;
6260 	struct trace_array *tr = inode->i_private;
6261 
6262 	mutex_lock(&trace_types_lock);
6263 
6264 	tr->trace_ref--;
6265 
6266 	if (iter->trace->pipe_close)
6267 		iter->trace->pipe_close(iter);
6268 
6269 	mutex_unlock(&trace_types_lock);
6270 
6271 	free_cpumask_var(iter->started);
6272 	mutex_destroy(&iter->mutex);
6273 	kfree(iter);
6274 
6275 	trace_array_put(tr);
6276 
6277 	return 0;
6278 }
6279 
6280 static __poll_t
6281 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6282 {
6283 	struct trace_array *tr = iter->tr;
6284 
6285 	/* Iterators are static, they should be filled or empty */
6286 	if (trace_buffer_iter(iter, iter->cpu_file))
6287 		return EPOLLIN | EPOLLRDNORM;
6288 
6289 	if (tr->trace_flags & TRACE_ITER_BLOCK)
6290 		/*
6291 		 * Always select as readable when in blocking mode
6292 		 */
6293 		return EPOLLIN | EPOLLRDNORM;
6294 	else
6295 		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6296 					     filp, poll_table);
6297 }
6298 
6299 static __poll_t
6300 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6301 {
6302 	struct trace_iterator *iter = filp->private_data;
6303 
6304 	return trace_poll(iter, filp, poll_table);
6305 }
6306 
6307 /* Must be called with iter->mutex held. */
6308 static int tracing_wait_pipe(struct file *filp)
6309 {
6310 	struct trace_iterator *iter = filp->private_data;
6311 	int ret;
6312 
6313 	while (trace_empty(iter)) {
6314 
6315 		if ((filp->f_flags & O_NONBLOCK)) {
6316 			return -EAGAIN;
6317 		}
6318 
6319 		/*
6320 		 * We block until we read something and tracing is disabled.
6321 		 * We still block if tracing is disabled, but we have never
6322 		 * read anything. This allows a user to cat this file, and
6323 		 * then enable tracing. But after we have read something,
6324 		 * we give an EOF when tracing is again disabled.
6325 		 *
6326 		 * iter->pos will be 0 if we haven't read anything.
6327 		 */
6328 		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6329 			break;
6330 
6331 		mutex_unlock(&iter->mutex);
6332 
6333 		ret = wait_on_pipe(iter, 0);
6334 
6335 		mutex_lock(&iter->mutex);
6336 
6337 		if (ret)
6338 			return ret;
6339 	}
6340 
6341 	return 1;
6342 }
6343 
6344 /*
6345  * Consumer reader.
6346  */
6347 static ssize_t
6348 tracing_read_pipe(struct file *filp, char __user *ubuf,
6349 		  size_t cnt, loff_t *ppos)
6350 {
6351 	struct trace_iterator *iter = filp->private_data;
6352 	ssize_t sret;
6353 
6354 	/*
6355 	 * Avoid more than one consumer on a single file descriptor.
6356 	 * This is just a matter of trace coherency; the ring buffer itself
6357 	 * is protected.
6358 	 */
6359 	mutex_lock(&iter->mutex);
6360 
6361 	/* return any leftover data */
6362 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6363 	if (sret != -EBUSY)
6364 		goto out;
6365 
6366 	trace_seq_init(&iter->seq);
6367 
6368 	if (iter->trace->read) {
6369 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6370 		if (sret)
6371 			goto out;
6372 	}
6373 
6374 waitagain:
6375 	sret = tracing_wait_pipe(filp);
6376 	if (sret <= 0)
6377 		goto out;
6378 
6379 	/* stop when tracing is finished */
6380 	if (trace_empty(iter)) {
6381 		sret = 0;
6382 		goto out;
6383 	}
6384 
6385 	if (cnt >= PAGE_SIZE)
6386 		cnt = PAGE_SIZE - 1;
6387 
6388 	/* reset all but tr, trace, and overruns */
6389 	memset(&iter->seq, 0,
6390 	       sizeof(struct trace_iterator) -
6391 	       offsetof(struct trace_iterator, seq));
6392 	cpumask_clear(iter->started);
6393 	trace_seq_init(&iter->seq);
6394 	iter->pos = -1;
6395 
6396 	trace_event_read_lock();
6397 	trace_access_lock(iter->cpu_file);
6398 	while (trace_find_next_entry_inc(iter) != NULL) {
6399 		enum print_line_t ret;
6400 		int save_len = iter->seq.seq.len;
6401 
6402 		ret = print_trace_line(iter);
6403 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
6404 			/* don't print partial lines */
6405 			iter->seq.seq.len = save_len;
6406 			break;
6407 		}
6408 		if (ret != TRACE_TYPE_NO_CONSUME)
6409 			trace_consume(iter);
6410 
6411 		if (trace_seq_used(&iter->seq) >= cnt)
6412 			break;
6413 
6414 		/*
6415 		 * Setting the full flag means we reached the trace_seq buffer
6416 		 * size and should have exited via the partial-line check above.
6417 		 * One of the trace_seq_* functions is not being used properly.
6418 		 */
6419 		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6420 			  iter->ent->type);
6421 	}
6422 	trace_access_unlock(iter->cpu_file);
6423 	trace_event_read_unlock();
6424 
6425 	/* Now copy what we have to the user */
6426 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6427 	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6428 		trace_seq_init(&iter->seq);
6429 
6430 	/*
6431 	 * If there was nothing to send to user, in spite of consuming trace
6432 	 * entries, go back to wait for more entries.
6433 	 */
6434 	if (sret == -EBUSY)
6435 		goto waitagain;
6436 
6437 out:
6438 	mutex_unlock(&iter->mutex);
6439 
6440 	return sret;
6441 }
6442 
6443 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6444 				     unsigned int idx)
6445 {
6446 	__free_page(spd->pages[idx]);
6447 }
6448 
6449 static size_t
6450 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6451 {
6452 	size_t count;
6453 	int save_len;
6454 	int ret;
6455 
6456 	/* Seq buffer is page-sized, exactly what we need. */
6457 	for (;;) {
6458 		save_len = iter->seq.seq.len;
6459 		ret = print_trace_line(iter);
6460 
6461 		if (trace_seq_has_overflowed(&iter->seq)) {
6462 			iter->seq.seq.len = save_len;
6463 			break;
6464 		}
6465 
6466 		/*
6467 		 * This should not be hit, because it should only
6468 		 * be set if the iter->seq overflowed. But check it
6469 		 * anyway to be safe.
6470 		 */
6471 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
6472 			iter->seq.seq.len = save_len;
6473 			break;
6474 		}
6475 
6476 		count = trace_seq_used(&iter->seq) - save_len;
6477 		if (rem < count) {
6478 			rem = 0;
6479 			iter->seq.seq.len = save_len;
6480 			break;
6481 		}
6482 
6483 		if (ret != TRACE_TYPE_NO_CONSUME)
6484 			trace_consume(iter);
6485 		rem -= count;
6486 		if (!trace_find_next_entry_inc(iter))	{
6487 			rem = 0;
6488 			iter->ent = NULL;
6489 			break;
6490 		}
6491 	}
6492 
6493 	return rem;
6494 }
6495 
6496 static ssize_t tracing_splice_read_pipe(struct file *filp,
6497 					loff_t *ppos,
6498 					struct pipe_inode_info *pipe,
6499 					size_t len,
6500 					unsigned int flags)
6501 {
6502 	struct page *pages_def[PIPE_DEF_BUFFERS];
6503 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
6504 	struct trace_iterator *iter = filp->private_data;
6505 	struct splice_pipe_desc spd = {
6506 		.pages		= pages_def,
6507 		.partial	= partial_def,
6508 		.nr_pages	= 0, /* This gets updated below. */
6509 		.nr_pages_max	= PIPE_DEF_BUFFERS,
6510 		.ops		= &default_pipe_buf_ops,
6511 		.spd_release	= tracing_spd_release_pipe,
6512 	};
6513 	ssize_t ret;
6514 	size_t rem;
6515 	unsigned int i;
6516 
6517 	if (splice_grow_spd(pipe, &spd))
6518 		return -ENOMEM;
6519 
6520 	mutex_lock(&iter->mutex);
6521 
6522 	if (iter->trace->splice_read) {
6523 		ret = iter->trace->splice_read(iter, filp,
6524 					       ppos, pipe, len, flags);
6525 		if (ret)
6526 			goto out_err;
6527 	}
6528 
6529 	ret = tracing_wait_pipe(filp);
6530 	if (ret <= 0)
6531 		goto out_err;
6532 
6533 	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6534 		ret = -EFAULT;
6535 		goto out_err;
6536 	}
6537 
6538 	trace_event_read_lock();
6539 	trace_access_lock(iter->cpu_file);
6540 
6541 	/* Fill as many pages as possible. */
6542 	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6543 		spd.pages[i] = alloc_page(GFP_KERNEL);
6544 		if (!spd.pages[i])
6545 			break;
6546 
6547 		rem = tracing_fill_pipe_page(rem, iter);
6548 
6549 		/* Copy the data into the page, so we can start over. */
6550 		ret = trace_seq_to_buffer(&iter->seq,
6551 					  page_address(spd.pages[i]),
6552 					  trace_seq_used(&iter->seq));
6553 		if (ret < 0) {
6554 			__free_page(spd.pages[i]);
6555 			break;
6556 		}
6557 		spd.partial[i].offset = 0;
6558 		spd.partial[i].len = trace_seq_used(&iter->seq);
6559 
6560 		trace_seq_init(&iter->seq);
6561 	}
6562 
6563 	trace_access_unlock(iter->cpu_file);
6564 	trace_event_read_unlock();
6565 	mutex_unlock(&iter->mutex);
6566 
6567 	spd.nr_pages = i;
6568 
6569 	if (i)
6570 		ret = splice_to_pipe(pipe, &spd);
6571 	else
6572 		ret = 0;
6573 out:
6574 	splice_shrink_spd(&spd);
6575 	return ret;
6576 
6577 out_err:
6578 	mutex_unlock(&iter->mutex);
6579 	goto out;
6580 }
6581 
6582 static ssize_t
6583 tracing_entries_read(struct file *filp, char __user *ubuf,
6584 		     size_t cnt, loff_t *ppos)
6585 {
6586 	struct inode *inode = file_inode(filp);
6587 	struct trace_array *tr = inode->i_private;
6588 	int cpu = tracing_get_cpu(inode);
6589 	char buf[64];
6590 	int r = 0;
6591 	ssize_t ret;
6592 
6593 	mutex_lock(&trace_types_lock);
6594 
6595 	if (cpu == RING_BUFFER_ALL_CPUS) {
6596 		int cpu, buf_size_same;
6597 		unsigned long size;
6598 
6599 		size = 0;
6600 		buf_size_same = 1;
6601 		/* check if all cpu sizes are same */
6602 		for_each_tracing_cpu(cpu) {
6603 			/* fill in the size from first enabled cpu */
6604 			if (size == 0)
6605 				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6606 			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6607 				buf_size_same = 0;
6608 				break;
6609 			}
6610 		}
6611 
6612 		if (buf_size_same) {
6613 			if (!ring_buffer_expanded)
6614 				r = sprintf(buf, "%lu (expanded: %lu)\n",
6615 					    size >> 10,
6616 					    trace_buf_size >> 10);
6617 			else
6618 				r = sprintf(buf, "%lu\n", size >> 10);
6619 		} else
6620 			r = sprintf(buf, "X\n");
6621 	} else
6622 		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6623 
6624 	mutex_unlock(&trace_types_lock);
6625 
6626 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6627 	return ret;
6628 }
6629 
6630 static ssize_t
6631 tracing_entries_write(struct file *filp, const char __user *ubuf,
6632 		      size_t cnt, loff_t *ppos)
6633 {
6634 	struct inode *inode = file_inode(filp);
6635 	struct trace_array *tr = inode->i_private;
6636 	unsigned long val;
6637 	int ret;
6638 
6639 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6640 	if (ret)
6641 		return ret;
6642 
6643 	/* must have at least 1 entry */
6644 	if (!val)
6645 		return -EINVAL;
6646 
6647 	/* value is in KB */
6648 	val <<= 10;
6649 	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6650 	if (ret < 0)
6651 		return ret;
6652 
6653 	*ppos += cnt;
6654 
6655 	return cnt;
6656 }
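
/*
 * Example (illustrative): this write handler backs the buffer_size_kb
 * files, so the ring buffers can be resized from user space in
 * kilobytes, assuming the default tracefs mount point:
 *
 *	echo 10240 > /sys/kernel/tracing/buffer_size_kb
 *	echo 1024 > /sys/kernel/tracing/per_cpu/cpu0/buffer_size_kb
 */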
6657 
6658 static ssize_t
6659 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6660 				size_t cnt, loff_t *ppos)
6661 {
6662 	struct trace_array *tr = filp->private_data;
6663 	char buf[64];
6664 	int r, cpu;
6665 	unsigned long size = 0, expanded_size = 0;
6666 
6667 	mutex_lock(&trace_types_lock);
6668 	for_each_tracing_cpu(cpu) {
6669 		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6670 		if (!ring_buffer_expanded)
6671 			expanded_size += trace_buf_size >> 10;
6672 	}
6673 	if (ring_buffer_expanded)
6674 		r = sprintf(buf, "%lu\n", size);
6675 	else
6676 		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6677 	mutex_unlock(&trace_types_lock);
6678 
6679 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6680 }
6681 
6682 static ssize_t
6683 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6684 			  size_t cnt, loff_t *ppos)
6685 {
6686 	/*
6687 	 * There is no need to read what the user has written; this function
6688 	 * just makes sure that there is no error when "echo" is used.
6689 	 */
6690 
6691 	*ppos += cnt;
6692 
6693 	return cnt;
6694 }
6695 
6696 static int
6697 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6698 {
6699 	struct trace_array *tr = inode->i_private;
6700 
6701 	/* disable tracing? */
6702 	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6703 		tracer_tracing_off(tr);
6704 	/* resize the ring buffer to 0 */
6705 	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6706 
6707 	trace_array_put(tr);
6708 
6709 	return 0;
6710 }
6711 
6712 static ssize_t
6713 tracing_mark_write(struct file *filp, const char __user *ubuf,
6714 					size_t cnt, loff_t *fpos)
6715 {
6716 	struct trace_array *tr = filp->private_data;
6717 	struct ring_buffer_event *event;
6718 	enum event_trigger_type tt = ETT_NONE;
6719 	struct trace_buffer *buffer;
6720 	struct print_entry *entry;
6721 	ssize_t written;
6722 	int size;
6723 	int len;
6724 
6725 /* Used in tracing_mark_raw_write() as well */
6726 #define FAULTED_STR "<faulted>"
6727 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6728 
6729 	if (tracing_disabled)
6730 		return -EINVAL;
6731 
6732 	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6733 		return -EINVAL;
6734 
6735 	if (cnt > TRACE_BUF_SIZE)
6736 		cnt = TRACE_BUF_SIZE;
6737 
6738 	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6739 
6740 	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6741 
6742 	/* If less than "<faulted>", then make sure we can still add that */
6743 	if (cnt < FAULTED_SIZE)
6744 		size += FAULTED_SIZE - cnt;
6745 
6746 	buffer = tr->array_buffer.buffer;
6747 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6748 					    tracing_gen_ctx());
6749 	if (unlikely(!event))
6750 		/* Ring buffer disabled, return as if not open for write */
6751 		return -EBADF;
6752 
6753 	entry = ring_buffer_event_data(event);
6754 	entry->ip = _THIS_IP_;
6755 
6756 	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6757 	if (len) {
6758 		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6759 		cnt = FAULTED_SIZE;
6760 		written = -EFAULT;
6761 	} else
6762 		written = cnt;
6763 
6764 	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6765 		/* do not add \n before testing triggers, but add \0 */
6766 		entry->buf[cnt] = '\0';
6767 		tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
6768 	}
6769 
6770 	if (entry->buf[cnt - 1] != '\n') {
6771 		entry->buf[cnt] = '\n';
6772 		entry->buf[cnt + 1] = '\0';
6773 	} else
6774 		entry->buf[cnt] = '\0';
6775 
6776 	if (static_branch_unlikely(&trace_marker_exports_enabled))
6777 		ftrace_exports(event, TRACE_EXPORT_MARKER);
6778 	__buffer_unlock_commit(buffer, event);
6779 
6780 	if (tt)
6781 		event_triggers_post_call(tr->trace_marker_file, tt);
6782 
6783 	if (written > 0)
6784 		*fpos += written;
6785 
6786 	return written;
6787 }
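
/*
 * Example (illustrative): this handler backs the trace_marker file, so
 * user space can inject annotations into the trace, assuming the default
 * tracefs mount point:
 *
 *	echo "hit the slow path" > /sys/kernel/tracing/trace_marker
 */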
6788 
6789 /* Limit it for now to 3K (including tag) */
6790 #define RAW_DATA_MAX_SIZE (1024*3)
6791 
6792 static ssize_t
6793 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6794 					size_t cnt, loff_t *fpos)
6795 {
6796 	struct trace_array *tr = filp->private_data;
6797 	struct ring_buffer_event *event;
6798 	struct trace_buffer *buffer;
6799 	struct raw_data_entry *entry;
6800 	ssize_t written;
6801 	int size;
6802 	int len;
6803 
6804 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6805 
6806 	if (tracing_disabled)
6807 		return -EINVAL;
6808 
6809 	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6810 		return -EINVAL;
6811 
6812 	/* The marker must at least have a tag id */
6813 	if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6814 		return -EINVAL;
6815 
6816 	if (cnt > TRACE_BUF_SIZE)
6817 		cnt = TRACE_BUF_SIZE;
6818 
6819 	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6820 
6821 	size = sizeof(*entry) + cnt;
6822 	if (cnt < FAULT_SIZE_ID)
6823 		size += FAULT_SIZE_ID - cnt;
6824 
6825 	buffer = tr->array_buffer.buffer;
6826 	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6827 					    tracing_gen_ctx());
6828 	if (!event)
6829 		/* Ring buffer disabled, return as if not open for write */
6830 		return -EBADF;
6831 
6832 	entry = ring_buffer_event_data(event);
6833 
6834 	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6835 	if (len) {
6836 		entry->id = -1;
6837 		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6838 		written = -EFAULT;
6839 	} else
6840 		written = cnt;
6841 
6842 	__buffer_unlock_commit(buffer, event);
6843 
6844 	if (written > 0)
6845 		*fpos += written;
6846 
6847 	return written;
6848 }
6849 
6850 static int tracing_clock_show(struct seq_file *m, void *v)
6851 {
6852 	struct trace_array *tr = m->private;
6853 	int i;
6854 
6855 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6856 		seq_printf(m,
6857 			"%s%s%s%s", i ? " " : "",
6858 			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6859 			i == tr->clock_id ? "]" : "");
6860 	seq_putc(m, '\n');
6861 
6862 	return 0;
6863 }
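
/*
 * Reading the trace_clock file yields one line with the available clocks
 * and the current one in brackets, e.g. (illustrative; the exact set
 * depends on the kernel configuration):
 *
 *	[local] global counter uptime perf mono mono_raw boot
 */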
6864 
6865 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6866 {
6867 	int i;
6868 
6869 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6870 		if (strcmp(trace_clocks[i].name, clockstr) == 0)
6871 			break;
6872 	}
6873 	if (i == ARRAY_SIZE(trace_clocks))
6874 		return -EINVAL;
6875 
6876 	mutex_lock(&trace_types_lock);
6877 
6878 	tr->clock_id = i;
6879 
6880 	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
6881 
6882 	/*
6883 	 * New clock may not be consistent with the previous clock.
6884 	 * Reset the buffer so that it doesn't have incomparable timestamps.
6885 	 */
6886 	tracing_reset_online_cpus(&tr->array_buffer);
6887 
6888 #ifdef CONFIG_TRACER_MAX_TRACE
6889 	if (tr->max_buffer.buffer)
6890 		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6891 	tracing_reset_online_cpus(&tr->max_buffer);
6892 #endif
6893 
6894 	mutex_unlock(&trace_types_lock);
6895 
6896 	return 0;
6897 }
6898 
6899 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6900 				   size_t cnt, loff_t *fpos)
6901 {
6902 	struct seq_file *m = filp->private_data;
6903 	struct trace_array *tr = m->private;
6904 	char buf[64];
6905 	const char *clockstr;
6906 	int ret;
6907 
6908 	if (cnt >= sizeof(buf))
6909 		return -EINVAL;
6910 
6911 	if (copy_from_user(buf, ubuf, cnt))
6912 		return -EFAULT;
6913 
6914 	buf[cnt] = 0;
6915 
6916 	clockstr = strstrip(buf);
6917 
6918 	ret = tracing_set_clock(tr, clockstr);
6919 	if (ret)
6920 		return ret;
6921 
6922 	*fpos += cnt;
6923 
6924 	return cnt;
6925 }
6926 
6927 static int tracing_clock_open(struct inode *inode, struct file *file)
6928 {
6929 	struct trace_array *tr = inode->i_private;
6930 	int ret;
6931 
6932 	ret = tracing_check_open_get_tr(tr);
6933 	if (ret)
6934 		return ret;
6935 
6936 	ret = single_open(file, tracing_clock_show, inode->i_private);
6937 	if (ret < 0)
6938 		trace_array_put(tr);
6939 
6940 	return ret;
6941 }
6942 
6943 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6944 {
6945 	struct trace_array *tr = m->private;
6946 
6947 	mutex_lock(&trace_types_lock);
6948 
6949 	if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
6950 		seq_puts(m, "delta [absolute]\n");
6951 	else
6952 		seq_puts(m, "[delta] absolute\n");
6953 
6954 	mutex_unlock(&trace_types_lock);
6955 
6956 	return 0;
6957 }
6958 
6959 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6960 {
6961 	struct trace_array *tr = inode->i_private;
6962 	int ret;
6963 
6964 	ret = tracing_check_open_get_tr(tr);
6965 	if (ret)
6966 		return ret;
6967 
6968 	ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6969 	if (ret < 0)
6970 		trace_array_put(tr);
6971 
6972 	return ret;
6973 }
6974 
6975 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
6976 {
6977 	if (rbe == this_cpu_read(trace_buffered_event))
6978 		return ring_buffer_time_stamp(buffer, smp_processor_id());
6979 
6980 	return ring_buffer_event_time_stamp(buffer, rbe);
6981 }
6982 
6983 /*
6984  * Set or disable using the per CPU trace_buffered_event when possible.
6985  */
6986 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
6987 {
6988 	int ret = 0;
6989 
6990 	mutex_lock(&trace_types_lock);
6991 
6992 	if (set && tr->no_filter_buffering_ref++)
6993 		goto out;
6994 
6995 	if (!set) {
6996 		if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
6997 			ret = -EINVAL;
6998 			goto out;
6999 		}
7000 
7001 		--tr->no_filter_buffering_ref;
7002 	}
7003  out:
7004 	mutex_unlock(&trace_types_lock);
7005 
7006 	return ret;
7007 }
7008 
7009 struct ftrace_buffer_info {
7010 	struct trace_iterator	iter;
7011 	void			*spare;
7012 	unsigned int		spare_cpu;
7013 	unsigned int		read;
7014 };
7015 
7016 #ifdef CONFIG_TRACER_SNAPSHOT
7017 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7018 {
7019 	struct trace_array *tr = inode->i_private;
7020 	struct trace_iterator *iter;
7021 	struct seq_file *m;
7022 	int ret;
7023 
7024 	ret = tracing_check_open_get_tr(tr);
7025 	if (ret)
7026 		return ret;
7027 
7028 	if (file->f_mode & FMODE_READ) {
7029 		iter = __tracing_open(inode, file, true);
7030 		if (IS_ERR(iter))
7031 			ret = PTR_ERR(iter);
7032 	} else {
7033 		/* Writes still need the seq_file to hold the private data */
7034 		ret = -ENOMEM;
7035 		m = kzalloc(sizeof(*m), GFP_KERNEL);
7036 		if (!m)
7037 			goto out;
7038 		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7039 		if (!iter) {
7040 			kfree(m);
7041 			goto out;
7042 		}
7043 		ret = 0;
7044 
7045 		iter->tr = tr;
7046 		iter->array_buffer = &tr->max_buffer;
7047 		iter->cpu_file = tracing_get_cpu(inode);
7048 		m->private = iter;
7049 		file->private_data = m;
7050 	}
7051 out:
7052 	if (ret < 0)
7053 		trace_array_put(tr);
7054 
7055 	return ret;
7056 }
7057 
7058 static ssize_t
7059 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7060 		       loff_t *ppos)
7061 {
7062 	struct seq_file *m = filp->private_data;
7063 	struct trace_iterator *iter = m->private;
7064 	struct trace_array *tr = iter->tr;
7065 	unsigned long val;
7066 	int ret;
7067 
7068 	ret = tracing_update_buffers();
7069 	if (ret < 0)
7070 		return ret;
7071 
7072 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7073 	if (ret)
7074 		return ret;
7075 
7076 	mutex_lock(&trace_types_lock);
7077 
7078 	if (tr->current_trace->use_max_tr) {
7079 		ret = -EBUSY;
7080 		goto out;
7081 	}
7082 
7083 	arch_spin_lock(&tr->max_lock);
7084 	if (tr->cond_snapshot)
7085 		ret = -EBUSY;
7086 	arch_spin_unlock(&tr->max_lock);
7087 	if (ret)
7088 		goto out;
7089 
7090 	switch (val) {
7091 	case 0:
7092 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7093 			ret = -EINVAL;
7094 			break;
7095 		}
7096 		if (tr->allocated_snapshot)
7097 			free_snapshot(tr);
7098 		break;
7099 	case 1:
7100 /* Only allow per-cpu swap if the ring buffer supports it */
7101 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7102 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7103 			ret = -EINVAL;
7104 			break;
7105 		}
7106 #endif
7107 		if (tr->allocated_snapshot)
7108 			ret = resize_buffer_duplicate_size(&tr->max_buffer,
7109 					&tr->array_buffer, iter->cpu_file);
7110 		else
7111 			ret = tracing_alloc_snapshot_instance(tr);
7112 		if (ret < 0)
7113 			break;
7114 		local_irq_disable();
7115 		/* Now, we're going to swap */
7116 		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7117 			update_max_tr(tr, current, smp_processor_id(), NULL);
7118 		else
7119 			update_max_tr_single(tr, current, iter->cpu_file);
7120 		local_irq_enable();
7121 		break;
7122 	default:
7123 		if (tr->allocated_snapshot) {
7124 			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7125 				tracing_reset_online_cpus(&tr->max_buffer);
7126 			else
7127 				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7128 		}
7129 		break;
7130 	}
7131 
7132 	if (ret >= 0) {
7133 		*ppos += cnt;
7134 		ret = cnt;
7135 	}
7136 out:
7137 	mutex_unlock(&trace_types_lock);
7138 	return ret;
7139 }
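
/*
 * Example (illustrative): the switch above gives the snapshot file its
 * write semantics, assuming the default tracefs mount point:
 *
 *	echo 0 > /sys/kernel/tracing/snapshot	# free the snapshot buffer
 *	echo 1 > /sys/kernel/tracing/snapshot	# allocate if needed and take a snapshot
 *	echo 2 > /sys/kernel/tracing/snapshot	# clear the snapshot contents
 */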
7140 
7141 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7142 {
7143 	struct seq_file *m = file->private_data;
7144 	int ret;
7145 
7146 	ret = tracing_release(inode, file);
7147 
7148 	if (file->f_mode & FMODE_READ)
7149 		return ret;
7150 
7151 	/* If write only, the seq_file is just a stub */
7152 	if (m)
7153 		kfree(m->private);
7154 	kfree(m);
7155 
7156 	return 0;
7157 }
7158 
7159 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7160 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7161 				    size_t count, loff_t *ppos);
7162 static int tracing_buffers_release(struct inode *inode, struct file *file);
7163 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7164 		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7165 
7166 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7167 {
7168 	struct ftrace_buffer_info *info;
7169 	int ret;
7170 
7171 	/* The following checks for tracefs lockdown */
7172 	ret = tracing_buffers_open(inode, filp);
7173 	if (ret < 0)
7174 		return ret;
7175 
7176 	info = filp->private_data;
7177 
7178 	if (info->iter.trace->use_max_tr) {
7179 		tracing_buffers_release(inode, filp);
7180 		return -EBUSY;
7181 	}
7182 
7183 	info->iter.snapshot = true;
7184 	info->iter.array_buffer = &info->iter.tr->max_buffer;
7185 
7186 	return ret;
7187 }
7188 
7189 #endif /* CONFIG_TRACER_SNAPSHOT */
7190 
7191 
7192 static const struct file_operations tracing_thresh_fops = {
7193 	.open		= tracing_open_generic,
7194 	.read		= tracing_thresh_read,
7195 	.write		= tracing_thresh_write,
7196 	.llseek		= generic_file_llseek,
7197 };
7198 
7199 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7200 static const struct file_operations tracing_max_lat_fops = {
7201 	.open		= tracing_open_generic,
7202 	.read		= tracing_max_lat_read,
7203 	.write		= tracing_max_lat_write,
7204 	.llseek		= generic_file_llseek,
7205 };
7206 #endif
7207 
7208 static const struct file_operations set_tracer_fops = {
7209 	.open		= tracing_open_generic,
7210 	.read		= tracing_set_trace_read,
7211 	.write		= tracing_set_trace_write,
7212 	.llseek		= generic_file_llseek,
7213 };
7214 
7215 static const struct file_operations tracing_pipe_fops = {
7216 	.open		= tracing_open_pipe,
7217 	.poll		= tracing_poll_pipe,
7218 	.read		= tracing_read_pipe,
7219 	.splice_read	= tracing_splice_read_pipe,
7220 	.release	= tracing_release_pipe,
7221 	.llseek		= no_llseek,
7222 };
7223 
7224 static const struct file_operations tracing_entries_fops = {
7225 	.open		= tracing_open_generic_tr,
7226 	.read		= tracing_entries_read,
7227 	.write		= tracing_entries_write,
7228 	.llseek		= generic_file_llseek,
7229 	.release	= tracing_release_generic_tr,
7230 };
7231 
7232 static const struct file_operations tracing_total_entries_fops = {
7233 	.open		= tracing_open_generic_tr,
7234 	.read		= tracing_total_entries_read,
7235 	.llseek		= generic_file_llseek,
7236 	.release	= tracing_release_generic_tr,
7237 };
7238 
7239 static const struct file_operations tracing_free_buffer_fops = {
7240 	.open		= tracing_open_generic_tr,
7241 	.write		= tracing_free_buffer_write,
7242 	.release	= tracing_free_buffer_release,
7243 };
7244 
7245 static const struct file_operations tracing_mark_fops = {
7246 	.open		= tracing_open_generic_tr,
7247 	.write		= tracing_mark_write,
7248 	.llseek		= generic_file_llseek,
7249 	.release	= tracing_release_generic_tr,
7250 };
7251 
7252 static const struct file_operations tracing_mark_raw_fops = {
7253 	.open		= tracing_open_generic_tr,
7254 	.write		= tracing_mark_raw_write,
7255 	.llseek		= generic_file_llseek,
7256 	.release	= tracing_release_generic_tr,
7257 };
7258 
7259 static const struct file_operations trace_clock_fops = {
7260 	.open		= tracing_clock_open,
7261 	.read		= seq_read,
7262 	.llseek		= seq_lseek,
7263 	.release	= tracing_single_release_tr,
7264 	.write		= tracing_clock_write,
7265 };
7266 
7267 static const struct file_operations trace_time_stamp_mode_fops = {
7268 	.open		= tracing_time_stamp_mode_open,
7269 	.read		= seq_read,
7270 	.llseek		= seq_lseek,
7271 	.release	= tracing_single_release_tr,
7272 };
7273 
7274 #ifdef CONFIG_TRACER_SNAPSHOT
7275 static const struct file_operations snapshot_fops = {
7276 	.open		= tracing_snapshot_open,
7277 	.read		= seq_read,
7278 	.write		= tracing_snapshot_write,
7279 	.llseek		= tracing_lseek,
7280 	.release	= tracing_snapshot_release,
7281 };
7282 
7283 static const struct file_operations snapshot_raw_fops = {
7284 	.open		= snapshot_raw_open,
7285 	.read		= tracing_buffers_read,
7286 	.release	= tracing_buffers_release,
7287 	.splice_read	= tracing_buffers_splice_read,
7288 	.llseek		= no_llseek,
7289 };
7290 
7291 #endif /* CONFIG_TRACER_SNAPSHOT */
7292 
7293 #define TRACING_LOG_ERRS_MAX	8
7294 #define TRACING_LOG_LOC_MAX	128
7295 
7296 #define CMD_PREFIX "  Command: "
7297 
7298 struct err_info {
7299 	const char	**errs;	/* ptr to loc-specific array of err strings */
7300 	u8		type;	/* index into errs -> specific err string */
7301 	u8		pos;	/* MAX_FILTER_STR_VAL = 256 */
7302 	u64		ts;
7303 };
7304 
7305 struct tracing_log_err {
7306 	struct list_head	list;
7307 	struct err_info		info;
7308 	char			loc[TRACING_LOG_LOC_MAX]; /* err location */
7309 	char			cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7310 };
7311 
7312 static DEFINE_MUTEX(tracing_err_log_lock);
7313 
7314 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
7315 {
7316 	struct tracing_log_err *err;
7317 
7318 	if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7319 		err = kzalloc(sizeof(*err), GFP_KERNEL);
7320 		if (!err)
7321 			return ERR_PTR(-ENOMEM);
7322 		tr->n_err_log_entries++;
7323 
7324 		return err;
7325 	}
7326 
7327 	err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7328 	list_del(&err->list);
7329 
7330 	return err;
7331 }
7332 
7333 /**
7334  * err_pos - find the position of a string within a command for error careting
7335  * @cmd: The tracing command that caused the error
7336  * @str: The string to position the caret at within @cmd
7337  *
7338  * Finds the position of the first occurence of @str within @cmd.  The
7339  * Finds the position of the first occurrence of @str within @cmd.  The
7340  * within @cmd.
7341  *
7342  * Returns the index within @cmd of the first occurence of @str or 0
7343  * Returns the index within @cmd of the first occurrence of @str or 0
7344  */
7345 unsigned int err_pos(char *cmd, const char *str)
7346 {
7347 	char *found;
7348 
7349 	if (WARN_ON(!strlen(cmd)))
7350 		return 0;
7351 
7352 	found = strstr(cmd, str);
7353 	if (found)
7354 		return found - cmd;
7355 
7356 	return 0;
7357 }
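
/*
 * For example (illustrative), err_pos("keys=foo", "foo") returns 5, and
 * passing that as @pos to tracing_log_err() places the caret under the
 * 'f' of "foo" in the logged command line.
 */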
7358 
7359 /**
7360  * tracing_log_err - write an error to the tracing error log
7361  * @tr: The associated trace array for the error (NULL for top level array)
7362  * @loc: A string describing where the error occurred
7363  * @cmd: The tracing command that caused the error
7364  * @errs: The array of loc-specific static error strings
7365  * @type: The index into errs[], which produces the specific static err string
7366  * @pos: The position the caret should be placed in the cmd
7367  *
7368  * Writes an error into tracing/error_log of the form:
7369  *
7370  * <loc>: error: <text>
7371  *   Command: <cmd>
7372  *              ^
7373  *
7374  * tracing/error_log is a small log file containing the last
7375  * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
7376  * unless there has been a tracing error, and the error log can be
7377  * cleared and have its memory freed by writing the empty string in
7378  * truncation mode to it, i.e. echo > tracing/error_log.
7379  *
7380  * NOTE: the @errs array along with the @type param are used to
7381  * produce a static error string - this string is not copied and saved
7382  * when the error is logged - only a pointer to it is saved.  See
7383  * existing callers for examples of how static strings are typically
7384  * defined for use with tracing_log_err().
7385  */
7386 void tracing_log_err(struct trace_array *tr,
7387 		     const char *loc, const char *cmd,
7388 		     const char **errs, u8 type, u8 pos)
7389 {
7390 	struct tracing_log_err *err;
7391 
7392 	if (!tr)
7393 		tr = &global_trace;
7394 
7395 	mutex_lock(&tracing_err_log_lock);
7396 	err = get_tracing_log_err(tr);
7397 	if (PTR_ERR(err) == -ENOMEM) {
7398 		mutex_unlock(&tracing_err_log_lock);
7399 		return;
7400 	}
7401 
7402 	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7403 	snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7404 
7405 	err->info.errs = errs;
7406 	err->info.type = type;
7407 	err->info.pos = pos;
7408 	err->info.ts = local_clock();
7409 
7410 	list_add_tail(&err->list, &tr->err_log);
7411 	mutex_unlock(&tracing_err_log_lock);
7412 }
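
/*
 * A minimal usage sketch (hypothetical names, modeled on existing
 * callers): a loc-specific table of static error strings is indexed by
 * @type, and err_pos() supplies the caret position:
 *
 *	static const char *my_errs[] = { "Missing colon", "Bad field name" };
 *
 *	tracing_log_err(tr, "my_subsys", cmd, my_errs, 1, err_pos(cmd, field));
 */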
7413 
7414 static void clear_tracing_err_log(struct trace_array *tr)
7415 {
7416 	struct tracing_log_err *err, *next;
7417 
7418 	mutex_lock(&tracing_err_log_lock);
7419 	list_for_each_entry_safe(err, next, &tr->err_log, list) {
7420 		list_del(&err->list);
7421 		kfree(err);
7422 	}
7423 
7424 	tr->n_err_log_entries = 0;
7425 	mutex_unlock(&tracing_err_log_lock);
7426 }
7427 
7428 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7429 {
7430 	struct trace_array *tr = m->private;
7431 
7432 	mutex_lock(&tracing_err_log_lock);
7433 
7434 	return seq_list_start(&tr->err_log, *pos);
7435 }
7436 
7437 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7438 {
7439 	struct trace_array *tr = m->private;
7440 
7441 	return seq_list_next(v, &tr->err_log, pos);
7442 }
7443 
7444 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7445 {
7446 	mutex_unlock(&tracing_err_log_lock);
7447 }
7448 
7449 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7450 {
7451 	u8 i;
7452 
7453 	for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7454 		seq_putc(m, ' ');
7455 	for (i = 0; i < pos; i++)
7456 		seq_putc(m, ' ');
7457 	seq_puts(m, "^\n");
7458 }
7459 
7460 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7461 {
7462 	struct tracing_log_err *err = v;
7463 
7464 	if (err) {
7465 		const char *err_text = err->info.errs[err->info.type];
7466 		u64 sec = err->info.ts;
7467 		u32 nsec;
7468 
7469 		nsec = do_div(sec, NSEC_PER_SEC);
7470 		seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7471 			   err->loc, err_text);
7472 		seq_printf(m, "%s", err->cmd);
7473 		tracing_err_log_show_pos(m, err->info.pos);
7474 	}
7475 
7476 	return 0;
7477 }
7478 
7479 static const struct seq_operations tracing_err_log_seq_ops = {
7480 	.start  = tracing_err_log_seq_start,
7481 	.next   = tracing_err_log_seq_next,
7482 	.stop   = tracing_err_log_seq_stop,
7483 	.show   = tracing_err_log_seq_show
7484 };
7485 
7486 static int tracing_err_log_open(struct inode *inode, struct file *file)
7487 {
7488 	struct trace_array *tr = inode->i_private;
7489 	int ret = 0;
7490 
7491 	ret = tracing_check_open_get_tr(tr);
7492 	if (ret)
7493 		return ret;
7494 
7495 	/* If this file was opened for write, then erase contents */
7496 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7497 		clear_tracing_err_log(tr);
7498 
7499 	if (file->f_mode & FMODE_READ) {
7500 		ret = seq_open(file, &tracing_err_log_seq_ops);
7501 		if (!ret) {
7502 			struct seq_file *m = file->private_data;
7503 			m->private = tr;
7504 		} else {
7505 			trace_array_put(tr);
7506 		}
7507 	}
7508 	return ret;
7509 }
7510 
7511 static ssize_t tracing_err_log_write(struct file *file,
7512 				     const char __user *buffer,
7513 				     size_t count, loff_t *ppos)
7514 {
7515 	return count;
7516 }
7517 
7518 static int tracing_err_log_release(struct inode *inode, struct file *file)
7519 {
7520 	struct trace_array *tr = inode->i_private;
7521 
7522 	trace_array_put(tr);
7523 
7524 	if (file->f_mode & FMODE_READ)
7525 		seq_release(inode, file);
7526 
7527 	return 0;
7528 }
7529 
7530 static const struct file_operations tracing_err_log_fops = {
7531 	.open           = tracing_err_log_open,
7532 	.write		= tracing_err_log_write,
7533 	.read           = seq_read,
7534 	.llseek         = seq_lseek,
7535 	.release        = tracing_err_log_release,
7536 };
7537 
7538 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7539 {
7540 	struct trace_array *tr = inode->i_private;
7541 	struct ftrace_buffer_info *info;
7542 	int ret;
7543 
7544 	ret = tracing_check_open_get_tr(tr);
7545 	if (ret)
7546 		return ret;
7547 
7548 	info = kvzalloc(sizeof(*info), GFP_KERNEL);
7549 	if (!info) {
7550 		trace_array_put(tr);
7551 		return -ENOMEM;
7552 	}
7553 
7554 	mutex_lock(&trace_types_lock);
7555 
7556 	info->iter.tr		= tr;
7557 	info->iter.cpu_file	= tracing_get_cpu(inode);
7558 	info->iter.trace	= tr->current_trace;
7559 	info->iter.array_buffer = &tr->array_buffer;
7560 	info->spare		= NULL;
7561 	/* Force reading ring buffer for first read */
7562 	info->read		= (unsigned int)-1;
7563 
7564 	filp->private_data = info;
7565 
7566 	tr->trace_ref++;
7567 
7568 	mutex_unlock(&trace_types_lock);
7569 
7570 	ret = nonseekable_open(inode, filp);
7571 	if (ret < 0)
7572 		trace_array_put(tr);
7573 
7574 	return ret;
7575 }
7576 
7577 static __poll_t
7578 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7579 {
7580 	struct ftrace_buffer_info *info = filp->private_data;
7581 	struct trace_iterator *iter = &info->iter;
7582 
7583 	return trace_poll(iter, filp, poll_table);
7584 }
7585 
7586 static ssize_t
7587 tracing_buffers_read(struct file *filp, char __user *ubuf,
7588 		     size_t count, loff_t *ppos)
7589 {
7590 	struct ftrace_buffer_info *info = filp->private_data;
7591 	struct trace_iterator *iter = &info->iter;
7592 	ssize_t ret = 0;
7593 	ssize_t size;
7594 
7595 	if (!count)
7596 		return 0;
7597 
7598 #ifdef CONFIG_TRACER_MAX_TRACE
7599 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7600 		return -EBUSY;
7601 #endif
7602 
7603 	if (!info->spare) {
7604 		info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
7605 							  iter->cpu_file);
7606 		if (IS_ERR(info->spare)) {
7607 			ret = PTR_ERR(info->spare);
7608 			info->spare = NULL;
7609 		} else {
7610 			info->spare_cpu = iter->cpu_file;
7611 		}
7612 	}
7613 	if (!info->spare)
7614 		return ret;
7615 
7616 	/* Do we have previous read data to read? */
7617 	if (info->read < PAGE_SIZE)
7618 		goto read;
7619 
7620  again:
7621 	trace_access_lock(iter->cpu_file);
7622 	ret = ring_buffer_read_page(iter->array_buffer->buffer,
7623 				    &info->spare,
7624 				    count,
7625 				    iter->cpu_file, 0);
7626 	trace_access_unlock(iter->cpu_file);
7627 
7628 	if (ret < 0) {
7629 		if (trace_empty(iter)) {
7630 			if ((filp->f_flags & O_NONBLOCK))
7631 				return -EAGAIN;
7632 
7633 			ret = wait_on_pipe(iter, 0);
7634 			if (ret)
7635 				return ret;
7636 
7637 			goto again;
7638 		}
7639 		return 0;
7640 	}
7641 
7642 	info->read = 0;
7643  read:
7644 	size = PAGE_SIZE - info->read;
7645 	if (size > count)
7646 		size = count;
7647 
7648 	ret = copy_to_user(ubuf, info->spare + info->read, size);
7649 	if (ret == size)
7650 		return -EFAULT;
7651 
7652 	size -= ret;
7653 
7654 	*ppos += size;
7655 	info->read += size;
7656 
7657 	return size;
7658 }
7659 
7660 static int tracing_buffers_release(struct inode *inode, struct file *file)
7661 {
7662 	struct ftrace_buffer_info *info = file->private_data;
7663 	struct trace_iterator *iter = &info->iter;
7664 
7665 	mutex_lock(&trace_types_lock);
7666 
7667 	iter->tr->trace_ref--;
7668 
7669 	__trace_array_put(iter->tr);
7670 
7671 	if (info->spare)
7672 		ring_buffer_free_read_page(iter->array_buffer->buffer,
7673 					   info->spare_cpu, info->spare);
7674 	kvfree(info);
7675 
7676 	mutex_unlock(&trace_types_lock);
7677 
7678 	return 0;
7679 }
7680 
7681 struct buffer_ref {
7682 	struct trace_buffer	*buffer;
7683 	void			*page;
7684 	int			cpu;
7685 	refcount_t		refcount;
7686 };
7687 
7688 static void buffer_ref_release(struct buffer_ref *ref)
7689 {
7690 	if (!refcount_dec_and_test(&ref->refcount))
7691 		return;
7692 	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7693 	kfree(ref);
7694 }
7695 
7696 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7697 				    struct pipe_buffer *buf)
7698 {
7699 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7700 
7701 	buffer_ref_release(ref);
7702 	buf->private = 0;
7703 }
7704 
7705 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7706 				struct pipe_buffer *buf)
7707 {
7708 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7709 
7710 	if (refcount_read(&ref->refcount) > INT_MAX/2)
7711 		return false;
7712 
7713 	refcount_inc(&ref->refcount);
7714 	return true;
7715 }
7716 
7717 /* Pipe buffer operations for a buffer. */
7718 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7719 	.release		= buffer_pipe_buf_release,
7720 	.get			= buffer_pipe_buf_get,
7721 };
7722 
7723 /*
7724  * Callback from splice_to_pipe(), if we need to release some pages
7725  * at the end of the spd in case we errored out while filling the pipe.
7726  */
7727 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7728 {
7729 	struct buffer_ref *ref =
7730 		(struct buffer_ref *)spd->partial[i].private;
7731 
7732 	buffer_ref_release(ref);
7733 	spd->partial[i].private = 0;
7734 }
7735 
7736 static ssize_t
7737 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7738 			    struct pipe_inode_info *pipe, size_t len,
7739 			    unsigned int flags)
7740 {
7741 	struct ftrace_buffer_info *info = file->private_data;
7742 	struct trace_iterator *iter = &info->iter;
7743 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
7744 	struct page *pages_def[PIPE_DEF_BUFFERS];
7745 	struct splice_pipe_desc spd = {
7746 		.pages		= pages_def,
7747 		.partial	= partial_def,
7748 		.nr_pages_max	= PIPE_DEF_BUFFERS,
7749 		.ops		= &buffer_pipe_buf_ops,
7750 		.spd_release	= buffer_spd_release,
7751 	};
7752 	struct buffer_ref *ref;
7753 	int entries, i;
7754 	ssize_t ret = 0;
7755 
7756 #ifdef CONFIG_TRACER_MAX_TRACE
7757 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7758 		return -EBUSY;
7759 #endif
7760 
7761 	if (*ppos & (PAGE_SIZE - 1))
7762 		return -EINVAL;
7763 
7764 	if (len & (PAGE_SIZE - 1)) {
7765 		if (len < PAGE_SIZE)
7766 			return -EINVAL;
7767 		len &= PAGE_MASK;
7768 	}
7769 
7770 	if (splice_grow_spd(pipe, &spd))
7771 		return -ENOMEM;
7772 
7773  again:
7774 	trace_access_lock(iter->cpu_file);
7775 	entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7776 
7777 	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
7778 		struct page *page;
7779 		int r;
7780 
7781 		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
7782 		if (!ref) {
7783 			ret = -ENOMEM;
7784 			break;
7785 		}
7786 
7787 		refcount_set(&ref->refcount, 1);
7788 		ref->buffer = iter->array_buffer->buffer;
7789 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7790 		if (IS_ERR(ref->page)) {
7791 			ret = PTR_ERR(ref->page);
7792 			ref->page = NULL;
7793 			kfree(ref);
7794 			break;
7795 		}
7796 		ref->cpu = iter->cpu_file;
7797 
7798 		r = ring_buffer_read_page(ref->buffer, &ref->page,
7799 					  len, iter->cpu_file, 1);
7800 		if (r < 0) {
7801 			ring_buffer_free_read_page(ref->buffer, ref->cpu,
7802 						   ref->page);
7803 			kfree(ref);
7804 			break;
7805 		}
7806 
7807 		page = virt_to_page(ref->page);
7808 
7809 		spd.pages[i] = page;
7810 		spd.partial[i].len = PAGE_SIZE;
7811 		spd.partial[i].offset = 0;
7812 		spd.partial[i].private = (unsigned long)ref;
7813 		spd.nr_pages++;
7814 		*ppos += PAGE_SIZE;
7815 
7816 		entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7817 	}
7818 
7819 	trace_access_unlock(iter->cpu_file);
7820 	spd.nr_pages = i;
7821 
7822 	/* did we read anything? */
7823 	if (!spd.nr_pages) {
7824 		if (ret)
7825 			goto out;
7826 
7827 		ret = -EAGAIN;
7828 		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
7829 			goto out;
7830 
7831 		ret = wait_on_pipe(iter, iter->tr->buffer_percent);
7832 		if (ret)
7833 			goto out;
7834 
7835 		goto again;
7836 	}
7837 
7838 	ret = splice_to_pipe(pipe, &spd);
7839 out:
7840 	splice_shrink_spd(&spd);
7841 
7842 	return ret;
7843 }
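
/*
 * Note: the splice path above backs the per-cpu "trace_pipe_raw" files
 * (see tracing_init_tracefs_percpu()); readers such as trace-cmd splice
 * whole ring-buffer pages out of them without copying, which is why
 * both *ppos and len are required to be page aligned above.
 */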
7844 
7845 static const struct file_operations tracing_buffers_fops = {
7846 	.open		= tracing_buffers_open,
7847 	.read		= tracing_buffers_read,
7848 	.poll		= tracing_buffers_poll,
7849 	.release	= tracing_buffers_release,
7850 	.splice_read	= tracing_buffers_splice_read,
7851 	.llseek		= no_llseek,
7852 };
7853 
7854 static ssize_t
7855 tracing_stats_read(struct file *filp, char __user *ubuf,
7856 		   size_t count, loff_t *ppos)
7857 {
7858 	struct inode *inode = file_inode(filp);
7859 	struct trace_array *tr = inode->i_private;
7860 	struct array_buffer *trace_buf = &tr->array_buffer;
7861 	int cpu = tracing_get_cpu(inode);
7862 	struct trace_seq *s;
7863 	unsigned long cnt;
7864 	unsigned long long t;
7865 	unsigned long usec_rem;
7866 
7867 	s = kmalloc(sizeof(*s), GFP_KERNEL);
7868 	if (!s)
7869 		return -ENOMEM;
7870 
7871 	trace_seq_init(s);
7872 
7873 	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7874 	trace_seq_printf(s, "entries: %ld\n", cnt);
7875 
7876 	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7877 	trace_seq_printf(s, "overrun: %ld\n", cnt);
7878 
7879 	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7880 	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7881 
7882 	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7883 	trace_seq_printf(s, "bytes: %ld\n", cnt);
7884 
7885 	if (trace_clocks[tr->clock_id].in_ns) {
7886 		/* local or global for trace_clock */
7887 		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7888 		usec_rem = do_div(t, USEC_PER_SEC);
7889 		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7890 								t, usec_rem);
7891 
7892 		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7893 		usec_rem = do_div(t, USEC_PER_SEC);
7894 		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7895 	} else {
7896 		/* counter or tsc mode for trace_clock */
7897 		trace_seq_printf(s, "oldest event ts: %llu\n",
7898 				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7899 
7900 		trace_seq_printf(s, "now ts: %llu\n",
7901 				ring_buffer_time_stamp(trace_buf->buffer, cpu));
7902 	}
7903 
7904 	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7905 	trace_seq_printf(s, "dropped events: %ld\n", cnt);
7906 
7907 	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7908 	trace_seq_printf(s, "read events: %ld\n", cnt);
7909 
7910 	count = simple_read_from_buffer(ubuf, count, ppos,
7911 					s->buffer, trace_seq_used(s));
7912 
7913 	kfree(s);
7914 
7915 	return count;
7916 }
7917 
7918 static const struct file_operations tracing_stats_fops = {
7919 	.open		= tracing_open_generic_tr,
7920 	.read		= tracing_stats_read,
7921 	.llseek		= generic_file_llseek,
7922 	.release	= tracing_release_generic_tr,
7923 };
7924 
7925 #ifdef CONFIG_DYNAMIC_FTRACE
7926 
7927 static ssize_t
7928 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7929 		  size_t cnt, loff_t *ppos)
7930 {
7931 	ssize_t ret;
7932 	char *buf;
7933 	int r;
7934 
7935 	/* 256 should be plenty to hold the amount needed */
7936 	buf = kmalloc(256, GFP_KERNEL);
7937 	if (!buf)
7938 		return -ENOMEM;
7939 
7940 	r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
7941 		      ftrace_update_tot_cnt,
7942 		      ftrace_number_of_pages,
7943 		      ftrace_number_of_groups);
7944 
7945 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7946 	kfree(buf);
7947 	return ret;
7948 }
7949 
7950 static const struct file_operations tracing_dyn_info_fops = {
7951 	.open		= tracing_open_generic,
7952 	.read		= tracing_read_dyn_info,
7953 	.llseek		= generic_file_llseek,
7954 };
7955 #endif /* CONFIG_DYNAMIC_FTRACE */
7956 
7957 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7958 static void
7959 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7960 		struct trace_array *tr, struct ftrace_probe_ops *ops,
7961 		void *data)
7962 {
7963 	tracing_snapshot_instance(tr);
7964 }
7965 
7966 static void
7967 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7968 		      struct trace_array *tr, struct ftrace_probe_ops *ops,
7969 		      void *data)
7970 {
7971 	struct ftrace_func_mapper *mapper = data;
7972 	long *count = NULL;
7973 
7974 	if (mapper)
7975 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7976 
7977 	if (count) {
7978 
7979 		if (*count <= 0)
7980 			return;
7981 
7982 		(*count)--;
7983 	}
7984 
7985 	tracing_snapshot_instance(tr);
7986 }
7987 
7988 static int
7989 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7990 		      struct ftrace_probe_ops *ops, void *data)
7991 {
7992 	struct ftrace_func_mapper *mapper = data;
7993 	long *count = NULL;
7994 
7995 	seq_printf(m, "%ps:", (void *)ip);
7996 
7997 	seq_puts(m, "snapshot");
7998 
7999 	if (mapper)
8000 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8001 
8002 	if (count)
8003 		seq_printf(m, ":count=%ld\n", *count);
8004 	else
8005 		seq_puts(m, ":unlimited\n");
8006 
8007 	return 0;
8008 }
8009 
8010 static int
8011 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8012 		     unsigned long ip, void *init_data, void **data)
8013 {
8014 	struct ftrace_func_mapper *mapper = *data;
8015 
8016 	if (!mapper) {
8017 		mapper = allocate_ftrace_func_mapper();
8018 		if (!mapper)
8019 			return -ENOMEM;
8020 		*data = mapper;
8021 	}
8022 
8023 	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8024 }
8025 
8026 static void
8027 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8028 		     unsigned long ip, void *data)
8029 {
8030 	struct ftrace_func_mapper *mapper = data;
8031 
8032 	if (!ip) {
8033 		if (!mapper)
8034 			return;
8035 		free_ftrace_func_mapper(mapper, NULL);
8036 		return;
8037 	}
8038 
8039 	ftrace_func_mapper_remove_ip(mapper, ip);
8040 }
8041 
8042 static struct ftrace_probe_ops snapshot_probe_ops = {
8043 	.func			= ftrace_snapshot,
8044 	.print			= ftrace_snapshot_print,
8045 };
8046 
8047 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8048 	.func			= ftrace_count_snapshot,
8049 	.print			= ftrace_snapshot_print,
8050 	.init			= ftrace_snapshot_init,
8051 	.free			= ftrace_snapshot_free,
8052 };
8053 
8054 static int
8055 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8056 			       char *glob, char *cmd, char *param, int enable)
8057 {
8058 	struct ftrace_probe_ops *ops;
8059 	void *count = (void *)-1;
8060 	char *number;
8061 	int ret;
8062 
8063 	if (!tr)
8064 		return -ENODEV;
8065 
8066 	/* hash funcs only work with set_ftrace_filter */
8067 	if (!enable)
8068 		return -EINVAL;
8069 
8070 	ops = param ? &snapshot_count_probe_ops :  &snapshot_probe_ops;
8071 
8072 	if (glob[0] == '!')
8073 		return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8074 
8075 	if (!param)
8076 		goto out_reg;
8077 
8078 	number = strsep(&param, ":");
8079 
8080 	if (!strlen(number))
8081 		goto out_reg;
8082 
8083 	/*
8084 	 * We use the callback data field (which is a pointer)
8085 	 * as our counter.
8086 	 */
8087 	ret = kstrtoul(number, 0, (unsigned long *)&count);
8088 	if (ret)
8089 		return ret;
8090 
8091  out_reg:
8092 	ret = tracing_alloc_snapshot_instance(tr);
8093 	if (ret < 0)
8094 		goto out;
8095 
8096 	ret = register_ftrace_function_probe(glob, tr, ops, count);
8097 
8098  out:
8099 	return ret < 0 ? ret : 0;
8100 }
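
/*
 * A usage sketch for the "snapshot" command wired up below (illustrative;
 * the traced function name is only an example):
 *
 *   # take a snapshot every time schedule() is hit
 *   echo 'schedule:snapshot' > set_ftrace_filter
 *
 *   # only snapshot on the first 5 hits (":5" becomes 'count' above)
 *   echo 'schedule:snapshot:5' > set_ftrace_filter
 *
 *   # remove the probe again (the leading '!' is handled above)
 *   echo '!schedule:snapshot' > set_ftrace_filter
 */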
8101 
8102 static struct ftrace_func_command ftrace_snapshot_cmd = {
8103 	.name			= "snapshot",
8104 	.func			= ftrace_trace_snapshot_callback,
8105 };
8106 
8107 static __init int register_snapshot_cmd(void)
8108 {
8109 	return register_ftrace_command(&ftrace_snapshot_cmd);
8110 }
8111 #else
8112 static inline __init int register_snapshot_cmd(void) { return 0; }
8113 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8114 
8115 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8116 {
8117 	if (WARN_ON(!tr->dir))
8118 		return ERR_PTR(-ENODEV);
8119 
8120 	/* Top directory uses NULL as the parent */
8121 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8122 		return NULL;
8123 
8124 	/* All sub buffers have a descriptor */
8125 	return tr->dir;
8126 }
8127 
8128 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8129 {
8130 	struct dentry *d_tracer;
8131 
8132 	if (tr->percpu_dir)
8133 		return tr->percpu_dir;
8134 
8135 	d_tracer = tracing_get_dentry(tr);
8136 	if (IS_ERR(d_tracer))
8137 		return NULL;
8138 
8139 	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8140 
8141 	MEM_FAIL(!tr->percpu_dir,
8142 		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8143 
8144 	return tr->percpu_dir;
8145 }
8146 
8147 static struct dentry *
8148 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8149 		      void *data, long cpu, const struct file_operations *fops)
8150 {
8151 	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8152 
8153 	if (ret) /* See tracing_get_cpu() */
8154 		d_inode(ret)->i_cdev = (void *)(cpu + 1);
8155 	return ret;
8156 }
8157 
8158 static void
8159 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8160 {
8161 	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8162 	struct dentry *d_cpu;
8163 	char cpu_dir[30]; /* 30 characters should be more than enough */
8164 
8165 	if (!d_percpu)
8166 		return;
8167 
8168 	snprintf(cpu_dir, 30, "cpu%ld", cpu);
8169 	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8170 	if (!d_cpu) {
8171 		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8172 		return;
8173 	}
8174 
8175 	/* per cpu trace_pipe */
8176 	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
8177 				tr, cpu, &tracing_pipe_fops);
8178 
8179 	/* per cpu trace */
8180 	trace_create_cpu_file("trace", 0644, d_cpu,
8181 				tr, cpu, &tracing_fops);
8182 
8183 	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
8184 				tr, cpu, &tracing_buffers_fops);
8185 
8186 	trace_create_cpu_file("stats", 0444, d_cpu,
8187 				tr, cpu, &tracing_stats_fops);
8188 
8189 	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
8190 				tr, cpu, &tracing_entries_fops);
8191 
8192 #ifdef CONFIG_TRACER_SNAPSHOT
8193 	trace_create_cpu_file("snapshot", 0644, d_cpu,
8194 				tr, cpu, &snapshot_fops);
8195 
8196 	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
8197 				tr, cpu, &snapshot_raw_fops);
8198 #endif
8199 }
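
/*
 * The per-cpu layout created above ends up looking like (illustrative):
 *
 *   per_cpu/cpu<N>/trace_pipe
 *   per_cpu/cpu<N>/trace
 *   per_cpu/cpu<N>/trace_pipe_raw
 *   per_cpu/cpu<N>/stats
 *   per_cpu/cpu<N>/buffer_size_kb
 *   per_cpu/cpu<N>/snapshot      (CONFIG_TRACER_SNAPSHOT only)
 *   per_cpu/cpu<N>/snapshot_raw  (CONFIG_TRACER_SNAPSHOT only)
 */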
8200 
8201 #ifdef CONFIG_FTRACE_SELFTEST
8202 /* Let selftest have access to static functions in this file */
8203 #include "trace_selftest.c"
8204 #endif
8205 
8206 static ssize_t
8207 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8208 			loff_t *ppos)
8209 {
8210 	struct trace_option_dentry *topt = filp->private_data;
8211 	char *buf;
8212 
8213 	if (topt->flags->val & topt->opt->bit)
8214 		buf = "1\n";
8215 	else
8216 		buf = "0\n";
8217 
8218 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8219 }
8220 
8221 static ssize_t
8222 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8223 			 loff_t *ppos)
8224 {
8225 	struct trace_option_dentry *topt = filp->private_data;
8226 	unsigned long val;
8227 	int ret;
8228 
8229 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8230 	if (ret)
8231 		return ret;
8232 
8233 	if (val != 0 && val != 1)
8234 		return -EINVAL;
8235 
8236 	if (!!(topt->flags->val & topt->opt->bit) != val) {
8237 		mutex_lock(&trace_types_lock);
8238 		ret = __set_tracer_option(topt->tr, topt->flags,
8239 					  topt->opt, !val);
8240 		mutex_unlock(&trace_types_lock);
8241 		if (ret)
8242 			return ret;
8243 	}
8244 
8245 	*ppos += cnt;
8246 
8247 	return cnt;
8248 }
8249 
8250 
8251 static const struct file_operations trace_options_fops = {
8252 	.open = tracing_open_generic,
8253 	.read = trace_options_read,
8254 	.write = trace_options_write,
8255 	.llseek	= generic_file_llseek,
8256 };
8257 
8258 /*
8259  * In order to pass in both the trace_array descriptor as well as the index
8260  * to the flag that the trace option file represents, the trace_array
8261  * has a character array of trace_flags_index[], which holds the index
8262  * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8263  * The address of this character array is passed to the flag option file
8264  * read/write callbacks.
8265  *
8266  * In order to extract both the index and the trace_array descriptor,
8267  * get_tr_index() uses the following algorithm.
8268  *
8269  *   idx = *ptr;
8270  *
8271  * This works because the pointer is the address of an index entry whose
8272  * value is its own position in the array (remember index[1] == 1).
8273  *
8274  * Then, to get the trace_array descriptor, we subtract that index
8275  * from the ptr, which takes us back to the start of the index array:
8276  *
8277  *   ptr - idx == &index[0]
8278  *
8279  * Then a simple container_of() from that pointer gets us to the
8280  * trace_array descriptor.
8281  */
8282 static void get_tr_index(void *data, struct trace_array **ptr,
8283 			 unsigned int *pindex)
8284 {
8285 	*pindex = *(unsigned char *)data;
8286 
8287 	*ptr = container_of(data - *pindex, struct trace_array,
8288 			    trace_flags_index);
8289 }
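
/*
 * A worked example of the scheme described above (hypothetical values):
 * if 'data' is &tr->trace_flags_index[3], then *pindex reads back 3
 * (since index[3] == 3), data - 3 lands on &tr->trace_flags_index[0],
 * and container_of() walks from that member back to the enclosing
 * struct trace_array.
 */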
8290 
8291 static ssize_t
8292 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8293 			loff_t *ppos)
8294 {
8295 	void *tr_index = filp->private_data;
8296 	struct trace_array *tr;
8297 	unsigned int index;
8298 	char *buf;
8299 
8300 	get_tr_index(tr_index, &tr, &index);
8301 
8302 	if (tr->trace_flags & (1 << index))
8303 		buf = "1\n";
8304 	else
8305 		buf = "0\n";
8306 
8307 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8308 }
8309 
8310 static ssize_t
8311 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8312 			 loff_t *ppos)
8313 {
8314 	void *tr_index = filp->private_data;
8315 	struct trace_array *tr;
8316 	unsigned int index;
8317 	unsigned long val;
8318 	int ret;
8319 
8320 	get_tr_index(tr_index, &tr, &index);
8321 
8322 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8323 	if (ret)
8324 		return ret;
8325 
8326 	if (val != 0 && val != 1)
8327 		return -EINVAL;
8328 
8329 	mutex_lock(&event_mutex);
8330 	mutex_lock(&trace_types_lock);
8331 	ret = set_tracer_flag(tr, 1 << index, val);
8332 	mutex_unlock(&trace_types_lock);
8333 	mutex_unlock(&event_mutex);
8334 
8335 	if (ret < 0)
8336 		return ret;
8337 
8338 	*ppos += cnt;
8339 
8340 	return cnt;
8341 }
8342 
8343 static const struct file_operations trace_options_core_fops = {
8344 	.open = tracing_open_generic,
8345 	.read = trace_options_core_read,
8346 	.write = trace_options_core_write,
8347 	.llseek = generic_file_llseek,
8348 };
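
/*
 * Files created with trace_options_core_fops show up under the per-array
 * "options" directory, one file per trace flag, and accept "0" or "1";
 * e.g. (illustrative flag name):
 *
 *   echo 1 > options/sym-addr
 */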
8349 
8350 struct dentry *trace_create_file(const char *name,
8351 				 umode_t mode,
8352 				 struct dentry *parent,
8353 				 void *data,
8354 				 const struct file_operations *fops)
8355 {
8356 	struct dentry *ret;
8357 
8358 	ret = tracefs_create_file(name, mode, parent, data, fops);
8359 	if (!ret)
8360 		pr_warn("Could not create tracefs '%s' entry\n", name);
8361 
8362 	return ret;
8363 }
8364 
8365 
8366 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8367 {
8368 	struct dentry *d_tracer;
8369 
8370 	if (tr->options)
8371 		return tr->options;
8372 
8373 	d_tracer = tracing_get_dentry(tr);
8374 	if (IS_ERR(d_tracer))
8375 		return NULL;
8376 
8377 	tr->options = tracefs_create_dir("options", d_tracer);
8378 	if (!tr->options) {
8379 		pr_warn("Could not create tracefs directory 'options'\n");
8380 		return NULL;
8381 	}
8382 
8383 	return tr->options;
8384 }
8385 
8386 static void
8387 create_trace_option_file(struct trace_array *tr,
8388 			 struct trace_option_dentry *topt,
8389 			 struct tracer_flags *flags,
8390 			 struct tracer_opt *opt)
8391 {
8392 	struct dentry *t_options;
8393 
8394 	t_options = trace_options_init_dentry(tr);
8395 	if (!t_options)
8396 		return;
8397 
8398 	topt->flags = flags;
8399 	topt->opt = opt;
8400 	topt->tr = tr;
8401 
8402 	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8403 				    &trace_options_fops);
8404 
8405 }
8406 
8407 static void
8408 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8409 {
8410 	struct trace_option_dentry *topts;
8411 	struct trace_options *tr_topts;
8412 	struct tracer_flags *flags;
8413 	struct tracer_opt *opts;
8414 	int cnt;
8415 	int i;
8416 
8417 	if (!tracer)
8418 		return;
8419 
8420 	flags = tracer->flags;
8421 
8422 	if (!flags || !flags->opts)
8423 		return;
8424 
8425 	/*
8426 	 * If this is an instance, only create flags for tracers
8427 	 * the instance may have.
8428 	 */
8429 	if (!trace_ok_for_array(tracer, tr))
8430 		return;
8431 
8432 	for (i = 0; i < tr->nr_topts; i++) {
8433 		/* Make sure there are no duplicate flags. */
8434 		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8435 			return;
8436 	}
8437 
8438 	opts = flags->opts;
8439 
8440 	for (cnt = 0; opts[cnt].name; cnt++)
8441 		;
8442 
8443 	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8444 	if (!topts)
8445 		return;
8446 
8447 	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8448 			    GFP_KERNEL);
8449 	if (!tr_topts) {
8450 		kfree(topts);
8451 		return;
8452 	}
8453 
8454 	tr->topts = tr_topts;
8455 	tr->topts[tr->nr_topts].tracer = tracer;
8456 	tr->topts[tr->nr_topts].topts = topts;
8457 	tr->nr_topts++;
8458 
8459 	for (cnt = 0; opts[cnt].name; cnt++) {
8460 		create_trace_option_file(tr, &topts[cnt], flags,
8461 					 &opts[cnt]);
8462 		MEM_FAIL(topts[cnt].entry == NULL,
8463 			  "Failed to create trace option: %s",
8464 			  opts[cnt].name);
8465 	}
8466 }
8467 
8468 static struct dentry *
8469 create_trace_option_core_file(struct trace_array *tr,
8470 			      const char *option, long index)
8471 {
8472 	struct dentry *t_options;
8473 
8474 	t_options = trace_options_init_dentry(tr);
8475 	if (!t_options)
8476 		return NULL;
8477 
8478 	return trace_create_file(option, 0644, t_options,
8479 				 (void *)&tr->trace_flags_index[index],
8480 				 &trace_options_core_fops);
8481 }
8482 
8483 static void create_trace_options_dir(struct trace_array *tr)
8484 {
8485 	struct dentry *t_options;
8486 	bool top_level = tr == &global_trace;
8487 	int i;
8488 
8489 	t_options = trace_options_init_dentry(tr);
8490 	if (!t_options)
8491 		return;
8492 
8493 	for (i = 0; trace_options[i]; i++) {
8494 		if (top_level ||
8495 		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8496 			create_trace_option_core_file(tr, trace_options[i], i);
8497 	}
8498 }
8499 
8500 static ssize_t
8501 rb_simple_read(struct file *filp, char __user *ubuf,
8502 	       size_t cnt, loff_t *ppos)
8503 {
8504 	struct trace_array *tr = filp->private_data;
8505 	char buf[64];
8506 	int r;
8507 
8508 	r = tracer_tracing_is_on(tr);
8509 	r = sprintf(buf, "%d\n", r);
8510 
8511 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8512 }
8513 
8514 static ssize_t
8515 rb_simple_write(struct file *filp, const char __user *ubuf,
8516 		size_t cnt, loff_t *ppos)
8517 {
8518 	struct trace_array *tr = filp->private_data;
8519 	struct trace_buffer *buffer = tr->array_buffer.buffer;
8520 	unsigned long val;
8521 	int ret;
8522 
8523 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8524 	if (ret)
8525 		return ret;
8526 
8527 	if (buffer) {
8528 		mutex_lock(&trace_types_lock);
8529 		if (!!val == tracer_tracing_is_on(tr)) {
8530 			val = 0; /* do nothing */
8531 		} else if (val) {
8532 			tracer_tracing_on(tr);
8533 			if (tr->current_trace->start)
8534 				tr->current_trace->start(tr);
8535 		} else {
8536 			tracer_tracing_off(tr);
8537 			if (tr->current_trace->stop)
8538 				tr->current_trace->stop(tr);
8539 		}
8540 		mutex_unlock(&trace_types_lock);
8541 	}
8542 
8543 	(*ppos)++;
8544 
8545 	return cnt;
8546 }
8547 
8548 static const struct file_operations rb_simple_fops = {
8549 	.open		= tracing_open_generic_tr,
8550 	.read		= rb_simple_read,
8551 	.write		= rb_simple_write,
8552 	.release	= tracing_release_generic_tr,
8553 	.llseek		= default_llseek,
8554 };
8555 
8556 static ssize_t
8557 buffer_percent_read(struct file *filp, char __user *ubuf,
8558 		    size_t cnt, loff_t *ppos)
8559 {
8560 	struct trace_array *tr = filp->private_data;
8561 	char buf[64];
8562 	int r;
8563 
8564 	r = tr->buffer_percent;
8565 	r = sprintf(buf, "%d\n", r);
8566 
8567 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8568 }
8569 
8570 static ssize_t
8571 buffer_percent_write(struct file *filp, const char __user *ubuf,
8572 		     size_t cnt, loff_t *ppos)
8573 {
8574 	struct trace_array *tr = filp->private_data;
8575 	unsigned long val;
8576 	int ret;
8577 
8578 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8579 	if (ret)
8580 		return ret;
8581 
8582 	if (val > 100)
8583 		return -EINVAL;
8584 
8585 	if (!val)
8586 		val = 1;
8587 
8588 	tr->buffer_percent = val;
8589 
8590 	(*ppos)++;
8591 
8592 	return cnt;
8593 }
8594 
8595 static const struct file_operations buffer_percent_fops = {
8596 	.open		= tracing_open_generic_tr,
8597 	.read		= buffer_percent_read,
8598 	.write		= buffer_percent_write,
8599 	.release	= tracing_release_generic_tr,
8600 	.llseek		= default_llseek,
8601 };
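
/*
 * Note: tr->buffer_percent set through the file above is consumed by
 * wait_on_pipe() in tracing_buffers_splice_read(): a blocking reader is
 * not woken until the ring buffer is at least that percent full. A value
 * of 0 written by user space is stored as 1.
 */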
8602 
8603 static struct dentry *trace_instance_dir;
8604 
8605 static void
8606 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8607 
8608 static int
8609 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
8610 {
8611 	enum ring_buffer_flags rb_flags;
8612 
8613 	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
8614 
8615 	buf->tr = tr;
8616 
8617 	buf->buffer = ring_buffer_alloc(size, rb_flags);
8618 	if (!buf->buffer)
8619 		return -ENOMEM;
8620 
8621 	buf->data = alloc_percpu(struct trace_array_cpu);
8622 	if (!buf->data) {
8623 		ring_buffer_free(buf->buffer);
8624 		buf->buffer = NULL;
8625 		return -ENOMEM;
8626 	}
8627 
8628 	/* Allocate the first page for all buffers */
8629 	set_buffer_entries(&tr->array_buffer,
8630 			   ring_buffer_size(tr->array_buffer.buffer, 0));
8631 
8632 	return 0;
8633 }
8634 
8635 static int allocate_trace_buffers(struct trace_array *tr, int size)
8636 {
8637 	int ret;
8638 
8639 	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
8640 	if (ret)
8641 		return ret;
8642 
8643 #ifdef CONFIG_TRACER_MAX_TRACE
8644 	ret = allocate_trace_buffer(tr, &tr->max_buffer,
8645 				    allocate_snapshot ? size : 1);
8646 	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
8647 		ring_buffer_free(tr->array_buffer.buffer);
8648 		tr->array_buffer.buffer = NULL;
8649 		free_percpu(tr->array_buffer.data);
8650 		tr->array_buffer.data = NULL;
8651 		return -ENOMEM;
8652 	}
8653 	tr->allocated_snapshot = allocate_snapshot;
8654 
8655 	/*
8656 	 * Only the top level trace array gets its snapshot allocated
8657 	 * from the kernel command line.
8658 	 */
8659 	allocate_snapshot = false;
8660 #endif
8661 
8662 	return 0;
8663 }
8664 
8665 static void free_trace_buffer(struct array_buffer *buf)
8666 {
8667 	if (buf->buffer) {
8668 		ring_buffer_free(buf->buffer);
8669 		buf->buffer = NULL;
8670 		free_percpu(buf->data);
8671 		buf->data = NULL;
8672 	}
8673 }
8674 
8675 static void free_trace_buffers(struct trace_array *tr)
8676 {
8677 	if (!tr)
8678 		return;
8679 
8680 	free_trace_buffer(&tr->array_buffer);
8681 
8682 #ifdef CONFIG_TRACER_MAX_TRACE
8683 	free_trace_buffer(&tr->max_buffer);
8684 #endif
8685 }
8686 
8687 static void init_trace_flags_index(struct trace_array *tr)
8688 {
8689 	int i;
8690 
8691 	/* Used by the trace options files */
8692 	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8693 		tr->trace_flags_index[i] = i;
8694 }
8695 
8696 static void __update_tracer_options(struct trace_array *tr)
8697 {
8698 	struct tracer *t;
8699 
8700 	for (t = trace_types; t; t = t->next)
8701 		add_tracer_options(tr, t);
8702 }
8703 
8704 static void update_tracer_options(struct trace_array *tr)
8705 {
8706 	mutex_lock(&trace_types_lock);
8707 	__update_tracer_options(tr);
8708 	mutex_unlock(&trace_types_lock);
8709 }
8710 
8711 /* Must have trace_types_lock held */
8712 struct trace_array *trace_array_find(const char *instance)
8713 {
8714 	struct trace_array *tr, *found = NULL;
8715 
8716 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8717 		if (tr->name && strcmp(tr->name, instance) == 0) {
8718 			found = tr;
8719 			break;
8720 		}
8721 	}
8722 
8723 	return found;
8724 }
8725 
8726 struct trace_array *trace_array_find_get(const char *instance)
8727 {
8728 	struct trace_array *tr;
8729 
8730 	mutex_lock(&trace_types_lock);
8731 	tr = trace_array_find(instance);
8732 	if (tr)
8733 		tr->ref++;
8734 	mutex_unlock(&trace_types_lock);
8735 
8736 	return tr;
8737 }
8738 
8739 static int trace_array_create_dir(struct trace_array *tr)
8740 {
8741 	int ret;
8742 
8743 	tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
8744 	if (!tr->dir)
8745 		return -EINVAL;
8746 
8747 	ret = event_trace_add_tracer(tr->dir, tr);
8748 	if (ret)
8749 		tracefs_remove(tr->dir);
8750 
8751 	init_tracer_tracefs(tr, tr->dir);
8752 	__update_tracer_options(tr);
8753 
8754 	return ret;
8755 }
8756 
8757 static struct trace_array *trace_array_create(const char *name)
8758 {
8759 	struct trace_array *tr;
8760 	int ret;
8761 
8762 	ret = -ENOMEM;
8763 	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8764 	if (!tr)
8765 		return ERR_PTR(ret);
8766 
8767 	tr->name = kstrdup(name, GFP_KERNEL);
8768 	if (!tr->name)
8769 		goto out_free_tr;
8770 
8771 	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8772 		goto out_free_tr;
8773 
8774 	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
8775 
8776 	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8777 
8778 	raw_spin_lock_init(&tr->start_lock);
8779 
8780 	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8781 
8782 	tr->current_trace = &nop_trace;
8783 
8784 	INIT_LIST_HEAD(&tr->systems);
8785 	INIT_LIST_HEAD(&tr->events);
8786 	INIT_LIST_HEAD(&tr->hist_vars);
8787 	INIT_LIST_HEAD(&tr->err_log);
8788 
8789 	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
8790 		goto out_free_tr;
8791 
8792 	if (ftrace_allocate_ftrace_ops(tr) < 0)
8793 		goto out_free_tr;
8794 
8795 	ftrace_init_trace_array(tr);
8796 
8797 	init_trace_flags_index(tr);
8798 
8799 	if (trace_instance_dir) {
8800 		ret = trace_array_create_dir(tr);
8801 		if (ret)
8802 			goto out_free_tr;
8803 	} else
8804 		__trace_early_add_events(tr);
8805 
8806 	list_add(&tr->list, &ftrace_trace_arrays);
8807 
8808 	tr->ref++;
8809 
8810 	return tr;
8811 
8812  out_free_tr:
8813 	ftrace_free_ftrace_ops(tr);
8814 	free_trace_buffers(tr);
8815 	free_cpumask_var(tr->tracing_cpumask);
8816 	kfree(tr->name);
8817 	kfree(tr);
8818 
8819 	return ERR_PTR(ret);
8820 }
8821 
8822 static int instance_mkdir(const char *name)
8823 {
8824 	struct trace_array *tr;
8825 	int ret;
8826 
8827 	mutex_lock(&event_mutex);
8828 	mutex_lock(&trace_types_lock);
8829 
8830 	ret = -EEXIST;
8831 	if (trace_array_find(name))
8832 		goto out_unlock;
8833 
8834 	tr = trace_array_create(name);
8835 
8836 	ret = PTR_ERR_OR_ZERO(tr);
8837 
8838 out_unlock:
8839 	mutex_unlock(&trace_types_lock);
8840 	mutex_unlock(&event_mutex);
8841 	return ret;
8842 }
8843 
8844 /**
8845  * trace_array_get_by_name - Create/Lookup a trace array, given its name.
8846  * @name: The name of the trace array to be looked up/created.
8847  *
8848  * Returns a pointer to the trace array with the given name, or
8849  * NULL if it cannot be created.
8850  *
8851  * NOTE: This function increments the reference counter associated with the
8852  * trace array returned. This makes sure it cannot be freed while in use.
8853  * Use trace_array_put() once the trace array is no longer needed.
8854  * If the trace_array is to be freed, trace_array_destroy() needs to
8855  * be called after the trace_array_put(), or simply let user space delete
8856  * it from the tracefs instances directory. But until the
8857  * trace_array_put() is called, user space cannot delete it.
8858  *
8859  */
8860 struct trace_array *trace_array_get_by_name(const char *name)
8861 {
8862 	struct trace_array *tr;
8863 
8864 	mutex_lock(&event_mutex);
8865 	mutex_lock(&trace_types_lock);
8866 
8867 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8868 		if (tr->name && strcmp(tr->name, name) == 0)
8869 			goto out_unlock;
8870 	}
8871 
8872 	tr = trace_array_create(name);
8873 
8874 	if (IS_ERR(tr))
8875 		tr = NULL;
8876 out_unlock:
8877 	if (tr)
8878 		tr->ref++;
8879 
8880 	mutex_unlock(&trace_types_lock);
8881 	mutex_unlock(&event_mutex);
8882 	return tr;
8883 }
8884 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
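
/*
 * Minimal usage sketch for trace_array_get_by_name() (illustrative only;
 * the instance name and message are placeholders, not from an in-tree
 * caller):
 *
 *   struct trace_array *tr = trace_array_get_by_name("my_instance");
 *
 *   if (tr) {
 *           trace_array_printk(tr, _THIS_IP_, "hello\n");
 *           trace_array_put(tr);
 *           trace_array_destroy(tr);  (only if the instance should go away)
 *   }
 */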
8885 
8886 static int __remove_instance(struct trace_array *tr)
8887 {
8888 	int i;
8889 
8890 	/* Reference counter for a newly created trace array = 1. */
8891 	if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
8892 		return -EBUSY;
8893 
8894 	list_del(&tr->list);
8895 
8896 	/* Disable all the flags that were enabled coming in */
8897 	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8898 		if ((1 << i) & ZEROED_TRACE_FLAGS)
8899 			set_tracer_flag(tr, 1 << i, 0);
8900 	}
8901 
8902 	tracing_set_nop(tr);
8903 	clear_ftrace_function_probes(tr);
8904 	event_trace_del_tracer(tr);
8905 	ftrace_clear_pids(tr);
8906 	ftrace_destroy_function_files(tr);
8907 	tracefs_remove(tr->dir);
8908 	free_trace_buffers(tr);
8909 
8910 	for (i = 0; i < tr->nr_topts; i++) {
8911 		kfree(tr->topts[i].topts);
8912 	}
8913 	kfree(tr->topts);
8914 
8915 	free_cpumask_var(tr->tracing_cpumask);
8916 	kfree(tr->name);
8917 	kfree(tr);
8918 
8919 	return 0;
8920 }
8921 
8922 int trace_array_destroy(struct trace_array *this_tr)
8923 {
8924 	struct trace_array *tr;
8925 	int ret;
8926 
8927 	if (!this_tr)
8928 		return -EINVAL;
8929 
8930 	mutex_lock(&event_mutex);
8931 	mutex_lock(&trace_types_lock);
8932 
8933 	ret = -ENODEV;
8934 
8935 	/* Making sure trace array exists before destroying it. */
8936 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8937 		if (tr == this_tr) {
8938 			ret = __remove_instance(tr);
8939 			break;
8940 		}
8941 	}
8942 
8943 	mutex_unlock(&trace_types_lock);
8944 	mutex_unlock(&event_mutex);
8945 
8946 	return ret;
8947 }
8948 EXPORT_SYMBOL_GPL(trace_array_destroy);
8949 
8950 static int instance_rmdir(const char *name)
8951 {
8952 	struct trace_array *tr;
8953 	int ret;
8954 
8955 	mutex_lock(&event_mutex);
8956 	mutex_lock(&trace_types_lock);
8957 
8958 	ret = -ENODEV;
8959 	tr = trace_array_find(name);
8960 	if (tr)
8961 		ret = __remove_instance(tr);
8962 
8963 	mutex_unlock(&trace_types_lock);
8964 	mutex_unlock(&event_mutex);
8965 
8966 	return ret;
8967 }
8968 
8969 static __init void create_trace_instances(struct dentry *d_tracer)
8970 {
8971 	struct trace_array *tr;
8972 
8973 	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8974 							 instance_mkdir,
8975 							 instance_rmdir);
8976 	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
8977 		return;
8978 
8979 	mutex_lock(&event_mutex);
8980 	mutex_lock(&trace_types_lock);
8981 
8982 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8983 		if (!tr->name)
8984 			continue;
8985 		if (MEM_FAIL(trace_array_create_dir(tr) < 0,
8986 			     "Failed to create instance directory\n"))
8987 			break;
8988 	}
8989 
8990 	mutex_unlock(&trace_types_lock);
8991 	mutex_unlock(&event_mutex);
8992 }
8993 
8994 static void
8995 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8996 {
8997 	struct trace_event_file *file;
8998 	int cpu;
8999 
9000 	trace_create_file("available_tracers", 0444, d_tracer,
9001 			tr, &show_traces_fops);
9002 
9003 	trace_create_file("current_tracer", 0644, d_tracer,
9004 			tr, &set_tracer_fops);
9005 
9006 	trace_create_file("tracing_cpumask", 0644, d_tracer,
9007 			  tr, &tracing_cpumask_fops);
9008 
9009 	trace_create_file("trace_options", 0644, d_tracer,
9010 			  tr, &tracing_iter_fops);
9011 
9012 	trace_create_file("trace", 0644, d_tracer,
9013 			  tr, &tracing_fops);
9014 
9015 	trace_create_file("trace_pipe", 0444, d_tracer,
9016 			  tr, &tracing_pipe_fops);
9017 
9018 	trace_create_file("buffer_size_kb", 0644, d_tracer,
9019 			  tr, &tracing_entries_fops);
9020 
9021 	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
9022 			  tr, &tracing_total_entries_fops);
9023 
9024 	trace_create_file("free_buffer", 0200, d_tracer,
9025 			  tr, &tracing_free_buffer_fops);
9026 
9027 	trace_create_file("trace_marker", 0220, d_tracer,
9028 			  tr, &tracing_mark_fops);
9029 
9030 	file = __find_event_file(tr, "ftrace", "print");
9031 	if (file && file->dir)
9032 		trace_create_file("trigger", 0644, file->dir, file,
9033 				  &event_trigger_fops);
9034 	tr->trace_marker_file = file;
9035 
9036 	trace_create_file("trace_marker_raw", 0220, d_tracer,
9037 			  tr, &tracing_mark_raw_fops);
9038 
9039 	trace_create_file("trace_clock", 0644, d_tracer, tr,
9040 			  &trace_clock_fops);
9041 
9042 	trace_create_file("tracing_on", 0644, d_tracer,
9043 			  tr, &rb_simple_fops);
9044 
9045 	trace_create_file("timestamp_mode", 0444, d_tracer, tr,
9046 			  &trace_time_stamp_mode_fops);
9047 
9048 	tr->buffer_percent = 50;
9049 
9050 	trace_create_file("buffer_percent", 0444, d_tracer,
9051 			tr, &buffer_percent_fops);
9052 
9053 	create_trace_options_dir(tr);
9054 
9055 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
9056 	trace_create_maxlat_file(tr, d_tracer);
9057 #endif
9058 
9059 	if (ftrace_create_function_files(tr, d_tracer))
9060 		MEM_FAIL(1, "Could not allocate function filter files");
9061 
9062 #ifdef CONFIG_TRACER_SNAPSHOT
9063 	trace_create_file("snapshot", 0644, d_tracer,
9064 			  tr, &snapshot_fops);
9065 #endif
9066 
9067 	trace_create_file("error_log", 0644, d_tracer,
9068 			  tr, &tracing_err_log_fops);
9069 
9070 	for_each_tracing_cpu(cpu)
9071 		tracing_init_tracefs_percpu(tr, cpu);
9072 
9073 	ftrace_init_tracefs(tr, d_tracer);
9074 }
9075 
9076 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9077 {
9078 	struct vfsmount *mnt;
9079 	struct file_system_type *type;
9080 
9081 	/*
9082 	 * To maintain backward compatibility for tools that mount
9083 	 * debugfs to get to the tracing facility, tracefs is automatically
9084 	 * mounted to the debugfs/tracing directory.
9085 	 */
9086 	type = get_fs_type("tracefs");
9087 	if (!type)
9088 		return NULL;
9089 	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9090 	put_filesystem(type);
9091 	if (IS_ERR(mnt))
9092 		return NULL;
9093 	mntget(mnt);
9094 
9095 	return mnt;
9096 }
9097 
9098 /**
9099  * tracing_init_dentry - initialize top level trace array
9100  *
9101  * This is called when creating files or directories in the tracing
9102  * directory. It is called via fs_initcall() by any of the boot up code
9103  * and returns 0 on success, or an error if the directory cannot be set up.
9104  */
9105 int tracing_init_dentry(void)
9106 {
9107 	struct trace_array *tr = &global_trace;
9108 
9109 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
9110 		pr_warn("Tracing disabled due to lockdown\n");
9111 		return -EPERM;
9112 	}
9113 
9114 	/* The top level trace array uses NULL as parent */
9115 	if (tr->dir)
9116 		return 0;
9117 
9118 	if (WARN_ON(!tracefs_initialized()))
9119 		return -ENODEV;
9120 
9121 	/*
9122 	 * As there may still be users that expect the tracing
9123 	 * files to exist in debugfs/tracing, we must automount
9124 	 * the tracefs file system there, so older tools still
9125 	 * work with the newer kernel.
9126 	 */
9127 	tr->dir = debugfs_create_automount("tracing", NULL,
9128 					   trace_automount, NULL);
9129 
9130 	return 0;
9131 }
9132 
9133 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9134 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9135 
9136 static struct workqueue_struct *eval_map_wq __initdata;
9137 static struct work_struct eval_map_work __initdata;
9138 
9139 static void __init eval_map_work_func(struct work_struct *work)
9140 {
9141 	int len;
9142 
9143 	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9144 	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9145 }
9146 
9147 static int __init trace_eval_init(void)
9148 {
9149 	INIT_WORK(&eval_map_work, eval_map_work_func);
9150 
9151 	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9152 	if (!eval_map_wq) {
9153 		pr_err("Unable to allocate eval_map_wq\n");
9154 		/* Do work here */
9155 		eval_map_work_func(&eval_map_work);
9156 		return -ENOMEM;
9157 	}
9158 
9159 	queue_work(eval_map_wq, &eval_map_work);
9160 	return 0;
9161 }
9162 
9163 static int __init trace_eval_sync(void)
9164 {
9165 	/* Make sure the eval map updates are finished */
9166 	if (eval_map_wq)
9167 		destroy_workqueue(eval_map_wq);
9168 	return 0;
9169 }
9170 
9171 late_initcall_sync(trace_eval_sync);
9172 
9173 
9174 #ifdef CONFIG_MODULES
9175 static void trace_module_add_evals(struct module *mod)
9176 {
9177 	if (!mod->num_trace_evals)
9178 		return;
9179 
9180 	/*
9181 	 * Modules with bad taint do not have events created, so do
9182 	 * not bother with their eval maps either.
9183 	 */
9184 	if (trace_module_has_bad_taint(mod))
9185 		return;
9186 
9187 	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9188 }
9189 
9190 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9191 static void trace_module_remove_evals(struct module *mod)
9192 {
9193 	union trace_eval_map_item *map;
9194 	union trace_eval_map_item **last = &trace_eval_maps;
9195 
9196 	if (!mod->num_trace_evals)
9197 		return;
9198 
9199 	mutex_lock(&trace_eval_mutex);
9200 
9201 	map = trace_eval_maps;
9202 
9203 	while (map) {
9204 		if (map->head.mod == mod)
9205 			break;
9206 		map = trace_eval_jmp_to_tail(map);
9207 		last = &map->tail.next;
9208 		map = map->tail.next;
9209 	}
9210 	if (!map)
9211 		goto out;
9212 
9213 	*last = trace_eval_jmp_to_tail(map)->tail.next;
9214 	kfree(map);
9215  out:
9216 	mutex_unlock(&trace_eval_mutex);
9217 }
9218 #else
9219 static inline void trace_module_remove_evals(struct module *mod) { }
9220 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9221 
9222 static int trace_module_notify(struct notifier_block *self,
9223 			       unsigned long val, void *data)
9224 {
9225 	struct module *mod = data;
9226 
9227 	switch (val) {
9228 	case MODULE_STATE_COMING:
9229 		trace_module_add_evals(mod);
9230 		break;
9231 	case MODULE_STATE_GOING:
9232 		trace_module_remove_evals(mod);
9233 		break;
9234 	}
9235 
9236 	return NOTIFY_OK;
9237 }
9238 
9239 static struct notifier_block trace_module_nb = {
9240 	.notifier_call = trace_module_notify,
9241 	.priority = 0,
9242 };
9243 #endif /* CONFIG_MODULES */
9244 
9245 static __init int tracer_init_tracefs(void)
9246 {
9247 	int ret;
9248 
9249 	trace_access_lock_init();
9250 
9251 	ret = tracing_init_dentry();
9252 	if (ret)
9253 		return 0;
9254 
9255 	event_trace_init();
9256 
9257 	init_tracer_tracefs(&global_trace, NULL);
9258 	ftrace_init_tracefs_toplevel(&global_trace, NULL);
9259 
9260 	trace_create_file("tracing_thresh", 0644, NULL,
9261 			&global_trace, &tracing_thresh_fops);
9262 
9263 	trace_create_file("README", 0444, NULL,
9264 			NULL, &tracing_readme_fops);
9265 
9266 	trace_create_file("saved_cmdlines", 0444, NULL,
9267 			NULL, &tracing_saved_cmdlines_fops);
9268 
9269 	trace_create_file("saved_cmdlines_size", 0644, NULL,
9270 			  NULL, &tracing_saved_cmdlines_size_fops);
9271 
9272 	trace_create_file("saved_tgids", 0444, NULL,
9273 			NULL, &tracing_saved_tgids_fops);
9274 
9275 	trace_eval_init();
9276 
9277 	trace_create_eval_file(NULL);
9278 
9279 #ifdef CONFIG_MODULES
9280 	register_module_notifier(&trace_module_nb);
9281 #endif
9282 
9283 #ifdef CONFIG_DYNAMIC_FTRACE
9284 	trace_create_file("dyn_ftrace_total_info", 0444, NULL,
9285 			NULL, &tracing_dyn_info_fops);
9286 #endif
9287 
9288 	create_trace_instances(NULL);
9289 
9290 	update_tracer_options(&global_trace);
9291 
9292 	return 0;
9293 }
9294 
9295 static int trace_panic_handler(struct notifier_block *this,
9296 			       unsigned long event, void *unused)
9297 {
9298 	if (ftrace_dump_on_oops)
9299 		ftrace_dump(ftrace_dump_on_oops);
9300 	return NOTIFY_OK;
9301 }
9302 
9303 static struct notifier_block trace_panic_notifier = {
9304 	.notifier_call  = trace_panic_handler,
9305 	.next           = NULL,
9306 	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
9307 };
9308 
9309 static int trace_die_handler(struct notifier_block *self,
9310 			     unsigned long val,
9311 			     void *data)
9312 {
9313 	switch (val) {
9314 	case DIE_OOPS:
9315 		if (ftrace_dump_on_oops)
9316 			ftrace_dump(ftrace_dump_on_oops);
9317 		break;
9318 	default:
9319 		break;
9320 	}
9321 	return NOTIFY_OK;
9322 }
9323 
9324 static struct notifier_block trace_die_notifier = {
9325 	.notifier_call = trace_die_handler,
9326 	.priority = 200
9327 };
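
/*
 * Both notifiers above only act when ftrace_dump_on_oops is set, which
 * can be done with the "ftrace_dump_on_oops" kernel command line
 * parameter or via /proc/sys/kernel/ftrace_dump_on_oops.
 */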
9328 
9329 /*
9330  * printk is set to a max of 1024, but we really don't need it that big.
9331  * Nothing should be printing 1000 characters anyway.
9332  */
9333 #define TRACE_MAX_PRINT		1000
9334 
9335 /*
9336  * Define KERN_TRACE here so that we have one place to modify
9337  * it if we decide to change what log level the ftrace dump
9338  * should be at.
9339  */
9340 #define KERN_TRACE		KERN_EMERG
9341 
9342 void
9343 trace_printk_seq(struct trace_seq *s)
9344 {
9345 	/* Probably should print a warning here. */
9346 	if (s->seq.len >= TRACE_MAX_PRINT)
9347 		s->seq.len = TRACE_MAX_PRINT;
9348 
9349 	/*
9350 	 * More paranoid code. Although the buffer size is set to
9351 	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9352 	 * an extra layer of protection.
9353 	 */
9354 	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9355 		s->seq.len = s->seq.size - 1;
9356 
9357 	/* Should already be NUL terminated, but we are paranoid. */
9358 	s->buffer[s->seq.len] = 0;
9359 
9360 	printk(KERN_TRACE "%s", s->buffer);
9361 
9362 	trace_seq_init(s);
9363 }
9364 
9365 void trace_init_global_iter(struct trace_iterator *iter)
9366 {
9367 	iter->tr = &global_trace;
9368 	iter->trace = iter->tr->current_trace;
9369 	iter->cpu_file = RING_BUFFER_ALL_CPUS;
9370 	iter->array_buffer = &global_trace.array_buffer;
9371 
9372 	if (iter->trace && iter->trace->open)
9373 		iter->trace->open(iter);
9374 
9375 	/* Annotate start of buffers if we had overruns */
9376 	if (ring_buffer_overruns(iter->array_buffer->buffer))
9377 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
9378 
9379 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
9380 	if (trace_clocks[iter->tr->clock_id].in_ns)
9381 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9382 }
9383 
9384 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9385 {
9386 	/* use static because iter can be a bit big for the stack */
9387 	static struct trace_iterator iter;
9388 	static atomic_t dump_running;
9389 	struct trace_array *tr = &global_trace;
9390 	unsigned int old_userobj;
9391 	unsigned long flags;
9392 	int cnt = 0, cpu;
9393 
9394 	/* Only allow one dump user at a time. */
9395 	if (atomic_inc_return(&dump_running) != 1) {
9396 		atomic_dec(&dump_running);
9397 		return;
9398 	}
9399 
9400 	/*
9401 	 * Always turn off tracing when we dump.
9402 	 * We don't need to show trace output of what happens
9403 	 * between multiple crashes.
9404 	 *
9405 	 * If the user does a sysrq-z, then they can re-enable
9406 	 * tracing with echo 1 > tracing_on.
9407 	 */
9408 	tracing_off();
9409 
9410 	local_irq_save(flags);
9411 	printk_nmi_direct_enter();
9412 
9413 	/* Simulate the iterator */
9414 	trace_init_global_iter(&iter);
9415 	/* Can not use kmalloc for iter.temp and iter.fmt */
9416 	iter.temp = static_temp_buf;
9417 	iter.temp_size = STATIC_TEMP_BUF_SIZE;
9418 	iter.fmt = static_fmt_buf;
9419 	iter.fmt_size = STATIC_FMT_BUF_SIZE;
9420 
9421 	for_each_tracing_cpu(cpu) {
9422 		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9423 	}
9424 
9425 	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9426 
9427 	/* don't look at user memory in panic mode */
9428 	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9429 
9430 	switch (oops_dump_mode) {
9431 	case DUMP_ALL:
9432 		iter.cpu_file = RING_BUFFER_ALL_CPUS;
9433 		break;
9434 	case DUMP_ORIG:
9435 		iter.cpu_file = raw_smp_processor_id();
9436 		break;
9437 	case DUMP_NONE:
9438 		goto out_enable;
9439 	default:
9440 		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9441 		iter.cpu_file = RING_BUFFER_ALL_CPUS;
9442 	}
9443 
9444 	printk(KERN_TRACE "Dumping ftrace buffer:\n");
9445 
9446 	/* Did function tracer already get disabled? */
9447 	if (ftrace_is_dead()) {
9448 		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9449 		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
9450 	}
9451 
9452 	/*
9453 	 * We need to stop all tracing on all CPUs to read
9454 	 * the next buffer. This is a bit expensive, but is
9455 	 * not done often. We fill all that we can read,
9456 	 * and then release the locks again.
9457 	 */
9458 
9459 	while (!trace_empty(&iter)) {
9460 
9461 		if (!cnt)
9462 			printk(KERN_TRACE "---------------------------------\n");
9463 
9464 		cnt++;
9465 
9466 		trace_iterator_reset(&iter);
9467 		iter.iter_flags |= TRACE_FILE_LAT_FMT;
9468 
9469 		if (trace_find_next_entry_inc(&iter) != NULL) {
9470 			int ret;
9471 
9472 			ret = print_trace_line(&iter);
9473 			if (ret != TRACE_TYPE_NO_CONSUME)
9474 				trace_consume(&iter);
9475 		}
9476 		touch_nmi_watchdog();
9477 
9478 		trace_printk_seq(&iter.seq);
9479 	}
9480 
9481 	if (!cnt)
9482 		printk(KERN_TRACE "   (ftrace buffer empty)\n");
9483 	else
9484 		printk(KERN_TRACE "---------------------------------\n");
9485 
9486  out_enable:
9487 	tr->trace_flags |= old_userobj;
9488 
9489 	for_each_tracing_cpu(cpu) {
9490 		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9491 	}
9492 	atomic_dec(&dump_running);
9493 	printk_nmi_direct_exit();
9494 	local_irq_restore(flags);
9495 }
9496 EXPORT_SYMBOL_GPL(ftrace_dump);
9497 
9498 #define WRITE_BUFSIZE  4096
9499 
9500 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9501 				size_t count, loff_t *ppos,
9502 				int (*createfn)(const char *))
9503 {
9504 	char *kbuf, *buf, *tmp;
9505 	int ret = 0;
9506 	size_t done = 0;
9507 	size_t size;
9508 
9509 	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9510 	if (!kbuf)
9511 		return -ENOMEM;
9512 
9513 	while (done < count) {
9514 		size = count - done;
9515 
9516 		if (size >= WRITE_BUFSIZE)
9517 			size = WRITE_BUFSIZE - 1;
9518 
9519 		if (copy_from_user(kbuf, buffer + done, size)) {
9520 			ret = -EFAULT;
9521 			goto out;
9522 		}
9523 		kbuf[size] = '\0';
9524 		buf = kbuf;
9525 		do {
9526 			tmp = strchr(buf, '\n');
9527 			if (tmp) {
9528 				*tmp = '\0';
9529 				size = tmp - buf + 1;
9530 			} else {
9531 				size = strlen(buf);
9532 				if (done + size < count) {
9533 					if (buf != kbuf)
9534 						break;
9535 					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9536 					pr_warn("Line length is too long: Should be less than %d\n",
9537 						WRITE_BUFSIZE - 2);
9538 					ret = -EINVAL;
9539 					goto out;
9540 				}
9541 			}
9542 			done += size;
9543 
9544 			/* Remove comments */
9545 			tmp = strchr(buf, '#');
9546 
9547 			if (tmp)
9548 				*tmp = '\0';
9549 
9550 			ret = createfn(buf);
9551 			if (ret)
9552 				goto out;
9553 			buf += size;
9554 
9555 		} while (done < count);
9556 	}
9557 	ret = done;
9558 
9559 out:
9560 	kfree(kbuf);
9561 
9562 	return ret;
9563 }
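
/*
 * Note: this parser backs the dynamic-event style control files (e.g.
 * kprobe_events). Each newline-terminated line is handed to 'createfn'
 * with anything after a '#' stripped, so a write such as (illustrative):
 *
 *   echo 'p:myprobe do_sys_open  # probe sys_open' > kprobe_events
 *
 * reaches createfn() as "p:myprobe do_sys_open  ".
 */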
9564 
9565 __init static int tracer_alloc_buffers(void)
9566 {
9567 	int ring_buf_size;
9568 	int ret = -ENOMEM;
9569 
9570 
9571 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
9572 		pr_warn("Tracing disabled due to lockdown\n");
9573 		return -EPERM;
9574 	}
9575 
9576 	/*
9577 	 * Make sure we don't accidentally add more trace options
9578 	 * than we have bits for.
9579 	 */
9580 	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
9581 
9582 	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9583 		goto out;
9584 
9585 	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9586 		goto out_free_buffer_mask;
9587 
9588 	/* Only allocate trace_printk buffers if a trace_printk exists */
9589 	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
9590 		/* Must be called before global_trace.buffer is allocated */
9591 		trace_printk_init_buffers();
9592 
9593 	/* To save memory, keep the ring buffer size to its minimum */
9594 	if (ring_buffer_expanded)
9595 		ring_buf_size = trace_buf_size;
9596 	else
9597 		ring_buf_size = 1;
9598 
9599 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
9600 	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9601 
9602 	raw_spin_lock_init(&global_trace.start_lock);
9603 
9604 	/*
9605 	 * The prepare callbacks allocate some memory for the ring buffer. We
9606 	 * don't free the buffer if the CPU goes down. If we were to free
9607 	 * the buffer, then the user would lose any trace that was in the
9608 	 * buffer. The memory will be removed once the "instance" is removed.
9609 	 */
9610 	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9611 				      "trace/RB:prepare", trace_rb_cpu_prepare,
9612 				      NULL);
9613 	if (ret < 0)
9614 		goto out_free_cpumask;
9615 	/* Used for event triggers */
9616 	ret = -ENOMEM;
9617 	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9618 	if (!temp_buffer)
9619 		goto out_rm_hp_state;
9620 
9621 	if (trace_create_savedcmd() < 0)
9622 		goto out_free_temp_buffer;
9623 
9624 	/* TODO: make the number of buffers hot pluggable with CPUs */
9625 	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
9626 		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
9627 		goto out_free_savedcmd;
9628 	}
9629 
9630 	if (global_trace.buffer_disabled)
9631 		tracing_off();
9632 
9633 	if (trace_boot_clock) {
9634 		ret = tracing_set_clock(&global_trace, trace_boot_clock);
9635 		if (ret < 0)
9636 			pr_warn("Trace clock %s not defined, going back to default\n",
9637 				trace_boot_clock);
9638 	}
9639 
9640 	/*
9641 	 * register_tracer() might reference current_trace, so it
9642 	 * needs to be set before we register anything. This is
9643 	 * just a bootstrap of current_trace anyway.
9644 	 */
9645 	global_trace.current_trace = &nop_trace;
9646 
9647 	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9648 
9649 	ftrace_init_global_array_ops(&global_trace);
9650 
9651 	init_trace_flags_index(&global_trace);
9652 
9653 	register_tracer(&nop_trace);
9654 
9655 	/* Function tracing may start here (via kernel command line) */
9656 	init_function_trace();
9657 
9658 	/* All seems OK, enable tracing */
9659 	tracing_disabled = 0;
9660 
9661 	atomic_notifier_chain_register(&panic_notifier_list,
9662 				       &trace_panic_notifier);
9663 
9664 	register_die_notifier(&trace_die_notifier);
9665 
9666 	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9667 
9668 	INIT_LIST_HEAD(&global_trace.systems);
9669 	INIT_LIST_HEAD(&global_trace.events);
9670 	INIT_LIST_HEAD(&global_trace.hist_vars);
9671 	INIT_LIST_HEAD(&global_trace.err_log);
9672 	list_add(&global_trace.list, &ftrace_trace_arrays);
9673 
9674 	apply_trace_boot_options();
9675 
9676 	register_snapshot_cmd();
9677 
9678 	return 0;
9679 
9680 out_free_savedcmd:
9681 	free_saved_cmdlines_buffer(savedcmd);
9682 out_free_temp_buffer:
9683 	ring_buffer_free(temp_buffer);
9684 out_rm_hp_state:
9685 	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9686 out_free_cpumask:
9687 	free_cpumask_var(global_trace.tracing_cpumask);
9688 out_free_buffer_mask:
9689 	free_cpumask_var(tracing_buffer_mask);
9690 out:
9691 	return ret;
9692 }
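
/*
 * Hedged sketch of the multi-instance CPU hotplug pattern referenced above:
 * a state is set up with cpuhp_setup_state_multi(), and each ring buffer
 * later attaches its own hlist_node with cpuhp_state_add_instance(), so the
 * prepare callback runs per buffer and may be invoked immediately for CPUs
 * that are already up. In real code the state is registered only once (as in
 * tracer_alloc_buffers()); both steps are shown together here purely for
 * illustration. The example_* names are hypothetical and the block is
 * compiled out.
 */
#if 0
static int example_rb_prepare(unsigned int cpu, struct hlist_node *node)
{
	/* Allocate the per-CPU storage for the buffer that owns @node. */
	return 0;
}

static int example_register_buffer(struct hlist_node *node)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare",
				      example_rb_prepare, NULL);
	if (ret < 0)
		return ret;

	/* Attach one instance; the prepare callback runs for online CPUs. */
	return cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, node);
}
#endif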
9693 
9694 void __init early_trace_init(void)
9695 {
9696 	if (tracepoint_printk) {
9697 		tracepoint_print_iter =
9698 			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
9699 		if (MEM_FAIL(!tracepoint_print_iter,
9700 			     "Failed to allocate trace iterator\n"))
9701 			tracepoint_printk = 0;
9702 		else
9703 			static_key_enable(&tracepoint_printk_key.key);
9704 	}
9705 	tracer_alloc_buffers();
9706 }
9707 
9708 void __init trace_init(void)
9709 {
9710 	trace_event_init();
9711 }
9712 
9713 __init static int clear_boot_tracer(void)
9714 {
9715 	/*
9716 	 * The name of the default boot-up tracer lives in an init section
9717 	 * that is about to be freed. This function runs as a late initcall;
9718 	 * if the boot tracer was never registered, clear the pointer so a
9719 	 * later registration cannot touch memory that has already been
9720 	 * released.
9721 	 */
9722 	if (!default_bootup_tracer)
9723 		return 0;
9724 
9725 	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9726 	       default_bootup_tracer);
9727 	default_bootup_tracer = NULL;
9728 
9729 	return 0;
9730 }
9731 
9732 fs_initcall(tracer_init_tracefs);
9733 late_initcall_sync(clear_boot_tracer);
9734 
9735 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9736 __init static int tracing_set_default_clock(void)
9737 {
9738 	/* sched_clock_stable() is determined in late_initcall */
9739 	if (!trace_boot_clock && !sched_clock_stable()) {
9740 		if (security_locked_down(LOCKDOWN_TRACEFS)) {
9741 			pr_warn("Can not set tracing clock due to lockdown\n");
9742 			return -EPERM;
9743 		}
9744 
9745 		printk(KERN_WARNING
9746 		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
9747 		       "If you want to keep using the local clock, then add:\n"
9748 		       "  \"trace_clock=local\"\n"
9749 		       "on the kernel command line\n");
9750 		tracing_set_clock(&global_trace, "global");
9751 	}
9752 
9753 	return 0;
9754 }
9755 late_initcall_sync(tracing_set_default_clock);
9756 #endif
9757