xref: /linux-6.15/kernel/trace/trace.c (revision 486fbcb3)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * ring buffer based function tracer
4  *
5  * Copyright (C) 2007-2012 Steven Rostedt <[email protected]>
6  * Copyright (C) 2008 Ingo Molnar <[email protected]>
7  *
8  * Originally taken from the RT patch by:
9  *    Arnaldo Carvalho de Melo <[email protected]>
10  *
11  * Based on code from the latency_tracer, that is:
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15 #include <linux/ring_buffer.h>
16 #include <linux/utsname.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/cleanup.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/panic_notifier.h>
43 #include <linux/poll.h>
44 #include <linux/nmi.h>
45 #include <linux/fs.h>
46 #include <linux/trace.h>
47 #include <linux/sched/clock.h>
48 #include <linux/sched/rt.h>
49 #include <linux/fsnotify.h>
50 #include <linux/irq_work.h>
51 #include <linux/workqueue.h>
52 
53 #include <asm/setup.h> /* COMMAND_LINE_SIZE and kaslr_offset() */
54 
55 #include "trace.h"
56 #include "trace_output.h"
57 
58 #ifdef CONFIG_FTRACE_STARTUP_TEST
59 /*
60  * We need to change this state when a selftest is running.
61  * A selftest will look into the ring-buffer to count the
62  * entries inserted during the selftest, although concurrent
63  * insertions into the ring-buffer, such as trace_printk, could occur
64  * at the same time, giving false positive or negative results.
65  */
66 static bool __read_mostly tracing_selftest_running;
67 
68 /*
69  * If boot-time tracing including tracers/events via kernel cmdline
70  * is running, we do not want to run SELFTEST.
71  */
72 bool __read_mostly tracing_selftest_disabled;
73 
74 void __init disable_tracing_selftest(const char *reason)
75 {
76 	if (!tracing_selftest_disabled) {
77 		tracing_selftest_disabled = true;
78 		pr_info("Ftrace startup test is disabled due to %s\n", reason);
79 	}
80 }
81 #else
82 #define tracing_selftest_running	0
83 #define tracing_selftest_disabled	0
84 #endif
85 
86 /* Pipe tracepoints to printk */
87 static struct trace_iterator *tracepoint_print_iter;
88 int tracepoint_printk;
89 static bool tracepoint_printk_stop_on_boot __initdata;
90 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
91 
92 /* For tracers that don't implement custom flags */
93 static struct tracer_opt dummy_tracer_opt[] = {
94 	{ }
95 };
96 
97 static int
98 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
99 {
100 	return 0;
101 }
102 
103 /*
104  * To prevent the comm cache from being overwritten when no
105  * tracing is active, only save the comm when a trace event
106  * occurs.
107  */
108 DEFINE_PER_CPU(bool, trace_taskinfo_save);
109 
110 /*
111  * Kill all tracing for good (never come back).
112  * It is initialized to 1 but is set to zero when the initialization
113  * of the tracer succeeds. That is the only place that sets
114  * it back to zero.
115  */
116 static int tracing_disabled = 1;
117 
118 cpumask_var_t __read_mostly	tracing_buffer_mask;
119 
120 /*
121  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
122  *
123  * If there is an oops (or kernel panic) and ftrace_dump_on_oops
124  * is set, then ftrace_dump is called. This will output the contents
125  * of the ftrace buffers to the console.  This is very useful for
126  * capturing traces that lead to crashes and outputting them to a
127  * serial console.
128  *
129  * It is off by default, but you can enable it either by specifying
130  * "ftrace_dump_on_oops" on the kernel command line, or by setting
131  * /proc/sys/kernel/ftrace_dump_on_oops.
132  * Set it to 1 to dump the buffers of all CPUs.
133  * Set it to 2 to dump the buffer of the CPU that triggered the oops.
134  * Set it to an instance name to dump that specific trace instance.
135  * Dumping multiple instances is also supported; instance names are
136  * separated by commas.
137  */
138 /* Set to string format zero to disable by default */
139 char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0";
140 
141 /* When set, tracing will stop when a WARN*() is hit */
142 int __disable_trace_on_warning;
143 
144 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
145 /* Map of enums to their values, for "eval_map" file */
146 struct trace_eval_map_head {
147 	struct module			*mod;
148 	unsigned long			length;
149 };
150 
151 union trace_eval_map_item;
152 
153 struct trace_eval_map_tail {
154 	/*
155 	 * "end" is first and points to NULL as it must be different
156 	 * than "mod" or "eval_string"
157 	 */
158 	union trace_eval_map_item	*next;
159 	const char			*end;	/* points to NULL */
160 };
161 
162 static DEFINE_MUTEX(trace_eval_mutex);
163 
164 /*
165  * The trace_eval_maps are saved in an array with two extra elements,
166  * one at the beginning, and one at the end. The beginning item contains
167  * the count of the saved maps (head.length), and the module they
168  * belong to if not built in (head.mod). The ending item contains a
169  * pointer to the next array of saved eval_map items.
170  */
171 union trace_eval_map_item {
172 	struct trace_eval_map		map;
173 	struct trace_eval_map_head	head;
174 	struct trace_eval_map_tail	tail;
175 };
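/*
 * A rough sketch (not from the original source) of the saved-map array
 * layout described above, for N saved maps:
 *
 *	[0]      head: { .mod = owning module (or NULL if built in),
 *	                 .length = N }
 *	[1..N]   map:  the N saved trace_eval_map entries
 *	[N+1]    tail: { .next = next saved array (or NULL),
 *	                 .end  = NULL }
 */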
176 
177 static union trace_eval_map_item *trace_eval_maps;
178 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
179 
180 int tracing_set_tracer(struct trace_array *tr, const char *buf);
181 static void ftrace_trace_userstack(struct trace_array *tr,
182 				   struct trace_buffer *buffer,
183 				   unsigned int trace_ctx);
184 
185 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
186 static char *default_bootup_tracer;
187 
188 static bool allocate_snapshot;
189 static bool snapshot_at_boot;
190 
191 static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
192 static int boot_instance_index;
193 
194 static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
195 static int boot_snapshot_index;
196 
197 static int __init set_cmdline_ftrace(char *str)
198 {
199 	strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
200 	default_bootup_tracer = bootup_tracer_buf;
201 	/* We are using ftrace early, expand it */
202 	trace_set_ring_buffer_expanded(NULL);
203 	return 1;
204 }
205 __setup("ftrace=", set_cmdline_ftrace);
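/*
 * Illustrative note (not part of the original source): booting with e.g.
 * "ftrace=function" selects the function tracer before user space comes up.
 * The ring buffer is marked expanded here because a boot-selected tracer
 * starts writing to it immediately.
 */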
206 
207 int ftrace_dump_on_oops_enabled(void)
208 {
209 	if (!strcmp("0", ftrace_dump_on_oops))
210 		return 0;
211 	else
212 		return 1;
213 }
214 
215 static int __init set_ftrace_dump_on_oops(char *str)
216 {
217 	if (!*str) {
218 		strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
219 		return 1;
220 	}
221 
222 	if (*str == ',') {
223 		strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
224 		strscpy(ftrace_dump_on_oops + 1, str, MAX_TRACER_SIZE - 1);
225 		return 1;
226 	}
227 
228 	if (*str++ == '=') {
229 		strscpy(ftrace_dump_on_oops, str, MAX_TRACER_SIZE);
230 		return 1;
231 	}
232 
233 	return 0;
234 }
235 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
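/*
 * For illustration (not in the original source), the parsing above yields,
 * with "instance_foo" standing in for any instance name:
 *
 *	ftrace_dump_on_oops               ->  "1"              (all CPUs)
 *	ftrace_dump_on_oops=2             ->  "2"              (oopsing CPU only)
 *	ftrace_dump_on_oops=instance_foo  ->  "instance_foo"   (that instance)
 *	ftrace_dump_on_oops,instance_foo  ->  "1,instance_foo" (all CPUs plus
 *	                                                        that instance)
 */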
236 
237 static int __init stop_trace_on_warning(char *str)
238 {
239 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
240 		__disable_trace_on_warning = 1;
241 	return 1;
242 }
243 __setup("traceoff_on_warning", stop_trace_on_warning);
244 
245 static int __init boot_alloc_snapshot(char *str)
246 {
247 	char *slot = boot_snapshot_info + boot_snapshot_index;
248 	int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
249 	int ret;
250 
251 	if (str[0] == '=') {
252 		str++;
253 		if (strlen(str) >= left)
254 			return -1;
255 
256 		ret = snprintf(slot, left, "%s\t", str);
257 		boot_snapshot_index += ret;
258 	} else {
259 		allocate_snapshot = true;
260 		/* We also need the main ring buffer expanded */
261 		trace_set_ring_buffer_expanded(NULL);
262 	}
263 	return 1;
264 }
265 __setup("alloc_snapshot", boot_alloc_snapshot);
266 
267 
268 static int __init boot_snapshot(char *str)
269 {
270 	snapshot_at_boot = true;
271 	boot_alloc_snapshot(str);
272 	return 1;
273 }
274 __setup("ftrace_boot_snapshot", boot_snapshot);
275 
276 
277 static int __init boot_instance(char *str)
278 {
279 	char *slot = boot_instance_info + boot_instance_index;
280 	int left = sizeof(boot_instance_info) - boot_instance_index;
281 	int ret;
282 
283 	if (strlen(str) >= left)
284 		return -1;
285 
286 	ret = snprintf(slot, left, "%s\t", str);
287 	boot_instance_index += ret;
288 
289 	return 1;
290 }
291 __setup("trace_instance=", boot_instance);
292 
293 
294 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
295 
296 static int __init set_trace_boot_options(char *str)
297 {
298 	strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
299 	return 1;
300 }
301 __setup("trace_options=", set_trace_boot_options);
302 
303 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
304 static char *trace_boot_clock __initdata;
305 
306 static int __init set_trace_boot_clock(char *str)
307 {
308 	strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
309 	trace_boot_clock = trace_boot_clock_buf;
310 	return 1;
311 }
312 __setup("trace_clock=", set_trace_boot_clock);
313 
314 static int __init set_tracepoint_printk(char *str)
315 {
316 	/* Ignore the "tp_printk_stop_on_boot" param */
317 	if (*str == '_')
318 		return 0;
319 
320 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
321 		tracepoint_printk = 1;
322 	return 1;
323 }
324 __setup("tp_printk", set_tracepoint_printk);
325 
326 static int __init set_tracepoint_printk_stop(char *str)
327 {
328 	tracepoint_printk_stop_on_boot = true;
329 	return 1;
330 }
331 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
332 
333 unsigned long long ns2usecs(u64 nsec)
334 {
335 	nsec += 500;
336 	do_div(nsec, 1000);
337 	return nsec;
338 }
339 
340 static void
341 trace_process_export(struct trace_export *export,
342 	       struct ring_buffer_event *event, int flag)
343 {
344 	struct trace_entry *entry;
345 	unsigned int size = 0;
346 
347 	if (export->flags & flag) {
348 		entry = ring_buffer_event_data(event);
349 		size = ring_buffer_event_length(event);
350 		export->write(export, entry, size);
351 	}
352 }
353 
354 static DEFINE_MUTEX(ftrace_export_lock);
355 
356 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
357 
358 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
359 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
360 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
361 
362 static inline void ftrace_exports_enable(struct trace_export *export)
363 {
364 	if (export->flags & TRACE_EXPORT_FUNCTION)
365 		static_branch_inc(&trace_function_exports_enabled);
366 
367 	if (export->flags & TRACE_EXPORT_EVENT)
368 		static_branch_inc(&trace_event_exports_enabled);
369 
370 	if (export->flags & TRACE_EXPORT_MARKER)
371 		static_branch_inc(&trace_marker_exports_enabled);
372 }
373 
374 static inline void ftrace_exports_disable(struct trace_export *export)
375 {
376 	if (export->flags & TRACE_EXPORT_FUNCTION)
377 		static_branch_dec(&trace_function_exports_enabled);
378 
379 	if (export->flags & TRACE_EXPORT_EVENT)
380 		static_branch_dec(&trace_event_exports_enabled);
381 
382 	if (export->flags & TRACE_EXPORT_MARKER)
383 		static_branch_dec(&trace_marker_exports_enabled);
384 }
385 
386 static void ftrace_exports(struct ring_buffer_event *event, int flag)
387 {
388 	struct trace_export *export;
389 
390 	preempt_disable_notrace();
391 
392 	export = rcu_dereference_raw_check(ftrace_exports_list);
393 	while (export) {
394 		trace_process_export(export, event, flag);
395 		export = rcu_dereference_raw_check(export->next);
396 	}
397 
398 	preempt_enable_notrace();
399 }
400 
401 static inline void
402 add_trace_export(struct trace_export **list, struct trace_export *export)
403 {
404 	rcu_assign_pointer(export->next, *list);
405 	/*
406 	 * We are adding export to the list, but another
407 	 * CPU might be walking that list. We need to make sure
408 	 * the export->next pointer is valid before another CPU sees
409 	 * the export pointer included in the list.
410 	 */
411 	rcu_assign_pointer(*list, export);
412 }
413 
414 static inline int
415 rm_trace_export(struct trace_export **list, struct trace_export *export)
416 {
417 	struct trace_export **p;
418 
419 	for (p = list; *p != NULL; p = &(*p)->next)
420 		if (*p == export)
421 			break;
422 
423 	if (*p != export)
424 		return -1;
425 
426 	rcu_assign_pointer(*p, (*p)->next);
427 
428 	return 0;
429 }
430 
431 static inline void
432 add_ftrace_export(struct trace_export **list, struct trace_export *export)
433 {
434 	ftrace_exports_enable(export);
435 
436 	add_trace_export(list, export);
437 }
438 
439 static inline int
440 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
441 {
442 	int ret;
443 
444 	ret = rm_trace_export(list, export);
445 	ftrace_exports_disable(export);
446 
447 	return ret;
448 }
449 
450 int register_ftrace_export(struct trace_export *export)
451 {
452 	if (WARN_ON_ONCE(!export->write))
453 		return -1;
454 
455 	mutex_lock(&ftrace_export_lock);
456 
457 	add_ftrace_export(&ftrace_exports_list, export);
458 
459 	mutex_unlock(&ftrace_export_lock);
460 
461 	return 0;
462 }
463 EXPORT_SYMBOL_GPL(register_ftrace_export);
464 
465 int unregister_ftrace_export(struct trace_export *export)
466 {
467 	int ret;
468 
469 	mutex_lock(&ftrace_export_lock);
470 
471 	ret = rm_ftrace_export(&ftrace_exports_list, export);
472 
473 	mutex_unlock(&ftrace_export_lock);
474 
475 	return ret;
476 }
477 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
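/*
 * A minimal usage sketch (not part of the original source), assuming the
 * struct trace_export layout from <linux/trace.h>: an exporter supplies a
 * write() callback plus a flags mask saying which classes of events it
 * wants, then registers itself. The callback is invoked with preemption
 * disabled (see ftrace_exports() above), so it must not sleep.
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		// push the raw event bytes to some out-of-band channel
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_EVENT,
 *	};
 *
 *	register_ftrace_export(&my_export);	// on setup
 *	unregister_ftrace_export(&my_export);	// on teardown
 *
 * "my_export" and its callback are made-up names for the example.
 */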
478 
479 /* trace_flags holds trace_options default values */
480 #define TRACE_DEFAULT_FLAGS						\
481 	(FUNCTION_DEFAULT_FLAGS |					\
482 	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
483 	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
484 	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
485 	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
486 	 TRACE_ITER_HASH_PTR | TRACE_ITER_TRACE_PRINTK)
487 
488 /* trace_options that are only supported by global_trace */
489 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
490 	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
491 
492 /* trace_flags that are default zero for instances */
493 #define ZEROED_TRACE_FLAGS \
494 	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK | TRACE_ITER_TRACE_PRINTK)
495 
496 /*
497  * The global_trace is the descriptor that holds the top-level tracing
498  * buffers for the live tracing.
499  */
500 static struct trace_array global_trace = {
501 	.trace_flags = TRACE_DEFAULT_FLAGS,
502 };
503 
504 static struct trace_array *printk_trace = &global_trace;
505 
506 static __always_inline bool printk_binsafe(struct trace_array *tr)
507 {
508 	/*
509 	 * The binary format of trace_printk can cause a crash if used
510 	 * by a buffer from another boot. Force the use of the
511 	 * non-binary version of trace_printk if the trace_printk
512 	 * buffer is a boot-mapped ring buffer.
513 	 */
514 	return !(tr->flags & TRACE_ARRAY_FL_BOOT);
515 }
516 
517 static void update_printk_trace(struct trace_array *tr)
518 {
519 	if (printk_trace == tr)
520 		return;
521 
522 	printk_trace->trace_flags &= ~TRACE_ITER_TRACE_PRINTK;
523 	printk_trace = tr;
524 	tr->trace_flags |= TRACE_ITER_TRACE_PRINTK;
525 }
526 
527 void trace_set_ring_buffer_expanded(struct trace_array *tr)
528 {
529 	if (!tr)
530 		tr = &global_trace;
531 	tr->ring_buffer_expanded = true;
532 }
533 
534 LIST_HEAD(ftrace_trace_arrays);
535 
536 int trace_array_get(struct trace_array *this_tr)
537 {
538 	struct trace_array *tr;
539 
540 	guard(mutex)(&trace_types_lock);
541 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
542 		if (tr == this_tr) {
543 			tr->ref++;
544 			return 0;
545 		}
546 	}
547 
548 	return -ENODEV;
549 }
550 
551 static void __trace_array_put(struct trace_array *this_tr)
552 {
553 	WARN_ON(!this_tr->ref);
554 	this_tr->ref--;
555 }
556 
557 /**
558  * trace_array_put - Decrement the reference counter for this trace array.
559  * @this_tr: pointer to the trace array
560  *
561  * NOTE: Use this when we no longer need the trace array returned by
562  * trace_array_get_by_name(). This ensures the trace array can later
563  * be destroyed.
564  *
565  */
566 void trace_array_put(struct trace_array *this_tr)
567 {
568 	if (!this_tr)
569 		return;
570 
571 	mutex_lock(&trace_types_lock);
572 	__trace_array_put(this_tr);
573 	mutex_unlock(&trace_types_lock);
574 }
575 EXPORT_SYMBOL_GPL(trace_array_put);
576 
577 int tracing_check_open_get_tr(struct trace_array *tr)
578 {
579 	int ret;
580 
581 	ret = security_locked_down(LOCKDOWN_TRACEFS);
582 	if (ret)
583 		return ret;
584 
585 	if (tracing_disabled)
586 		return -ENODEV;
587 
588 	if (tr && trace_array_get(tr) < 0)
589 		return -ENODEV;
590 
591 	return 0;
592 }
593 
594 /**
595  * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
596  * @filtered_pids: The list of pids to check
597  * @search_pid: The PID to find in @filtered_pids
598  *
599  * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
600  */
601 bool
602 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
603 {
604 	return trace_pid_list_is_set(filtered_pids, search_pid);
605 }
606 
607 /**
608  * trace_ignore_this_task - should a task be ignored for tracing
609  * @filtered_pids: The list of pids to check
610  * @filtered_no_pids: The list of pids not to be traced
611  * @task: The task that should be ignored if not filtered
612  *
613  * Checks if @task should be traced or not from @filtered_pids.
614  * Returns true if @task should *NOT* be traced.
615  * Returns false if @task should be traced.
616  */
617 bool
618 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
619 		       struct trace_pid_list *filtered_no_pids,
620 		       struct task_struct *task)
621 {
622 	/*
623 	 * If filtered_no_pids is not empty, and the task's pid is listed
624 	 * in filtered_no_pids, then return true.
625 	 * Otherwise, if filtered_pids is empty, that means we can
626 	 * trace all tasks. If it has content, then only trace pids
627 	 * within filtered_pids.
628 	 */
629 
630 	return (filtered_pids &&
631 		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
632 		(filtered_no_pids &&
633 		 trace_find_filtered_pid(filtered_no_pids, task->pid));
634 }
635 
636 /**
637  * trace_filter_add_remove_task - Add or remove a task from a pid_list
638  * @pid_list: The list to modify
639  * @self: The current task for fork or NULL for exit
640  * @task: The task to add or remove
641  *
642  * When adding a task with @self defined, the task is only added if @self
643  * is also included in @pid_list. This happens on fork, where tasks should
644  * only be added when the parent is listed. If @self is NULL, then the
645  * @task pid will be removed from the list, which would happen on exit
646  * of a task.
647  */
648 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
649 				  struct task_struct *self,
650 				  struct task_struct *task)
651 {
652 	if (!pid_list)
653 		return;
654 
655 	/* For forks, we only add if the forking task is listed */
656 	if (self) {
657 		if (!trace_find_filtered_pid(pid_list, self->pid))
658 			return;
659 	}
660 
661 	/* "self" is set for forks, and NULL for exits */
662 	if (self)
663 		trace_pid_list_set(pid_list, task->pid);
664 	else
665 		trace_pid_list_clear(pid_list, task->pid);
666 }
667 
668 /**
669  * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
670  * @pid_list: The pid list to show
671  * @v: The last pid that was shown (the actual pid + 1, so that zero can be displayed)
672  * @pos: The position of the file
673  *
674  * This is used by the seq_file "next" operation to iterate the pids
675  * listed in a trace_pid_list structure.
676  *
677  * Returns the pid+1 as we want to display pid of zero, but NULL would
678  * stop the iteration.
679  */
680 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
681 {
682 	long pid = (unsigned long)v;
683 	unsigned int next;
684 
685 	(*pos)++;
686 
687 	/* pid already is +1 of the actual previous bit */
688 	if (trace_pid_list_next(pid_list, pid, &next) < 0)
689 		return NULL;
690 
691 	pid = next;
692 
693 	/* Return pid + 1 to allow zero to be represented */
694 	return (void *)(pid + 1);
695 }
696 
697 /**
698  * trace_pid_start - Used for seq_file to start reading pid lists
699  * @pid_list: The pid list to show
700  * @pos: The position of the file
701  *
702  * This is used by seq_file "start" operation to start the iteration
703  * of listing pids.
704  *
705  * Returns the pid+1 as we want to display pid of zero, but NULL would
706  * stop the iteration.
707  */
708 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
709 {
710 	unsigned long pid;
711 	unsigned int first;
712 	loff_t l = 0;
713 
714 	if (trace_pid_list_first(pid_list, &first) < 0)
715 		return NULL;
716 
717 	pid = first;
718 
719 	/* Return pid + 1 so that zero can be the exit value */
720 	for (pid++; pid && l < *pos;
721 	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
722 		;
723 	return (void *)pid;
724 }
725 
726 /**
727  * trace_pid_show - show the current pid in seq_file processing
728  * @m: The seq_file structure to write into
729  * @v: A void pointer of the pid (+1) value to display
730  *
731  * Can be directly used by seq_file operations to display the current
732  * pid value.
733  */
734 int trace_pid_show(struct seq_file *m, void *v)
735 {
736 	unsigned long pid = (unsigned long)v - 1;
737 
738 	seq_printf(m, "%lu\n", pid);
739 	return 0;
740 }
741 
742 /* 128 should be much more than enough */
743 #define PID_BUF_SIZE		127
744 
745 int trace_pid_write(struct trace_pid_list *filtered_pids,
746 		    struct trace_pid_list **new_pid_list,
747 		    const char __user *ubuf, size_t cnt)
748 {
749 	struct trace_pid_list *pid_list;
750 	struct trace_parser parser;
751 	unsigned long val;
752 	int nr_pids = 0;
753 	ssize_t read = 0;
754 	ssize_t ret;
755 	loff_t pos;
756 	pid_t pid;
757 
758 	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
759 		return -ENOMEM;
760 
761 	/*
762 	 * Always recreate a new array. The write is an all or nothing
763 	 * operation. Always create a new array when adding new pids by
764 	 * the user. If the operation fails, then the current list is
765 	 * not modified.
766 	 */
767 	pid_list = trace_pid_list_alloc();
768 	if (!pid_list) {
769 		trace_parser_put(&parser);
770 		return -ENOMEM;
771 	}
772 
773 	if (filtered_pids) {
774 		/* copy the current bits to the new max */
775 		ret = trace_pid_list_first(filtered_pids, &pid);
776 		while (!ret) {
777 			trace_pid_list_set(pid_list, pid);
778 			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
779 			nr_pids++;
780 		}
781 	}
782 
783 	ret = 0;
784 	while (cnt > 0) {
785 
786 		pos = 0;
787 
788 		ret = trace_get_user(&parser, ubuf, cnt, &pos);
789 		if (ret < 0)
790 			break;
791 
792 		read += ret;
793 		ubuf += ret;
794 		cnt -= ret;
795 
796 		if (!trace_parser_loaded(&parser))
797 			break;
798 
799 		ret = -EINVAL;
800 		if (kstrtoul(parser.buffer, 0, &val))
801 			break;
802 
803 		pid = (pid_t)val;
804 
805 		if (trace_pid_list_set(pid_list, pid) < 0) {
806 			ret = -1;
807 			break;
808 		}
809 		nr_pids++;
810 
811 		trace_parser_clear(&parser);
812 		ret = 0;
813 	}
814 	trace_parser_put(&parser);
815 
816 	if (ret < 0) {
817 		trace_pid_list_free(pid_list);
818 		return ret;
819 	}
820 
821 	if (!nr_pids) {
822 		/* Cleared the list of pids */
823 		trace_pid_list_free(pid_list);
824 		pid_list = NULL;
825 	}
826 
827 	*new_pid_list = pid_list;
828 
829 	return read;
830 }
831 
832 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
833 {
834 	u64 ts;
835 
836 	/* Early boot up does not have a buffer yet */
837 	if (!buf->buffer)
838 		return trace_clock_local();
839 
840 	ts = ring_buffer_time_stamp(buf->buffer);
841 	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
842 
843 	return ts;
844 }
845 
846 u64 ftrace_now(int cpu)
847 {
848 	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
849 }
850 
851 /**
852  * tracing_is_enabled - Show if global_trace has been enabled
853  *
854  * Shows if the global trace has been enabled or not. It uses the
855  * mirror flag "buffer_disabled" so it can be used in fast paths such as
856  * the irqsoff tracer. But it may be inaccurate due to races. If you
857  * need to know the accurate state, use tracing_is_on() which is a little
858  * slower, but accurate.
859  */
860 int tracing_is_enabled(void)
861 {
862 	/*
863 	 * For quick access (irqsoff uses this in fast path), just
864 	 * return the mirror variable of the state of the ring buffer.
865 	 * It's a little racy, but we don't really care.
866 	 */
867 	smp_rmb();
868 	return !global_trace.buffer_disabled;
869 }
870 
871 /*
872  * trace_buf_size is the size in bytes that is allocated
873  * for a buffer. Note, the number of bytes is always rounded
874  * to page size.
875  *
876  * This number is purposely set to a low number of 16384.
877  * If the dump on oops happens, it is much appreciated not
878  * to have to wait for all that output. In any case, this is
879  * configurable at both boot time and run time.
880  */
881 #define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
882 
883 static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
884 
885 /* trace_types holds a linked list of available tracers. */
886 static struct tracer		*trace_types __read_mostly;
887 
888 /*
889  * trace_types_lock is used to protect the trace_types list.
890  */
891 DEFINE_MUTEX(trace_types_lock);
892 
893 /*
894  * Serialize access to the ring buffer.
895  *
896  * The ring buffer serializes readers, but that is only low-level protection.
897  * The validity of the events (returned by ring_buffer_peek(), etc.)
898  * is not protected by the ring buffer.
899  *
900  * The content of events may become garbage if we allow another process to
901  * consume these events concurrently:
902  *   A) the page of the consumed events may become a normal page
903  *      (not a reader page) in the ring buffer, and this page will be
904  *      rewritten by the events producer.
905  *   B) The page of the consumed events may become a page for splice_read,
906  *      and this page will be returned to the system.
907  *
908  * These primitives allow multiple processes to access different cpu ring
909  * buffers concurrently.
910  *
911  * These primitives don't distinguish read-only and read-consume access.
912  * Multiple read-only accesses are also serialized.
913  */
914 
915 #ifdef CONFIG_SMP
916 static DECLARE_RWSEM(all_cpu_access_lock);
917 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
918 
919 static inline void trace_access_lock(int cpu)
920 {
921 	if (cpu == RING_BUFFER_ALL_CPUS) {
922 		/* gain it for accessing the whole ring buffer. */
923 		down_write(&all_cpu_access_lock);
924 	} else {
925 		/* gain it for accessing a cpu ring buffer. */
926 
927 		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
928 		down_read(&all_cpu_access_lock);
929 
930 		/* Secondly block other access to this @cpu ring buffer. */
931 		mutex_lock(&per_cpu(cpu_access_lock, cpu));
932 	}
933 }
934 
935 static inline void trace_access_unlock(int cpu)
936 {
937 	if (cpu == RING_BUFFER_ALL_CPUS) {
938 		up_write(&all_cpu_access_lock);
939 	} else {
940 		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
941 		up_read(&all_cpu_access_lock);
942 	}
943 }
944 
945 static inline void trace_access_lock_init(void)
946 {
947 	int cpu;
948 
949 	for_each_possible_cpu(cpu)
950 		mutex_init(&per_cpu(cpu_access_lock, cpu));
951 }
952 
953 #else
954 
955 static DEFINE_MUTEX(access_lock);
956 
957 static inline void trace_access_lock(int cpu)
958 {
959 	(void)cpu;
960 	mutex_lock(&access_lock);
961 }
962 
963 static inline void trace_access_unlock(int cpu)
964 {
965 	(void)cpu;
966 	mutex_unlock(&access_lock);
967 }
968 
969 static inline void trace_access_lock_init(void)
970 {
971 }
972 
973 #endif
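/*
 * A sketch of the intended usage (not part of the original source): a
 * reader of one cpu's buffer brackets its accesses with
 *
 *	trace_access_lock(cpu);
 *	// peek at / consume / splice events of that cpu's buffer
 *	trace_access_unlock(cpu);
 *
 * while a reader that touches every cpu buffer passes RING_BUFFER_ALL_CPUS,
 * which (on SMP) takes all_cpu_access_lock for write and so excludes all
 * per-cpu readers for the duration.
 */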
974 
975 #ifdef CONFIG_STACKTRACE
976 static void __ftrace_trace_stack(struct trace_array *tr,
977 				 struct trace_buffer *buffer,
978 				 unsigned int trace_ctx,
979 				 int skip, struct pt_regs *regs);
980 static inline void ftrace_trace_stack(struct trace_array *tr,
981 				      struct trace_buffer *buffer,
982 				      unsigned int trace_ctx,
983 				      int skip, struct pt_regs *regs);
984 
985 #else
986 static inline void __ftrace_trace_stack(struct trace_array *tr,
987 					struct trace_buffer *buffer,
988 					unsigned int trace_ctx,
989 					int skip, struct pt_regs *regs)
990 {
991 }
992 static inline void ftrace_trace_stack(struct trace_array *tr,
993 				      struct trace_buffer *buffer,
994 				      unsigned long trace_ctx,
995 				      int skip, struct pt_regs *regs)
996 {
997 }
998 
999 #endif
1000 
1001 static __always_inline void
1002 trace_event_setup(struct ring_buffer_event *event,
1003 		  int type, unsigned int trace_ctx)
1004 {
1005 	struct trace_entry *ent = ring_buffer_event_data(event);
1006 
1007 	tracing_generic_entry_update(ent, type, trace_ctx);
1008 }
1009 
1010 static __always_inline struct ring_buffer_event *
1011 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
1012 			  int type,
1013 			  unsigned long len,
1014 			  unsigned int trace_ctx)
1015 {
1016 	struct ring_buffer_event *event;
1017 
1018 	event = ring_buffer_lock_reserve(buffer, len);
1019 	if (event != NULL)
1020 		trace_event_setup(event, type, trace_ctx);
1021 
1022 	return event;
1023 }
1024 
1025 void tracer_tracing_on(struct trace_array *tr)
1026 {
1027 	if (tr->array_buffer.buffer)
1028 		ring_buffer_record_on(tr->array_buffer.buffer);
1029 	/*
1030 	 * This flag is looked at when buffers haven't been allocated
1031 	 * yet, or by some tracers (like irqsoff) that just want to
1032 	 * know if the ring buffer has been disabled, but can handle
1033 	 * races where it gets disabled while we still do a record.
1034 	 * As the check is in the fast path of the tracers, it is more
1035 	 * important to be fast than accurate.
1036 	 */
1037 	tr->buffer_disabled = 0;
1038 	/* Make the flag seen by readers */
1039 	smp_wmb();
1040 }
1041 
1042 /**
1043  * tracing_on - enable tracing buffers
1044  *
1045  * This function enables tracing buffers that may have been
1046  * disabled with tracing_off.
1047  */
1048 void tracing_on(void)
1049 {
1050 	tracer_tracing_on(&global_trace);
1051 }
1052 EXPORT_SYMBOL_GPL(tracing_on);
1053 
1054 
1055 static __always_inline void
1056 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
1057 {
1058 	__this_cpu_write(trace_taskinfo_save, true);
1059 
1060 	/* If this is the temp buffer, we need to commit fully */
1061 	if (this_cpu_read(trace_buffered_event) == event) {
1062 		/* Length is in event->array[0] */
1063 		ring_buffer_write(buffer, event->array[0], &event->array[1]);
1064 		/* Release the temp buffer */
1065 		this_cpu_dec(trace_buffered_event_cnt);
1066 		/* ring_buffer_unlock_commit() enables preemption */
1067 		preempt_enable_notrace();
1068 	} else
1069 		ring_buffer_unlock_commit(buffer);
1070 }
1071 
1072 int __trace_array_puts(struct trace_array *tr, unsigned long ip,
1073 		       const char *str, int size)
1074 {
1075 	struct ring_buffer_event *event;
1076 	struct trace_buffer *buffer;
1077 	struct print_entry *entry;
1078 	unsigned int trace_ctx;
1079 	int alloc;
1080 
1081 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1082 		return 0;
1083 
1084 	if (unlikely(tracing_selftest_running && tr == &global_trace))
1085 		return 0;
1086 
1087 	if (unlikely(tracing_disabled))
1088 		return 0;
1089 
1090 	alloc = sizeof(*entry) + size + 2; /* possible \n added */
1091 
1092 	trace_ctx = tracing_gen_ctx();
1093 	buffer = tr->array_buffer.buffer;
1094 	ring_buffer_nest_start(buffer);
1095 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1096 					    trace_ctx);
1097 	if (!event) {
1098 		size = 0;
1099 		goto out;
1100 	}
1101 
1102 	entry = ring_buffer_event_data(event);
1103 	entry->ip = ip;
1104 
1105 	memcpy(&entry->buf, str, size);
1106 
1107 	/* Add a newline if necessary */
1108 	if (entry->buf[size - 1] != '\n') {
1109 		entry->buf[size] = '\n';
1110 		entry->buf[size + 1] = '\0';
1111 	} else
1112 		entry->buf[size] = '\0';
1113 
1114 	__buffer_unlock_commit(buffer, event);
1115 	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1116  out:
1117 	ring_buffer_nest_end(buffer);
1118 	return size;
1119 }
1120 EXPORT_SYMBOL_GPL(__trace_array_puts);
1121 
1122 /**
1123  * __trace_puts - write a constant string into the trace buffer.
1124  * @ip:	   The address of the caller
1125  * @str:   The constant string to write
1126  * @size:  The size of the string.
1127  */
1128 int __trace_puts(unsigned long ip, const char *str, int size)
1129 {
1130 	return __trace_array_puts(printk_trace, ip, str, size);
1131 }
1132 EXPORT_SYMBOL_GPL(__trace_puts);
1133 
1134 /**
1135  * __trace_bputs - write the pointer to a constant string into trace buffer
1136  * @ip:	   The address of the caller
1137  * @str:   The constant string whose address is written into the buffer
1138  */
1139 int __trace_bputs(unsigned long ip, const char *str)
1140 {
1141 	struct trace_array *tr = READ_ONCE(printk_trace);
1142 	struct ring_buffer_event *event;
1143 	struct trace_buffer *buffer;
1144 	struct bputs_entry *entry;
1145 	unsigned int trace_ctx;
1146 	int size = sizeof(struct bputs_entry);
1147 	int ret = 0;
1148 
1149 	if (!printk_binsafe(tr))
1150 		return __trace_puts(ip, str, strlen(str));
1151 
1152 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1153 		return 0;
1154 
1155 	if (unlikely(tracing_selftest_running || tracing_disabled))
1156 		return 0;
1157 
1158 	trace_ctx = tracing_gen_ctx();
1159 	buffer = tr->array_buffer.buffer;
1160 
1161 	ring_buffer_nest_start(buffer);
1162 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1163 					    trace_ctx);
1164 	if (!event)
1165 		goto out;
1166 
1167 	entry = ring_buffer_event_data(event);
1168 	entry->ip			= ip;
1169 	entry->str			= str;
1170 
1171 	__buffer_unlock_commit(buffer, event);
1172 	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1173 
1174 	ret = 1;
1175  out:
1176 	ring_buffer_nest_end(buffer);
1177 	return ret;
1178 }
1179 EXPORT_SYMBOL_GPL(__trace_bputs);
1180 
1181 #ifdef CONFIG_TRACER_SNAPSHOT
1182 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1183 					   void *cond_data)
1184 {
1185 	struct tracer *tracer = tr->current_trace;
1186 	unsigned long flags;
1187 
1188 	if (in_nmi()) {
1189 		trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1190 		trace_array_puts(tr, "*** snapshot is being ignored        ***\n");
1191 		return;
1192 	}
1193 
1194 	if (!tr->allocated_snapshot) {
1195 		trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
1196 		trace_array_puts(tr, "*** stopping trace here!   ***\n");
1197 		tracer_tracing_off(tr);
1198 		return;
1199 	}
1200 
1201 	/* Note, snapshot can not be used when the tracer uses it */
1202 	if (tracer->use_max_tr) {
1203 		trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
1204 		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1205 		return;
1206 	}
1207 
1208 	if (tr->mapped) {
1209 		trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n");
1210 		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1211 		return;
1212 	}
1213 
1214 	local_irq_save(flags);
1215 	update_max_tr(tr, current, smp_processor_id(), cond_data);
1216 	local_irq_restore(flags);
1217 }
1218 
1219 void tracing_snapshot_instance(struct trace_array *tr)
1220 {
1221 	tracing_snapshot_instance_cond(tr, NULL);
1222 }
1223 
1224 /**
1225  * tracing_snapshot - take a snapshot of the current buffer.
1226  *
1227  * This causes a swap between the snapshot buffer and the current live
1228  * tracing buffer. You can use this to take snapshots of the live
1229  * trace when some condition is triggered, but continue to trace.
1230  *
1231  * Note, make sure to allocate the snapshot with either
1232  * a tracing_snapshot_alloc(), or by doing it manually
1233  * with: echo 1 > /sys/kernel/tracing/snapshot
1234  *
1235  * If the snapshot buffer is not allocated, it will stop tracing.
1236  * Basically making a permanent snapshot.
1237  */
1238 void tracing_snapshot(void)
1239 {
1240 	struct trace_array *tr = &global_trace;
1241 
1242 	tracing_snapshot_instance(tr);
1243 }
1244 EXPORT_SYMBOL_GPL(tracing_snapshot);
1245 
1246 /**
1247  * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1248  * @tr:		The tracing instance to snapshot
1249  * @cond_data:	The data to be tested conditionally, and possibly saved
1250  *
1251  * This is the same as tracing_snapshot() except that the snapshot is
1252  * conditional - the snapshot will only happen if the
1253  * cond_snapshot.update() implementation receiving the cond_data
1254  * returns true, which means that the trace array's cond_snapshot
1255  * update() operation used the cond_data to determine whether the
1256  * snapshot should be taken, and if it was, presumably saved it along
1257  * with the snapshot.
1258  */
1259 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1260 {
1261 	tracing_snapshot_instance_cond(tr, cond_data);
1262 }
1263 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1264 
1265 /**
1266  * tracing_cond_snapshot_data - get the user data associated with a snapshot
1267  * @tr:		The tracing instance
1268  *
1269  * When the user enables a conditional snapshot using
1270  * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1271  * with the snapshot.  This accessor is used to retrieve it.
1272  *
1273  * Should not be called from cond_snapshot.update(), since it takes
1274  * the tr->max_lock, which the code calling
1275  * cond_snapshot.update() has already taken.
1276  *
1277  * Returns the cond_data associated with the trace array's snapshot.
1278  */
1279 void *tracing_cond_snapshot_data(struct trace_array *tr)
1280 {
1281 	void *cond_data = NULL;
1282 
1283 	local_irq_disable();
1284 	arch_spin_lock(&tr->max_lock);
1285 
1286 	if (tr->cond_snapshot)
1287 		cond_data = tr->cond_snapshot->cond_data;
1288 
1289 	arch_spin_unlock(&tr->max_lock);
1290 	local_irq_enable();
1291 
1292 	return cond_data;
1293 }
1294 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1295 
1296 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1297 					struct array_buffer *size_buf, int cpu_id);
1298 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1299 
1300 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1301 {
1302 	int order;
1303 	int ret;
1304 
1305 	if (!tr->allocated_snapshot) {
1306 
1307 		/* Make the snapshot buffer have the same order as main buffer */
1308 		order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
1309 		ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
1310 		if (ret < 0)
1311 			return ret;
1312 
1313 		/* allocate spare buffer */
1314 		ret = resize_buffer_duplicate_size(&tr->max_buffer,
1315 				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1316 		if (ret < 0)
1317 			return ret;
1318 
1319 		tr->allocated_snapshot = true;
1320 	}
1321 
1322 	return 0;
1323 }
1324 
1325 static void free_snapshot(struct trace_array *tr)
1326 {
1327 	 * We don't free the ring buffer; instead, we resize it because
1328 	 * the max_tr ring buffer has some state (e.g. ring->clock) and
1329 	 * we want to preserve it.
1330 	 * we want preserve it.
1331 	 */
1332 	ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
1333 	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1334 	set_buffer_entries(&tr->max_buffer, 1);
1335 	tracing_reset_online_cpus(&tr->max_buffer);
1336 	tr->allocated_snapshot = false;
1337 }
1338 
1339 static int tracing_arm_snapshot_locked(struct trace_array *tr)
1340 {
1341 	int ret;
1342 
1343 	lockdep_assert_held(&trace_types_lock);
1344 
1345 	spin_lock(&tr->snapshot_trigger_lock);
1346 	if (tr->snapshot == UINT_MAX || tr->mapped) {
1347 		spin_unlock(&tr->snapshot_trigger_lock);
1348 		return -EBUSY;
1349 	}
1350 
1351 	tr->snapshot++;
1352 	spin_unlock(&tr->snapshot_trigger_lock);
1353 
1354 	ret = tracing_alloc_snapshot_instance(tr);
1355 	if (ret) {
1356 		spin_lock(&tr->snapshot_trigger_lock);
1357 		tr->snapshot--;
1358 		spin_unlock(&tr->snapshot_trigger_lock);
1359 	}
1360 
1361 	return ret;
1362 }
1363 
1364 int tracing_arm_snapshot(struct trace_array *tr)
1365 {
1366 	int ret;
1367 
1368 	mutex_lock(&trace_types_lock);
1369 	ret = tracing_arm_snapshot_locked(tr);
1370 	mutex_unlock(&trace_types_lock);
1371 
1372 	return ret;
1373 }
1374 
1375 void tracing_disarm_snapshot(struct trace_array *tr)
1376 {
1377 	spin_lock(&tr->snapshot_trigger_lock);
1378 	if (!WARN_ON(!tr->snapshot))
1379 		tr->snapshot--;
1380 	spin_unlock(&tr->snapshot_trigger_lock);
1381 }
1382 
1383 /**
1384  * tracing_alloc_snapshot - allocate snapshot buffer.
1385  *
1386  * This only allocates the snapshot buffer if it isn't already
1387  * allocated - it doesn't also take a snapshot.
1388  *
1389  * This is meant to be used in cases where the snapshot buffer needs
1390  * to be set up for events that can't sleep but need to be able to
1391  * trigger a snapshot.
1392  */
1393 int tracing_alloc_snapshot(void)
1394 {
1395 	struct trace_array *tr = &global_trace;
1396 	int ret;
1397 
1398 	ret = tracing_alloc_snapshot_instance(tr);
1399 	WARN_ON(ret < 0);
1400 
1401 	return ret;
1402 }
1403 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1404 
1405 /**
1406  * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1407  *
1408  * This is similar to tracing_snapshot(), but it will allocate the
1409  * snapshot buffer if it isn't already allocated. Use this only
1410  * where it is safe to sleep, as the allocation may sleep.
1411  *
1412  * This causes a swap between the snapshot buffer and the current live
1413  * tracing buffer. You can use this to take snapshots of the live
1414  * trace when some condition is triggered, but continue to trace.
1415  */
1416 void tracing_snapshot_alloc(void)
1417 {
1418 	int ret;
1419 
1420 	ret = tracing_alloc_snapshot();
1421 	if (ret < 0)
1422 		return;
1423 
1424 	tracing_snapshot();
1425 }
1426 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1427 
1428 /**
1429  * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1430  * @tr:		The tracing instance
1431  * @cond_data:	User data to associate with the snapshot
1432  * @update:	Implementation of the cond_snapshot update function
1433  *
1434  * Check whether the conditional snapshot for the given instance has
1435  * already been enabled, or if the current tracer is already using a
1436  * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1437  * save the cond_data and update function inside.
1438  *
1439  * Returns 0 if successful, error otherwise.
1440  */
1441 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1442 				 cond_update_fn_t update)
1443 {
1444 	struct cond_snapshot *cond_snapshot __free(kfree) =
1445 		kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1446 	int ret;
1447 
1448 	if (!cond_snapshot)
1449 		return -ENOMEM;
1450 
1451 	cond_snapshot->cond_data = cond_data;
1452 	cond_snapshot->update = update;
1453 
1454 	guard(mutex)(&trace_types_lock);
1455 
1456 	if (tr->current_trace->use_max_tr)
1457 		return -EBUSY;
1458 
1459 	/*
1460 	 * The cond_snapshot can only change to NULL without the
1461 	 * trace_types_lock. We don't care if we race with it going
1462 	 * to NULL, but we want to make sure that it's not set to
1463 	 * something other than NULL when we get here, which we can
1464 	 * do safely with only holding the trace_types_lock and not
1465 	 * having to take the max_lock.
1466 	 */
1467 	if (tr->cond_snapshot)
1468 		return -EBUSY;
1469 
1470 	ret = tracing_arm_snapshot_locked(tr);
1471 	if (ret)
1472 		return ret;
1473 
1474 	local_irq_disable();
1475 	arch_spin_lock(&tr->max_lock);
1476 	tr->cond_snapshot = no_free_ptr(cond_snapshot);
1477 	arch_spin_unlock(&tr->max_lock);
1478 	local_irq_enable();
1479 
1480 	return 0;
1481 }
1482 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
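/*
 * A usage sketch (not part of the original source), assuming the
 * cond_update_fn_t callback type declared in trace.h: the caller provides
 * an update() function that decides, per trigger, whether the snapshot is
 * actually taken.
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		// return true only when the condition of interest holds;
 *		// cond_data is whatever was passed to tracing_snapshot_cond()
 *		return true;
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, my_cond_data, my_update);
 *	// later, from the code being instrumented:
 *	tracing_snapshot_cond(tr, my_cond_data);
 *	// and when done:
 *	tracing_snapshot_cond_disable(tr);
 *
 * "my_update" and "my_cond_data" are made-up names for the example.
 */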
1483 
1484 /**
1485  * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1486  * @tr:		The tracing instance
1487  *
1488  * Check whether the conditional snapshot for the given instance is
1489  * enabled; if so, free the cond_snapshot associated with it,
1490  * otherwise return -EINVAL.
1491  *
1492  * Returns 0 if successful, error otherwise.
1493  */
1494 int tracing_snapshot_cond_disable(struct trace_array *tr)
1495 {
1496 	int ret = 0;
1497 
1498 	local_irq_disable();
1499 	arch_spin_lock(&tr->max_lock);
1500 
1501 	if (!tr->cond_snapshot)
1502 		ret = -EINVAL;
1503 	else {
1504 		kfree(tr->cond_snapshot);
1505 		tr->cond_snapshot = NULL;
1506 	}
1507 
1508 	arch_spin_unlock(&tr->max_lock);
1509 	local_irq_enable();
1510 
1511 	tracing_disarm_snapshot(tr);
1512 
1513 	return ret;
1514 }
1515 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1516 #else
1517 void tracing_snapshot(void)
1518 {
1519 	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1520 }
1521 EXPORT_SYMBOL_GPL(tracing_snapshot);
1522 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1523 {
1524 	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1525 }
1526 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1527 int tracing_alloc_snapshot(void)
1528 {
1529 	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1530 	return -ENODEV;
1531 }
1532 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1533 void tracing_snapshot_alloc(void)
1534 {
1535 	/* Give warning */
1536 	tracing_snapshot();
1537 }
1538 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1539 void *tracing_cond_snapshot_data(struct trace_array *tr)
1540 {
1541 	return NULL;
1542 }
1543 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1544 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1545 {
1546 	return -ENODEV;
1547 }
1548 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1549 int tracing_snapshot_cond_disable(struct trace_array *tr)
1550 {
1551 	return false;
1552 }
1553 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1554 #define free_snapshot(tr)	do { } while (0)
1555 #define tracing_arm_snapshot_locked(tr) ({ -EBUSY; })
1556 #endif /* CONFIG_TRACER_SNAPSHOT */
1557 
1558 void tracer_tracing_off(struct trace_array *tr)
1559 {
1560 	if (tr->array_buffer.buffer)
1561 		ring_buffer_record_off(tr->array_buffer.buffer);
1562 	/*
1563 	 * This flag is looked at when buffers haven't been allocated
1564 	 * yet, or by some tracers (like irqsoff) that just want to
1565 	 * know if the ring buffer has been disabled, but can handle
1566 	 * races where it gets disabled while we still do a record.
1567 	 * As the check is in the fast path of the tracers, it is more
1568 	 * important to be fast than accurate.
1569 	 */
1570 	tr->buffer_disabled = 1;
1571 	/* Make the flag seen by readers */
1572 	smp_wmb();
1573 }
1574 
1575 /**
1576  * tracing_off - turn off tracing buffers
1577  *
1578  * This function stops the tracing buffers from recording data.
1579  * It does not disable any overhead the tracers themselves may
1580  * be causing. This function simply causes all recording to
1581  * the ring buffers to fail.
1582  */
1583 void tracing_off(void)
1584 {
1585 	tracer_tracing_off(&global_trace);
1586 }
1587 EXPORT_SYMBOL_GPL(tracing_off);
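/*
 * Illustrative note (not from the original source): tracing_off() is often
 * called from ad-hoc debugging code so the ring buffer stops recording at
 * the exact point of interest, e.g.
 *
 *	if (suspicious_condition)
 *		tracing_off();
 *
 * The trace recorded up to that point can still be read afterwards from
 * /sys/kernel/tracing/trace. "suspicious_condition" is a placeholder.
 */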
1588 
1589 void disable_trace_on_warning(void)
1590 {
1591 	if (__disable_trace_on_warning) {
1592 		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1593 			"Disabling tracing due to warning\n");
1594 		tracing_off();
1595 	}
1596 }
1597 
1598 /**
1599  * tracer_tracing_is_on - show the real state of the ring buffer
1600  * @tr: the trace array to check whether the ring buffer is enabled
1601  *
1602  * Shows the real state of the ring buffer: whether it is enabled or not.
1603  */
1604 bool tracer_tracing_is_on(struct trace_array *tr)
1605 {
1606 	if (tr->array_buffer.buffer)
1607 		return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
1608 	return !tr->buffer_disabled;
1609 }
1610 
1611 /**
1612  * tracing_is_on - show state of ring buffers enabled
1613  */
1614 int tracing_is_on(void)
1615 {
1616 	return tracer_tracing_is_on(&global_trace);
1617 }
1618 EXPORT_SYMBOL_GPL(tracing_is_on);
1619 
1620 static int __init set_buf_size(char *str)
1621 {
1622 	unsigned long buf_size;
1623 
1624 	if (!str)
1625 		return 0;
1626 	buf_size = memparse(str, &str);
1627 	/*
1628 	 * nr_entries can not be zero and the startup
1629 	 * tests require some buffer space. Therefore
1630 	 * ensure we have at least 4096 bytes of buffer.
1631 	 */
1632 	trace_buf_size = max(4096UL, buf_size);
1633 	return 1;
1634 }
1635 __setup("trace_buf_size=", set_buf_size);
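/*
 * Illustrative note (not part of the original source): memparse() accepts
 * the usual size suffixes, so e.g. booting with "trace_buf_size=8M"
 * requests an 8 MiB buffer (anything below 4096 bytes is raised to 4096,
 * as noted above).
 */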
1636 
1637 static int __init set_tracing_thresh(char *str)
1638 {
1639 	unsigned long threshold;
1640 	int ret;
1641 
1642 	if (!str)
1643 		return 0;
1644 	ret = kstrtoul(str, 0, &threshold);
1645 	if (ret < 0)
1646 		return 0;
1647 	tracing_thresh = threshold * 1000;
1648 	return 1;
1649 }
1650 __setup("tracing_thresh=", set_tracing_thresh);
1651 
1652 unsigned long nsecs_to_usecs(unsigned long nsecs)
1653 {
1654 	return nsecs / 1000;
1655 }
1656 
1657 /*
1658  * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1659  * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1660  * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1661  * of strings in the order that the evals (enum) were defined.
1662  */
1663 #undef C
1664 #define C(a, b) b
1665 
1666 /* These must match the bit positions in trace_iterator_flags */
1667 static const char *trace_options[] = {
1668 	TRACE_FLAGS
1669 	NULL
1670 };
1671 
1672 static struct {
1673 	u64 (*func)(void);
1674 	const char *name;
1675 	int in_ns;		/* is this clock in nanoseconds? */
1676 } trace_clocks[] = {
1677 	{ trace_clock_local,		"local",	1 },
1678 	{ trace_clock_global,		"global",	1 },
1679 	{ trace_clock_counter,		"counter",	0 },
1680 	{ trace_clock_jiffies,		"uptime",	0 },
1681 	{ trace_clock,			"perf",		1 },
1682 	{ ktime_get_mono_fast_ns,	"mono",		1 },
1683 	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
1684 	{ ktime_get_boot_fast_ns,	"boot",		1 },
1685 	{ ktime_get_tai_fast_ns,	"tai",		1 },
1686 	ARCH_TRACE_CLOCKS
1687 };
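/*
 * For illustration (not in the original source): the clock names above are
 * what user space selects through the tracefs trace_clock file, e.g.
 *
 *	echo mono > /sys/kernel/tracing/trace_clock
 *
 * or at boot via the trace_clock= parameter handled earlier in this file.
 */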
1688 
1689 bool trace_clock_in_ns(struct trace_array *tr)
1690 {
1691 	if (trace_clocks[tr->clock_id].in_ns)
1692 		return true;
1693 
1694 	return false;
1695 }
1696 
1697 /*
1698  * trace_parser_get_init - gets the buffer for trace parser
1699  */
1700 int trace_parser_get_init(struct trace_parser *parser, int size)
1701 {
1702 	memset(parser, 0, sizeof(*parser));
1703 
1704 	parser->buffer = kmalloc(size, GFP_KERNEL);
1705 	if (!parser->buffer)
1706 		return 1;
1707 
1708 	parser->size = size;
1709 	return 0;
1710 }
1711 
1712 /*
1713  * trace_parser_put - frees the buffer for trace parser
1714  */
1715 void trace_parser_put(struct trace_parser *parser)
1716 {
1717 	kfree(parser->buffer);
1718 	parser->buffer = NULL;
1719 }
1720 
1721 /*
1722  * trace_get_user - reads the user input string separated by space
1723  * (matched by isspace(ch))
1724  *
1725  * For each string found the 'struct trace_parser' is updated,
1726  * and the function returns.
1727  *
1728  * Returns number of bytes read.
1729  *
1730  * See kernel/trace/trace.h for 'struct trace_parser' details.
1731  */
1732 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1733 	size_t cnt, loff_t *ppos)
1734 {
1735 	char ch;
1736 	size_t read = 0;
1737 	ssize_t ret;
1738 
1739 	if (!*ppos)
1740 		trace_parser_clear(parser);
1741 
1742 	ret = get_user(ch, ubuf++);
1743 	if (ret)
1744 		goto out;
1745 
1746 	read++;
1747 	cnt--;
1748 
1749 	/*
1750 	 * The parser is not finished with the last write,
1751 	 * continue reading the user input without skipping spaces.
1752 	 */
1753 	if (!parser->cont) {
1754 		/* skip white space */
1755 		while (cnt && isspace(ch)) {
1756 			ret = get_user(ch, ubuf++);
1757 			if (ret)
1758 				goto out;
1759 			read++;
1760 			cnt--;
1761 		}
1762 
1763 		parser->idx = 0;
1764 
1765 		/* only spaces were written */
1766 		if (isspace(ch) || !ch) {
1767 			*ppos += read;
1768 			ret = read;
1769 			goto out;
1770 		}
1771 	}
1772 
1773 	/* read the non-space input */
1774 	while (cnt && !isspace(ch) && ch) {
1775 		if (parser->idx < parser->size - 1)
1776 			parser->buffer[parser->idx++] = ch;
1777 		else {
1778 			ret = -EINVAL;
1779 			goto out;
1780 		}
1781 		ret = get_user(ch, ubuf++);
1782 		if (ret)
1783 			goto out;
1784 		read++;
1785 		cnt--;
1786 	}
1787 
1788 	/* We either got finished input or we have to wait for another call. */
1789 	if (isspace(ch) || !ch) {
1790 		parser->buffer[parser->idx] = 0;
1791 		parser->cont = false;
1792 	} else if (parser->idx < parser->size - 1) {
1793 		parser->cont = true;
1794 		parser->buffer[parser->idx++] = ch;
1795 		/* Make sure the parsed string always terminates with '\0'. */
1796 		parser->buffer[parser->idx] = 0;
1797 	} else {
1798 		ret = -EINVAL;
1799 		goto out;
1800 	}
1801 
1802 	*ppos += read;
1803 	ret = read;
1804 
1805 out:
1806 	return ret;
1807 }
1808 
1809 /* TODO add a seq_buf_to_buffer() */
1810 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1811 {
1812 	int len;
1813 
1814 	if (trace_seq_used(s) <= s->readpos)
1815 		return -EBUSY;
1816 
1817 	len = trace_seq_used(s) - s->readpos;
1818 	if (cnt > len)
1819 		cnt = len;
1820 	memcpy(buf, s->buffer + s->readpos, cnt);
1821 
1822 	s->readpos += cnt;
1823 	return cnt;
1824 }
1825 
1826 unsigned long __read_mostly	tracing_thresh;
1827 
1828 #ifdef CONFIG_TRACER_MAX_TRACE
1829 static const struct file_operations tracing_max_lat_fops;
1830 
1831 #ifdef LATENCY_FS_NOTIFY
1832 
1833 static struct workqueue_struct *fsnotify_wq;
1834 
1835 static void latency_fsnotify_workfn(struct work_struct *work)
1836 {
1837 	struct trace_array *tr = container_of(work, struct trace_array,
1838 					      fsnotify_work);
1839 	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1840 }
1841 
1842 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1843 {
1844 	struct trace_array *tr = container_of(iwork, struct trace_array,
1845 					      fsnotify_irqwork);
1846 	queue_work(fsnotify_wq, &tr->fsnotify_work);
1847 }
1848 
1849 static void trace_create_maxlat_file(struct trace_array *tr,
1850 				     struct dentry *d_tracer)
1851 {
1852 	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1853 	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1854 	tr->d_max_latency = trace_create_file("tracing_max_latency",
1855 					      TRACE_MODE_WRITE,
1856 					      d_tracer, tr,
1857 					      &tracing_max_lat_fops);
1858 }
1859 
1860 __init static int latency_fsnotify_init(void)
1861 {
1862 	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1863 				      WQ_UNBOUND | WQ_HIGHPRI, 0);
1864 	if (!fsnotify_wq) {
1865 		pr_err("Unable to allocate tr_max_lat_wq\n");
1866 		return -ENOMEM;
1867 	}
1868 	return 0;
1869 }
1870 
1871 late_initcall_sync(latency_fsnotify_init);
1872 
1873 void latency_fsnotify(struct trace_array *tr)
1874 {
1875 	if (!fsnotify_wq)
1876 		return;
1877 	/*
1878 	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1879 	 * possible that we are called from __schedule() or do_idle(), which
1880 	 * could cause a deadlock.
1881 	 */
1882 	irq_work_queue(&tr->fsnotify_irqwork);
1883 }
1884 
1885 #else /* !LATENCY_FS_NOTIFY */
1886 
1887 #define trace_create_maxlat_file(tr, d_tracer)				\
1888 	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
1889 			  d_tracer, tr, &tracing_max_lat_fops)
1890 
1891 #endif
1892 
1893 /*
1894  * Copy the new maximum trace into the separate maximum-trace
1895  * structure. (this way the maximum trace is permanently saved,
1896  * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1897  */
1898 static void
1899 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1900 {
1901 	struct array_buffer *trace_buf = &tr->array_buffer;
1902 	struct array_buffer *max_buf = &tr->max_buffer;
1903 	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1904 	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1905 
1906 	max_buf->cpu = cpu;
1907 	max_buf->time_start = data->preempt_timestamp;
1908 
1909 	max_data->saved_latency = tr->max_latency;
1910 	max_data->critical_start = data->critical_start;
1911 	max_data->critical_end = data->critical_end;
1912 
1913 	strscpy(max_data->comm, tsk->comm);
1914 	max_data->pid = tsk->pid;
1915 	/*
1916 	 * If tsk == current, then use current_uid(), as that does not use
1917 	 * RCU. The irq tracer can be called out of RCU scope.
1918 	 */
1919 	if (tsk == current)
1920 		max_data->uid = current_uid();
1921 	else
1922 		max_data->uid = task_uid(tsk);
1923 
1924 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1925 	max_data->policy = tsk->policy;
1926 	max_data->rt_priority = tsk->rt_priority;
1927 
1928 	/* record this task's comm */
1929 	tracing_record_cmdline(tsk);
1930 	latency_fsnotify(tr);
1931 }
1932 
1933 /**
1934  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1935  * @tr: tracer
1936  * @tsk: the task with the latency
1937  * @cpu: The cpu that initiated the trace.
1938  * @cond_data: User data associated with a conditional snapshot
1939  *
1940  * Flip the buffers between the @tr and the max_tr and record information
1941  * about which task was the cause of this latency.
1942  */
1943 void
1944 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1945 	      void *cond_data)
1946 {
1947 	if (tr->stop_count)
1948 		return;
1949 
1950 	WARN_ON_ONCE(!irqs_disabled());
1951 
1952 	if (!tr->allocated_snapshot) {
1953 		/* Only the nop tracer should hit this when disabling */
1954 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1955 		return;
1956 	}
1957 
1958 	arch_spin_lock(&tr->max_lock);
1959 
1960 	/* Inherit the recordable setting from array_buffer */
1961 	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1962 		ring_buffer_record_on(tr->max_buffer.buffer);
1963 	else
1964 		ring_buffer_record_off(tr->max_buffer.buffer);
1965 
1966 #ifdef CONFIG_TRACER_SNAPSHOT
1967 	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1968 		arch_spin_unlock(&tr->max_lock);
1969 		return;
1970 	}
1971 #endif
1972 	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1973 
1974 	__update_max_tr(tr, tsk, cpu);
1975 
1976 	arch_spin_unlock(&tr->max_lock);
1977 
1978 	/* Any waiters on the old snapshot buffer need to wake up */
1979 	ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
1980 }
1981 
1982 /**
1983  * update_max_tr_single - only copy one trace over, and reset the rest
1984  * @tr: tracer
1985  * @tsk: task with the latency
1986  * @cpu: the cpu of the buffer to copy.
1987  *
1988  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1989  */
1990 void
1991 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1992 {
1993 	int ret;
1994 
1995 	if (tr->stop_count)
1996 		return;
1997 
1998 	WARN_ON_ONCE(!irqs_disabled());
1999 	if (!tr->allocated_snapshot) {
2000 		/* Only the nop tracer should hit this when disabling */
2001 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
2002 		return;
2003 	}
2004 
2005 	arch_spin_lock(&tr->max_lock);
2006 
2007 	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
2008 
2009 	if (ret == -EBUSY) {
2010 		/*
2011 		 * We failed to swap the buffer due to a commit taking
2012 		 * place on this CPU. We fail to record, but we reset
2013 		 * the max trace buffer (no one writes directly to it)
2014 		 * and flag that it failed.
2015 		 * Another reason is that a resize is in progress.
2016 		 */
2017 		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
2018 			"Failed to swap buffers due to commit or resize in progress\n");
2019 	}
2020 
2021 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
2022 
2023 	__update_max_tr(tr, tsk, cpu);
2024 	arch_spin_unlock(&tr->max_lock);
2025 }
2026 
2027 #endif /* CONFIG_TRACER_MAX_TRACE */
2028 
2029 struct pipe_wait {
2030 	struct trace_iterator		*iter;
2031 	int				wait_index;
2032 };
2033 
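/*
 * Condition callback handed to ring_buffer_wait(): stop waiting once a
 * writer has bumped the wait_index or the iterator has been closed.
 */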
2034 static bool wait_pipe_cond(void *data)
2035 {
2036 	struct pipe_wait *pwait = data;
2037 	struct trace_iterator *iter = pwait->iter;
2038 
2039 	if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index)
2040 		return true;
2041 
2042 	return iter->closed;
2043 }
2044 
2045 static int wait_on_pipe(struct trace_iterator *iter, int full)
2046 {
2047 	struct pipe_wait pwait;
2048 	int ret;
2049 
2050 	/* Iterators are static; they should be either filled or empty */
2051 	if (trace_buffer_iter(iter, iter->cpu_file))
2052 		return 0;
2053 
2054 	pwait.wait_index = atomic_read_acquire(&iter->wait_index);
2055 	pwait.iter = iter;
2056 
2057 	ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full,
2058 			       wait_pipe_cond, &pwait);
2059 
2060 #ifdef CONFIG_TRACER_MAX_TRACE
2061 	/*
2062 	 * Make sure this is still the snapshot buffer, as if a snapshot were
2063 	 * to happen, this would now be the main buffer.
2064 	 */
2065 	if (iter->snapshot)
2066 		iter->array_buffer = &iter->tr->max_buffer;
2067 #endif
2068 	return ret;
2069 }
2070 
2071 #ifdef CONFIG_FTRACE_STARTUP_TEST
2072 static bool selftests_can_run;
2073 
2074 struct trace_selftests {
2075 	struct list_head		list;
2076 	struct tracer			*type;
2077 };
2078 
2079 static LIST_HEAD(postponed_selftests);
2080 
2081 static int save_selftest(struct tracer *type)
2082 {
2083 	struct trace_selftests *selftest;
2084 
2085 	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
2086 	if (!selftest)
2087 		return -ENOMEM;
2088 
2089 	selftest->type = type;
2090 	list_add(&selftest->list, &postponed_selftests);
2091 	return 0;
2092 }
2093 
2094 static int run_tracer_selftest(struct tracer *type)
2095 {
2096 	struct trace_array *tr = &global_trace;
2097 	struct tracer *saved_tracer = tr->current_trace;
2098 	int ret;
2099 
2100 	if (!type->selftest || tracing_selftest_disabled)
2101 		return 0;
2102 
2103 	/*
2104 	 * If a tracer registers early in boot up (before scheduling is
2105 	 * initialized and such), then do not run its selftest yet.
2106 	 * Instead, run it a little later in the boot process.
2107 	 */
2108 	if (!selftests_can_run)
2109 		return save_selftest(type);
2110 
2111 	if (!tracing_is_on()) {
2112 		pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
2113 			type->name);
2114 		return 0;
2115 	}
2116 
2117 	/*
2118 	 * Run a selftest on this tracer.
2119 	 * Here we reset the trace buffer, and set the current
2120 	 * tracer to be this tracer. The tracer can then run some
2121 	 * internal tracing to verify that everything is in order.
2122 	 * If we fail, we do not register this tracer.
2123 	 */
2124 	tracing_reset_online_cpus(&tr->array_buffer);
2125 
2126 	tr->current_trace = type;
2127 
2128 #ifdef CONFIG_TRACER_MAX_TRACE
2129 	if (type->use_max_tr) {
2130 		/* If we expanded the buffers, make sure the max is expanded too */
2131 		if (tr->ring_buffer_expanded)
2132 			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2133 					   RING_BUFFER_ALL_CPUS);
2134 		tr->allocated_snapshot = true;
2135 	}
2136 #endif
2137 
2138 	/* the test is responsible for initializing and enabling */
2139 	pr_info("Testing tracer %s: ", type->name);
2140 	ret = type->selftest(type, tr);
2141 	/* the test is responsible for resetting too */
2142 	tr->current_trace = saved_tracer;
2143 	if (ret) {
2144 		printk(KERN_CONT "FAILED!\n");
2145 		/* Add the warning after printing 'FAILED' */
2146 		WARN_ON(1);
2147 		return -1;
2148 	}
2149 	/* Only reset on passing, to avoid touching corrupted buffers */
2150 	tracing_reset_online_cpus(&tr->array_buffer);
2151 
2152 #ifdef CONFIG_TRACER_MAX_TRACE
2153 	if (type->use_max_tr) {
2154 		tr->allocated_snapshot = false;
2155 
2156 		/* Shrink the max buffer again */
2157 		if (tr->ring_buffer_expanded)
2158 			ring_buffer_resize(tr->max_buffer.buffer, 1,
2159 					   RING_BUFFER_ALL_CPUS);
2160 	}
2161 #endif
2162 
2163 	printk(KERN_CONT "PASSED\n");
2164 	return 0;
2165 }
2166 
2167 static int do_run_tracer_selftest(struct tracer *type)
2168 {
2169 	int ret;
2170 
2171 	/*
2172 	 * Tests can take a long time, especially if they are run one after the
2173 	 * other, as does happen during bootup when all the tracers are
2174 	 * registered. This could cause the soft lockup watchdog to trigger.
2175 	 */
2176 	cond_resched();
2177 
2178 	tracing_selftest_running = true;
2179 	ret = run_tracer_selftest(type);
2180 	tracing_selftest_running = false;
2181 
2182 	return ret;
2183 }
2184 
2185 static __init int init_trace_selftests(void)
2186 {
2187 	struct trace_selftests *p, *n;
2188 	struct tracer *t, **last;
2189 	int ret;
2190 
2191 	selftests_can_run = true;
2192 
2193 	guard(mutex)(&trace_types_lock);
2194 
2195 	if (list_empty(&postponed_selftests))
2196 		return 0;
2197 
2198 	pr_info("Running postponed tracer tests:\n");
2199 
2200 	tracing_selftest_running = true;
2201 	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2202 		/* This loop can take minutes when sanitizers are enabled, so
2203 		 * let's make sure we allow RCU processing.
2204 		 */
2205 		cond_resched();
2206 		ret = run_tracer_selftest(p->type);
2207 		/* If the test fails, then warn and remove from available_tracers */
2208 		if (ret < 0) {
2209 			WARN(1, "tracer: %s failed selftest, disabling\n",
2210 			     p->type->name);
2211 			last = &trace_types;
2212 			for (t = trace_types; t; t = t->next) {
2213 				if (t == p->type) {
2214 					*last = t->next;
2215 					break;
2216 				}
2217 				last = &t->next;
2218 			}
2219 		}
2220 		list_del(&p->list);
2221 		kfree(p);
2222 	}
2223 	tracing_selftest_running = false;
2224 
2225 	return 0;
2226 }
2227 core_initcall(init_trace_selftests);
2228 #else
2229 static inline int do_run_tracer_selftest(struct tracer *type)
2230 {
2231 	return 0;
2232 }
2233 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2234 
2235 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2236 
2237 static void __init apply_trace_boot_options(void);
2238 
2239 /**
2240  * register_tracer - register a tracer with the ftrace system.
2241  * @type: the plugin for the tracer
2242  *
2243  * Register a new plugin tracer.
2244  */
2245 int __init register_tracer(struct tracer *type)
2246 {
2247 	struct tracer *t;
2248 	int ret = 0;
2249 
2250 	if (!type->name) {
2251 		pr_info("Tracer must have a name\n");
2252 		return -1;
2253 	}
2254 
2255 	if (strlen(type->name) >= MAX_TRACER_SIZE) {
2256 		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2257 		return -1;
2258 	}
2259 
2260 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
2261 		pr_warn("Can not register tracer %s due to lockdown\n",
2262 			   type->name);
2263 		return -EPERM;
2264 	}
2265 
2266 	mutex_lock(&trace_types_lock);
2267 
2268 	for (t = trace_types; t; t = t->next) {
2269 		if (strcmp(type->name, t->name) == 0) {
2270 			/* already found */
2271 			pr_info("Tracer %s already registered\n",
2272 				type->name);
2273 			ret = -1;
2274 			goto out;
2275 		}
2276 	}
2277 
2278 	if (!type->set_flag)
2279 		type->set_flag = &dummy_set_flag;
2280 	if (!type->flags) {
2281 		/* Allocate a dummy tracer_flags */
2282 		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2283 		if (!type->flags) {
2284 			ret = -ENOMEM;
2285 			goto out;
2286 		}
2287 		type->flags->val = 0;
2288 		type->flags->opts = dummy_tracer_opt;
2289 	} else if (!type->flags->opts) {
2290 		type->flags->opts = dummy_tracer_opt;
2291 	}
2292 
2293 	/* store the tracer for __set_tracer_option */
2294 	type->flags->trace = type;
2295 
2296 	ret = do_run_tracer_selftest(type);
2297 	if (ret < 0)
2298 		goto out;
2299 
2300 	type->next = trace_types;
2301 	trace_types = type;
2302 	add_tracer_options(&global_trace, type);
2303 
2304  out:
2305 	mutex_unlock(&trace_types_lock);
2306 
2307 	if (ret || !default_bootup_tracer)
2308 		goto out_unlock;
2309 
2310 	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2311 		goto out_unlock;
2312 
2313 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2314 	/* Do we want this tracer to start on bootup? */
2315 	tracing_set_tracer(&global_trace, type->name);
2316 	default_bootup_tracer = NULL;
2317 
2318 	apply_trace_boot_options();
2319 
2320 	/* disable other selftests, since this will break it. */
2321 	disable_tracing_selftest("running a tracer");
2322 
2323  out_unlock:
2324 	return ret;
2325 }
2326 
2327 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2328 {
2329 	struct trace_buffer *buffer = buf->buffer;
2330 
2331 	if (!buffer)
2332 		return;
2333 
2334 	ring_buffer_record_disable(buffer);
2335 
2336 	/* Make sure all commits have finished */
2337 	synchronize_rcu();
2338 	ring_buffer_reset_cpu(buffer, cpu);
2339 
2340 	ring_buffer_record_enable(buffer);
2341 }
2342 
2343 void tracing_reset_online_cpus(struct array_buffer *buf)
2344 {
2345 	struct trace_buffer *buffer = buf->buffer;
2346 
2347 	if (!buffer)
2348 		return;
2349 
2350 	ring_buffer_record_disable(buffer);
2351 
2352 	/* Make sure all commits have finished */
2353 	synchronize_rcu();
2354 
2355 	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2356 
2357 	ring_buffer_reset_online_cpus(buffer);
2358 
2359 	ring_buffer_record_enable(buffer);
2360 }
2361 
2362 static void tracing_reset_all_cpus(struct array_buffer *buf)
2363 {
2364 	struct trace_buffer *buffer = buf->buffer;
2365 
2366 	if (!buffer)
2367 		return;
2368 
2369 	ring_buffer_record_disable(buffer);
2370 
2371 	/* Make sure all commits have finished */
2372 	synchronize_rcu();
2373 
2374 	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2375 
2376 	ring_buffer_reset(buffer);
2377 
2378 	ring_buffer_record_enable(buffer);
2379 }
2380 
2381 /* Must have trace_types_lock held */
2382 void tracing_reset_all_online_cpus_unlocked(void)
2383 {
2384 	struct trace_array *tr;
2385 
2386 	lockdep_assert_held(&trace_types_lock);
2387 
2388 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2389 		if (!tr->clear_trace)
2390 			continue;
2391 		tr->clear_trace = false;
2392 		tracing_reset_online_cpus(&tr->array_buffer);
2393 #ifdef CONFIG_TRACER_MAX_TRACE
2394 		tracing_reset_online_cpus(&tr->max_buffer);
2395 #endif
2396 	}
2397 }
2398 
2399 void tracing_reset_all_online_cpus(void)
2400 {
2401 	mutex_lock(&trace_types_lock);
2402 	tracing_reset_all_online_cpus_unlocked();
2403 	mutex_unlock(&trace_types_lock);
2404 }
2405 
2406 int is_tracing_stopped(void)
2407 {
2408 	return global_trace.stop_count;
2409 }
2410 
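/*
 * Counterpart of tracing_stop_tr(): decrement the trace array's stop
 * counter and, once it drops to zero, re-enable recording on the main
 * (and, if configured, the max) ring buffer.
 */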
2411 static void tracing_start_tr(struct trace_array *tr)
2412 {
2413 	struct trace_buffer *buffer;
2414 	unsigned long flags;
2415 
2416 	if (tracing_disabled)
2417 		return;
2418 
2419 	raw_spin_lock_irqsave(&tr->start_lock, flags);
2420 	if (--tr->stop_count) {
2421 		if (WARN_ON_ONCE(tr->stop_count < 0)) {
2422 			/* Someone screwed up their debugging */
2423 			tr->stop_count = 0;
2424 		}
2425 		goto out;
2426 	}
2427 
2428 	/* Prevent the buffers from switching */
2429 	arch_spin_lock(&tr->max_lock);
2430 
2431 	buffer = tr->array_buffer.buffer;
2432 	if (buffer)
2433 		ring_buffer_record_enable(buffer);
2434 
2435 #ifdef CONFIG_TRACER_MAX_TRACE
2436 	buffer = tr->max_buffer.buffer;
2437 	if (buffer)
2438 		ring_buffer_record_enable(buffer);
2439 #endif
2440 
2441 	arch_spin_unlock(&tr->max_lock);
2442 
2443  out:
2444 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2445 }
2446 
2447 /**
2448  * tracing_start - quick start of the tracer
2449  *
2450  * If tracing is enabled but was stopped by tracing_stop,
2451  * this will start the tracer back up.
2452  */
2453 void tracing_start(void)
2454 
2455 {
2456 	return tracing_start_tr(&global_trace);
2457 }
2458 
2459 static void tracing_stop_tr(struct trace_array *tr)
2460 {
2461 	struct trace_buffer *buffer;
2462 	unsigned long flags;
2463 
2464 	raw_spin_lock_irqsave(&tr->start_lock, flags);
2465 	if (tr->stop_count++)
2466 		goto out;
2467 
2468 	/* Prevent the buffers from switching */
2469 	arch_spin_lock(&tr->max_lock);
2470 
2471 	buffer = tr->array_buffer.buffer;
2472 	if (buffer)
2473 		ring_buffer_record_disable(buffer);
2474 
2475 #ifdef CONFIG_TRACER_MAX_TRACE
2476 	buffer = tr->max_buffer.buffer;
2477 	if (buffer)
2478 		ring_buffer_record_disable(buffer);
2479 #endif
2480 
2481 	arch_spin_unlock(&tr->max_lock);
2482 
2483  out:
2484 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2485 }
2486 
2487 /**
2488  * tracing_stop - quick stop of the tracer
2489  *
2490  * Light weight way to stop tracing. Use in conjunction with
2491  * tracing_start.
2492  */
2493 void tracing_stop(void)
2494 {
2495 	return tracing_stop_tr(&global_trace);
2496 }
2497 
2498 /*
2499  * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2500  * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2501  * simplifies those functions and keeps them in sync.
2502  */
2503 enum print_line_t trace_handle_return(struct trace_seq *s)
2504 {
2505 	return trace_seq_has_overflowed(s) ?
2506 		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2507 }
2508 EXPORT_SYMBOL_GPL(trace_handle_return);
2509 
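/* Depth of migrate_disable() for the current task (always 0 on !SMP) */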
2510 static unsigned short migration_disable_value(void)
2511 {
2512 #if defined(CONFIG_SMP)
2513 	return current->migration_disabled;
2514 #else
2515 	return 0;
2516 #endif
2517 }
2518 
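/*
 * Pack the tracing context into one word: the upper 16 bits carry the
 * TRACE_FLAG_* bits (irq/softirq/NMI and resched state), bits 4-7 the
 * migration-disable depth and bits 0-3 the preemption depth, each
 * capped at 15.
 */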
2519 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2520 {
2521 	unsigned int trace_flags = irqs_status;
2522 	unsigned int pc;
2523 
2524 	pc = preempt_count();
2525 
2526 	if (pc & NMI_MASK)
2527 		trace_flags |= TRACE_FLAG_NMI;
2528 	if (pc & HARDIRQ_MASK)
2529 		trace_flags |= TRACE_FLAG_HARDIRQ;
2530 	if (in_serving_softirq())
2531 		trace_flags |= TRACE_FLAG_SOFTIRQ;
2532 	if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2533 		trace_flags |= TRACE_FLAG_BH_OFF;
2534 
2535 	if (tif_need_resched())
2536 		trace_flags |= TRACE_FLAG_NEED_RESCHED;
2537 	if (test_preempt_need_resched())
2538 		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2539 	if (IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY) && tif_test_bit(TIF_NEED_RESCHED_LAZY))
2540 		trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
2541 	return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2542 		(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
2543 }
2544 
2545 struct ring_buffer_event *
2546 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2547 			  int type,
2548 			  unsigned long len,
2549 			  unsigned int trace_ctx)
2550 {
2551 	return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2552 }
2553 
2554 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2555 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2556 static int trace_buffered_event_ref;
2557 
2558 /**
2559  * trace_buffered_event_enable - enable buffering events
2560  *
2561  * When events are being filtered, it is quicker to use a temporary
2562  * buffer to write the event data into if there's a likely chance
2563  * that it will not be committed. Discarding an event from the ring
2564  * buffer is slower than committing it, and much slower than simply
2565  * dropping a temporary copy that was never written there.
2566  *
2567  * When an event is to be filtered, allocate per cpu buffers to
2568  * write the event data into, and if the event is filtered and discarded
2569  * it is simply dropped, otherwise, the entire data is to be committed
2570  * in one shot.
2571  */
2572 void trace_buffered_event_enable(void)
2573 {
2574 	struct ring_buffer_event *event;
2575 	struct page *page;
2576 	int cpu;
2577 
2578 	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2579 
2580 	if (trace_buffered_event_ref++)
2581 		return;
2582 
2583 	for_each_tracing_cpu(cpu) {
2584 		page = alloc_pages_node(cpu_to_node(cpu),
2585 					GFP_KERNEL | __GFP_NORETRY, 0);
2586 		/* This is just an optimization and can handle failures */
2587 		if (!page) {
2588 			pr_err("Failed to allocate event buffer\n");
2589 			break;
2590 		}
2591 
2592 		event = page_address(page);
2593 		memset(event, 0, sizeof(*event));
2594 
2595 		per_cpu(trace_buffered_event, cpu) = event;
2596 
2597 		preempt_disable();
2598 		if (cpu == smp_processor_id() &&
2599 		    __this_cpu_read(trace_buffered_event) !=
2600 		    per_cpu(trace_buffered_event, cpu))
2601 			WARN_ON_ONCE(1);
2602 		preempt_enable();
2603 	}
2604 }
2605 
2606 static void enable_trace_buffered_event(void *data)
2607 {
2608 	/* Probably not needed, but do it anyway */
2609 	smp_rmb();
2610 	this_cpu_dec(trace_buffered_event_cnt);
2611 }
2612 
2613 static void disable_trace_buffered_event(void *data)
2614 {
2615 	this_cpu_inc(trace_buffered_event_cnt);
2616 }
2617 
2618 /**
2619  * trace_buffered_event_disable - disable buffering events
2620  *
2621  * When a filter is removed, it is faster to not use the buffered
2622  * events, and to commit directly into the ring buffer. Free up
2623  * the temp buffers when there are no more users. This requires
2624  * special synchronization with current events.
2625  */
2626 void trace_buffered_event_disable(void)
2627 {
2628 	int cpu;
2629 
2630 	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2631 
2632 	if (WARN_ON_ONCE(!trace_buffered_event_ref))
2633 		return;
2634 
2635 	if (--trace_buffered_event_ref)
2636 		return;
2637 
2638 	/* For each CPU, set the buffer as used. */
2639 	on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
2640 			 NULL, true);
2641 
2642 	/* Wait for all current users to finish */
2643 	synchronize_rcu();
2644 
2645 	for_each_tracing_cpu(cpu) {
2646 		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2647 		per_cpu(trace_buffered_event, cpu) = NULL;
2648 	}
2649 
2650 	/*
2651 	 * Wait for all CPUs that may have started checking whether they can use
2652 	 * their event buffer only after the previous synchronize_rcu() call and
2653 	 * that still read a valid pointer from trace_buffered_event. It must be
2654 	 * ensured that they don't see a cleared trace_buffered_event_cnt, else
2655 	 * they could wrongly decide to use the pointed-to buffer, which is now freed.
2656 	 */
2657 	synchronize_rcu();
2658 
2659 	/* For each CPU, relinquish the buffer */
2660 	on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
2661 			 true);
2662 }
2663 
2664 static struct trace_buffer *temp_buffer;
2665 
2666 struct ring_buffer_event *
2667 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2668 			  struct trace_event_file *trace_file,
2669 			  int type, unsigned long len,
2670 			  unsigned int trace_ctx)
2671 {
2672 	struct ring_buffer_event *entry;
2673 	struct trace_array *tr = trace_file->tr;
2674 	int val;
2675 
2676 	*current_rb = tr->array_buffer.buffer;
2677 
2678 	if (!tr->no_filter_buffering_ref &&
2679 	    (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2680 		preempt_disable_notrace();
2681 		/*
2682 		 * Filtering is on, so try to use the per cpu buffer first.
2683 		 * This buffer will simulate a ring_buffer_event,
2684 		 * where the type_len is zero and the array[0] will
2685 		 * hold the full length.
2686 		 * (see include/linux/ring_buffer.h for details on
2687 		 *  how the ring_buffer_event is structured).
2688 		 *
2689 		 * Using a temp buffer during filtering and copying it
2690 		 * on a matched filter is quicker than writing directly
2691 		 * into the ring buffer and then discarding it when
2692 		 * it doesn't match. That is because the discard
2693 		 * requires several atomic operations to get right.
2694 		 * Copying on match and doing nothing on a failed match
2695 		 * is still quicker than no copy on match, but having
2696 		 * to discard out of the ring buffer on a failed match.
2697 		 */
2698 		if ((entry = __this_cpu_read(trace_buffered_event))) {
2699 			int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2700 
2701 			val = this_cpu_inc_return(trace_buffered_event_cnt);
2702 
2703 			/*
2704 			 * Preemption is disabled, but interrupts and NMIs
2705 			 * can still come in now. If that happens after
2706 			 * the above increment, then it will have to go
2707 			 * back to the old method of allocating the event
2708 			 * on the ring buffer, and if the filter fails, it
2709 			 * will have to call ring_buffer_discard_commit()
2710 			 * to remove it.
2711 			 *
2712 			 * Need to also check the unlikely case that the
2713 			 * length is bigger than the temp buffer size.
2714 			 * If that happens, then the reserve is pretty much
2715 			 * guaranteed to fail, as the ring buffer currently
2716 			 * only allows events less than a page. But that may
2717 			 * change in the future, so let the ring buffer reserve
2718 			 * handle the failure in that case.
2719 			 */
2720 			if (val == 1 && likely(len <= max_len)) {
2721 				trace_event_setup(entry, type, trace_ctx);
2722 				entry->array[0] = len;
2723 				/* Return with preemption disabled */
2724 				return entry;
2725 			}
2726 			this_cpu_dec(trace_buffered_event_cnt);
2727 		}
2728 		/* __trace_buffer_lock_reserve() disables preemption */
2729 		preempt_enable_notrace();
2730 	}
2731 
2732 	entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2733 					    trace_ctx);
2734 	/*
2735 	 * If tracing is off, but we have triggers enabled
2736 	 * we still need to look at the event data. Use the temp_buffer
2737 	 * to store the trace event for the trigger to use. It's recursive
2738 	 * safe and will not be recorded anywhere.
2739 	 */
2740 	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2741 		*current_rb = temp_buffer;
2742 		entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2743 						    trace_ctx);
2744 	}
2745 	return entry;
2746 }
2747 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2748 
2749 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2750 static DEFINE_MUTEX(tracepoint_printk_mutex);
2751 
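/*
 * Used when tracepoint_printk is set: format the event with its
 * registered trace() callback into the shared tracepoint iterator and
 * echo the resulting line to the console via printk().
 */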
2752 static void output_printk(struct trace_event_buffer *fbuffer)
2753 {
2754 	struct trace_event_call *event_call;
2755 	struct trace_event_file *file;
2756 	struct trace_event *event;
2757 	unsigned long flags;
2758 	struct trace_iterator *iter = tracepoint_print_iter;
2759 
2760 	/* We should never get here if iter is NULL */
2761 	if (WARN_ON_ONCE(!iter))
2762 		return;
2763 
2764 	event_call = fbuffer->trace_file->event_call;
2765 	if (!event_call || !event_call->event.funcs ||
2766 	    !event_call->event.funcs->trace)
2767 		return;
2768 
2769 	file = fbuffer->trace_file;
2770 	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2771 	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2772 	     !filter_match_preds(file->filter, fbuffer->entry)))
2773 		return;
2774 
2775 	event = &fbuffer->trace_file->event_call->event;
2776 
2777 	raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2778 	trace_seq_init(&iter->seq);
2779 	iter->ent = fbuffer->entry;
2780 	event_call->event.funcs->trace(iter, 0, event);
2781 	trace_seq_putc(&iter->seq, 0);
2782 	printk("%s", iter->seq.buffer);
2783 
2784 	raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2785 }
2786 
2787 int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
2788 			     void *buffer, size_t *lenp,
2789 			     loff_t *ppos)
2790 {
2791 	int save_tracepoint_printk;
2792 	int ret;
2793 
2794 	guard(mutex)(&tracepoint_printk_mutex);
2795 	save_tracepoint_printk = tracepoint_printk;
2796 
2797 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2798 
2799 	/*
2800 	 * This will force exiting early, as tracepoint_printk
2801 	 * is always zero when tracepoint_printk_iter is not allocated
2802 	 */
2803 	if (!tracepoint_print_iter)
2804 		tracepoint_printk = 0;
2805 
2806 	if (save_tracepoint_printk == tracepoint_printk)
2807 		return ret;
2808 
2809 	if (tracepoint_printk)
2810 		static_key_enable(&tracepoint_printk_key.key);
2811 	else
2812 		static_key_disable(&tracepoint_printk_key.key);
2813 
2814 	return ret;
2815 }
2816 
2817 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2818 {
2819 	enum event_trigger_type tt = ETT_NONE;
2820 	struct trace_event_file *file = fbuffer->trace_file;
2821 
2822 	if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2823 			fbuffer->entry, &tt))
2824 		goto discard;
2825 
2826 	if (static_key_false(&tracepoint_printk_key.key))
2827 		output_printk(fbuffer);
2828 
2829 	if (static_branch_unlikely(&trace_event_exports_enabled))
2830 		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2831 
2832 	trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2833 			fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2834 
2835 discard:
2836 	if (tt)
2837 		event_triggers_post_call(file, tt);
2838 
2839 }
2840 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2841 
2842 /*
2843  * Skip 3:
2844  *
2845  *   trace_buffer_unlock_commit_regs()
2846  *   trace_event_buffer_commit()
2847  *   trace_event_raw_event_xxx()
2848  */
2849 # define STACK_SKIP 3
2850 
2851 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2852 				     struct trace_buffer *buffer,
2853 				     struct ring_buffer_event *event,
2854 				     unsigned int trace_ctx,
2855 				     struct pt_regs *regs)
2856 {
2857 	__buffer_unlock_commit(buffer, event);
2858 
2859 	/*
2860 	 * If regs is not set, then skip the necessary functions.
2861 	 * Note, we can still get here via blktrace, wakeup tracer
2862 	 * and mmiotrace, but that's ok if they lose a function or
2863 	 * two. They are not that meaningful.
2864 	 */
2865 	ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2866 	ftrace_trace_userstack(tr, buffer, trace_ctx);
2867 }
2868 
2869 /*
2870  * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2871  */
2872 void
2873 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2874 				   struct ring_buffer_event *event)
2875 {
2876 	__buffer_unlock_commit(buffer, event);
2877 }
2878 
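/*
 * Record a single function call (ip and its caller parent_ip) into the
 * ring buffer of @tr, and pass it to any registered function exporters.
 */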
2879 void
2880 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2881 	       parent_ip, unsigned int trace_ctx)
2882 {
2883 	struct trace_buffer *buffer = tr->array_buffer.buffer;
2884 	struct ring_buffer_event *event;
2885 	struct ftrace_entry *entry;
2886 
2887 	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2888 					    trace_ctx);
2889 	if (!event)
2890 		return;
2891 	entry	= ring_buffer_event_data(event);
2892 	entry->ip			= ip;
2893 	entry->parent_ip		= parent_ip;
2894 
2895 	if (static_branch_unlikely(&trace_function_exports_enabled))
2896 		ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2897 	__buffer_unlock_commit(buffer, event);
2898 }
2899 
2900 #ifdef CONFIG_STACKTRACE
2901 
2902 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2903 #define FTRACE_KSTACK_NESTING	4
2904 
2905 #define FTRACE_KSTACK_ENTRIES	(SZ_4K / FTRACE_KSTACK_NESTING)
2906 
2907 struct ftrace_stack {
2908 	unsigned long		calls[FTRACE_KSTACK_ENTRIES];
2909 };
2910 
2911 
2912 struct ftrace_stacks {
2913 	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];
2914 };
2915 
2916 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2917 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2918 
2919 static void __ftrace_trace_stack(struct trace_array *tr,
2920 				 struct trace_buffer *buffer,
2921 				 unsigned int trace_ctx,
2922 				 int skip, struct pt_regs *regs)
2923 {
2924 	struct ring_buffer_event *event;
2925 	unsigned int size, nr_entries;
2926 	struct ftrace_stack *fstack;
2927 	struct stack_entry *entry;
2928 	int stackidx;
2929 
2930 	/*
2931 	 * Add one, for this function and the call to save_stack_trace()
2932 	 * If regs is set, then these functions will not be in the way.
2933 	 */
2934 #ifndef CONFIG_UNWINDER_ORC
2935 	if (!regs)
2936 		skip++;
2937 #endif
2938 
2939 	preempt_disable_notrace();
2940 
2941 	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2942 
2943 	/* This should never happen. If it does, yell once and skip */
2944 	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2945 		goto out;
2946 
2947 	/*
2948 	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2949 	 * interrupt will either see the value pre increment or post
2950 	 * increment. If the interrupt happens pre increment it will have
2951 	 * restored the counter when it returns.  We just need a barrier to
2952 	 * keep gcc from moving things around.
2953 	 */
2954 	barrier();
2955 
2956 	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2957 	size = ARRAY_SIZE(fstack->calls);
2958 
2959 	if (regs) {
2960 		nr_entries = stack_trace_save_regs(regs, fstack->calls,
2961 						   size, skip);
2962 	} else {
2963 		nr_entries = stack_trace_save(fstack->calls, size, skip);
2964 	}
2965 
2966 #ifdef CONFIG_DYNAMIC_FTRACE
2967 	/* Mark entry of stack trace as trampoline code */
2968 	if (tr->ops && tr->ops->trampoline) {
2969 		unsigned long tramp_start = tr->ops->trampoline;
2970 		unsigned long tramp_end = tramp_start + tr->ops->trampoline_size;
2971 		unsigned long *calls = fstack->calls;
2972 
2973 		for (int i = 0; i < nr_entries; i++) {
2974 			if (calls[i] >= tramp_start && calls[i] < tramp_end)
2975 				calls[i] = FTRACE_TRAMPOLINE_MARKER;
2976 		}
2977 	}
2978 #endif
2979 
2980 	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2981 				    struct_size(entry, caller, nr_entries),
2982 				    trace_ctx);
2983 	if (!event)
2984 		goto out;
2985 	entry = ring_buffer_event_data(event);
2986 
2987 	entry->size = nr_entries;
2988 	memcpy(&entry->caller, fstack->calls,
2989 	       flex_array_size(entry, caller, nr_entries));
2990 
2991 	__buffer_unlock_commit(buffer, event);
2992 
2993  out:
2994 	/* Again, don't let gcc optimize things here */
2995 	barrier();
2996 	__this_cpu_dec(ftrace_stack_reserve);
2997 	preempt_enable_notrace();
2998 
2999 }
3000 
3001 static inline void ftrace_trace_stack(struct trace_array *tr,
3002 				      struct trace_buffer *buffer,
3003 				      unsigned int trace_ctx,
3004 				      int skip, struct pt_regs *regs)
3005 {
3006 	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3007 		return;
3008 
3009 	__ftrace_trace_stack(tr, buffer, trace_ctx, skip, regs);
3010 }
3011 
3012 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3013 		   int skip)
3014 {
3015 	struct trace_buffer *buffer = tr->array_buffer.buffer;
3016 
3017 	if (rcu_is_watching()) {
3018 		__ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL);
3019 		return;
3020 	}
3021 
3022 	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
3023 		return;
3024 
3025 	/*
3026 	 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3027 	 * but if the above rcu_is_watching() failed, then the NMI
3028 	 * triggered someplace critical, and ct_irq_enter() should
3029 	 * not be called from NMI.
3030 	 */
3031 	if (unlikely(in_nmi()))
3032 		return;
3033 
3034 	ct_irq_enter_irqson();
3035 	__ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL);
3036 	ct_irq_exit_irqson();
3037 }
3038 
3039 /**
3040  * trace_dump_stack - record a stack back trace in the trace buffer
3041  * @skip: Number of functions to skip (helper handlers)
3042  */
3043 void trace_dump_stack(int skip)
3044 {
3045 	if (tracing_disabled || tracing_selftest_running)
3046 		return;
3047 
3048 #ifndef CONFIG_UNWINDER_ORC
3049 	/* Skip 1 to skip this function. */
3050 	skip++;
3051 #endif
3052 	__ftrace_trace_stack(printk_trace, printk_trace->array_buffer.buffer,
3053 				tracing_gen_ctx(), skip, NULL);
3054 }
3055 EXPORT_SYMBOL_GPL(trace_dump_stack);
3056 
3057 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3058 static DEFINE_PER_CPU(int, user_stack_count);
3059 
3060 static void
3061 ftrace_trace_userstack(struct trace_array *tr,
3062 		       struct trace_buffer *buffer, unsigned int trace_ctx)
3063 {
3064 	struct ring_buffer_event *event;
3065 	struct userstack_entry *entry;
3066 
3067 	if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3068 		return;
3069 
3070 	/*
3071 	 * NMIs cannot handle page faults, even with fixups.
3072 	 * Saving the user stack can (and often does) fault.
3073 	 */
3074 	if (unlikely(in_nmi()))
3075 		return;
3076 
3077 	/*
3078 	 * prevent recursion, since the user stack tracing may
3079 	 * trigger other kernel events.
3080 	 */
3081 	preempt_disable();
3082 	if (__this_cpu_read(user_stack_count))
3083 		goto out;
3084 
3085 	__this_cpu_inc(user_stack_count);
3086 
3087 	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3088 					    sizeof(*entry), trace_ctx);
3089 	if (!event)
3090 		goto out_drop_count;
3091 	entry	= ring_buffer_event_data(event);
3092 
3093 	entry->tgid		= current->tgid;
3094 	memset(&entry->caller, 0, sizeof(entry->caller));
3095 
3096 	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3097 	__buffer_unlock_commit(buffer, event);
3098 
3099  out_drop_count:
3100 	__this_cpu_dec(user_stack_count);
3101  out:
3102 	preempt_enable();
3103 }
3104 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3105 static void ftrace_trace_userstack(struct trace_array *tr,
3106 				   struct trace_buffer *buffer,
3107 				   unsigned int trace_ctx)
3108 {
3109 }
3110 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3111 
3112 #endif /* CONFIG_STACKTRACE */
3113 
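/* Store the 64-bit timestamp delta as the entry's two 32-bit halves */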
3114 static inline void
3115 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3116 			  unsigned long long delta)
3117 {
3118 	entry->bottom_delta_ts = delta & U32_MAX;
3119 	entry->top_delta_ts = (delta >> 32);
3120 }
3121 
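/*
 * Emit a TRACE_FUNC_REPEATS entry summarizing the last repeated function
 * call: its location, the repeat count, and the time elapsed since the
 * last call relative to this event's timestamp.
 */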
3122 void trace_last_func_repeats(struct trace_array *tr,
3123 			     struct trace_func_repeats *last_info,
3124 			     unsigned int trace_ctx)
3125 {
3126 	struct trace_buffer *buffer = tr->array_buffer.buffer;
3127 	struct func_repeats_entry *entry;
3128 	struct ring_buffer_event *event;
3129 	u64 delta;
3130 
3131 	event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3132 					    sizeof(*entry), trace_ctx);
3133 	if (!event)
3134 		return;
3135 
3136 	delta = ring_buffer_event_time_stamp(buffer, event) -
3137 		last_info->ts_last_call;
3138 
3139 	entry = ring_buffer_event_data(event);
3140 	entry->ip = last_info->ip;
3141 	entry->parent_ip = last_info->parent_ip;
3142 	entry->count = last_info->count;
3143 	func_repeats_set_delta_ts(entry, delta);
3144 
3145 	__buffer_unlock_commit(buffer, event);
3146 }
3147 
3148 /* created for use with alloc_percpu */
3149 struct trace_buffer_struct {
3150 	int nesting;
3151 	char buffer[4][TRACE_BUF_SIZE];
3152 };
3153 
3154 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3155 
3156 /*
3157  * This allows for lockless recording.  If we're nested too deeply, then
3158  * this returns NULL.
3159  */
3160 static char *get_trace_buf(void)
3161 {
3162 	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3163 
3164 	if (!trace_percpu_buffer || buffer->nesting >= 4)
3165 		return NULL;
3166 
3167 	buffer->nesting++;
3168 
3169 	/* Interrupts must see nesting incremented before we use the buffer */
3170 	barrier();
3171 	return &buffer->buffer[buffer->nesting - 1][0];
3172 }
3173 
3174 static void put_trace_buf(void)
3175 {
3176 	/* Don't let the decrement of nesting leak before this */
3177 	barrier();
3178 	this_cpu_dec(trace_percpu_buffer->nesting);
3179 }
3180 
3181 static int alloc_percpu_trace_buffer(void)
3182 {
3183 	struct trace_buffer_struct __percpu *buffers;
3184 
3185 	if (trace_percpu_buffer)
3186 		return 0;
3187 
3188 	buffers = alloc_percpu(struct trace_buffer_struct);
3189 	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3190 		return -ENOMEM;
3191 
3192 	trace_percpu_buffer = buffers;
3193 	return 0;
3194 }
3195 
3196 static int buffers_allocated;
3197 
3198 void trace_printk_init_buffers(void)
3199 {
3200 	if (buffers_allocated)
3201 		return;
3202 
3203 	if (alloc_percpu_trace_buffer())
3204 		return;
3205 
3206 	/* trace_printk() is for debug use only. Don't use it in production. */
3207 
3208 	pr_warn("\n");
3209 	pr_warn("**********************************************************\n");
3210 	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3211 	pr_warn("**                                                      **\n");
3212 	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
3213 	pr_warn("**                                                      **\n");
3214 	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
3215 	pr_warn("** unsafe for production use.                           **\n");
3216 	pr_warn("**                                                      **\n");
3217 	pr_warn("** If you see this message and you are not debugging    **\n");
3218 	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
3219 	pr_warn("**                                                      **\n");
3220 	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3221 	pr_warn("**********************************************************\n");
3222 
3223 	/* Expand the buffers to set size */
3224 	tracing_update_buffers(&global_trace);
3225 
3226 	buffers_allocated = 1;
3227 
3228 	/*
3229 	 * trace_printk_init_buffers() can be called by modules.
3230 	 * If that happens, then we need to start cmdline recording
3231 	 * directly here. If the global_trace.buffer is already
3232 	 * allocated here, then this was called by module code.
3233 	 */
3234 	if (global_trace.array_buffer.buffer)
3235 		tracing_start_cmdline_record();
3236 }
3237 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3238 
3239 void trace_printk_start_comm(void)
3240 {
3241 	/* Start tracing comms if trace printk is set */
3242 	if (!buffers_allocated)
3243 		return;
3244 	tracing_start_cmdline_record();
3245 }
3246 
3247 static void trace_printk_start_stop_comm(int enabled)
3248 {
3249 	if (!buffers_allocated)
3250 		return;
3251 
3252 	if (enabled)
3253 		tracing_start_cmdline_record();
3254 	else
3255 		tracing_stop_cmdline_record();
3256 }
3257 
3258 /**
3259  * trace_vbprintk - write binary msg to tracing buffer
3260  * @ip:    The address of the caller
3261  * @fmt:   The string format to write to the buffer
3262  * @args:  Arguments for @fmt
3263  */
3264 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3265 {
3266 	struct ring_buffer_event *event;
3267 	struct trace_buffer *buffer;
3268 	struct trace_array *tr = READ_ONCE(printk_trace);
3269 	struct bprint_entry *entry;
3270 	unsigned int trace_ctx;
3271 	char *tbuffer;
3272 	int len = 0, size;
3273 
3274 	if (!printk_binsafe(tr))
3275 		return trace_vprintk(ip, fmt, args);
3276 
3277 	if (unlikely(tracing_selftest_running || tracing_disabled))
3278 		return 0;
3279 
3280 	/* Don't pollute graph traces with trace_vprintk internals */
3281 	pause_graph_tracing();
3282 
3283 	trace_ctx = tracing_gen_ctx();
3284 	preempt_disable_notrace();
3285 
3286 	tbuffer = get_trace_buf();
3287 	if (!tbuffer) {
3288 		len = 0;
3289 		goto out_nobuffer;
3290 	}
3291 
3292 	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3293 
3294 	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3295 		goto out_put;
3296 
3297 	size = sizeof(*entry) + sizeof(u32) * len;
3298 	buffer = tr->array_buffer.buffer;
3299 	ring_buffer_nest_start(buffer);
3300 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3301 					    trace_ctx);
3302 	if (!event)
3303 		goto out;
3304 	entry = ring_buffer_event_data(event);
3305 	entry->ip			= ip;
3306 	entry->fmt			= fmt;
3307 
3308 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3309 	__buffer_unlock_commit(buffer, event);
3310 	ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3311 
3312 out:
3313 	ring_buffer_nest_end(buffer);
3314 out_put:
3315 	put_trace_buf();
3316 
3317 out_nobuffer:
3318 	preempt_enable_notrace();
3319 	unpause_graph_tracing();
3320 
3321 	return len;
3322 }
3323 EXPORT_SYMBOL_GPL(trace_vbprintk);
3324 
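/*
 * Write a plain text printk-style message into @buffer. This is the
 * common helper behind trace_array_vprintk() and trace_array_printk_buf().
 */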
3325 __printf(3, 0)
3326 static int
3327 __trace_array_vprintk(struct trace_buffer *buffer,
3328 		      unsigned long ip, const char *fmt, va_list args)
3329 {
3330 	struct ring_buffer_event *event;
3331 	int len = 0, size;
3332 	struct print_entry *entry;
3333 	unsigned int trace_ctx;
3334 	char *tbuffer;
3335 
3336 	if (tracing_disabled)
3337 		return 0;
3338 
3339 	/* Don't pollute graph traces with trace_vprintk internals */
3340 	pause_graph_tracing();
3341 
3342 	trace_ctx = tracing_gen_ctx();
3343 	preempt_disable_notrace();
3344 
3345 
3346 	tbuffer = get_trace_buf();
3347 	if (!tbuffer) {
3348 		len = 0;
3349 		goto out_nobuffer;
3350 	}
3351 
3352 	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3353 
3354 	size = sizeof(*entry) + len + 1;
3355 	ring_buffer_nest_start(buffer);
3356 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3357 					    trace_ctx);
3358 	if (!event)
3359 		goto out;
3360 	entry = ring_buffer_event_data(event);
3361 	entry->ip = ip;
3362 
3363 	memcpy(&entry->buf, tbuffer, len + 1);
3364 	__buffer_unlock_commit(buffer, event);
3365 	ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
3366 
3367 out:
3368 	ring_buffer_nest_end(buffer);
3369 	put_trace_buf();
3370 
3371 out_nobuffer:
3372 	preempt_enable_notrace();
3373 	unpause_graph_tracing();
3374 
3375 	return len;
3376 }
3377 
3378 __printf(3, 0)
3379 int trace_array_vprintk(struct trace_array *tr,
3380 			unsigned long ip, const char *fmt, va_list args)
3381 {
3382 	if (tracing_selftest_running && tr == &global_trace)
3383 		return 0;
3384 
3385 	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3386 }
3387 
3388 /**
3389  * trace_array_printk - Print a message to a specific instance
3390  * @tr: The instance trace_array descriptor
3391  * @ip: The instruction pointer that this is called from.
3392  * @fmt: The format to print (printf format)
3393  *
3394  * If a subsystem sets up its own instance, they have the right to
3395  * printk strings into their tracing instance buffer using this
3396  * function. Note, this function will not write into the top level
3397  * buffer (use trace_printk() for that), as writing into the top level
3398  * buffer should only have events that can be individually disabled.
3399  * trace_printk() is only used for debugging a kernel, and should never
3400  * be incorporated into normal use.
3401  *
3402  * trace_array_printk() can be used, as it will not add noise to the
3403  * top level tracing buffer.
3404  *
3405  * Note, trace_array_init_printk() must be called on @tr before this
3406  * can be used.
3407  */
3408 __printf(3, 0)
3409 int trace_array_printk(struct trace_array *tr,
3410 		       unsigned long ip, const char *fmt, ...)
3411 {
3412 	int ret;
3413 	va_list ap;
3414 
3415 	if (!tr)
3416 		return -ENOENT;
3417 
3418 	/* This is only allowed for created instances */
3419 	if (tr == &global_trace)
3420 		return 0;
3421 
3422 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3423 		return 0;
3424 
3425 	va_start(ap, fmt);
3426 	ret = trace_array_vprintk(tr, ip, fmt, ap);
3427 	va_end(ap);
3428 	return ret;
3429 }
3430 EXPORT_SYMBOL_GPL(trace_array_printk);
3431 
3432 /**
3433  * trace_array_init_printk - Initialize buffers for trace_array_printk()
3434  * @tr: The trace array to initialize the buffers for
3435  *
3436  * As trace_array_printk() only writes into instances, they are OK to
3437  * have in the kernel (unlike trace_printk()). This needs to be called
3438  * before trace_array_printk() can be used on a trace_array.
3439  */
3440 int trace_array_init_printk(struct trace_array *tr)
3441 {
3442 	if (!tr)
3443 		return -ENOENT;
3444 
3445 	/* This is only allowed for created instances */
3446 	if (tr == &global_trace)
3447 		return -EINVAL;
3448 
3449 	return alloc_percpu_trace_buffer();
3450 }
3451 EXPORT_SYMBOL_GPL(trace_array_init_printk);
3452 
3453 __printf(3, 4)
3454 int trace_array_printk_buf(struct trace_buffer *buffer,
3455 			   unsigned long ip, const char *fmt, ...)
3456 {
3457 	int ret;
3458 	va_list ap;
3459 
3460 	if (!(printk_trace->trace_flags & TRACE_ITER_PRINTK))
3461 		return 0;
3462 
3463 	va_start(ap, fmt);
3464 	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3465 	va_end(ap);
3466 	return ret;
3467 }
3468 
3469 __printf(2, 0)
3470 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3471 {
3472 	return trace_array_vprintk(printk_trace, ip, fmt, args);
3473 }
3474 EXPORT_SYMBOL_GPL(trace_vprintk);
3475 
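/* Advance the iterator (and its per-cpu buffer iterator) to the next entry */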
3476 static void trace_iterator_increment(struct trace_iterator *iter)
3477 {
3478 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3479 
3480 	iter->idx++;
3481 	if (buf_iter)
3482 		ring_buffer_iter_advance(buf_iter);
3483 }
3484 
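/*
 * Peek at the next entry for @cpu without consuming it. A per-cpu buffer
 * iterator is used when one exists (static reads); otherwise fall back
 * to ring_buffer_peek() for live (pipe) reads.
 */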
3485 static struct trace_entry *
3486 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3487 		unsigned long *lost_events)
3488 {
3489 	struct ring_buffer_event *event;
3490 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3491 
3492 	if (buf_iter) {
3493 		event = ring_buffer_iter_peek(buf_iter, ts);
3494 		if (lost_events)
3495 			*lost_events = ring_buffer_iter_dropped(buf_iter) ?
3496 				(unsigned long)-1 : 0;
3497 	} else {
3498 		event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3499 					 lost_events);
3500 	}
3501 
3502 	if (event) {
3503 		iter->ent_size = ring_buffer_event_length(event);
3504 		return ring_buffer_event_data(event);
3505 	}
3506 	iter->ent_size = 0;
3507 	return NULL;
3508 }
3509 
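/*
 * Find the oldest pending entry across the traced CPUs (or only
 * iter->cpu_file for per-cpu files) by comparing timestamps, and report
 * its CPU, timestamp and number of lost events.
 */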
3510 static struct trace_entry *
3511 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3512 		  unsigned long *missing_events, u64 *ent_ts)
3513 {
3514 	struct trace_buffer *buffer = iter->array_buffer->buffer;
3515 	struct trace_entry *ent, *next = NULL;
3516 	unsigned long lost_events = 0, next_lost = 0;
3517 	int cpu_file = iter->cpu_file;
3518 	u64 next_ts = 0, ts;
3519 	int next_cpu = -1;
3520 	int next_size = 0;
3521 	int cpu;
3522 
3523 	/*
3524 	 * If we are in a per_cpu trace file, don't bother iterating over
3525 	 * all CPUs; peek at that CPU directly.
3526 	 */
3527 	if (cpu_file > RING_BUFFER_ALL_CPUS) {
3528 		if (ring_buffer_empty_cpu(buffer, cpu_file))
3529 			return NULL;
3530 		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3531 		if (ent_cpu)
3532 			*ent_cpu = cpu_file;
3533 
3534 		return ent;
3535 	}
3536 
3537 	for_each_tracing_cpu(cpu) {
3538 
3539 		if (ring_buffer_empty_cpu(buffer, cpu))
3540 			continue;
3541 
3542 		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3543 
3544 		/*
3545 		 * Pick the entry with the smallest timestamp:
3546 		 */
3547 		if (ent && (!next || ts < next_ts)) {
3548 			next = ent;
3549 			next_cpu = cpu;
3550 			next_ts = ts;
3551 			next_lost = lost_events;
3552 			next_size = iter->ent_size;
3553 		}
3554 	}
3555 
3556 	iter->ent_size = next_size;
3557 
3558 	if (ent_cpu)
3559 		*ent_cpu = next_cpu;
3560 
3561 	if (ent_ts)
3562 		*ent_ts = next_ts;
3563 
3564 	if (missing_events)
3565 		*missing_events = next_lost;
3566 
3567 	return next;
3568 }
3569 
3570 #define STATIC_FMT_BUF_SIZE	128
3571 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3572 
3573 char *trace_iter_expand_format(struct trace_iterator *iter)
3574 {
3575 	char *tmp;
3576 
3577 	/*
3578 	 * iter->tr is NULL when used with tp_printk, which means this
3579 	 * can get called where it is not safe to call krealloc().
3580 	 */
3581 	if (!iter->tr || iter->fmt == static_fmt_buf)
3582 		return NULL;
3583 
3584 	tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3585 		       GFP_KERNEL);
3586 	if (tmp) {
3587 		iter->fmt_size += STATIC_FMT_BUF_SIZE;
3588 		iter->fmt = tmp;
3589 	}
3590 
3591 	return tmp;
3592 }
3593 
3594 /* Returns true if the string is safe to dereference from an event */
3595 static bool trace_safe_str(struct trace_iterator *iter, const char *str)
3596 {
3597 	unsigned long addr = (unsigned long)str;
3598 	struct trace_event *trace_event;
3599 	struct trace_event_call *event;
3600 
3601 	/* OK if part of the event data */
3602 	if ((addr >= (unsigned long)iter->ent) &&
3603 	    (addr < (unsigned long)iter->ent + iter->ent_size))
3604 		return true;
3605 
3606 	/* OK if part of the temp seq buffer */
3607 	if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3608 	    (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE))
3609 		return true;
3610 
3611 	/* Core rodata can not be freed */
3612 	if (is_kernel_rodata(addr))
3613 		return true;
3614 
3615 	if (trace_is_tracepoint_string(str))
3616 		return true;
3617 
3618 	/*
3619 	 * Now this could be a module event, referencing core module
3620 	 * data, which is OK.
3621 	 */
3622 	if (!iter->ent)
3623 		return false;
3624 
3625 	trace_event = ftrace_find_event(iter->ent->type);
3626 	if (!trace_event)
3627 		return false;
3628 
3629 	event = container_of(trace_event, struct trace_event_call, event);
3630 	if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3631 		return false;
3632 
3633 	/* Would rather have rodata, but this will suffice */
3634 	if (within_module_core(addr, event->module))
3635 		return true;
3636 
3637 	return false;
3638 }
3639 
3640 /**
3641  * ignore_event - Check dereferenced fields while writing to the seq buffer
3642  * @iter: The iterator that holds the seq buffer and the event being printed
3643  *
3644  * At boot up, test_event_printk() will flag any event that dereferences
3645  * a string with "%s" that does not exist in the ring buffer. It may still
3646  * be valid, as the string may point to a static string in the kernel
3647  * rodata that never gets freed. But if the string pointer is pointing
3648  * to something that was allocated, there's a chance that it can be freed
3649  * by the time the user reads the trace. This would cause a bad memory
3650  * access by the kernel and possibly crash the system.
3651  *
3652  * This function will check if the event has any fields flagged as needing
3653  * to be checked at runtime and perform those checks.
3654  *
3655  * If it is found that a field is unsafe, it will write into the @iter->seq
3656  * a message stating what was found to be unsafe.
3657  *
3658  * @return: true if the event is unsafe and should be ignored,
3659  *          false otherwise.
3660  */
3661 bool ignore_event(struct trace_iterator *iter)
3662 {
3663 	struct ftrace_event_field *field;
3664 	struct trace_event *trace_event;
3665 	struct trace_event_call *event;
3666 	struct list_head *head;
3667 	struct trace_seq *seq;
3668 	const void *ptr;
3669 
3670 	trace_event = ftrace_find_event(iter->ent->type);
3671 
3672 	seq = &iter->seq;
3673 
3674 	if (!trace_event) {
3675 		trace_seq_printf(seq, "EVENT ID %d NOT FOUND?\n", iter->ent->type);
3676 		return true;
3677 	}
3678 
3679 	event = container_of(trace_event, struct trace_event_call, event);
3680 	if (!(event->flags & TRACE_EVENT_FL_TEST_STR))
3681 		return false;
3682 
3683 	head = trace_get_fields(event);
3684 	if (!head) {
3685 		trace_seq_printf(seq, "FIELDS FOR EVENT '%s' NOT FOUND?\n",
3686 				 trace_event_name(event));
3687 		return true;
3688 	}
3689 
3690 	/* Offsets are from the iter->ent that points to the raw event */
3691 	ptr = iter->ent;
3692 
3693 	list_for_each_entry(field, head, link) {
3694 		const char *str;
3695 		bool good;
3696 
3697 		if (!field->needs_test)
3698 			continue;
3699 
3700 		str = *(const char **)(ptr + field->offset);
3701 
3702 		good = trace_safe_str(iter, str);
3703 
3704 		/*
3705 		 * If you hit this warning, it is likely that the
3706 		 * trace event in question used %s on a string that
3707 		 * was saved at the time of the event, but may not be
3708 		 * around when the trace is read. Use __string(),
3709 		 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3710 		 * instead. See samples/trace_events/trace-events-sample.h
3711 		 * for reference.
3712 		 */
3713 		if (WARN_ONCE(!good, "event '%s' has unsafe pointer field '%s'",
3714 			      trace_event_name(event), field->name)) {
3715 			trace_seq_printf(seq, "EVENT %s: HAS UNSAFE POINTER FIELD '%s'\n",
3716 					 trace_event_name(event), field->name);
3717 			return true;
3718 		}
3719 	}
3720 	return false;
3721 }
3722 
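/*
 * Return the format string to use for an event. When pointer hashing is
 * disabled (the hash-ptr trace option is off), each bare "%p" is
 * rewritten as "%px" in iter->fmt so the real pointer values are shown.
 */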
3723 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3724 {
3725 	const char *p, *new_fmt;
3726 	char *q;
3727 
3728 	if (WARN_ON_ONCE(!fmt))
3729 		return fmt;
3730 
3731 	if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3732 		return fmt;
3733 
3734 	p = fmt;
3735 	new_fmt = q = iter->fmt;
3736 	while (*p) {
3737 		if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3738 			if (!trace_iter_expand_format(iter))
3739 				return fmt;
3740 
3741 			q += iter->fmt - new_fmt;
3742 			new_fmt = iter->fmt;
3743 		}
3744 
3745 		*q++ = *p++;
3746 
3747 		/* Replace %p with %px */
3748 		if (p[-1] == '%') {
3749 			if (p[0] == '%') {
3750 				*q++ = *p++;
3751 			} else if (p[0] == 'p' && !isalnum(p[1])) {
3752 				*q++ = *p++;
3753 				*q++ = 'x';
3754 			}
3755 		}
3756 	}
3757 	*q = '\0';
3758 
3759 	return new_fmt;
3760 }
3761 
3762 #define STATIC_TEMP_BUF_SIZE	128
3763 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3764 
3765 /* Find the next real entry, without updating the iterator itself */
3766 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3767 					  int *ent_cpu, u64 *ent_ts)
3768 {
3769 	/* __find_next_entry will reset ent_size */
3770 	int ent_size = iter->ent_size;
3771 	struct trace_entry *entry;
3772 
3773 	/*
3774 	 * If called from ftrace_dump(), then the iter->temp buffer
3775 	 * will be the static_temp_buf and not created from kmalloc.
3776 	 * If the entry size is greater than the buffer, we can
3777 	 * not save it. Just return NULL in that case. This is only
3778 	 * used to add markers when two consecutive events' timestamps
3779 	 * have a large delta. See trace_print_lat_context().
3780 	 */
3781 	if (iter->temp == static_temp_buf &&
3782 	    STATIC_TEMP_BUF_SIZE < ent_size)
3783 		return NULL;
3784 
3785 	/*
3786 	 * The __find_next_entry() may call peek_next_entry(), which may
3787 	 * call ring_buffer_peek() that may make the contents of iter->ent
3788 	 * undefined. Need to copy iter->ent now.
3789 	 */
3790 	if (iter->ent && iter->ent != iter->temp) {
3791 		if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3792 		    !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3793 			void *temp;
3794 			temp = kmalloc(iter->ent_size, GFP_KERNEL);
3795 			if (!temp)
3796 				return NULL;
3797 			kfree(iter->temp);
3798 			iter->temp = temp;
3799 			iter->temp_size = iter->ent_size;
3800 		}
3801 		memcpy(iter->temp, iter->ent, iter->ent_size);
3802 		iter->ent = iter->temp;
3803 	}
3804 	entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3805 	/* Put back the original ent_size */
3806 	iter->ent_size = ent_size;
3807 
3808 	return entry;
3809 }
3810 
3811 /* Find the next real entry, and increment the iterator to the next entry */
3812 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3813 {
3814 	iter->ent = __find_next_entry(iter, &iter->cpu,
3815 				      &iter->lost_events, &iter->ts);
3816 
3817 	if (iter->ent)
3818 		trace_iterator_increment(iter);
3819 
3820 	return iter->ent ? iter : NULL;
3821 }
3822 
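/*
 * Consume the next event on iter->cpu from the ring buffer, updating the
 * iterator's timestamp and lost-event count as a side effect.
 */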
3823 static void trace_consume(struct trace_iterator *iter)
3824 {
3825 	ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3826 			    &iter->lost_events);
3827 }
3828 
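/*
 * seq_file ->next() callback for reading the trace: step the iterator
 * forward until it reaches the entry at position *pos.
 */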
3829 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3830 {
3831 	struct trace_iterator *iter = m->private;
3832 	int i = (int)*pos;
3833 	void *ent;
3834 
3835 	WARN_ON_ONCE(iter->leftover);
3836 
3837 	(*pos)++;
3838 
3839 	/* can't go backwards */
3840 	if (iter->idx > i)
3841 		return NULL;
3842 
3843 	if (iter->idx < 0)
3844 		ent = trace_find_next_entry_inc(iter);
3845 	else
3846 		ent = iter;
3847 
3848 	while (ent && iter->idx < i)
3849 		ent = trace_find_next_entry_inc(iter);
3850 
3851 	iter->pos = *pos;
3852 
3853 	return ent;
3854 }
3855 
3856 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3857 {
3858 	struct ring_buffer_iter *buf_iter;
3859 	unsigned long entries = 0;
3860 	u64 ts;
3861 
3862 	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3863 
3864 	buf_iter = trace_buffer_iter(iter, cpu);
3865 	if (!buf_iter)
3866 		return;
3867 
3868 	ring_buffer_iter_reset(buf_iter);
3869 
3870 	/*
3871 	 * With the max latency tracers, we could have the case that a
3872 	 * reset never took place on a CPU. This is evident by the
3873 	 * timestamp being before the start of the buffer.
3874 	 */
3875 	while (ring_buffer_iter_peek(buf_iter, &ts)) {
3876 		if (ts >= iter->array_buffer->time_start)
3877 			break;
3878 		entries++;
3879 		ring_buffer_iter_advance(buf_iter);
3880 		/* This could be a big loop */
3881 		cond_resched();
3882 	}
3883 
3884 	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3885 }
3886 
3887 /*
3888  * The current tracer is copied to avoid holding a global lock
3889  * all around.
3890  */
3891 static void *s_start(struct seq_file *m, loff_t *pos)
3892 {
3893 	struct trace_iterator *iter = m->private;
3894 	struct trace_array *tr = iter->tr;
3895 	int cpu_file = iter->cpu_file;
3896 	void *p = NULL;
3897 	loff_t l = 0;
3898 	int cpu;
3899 
3900 	mutex_lock(&trace_types_lock);
3901 	if (unlikely(tr->current_trace != iter->trace)) {
3902 		/* Close iter->trace before switching to the new current tracer */
3903 		if (iter->trace->close)
3904 			iter->trace->close(iter);
3905 		iter->trace = tr->current_trace;
3906 		/* Reopen the new current tracer */
3907 		if (iter->trace->open)
3908 			iter->trace->open(iter);
3909 	}
3910 	mutex_unlock(&trace_types_lock);
3911 
3912 #ifdef CONFIG_TRACER_MAX_TRACE
3913 	if (iter->snapshot && iter->trace->use_max_tr)
3914 		return ERR_PTR(-EBUSY);
3915 #endif
3916 
3917 	if (*pos != iter->pos) {
3918 		iter->ent = NULL;
3919 		iter->cpu = 0;
3920 		iter->idx = -1;
3921 
3922 		if (cpu_file == RING_BUFFER_ALL_CPUS) {
3923 			for_each_tracing_cpu(cpu)
3924 				tracing_iter_reset(iter, cpu);
3925 		} else
3926 			tracing_iter_reset(iter, cpu_file);
3927 
3928 		iter->leftover = 0;
3929 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3930 			;
3931 
3932 	} else {
3933 		/*
3934 		 * If we overflowed the seq_file before, then we want
3935 		 * to just reuse the trace_seq buffer again.
3936 		 */
3937 		if (iter->leftover)
3938 			p = iter;
3939 		else {
3940 			l = *pos - 1;
3941 			p = s_next(m, p, &l);
3942 		}
3943 	}
3944 
3945 	trace_event_read_lock();
3946 	trace_access_lock(cpu_file);
3947 	return p;
3948 }
3949 
3950 static void s_stop(struct seq_file *m, void *p)
3951 {
3952 	struct trace_iterator *iter = m->private;
3953 
3954 #ifdef CONFIG_TRACER_MAX_TRACE
3955 	if (iter->snapshot && iter->trace->use_max_tr)
3956 		return;
3957 #endif
3958 
3959 	trace_access_unlock(iter->cpu_file);
3960 	trace_event_read_unlock();
3961 }
3962 
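/*
 * For one CPU buffer, report the number of readable events (@entries) and
 * the total written (@total), which also counts events lost to overwrites
 * unless entries were skipped by tracing_iter_reset().
 */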
3963 static void
3964 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
3965 		      unsigned long *entries, int cpu)
3966 {
3967 	unsigned long count;
3968 
3969 	count = ring_buffer_entries_cpu(buf->buffer, cpu);
3970 	/*
3971 	 * If this buffer has skipped entries, then we hold all
3972 	 * entries for the trace and we need to ignore the
3973 	 * ones before the time stamp.
3974 	 */
3975 	if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3976 		count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3977 		/* total is the same as the entries */
3978 		*total = count;
3979 	} else
3980 		*total = count +
3981 			ring_buffer_overrun_cpu(buf->buffer, cpu);
3982 	*entries = count;
3983 }
3984 
3985 static void
3986 get_total_entries(struct array_buffer *buf,
3987 		  unsigned long *total, unsigned long *entries)
3988 {
3989 	unsigned long t, e;
3990 	int cpu;
3991 
3992 	*total = 0;
3993 	*entries = 0;
3994 
3995 	for_each_tracing_cpu(cpu) {
3996 		get_total_entries_cpu(buf, &t, &e, cpu);
3997 		*total += t;
3998 		*entries += e;
3999 	}
4000 }
4001 
4002 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4003 {
4004 	unsigned long total, entries;
4005 
4006 	if (!tr)
4007 		tr = &global_trace;
4008 
4009 	get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4010 
4011 	return entries;
4012 }
4013 
4014 unsigned long trace_total_entries(struct trace_array *tr)
4015 {
4016 	unsigned long total, entries;
4017 
4018 	if (!tr)
4019 		tr = &global_trace;
4020 
4021 	get_total_entries(&tr->array_buffer, &total, &entries);
4022 
4023 	return entries;
4024 }
4025 
4026 static void print_lat_help_header(struct seq_file *m)
4027 {
4028 	seq_puts(m, "#                    _------=> CPU#            \n"
4029 		    "#                   / _-----=> irqs-off/BH-disabled\n"
4030 		    "#                  | / _----=> need-resched    \n"
4031 		    "#                  || / _---=> hardirq/softirq \n"
4032 		    "#                  ||| / _--=> preempt-depth   \n"
4033 		    "#                  |||| / _-=> migrate-disable \n"
4034 		    "#                  ||||| /     delay           \n"
4035 		    "#  cmd     pid     |||||| time  |   caller     \n"
4036 		    "#     \\   /        ||||||  \\    |    /       \n");
4037 }
4038 
4039 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4040 {
4041 	unsigned long total;
4042 	unsigned long entries;
4043 
4044 	get_total_entries(buf, &total, &entries);
4045 	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
4046 		   entries, total, num_online_cpus());
4047 	seq_puts(m, "#\n");
4048 }
4049 
4050 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4051 				   unsigned int flags)
4052 {
4053 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
4054 
4055 	print_event_info(buf, m);
4056 
4057 	seq_printf(m, "#           TASK-PID    %s CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : "");
4058 	seq_printf(m, "#              | |      %s   |         |         |\n",      tgid ? "     |    " : "");
4059 }
4060 
4061 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4062 				       unsigned int flags)
4063 {
4064 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
4065 	static const char space[] = "            ";
4066 	int prec = tgid ? 12 : 2;
4067 
4068 	print_event_info(buf, m);
4069 
4070 	seq_printf(m, "#                            %.*s  _-----=> irqs-off/BH-disabled\n", prec, space);
4071 	seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
4072 	seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
4073 	seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
4074 	seq_printf(m, "#                            %.*s||| / _-=> migrate-disable\n", prec, space);
4075 	seq_printf(m, "#                            %.*s|||| /     delay\n", prec, space);
4076 	seq_printf(m, "#           TASK-PID  %.*s CPU#  |||||  TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
4077 	seq_printf(m, "#              | |    %.*s   |   |||||     |         |\n", prec, "       |    ");
4078 }
4079 
4080 void
4081 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4082 {
4083 	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4084 	struct array_buffer *buf = iter->array_buffer;
4085 	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4086 	struct tracer *type = iter->trace;
4087 	unsigned long entries;
4088 	unsigned long total;
4089 	const char *name = type->name;
4090 
4091 	get_total_entries(buf, &total, &entries);
4092 
4093 	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4094 		   name, init_utsname()->release);
4095 	seq_puts(m, "# -----------------------------------"
4096 		 "---------------------------------\n");
4097 	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4098 		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4099 		   nsecs_to_usecs(data->saved_latency),
4100 		   entries,
4101 		   total,
4102 		   buf->cpu,
4103 		   preempt_model_none()      ? "server" :
4104 		   preempt_model_voluntary() ? "desktop" :
4105 		   preempt_model_full()      ? "preempt" :
4106 		   preempt_model_lazy()	     ? "lazy"    :
4107 		   preempt_model_rt()        ? "preempt_rt" :
4108 		   "unknown",
4109 		   /* These are reserved for later use */
4110 		   0, 0, 0, 0);
4111 #ifdef CONFIG_SMP
4112 	seq_printf(m, " #P:%d)\n", num_online_cpus());
4113 #else
4114 	seq_puts(m, ")\n");
4115 #endif
4116 	seq_puts(m, "#    -----------------\n");
4117 	seq_printf(m, "#    | task: %.16s-%d "
4118 		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4119 		   data->comm, data->pid,
4120 		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4121 		   data->policy, data->rt_priority);
4122 	seq_puts(m, "#    -----------------\n");
4123 
4124 	if (data->critical_start) {
4125 		seq_puts(m, "#  => started at: ");
4126 		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4127 		trace_print_seq(m, &iter->seq);
4128 		seq_puts(m, "\n#  => ended at:   ");
4129 		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4130 		trace_print_seq(m, &iter->seq);
4131 		seq_puts(m, "\n#\n");
4132 	}
4133 
4134 	seq_puts(m, "#\n");
4135 }
4136 
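/*
 * With the "annotate" option set, emit a one-time marker the first time
 * output from a given CPU buffer shows up in the trace.
 */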
4137 static void test_cpu_buff_start(struct trace_iterator *iter)
4138 {
4139 	struct trace_seq *s = &iter->seq;
4140 	struct trace_array *tr = iter->tr;
4141 
4142 	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4143 		return;
4144 
4145 	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4146 		return;
4147 
4148 	if (cpumask_available(iter->started) &&
4149 	    cpumask_test_cpu(iter->cpu, iter->started))
4150 		return;
4151 
4152 	if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4153 		return;
4154 
4155 	if (cpumask_available(iter->started))
4156 		cpumask_set_cpu(iter->cpu, iter->started);
4157 
4158 	/* Don't print started cpu buffer for the first entry of the trace */
4159 	if (iter->idx > 1)
4160 		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4161 				iter->cpu);
4162 }
4163 
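/* Default (human readable) formatting of a single trace entry. */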
4164 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4165 {
4166 	struct trace_array *tr = iter->tr;
4167 	struct trace_seq *s = &iter->seq;
4168 	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4169 	struct trace_entry *entry;
4170 	struct trace_event *event;
4171 
4172 	entry = iter->ent;
4173 
4174 	test_cpu_buff_start(iter);
4175 
4176 	event = ftrace_find_event(entry->type);
4177 
4178 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4179 		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4180 			trace_print_lat_context(iter);
4181 		else
4182 			trace_print_context(iter);
4183 	}
4184 
4185 	if (trace_seq_has_overflowed(s))
4186 		return TRACE_TYPE_PARTIAL_LINE;
4187 
4188 	if (event) {
4189 		if (tr->trace_flags & TRACE_ITER_FIELDS)
4190 			return print_event_fields(iter, event);
4191 		/*
4192 		 * For TRACE_EVENT() events, the print_fmt is not
4193 		 * safe to use if the array has delta offsets.
4194 		 * Force printing via the fields.
4195 		 */
4196 		if ((tr->text_delta) &&
4197 		    event->type > __TRACE_LAST_TYPE)
4198 			return print_event_fields(iter, event);
4199 
4200 		return event->funcs->trace(iter, sym_flags, event);
4201 	}
4202 
4203 	trace_seq_printf(s, "Unknown type %d\n", entry->type);
4204 
4205 	return trace_handle_return(s);
4206 }
4207 
4208 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4209 {
4210 	struct trace_array *tr = iter->tr;
4211 	struct trace_seq *s = &iter->seq;
4212 	struct trace_entry *entry;
4213 	struct trace_event *event;
4214 
4215 	entry = iter->ent;
4216 
4217 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4218 		trace_seq_printf(s, "%d %d %llu ",
4219 				 entry->pid, iter->cpu, iter->ts);
4220 
4221 	if (trace_seq_has_overflowed(s))
4222 		return TRACE_TYPE_PARTIAL_LINE;
4223 
4224 	event = ftrace_find_event(entry->type);
4225 	if (event)
4226 		return event->funcs->raw(iter, 0, event);
4227 
4228 	trace_seq_printf(s, "%d ?\n", entry->type);
4229 
4230 	return trace_handle_return(s);
4231 }
4232 
4233 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4234 {
4235 	struct trace_array *tr = iter->tr;
4236 	struct trace_seq *s = &iter->seq;
4237 	unsigned char newline = '\n';
4238 	struct trace_entry *entry;
4239 	struct trace_event *event;
4240 
4241 	entry = iter->ent;
4242 
4243 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4244 		SEQ_PUT_HEX_FIELD(s, entry->pid);
4245 		SEQ_PUT_HEX_FIELD(s, iter->cpu);
4246 		SEQ_PUT_HEX_FIELD(s, iter->ts);
4247 		if (trace_seq_has_overflowed(s))
4248 			return TRACE_TYPE_PARTIAL_LINE;
4249 	}
4250 
4251 	event = ftrace_find_event(entry->type);
4252 	if (event) {
4253 		enum print_line_t ret = event->funcs->hex(iter, 0, event);
4254 		if (ret != TRACE_TYPE_HANDLED)
4255 			return ret;
4256 	}
4257 
4258 	SEQ_PUT_FIELD(s, newline);
4259 
4260 	return trace_handle_return(s);
4261 }
4262 
4263 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4264 {
4265 	struct trace_array *tr = iter->tr;
4266 	struct trace_seq *s = &iter->seq;
4267 	struct trace_entry *entry;
4268 	struct trace_event *event;
4269 
4270 	entry = iter->ent;
4271 
4272 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4273 		SEQ_PUT_FIELD(s, entry->pid);
4274 		SEQ_PUT_FIELD(s, iter->cpu);
4275 		SEQ_PUT_FIELD(s, iter->ts);
4276 		if (trace_seq_has_overflowed(s))
4277 			return TRACE_TYPE_PARTIAL_LINE;
4278 	}
4279 
4280 	event = ftrace_find_event(entry->type);
4281 	return event ? event->funcs->binary(iter, 0, event) :
4282 		TRACE_TYPE_HANDLED;
4283 }
4284 
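/* Return 1 if there are no more entries to read in the selected buffer(s). */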
4285 int trace_empty(struct trace_iterator *iter)
4286 {
4287 	struct ring_buffer_iter *buf_iter;
4288 	int cpu;
4289 
4290 	/* If we are looking at one CPU buffer, only check that one */
4291 	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4292 		cpu = iter->cpu_file;
4293 		buf_iter = trace_buffer_iter(iter, cpu);
4294 		if (buf_iter) {
4295 			if (!ring_buffer_iter_empty(buf_iter))
4296 				return 0;
4297 		} else {
4298 			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4299 				return 0;
4300 		}
4301 		return 1;
4302 	}
4303 
4304 	for_each_tracing_cpu(cpu) {
4305 		buf_iter = trace_buffer_iter(iter, cpu);
4306 		if (buf_iter) {
4307 			if (!ring_buffer_iter_empty(buf_iter))
4308 				return 0;
4309 		} else {
4310 			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4311 				return 0;
4312 		}
4313 	}
4314 
4315 	return 1;
4316 }
4317 
4318 /*  Called with trace_event_read_lock() held. */
4319 enum print_line_t print_trace_line(struct trace_iterator *iter)
4320 {
4321 	struct trace_array *tr = iter->tr;
4322 	unsigned long trace_flags = tr->trace_flags;
4323 	enum print_line_t ret;
4324 
4325 	if (iter->lost_events) {
4326 		if (iter->lost_events == (unsigned long)-1)
4327 			trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4328 					 iter->cpu);
4329 		else
4330 			trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4331 					 iter->cpu, iter->lost_events);
4332 		if (trace_seq_has_overflowed(&iter->seq))
4333 			return TRACE_TYPE_PARTIAL_LINE;
4334 	}
4335 
4336 	if (iter->trace && iter->trace->print_line) {
4337 		ret = iter->trace->print_line(iter);
4338 		if (ret != TRACE_TYPE_UNHANDLED)
4339 			return ret;
4340 	}
4341 
4342 	if (iter->ent->type == TRACE_BPUTS &&
4343 			trace_flags & TRACE_ITER_PRINTK &&
4344 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4345 		return trace_print_bputs_msg_only(iter);
4346 
4347 	if (iter->ent->type == TRACE_BPRINT &&
4348 			trace_flags & TRACE_ITER_PRINTK &&
4349 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4350 		return trace_print_bprintk_msg_only(iter);
4351 
4352 	if (iter->ent->type == TRACE_PRINT &&
4353 			trace_flags & TRACE_ITER_PRINTK &&
4354 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4355 		return trace_print_printk_msg_only(iter);
4356 
4357 	if (trace_flags & TRACE_ITER_BIN)
4358 		return print_bin_fmt(iter);
4359 
4360 	if (trace_flags & TRACE_ITER_HEX)
4361 		return print_hex_fmt(iter);
4362 
4363 	if (trace_flags & TRACE_ITER_RAW)
4364 		return print_raw_fmt(iter);
4365 
4366 	return print_trace_fmt(iter);
4367 }
4368 
4369 void trace_latency_header(struct seq_file *m)
4370 {
4371 	struct trace_iterator *iter = m->private;
4372 	struct trace_array *tr = iter->tr;
4373 
4374 	/* print nothing if the buffers are empty */
4375 	if (trace_empty(iter))
4376 		return;
4377 
4378 	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4379 		print_trace_header(m, iter);
4380 
4381 	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4382 		print_lat_help_header(m);
4383 }
4384 
4385 void trace_default_header(struct seq_file *m)
4386 {
4387 	struct trace_iterator *iter = m->private;
4388 	struct trace_array *tr = iter->tr;
4389 	unsigned long trace_flags = tr->trace_flags;
4390 
4391 	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4392 		return;
4393 
4394 	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4395 		/* print nothing if the buffers are empty */
4396 		if (trace_empty(iter))
4397 			return;
4398 		print_trace_header(m, iter);
4399 		if (!(trace_flags & TRACE_ITER_VERBOSE))
4400 			print_lat_help_header(m);
4401 	} else {
4402 		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4403 			if (trace_flags & TRACE_ITER_IRQ_INFO)
4404 				print_func_help_header_irq(iter->array_buffer,
4405 							   m, trace_flags);
4406 			else
4407 				print_func_help_header(iter->array_buffer, m,
4408 						       trace_flags);
4409 		}
4410 	}
4411 }
4412 
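/* If ftrace has been killed, warn in the output that function events may be missing. */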
4413 static void test_ftrace_alive(struct seq_file *m)
4414 {
4415 	if (!ftrace_is_dead())
4416 		return;
4417 	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4418 		    "#          MAY BE MISSING FUNCTION EVENTS\n");
4419 }
4420 
4421 #ifdef CONFIG_TRACER_MAX_TRACE
4422 static void show_snapshot_main_help(struct seq_file *m)
4423 {
4424 	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4425 		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4426 		    "#                      Takes a snapshot of the main buffer.\n"
4427 		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4428 		    "#                      (Doesn't have to be '2' works with any number that\n"
4429 		    "#                       is not a '0' or '1')\n");
4430 }
4431 
4432 static void show_snapshot_percpu_help(struct seq_file *m)
4433 {
4434 	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4435 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4436 	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4437 		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
4438 #else
4439 	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4440 		    "#                     Must use main snapshot file to allocate.\n");
4441 #endif
4442 	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4443 		    "#                      (Doesn't have to be '2' works with any number that\n"
4444 		    "#                       is not a '0' or '1')\n");
4445 }
4446 
4447 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4448 {
4449 	if (iter->tr->allocated_snapshot)
4450 		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4451 	else
4452 		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4453 
4454 	seq_puts(m, "# Snapshot commands:\n");
4455 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4456 		show_snapshot_main_help(m);
4457 	else
4458 		show_snapshot_percpu_help(m);
4459 }
4460 #else
4461 /* Should never be called */
4462 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4463 #endif
4464 
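/*
 * seq_file ->show() callback: print headers at the start of the trace,
 * otherwise print the next trace line (or re-emit a leftover one that
 * overflowed the seq_file buffer last time).
 */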
4465 static int s_show(struct seq_file *m, void *v)
4466 {
4467 	struct trace_iterator *iter = v;
4468 	int ret;
4469 
4470 	if (iter->ent == NULL) {
4471 		if (iter->tr) {
4472 			seq_printf(m, "# tracer: %s\n", iter->trace->name);
4473 			seq_puts(m, "#\n");
4474 			test_ftrace_alive(m);
4475 		}
4476 		if (iter->snapshot && trace_empty(iter))
4477 			print_snapshot_help(m, iter);
4478 		else if (iter->trace && iter->trace->print_header)
4479 			iter->trace->print_header(m);
4480 		else
4481 			trace_default_header(m);
4482 
4483 	} else if (iter->leftover) {
4484 		/*
4485 		 * If we filled the seq_file buffer earlier, we
4486 		 * want to just show it now.
4487 		 */
4488 		ret = trace_print_seq(m, &iter->seq);
4489 
4490 		/* ret should this time be zero, but you never know */
4491 		iter->leftover = ret;
4492 
4493 	} else {
4494 		ret = print_trace_line(iter);
4495 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
4496 			iter->seq.full = 0;
4497 			trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
4498 		}
4499 		ret = trace_print_seq(m, &iter->seq);
4500 		/*
4501 		 * If we overflow the seq_file buffer, then it will
4502 		 * ask us for this data again at start up.
4503 		 * Use that instead.
4504 		 *  ret is 0 if seq_file write succeeded.
4505 		 *        -1 otherwise.
4506 		 */
4507 		iter->leftover = ret;
4508 	}
4509 
4510 	return 0;
4511 }
4512 
4513 /*
4514  * Should be used after trace_array_get(), trace_types_lock
4515  * ensures that i_cdev was already initialized.
4516  */
4517 static inline int tracing_get_cpu(struct inode *inode)
4518 {
4519 	if (inode->i_cdev) /* See trace_create_cpu_file() */
4520 		return (long)inode->i_cdev - 1;
4521 	return RING_BUFFER_ALL_CPUS;
4522 }
4523 
4524 static const struct seq_operations tracer_seq_ops = {
4525 	.start		= s_start,
4526 	.next		= s_next,
4527 	.stop		= s_stop,
4528 	.show		= s_show,
4529 };
4530 
4531 /*
4532  * Note, as iter itself can be allocated and freed in different
4533  * ways, this function is only used to free its content, and not
4534  * the iterator itself. The only requirement for all the allocations
4535  * is that they must zero all fields (kzalloc), as freeing works with
4536  * either allocated content or NULL.
4537  */
4538 static void free_trace_iter_content(struct trace_iterator *iter)
4539 {
4540 	/* The fmt is either NULL, allocated or points to static_fmt_buf */
4541 	if (iter->fmt != static_fmt_buf)
4542 		kfree(iter->fmt);
4543 
4544 	kfree(iter->temp);
4545 	kfree(iter->buffer_iter);
4546 	mutex_destroy(&iter->mutex);
4547 	free_cpumask_var(iter->started);
4548 }
4549 
4550 static struct trace_iterator *
4551 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4552 {
4553 	struct trace_array *tr = inode->i_private;
4554 	struct trace_iterator *iter;
4555 	int cpu;
4556 
4557 	if (tracing_disabled)
4558 		return ERR_PTR(-ENODEV);
4559 
4560 	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4561 	if (!iter)
4562 		return ERR_PTR(-ENOMEM);
4563 
4564 	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4565 				    GFP_KERNEL);
4566 	if (!iter->buffer_iter)
4567 		goto release;
4568 
4569 	/*
4570 	 * trace_find_next_entry() may need to save off iter->ent.
4571 	 * It will place it into the iter->temp buffer. As most
4572 	 * events are less than 128 bytes, allocate a buffer of that size.
4573 	 * If one is greater, then trace_find_next_entry() will
4574 	 * allocate a new buffer to adjust for the bigger iter->ent.
4575 	 * It's not critical if it fails to get allocated here.
4576 	 */
4577 	iter->temp = kmalloc(128, GFP_KERNEL);
4578 	if (iter->temp)
4579 		iter->temp_size = 128;
4580 
4581 	/*
4582 	 * trace_event_printf() may need to modify the given format
4583 	 * string to replace %p with %px so that it shows the real address
4584 	 * instead of a hash value. However, that is only needed for event
4585 	 * tracing; other tracers may not need it. Defer the allocation
4586 	 * until it is needed.
4587 	 */
4588 	iter->fmt = NULL;
4589 	iter->fmt_size = 0;
4590 
4591 	mutex_lock(&trace_types_lock);
4592 	iter->trace = tr->current_trace;
4593 
4594 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4595 		goto fail;
4596 
4597 	iter->tr = tr;
4598 
4599 #ifdef CONFIG_TRACER_MAX_TRACE
4600 	/* Currently only the top directory has a snapshot */
4601 	if (tr->current_trace->print_max || snapshot)
4602 		iter->array_buffer = &tr->max_buffer;
4603 	else
4604 #endif
4605 		iter->array_buffer = &tr->array_buffer;
4606 	iter->snapshot = snapshot;
4607 	iter->pos = -1;
4608 	iter->cpu_file = tracing_get_cpu(inode);
4609 	mutex_init(&iter->mutex);
4610 
4611 	/* Notify the tracer early; before we stop tracing. */
4612 	if (iter->trace->open)
4613 		iter->trace->open(iter);
4614 
4615 	/* Annotate start of buffers if we had overruns */
4616 	if (ring_buffer_overruns(iter->array_buffer->buffer))
4617 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
4618 
4619 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
4620 	if (trace_clocks[tr->clock_id].in_ns)
4621 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4622 
4623 	/*
4624 	 * If pause-on-trace is enabled, then stop the trace while
4625 	 * dumping, unless this is the "snapshot" file
4626 	 */
4627 	if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4628 		tracing_stop_tr(tr);
4629 
4630 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4631 		for_each_tracing_cpu(cpu) {
4632 			iter->buffer_iter[cpu] =
4633 				ring_buffer_read_prepare(iter->array_buffer->buffer,
4634 							 cpu, GFP_KERNEL);
4635 		}
4636 		ring_buffer_read_prepare_sync();
4637 		for_each_tracing_cpu(cpu) {
4638 			ring_buffer_read_start(iter->buffer_iter[cpu]);
4639 			tracing_iter_reset(iter, cpu);
4640 		}
4641 	} else {
4642 		cpu = iter->cpu_file;
4643 		iter->buffer_iter[cpu] =
4644 			ring_buffer_read_prepare(iter->array_buffer->buffer,
4645 						 cpu, GFP_KERNEL);
4646 		ring_buffer_read_prepare_sync();
4647 		ring_buffer_read_start(iter->buffer_iter[cpu]);
4648 		tracing_iter_reset(iter, cpu);
4649 	}
4650 
4651 	mutex_unlock(&trace_types_lock);
4652 
4653 	return iter;
4654 
4655  fail:
4656 	mutex_unlock(&trace_types_lock);
4657 	free_trace_iter_content(iter);
4658 release:
4659 	seq_release_private(inode, file);
4660 	return ERR_PTR(-ENOMEM);
4661 }
4662 
4663 int tracing_open_generic(struct inode *inode, struct file *filp)
4664 {
4665 	int ret;
4666 
4667 	ret = tracing_check_open_get_tr(NULL);
4668 	if (ret)
4669 		return ret;
4670 
4671 	filp->private_data = inode->i_private;
4672 	return 0;
4673 }
4674 
4675 bool tracing_is_disabled(void)
4676 {
4677 	return tracing_disabled ? true : false;
4678 }
4679 
4680 /*
4681  * Open and update trace_array ref count.
4682  * Must have the current trace_array passed to it.
4683  */
4684 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4685 {
4686 	struct trace_array *tr = inode->i_private;
4687 	int ret;
4688 
4689 	ret = tracing_check_open_get_tr(tr);
4690 	if (ret)
4691 		return ret;
4692 
4693 	filp->private_data = inode->i_private;
4694 
4695 	return 0;
4696 }
4697 
4698 /*
4699  * The private pointer of the inode is the trace_event_file.
4700  * Update the tr ref count associated to it.
4701  */
4702 int tracing_open_file_tr(struct inode *inode, struct file *filp)
4703 {
4704 	struct trace_event_file *file = inode->i_private;
4705 	int ret;
4706 
4707 	ret = tracing_check_open_get_tr(file->tr);
4708 	if (ret)
4709 		return ret;
4710 
4711 	mutex_lock(&event_mutex);
4712 
4713 	/* Fail if the file is marked for removal */
4714 	if (file->flags & EVENT_FILE_FL_FREED) {
4715 		trace_array_put(file->tr);
4716 		ret = -ENODEV;
4717 	} else {
4718 		event_file_get(file);
4719 	}
4720 
4721 	mutex_unlock(&event_mutex);
4722 	if (ret)
4723 		return ret;
4724 
4725 	filp->private_data = inode->i_private;
4726 
4727 	return 0;
4728 }
4729 
4730 int tracing_release_file_tr(struct inode *inode, struct file *filp)
4731 {
4732 	struct trace_event_file *file = inode->i_private;
4733 
4734 	trace_array_put(file->tr);
4735 	event_file_put(file);
4736 
4737 	return 0;
4738 }
4739 
4740 int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
4741 {
4742 	tracing_release_file_tr(inode, filp);
4743 	return single_release(inode, filp);
4744 }
4745 
4746 static int tracing_mark_open(struct inode *inode, struct file *filp)
4747 {
4748 	stream_open(inode, filp);
4749 	return tracing_open_generic_tr(inode, filp);
4750 }
4751 
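/*
 * Release a "trace" file: finish the per-cpu ring buffer iterators, let the
 * tracer close, restart tracing if the open paused it, and free the iterator.
 */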
4752 static int tracing_release(struct inode *inode, struct file *file)
4753 {
4754 	struct trace_array *tr = inode->i_private;
4755 	struct seq_file *m = file->private_data;
4756 	struct trace_iterator *iter;
4757 	int cpu;
4758 
4759 	if (!(file->f_mode & FMODE_READ)) {
4760 		trace_array_put(tr);
4761 		return 0;
4762 	}
4763 
4764 	/* Writes do not use seq_file */
4765 	iter = m->private;
4766 	mutex_lock(&trace_types_lock);
4767 
4768 	for_each_tracing_cpu(cpu) {
4769 		if (iter->buffer_iter[cpu])
4770 			ring_buffer_read_finish(iter->buffer_iter[cpu]);
4771 	}
4772 
4773 	if (iter->trace && iter->trace->close)
4774 		iter->trace->close(iter);
4775 
4776 	if (!iter->snapshot && tr->stop_count)
4777 		/* reenable tracing if it was previously enabled */
4778 		tracing_start_tr(tr);
4779 
4780 	__trace_array_put(tr);
4781 
4782 	mutex_unlock(&trace_types_lock);
4783 
4784 	free_trace_iter_content(iter);
4785 	seq_release_private(inode, file);
4786 
4787 	return 0;
4788 }
4789 
4790 int tracing_release_generic_tr(struct inode *inode, struct file *file)
4791 {
4792 	struct trace_array *tr = inode->i_private;
4793 
4794 	trace_array_put(tr);
4795 	return 0;
4796 }
4797 
4798 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4799 {
4800 	struct trace_array *tr = inode->i_private;
4801 
4802 	trace_array_put(tr);
4803 
4804 	return single_release(inode, file);
4805 }
4806 
4807 static int tracing_open(struct inode *inode, struct file *file)
4808 {
4809 	struct trace_array *tr = inode->i_private;
4810 	struct trace_iterator *iter;
4811 	int ret;
4812 
4813 	ret = tracing_check_open_get_tr(tr);
4814 	if (ret)
4815 		return ret;
4816 
4817 	/* If this file was open for write, then erase contents */
4818 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4819 		int cpu = tracing_get_cpu(inode);
4820 		struct array_buffer *trace_buf = &tr->array_buffer;
4821 
4822 #ifdef CONFIG_TRACER_MAX_TRACE
4823 		if (tr->current_trace->print_max)
4824 			trace_buf = &tr->max_buffer;
4825 #endif
4826 
4827 		if (cpu == RING_BUFFER_ALL_CPUS)
4828 			tracing_reset_online_cpus(trace_buf);
4829 		else
4830 			tracing_reset_cpu(trace_buf, cpu);
4831 	}
4832 
4833 	if (file->f_mode & FMODE_READ) {
4834 		iter = __tracing_open(inode, file, false);
4835 		if (IS_ERR(iter))
4836 			ret = PTR_ERR(iter);
4837 		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4838 			iter->iter_flags |= TRACE_FILE_LAT_FMT;
4839 	}
4840 
4841 	if (ret < 0)
4842 		trace_array_put(tr);
4843 
4844 	return ret;
4845 }
4846 
4847 /*
4848  * Some tracers are not suitable for instance buffers.
4849  * A tracer is always available for the global array (toplevel)
4850  * or if it explicitly states that it is.
4851  */
4852 static bool
4853 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4854 {
4855 #ifdef CONFIG_TRACER_SNAPSHOT
4856 	/* arrays with mapped buffer range do not have snapshots */
4857 	if (tr->range_addr_start && t->use_max_tr)
4858 		return false;
4859 #endif
4860 	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4861 }
4862 
4863 /* Find the next tracer that this trace array may use */
4864 static struct tracer *
4865 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4866 {
4867 	while (t && !trace_ok_for_array(t, tr))
4868 		t = t->next;
4869 
4870 	return t;
4871 }
4872 
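/* seq_file callbacks for listing the registered tracers (available_tracers). */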
4873 static void *
4874 t_next(struct seq_file *m, void *v, loff_t *pos)
4875 {
4876 	struct trace_array *tr = m->private;
4877 	struct tracer *t = v;
4878 
4879 	(*pos)++;
4880 
4881 	if (t)
4882 		t = get_tracer_for_array(tr, t->next);
4883 
4884 	return t;
4885 }
4886 
4887 static void *t_start(struct seq_file *m, loff_t *pos)
4888 {
4889 	struct trace_array *tr = m->private;
4890 	struct tracer *t;
4891 	loff_t l = 0;
4892 
4893 	mutex_lock(&trace_types_lock);
4894 
4895 	t = get_tracer_for_array(tr, trace_types);
4896 	for (; t && l < *pos; t = t_next(m, t, &l))
4897 			;
4898 
4899 	return t;
4900 }
4901 
4902 static void t_stop(struct seq_file *m, void *p)
4903 {
4904 	mutex_unlock(&trace_types_lock);
4905 }
4906 
4907 static int t_show(struct seq_file *m, void *v)
4908 {
4909 	struct tracer *t = v;
4910 
4911 	if (!t)
4912 		return 0;
4913 
4914 	seq_puts(m, t->name);
4915 	if (t->next)
4916 		seq_putc(m, ' ');
4917 	else
4918 		seq_putc(m, '\n');
4919 
4920 	return 0;
4921 }
4922 
4923 static const struct seq_operations show_traces_seq_ops = {
4924 	.start		= t_start,
4925 	.next		= t_next,
4926 	.stop		= t_stop,
4927 	.show		= t_show,
4928 };
4929 
4930 static int show_traces_open(struct inode *inode, struct file *file)
4931 {
4932 	struct trace_array *tr = inode->i_private;
4933 	struct seq_file *m;
4934 	int ret;
4935 
4936 	ret = tracing_check_open_get_tr(tr);
4937 	if (ret)
4938 		return ret;
4939 
4940 	ret = seq_open(file, &show_traces_seq_ops);
4941 	if (ret) {
4942 		trace_array_put(tr);
4943 		return ret;
4944 	}
4945 
4946 	m = file->private_data;
4947 	m->private = tr;
4948 
4949 	return 0;
4950 }
4951 
4952 static int tracing_seq_release(struct inode *inode, struct file *file)
4953 {
4954 	struct trace_array *tr = inode->i_private;
4955 
4956 	trace_array_put(tr);
4957 	return seq_release(inode, file);
4958 }
4959 
4960 static ssize_t
4961 tracing_write_stub(struct file *filp, const char __user *ubuf,
4962 		   size_t count, loff_t *ppos)
4963 {
4964 	return count;
4965 }
4966 
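/*
 * lseek on tracing files: only reads (which use seq_file) can really seek;
 * for write-only opens the position is simply reset to zero.
 */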
4967 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4968 {
4969 	int ret;
4970 
4971 	if (file->f_mode & FMODE_READ)
4972 		ret = seq_lseek(file, offset, whence);
4973 	else
4974 		file->f_pos = ret = 0;
4975 
4976 	return ret;
4977 }
4978 
4979 static const struct file_operations tracing_fops = {
4980 	.open		= tracing_open,
4981 	.read		= seq_read,
4982 	.read_iter	= seq_read_iter,
4983 	.splice_read	= copy_splice_read,
4984 	.write		= tracing_write_stub,
4985 	.llseek		= tracing_lseek,
4986 	.release	= tracing_release,
4987 };
4988 
4989 static const struct file_operations show_traces_fops = {
4990 	.open		= show_traces_open,
4991 	.read		= seq_read,
4992 	.llseek		= seq_lseek,
4993 	.release	= tracing_seq_release,
4994 };
4995 
4996 static ssize_t
4997 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4998 		     size_t count, loff_t *ppos)
4999 {
5000 	struct trace_array *tr = file_inode(filp)->i_private;
5001 	char *mask_str;
5002 	int len;
5003 
5004 	len = snprintf(NULL, 0, "%*pb\n",
5005 		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
5006 	mask_str = kmalloc(len, GFP_KERNEL);
5007 	if (!mask_str)
5008 		return -ENOMEM;
5009 
5010 	len = snprintf(mask_str, len, "%*pb\n",
5011 		       cpumask_pr_args(tr->tracing_cpumask));
5012 	if (len >= count) {
5013 		count = -EINVAL;
5014 		goto out_err;
5015 	}
5016 	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5017 
5018 out_err:
5019 	kfree(mask_str);
5020 
5021 	return count;
5022 }
5023 
5024 int tracing_set_cpumask(struct trace_array *tr,
5025 			cpumask_var_t tracing_cpumask_new)
5026 {
5027 	int cpu;
5028 
5029 	if (!tr)
5030 		return -EINVAL;
5031 
5032 	local_irq_disable();
5033 	arch_spin_lock(&tr->max_lock);
5034 	for_each_tracing_cpu(cpu) {
5035 		/*
5036 		 * Increase/decrease the disabled counter if we are
5037 		 * about to flip a bit in the cpumask:
5038 		 */
5039 		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5040 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5041 			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5042 			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5043 #ifdef CONFIG_TRACER_MAX_TRACE
5044 			ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5045 #endif
5046 		}
5047 		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5048 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5049 			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5050 			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5051 #ifdef CONFIG_TRACER_MAX_TRACE
5052 			ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5053 #endif
5054 		}
5055 	}
5056 	arch_spin_unlock(&tr->max_lock);
5057 	local_irq_enable();
5058 
5059 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5060 
5061 	return 0;
5062 }
5063 
5064 static ssize_t
5065 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5066 		      size_t count, loff_t *ppos)
5067 {
5068 	struct trace_array *tr = file_inode(filp)->i_private;
5069 	cpumask_var_t tracing_cpumask_new;
5070 	int err;
5071 
5072 	if (count == 0 || count > KMALLOC_MAX_SIZE)
5073 		return -EINVAL;
5074 
5075 	if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5076 		return -ENOMEM;
5077 
5078 	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5079 	if (err)
5080 		goto err_free;
5081 
5082 	err = tracing_set_cpumask(tr, tracing_cpumask_new);
5083 	if (err)
5084 		goto err_free;
5085 
5086 	free_cpumask_var(tracing_cpumask_new);
5087 
5088 	return count;
5089 
5090 err_free:
5091 	free_cpumask_var(tracing_cpumask_new);
5092 
5093 	return err;
5094 }
5095 
5096 static const struct file_operations tracing_cpumask_fops = {
5097 	.open		= tracing_open_generic_tr,
5098 	.read		= tracing_cpumask_read,
5099 	.write		= tracing_cpumask_write,
5100 	.release	= tracing_release_generic_tr,
5101 	.llseek		= generic_file_llseek,
5102 };
5103 
5104 static int tracing_trace_options_show(struct seq_file *m, void *v)
5105 {
5106 	struct tracer_opt *trace_opts;
5107 	struct trace_array *tr = m->private;
5108 	u32 tracer_flags;
5109 	int i;
5110 
5111 	guard(mutex)(&trace_types_lock);
5112 
5113 	tracer_flags = tr->current_trace->flags->val;
5114 	trace_opts = tr->current_trace->flags->opts;
5115 
5116 	for (i = 0; trace_options[i]; i++) {
5117 		if (tr->trace_flags & (1 << i))
5118 			seq_printf(m, "%s\n", trace_options[i]);
5119 		else
5120 			seq_printf(m, "no%s\n", trace_options[i]);
5121 	}
5122 
5123 	for (i = 0; trace_opts[i].name; i++) {
5124 		if (tracer_flags & trace_opts[i].bit)
5125 			seq_printf(m, "%s\n", trace_opts[i].name);
5126 		else
5127 			seq_printf(m, "no%s\n", trace_opts[i].name);
5128 	}
5129 
5130 	return 0;
5131 }
5132 
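/*
 * Flip a single tracer-specific option bit, after giving the tracer's
 * set_flag() callback a chance to reject the change.
 */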
5133 static int __set_tracer_option(struct trace_array *tr,
5134 			       struct tracer_flags *tracer_flags,
5135 			       struct tracer_opt *opts, int neg)
5136 {
5137 	struct tracer *trace = tracer_flags->trace;
5138 	int ret;
5139 
5140 	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5141 	if (ret)
5142 		return ret;
5143 
5144 	if (neg)
5145 		tracer_flags->val &= ~opts->bit;
5146 	else
5147 		tracer_flags->val |= opts->bit;
5148 	return 0;
5149 }
5150 
5151 /* Try to assign a tracer specific option */
5152 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5153 {
5154 	struct tracer *trace = tr->current_trace;
5155 	struct tracer_flags *tracer_flags = trace->flags;
5156 	struct tracer_opt *opts = NULL;
5157 	int i;
5158 
5159 	for (i = 0; tracer_flags->opts[i].name; i++) {
5160 		opts = &tracer_flags->opts[i];
5161 
5162 		if (strcmp(cmp, opts->name) == 0)
5163 			return __set_tracer_option(tr, trace->flags, opts, neg);
5164 	}
5165 
5166 	return -EINVAL;
5167 }
5168 
5169 /* Some tracers require overwrite to stay enabled */
5170 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5171 {
5172 	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5173 		return -1;
5174 
5175 	return 0;
5176 }
5177 
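/*
 * Set or clear one of the core trace option flags on @tr and apply its
 * side effects (cmdline/tgid recording, fork following, overwrite mode,
 * printk redirection, ...).
 */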
5178 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5179 {
5180 	if ((mask == TRACE_ITER_RECORD_TGID) ||
5181 	    (mask == TRACE_ITER_RECORD_CMD) ||
5182 	    (mask == TRACE_ITER_TRACE_PRINTK))
5183 		lockdep_assert_held(&event_mutex);
5184 
5185 	/* Do nothing if the flag already has the requested state */
5186 	if (!!(tr->trace_flags & mask) == !!enabled)
5187 		return 0;
5188 
5189 	/* Give the tracer a chance to approve the change */
5190 	if (tr->current_trace->flag_changed)
5191 		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5192 			return -EINVAL;
5193 
5194 	if (mask == TRACE_ITER_TRACE_PRINTK) {
5195 		if (enabled) {
5196 			update_printk_trace(tr);
5197 		} else {
5198 			/*
5199 			 * The global_trace cannot clear this.
5200 			 * Its flag only gets cleared if another instance sets it.
5201 			 */
5202 			if (printk_trace == &global_trace)
5203 				return -EINVAL;
5204 			/*
5205 			 * An instance must always have it set.
5206 			 * By default, that's the global_trace instance.
5207 			 */
5208 			if (printk_trace == tr)
5209 				update_printk_trace(&global_trace);
5210 		}
5211 	}
5212 
5213 	if (enabled)
5214 		tr->trace_flags |= mask;
5215 	else
5216 		tr->trace_flags &= ~mask;
5217 
5218 	if (mask == TRACE_ITER_RECORD_CMD)
5219 		trace_event_enable_cmd_record(enabled);
5220 
5221 	if (mask == TRACE_ITER_RECORD_TGID) {
5222 
5223 		if (trace_alloc_tgid_map() < 0) {
5224 			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5225 			return -ENOMEM;
5226 		}
5227 
5228 		trace_event_enable_tgid_record(enabled);
5229 	}
5230 
5231 	if (mask == TRACE_ITER_EVENT_FORK)
5232 		trace_event_follow_fork(tr, enabled);
5233 
5234 	if (mask == TRACE_ITER_FUNC_FORK)
5235 		ftrace_pid_follow_fork(tr, enabled);
5236 
5237 	if (mask == TRACE_ITER_OVERWRITE) {
5238 		ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5239 #ifdef CONFIG_TRACER_MAX_TRACE
5240 		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5241 #endif
5242 	}
5243 
5244 	if (mask == TRACE_ITER_PRINTK) {
5245 		trace_printk_start_stop_comm(enabled);
5246 		trace_printk_control(enabled);
5247 	}
5248 
5249 	return 0;
5250 }
5251 
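/*
 * Apply a single option string, optionally prefixed with "no" to negate it:
 * core trace options are tried first, then tracer-specific ones.
 */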
5252 int trace_set_options(struct trace_array *tr, char *option)
5253 {
5254 	char *cmp;
5255 	int neg = 0;
5256 	int ret;
5257 	size_t orig_len = strlen(option);
5258 	int len;
5259 
5260 	cmp = strstrip(option);
5261 
5262 	len = str_has_prefix(cmp, "no");
5263 	if (len)
5264 		neg = 1;
5265 
5266 	cmp += len;
5267 
5268 	mutex_lock(&event_mutex);
5269 	mutex_lock(&trace_types_lock);
5270 
5271 	ret = match_string(trace_options, -1, cmp);
5272 	/* If it is not a core option, try the tracer-specific options */
5273 	if (ret < 0)
5274 		ret = set_tracer_option(tr, cmp, neg);
5275 	else
5276 		ret = set_tracer_flag(tr, 1 << ret, !neg);
5277 
5278 	mutex_unlock(&trace_types_lock);
5279 	mutex_unlock(&event_mutex);
5280 
5281 	/*
5282 	 * If the first trailing whitespace is replaced with '\0' by strstrip,
5283 	 * turn it back into a space.
5284 	 */
5285 	if (orig_len > strlen(option))
5286 		option[strlen(option)] = ' ';
5287 
5288 	return ret;
5289 }
5290 
5291 static void __init apply_trace_boot_options(void)
5292 {
5293 	char *buf = trace_boot_options_buf;
5294 	char *option;
5295 
5296 	while (true) {
5297 		option = strsep(&buf, ",");
5298 
5299 		if (!option)
5300 			break;
5301 
5302 		if (*option)
5303 			trace_set_options(&global_trace, option);
5304 
5305 		/* Put back the comma to allow this to be called again */
5306 		if (buf)
5307 			*(buf - 1) = ',';
5308 	}
5309 }
5310 
5311 static ssize_t
5312 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5313 			size_t cnt, loff_t *ppos)
5314 {
5315 	struct seq_file *m = filp->private_data;
5316 	struct trace_array *tr = m->private;
5317 	char buf[64];
5318 	int ret;
5319 
5320 	if (cnt >= sizeof(buf))
5321 		return -EINVAL;
5322 
5323 	if (copy_from_user(buf, ubuf, cnt))
5324 		return -EFAULT;
5325 
5326 	buf[cnt] = 0;
5327 
5328 	ret = trace_set_options(tr, buf);
5329 	if (ret < 0)
5330 		return ret;
5331 
5332 	*ppos += cnt;
5333 
5334 	return cnt;
5335 }
5336 
5337 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5338 {
5339 	struct trace_array *tr = inode->i_private;
5340 	int ret;
5341 
5342 	ret = tracing_check_open_get_tr(tr);
5343 	if (ret)
5344 		return ret;
5345 
5346 	ret = single_open(file, tracing_trace_options_show, inode->i_private);
5347 	if (ret < 0)
5348 		trace_array_put(tr);
5349 
5350 	return ret;
5351 }
5352 
5353 static const struct file_operations tracing_iter_fops = {
5354 	.open		= tracing_trace_options_open,
5355 	.read		= seq_read,
5356 	.llseek		= seq_lseek,
5357 	.release	= tracing_single_release_tr,
5358 	.write		= tracing_trace_options_write,
5359 };
5360 
5361 static const char readme_msg[] =
5362 	"tracing mini-HOWTO:\n\n"
5363 	"By default tracefs removes all OTH file permission bits.\n"
5364 	"When mounting tracefs an optional group id can be specified\n"
5365 	"which adds the group to every directory and file in tracefs:\n\n"
5366 	"\t e.g. mount -t tracefs [-o [gid=<gid>]] nodev /sys/kernel/tracing\n\n"
5367 	"# echo 0 > tracing_on : quick way to disable tracing\n"
5368 	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5369 	" Important files:\n"
5370 	"  trace\t\t\t- The static contents of the buffer\n"
5371 	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
5372 	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5373 	"  current_tracer\t- function and latency tracers\n"
5374 	"  available_tracers\t- list of configured tracers for current_tracer\n"
5375 	"  error_log\t- error log for failed commands (that support it)\n"
5376 	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
5377 	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
5378 	"  trace_clock\t\t- change the clock used to order events\n"
5379 	"       local:   Per cpu clock but may not be synced across CPUs\n"
5380 	"      global:   Synced across CPUs but slows tracing down.\n"
5381 	"     counter:   Not a clock, but just an increment\n"
5382 	"      uptime:   Jiffy counter from time of boot\n"
5383 	"        perf:   Same clock that perf events use\n"
5384 #ifdef CONFIG_X86_64
5385 	"     x86-tsc:   TSC cycle counter\n"
5386 #endif
5387 	"\n  timestamp_mode\t- view the mode used to timestamp events\n"
5388 	"       delta:   Delta difference against a buffer-wide timestamp\n"
5389 	"    absolute:   Absolute (standalone) timestamp\n"
5390 	"\n  trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5391 	"\n  trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5392 	"  tracing_cpumask\t- Limit which CPUs to trace\n"
5393 	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5394 	"\t\t\t  Remove sub-buffer with rmdir\n"
5395 	"  trace_options\t\t- Set format or modify how tracing happens\n"
5396 	"\t\t\t  Disable an option by prefixing 'no' to the\n"
5397 	"\t\t\t  option name\n"
5398 	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5399 #ifdef CONFIG_DYNAMIC_FTRACE
5400 	"\n  available_filter_functions - list of functions that can be filtered on\n"
5401 	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
5402 	"\t\t\t  functions\n"
5403 	"\t     accepts: func_full_name or glob-matching-pattern\n"
5404 	"\t     modules: Can select a group via module\n"
5405 	"\t      Format: :mod:<module-name>\n"
5406 	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
5407 	"\t    triggers: a command to perform when function is hit\n"
5408 	"\t      Format: <function>:<trigger>[:count]\n"
5409 	"\t     trigger: traceon, traceoff\n"
5410 	"\t\t      enable_event:<system>:<event>\n"
5411 	"\t\t      disable_event:<system>:<event>\n"
5412 #ifdef CONFIG_STACKTRACE
5413 	"\t\t      stacktrace\n"
5414 #endif
5415 #ifdef CONFIG_TRACER_SNAPSHOT
5416 	"\t\t      snapshot\n"
5417 #endif
5418 	"\t\t      dump\n"
5419 	"\t\t      cpudump\n"
5420 	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
5421 	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
5422 	"\t     The first one will disable tracing every time do_fault is hit\n"
5423 	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
5424 	"\t       The first time do trap is hit and it disables tracing, the\n"
5425 	"\t       counter will decrement to 2. If tracing is already disabled,\n"
5426 	"\t       the counter will not decrement. It only decrements when the\n"
5427 	"\t       trigger did work\n"
5428 	"\t     To remove trigger without count:\n"
5429 	"\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
5430 	"\t     To remove trigger with a count:\n"
5431 	"\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5432 	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
5433 	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5434 	"\t    modules: Can select a group via module command :mod:\n"
5435 	"\t    Does not accept triggers\n"
5436 #endif /* CONFIG_DYNAMIC_FTRACE */
5437 #ifdef CONFIG_FUNCTION_TRACER
5438 	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5439 	"\t\t    (function)\n"
5440 	"  set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5441 	"\t\t    (function)\n"
5442 #endif
5443 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5444 	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5445 	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5446 	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5447 #endif
5448 #ifdef CONFIG_TRACER_SNAPSHOT
5449 	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
5450 	"\t\t\t  snapshot buffer. Read the contents for more\n"
5451 	"\t\t\t  information\n"
5452 #endif
5453 #ifdef CONFIG_STACK_TRACER
5454 	"  stack_trace\t\t- Shows the max stack trace when active\n"
5455 	"  stack_max_size\t- Shows current max stack size that was traced\n"
5456 	"\t\t\t  Write into this file to reset the max size (trigger a\n"
5457 	"\t\t\t  new trace)\n"
5458 #ifdef CONFIG_DYNAMIC_FTRACE
5459 	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5460 	"\t\t\t  traces\n"
5461 #endif
5462 #endif /* CONFIG_STACK_TRACER */
5463 #ifdef CONFIG_DYNAMIC_EVENTS
5464 	"  dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5465 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5466 #endif
5467 #ifdef CONFIG_KPROBE_EVENTS
5468 	"  kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5469 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5470 #endif
5471 #ifdef CONFIG_UPROBE_EVENTS
5472 	"  uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5473 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5474 #endif
5475 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
5476     defined(CONFIG_FPROBE_EVENTS)
5477 	"\t  accepts: event-definitions (one definition per line)\n"
5478 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5479 	"\t   Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5480 	"\t           r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5481 #endif
5482 #ifdef CONFIG_FPROBE_EVENTS
5483 	"\t           f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5484 	"\t           t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
5485 #endif
5486 #ifdef CONFIG_HIST_TRIGGERS
5487 	"\t           s:[synthetic/]<event> <field> [<field>]\n"
5488 #endif
5489 	"\t           e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5490 	"\t           -:[<group>/][<event>]\n"
5491 #ifdef CONFIG_KPROBE_EVENTS
5492 	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5493   "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5494 #endif
5495 #ifdef CONFIG_UPROBE_EVENTS
5496   "   place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5497 #endif
5498 	"\t     args: <name>=fetcharg[:type]\n"
5499 	"\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5500 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5501 	"\t           $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5502 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
5503 	"\t           <argname>[->field[->field|.field...]],\n"
5504 #endif
5505 #else
5506 	"\t           $stack<index>, $stack, $retval, $comm,\n"
5507 #endif
5508 	"\t           +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5509 	"\t     kernel return probes support: $retval, $arg<N>, $comm\n"
5510 	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
5511 	"\t           b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5512 	"\t           symstr, %pd/%pD, <type>\\[<array-size>\\]\n"
5513 #ifdef CONFIG_HIST_TRIGGERS
5514 	"\t    field: <stype> <name>;\n"
5515 	"\t    stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5516 	"\t           [unsigned] char/int/long\n"
5517 #endif
5518 	"\t    efield: For event probes ('e' types), the field is on of the fields\n"
5519 	"\t            of the <attached-group>/<attached-event>.\n"
5520 #endif
5521 	"  set_event\t\t- Enables events by name written into it\n"
5522 	"\t\t\t  Can enable module events via: :mod:<module>\n"
5523 	"  events/\t\t- Directory containing all trace event subsystems:\n"
5524 	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5525 	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
5526 	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5527 	"\t\t\t  events\n"
5528 	"      filter\t\t- If set, only events passing filter are traced\n"
5529 	"  events/<system>/<event>/\t- Directory containing control files for\n"
5530 	"\t\t\t  <event>:\n"
5531 	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5532 	"      filter\t\t- If set, only events passing filter are traced\n"
5533 	"      trigger\t\t- If set, a command to perform when event is hit\n"
5534 	"\t    Format: <trigger>[:count][if <filter>]\n"
5535 	"\t   trigger: traceon, traceoff\n"
5536 	"\t            enable_event:<system>:<event>\n"
5537 	"\t            disable_event:<system>:<event>\n"
5538 #ifdef CONFIG_HIST_TRIGGERS
5539 	"\t            enable_hist:<system>:<event>\n"
5540 	"\t            disable_hist:<system>:<event>\n"
5541 #endif
5542 #ifdef CONFIG_STACKTRACE
5543 	"\t\t    stacktrace\n"
5544 #endif
5545 #ifdef CONFIG_TRACER_SNAPSHOT
5546 	"\t\t    snapshot\n"
5547 #endif
5548 #ifdef CONFIG_HIST_TRIGGERS
5549 	"\t\t    hist (see below)\n"
5550 #endif
5551 	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
5552 	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
5553 	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5554 	"\t                  events/block/block_unplug/trigger\n"
5555 	"\t   The first disables tracing every time block_unplug is hit.\n"
5556 	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
5557 	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
5558 	"\t     is hit and the 'nr_rq' event field has a value greater than 1.\n"
5559 	"\t   Like function triggers, the counter is only decremented if it\n"
5560 	"\t    actually enabled or disabled tracing.\n"
5561 	"\t   To remove a trigger without a count:\n"
5562 	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
5563 	"\t   To remove a trigger with a count:\n"
5564 	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
5565 	"\t   Filters can be ignored when removing a trigger.\n"
5566 #ifdef CONFIG_HIST_TRIGGERS
5567 	"      hist trigger\t- If set, event hits are aggregated into a hash table\n"
5568 	"\t    Format: hist:keys=<field1[,field2,...]>\n"
5569 	"\t            [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5570 	"\t            [:values=<field1[,field2,...]>]\n"
5571 	"\t            [:sort=<field1[,field2,...]>]\n"
5572 	"\t            [:size=#entries]\n"
5573 	"\t            [:pause][:continue][:clear]\n"
5574 	"\t            [:name=histname1]\n"
5575 	"\t            [:nohitcount]\n"
5576 	"\t            [:<handler>.<action>]\n"
5577 	"\t            [if <filter>]\n\n"
5578 	"\t    Note, special fields can be used as well:\n"
5579 	"\t            common_timestamp - to record current timestamp\n"
5580 	"\t            common_cpu - to record the CPU the event happened on\n"
5581 	"\n"
5582 	"\t    A hist trigger variable can be:\n"
5583 	"\t        - a reference to a field e.g. x=current_timestamp,\n"
5584 	"\t        - a reference to another variable e.g. y=$x,\n"
5585 	"\t        - a numeric literal: e.g. ms_per_sec=1000,\n"
5586 	"\t        - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5587 	"\n"
5588 	"\t    hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5589 	"\t    multiplication(*) and division(/) operators. An operand can be either a\n"
5590 	"\t    variable reference, field or numeric literal.\n"
5591 	"\n"
5592 	"\t    When a matching event is hit, an entry is added to a hash\n"
5593 	"\t    table using the key(s) and value(s) named, and the value of a\n"
5594 	"\t    sum called 'hitcount' is incremented.  Keys and values\n"
5595 	"\t    correspond to fields in the event's format description.  Keys\n"
5596 	"\t    can be any field, or the special string 'common_stacktrace'.\n"
5597 	"\t    Compound keys consisting of up to two fields can be specified\n"
5598 	"\t    by the 'keys' keyword.  Values must correspond to numeric\n"
5599 	"\t    fields.  Sort keys consisting of up to two fields can be\n"
5600 	"\t    specified using the 'sort' keyword.  The sort direction can\n"
5601 	"\t    be modified by appending '.descending' or '.ascending' to a\n"
5602 	"\t    sort field.  The 'size' parameter can be used to specify more\n"
5603 	"\t    or fewer than the default 2048 entries for the hashtable size.\n"
5604 	"\t    If a hist trigger is given a name using the 'name' parameter,\n"
5605 	"\t    its histogram data will be shared with other triggers of the\n"
5606 	"\t    same name, and trigger hits will update this common data.\n\n"
5607 	"\t    Reading the 'hist' file for the event will dump the hash\n"
5608 	"\t    table in its entirety to stdout.  If there are multiple hist\n"
5609 	"\t    triggers attached to an event, there will be a table for each\n"
5610 	"\t    trigger in the output.  The table displayed for a named\n"
5611 	"\t    trigger will be the same as any other instance having the\n"
5612 	"\t    same name.  The default format used to display a given field\n"
5613 	"\t    can be modified by appending any of the following modifiers\n"
5614 	"\t    to the field name, as applicable:\n\n"
5615 	"\t            .hex        display a number as a hex value\n"
5616 	"\t            .sym        display an address as a symbol\n"
5617 	"\t            .sym-offset display an address as a symbol and offset\n"
5618 	"\t            .execname   display a common_pid as a program name\n"
5619 	"\t            .syscall    display a syscall id as a syscall name\n"
5620 	"\t            .log2       display log2 value rather than raw number\n"
5621 	"\t            .buckets=size  display values in groups of size rather than raw number\n"
5622 	"\t            .usecs      display a common_timestamp in microseconds\n"
5623 	"\t            .percent    display a number as a percentage value\n"
5624 	"\t            .graph      display a bar-graph of a value\n\n"
5625 	"\t    The 'pause' parameter can be used to pause an existing hist\n"
5626 	"\t    trigger or to start a hist trigger but not log any events\n"
5627 	"\t    until told to do so.  'continue' can be used to start or\n"
5628 	"\t    restart a paused hist trigger.\n\n"
5629 	"\t    The 'clear' parameter will clear the contents of a running\n"
5630 	"\t    hist trigger and leave its current paused/active state\n"
5631 	"\t    unchanged.\n\n"
5632 	"\t    The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5633 	"\t    raw hitcount in the histogram.\n\n"
5634 	"\t    The enable_hist and disable_hist triggers can be used to\n"
5635 	"\t    have one event conditionally start and stop another event's\n"
5636 	"\t    already-attached hist trigger.  The syntax is analogous to\n"
5637 	"\t    the enable_event and disable_event triggers.\n\n"
5638 	"\t    Hist trigger handlers and actions are executed whenever a\n"
5639 	"\t    histogram entry is added or updated.  They take the form:\n\n"
5640 	"\t        <handler>.<action>\n\n"
5641 	"\t    The available handlers are:\n\n"
5642 	"\t        onmatch(matching.event)  - invoke on addition or update\n"
5643 	"\t        onmax(var)               - invoke if var exceeds current max\n"
5644 	"\t        onchange(var)            - invoke action if var changes\n\n"
5645 	"\t    The available actions are:\n\n"
5646 	"\t        trace(<synthetic_event>,param list)  - generate synthetic event\n"
5647 	"\t        save(field,...)                      - save current event fields\n"
5648 #ifdef CONFIG_TRACER_SNAPSHOT
5649 	"\t        snapshot()                           - snapshot the trace buffer\n\n"
5650 #endif
5651 #ifdef CONFIG_SYNTH_EVENTS
5652 	"  events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5653 	"\t  Write into this file to define/undefine new synthetic events.\n"
5654 	"\t     example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
5655 #endif
5656 #endif
5657 ;
5658 
5659 static ssize_t
5660 tracing_readme_read(struct file *filp, char __user *ubuf,
5661 		       size_t cnt, loff_t *ppos)
5662 {
5663 	return simple_read_from_buffer(ubuf, cnt, ppos,
5664 					readme_msg, strlen(readme_msg));
5665 }
5666 
5667 static const struct file_operations tracing_readme_fops = {
5668 	.open		= tracing_open_generic,
5669 	.read		= tracing_readme_read,
5670 	.llseek		= generic_file_llseek,
5671 };
5672 
5673 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
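/*
 * The trace_eval_maps list is made of union trace_eval_map_item
 * blocks: each block starts with a "head" item (module and length),
 * is followed by the real eval map entries, and ends with a "tail"
 * item pointing to the next block.  If @ptr sits on a tail item,
 * jump to the first real entry of the next block, or return NULL
 * at the end of the list.
 */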
5674 static union trace_eval_map_item *
5675 update_eval_map(union trace_eval_map_item *ptr)
5676 {
5677 	if (!ptr->map.eval_string) {
5678 		if (ptr->tail.next) {
5679 			ptr = ptr->tail.next;
5680 			/* Set ptr to the next real item (skip head) */
5681 			ptr++;
5682 		} else
5683 			return NULL;
5684 	}
5685 	return ptr;
5686 }
5687 
5688 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5689 {
5690 	union trace_eval_map_item *ptr = v;
5691 
5692 	/*
5693 	 * Paranoid! If ptr points to end, we don't want to increment past it.
5694 	 * This really should never happen.
5695 	 */
5696 	(*pos)++;
5697 	ptr = update_eval_map(ptr);
5698 	if (WARN_ON_ONCE(!ptr))
5699 		return NULL;
5700 
5701 	ptr++;
5702 	ptr = update_eval_map(ptr);
5703 
5704 	return ptr;
5705 }
5706 
5707 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5708 {
5709 	union trace_eval_map_item *v;
5710 	loff_t l = 0;
5711 
5712 	mutex_lock(&trace_eval_mutex);
5713 
5714 	v = trace_eval_maps;
5715 	if (v)
5716 		v++;
5717 
5718 	while (v && l < *pos) {
5719 		v = eval_map_next(m, v, &l);
5720 	}
5721 
5722 	return v;
5723 }
5724 
5725 static void eval_map_stop(struct seq_file *m, void *v)
5726 {
5727 	mutex_unlock(&trace_eval_mutex);
5728 }
5729 
5730 static int eval_map_show(struct seq_file *m, void *v)
5731 {
5732 	union trace_eval_map_item *ptr = v;
5733 
5734 	seq_printf(m, "%s %ld (%s)\n",
5735 		   ptr->map.eval_string, ptr->map.eval_value,
5736 		   ptr->map.system);
5737 
5738 	return 0;
5739 }
5740 
5741 static const struct seq_operations tracing_eval_map_seq_ops = {
5742 	.start		= eval_map_start,
5743 	.next		= eval_map_next,
5744 	.stop		= eval_map_stop,
5745 	.show		= eval_map_show,
5746 };
5747 
5748 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5749 {
5750 	int ret;
5751 
5752 	ret = tracing_check_open_get_tr(NULL);
5753 	if (ret)
5754 		return ret;
5755 
5756 	return seq_open(filp, &tracing_eval_map_seq_ops);
5757 }
5758 
5759 static const struct file_operations tracing_eval_map_fops = {
5760 	.open		= tracing_eval_map_open,
5761 	.read		= seq_read,
5762 	.llseek		= seq_lseek,
5763 	.release	= seq_release,
5764 };
5765 
5766 static inline union trace_eval_map_item *
5767 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5768 {
5769 	/* Return tail of array given the head */
5770 	return ptr + ptr->head.length + 1;
5771 }
5772 
5773 static void
5774 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5775 			   int len)
5776 {
5777 	struct trace_eval_map **stop;
5778 	struct trace_eval_map **map;
5779 	union trace_eval_map_item *map_array;
5780 	union trace_eval_map_item *ptr;
5781 
5782 	stop = start + len;
5783 
5784 	/*
5785 	 * The trace_eval_maps contains the map plus a head and tail item,
5786 	 * where the head holds the module and length of array, and the
5787 	 * tail holds a pointer to the next list.
5788 	 */
5789 	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5790 	if (!map_array) {
5791 		pr_warn("Unable to allocate trace eval mapping\n");
5792 		return;
5793 	}
5794 
5795 	guard(mutex)(&trace_eval_mutex);
5796 
5797 	if (!trace_eval_maps)
5798 		trace_eval_maps = map_array;
5799 	else {
5800 		ptr = trace_eval_maps;
5801 		for (;;) {
5802 			ptr = trace_eval_jmp_to_tail(ptr);
5803 			if (!ptr->tail.next)
5804 				break;
5805 			ptr = ptr->tail.next;
5806 
5807 		}
5808 		ptr->tail.next = map_array;
5809 	}
5810 	map_array->head.mod = mod;
5811 	map_array->head.length = len;
5812 	map_array++;
5813 
5814 	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5815 		map_array->map = **map;
5816 		map_array++;
5817 	}
5818 	memset(map_array, 0, sizeof(*map_array));
5819 }
5820 
5821 static void trace_create_eval_file(struct dentry *d_tracer)
5822 {
5823 	trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
5824 			  NULL, &tracing_eval_map_fops);
5825 }
5826 
5827 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5828 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5829 static inline void trace_insert_eval_map_file(struct module *mod,
5830 			      struct trace_eval_map **start, int len) { }
5831 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5832 
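/*
 * Called when the kernel or a module provides trace eval maps:
 * update the events that use them and, when the eval_map file is
 * configured in, record them for that file as well.
 */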
5833 static void trace_insert_eval_map(struct module *mod,
5834 				  struct trace_eval_map **start, int len)
5835 {
5836 	struct trace_eval_map **map;
5837 
5838 	if (len <= 0)
5839 		return;
5840 
5841 	map = start;
5842 
5843 	trace_event_eval_update(map, len);
5844 
5845 	trace_insert_eval_map_file(mod, start, len);
5846 }
5847 
5848 static ssize_t
5849 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5850 		       size_t cnt, loff_t *ppos)
5851 {
5852 	struct trace_array *tr = filp->private_data;
5853 	char buf[MAX_TRACER_SIZE+2];
5854 	int r;
5855 
5856 	mutex_lock(&trace_types_lock);
5857 	r = sprintf(buf, "%s\n", tr->current_trace->name);
5858 	mutex_unlock(&trace_types_lock);
5859 
5860 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5861 }
5862 
5863 int tracer_init(struct tracer *t, struct trace_array *tr)
5864 {
5865 	tracing_reset_online_cpus(&tr->array_buffer);
5866 	return t->init(tr);
5867 }
5868 
5869 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
5870 {
5871 	int cpu;
5872 
5873 	for_each_tracing_cpu(cpu)
5874 		per_cpu_ptr(buf->data, cpu)->entries = val;
5875 }
5876 
5877 static void update_buffer_entries(struct array_buffer *buf, int cpu)
5878 {
5879 	if (cpu == RING_BUFFER_ALL_CPUS) {
5880 		set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
5881 	} else {
5882 		per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
5883 	}
5884 }
5885 
5886 #ifdef CONFIG_TRACER_MAX_TRACE
5887 /* resize @trace_buf's buffer to the size of @size_buf's entries */
5888 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5889 					struct array_buffer *size_buf, int cpu_id)
5890 {
5891 	int cpu, ret = 0;
5892 
5893 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
5894 		for_each_tracing_cpu(cpu) {
5895 			ret = ring_buffer_resize(trace_buf->buffer,
5896 				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5897 			if (ret < 0)
5898 				break;
5899 			per_cpu_ptr(trace_buf->data, cpu)->entries =
5900 				per_cpu_ptr(size_buf->data, cpu)->entries;
5901 		}
5902 	} else {
5903 		ret = ring_buffer_resize(trace_buf->buffer,
5904 				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5905 		if (ret == 0)
5906 			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5907 				per_cpu_ptr(size_buf->data, cpu_id)->entries;
5908 	}
5909 
5910 	return ret;
5911 }
5912 #endif /* CONFIG_TRACER_MAX_TRACE */
5913 
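/*
 * Resize the main (and, if a snapshot is allocated, the max) ring
 * buffer of @tr.  Tracing is stopped while the buffers are resized.
 * If resizing the max buffer fails, the main buffer is put back to
 * its previous per-CPU sizes so the two buffers stay in sync.
 */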
5914 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5915 					unsigned long size, int cpu)
5916 {
5917 	int ret;
5918 
5919 	/*
5920 	 * If kernel or user changes the size of the ring buffer
5921 	 * we use the size that was given, and we can forget about
5922 	 * expanding it later.
5923 	 */
5924 	trace_set_ring_buffer_expanded(tr);
5925 
5926 	/* May be called before buffers are initialized */
5927 	if (!tr->array_buffer.buffer)
5928 		return 0;
5929 
5930 	/* Do not allow tracing while resizing ring buffer */
5931 	tracing_stop_tr(tr);
5932 
5933 	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5934 	if (ret < 0)
5935 		goto out_start;
5936 
5937 #ifdef CONFIG_TRACER_MAX_TRACE
5938 	if (!tr->allocated_snapshot)
5939 		goto out;
5940 
5941 	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5942 	if (ret < 0) {
5943 		int r = resize_buffer_duplicate_size(&tr->array_buffer,
5944 						     &tr->array_buffer, cpu);
5945 		if (r < 0) {
5946 			/*
5947 			 * AARGH! We are left with different
5948 			 * size max buffer!!!!
5949 			 * The max buffer is our "snapshot" buffer.
5950 			 * When a tracer needs a snapshot (one of the
5951 			 * latency tracers), it swaps the max buffer
5952 			 * with the saved snapshot. We succeeded in
5953 			 * updating the size of the main buffer, but failed to
5954 			 * update the size of the max buffer. But when we tried
5955 			 * to reset the main buffer to the original size, we
5956 			 * failed there too. This is very unlikely to
5957 			 * happen, but if it does, warn and kill all
5958 			 * tracing.
5959 			 */
5960 			WARN_ON(1);
5961 			tracing_disabled = 1;
5962 		}
5963 		goto out_start;
5964 	}
5965 
5966 	update_buffer_entries(&tr->max_buffer, cpu);
5967 
5968  out:
5969 #endif /* CONFIG_TRACER_MAX_TRACE */
5970 
5971 	update_buffer_entries(&tr->array_buffer, cpu);
5972  out_start:
5973 	tracing_start_tr(tr);
5974 	return ret;
5975 }
5976 
5977 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5978 				  unsigned long size, int cpu_id)
5979 {
5980 	guard(mutex)(&trace_types_lock);
5981 
5982 	if (cpu_id != RING_BUFFER_ALL_CPUS) {
5983 		/* make sure this cpu is enabled in the mask */
5984 		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask))
5985 			return -EINVAL;
5986 	}
5987 
5988 	return __tracing_resize_ring_buffer(tr, size, cpu_id);
5989 }
5990 
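/*
 * Layout of the persistent "scratch" area used by a boot-mapped ring
 * buffer: the KASLR offset of the kernel that wrote the buffer,
 * followed by the name and text address of each loaded module, so
 * that addresses recorded in a previous boot can be interpreted.
 */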
5991 struct trace_mod_entry {
5992 	unsigned long	mod_addr;
5993 	char		mod_name[MODULE_NAME_LEN];
5994 };
5995 
5996 struct trace_scratch {
5997 	unsigned long		kaslr_addr;
5998 	unsigned long		nr_entries;
5999 	struct trace_mod_entry	entries[];
6000 };
6001 
6002 static DEFINE_MUTEX(scratch_mutex);
6003 
6004 #ifdef CONFIG_MODULES
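/* Record one loaded module's name and text address into tr->scratch */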
6005 static int save_mod(struct module *mod, void *data)
6006 {
6007 	struct trace_array *tr = data;
6008 	struct trace_scratch *tscratch;
6009 	struct trace_mod_entry *entry;
6010 	unsigned int size;
6011 
6012 	tscratch = tr->scratch;
6013 	if (!tscratch)
6014 		return -1;
6015 	size = tr->scratch_size;
6016 
6017 	if (struct_size(tscratch, entries, tscratch->nr_entries + 1) > size)
6018 		return -1;
6019 
6020 	entry = &tscratch->entries[tscratch->nr_entries];
6021 
6022 	tscratch->nr_entries++;
6023 
6024 	entry->mod_addr = (unsigned long)mod->mem[MOD_TEXT].base;
6025 	strscpy(entry->mod_name, mod->name);
6026 
6027 	return 0;
6028 }
6029 #else
6030 static int save_mod(struct module *mod, void *data)
6031 {
6032 	return 0;
6033 }
6034 #endif
6035 
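/*
 * If this buffer still holds data from the previous boot, drop it so
 * that old and new events are not mixed: clear the LAST_BOOT flag,
 * rebuild the module list in the scratch area for the current boot,
 * reset all CPU buffers and record the current KASLR offset.
 */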
6036 static void update_last_data(struct trace_array *tr)
6037 {
6038 	struct trace_scratch *tscratch;
6039 
6040 	if (!(tr->flags & TRACE_ARRAY_FL_BOOT))
6041 		return;
6042 
6043 	if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
6044 		return;
6045 
6046 	/* Only if the buffer has previous boot data, clear and update it. */
6047 	tr->flags &= ~TRACE_ARRAY_FL_LAST_BOOT;
6048 
6049 	/* Reset the module list and reload them */
6050 	if (tr->scratch) {
6051 		struct trace_scratch *tscratch = tr->scratch;
6052 
6053 		memset(tscratch->entries, 0,
6054 		       flex_array_size(tscratch, entries, tscratch->nr_entries));
6055 		tscratch->nr_entries = 0;
6056 
6057 		guard(mutex)(&scratch_mutex);
6058 		module_for_each_mod(save_mod, tr);
6059 	}
6060 
6061 	/*
6062 	 * Need to clear all CPU buffers as there cannot be events
6063 	 * from the previous boot mixed with events from this boot,
6064 	 * as that would cause a confusing trace. Clear all CPU
6065 	 * buffers, even those that may currently be offline.
6066 	 */
6067 	tracing_reset_all_cpus(&tr->array_buffer);
6068 
6069 	/* Using current data now */
6070 	tr->text_delta = 0;
6071 
6072 	if (!tr->scratch)
6073 		return;
6074 
6075 	tscratch = tr->scratch;
6076 
6077 	/* Set the persistent ring buffer meta data to this address */
6078 #ifdef CONFIG_RANDOMIZE_BASE
6079 	tscratch->kaslr_addr = kaslr_offset();
6080 #else
6081 	tscratch->kaslr_addr = 0;
6082 #endif
6083 }
6084 
6085 /**
6086  * tracing_update_buffers - used by tracing facility to expand ring buffers
6087  * @tr: The tracing instance
6088  *
6089  * To save memory when tracing is never used on a system that has it
6090  * configured in, the ring buffers are set to a minimum size. But once
6091  * a user starts to use the tracing facility, they need to grow
6092  * to their default size.
6093  *
6094  * This function is to be called when a tracer is about to be used.
6095  */
6096 int tracing_update_buffers(struct trace_array *tr)
6097 {
6098 	int ret = 0;
6099 
6100 	mutex_lock(&trace_types_lock);
6101 
6102 	update_last_data(tr);
6103 
6104 	if (!tr->ring_buffer_expanded)
6105 		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6106 						RING_BUFFER_ALL_CPUS);
6107 	mutex_unlock(&trace_types_lock);
6108 
6109 	return ret;
6110 }
6111 
6112 struct trace_option_dentry;
6113 
6114 static void
6115 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6116 
6117 /*
6118  * Used to clear out the tracer before deletion of an instance.
6119  * Must have trace_types_lock held.
6120  */
6121 static void tracing_set_nop(struct trace_array *tr)
6122 {
6123 	if (tr->current_trace == &nop_trace)
6124 		return;
6125 
6126 	tr->current_trace->enabled--;
6127 
6128 	if (tr->current_trace->reset)
6129 		tr->current_trace->reset(tr);
6130 
6131 	tr->current_trace = &nop_trace;
6132 }
6133 
6134 static bool tracer_options_updated;
6135 
6136 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6137 {
6138 	/* Only enable if the directory has been created already. */
6139 	if (!tr->dir)
6140 		return;
6141 
6142 	/* Only create trace option files after update_tracer_options finishes */
6143 	if (!tracer_options_updated)
6144 		return;
6145 
6146 	create_trace_option_files(tr, t);
6147 }
6148 
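/*
 * Switch @tr to the tracer named @buf: expand the ring buffer if
 * needed, tear down the current tracer (dropping its snapshot buffer
 * if the new tracer does not use one), then call the new tracer's
 * init() before making it current.
 */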
6149 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6150 {
6151 	struct tracer *t;
6152 #ifdef CONFIG_TRACER_MAX_TRACE
6153 	bool had_max_tr;
6154 #endif
6155 	int ret;
6156 
6157 	guard(mutex)(&trace_types_lock);
6158 
6159 	update_last_data(tr);
6160 
6161 	if (!tr->ring_buffer_expanded) {
6162 		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6163 						RING_BUFFER_ALL_CPUS);
6164 		if (ret < 0)
6165 			return ret;
6166 		ret = 0;
6167 	}
6168 
6169 	for (t = trace_types; t; t = t->next) {
6170 		if (strcmp(t->name, buf) == 0)
6171 			break;
6172 	}
6173 	if (!t)
6174 		return -EINVAL;
6175 
6176 	if (t == tr->current_trace)
6177 		return 0;
6178 
6179 #ifdef CONFIG_TRACER_SNAPSHOT
6180 	if (t->use_max_tr) {
6181 		local_irq_disable();
6182 		arch_spin_lock(&tr->max_lock);
6183 		ret = tr->cond_snapshot ? -EBUSY : 0;
6184 		arch_spin_unlock(&tr->max_lock);
6185 		local_irq_enable();
6186 		if (ret)
6187 			return ret;
6188 	}
6189 #endif
6190 	/* Some tracers won't work on kernel command line */
6191 	if (system_state < SYSTEM_RUNNING && t->noboot) {
6192 		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6193 			t->name);
6194 		return -EINVAL;
6195 	}
6196 
6197 	/* Some tracers are only allowed for the top level buffer */
6198 	if (!trace_ok_for_array(t, tr))
6199 		return -EINVAL;
6200 
6201 	/* If trace pipe files are being read, we can't change the tracer */
6202 	if (tr->trace_ref)
6203 		return -EBUSY;
6204 
6205 	trace_branch_disable();
6206 
6207 	tr->current_trace->enabled--;
6208 
6209 	if (tr->current_trace->reset)
6210 		tr->current_trace->reset(tr);
6211 
6212 #ifdef CONFIG_TRACER_MAX_TRACE
6213 	had_max_tr = tr->current_trace->use_max_tr;
6214 
6215 	/* Current trace needs to be nop_trace before synchronize_rcu */
6216 	tr->current_trace = &nop_trace;
6217 
6218 	if (had_max_tr && !t->use_max_tr) {
6219 		/*
6220 		 * We need to make sure that the update_max_tr sees that
6221 		 * current_trace changed to nop_trace to keep it from
6222 		 * swapping the buffers after we resize it.
6223 		 * The update_max_tr is called with interrupts disabled,
6224 		 * so a synchronize_rcu() is sufficient.
6225 		 */
6226 		synchronize_rcu();
6227 		free_snapshot(tr);
6228 		tracing_disarm_snapshot(tr);
6229 	}
6230 
6231 	if (!had_max_tr && t->use_max_tr) {
6232 		ret = tracing_arm_snapshot_locked(tr);
6233 		if (ret)
6234 			return ret;
6235 	}
6236 #else
6237 	tr->current_trace = &nop_trace;
6238 #endif
6239 
6240 	if (t->init) {
6241 		ret = tracer_init(t, tr);
6242 		if (ret) {
6243 #ifdef CONFIG_TRACER_MAX_TRACE
6244 			if (t->use_max_tr)
6245 				tracing_disarm_snapshot(tr);
6246 #endif
6247 			return ret;
6248 		}
6249 	}
6250 
6251 	tr->current_trace = t;
6252 	tr->current_trace->enabled++;
6253 	trace_branch_enable(tr);
6254 
6255 	return 0;
6256 }
6257 
6258 static ssize_t
6259 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6260 			size_t cnt, loff_t *ppos)
6261 {
6262 	struct trace_array *tr = filp->private_data;
6263 	char buf[MAX_TRACER_SIZE+1];
6264 	char *name;
6265 	size_t ret;
6266 	int err;
6267 
6268 	ret = cnt;
6269 
6270 	if (cnt > MAX_TRACER_SIZE)
6271 		cnt = MAX_TRACER_SIZE;
6272 
6273 	if (copy_from_user(buf, ubuf, cnt))
6274 		return -EFAULT;
6275 
6276 	buf[cnt] = 0;
6277 
6278 	name = strim(buf);
6279 
6280 	err = tracing_set_tracer(tr, name);
6281 	if (err)
6282 		return err;
6283 
6284 	*ppos += ret;
6285 
6286 	return ret;
6287 }
6288 
6289 static ssize_t
6290 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6291 		   size_t cnt, loff_t *ppos)
6292 {
6293 	char buf[64];
6294 	int r;
6295 
6296 	r = snprintf(buf, sizeof(buf), "%ld\n",
6297 		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6298 	if (r > sizeof(buf))
6299 		r = sizeof(buf);
6300 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6301 }
6302 
6303 static ssize_t
6304 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6305 		    size_t cnt, loff_t *ppos)
6306 {
6307 	unsigned long val;
6308 	int ret;
6309 
6310 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6311 	if (ret)
6312 		return ret;
6313 
6314 	*ptr = val * 1000;
6315 
6316 	return cnt;
6317 }
6318 
6319 static ssize_t
6320 tracing_thresh_read(struct file *filp, char __user *ubuf,
6321 		    size_t cnt, loff_t *ppos)
6322 {
6323 	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6324 }
6325 
6326 static ssize_t
6327 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6328 		     size_t cnt, loff_t *ppos)
6329 {
6330 	struct trace_array *tr = filp->private_data;
6331 	int ret;
6332 
6333 	guard(mutex)(&trace_types_lock);
6334 	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6335 	if (ret < 0)
6336 		return ret;
6337 
6338 	if (tr->current_trace->update_thresh) {
6339 		ret = tr->current_trace->update_thresh(tr);
6340 		if (ret < 0)
6341 			return ret;
6342 	}
6343 
6344 	return cnt;
6345 }
6346 
6347 #ifdef CONFIG_TRACER_MAX_TRACE
6348 
6349 static ssize_t
6350 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6351 		     size_t cnt, loff_t *ppos)
6352 {
6353 	struct trace_array *tr = filp->private_data;
6354 
6355 	return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6356 }
6357 
6358 static ssize_t
6359 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6360 		      size_t cnt, loff_t *ppos)
6361 {
6362 	struct trace_array *tr = filp->private_data;
6363 
6364 	return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6365 }
6366 
6367 #endif
6368 
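/*
 * Only one reader may consume trace_pipe per CPU, and the "all CPUs"
 * pipe may only be opened when no per-CPU pipe is open.  The
 * per-instance pipe_cpumask tracks which CPUs are currently claimed.
 */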
6369 static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
6370 {
6371 	if (cpu == RING_BUFFER_ALL_CPUS) {
6372 		if (cpumask_empty(tr->pipe_cpumask)) {
6373 			cpumask_setall(tr->pipe_cpumask);
6374 			return 0;
6375 		}
6376 	} else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6377 		cpumask_set_cpu(cpu, tr->pipe_cpumask);
6378 		return 0;
6379 	}
6380 	return -EBUSY;
6381 }
6382 
6383 static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
6384 {
6385 	if (cpu == RING_BUFFER_ALL_CPUS) {
6386 		WARN_ON(!cpumask_full(tr->pipe_cpumask));
6387 		cpumask_clear(tr->pipe_cpumask);
6388 	} else {
6389 		WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6390 		cpumask_clear_cpu(cpu, tr->pipe_cpumask);
6391 	}
6392 }
6393 
6394 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6395 {
6396 	struct trace_array *tr = inode->i_private;
6397 	struct trace_iterator *iter;
6398 	int cpu;
6399 	int ret;
6400 
6401 	ret = tracing_check_open_get_tr(tr);
6402 	if (ret)
6403 		return ret;
6404 
6405 	mutex_lock(&trace_types_lock);
6406 	cpu = tracing_get_cpu(inode);
6407 	ret = open_pipe_on_cpu(tr, cpu);
6408 	if (ret)
6409 		goto fail_pipe_on_cpu;
6410 
6411 	/* create a buffer to store the information to pass to userspace */
6412 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6413 	if (!iter) {
6414 		ret = -ENOMEM;
6415 		goto fail_alloc_iter;
6416 	}
6417 
6418 	trace_seq_init(&iter->seq);
6419 	iter->trace = tr->current_trace;
6420 
6421 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6422 		ret = -ENOMEM;
6423 		goto fail;
6424 	}
6425 
6426 	/* trace pipe does not show start of buffer */
6427 	cpumask_setall(iter->started);
6428 
6429 	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6430 		iter->iter_flags |= TRACE_FILE_LAT_FMT;
6431 
6432 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
6433 	if (trace_clocks[tr->clock_id].in_ns)
6434 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6435 
6436 	iter->tr = tr;
6437 	iter->array_buffer = &tr->array_buffer;
6438 	iter->cpu_file = cpu;
6439 	mutex_init(&iter->mutex);
6440 	filp->private_data = iter;
6441 
6442 	if (iter->trace->pipe_open)
6443 		iter->trace->pipe_open(iter);
6444 
6445 	nonseekable_open(inode, filp);
6446 
6447 	tr->trace_ref++;
6448 
6449 	mutex_unlock(&trace_types_lock);
6450 	return ret;
6451 
6452 fail:
6453 	kfree(iter);
6454 fail_alloc_iter:
6455 	close_pipe_on_cpu(tr, cpu);
6456 fail_pipe_on_cpu:
6457 	__trace_array_put(tr);
6458 	mutex_unlock(&trace_types_lock);
6459 	return ret;
6460 }
6461 
6462 static int tracing_release_pipe(struct inode *inode, struct file *file)
6463 {
6464 	struct trace_iterator *iter = file->private_data;
6465 	struct trace_array *tr = inode->i_private;
6466 
6467 	mutex_lock(&trace_types_lock);
6468 
6469 	tr->trace_ref--;
6470 
6471 	if (iter->trace->pipe_close)
6472 		iter->trace->pipe_close(iter);
6473 	close_pipe_on_cpu(tr, iter->cpu_file);
6474 	mutex_unlock(&trace_types_lock);
6475 
6476 	free_trace_iter_content(iter);
6477 	kfree(iter);
6478 
6479 	trace_array_put(tr);
6480 
6481 	return 0;
6482 }
6483 
6484 static __poll_t
6485 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6486 {
6487 	struct trace_array *tr = iter->tr;
6488 
6489 	/* Iterators are static, they should be filled or empty */
6490 	if (trace_buffer_iter(iter, iter->cpu_file))
6491 		return EPOLLIN | EPOLLRDNORM;
6492 
6493 	if (tr->trace_flags & TRACE_ITER_BLOCK)
6494 		/*
6495 		 * Always select as readable when in blocking mode
6496 		 */
6497 		return EPOLLIN | EPOLLRDNORM;
6498 	else
6499 		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6500 					     filp, poll_table, iter->tr->buffer_percent);
6501 }
6502 
6503 static __poll_t
6504 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6505 {
6506 	struct trace_iterator *iter = filp->private_data;
6507 
6508 	return trace_poll(iter, filp, poll_table);
6509 }
6510 
6511 /* Must be called with iter->mutex held. */
6512 static int tracing_wait_pipe(struct file *filp)
6513 {
6514 	struct trace_iterator *iter = filp->private_data;
6515 	int ret;
6516 
6517 	while (trace_empty(iter)) {
6518 
6519 		if ((filp->f_flags & O_NONBLOCK)) {
6520 			return -EAGAIN;
6521 		}
6522 
6523 		/*
6524 		 * We block until we read something and tracing is disabled.
6525 		 * We still block if tracing is disabled, but we have never
6526 		 * read anything. This allows a user to cat this file, and
6527 		 * then enable tracing. But after we have read something,
6528 		 * we give an EOF when tracing is again disabled.
6529 		 *
6530 		 * iter->pos will be 0 if we haven't read anything.
6531 		 */
6532 		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6533 			break;
6534 
6535 		mutex_unlock(&iter->mutex);
6536 
6537 		ret = wait_on_pipe(iter, 0);
6538 
6539 		mutex_lock(&iter->mutex);
6540 
6541 		if (ret)
6542 			return ret;
6543 	}
6544 
6545 	return 1;
6546 }
6547 
6548 /*
6549  * Consumer reader.
6550  */
6551 static ssize_t
6552 tracing_read_pipe(struct file *filp, char __user *ubuf,
6553 		  size_t cnt, loff_t *ppos)
6554 {
6555 	struct trace_iterator *iter = filp->private_data;
6556 	ssize_t sret;
6557 
6558 	/*
6559 	 * Avoid more than one consumer on a single file descriptor.
6560 	 * This is just a matter of trace coherency: the ring buffer itself
6561 	 * is protected.
6562 	 */
6563 	guard(mutex)(&iter->mutex);
6564 
6565 	/* return any leftover data */
6566 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6567 	if (sret != -EBUSY)
6568 		return sret;
6569 
6570 	trace_seq_init(&iter->seq);
6571 
6572 	if (iter->trace->read) {
6573 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6574 		if (sret)
6575 			return sret;
6576 	}
6577 
6578 waitagain:
6579 	sret = tracing_wait_pipe(filp);
6580 	if (sret <= 0)
6581 		return sret;
6582 
6583 	/* stop when tracing is finished */
6584 	if (trace_empty(iter))
6585 		return 0;
6586 
6587 	if (cnt >= TRACE_SEQ_BUFFER_SIZE)
6588 		cnt = TRACE_SEQ_BUFFER_SIZE - 1;
6589 
6590 	/* reset all but tr, trace, and overruns */
6591 	trace_iterator_reset(iter);
6592 	cpumask_clear(iter->started);
6593 	trace_seq_init(&iter->seq);
6594 
6595 	trace_event_read_lock();
6596 	trace_access_lock(iter->cpu_file);
6597 	while (trace_find_next_entry_inc(iter) != NULL) {
6598 		enum print_line_t ret;
6599 		int save_len = iter->seq.seq.len;
6600 
6601 		ret = print_trace_line(iter);
6602 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
6603 			/*
6604 			 * If one print_trace_line() fills entire trace_seq in one shot,
6605 			 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6606 			 * In this case, we need to consume it; otherwise, the loop will peek
6607 			 * this event next time, resulting in an infinite loop.
6608 			 */
6609 			if (save_len == 0) {
6610 				iter->seq.full = 0;
6611 				trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6612 				trace_consume(iter);
6613 				break;
6614 			}
6615 
6616 			/* In other cases, don't print partial lines */
6617 			iter->seq.seq.len = save_len;
6618 			break;
6619 		}
6620 		if (ret != TRACE_TYPE_NO_CONSUME)
6621 			trace_consume(iter);
6622 
6623 		if (trace_seq_used(&iter->seq) >= cnt)
6624 			break;
6625 
6626 		/*
6627 		 * Setting the full flag means we reached the trace_seq buffer
6628 		 * size and we should leave by partial output condition above.
6629 		 * One of the trace_seq_* functions is not used properly.
6630 		 */
6631 		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6632 			  iter->ent->type);
6633 	}
6634 	trace_access_unlock(iter->cpu_file);
6635 	trace_event_read_unlock();
6636 
6637 	/* Now copy what we have to the user */
6638 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6639 	if (iter->seq.readpos >= trace_seq_used(&iter->seq))
6640 		trace_seq_init(&iter->seq);
6641 
6642 	/*
6643 	 * If there was nothing to send to user, in spite of consuming trace
6644 	 * entries, go back to wait for more entries.
6645 	 */
6646 	if (sret == -EBUSY)
6647 		goto waitagain;
6648 
6649 	return sret;
6650 }
6651 
6652 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6653 				     unsigned int idx)
6654 {
6655 	__free_page(spd->pages[idx]);
6656 }
6657 
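/*
 * Format trace entries into iter->seq until either @rem bytes have
 * been produced, the page-sized seq buffer fills up, or there are no
 * more entries.  Returns the number of requested bytes still left
 * to fill.
 */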
6658 static size_t
6659 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6660 {
6661 	size_t count;
6662 	int save_len;
6663 	int ret;
6664 
6665 	/* Seq buffer is page-sized, exactly what we need. */
6666 	for (;;) {
6667 		save_len = iter->seq.seq.len;
6668 		ret = print_trace_line(iter);
6669 
6670 		if (trace_seq_has_overflowed(&iter->seq)) {
6671 			iter->seq.seq.len = save_len;
6672 			break;
6673 		}
6674 
6675 		/*
6676 		 * This should not be hit, because it should only
6677 		 * be set if the iter->seq overflowed. But check it
6678 		 * anyway to be safe.
6679 		 */
6680 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
6681 			iter->seq.seq.len = save_len;
6682 			break;
6683 		}
6684 
6685 		count = trace_seq_used(&iter->seq) - save_len;
6686 		if (rem < count) {
6687 			rem = 0;
6688 			iter->seq.seq.len = save_len;
6689 			break;
6690 		}
6691 
6692 		if (ret != TRACE_TYPE_NO_CONSUME)
6693 			trace_consume(iter);
6694 		rem -= count;
6695 		if (!trace_find_next_entry_inc(iter))	{
6696 			rem = 0;
6697 			iter->ent = NULL;
6698 			break;
6699 		}
6700 	}
6701 
6702 	return rem;
6703 }
6704 
6705 static ssize_t tracing_splice_read_pipe(struct file *filp,
6706 					loff_t *ppos,
6707 					struct pipe_inode_info *pipe,
6708 					size_t len,
6709 					unsigned int flags)
6710 {
6711 	struct page *pages_def[PIPE_DEF_BUFFERS];
6712 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
6713 	struct trace_iterator *iter = filp->private_data;
6714 	struct splice_pipe_desc spd = {
6715 		.pages		= pages_def,
6716 		.partial	= partial_def,
6717 		.nr_pages	= 0, /* This gets updated below. */
6718 		.nr_pages_max	= PIPE_DEF_BUFFERS,
6719 		.ops		= &default_pipe_buf_ops,
6720 		.spd_release	= tracing_spd_release_pipe,
6721 	};
6722 	ssize_t ret;
6723 	size_t rem;
6724 	unsigned int i;
6725 
6726 	if (splice_grow_spd(pipe, &spd))
6727 		return -ENOMEM;
6728 
6729 	mutex_lock(&iter->mutex);
6730 
6731 	if (iter->trace->splice_read) {
6732 		ret = iter->trace->splice_read(iter, filp,
6733 					       ppos, pipe, len, flags);
6734 		if (ret)
6735 			goto out_err;
6736 	}
6737 
6738 	ret = tracing_wait_pipe(filp);
6739 	if (ret <= 0)
6740 		goto out_err;
6741 
6742 	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6743 		ret = -EFAULT;
6744 		goto out_err;
6745 	}
6746 
6747 	trace_event_read_lock();
6748 	trace_access_lock(iter->cpu_file);
6749 
6750 	/* Fill as many pages as possible. */
6751 	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6752 		spd.pages[i] = alloc_page(GFP_KERNEL);
6753 		if (!spd.pages[i])
6754 			break;
6755 
6756 		rem = tracing_fill_pipe_page(rem, iter);
6757 
6758 		/* Copy the data into the page, so we can start over. */
6759 		ret = trace_seq_to_buffer(&iter->seq,
6760 					  page_address(spd.pages[i]),
6761 					  trace_seq_used(&iter->seq));
6762 		if (ret < 0) {
6763 			__free_page(spd.pages[i]);
6764 			break;
6765 		}
6766 		spd.partial[i].offset = 0;
6767 		spd.partial[i].len = trace_seq_used(&iter->seq);
6768 
6769 		trace_seq_init(&iter->seq);
6770 	}
6771 
6772 	trace_access_unlock(iter->cpu_file);
6773 	trace_event_read_unlock();
6774 	mutex_unlock(&iter->mutex);
6775 
6776 	spd.nr_pages = i;
6777 
6778 	if (i)
6779 		ret = splice_to_pipe(pipe, &spd);
6780 	else
6781 		ret = 0;
6782 out:
6783 	splice_shrink_spd(&spd);
6784 	return ret;
6785 
6786 out_err:
6787 	mutex_unlock(&iter->mutex);
6788 	goto out;
6789 }
6790 
6791 static ssize_t
6792 tracing_entries_read(struct file *filp, char __user *ubuf,
6793 		     size_t cnt, loff_t *ppos)
6794 {
6795 	struct inode *inode = file_inode(filp);
6796 	struct trace_array *tr = inode->i_private;
6797 	int cpu = tracing_get_cpu(inode);
6798 	char buf[64];
6799 	int r = 0;
6800 	ssize_t ret;
6801 
6802 	mutex_lock(&trace_types_lock);
6803 
6804 	if (cpu == RING_BUFFER_ALL_CPUS) {
6805 		int cpu, buf_size_same;
6806 		unsigned long size;
6807 
6808 		size = 0;
6809 		buf_size_same = 1;
6810 		/* check if all cpu sizes are same */
6811 		for_each_tracing_cpu(cpu) {
6812 			/* fill in the size from first enabled cpu */
6813 			if (size == 0)
6814 				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6815 			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6816 				buf_size_same = 0;
6817 				break;
6818 			}
6819 		}
6820 
6821 		if (buf_size_same) {
6822 			if (!tr->ring_buffer_expanded)
6823 				r = sprintf(buf, "%lu (expanded: %lu)\n",
6824 					    size >> 10,
6825 					    trace_buf_size >> 10);
6826 			else
6827 				r = sprintf(buf, "%lu\n", size >> 10);
6828 		} else
6829 			r = sprintf(buf, "X\n");
6830 	} else
6831 		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6832 
6833 	mutex_unlock(&trace_types_lock);
6834 
6835 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6836 	return ret;
6837 }
6838 
6839 static ssize_t
6840 tracing_entries_write(struct file *filp, const char __user *ubuf,
6841 		      size_t cnt, loff_t *ppos)
6842 {
6843 	struct inode *inode = file_inode(filp);
6844 	struct trace_array *tr = inode->i_private;
6845 	unsigned long val;
6846 	int ret;
6847 
6848 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6849 	if (ret)
6850 		return ret;
6851 
6852 	/* must have at least 1 entry */
6853 	if (!val)
6854 		return -EINVAL;
6855 
6856 	/* value is in KB */
6857 	val <<= 10;
6858 	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6859 	if (ret < 0)
6860 		return ret;
6861 
6862 	*ppos += cnt;
6863 
6864 	return cnt;
6865 }
6866 
6867 static ssize_t
6868 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6869 				size_t cnt, loff_t *ppos)
6870 {
6871 	struct trace_array *tr = filp->private_data;
6872 	char buf[64];
6873 	int r, cpu;
6874 	unsigned long size = 0, expanded_size = 0;
6875 
6876 	mutex_lock(&trace_types_lock);
6877 	for_each_tracing_cpu(cpu) {
6878 		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6879 		if (!tr->ring_buffer_expanded)
6880 			expanded_size += trace_buf_size >> 10;
6881 	}
6882 	if (tr->ring_buffer_expanded)
6883 		r = sprintf(buf, "%lu\n", size);
6884 	else
6885 		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6886 	mutex_unlock(&trace_types_lock);
6887 
6888 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6889 }
6890 
6891 #define LAST_BOOT_HEADER ((void *)1)
6892 
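/*
 * seq_file iterator for the last-boot data listing: entry 0 is a
 * header line showing the previous boot's KASLR offset (or
 * "# Current"), and each following entry is one module recorded in
 * the scratch area.
 */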
6893 static void *l_next(struct seq_file *m, void *v, loff_t *pos)
6894 {
6895 	struct trace_array *tr = m->private;
6896 	struct trace_scratch *tscratch = tr->scratch;
6897 	unsigned int index = *pos;
6898 
6899 	(*pos)++;
6900 
6901 	if (*pos == 1)
6902 		return LAST_BOOT_HEADER;
6903 
6904 	/* Only show offsets of the last boot data */
6905 	if (!tscratch || !(tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
6906 		return NULL;
6907 
6908 	/* *pos 0 is for the header, 1 is for the first module */
6909 	index--;
6910 
6911 	if (index >= tscratch->nr_entries)
6912 		return NULL;
6913 
6914 	return &tscratch->entries[index];
6915 }
6916 
6917 static void *l_start(struct seq_file *m, loff_t *pos)
6918 {
6919 	mutex_lock(&scratch_mutex);
6920 
6921 	return l_next(m, NULL, pos);
6922 }
6923 
6924 static void l_stop(struct seq_file *m, void *p)
6925 {
6926 	mutex_unlock(&scratch_mutex);
6927 }
6928 
6929 static void show_last_boot_header(struct seq_file *m, struct trace_array *tr)
6930 {
6931 	struct trace_scratch *tscratch = tr->scratch;
6932 
6933 	/*
6934 	 * Do not leak KASLR address. This only shows the KASLR address of
6935 	 * the last boot. When the ring buffer is started, the LAST_BOOT
6936 	 * flag gets cleared, and this should only report "current".
6937 	 * Otherwise it shows the KASLR address from the previous boot which
6938 	 * should not be the same as the current boot.
6939 	 */
6940 	if (tscratch && (tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
6941 		seq_printf(m, "%lx\t[kernel]\n", tscratch->kaslr_addr);
6942 	else
6943 		seq_puts(m, "# Current\n");
6944 }
6945 
6946 static int l_show(struct seq_file *m, void *v)
6947 {
6948 	struct trace_array *tr = m->private;
6949 	struct trace_mod_entry *entry = v;
6950 
6951 	if (v == LAST_BOOT_HEADER) {
6952 		show_last_boot_header(m, tr);
6953 		return 0;
6954 	}
6955 
6956 	seq_printf(m, "%lx\t%s\n", entry->mod_addr, entry->mod_name);
6957 	return 0;
6958 }
6959 
6960 static const struct seq_operations last_boot_seq_ops = {
6961 	.start		= l_start,
6962 	.next		= l_next,
6963 	.stop		= l_stop,
6964 	.show		= l_show,
6965 };
6966 
6967 static int tracing_last_boot_open(struct inode *inode, struct file *file)
6968 {
6969 	struct trace_array *tr = inode->i_private;
6970 	struct seq_file *m;
6971 	int ret;
6972 
6973 	ret = tracing_check_open_get_tr(tr);
6974 	if (ret)
6975 		return ret;
6976 
6977 	ret = seq_open(file, &last_boot_seq_ops);
6978 	if (ret) {
6979 		trace_array_put(tr);
6980 		return ret;
6981 	}
6982 
6983 	m = file->private_data;
6984 	m->private = tr;
6985 
6986 	return 0;
6987 }
6988 
6989 static int tracing_buffer_meta_open(struct inode *inode, struct file *filp)
6990 {
6991 	struct trace_array *tr = inode->i_private;
6992 	int cpu = tracing_get_cpu(inode);
6993 	int ret;
6994 
6995 	ret = tracing_check_open_get_tr(tr);
6996 	if (ret)
6997 		return ret;
6998 
6999 	ret = ring_buffer_meta_seq_init(filp, tr->array_buffer.buffer, cpu);
7000 	if (ret < 0)
7001 		__trace_array_put(tr);
7002 	return ret;
7003 }
7004 
7005 static ssize_t
7006 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7007 			  size_t cnt, loff_t *ppos)
7008 {
7009 	/*
7010 	 * There is no need to read what the user has written; this function
7011 	 * just makes sure that there is no error when "echo" is used.
7012 	 */
7013 
7014 	*ppos += cnt;
7015 
7016 	return cnt;
7017 }
7018 
7019 static int
7020 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7021 {
7022 	struct trace_array *tr = inode->i_private;
7023 
7024 	/* disable tracing ? */
7025 	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7026 		tracer_tracing_off(tr);
7027 	/* resize the ring buffer to 0 */
7028 	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7029 
7030 	trace_array_put(tr);
7031 
7032 	return 0;
7033 }
7034 
7035 #define TRACE_MARKER_MAX_SIZE		4096
7036 
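/*
 * Handle writes to the trace_marker file (e.g. "echo hello >
 * trace_marker" from a shell).  The user string is copied directly
 * into a TRACE_PRINT event in the ring buffer, truncated to
 * TRACE_MARKER_MAX_SIZE, and any triggers attached to the
 * trace_marker event are run against it.
 */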
7037 static ssize_t
7038 tracing_mark_write(struct file *filp, const char __user *ubuf,
7039 					size_t cnt, loff_t *fpos)
7040 {
7041 	struct trace_array *tr = filp->private_data;
7042 	struct ring_buffer_event *event;
7043 	enum event_trigger_type tt = ETT_NONE;
7044 	struct trace_buffer *buffer;
7045 	struct print_entry *entry;
7046 	int meta_size;
7047 	ssize_t written;
7048 	size_t size;
7049 	int len;
7050 
7051 /* Used in tracing_mark_raw_write() as well */
7052 #define FAULTED_STR "<faulted>"
7053 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7054 
7055 	if (tracing_disabled)
7056 		return -EINVAL;
7057 
7058 	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7059 		return -EINVAL;
7060 
7061 	if ((ssize_t)cnt < 0)
7062 		return -EINVAL;
7063 
7064 	if (cnt > TRACE_MARKER_MAX_SIZE)
7065 		cnt = TRACE_MARKER_MAX_SIZE;
7066 
7067 	meta_size = sizeof(*entry) + 2;  /* add '\0' and possible '\n' */
7068  again:
7069 	size = cnt + meta_size;
7070 
7071 	/* If less than "<faulted>", then make sure we can still add that */
7072 	if (cnt < FAULTED_SIZE)
7073 		size += FAULTED_SIZE - cnt;
7074 
7075 	buffer = tr->array_buffer.buffer;
7076 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7077 					    tracing_gen_ctx());
7078 	if (unlikely(!event)) {
7079 		/*
7080 		 * If the size was greater than what was allowed, then
7081 		 * make it smaller and try again.
7082 		 */
7083 		if (size > ring_buffer_max_event_size(buffer)) {
7084 			/* cnt < FAULTED_SIZE should never make size bigger than max */
7085 			if (WARN_ON_ONCE(cnt < FAULTED_SIZE))
7086 				return -EBADF;
7087 			cnt = ring_buffer_max_event_size(buffer) - meta_size;
7088 			/* The above should only happen once */
7089 			if (WARN_ON_ONCE(cnt + meta_size == size))
7090 				return -EBADF;
7091 			goto again;
7092 		}
7093 
7094 		/* Ring buffer disabled, return as if not open for write */
7095 		return -EBADF;
7096 	}
7097 
7098 	entry = ring_buffer_event_data(event);
7099 	entry->ip = _THIS_IP_;
7100 
7101 	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7102 	if (len) {
7103 		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7104 		cnt = FAULTED_SIZE;
7105 		written = -EFAULT;
7106 	} else
7107 		written = cnt;
7108 
7109 	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7110 		/* do not add \n before testing triggers, but add \0 */
7111 		entry->buf[cnt] = '\0';
7112 		tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7113 	}
7114 
7115 	if (entry->buf[cnt - 1] != '\n') {
7116 		entry->buf[cnt] = '\n';
7117 		entry->buf[cnt + 1] = '\0';
7118 	} else
7119 		entry->buf[cnt] = '\0';
7120 
7121 	if (static_branch_unlikely(&trace_marker_exports_enabled))
7122 		ftrace_exports(event, TRACE_EXPORT_MARKER);
7123 	__buffer_unlock_commit(buffer, event);
7124 
7125 	if (tt)
7126 		event_triggers_post_call(tr->trace_marker_file, tt);
7127 
7128 	return written;
7129 }
7130 
7131 static ssize_t
7132 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7133 					size_t cnt, loff_t *fpos)
7134 {
7135 	struct trace_array *tr = filp->private_data;
7136 	struct ring_buffer_event *event;
7137 	struct trace_buffer *buffer;
7138 	struct raw_data_entry *entry;
7139 	ssize_t written;
7140 	int size;
7141 	int len;
7142 
7143 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7144 
7145 	if (tracing_disabled)
7146 		return -EINVAL;
7147 
7148 	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7149 		return -EINVAL;
7150 
7151 	/* The marker must at least have a tag id */
7152 	if (cnt < sizeof(unsigned int))
7153 		return -EINVAL;
7154 
7155 	size = sizeof(*entry) + cnt;
7156 	if (cnt < FAULT_SIZE_ID)
7157 		size += FAULT_SIZE_ID - cnt;
7158 
7159 	buffer = tr->array_buffer.buffer;
7160 
7161 	if (size > ring_buffer_max_event_size(buffer))
7162 		return -EINVAL;
7163 
7164 	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7165 					    tracing_gen_ctx());
7166 	if (!event)
7167 		/* Ring buffer disabled, return as if not open for write */
7168 		return -EBADF;
7169 
7170 	entry = ring_buffer_event_data(event);
7171 
7172 	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7173 	if (len) {
7174 		entry->id = -1;
7175 		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7176 		written = -EFAULT;
7177 	} else
7178 		written = cnt;
7179 
7180 	__buffer_unlock_commit(buffer, event);
7181 
7182 	return written;
7183 }
7184 
7185 static int tracing_clock_show(struct seq_file *m, void *v)
7186 {
7187 	struct trace_array *tr = m->private;
7188 	int i;
7189 
7190 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7191 		seq_printf(m,
7192 			"%s%s%s%s", i ? " " : "",
7193 			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7194 			i == tr->clock_id ? "]" : "");
7195 	seq_putc(m, '\n');
7196 
7197 	return 0;
7198 }
7199 
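/*
 * Switch the trace clock of @tr to the clock named @clockstr.  The
 * existing buffers are reset, since timestamps taken with the old
 * clock are not comparable with those taken with the new one.
 */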
7200 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7201 {
7202 	int i;
7203 
7204 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7205 		if (strcmp(trace_clocks[i].name, clockstr) == 0)
7206 			break;
7207 	}
7208 	if (i == ARRAY_SIZE(trace_clocks))
7209 		return -EINVAL;
7210 
7211 	mutex_lock(&trace_types_lock);
7212 
7213 	tr->clock_id = i;
7214 
7215 	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7216 
7217 	/*
7218 	 * New clock may not be consistent with the previous clock.
7219 	 * Reset the buffer so that it doesn't have incomparable timestamps.
7220 	 */
7221 	tracing_reset_online_cpus(&tr->array_buffer);
7222 
7223 #ifdef CONFIG_TRACER_MAX_TRACE
7224 	if (tr->max_buffer.buffer)
7225 		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7226 	tracing_reset_online_cpus(&tr->max_buffer);
7227 #endif
7228 
7229 	mutex_unlock(&trace_types_lock);
7230 
7231 	return 0;
7232 }
7233 
7234 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7235 				   size_t cnt, loff_t *fpos)
7236 {
7237 	struct seq_file *m = filp->private_data;
7238 	struct trace_array *tr = m->private;
7239 	char buf[64];
7240 	const char *clockstr;
7241 	int ret;
7242 
7243 	if (cnt >= sizeof(buf))
7244 		return -EINVAL;
7245 
7246 	if (copy_from_user(buf, ubuf, cnt))
7247 		return -EFAULT;
7248 
7249 	buf[cnt] = 0;
7250 
7251 	clockstr = strstrip(buf);
7252 
7253 	ret = tracing_set_clock(tr, clockstr);
7254 	if (ret)
7255 		return ret;
7256 
7257 	*fpos += cnt;
7258 
7259 	return cnt;
7260 }
7261 
7262 static int tracing_clock_open(struct inode *inode, struct file *file)
7263 {
7264 	struct trace_array *tr = inode->i_private;
7265 	int ret;
7266 
7267 	ret = tracing_check_open_get_tr(tr);
7268 	if (ret)
7269 		return ret;
7270 
7271 	ret = single_open(file, tracing_clock_show, inode->i_private);
7272 	if (ret < 0)
7273 		trace_array_put(tr);
7274 
7275 	return ret;
7276 }
7277 
7278 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7279 {
7280 	struct trace_array *tr = m->private;
7281 
7282 	mutex_lock(&trace_types_lock);
7283 
7284 	if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7285 		seq_puts(m, "delta [absolute]\n");
7286 	else
7287 		seq_puts(m, "[delta] absolute\n");
7288 
7289 	mutex_unlock(&trace_types_lock);
7290 
7291 	return 0;
7292 }
7293 
7294 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7295 {
7296 	struct trace_array *tr = inode->i_private;
7297 	int ret;
7298 
7299 	ret = tracing_check_open_get_tr(tr);
7300 	if (ret)
7301 		return ret;
7302 
7303 	ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7304 	if (ret < 0)
7305 		trace_array_put(tr);
7306 
7307 	return ret;
7308 }
7309 
7310 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7311 {
7312 	if (rbe == this_cpu_read(trace_buffered_event))
7313 		return ring_buffer_time_stamp(buffer);
7314 
7315 	return ring_buffer_event_time_stamp(buffer, rbe);
7316 }
7317 
7318 /*
7319  * Set or disable using the per CPU trace_buffer_event when possible.
7320  */
7321 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7322 {
7323 	guard(mutex)(&trace_types_lock);
7324 
7325 	if (set && tr->no_filter_buffering_ref++)
7326 		return 0;
7327 
7328 	if (!set) {
7329 		if (WARN_ON_ONCE(!tr->no_filter_buffering_ref))
7330 			return -EINVAL;
7331 
7332 		--tr->no_filter_buffering_ref;
7333 	}
7334 
7335 	return 0;
7336 }
7337 
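/*
 * Per-open state for the raw per-CPU buffer read files: the iterator
 * plus a spare ring-buffer page (and the CPU and size it was
 * allocated for) that is reused across reads.
 */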
7338 struct ftrace_buffer_info {
7339 	struct trace_iterator	iter;
7340 	void			*spare;
7341 	unsigned int		spare_cpu;
7342 	unsigned int		spare_size;
7343 	unsigned int		read;
7344 };
7345 
7346 #ifdef CONFIG_TRACER_SNAPSHOT
7347 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7348 {
7349 	struct trace_array *tr = inode->i_private;
7350 	struct trace_iterator *iter;
7351 	struct seq_file *m;
7352 	int ret;
7353 
7354 	ret = tracing_check_open_get_tr(tr);
7355 	if (ret)
7356 		return ret;
7357 
7358 	if (file->f_mode & FMODE_READ) {
7359 		iter = __tracing_open(inode, file, true);
7360 		if (IS_ERR(iter))
7361 			ret = PTR_ERR(iter);
7362 	} else {
7363 		/* Writes still need the seq_file to hold the private data */
7364 		ret = -ENOMEM;
7365 		m = kzalloc(sizeof(*m), GFP_KERNEL);
7366 		if (!m)
7367 			goto out;
7368 		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7369 		if (!iter) {
7370 			kfree(m);
7371 			goto out;
7372 		}
7373 		ret = 0;
7374 
7375 		iter->tr = tr;
7376 		iter->array_buffer = &tr->max_buffer;
7377 		iter->cpu_file = tracing_get_cpu(inode);
7378 		m->private = iter;
7379 		file->private_data = m;
7380 	}
7381 out:
7382 	if (ret < 0)
7383 		trace_array_put(tr);
7384 
7385 	return ret;
7386 }
7387 
7388 static void tracing_swap_cpu_buffer(void *tr)
7389 {
7390 	update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
7391 }
7392 
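/*
 * Writes to the "snapshot" file: 0 frees the snapshot buffer, 1
 * allocates it (if needed) and swaps it with the live buffer, and
 * any other value clears the snapshot buffer's contents, if it is
 * allocated.
 */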
7393 static ssize_t
7394 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7395 		       loff_t *ppos)
7396 {
7397 	struct seq_file *m = filp->private_data;
7398 	struct trace_iterator *iter = m->private;
7399 	struct trace_array *tr = iter->tr;
7400 	unsigned long val;
7401 	int ret;
7402 
7403 	ret = tracing_update_buffers(tr);
7404 	if (ret < 0)
7405 		return ret;
7406 
7407 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7408 	if (ret)
7409 		return ret;
7410 
7411 	guard(mutex)(&trace_types_lock);
7412 
7413 	if (tr->current_trace->use_max_tr)
7414 		return -EBUSY;
7415 
7416 	local_irq_disable();
7417 	arch_spin_lock(&tr->max_lock);
7418 	if (tr->cond_snapshot)
7419 		ret = -EBUSY;
7420 	arch_spin_unlock(&tr->max_lock);
7421 	local_irq_enable();
7422 	if (ret)
7423 		return ret;
7424 
7425 	switch (val) {
7426 	case 0:
7427 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
7428 			return -EINVAL;
7429 		if (tr->allocated_snapshot)
7430 			free_snapshot(tr);
7431 		break;
7432 	case 1:
7433 /* Only allow per-cpu swap if the ring buffer supports it */
7434 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7435 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
7436 			return -EINVAL;
7437 #endif
7438 		if (tr->allocated_snapshot)
7439 			ret = resize_buffer_duplicate_size(&tr->max_buffer,
7440 					&tr->array_buffer, iter->cpu_file);
7441 
7442 		ret = tracing_arm_snapshot_locked(tr);
7443 		if (ret)
7444 			return ret;
7445 
7446 		/* Now, we're going to swap */
7447 		if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7448 			local_irq_disable();
7449 			update_max_tr(tr, current, smp_processor_id(), NULL);
7450 			local_irq_enable();
7451 		} else {
7452 			smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7453 						 (void *)tr, 1);
7454 		}
7455 		tracing_disarm_snapshot(tr);
7456 		break;
7457 	default:
7458 		if (tr->allocated_snapshot) {
7459 			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7460 				tracing_reset_online_cpus(&tr->max_buffer);
7461 			else
7462 				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7463 		}
7464 		break;
7465 	}
7466 
7467 	if (ret >= 0) {
7468 		*ppos += cnt;
7469 		ret = cnt;
7470 	}
7471 
7472 	return ret;
7473 }
7474 
7475 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7476 {
7477 	struct seq_file *m = file->private_data;
7478 	int ret;
7479 
7480 	ret = tracing_release(inode, file);
7481 
7482 	if (file->f_mode & FMODE_READ)
7483 		return ret;
7484 
7485 	/* If write only, the seq_file is just a stub */
7486 	if (m)
7487 		kfree(m->private);
7488 	kfree(m);
7489 
7490 	return 0;
7491 }
7492 
7493 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7494 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7495 				    size_t count, loff_t *ppos);
7496 static int tracing_buffers_release(struct inode *inode, struct file *file);
7497 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7498 		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7499 
7500 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7501 {
7502 	struct ftrace_buffer_info *info;
7503 	int ret;
7504 
7505 	/* The following checks for tracefs lockdown */
7506 	ret = tracing_buffers_open(inode, filp);
7507 	if (ret < 0)
7508 		return ret;
7509 
7510 	info = filp->private_data;
7511 
7512 	if (info->iter.trace->use_max_tr) {
7513 		tracing_buffers_release(inode, filp);
7514 		return -EBUSY;
7515 	}
7516 
7517 	info->iter.snapshot = true;
7518 	info->iter.array_buffer = &info->iter.tr->max_buffer;
7519 
7520 	return ret;
7521 }
7522 
7523 #endif /* CONFIG_TRACER_SNAPSHOT */
7524 
7525 
7526 static const struct file_operations tracing_thresh_fops = {
7527 	.open		= tracing_open_generic,
7528 	.read		= tracing_thresh_read,
7529 	.write		= tracing_thresh_write,
7530 	.llseek		= generic_file_llseek,
7531 };
7532 
7533 #ifdef CONFIG_TRACER_MAX_TRACE
7534 static const struct file_operations tracing_max_lat_fops = {
7535 	.open		= tracing_open_generic_tr,
7536 	.read		= tracing_max_lat_read,
7537 	.write		= tracing_max_lat_write,
7538 	.llseek		= generic_file_llseek,
7539 	.release	= tracing_release_generic_tr,
7540 };
7541 #endif
7542 
7543 static const struct file_operations set_tracer_fops = {
7544 	.open		= tracing_open_generic_tr,
7545 	.read		= tracing_set_trace_read,
7546 	.write		= tracing_set_trace_write,
7547 	.llseek		= generic_file_llseek,
7548 	.release	= tracing_release_generic_tr,
7549 };
7550 
7551 static const struct file_operations tracing_pipe_fops = {
7552 	.open		= tracing_open_pipe,
7553 	.poll		= tracing_poll_pipe,
7554 	.read		= tracing_read_pipe,
7555 	.splice_read	= tracing_splice_read_pipe,
7556 	.release	= tracing_release_pipe,
7557 };
7558 
7559 static const struct file_operations tracing_entries_fops = {
7560 	.open		= tracing_open_generic_tr,
7561 	.read		= tracing_entries_read,
7562 	.write		= tracing_entries_write,
7563 	.llseek		= generic_file_llseek,
7564 	.release	= tracing_release_generic_tr,
7565 };
7566 
7567 static const struct file_operations tracing_buffer_meta_fops = {
7568 	.open		= tracing_buffer_meta_open,
7569 	.read		= seq_read,
7570 	.llseek		= seq_lseek,
7571 	.release	= tracing_seq_release,
7572 };
7573 
7574 static const struct file_operations tracing_total_entries_fops = {
7575 	.open		= tracing_open_generic_tr,
7576 	.read		= tracing_total_entries_read,
7577 	.llseek		= generic_file_llseek,
7578 	.release	= tracing_release_generic_tr,
7579 };
7580 
7581 static const struct file_operations tracing_free_buffer_fops = {
7582 	.open		= tracing_open_generic_tr,
7583 	.write		= tracing_free_buffer_write,
7584 	.release	= tracing_free_buffer_release,
7585 };
7586 
7587 static const struct file_operations tracing_mark_fops = {
7588 	.open		= tracing_mark_open,
7589 	.write		= tracing_mark_write,
7590 	.release	= tracing_release_generic_tr,
7591 };
7592 
7593 static const struct file_operations tracing_mark_raw_fops = {
7594 	.open		= tracing_mark_open,
7595 	.write		= tracing_mark_raw_write,
7596 	.release	= tracing_release_generic_tr,
7597 };
7598 
7599 static const struct file_operations trace_clock_fops = {
7600 	.open		= tracing_clock_open,
7601 	.read		= seq_read,
7602 	.llseek		= seq_lseek,
7603 	.release	= tracing_single_release_tr,
7604 	.write		= tracing_clock_write,
7605 };
7606 
7607 static const struct file_operations trace_time_stamp_mode_fops = {
7608 	.open		= tracing_time_stamp_mode_open,
7609 	.read		= seq_read,
7610 	.llseek		= seq_lseek,
7611 	.release	= tracing_single_release_tr,
7612 };
7613 
7614 static const struct file_operations last_boot_fops = {
7615 	.open		= tracing_last_boot_open,
7616 	.read		= seq_read,
7617 	.llseek		= seq_lseek,
7618 	.release	= tracing_seq_release,
7619 };
7620 
7621 #ifdef CONFIG_TRACER_SNAPSHOT
7622 static const struct file_operations snapshot_fops = {
7623 	.open		= tracing_snapshot_open,
7624 	.read		= seq_read,
7625 	.write		= tracing_snapshot_write,
7626 	.llseek		= tracing_lseek,
7627 	.release	= tracing_snapshot_release,
7628 };
7629 
7630 static const struct file_operations snapshot_raw_fops = {
7631 	.open		= snapshot_raw_open,
7632 	.read		= tracing_buffers_read,
7633 	.release	= tracing_buffers_release,
7634 	.splice_read	= tracing_buffers_splice_read,
7635 };
7636 
7637 #endif /* CONFIG_TRACER_SNAPSHOT */
7638 
7639 /*
7640  * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7641  * @filp: The active open file structure
7642  * @ubuf: The userspace provided buffer to read value into
7643  * @ubuf: The userspace provided buffer containing the value to write
7644  * @cnt: The maximum number of bytes to read from @ubuf
7645  *
7646  * This function implements the write interface for a struct trace_min_max_param.
7647  * The filp->private_data must point to a trace_min_max_param structure that
7648  * defines where to write the value, the min and the max acceptable values,
7649  * and a lock to protect the write.
7650  */
7651 static ssize_t
7652 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7653 {
7654 	struct trace_min_max_param *param = filp->private_data;
7655 	u64 val;
7656 	int err;
7657 
7658 	if (!param)
7659 		return -EFAULT;
7660 
7661 	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7662 	if (err)
7663 		return err;
7664 
7665 	if (param->lock)
7666 		mutex_lock(param->lock);
7667 
7668 	if (param->min && val < *param->min)
7669 		err = -EINVAL;
7670 
7671 	if (param->max && val > *param->max)
7672 		err = -EINVAL;
7673 
7674 	if (!err)
7675 		*param->val = val;
7676 
7677 	if (param->lock)
7678 		mutex_unlock(param->lock);
7679 
7680 	if (err)
7681 		return err;
7682 
7683 	return cnt;
7684 }
7685 
7686 /*
7687  * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7688  * @filp: The active open file structure
7689  * @ubuf: The userspace provided buffer to read value into
7690  * @cnt: The maximum number of bytes to read
7691  * @ppos: The current "file" position
7692  *
7693  * This function implements the read interface for a struct trace_min_max_param.
7694  * The filp->private_data must point to a trace_min_max_param struct with valid
7695  * data.
7696  */
7697 static ssize_t
7698 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7699 {
7700 	struct trace_min_max_param *param = filp->private_data;
7701 	char buf[U64_STR_SIZE];
7702 	int len;
7703 	u64 val;
7704 
7705 	if (!param)
7706 		return -EFAULT;
7707 
7708 	val = *param->val;
7709 
7710 	if (cnt > sizeof(buf))
7711 		cnt = sizeof(buf);
7712 
7713 	len = snprintf(buf, sizeof(buf), "%llu\n", val);
7714 
7715 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7716 }
7717 
7718 const struct file_operations trace_min_max_fops = {
7719 	.open		= tracing_open_generic,
7720 	.read		= trace_min_max_read,
7721 	.write		= trace_min_max_write,
7722 };
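/*
 * A minimal sketch of how a u64 tunable could be wired up to
 * trace_min_max_fops (the names below are purely illustrative, not
 * existing kernel symbols): the value is clamped to [1, 1000] and the
 * update is serialized by its own mutex.
 *
 *	static u64 my_val = 10, my_min = 1, my_max = 1000;
 *	static DEFINE_MUTEX(my_lock);
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_tunable", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);
 */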
7723 
7724 #define TRACING_LOG_ERRS_MAX	8
7725 #define TRACING_LOG_LOC_MAX	128
7726 
7727 #define CMD_PREFIX "  Command: "
7728 
7729 struct err_info {
7730 	const char	**errs;	/* ptr to loc-specific array of err strings */
7731 	u8		type;	/* index into errs -> specific err string */
7732 	u16		pos;	/* caret position */
7733 	u64		ts;
7734 };
7735 
7736 struct tracing_log_err {
7737 	struct list_head	list;
7738 	struct err_info		info;
7739 	char			loc[TRACING_LOG_LOC_MAX]; /* err location */
7740 	char			*cmd;                     /* what caused err */
7741 };
7742 
7743 static DEFINE_MUTEX(tracing_err_log_lock);
7744 
7745 static struct tracing_log_err *alloc_tracing_log_err(int len)
7746 {
7747 	struct tracing_log_err *err;
7748 
7749 	err = kzalloc(sizeof(*err), GFP_KERNEL);
7750 	if (!err)
7751 		return ERR_PTR(-ENOMEM);
7752 
7753 	err->cmd = kzalloc(len, GFP_KERNEL);
7754 	if (!err->cmd) {
7755 		kfree(err);
7756 		return ERR_PTR(-ENOMEM);
7757 	}
7758 
7759 	return err;
7760 }
7761 
7762 static void free_tracing_log_err(struct tracing_log_err *err)
7763 {
7764 	kfree(err->cmd);
7765 	kfree(err);
7766 }
7767 
7768 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7769 						   int len)
7770 {
7771 	struct tracing_log_err *err;
7772 	char *cmd;
7773 
7774 	if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7775 		err = alloc_tracing_log_err(len);
7776 		if (PTR_ERR(err) != -ENOMEM)
7777 			tr->n_err_log_entries++;
7778 
7779 		return err;
7780 	}
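	/*
	 * The log is full: recycle the oldest entry, giving it a freshly
	 * sized command buffer. The caller re-adds it at the tail of the
	 * error log.
	 */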
7781 	cmd = kzalloc(len, GFP_KERNEL);
7782 	if (!cmd)
7783 		return ERR_PTR(-ENOMEM);
7784 	err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7785 	kfree(err->cmd);
7786 	err->cmd = cmd;
7787 	list_del(&err->list);
7788 
7789 	return err;
7790 }
7791 
7792 /**
7793  * err_pos - find the position of a string within a command for error careting
7794  * @cmd: The tracing command that caused the error
7795  * @str: The string to position the caret at within @cmd
7796  *
7797  * Finds the position of the first occurrence of @str within @cmd.  The
7798  * return value can be passed to tracing_log_err() for caret placement
7799  * within @cmd.
7800  *
7801  * Returns the index within @cmd of the first occurrence of @str or 0
7802  * if @str was not found.
7803  */
7804 unsigned int err_pos(char *cmd, const char *str)
7805 {
7806 	char *found;
7807 
7808 	if (WARN_ON(!strlen(cmd)))
7809 		return 0;
7810 
7811 	found = strstr(cmd, str);
7812 	if (found)
7813 		return found - cmd;
7814 
7815 	return 0;
7816 }
7817 
7818 /**
7819  * tracing_log_err - write an error to the tracing error log
7820  * @tr: The associated trace array for the error (NULL for top level array)
7821  * @loc: A string describing where the error occurred
7822  * @cmd: The tracing command that caused the error
7823  * @errs: The array of loc-specific static error strings
7824  * @type: The index into errs[], which produces the specific static err string
7825  * @pos: The position the caret should be placed in the cmd
7826  *
7827  * Writes an error into tracing/error_log of the form:
7828  *
7829  * <loc>: error: <text>
7830  *   Command: <cmd>
7831  *              ^
7832  *
7833  * tracing/error_log is a small log file containing the last
7834  * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
7835  * unless there has been a tracing error, and the error log can be
7836  * cleared and have its memory freed by writing the empty string in
7837  * truncation mode to it i.e. echo > tracing/error_log.
7838  * truncation mode to it, i.e. echo > tracing/error_log.
7839  * NOTE: the @errs array along with the @type param are used to
7840  * produce a static error string - this string is not copied and saved
7841  * when the error is logged - only a pointer to it is saved.  See
7842  * existing callers for examples of how static strings are typically
7843  * defined for use with tracing_log_err().
7844  */
7845 void tracing_log_err(struct trace_array *tr,
7846 		     const char *loc, const char *cmd,
7847 		     const char **errs, u8 type, u16 pos)
7848 {
7849 	struct tracing_log_err *err;
7850 	int len = 0;
7851 
7852 	if (!tr)
7853 		tr = &global_trace;
7854 
7855 	len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
7856 
7857 	guard(mutex)(&tracing_err_log_lock);
7858 
7859 	err = get_tracing_log_err(tr, len);
7860 	if (PTR_ERR(err) == -ENOMEM)
7861 		return;
7862 
7863 	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7864 	snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
7865 
7866 	err->info.errs = errs;
7867 	err->info.type = type;
7868 	err->info.pos = pos;
7869 	err->info.ts = local_clock();
7870 
7871 	list_add_tail(&err->list, &tr->err_log);
7872 }
7873 
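/*
 * A sketch of a typical caller (the names are hypothetical, not an
 * existing kernel user): a command parser defines its loc-specific
 * static error strings and uses err_pos() to place the caret under the
 * offending token.
 *
 *	static const char *my_cmd_errs[] = {
 *		"Duplicate field name",
 *		"Field not found",
 *	};
 *
 *	tracing_log_err(tr, "my_cmd", cmd_str, my_cmd_errs,
 *			1, err_pos(cmd_str, field_str));
 *
 * With type 1 this would show up in tracing/error_log roughly as:
 *
 *	[   12.345678] my_cmd: error: Field not found
 *	  Command: <cmd_str>
 *	              ^
 */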
7874 static void clear_tracing_err_log(struct trace_array *tr)
7875 {
7876 	struct tracing_log_err *err, *next;
7877 
7878 	mutex_lock(&tracing_err_log_lock);
7879 	list_for_each_entry_safe(err, next, &tr->err_log, list) {
7880 		list_del(&err->list);
7881 		free_tracing_log_err(err);
7882 	}
7883 
7884 	tr->n_err_log_entries = 0;
7885 	mutex_unlock(&tracing_err_log_lock);
7886 }
7887 
7888 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7889 {
7890 	struct trace_array *tr = m->private;
7891 
7892 	mutex_lock(&tracing_err_log_lock);
7893 
7894 	return seq_list_start(&tr->err_log, *pos);
7895 }
7896 
7897 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7898 {
7899 	struct trace_array *tr = m->private;
7900 
7901 	return seq_list_next(v, &tr->err_log, pos);
7902 }
7903 
7904 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7905 {
7906 	mutex_unlock(&tracing_err_log_lock);
7907 }
7908 
7909 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
7910 {
7911 	u16 i;
7912 
7913 	for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7914 		seq_putc(m, ' ');
7915 	for (i = 0; i < pos; i++)
7916 		seq_putc(m, ' ');
7917 	seq_puts(m, "^\n");
7918 }
7919 
7920 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7921 {
7922 	struct tracing_log_err *err = v;
7923 
7924 	if (err) {
7925 		const char *err_text = err->info.errs[err->info.type];
7926 		u64 sec = err->info.ts;
7927 		u32 nsec;
7928 
7929 		nsec = do_div(sec, NSEC_PER_SEC);
7930 		seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7931 			   err->loc, err_text);
7932 		seq_printf(m, "%s", err->cmd);
7933 		tracing_err_log_show_pos(m, err->info.pos);
7934 	}
7935 
7936 	return 0;
7937 }
7938 
7939 static const struct seq_operations tracing_err_log_seq_ops = {
7940 	.start  = tracing_err_log_seq_start,
7941 	.next   = tracing_err_log_seq_next,
7942 	.stop   = tracing_err_log_seq_stop,
7943 	.show   = tracing_err_log_seq_show
7944 };
7945 
7946 static int tracing_err_log_open(struct inode *inode, struct file *file)
7947 {
7948 	struct trace_array *tr = inode->i_private;
7949 	int ret = 0;
7950 
7951 	ret = tracing_check_open_get_tr(tr);
7952 	if (ret)
7953 		return ret;
7954 
7955 	/* If this file was opened for write, then erase contents */
7956 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7957 		clear_tracing_err_log(tr);
7958 
7959 	if (file->f_mode & FMODE_READ) {
7960 		ret = seq_open(file, &tracing_err_log_seq_ops);
7961 		if (!ret) {
7962 			struct seq_file *m = file->private_data;
7963 			m->private = tr;
7964 		} else {
7965 			trace_array_put(tr);
7966 		}
7967 	}
7968 	return ret;
7969 }
7970 
7971 static ssize_t tracing_err_log_write(struct file *file,
7972 				     const char __user *buffer,
7973 				     size_t count, loff_t *ppos)
7974 {
7975 	return count;
7976 }
7977 
7978 static int tracing_err_log_release(struct inode *inode, struct file *file)
7979 {
7980 	struct trace_array *tr = inode->i_private;
7981 
7982 	trace_array_put(tr);
7983 
7984 	if (file->f_mode & FMODE_READ)
7985 		seq_release(inode, file);
7986 
7987 	return 0;
7988 }
7989 
7990 static const struct file_operations tracing_err_log_fops = {
7991 	.open           = tracing_err_log_open,
7992 	.write		= tracing_err_log_write,
7993 	.read           = seq_read,
7994 	.llseek         = tracing_lseek,
7995 	.release        = tracing_err_log_release,
7996 };
7997 
7998 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7999 {
8000 	struct trace_array *tr = inode->i_private;
8001 	struct ftrace_buffer_info *info;
8002 	int ret;
8003 
8004 	ret = tracing_check_open_get_tr(tr);
8005 	if (ret)
8006 		return ret;
8007 
8008 	info = kvzalloc(sizeof(*info), GFP_KERNEL);
8009 	if (!info) {
8010 		trace_array_put(tr);
8011 		return -ENOMEM;
8012 	}
8013 
8014 	mutex_lock(&trace_types_lock);
8015 
8016 	info->iter.tr		= tr;
8017 	info->iter.cpu_file	= tracing_get_cpu(inode);
8018 	info->iter.trace	= tr->current_trace;
8019 	info->iter.array_buffer = &tr->array_buffer;
8020 	info->spare		= NULL;
8021 	/* Force reading ring buffer for first read */
8022 	info->read		= (unsigned int)-1;
8023 
8024 	filp->private_data = info;
8025 
8026 	tr->trace_ref++;
8027 
8028 	mutex_unlock(&trace_types_lock);
8029 
8030 	ret = nonseekable_open(inode, filp);
8031 	if (ret < 0)
8032 		trace_array_put(tr);
8033 
8034 	return ret;
8035 }
8036 
8037 static __poll_t
8038 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8039 {
8040 	struct ftrace_buffer_info *info = filp->private_data;
8041 	struct trace_iterator *iter = &info->iter;
8042 
8043 	return trace_poll(iter, filp, poll_table);
8044 }
8045 
8046 static ssize_t
8047 tracing_buffers_read(struct file *filp, char __user *ubuf,
8048 		     size_t count, loff_t *ppos)
8049 {
8050 	struct ftrace_buffer_info *info = filp->private_data;
8051 	struct trace_iterator *iter = &info->iter;
8052 	void *trace_data;
8053 	int page_size;
8054 	ssize_t ret = 0;
8055 	ssize_t size;
8056 
8057 	if (!count)
8058 		return 0;
8059 
8060 #ifdef CONFIG_TRACER_MAX_TRACE
8061 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8062 		return -EBUSY;
8063 #endif
8064 
8065 	page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
8066 
8067 	/* Make sure the spare matches the current sub buffer size */
8068 	if (info->spare) {
8069 		if (page_size != info->spare_size) {
8070 			ring_buffer_free_read_page(iter->array_buffer->buffer,
8071 						   info->spare_cpu, info->spare);
8072 			info->spare = NULL;
8073 		}
8074 	}
8075 
8076 	if (!info->spare) {
8077 		info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8078 							  iter->cpu_file);
8079 		if (IS_ERR(info->spare)) {
8080 			ret = PTR_ERR(info->spare);
8081 			info->spare = NULL;
8082 		} else {
8083 			info->spare_cpu = iter->cpu_file;
8084 			info->spare_size = page_size;
8085 		}
8086 	}
8087 	if (!info->spare)
8088 		return ret;
8089 
8090 	/* Do we have previous read data to read? */
8091 	if (info->read < page_size)
8092 		goto read;
8093 
8094  again:
8095 	trace_access_lock(iter->cpu_file);
8096 	ret = ring_buffer_read_page(iter->array_buffer->buffer,
8097 				    info->spare,
8098 				    count,
8099 				    iter->cpu_file, 0);
8100 	trace_access_unlock(iter->cpu_file);
8101 
8102 	if (ret < 0) {
8103 		if (trace_empty(iter) && !iter->closed) {
8104 			if ((filp->f_flags & O_NONBLOCK))
8105 				return -EAGAIN;
8106 
8107 			ret = wait_on_pipe(iter, 0);
8108 			if (ret)
8109 				return ret;
8110 
8111 			goto again;
8112 		}
8113 		return 0;
8114 	}
8115 
8116 	info->read = 0;
8117  read:
8118 	size = page_size - info->read;
8119 	if (size > count)
8120 		size = count;
8121 	trace_data = ring_buffer_read_page_data(info->spare);
8122 	ret = copy_to_user(ubuf, trace_data + info->read, size);
8123 	if (ret == size)
8124 		return -EFAULT;
8125 
8126 	size -= ret;
8127 
8128 	*ppos += size;
8129 	info->read += size;
8130 
8131 	return size;
8132 }
8133 
8134 static int tracing_buffers_flush(struct file *file, fl_owner_t id)
8135 {
8136 	struct ftrace_buffer_info *info = file->private_data;
8137 	struct trace_iterator *iter = &info->iter;
8138 
8139 	iter->closed = true;
8140 	/* Make sure the waiters see the new wait_index */
8141 	(void)atomic_fetch_inc_release(&iter->wait_index);
8142 
8143 	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8144 
8145 	return 0;
8146 }
8147 
8148 static int tracing_buffers_release(struct inode *inode, struct file *file)
8149 {
8150 	struct ftrace_buffer_info *info = file->private_data;
8151 	struct trace_iterator *iter = &info->iter;
8152 
8153 	mutex_lock(&trace_types_lock);
8154 
8155 	iter->tr->trace_ref--;
8156 
8157 	__trace_array_put(iter->tr);
8158 
8159 	if (info->spare)
8160 		ring_buffer_free_read_page(iter->array_buffer->buffer,
8161 					   info->spare_cpu, info->spare);
8162 	kvfree(info);
8163 
8164 	mutex_unlock(&trace_types_lock);
8165 
8166 	return 0;
8167 }
8168 
8169 struct buffer_ref {
8170 	struct trace_buffer	*buffer;
8171 	void			*page;
8172 	int			cpu;
8173 	refcount_t		refcount;
8174 };
8175 
8176 static void buffer_ref_release(struct buffer_ref *ref)
8177 {
8178 	if (!refcount_dec_and_test(&ref->refcount))
8179 		return;
8180 	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8181 	kfree(ref);
8182 }
8183 
8184 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8185 				    struct pipe_buffer *buf)
8186 {
8187 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8188 
8189 	buffer_ref_release(ref);
8190 	buf->private = 0;
8191 }
8192 
8193 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8194 				struct pipe_buffer *buf)
8195 {
8196 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8197 
8198 	if (refcount_read(&ref->refcount) > INT_MAX/2)
8199 		return false;
8200 
8201 	refcount_inc(&ref->refcount);
8202 	return true;
8203 }
8204 
8205 /* Pipe buffer operations for a buffer. */
8206 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8207 	.release		= buffer_pipe_buf_release,
8208 	.get			= buffer_pipe_buf_get,
8209 };
8210 
8211 /*
8212  * Callback from splice_to_pipe(): release any pages left at the end
8213  * of the spd in case we errored out while filling the pipe.
8214  */
8215 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8216 {
8217 	struct buffer_ref *ref =
8218 		(struct buffer_ref *)spd->partial[i].private;
8219 
8220 	buffer_ref_release(ref);
8221 	spd->partial[i].private = 0;
8222 }
8223 
8224 static ssize_t
8225 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8226 			    struct pipe_inode_info *pipe, size_t len,
8227 			    unsigned int flags)
8228 {
8229 	struct ftrace_buffer_info *info = file->private_data;
8230 	struct trace_iterator *iter = &info->iter;
8231 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
8232 	struct page *pages_def[PIPE_DEF_BUFFERS];
8233 	struct splice_pipe_desc spd = {
8234 		.pages		= pages_def,
8235 		.partial	= partial_def,
8236 		.nr_pages_max	= PIPE_DEF_BUFFERS,
8237 		.ops		= &buffer_pipe_buf_ops,
8238 		.spd_release	= buffer_spd_release,
8239 	};
8240 	struct buffer_ref *ref;
8241 	bool woken = false;
8242 	int page_size;
8243 	int entries, i;
8244 	ssize_t ret = 0;
8245 
8246 #ifdef CONFIG_TRACER_MAX_TRACE
8247 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8248 		return -EBUSY;
8249 #endif
8250 
8251 	page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
8252 	if (*ppos & (page_size - 1))
8253 		return -EINVAL;
8254 
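	/*
	 * Splicing works in whole sub-buffers: the offset must be
	 * sub-buffer aligned (checked above) and the length is rounded
	 * down to a sub-buffer multiple; asking for less than one
	 * sub-buffer is an error.
	 */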
8255 	if (len & (page_size - 1)) {
8256 		if (len < page_size)
8257 			return -EINVAL;
8258 		len &= (~(page_size - 1));
8259 	}
8260 
8261 	if (splice_grow_spd(pipe, &spd))
8262 		return -ENOMEM;
8263 
8264  again:
8265 	trace_access_lock(iter->cpu_file);
8266 	entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8267 
8268 	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) {
8269 		struct page *page;
8270 		int r;
8271 
8272 		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8273 		if (!ref) {
8274 			ret = -ENOMEM;
8275 			break;
8276 		}
8277 
8278 		refcount_set(&ref->refcount, 1);
8279 		ref->buffer = iter->array_buffer->buffer;
8280 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8281 		if (IS_ERR(ref->page)) {
8282 			ret = PTR_ERR(ref->page);
8283 			ref->page = NULL;
8284 			kfree(ref);
8285 			break;
8286 		}
8287 		ref->cpu = iter->cpu_file;
8288 
8289 		r = ring_buffer_read_page(ref->buffer, ref->page,
8290 					  len, iter->cpu_file, 1);
8291 		if (r < 0) {
8292 			ring_buffer_free_read_page(ref->buffer, ref->cpu,
8293 						   ref->page);
8294 			kfree(ref);
8295 			break;
8296 		}
8297 
8298 		page = virt_to_page(ring_buffer_read_page_data(ref->page));
8299 
8300 		spd.pages[i] = page;
8301 		spd.partial[i].len = page_size;
8302 		spd.partial[i].offset = 0;
8303 		spd.partial[i].private = (unsigned long)ref;
8304 		spd.nr_pages++;
8305 		*ppos += page_size;
8306 
8307 		entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8308 	}
8309 
8310 	trace_access_unlock(iter->cpu_file);
8311 	spd.nr_pages = i;
8312 
8313 	/* did we read anything? */
8314 	if (!spd.nr_pages) {
8315 
8316 		if (ret)
8317 			goto out;
8318 
8319 		if (woken)
8320 			goto out;
8321 
8322 		ret = -EAGAIN;
8323 		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8324 			goto out;
8325 
8326 		ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
8327 		if (ret)
8328 			goto out;
8329 
8330 		/* No need to wait after waking up when tracing is off */
8331 		if (!tracer_tracing_is_on(iter->tr))
8332 			goto out;
8333 
8334 		/* Iterate one more time to collect any new data then exit */
8335 		woken = true;
8336 
8337 		goto again;
8338 	}
8339 
8340 	ret = splice_to_pipe(pipe, &spd);
8341 out:
8342 	splice_shrink_spd(&spd);
8343 
8344 	return ret;
8345 }
8346 
8347 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8348 {
8349 	struct ftrace_buffer_info *info = file->private_data;
8350 	struct trace_iterator *iter = &info->iter;
8351 	int err;
8352 
8353 	if (cmd == TRACE_MMAP_IOCTL_GET_READER) {
8354 		if (!(file->f_flags & O_NONBLOCK)) {
8355 			err = ring_buffer_wait(iter->array_buffer->buffer,
8356 					       iter->cpu_file,
8357 					       iter->tr->buffer_percent,
8358 					       NULL, NULL);
8359 			if (err)
8360 				return err;
8361 		}
8362 
8363 		return ring_buffer_map_get_reader(iter->array_buffer->buffer,
8364 						  iter->cpu_file);
8365 	} else if (cmd) {
8366 		return -ENOTTY;
8367 	}
8368 
8369 	/*
8370 	 * An ioctl call with cmd 0 to the ring buffer file will wake up all
8371 	 * waiters
8372 	 */
8373 	mutex_lock(&trace_types_lock);
8374 
8375 	/* Make sure the waiters see the new wait_index */
8376 	(void)atomic_fetch_inc_release(&iter->wait_index);
8377 
8378 	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8379 
8380 	mutex_unlock(&trace_types_lock);
8381 	return 0;
8382 }
8383 
8384 #ifdef CONFIG_TRACER_MAX_TRACE
8385 static int get_snapshot_map(struct trace_array *tr)
8386 {
8387 	int err = 0;
8388 
8389 	/*
8390 	 * Called with mmap_lock held. lockdep would be unhappy if we were to
8391 	 * take trace_types_lock here. Instead use the specific
8392 	 * snapshot_trigger_lock.
8393 	 */
8394 	spin_lock(&tr->snapshot_trigger_lock);
8395 
8396 	if (tr->snapshot || tr->mapped == UINT_MAX)
8397 		err = -EBUSY;
8398 	else
8399 		tr->mapped++;
8400 
8401 	spin_unlock(&tr->snapshot_trigger_lock);
8402 
8403 	/* Wait for update_max_tr() to observe iter->tr->mapped */
8404 	if (tr->mapped == 1)
8405 		synchronize_rcu();
8406 
8407 	return err;
8408 
8409 }
8410 static void put_snapshot_map(struct trace_array *tr)
8411 {
8412 	spin_lock(&tr->snapshot_trigger_lock);
8413 	if (!WARN_ON(!tr->mapped))
8414 		tr->mapped--;
8415 	spin_unlock(&tr->snapshot_trigger_lock);
8416 }
8417 #else
8418 static inline int get_snapshot_map(struct trace_array *tr) { return 0; }
8419 static inline void put_snapshot_map(struct trace_array *tr) { }
8420 #endif
8421 
8422 static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
8423 {
8424 	struct ftrace_buffer_info *info = vma->vm_file->private_data;
8425 	struct trace_iterator *iter = &info->iter;
8426 
8427 	WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file));
8428 	put_snapshot_map(iter->tr);
8429 }
8430 
8431 static const struct vm_operations_struct tracing_buffers_vmops = {
8432 	.close		= tracing_buffers_mmap_close,
8433 };
8434 
8435 static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
8436 {
8437 	struct ftrace_buffer_info *info = filp->private_data;
8438 	struct trace_iterator *iter = &info->iter;
8439 	int ret = 0;
8440 
8441 	/* Currently the boot mapped buffer is not supported for mmap */
8442 	if (iter->tr->flags & TRACE_ARRAY_FL_BOOT)
8443 		return -ENODEV;
8444 
8445 	ret = get_snapshot_map(iter->tr);
8446 	if (ret)
8447 		return ret;
8448 
8449 	ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file, vma);
8450 	if (ret)
8451 		put_snapshot_map(iter->tr);
8452 
8453 	vma->vm_ops = &tracing_buffers_vmops;
8454 
8455 	return ret;
8456 }
8457 
8458 static const struct file_operations tracing_buffers_fops = {
8459 	.open		= tracing_buffers_open,
8460 	.read		= tracing_buffers_read,
8461 	.poll		= tracing_buffers_poll,
8462 	.release	= tracing_buffers_release,
8463 	.flush		= tracing_buffers_flush,
8464 	.splice_read	= tracing_buffers_splice_read,
8465 	.unlocked_ioctl = tracing_buffers_ioctl,
8466 	.mmap		= tracing_buffers_mmap,
8467 };
8468 
8469 static ssize_t
8470 tracing_stats_read(struct file *filp, char __user *ubuf,
8471 		   size_t count, loff_t *ppos)
8472 {
8473 	struct inode *inode = file_inode(filp);
8474 	struct trace_array *tr = inode->i_private;
8475 	struct array_buffer *trace_buf = &tr->array_buffer;
8476 	int cpu = tracing_get_cpu(inode);
8477 	struct trace_seq *s;
8478 	unsigned long cnt;
8479 	unsigned long long t;
8480 	unsigned long usec_rem;
8481 
8482 	s = kmalloc(sizeof(*s), GFP_KERNEL);
8483 	if (!s)
8484 		return -ENOMEM;
8485 
8486 	trace_seq_init(s);
8487 
8488 	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8489 	trace_seq_printf(s, "entries: %ld\n", cnt);
8490 
8491 	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8492 	trace_seq_printf(s, "overrun: %ld\n", cnt);
8493 
8494 	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8495 	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8496 
8497 	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8498 	trace_seq_printf(s, "bytes: %ld\n", cnt);
8499 
8500 	if (trace_clocks[tr->clock_id].in_ns) {
8501 		/* local or global for trace_clock */
8502 		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8503 		usec_rem = do_div(t, USEC_PER_SEC);
8504 		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8505 								t, usec_rem);
8506 
8507 		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8508 		usec_rem = do_div(t, USEC_PER_SEC);
8509 		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8510 	} else {
8511 		/* counter or tsc mode for trace_clock */
8512 		trace_seq_printf(s, "oldest event ts: %llu\n",
8513 				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8514 
8515 		trace_seq_printf(s, "now ts: %llu\n",
8516 				ring_buffer_time_stamp(trace_buf->buffer));
8517 	}
8518 
8519 	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8520 	trace_seq_printf(s, "dropped events: %ld\n", cnt);
8521 
8522 	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8523 	trace_seq_printf(s, "read events: %ld\n", cnt);
8524 
8525 	count = simple_read_from_buffer(ubuf, count, ppos,
8526 					s->buffer, trace_seq_used(s));
8527 
8528 	kfree(s);
8529 
8530 	return count;
8531 }
8532 
8533 static const struct file_operations tracing_stats_fops = {
8534 	.open		= tracing_open_generic_tr,
8535 	.read		= tracing_stats_read,
8536 	.llseek		= generic_file_llseek,
8537 	.release	= tracing_release_generic_tr,
8538 };
8539 
8540 #ifdef CONFIG_DYNAMIC_FTRACE
8541 
8542 static ssize_t
8543 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8544 		  size_t cnt, loff_t *ppos)
8545 {
8546 	ssize_t ret;
8547 	char *buf;
8548 	int r;
8549 
8550 	/* 512 should be plenty to hold the amount needed */
8551 #define DYN_INFO_BUF_SIZE	512
8552 
8553 	buf = kmalloc(DYN_INFO_BUF_SIZE, GFP_KERNEL);
8554 	if (!buf)
8555 		return -ENOMEM;
8556 
8557 	r = scnprintf(buf, DYN_INFO_BUF_SIZE,
8558 		      "%ld pages:%ld groups: %ld\n"
8559 		      "ftrace boot update time = %llu (ns)\n"
8560 		      "ftrace module total update time = %llu (ns)\n",
8561 		      ftrace_update_tot_cnt,
8562 		      ftrace_number_of_pages,
8563 		      ftrace_number_of_groups,
8564 		      ftrace_update_time,
8565 		      ftrace_total_mod_time);
8566 
8567 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8568 	kfree(buf);
8569 	return ret;
8570 }
8571 
8572 static const struct file_operations tracing_dyn_info_fops = {
8573 	.open		= tracing_open_generic,
8574 	.read		= tracing_read_dyn_info,
8575 	.llseek		= generic_file_llseek,
8576 };
8577 #endif /* CONFIG_DYNAMIC_FTRACE */
8578 
8579 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8580 static void
8581 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8582 		struct trace_array *tr, struct ftrace_probe_ops *ops,
8583 		void *data)
8584 {
8585 	tracing_snapshot_instance(tr);
8586 }
8587 
8588 static void
8589 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8590 		      struct trace_array *tr, struct ftrace_probe_ops *ops,
8591 		      void *data)
8592 {
8593 	struct ftrace_func_mapper *mapper = data;
8594 	long *count = NULL;
8595 
8596 	if (mapper)
8597 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8598 
8599 	if (count) {
8600 
8601 		if (*count <= 0)
8602 			return;
8603 
8604 		(*count)--;
8605 	}
8606 
8607 	tracing_snapshot_instance(tr);
8608 }
8609 
8610 static int
8611 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8612 		      struct ftrace_probe_ops *ops, void *data)
8613 {
8614 	struct ftrace_func_mapper *mapper = data;
8615 	long *count = NULL;
8616 
8617 	seq_printf(m, "%ps:", (void *)ip);
8618 
8619 	seq_puts(m, "snapshot");
8620 
8621 	if (mapper)
8622 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8623 
8624 	if (count)
8625 		seq_printf(m, ":count=%ld\n", *count);
8626 	else
8627 		seq_puts(m, ":unlimited\n");
8628 
8629 	return 0;
8630 }
8631 
8632 static int
8633 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8634 		     unsigned long ip, void *init_data, void **data)
8635 {
8636 	struct ftrace_func_mapper *mapper = *data;
8637 
8638 	if (!mapper) {
8639 		mapper = allocate_ftrace_func_mapper();
8640 		if (!mapper)
8641 			return -ENOMEM;
8642 		*data = mapper;
8643 	}
8644 
8645 	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8646 }
8647 
8648 static void
8649 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8650 		     unsigned long ip, void *data)
8651 {
8652 	struct ftrace_func_mapper *mapper = data;
8653 
8654 	if (!ip) {
8655 		if (!mapper)
8656 			return;
8657 		free_ftrace_func_mapper(mapper, NULL);
8658 		return;
8659 	}
8660 
8661 	ftrace_func_mapper_remove_ip(mapper, ip);
8662 }
8663 
8664 static struct ftrace_probe_ops snapshot_probe_ops = {
8665 	.func			= ftrace_snapshot,
8666 	.print			= ftrace_snapshot_print,
8667 };
8668 
8669 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8670 	.func			= ftrace_count_snapshot,
8671 	.print			= ftrace_snapshot_print,
8672 	.init			= ftrace_snapshot_init,
8673 	.free			= ftrace_snapshot_free,
8674 };
8675 
8676 static int
8677 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8678 			       char *glob, char *cmd, char *param, int enable)
8679 {
8680 	struct ftrace_probe_ops *ops;
8681 	void *count = (void *)-1;
8682 	char *number;
8683 	int ret;
8684 
8685 	if (!tr)
8686 		return -ENODEV;
8687 
8688 	/* hash funcs only work with set_ftrace_filter */
8689 	if (!enable)
8690 		return -EINVAL;
8691 
8692 	ops = param ? &snapshot_count_probe_ops :  &snapshot_probe_ops;
8693 
8694 	if (glob[0] == '!') {
8695 		ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
8696 		if (!ret)
8697 			tracing_disarm_snapshot(tr);
8698 
8699 		return ret;
8700 	}
8701 
8702 	if (!param)
8703 		goto out_reg;
8704 
8705 	number = strsep(&param, ":");
8706 
8707 	if (!strlen(number))
8708 		goto out_reg;
8709 
8710 	/*
8711 	 * We use the callback data field (which is a pointer)
8712 	 * as our counter.
8713 	 */
8714 	ret = kstrtoul(number, 0, (unsigned long *)&count);
8715 	if (ret)
8716 		return ret;
8717 
8718  out_reg:
8719 	ret = tracing_arm_snapshot(tr);
8720 	if (ret < 0)
8721 		goto out;
8722 
8723 	ret = register_ftrace_function_probe(glob, tr, ops, count);
8724 	if (ret < 0)
8725 		tracing_disarm_snapshot(tr);
8726  out:
8727 	return ret < 0 ? ret : 0;
8728 }
8729 
8730 static struct ftrace_func_command ftrace_snapshot_cmd = {
8731 	.name			= "snapshot",
8732 	.func			= ftrace_trace_snapshot_callback,
8733 };
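/*
 * Once registered, this command is used through set_ftrace_filter; for
 * example (the function name is only illustrative):
 *
 *	echo 'native_sched_clock:snapshot' > set_ftrace_filter
 *	echo 'native_sched_clock:snapshot:3' > set_ftrace_filter
 *
 * The first form triggers a snapshot every time the function is hit,
 * the second only for the first three hits (the count parsed above),
 * and prefixing the line with '!' removes the probe again.
 */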
8734 
8735 static __init int register_snapshot_cmd(void)
8736 {
8737 	return register_ftrace_command(&ftrace_snapshot_cmd);
8738 }
8739 #else
8740 static inline __init int register_snapshot_cmd(void) { return 0; }
8741 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8742 
8743 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8744 {
8745 	if (WARN_ON(!tr->dir))
8746 		return ERR_PTR(-ENODEV);
8747 
8748 	/* Top directory uses NULL as the parent */
8749 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8750 		return NULL;
8751 
8752 	/* All sub buffers have a descriptor */
8753 	return tr->dir;
8754 }
8755 
8756 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8757 {
8758 	struct dentry *d_tracer;
8759 
8760 	if (tr->percpu_dir)
8761 		return tr->percpu_dir;
8762 
8763 	d_tracer = tracing_get_dentry(tr);
8764 	if (IS_ERR(d_tracer))
8765 		return NULL;
8766 
8767 	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8768 
8769 	MEM_FAIL(!tr->percpu_dir,
8770 		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8771 
8772 	return tr->percpu_dir;
8773 }
8774 
8775 static struct dentry *
8776 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8777 		      void *data, long cpu, const struct file_operations *fops)
8778 {
8779 	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8780 
8781 	if (ret) /* See tracing_get_cpu() */
8782 		d_inode(ret)->i_cdev = (void *)(cpu + 1);
8783 	return ret;
8784 }
8785 
8786 static void
8787 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8788 {
8789 	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8790 	struct dentry *d_cpu;
8791 	char cpu_dir[30]; /* 30 characters should be more than enough */
8792 
8793 	if (!d_percpu)
8794 		return;
8795 
8796 	snprintf(cpu_dir, 30, "cpu%ld", cpu);
8797 	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8798 	if (!d_cpu) {
8799 		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8800 		return;
8801 	}
8802 
8803 	/* per cpu trace_pipe */
8804 	trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8805 				tr, cpu, &tracing_pipe_fops);
8806 
8807 	/* per cpu trace */
8808 	trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8809 				tr, cpu, &tracing_fops);
8810 
8811 	trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8812 				tr, cpu, &tracing_buffers_fops);
8813 
8814 	trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8815 				tr, cpu, &tracing_stats_fops);
8816 
8817 	trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8818 				tr, cpu, &tracing_entries_fops);
8819 
8820 	if (tr->range_addr_start)
8821 		trace_create_cpu_file("buffer_meta", TRACE_MODE_READ, d_cpu,
8822 				      tr, cpu, &tracing_buffer_meta_fops);
8823 #ifdef CONFIG_TRACER_SNAPSHOT
8824 	if (!tr->range_addr_start) {
8825 		trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8826 				      tr, cpu, &snapshot_fops);
8827 
8828 		trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8829 				      tr, cpu, &snapshot_raw_fops);
8830 	}
8831 #endif
8832 }
8833 
8834 #ifdef CONFIG_FTRACE_SELFTEST
8835 /* Let selftest have access to static functions in this file */
8836 #include "trace_selftest.c"
8837 #endif
8838 
8839 static ssize_t
8840 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8841 			loff_t *ppos)
8842 {
8843 	struct trace_option_dentry *topt = filp->private_data;
8844 	char *buf;
8845 
8846 	if (topt->flags->val & topt->opt->bit)
8847 		buf = "1\n";
8848 	else
8849 		buf = "0\n";
8850 
8851 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8852 }
8853 
8854 static ssize_t
8855 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8856 			 loff_t *ppos)
8857 {
8858 	struct trace_option_dentry *topt = filp->private_data;
8859 	unsigned long val;
8860 	int ret;
8861 
8862 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8863 	if (ret)
8864 		return ret;
8865 
8866 	if (val != 0 && val != 1)
8867 		return -EINVAL;
8868 
8869 	if (!!(topt->flags->val & topt->opt->bit) != val) {
8870 		mutex_lock(&trace_types_lock);
8871 		ret = __set_tracer_option(topt->tr, topt->flags,
8872 					  topt->opt, !val);
8873 		mutex_unlock(&trace_types_lock);
8874 		if (ret)
8875 			return ret;
8876 	}
8877 
8878 	*ppos += cnt;
8879 
8880 	return cnt;
8881 }
8882 
8883 static int tracing_open_options(struct inode *inode, struct file *filp)
8884 {
8885 	struct trace_option_dentry *topt = inode->i_private;
8886 	int ret;
8887 
8888 	ret = tracing_check_open_get_tr(topt->tr);
8889 	if (ret)
8890 		return ret;
8891 
8892 	filp->private_data = inode->i_private;
8893 	return 0;
8894 }
8895 
8896 static int tracing_release_options(struct inode *inode, struct file *file)
8897 {
8898 	struct trace_option_dentry *topt = file->private_data;
8899 
8900 	trace_array_put(topt->tr);
8901 	return 0;
8902 }
8903 
8904 static const struct file_operations trace_options_fops = {
8905 	.open = tracing_open_options,
8906 	.read = trace_options_read,
8907 	.write = trace_options_write,
8908 	.llseek	= generic_file_llseek,
8909 	.release = tracing_release_options,
8910 };
8911 
8912 /*
8913  * In order to pass in both the trace_array descriptor and the index of
8914  * the flag that the trace option file represents, the trace_array
8915  * has a character array of trace_flags_index[], which holds the index
8916  * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8917  * The address of this character array is passed to the flag option file
8918  * read/write callbacks.
8919  *
8920  * In order to extract both the index and the trace_array descriptor,
8921  * get_tr_index() uses the following algorithm.
8922  *
8923  *   idx = *ptr;
8924  *
8925  * The pointer points at the array element whose value is its own index
8926  * (remember index[1] == 1), so dereferencing it yields the index.
8927  *
8928  * Then, to get the trace_array descriptor, subtracting that index
8929  * from the pointer takes us to the start of the array itself.
8930  *
8931  *   ptr - idx == &index[0]
8932  *
8933  * Then a simple container_of() from that pointer gets us to the
8934  * trace_array descriptor.
8935  */
8936 static void get_tr_index(void *data, struct trace_array **ptr,
8937 			 unsigned int *pindex)
8938 {
8939 	*pindex = *(unsigned char *)data;
8940 
8941 	*ptr = container_of(data - *pindex, struct trace_array,
8942 			    trace_flags_index);
8943 }
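/*
 * Worked example: if data points at tr->trace_flags_index[3], then
 * *data == 3, data - 3 == &tr->trace_flags_index[0], and container_of()
 * on that address recovers tr.
 */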
8944 
8945 static ssize_t
8946 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8947 			loff_t *ppos)
8948 {
8949 	void *tr_index = filp->private_data;
8950 	struct trace_array *tr;
8951 	unsigned int index;
8952 	char *buf;
8953 
8954 	get_tr_index(tr_index, &tr, &index);
8955 
8956 	if (tr->trace_flags & (1 << index))
8957 		buf = "1\n";
8958 	else
8959 		buf = "0\n";
8960 
8961 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8962 }
8963 
8964 static ssize_t
8965 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8966 			 loff_t *ppos)
8967 {
8968 	void *tr_index = filp->private_data;
8969 	struct trace_array *tr;
8970 	unsigned int index;
8971 	unsigned long val;
8972 	int ret;
8973 
8974 	get_tr_index(tr_index, &tr, &index);
8975 
8976 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8977 	if (ret)
8978 		return ret;
8979 
8980 	if (val != 0 && val != 1)
8981 		return -EINVAL;
8982 
8983 	mutex_lock(&event_mutex);
8984 	mutex_lock(&trace_types_lock);
8985 	ret = set_tracer_flag(tr, 1 << index, val);
8986 	mutex_unlock(&trace_types_lock);
8987 	mutex_unlock(&event_mutex);
8988 
8989 	if (ret < 0)
8990 		return ret;
8991 
8992 	*ppos += cnt;
8993 
8994 	return cnt;
8995 }
8996 
8997 static const struct file_operations trace_options_core_fops = {
8998 	.open = tracing_open_generic,
8999 	.read = trace_options_core_read,
9000 	.write = trace_options_core_write,
9001 	.llseek = generic_file_llseek,
9002 };
9003 
9004 struct dentry *trace_create_file(const char *name,
9005 				 umode_t mode,
9006 				 struct dentry *parent,
9007 				 void *data,
9008 				 const struct file_operations *fops)
9009 {
9010 	struct dentry *ret;
9011 
9012 	ret = tracefs_create_file(name, mode, parent, data, fops);
9013 	if (!ret)
9014 		pr_warn("Could not create tracefs '%s' entry\n", name);
9015 
9016 	return ret;
9017 }
9018 
9019 
9020 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
9021 {
9022 	struct dentry *d_tracer;
9023 
9024 	if (tr->options)
9025 		return tr->options;
9026 
9027 	d_tracer = tracing_get_dentry(tr);
9028 	if (IS_ERR(d_tracer))
9029 		return NULL;
9030 
9031 	tr->options = tracefs_create_dir("options", d_tracer);
9032 	if (!tr->options) {
9033 		pr_warn("Could not create tracefs directory 'options'\n");
9034 		return NULL;
9035 	}
9036 
9037 	return tr->options;
9038 }
9039 
9040 static void
9041 create_trace_option_file(struct trace_array *tr,
9042 			 struct trace_option_dentry *topt,
9043 			 struct tracer_flags *flags,
9044 			 struct tracer_opt *opt)
9045 {
9046 	struct dentry *t_options;
9047 
9048 	t_options = trace_options_init_dentry(tr);
9049 	if (!t_options)
9050 		return;
9051 
9052 	topt->flags = flags;
9053 	topt->opt = opt;
9054 	topt->tr = tr;
9055 
9056 	topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
9057 					t_options, topt, &trace_options_fops);
9058 
9059 }
9060 
9061 static void
9062 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
9063 {
9064 	struct trace_option_dentry *topts;
9065 	struct trace_options *tr_topts;
9066 	struct tracer_flags *flags;
9067 	struct tracer_opt *opts;
9068 	int cnt;
9069 	int i;
9070 
9071 	if (!tracer)
9072 		return;
9073 
9074 	flags = tracer->flags;
9075 
9076 	if (!flags || !flags->opts)
9077 		return;
9078 
9079 	/*
9080 	 * If this is an instance, only create flags for tracers
9081 	 * the instance may have.
9082 	 */
9083 	if (!trace_ok_for_array(tracer, tr))
9084 		return;
9085 
9086 	for (i = 0; i < tr->nr_topts; i++) {
9087 		/* Make sure there are no duplicate flags. */
9088 		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
9089 			return;
9090 	}
9091 
9092 	opts = flags->opts;
9093 
9094 	for (cnt = 0; opts[cnt].name; cnt++)
9095 		;
9096 
9097 	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
9098 	if (!topts)
9099 		return;
9100 
9101 	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
9102 			    GFP_KERNEL);
9103 	if (!tr_topts) {
9104 		kfree(topts);
9105 		return;
9106 	}
9107 
9108 	tr->topts = tr_topts;
9109 	tr->topts[tr->nr_topts].tracer = tracer;
9110 	tr->topts[tr->nr_topts].topts = topts;
9111 	tr->nr_topts++;
9112 
9113 	for (cnt = 0; opts[cnt].name; cnt++) {
9114 		create_trace_option_file(tr, &topts[cnt], flags,
9115 					 &opts[cnt]);
9116 		MEM_FAIL(topts[cnt].entry == NULL,
9117 			  "Failed to create trace option: %s",
9118 			  opts[cnt].name);
9119 	}
9120 }
9121 
9122 static struct dentry *
9123 create_trace_option_core_file(struct trace_array *tr,
9124 			      const char *option, long index)
9125 {
9126 	struct dentry *t_options;
9127 
9128 	t_options = trace_options_init_dentry(tr);
9129 	if (!t_options)
9130 		return NULL;
9131 
9132 	return trace_create_file(option, TRACE_MODE_WRITE, t_options,
9133 				 (void *)&tr->trace_flags_index[index],
9134 				 &trace_options_core_fops);
9135 }
9136 
9137 static void create_trace_options_dir(struct trace_array *tr)
9138 {
9139 	struct dentry *t_options;
9140 	bool top_level = tr == &global_trace;
9141 	int i;
9142 
9143 	t_options = trace_options_init_dentry(tr);
9144 	if (!t_options)
9145 		return;
9146 
9147 	for (i = 0; trace_options[i]; i++) {
9148 		if (top_level ||
9149 		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
9150 			create_trace_option_core_file(tr, trace_options[i], i);
9151 	}
9152 }
9153 
9154 static ssize_t
9155 rb_simple_read(struct file *filp, char __user *ubuf,
9156 	       size_t cnt, loff_t *ppos)
9157 {
9158 	struct trace_array *tr = filp->private_data;
9159 	char buf[64];
9160 	int r;
9161 
9162 	r = tracer_tracing_is_on(tr);
9163 	r = sprintf(buf, "%d\n", r);
9164 
9165 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9166 }
9167 
9168 static ssize_t
9169 rb_simple_write(struct file *filp, const char __user *ubuf,
9170 		size_t cnt, loff_t *ppos)
9171 {
9172 	struct trace_array *tr = filp->private_data;
9173 	struct trace_buffer *buffer = tr->array_buffer.buffer;
9174 	unsigned long val;
9175 	int ret;
9176 
9177 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9178 	if (ret)
9179 		return ret;
9180 
9181 	if (buffer) {
9182 		mutex_lock(&trace_types_lock);
9183 		if (!!val == tracer_tracing_is_on(tr)) {
9184 			val = 0; /* do nothing */
9185 		} else if (val) {
9186 			tracer_tracing_on(tr);
9187 			if (tr->current_trace->start)
9188 				tr->current_trace->start(tr);
9189 		} else {
9190 			tracer_tracing_off(tr);
9191 			if (tr->current_trace->stop)
9192 				tr->current_trace->stop(tr);
9193 			/* Wake up any waiters */
9194 			ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9195 		}
9196 		mutex_unlock(&trace_types_lock);
9197 	}
9198 
9199 	(*ppos)++;
9200 
9201 	return cnt;
9202 }
9203 
9204 static const struct file_operations rb_simple_fops = {
9205 	.open		= tracing_open_generic_tr,
9206 	.read		= rb_simple_read,
9207 	.write		= rb_simple_write,
9208 	.release	= tracing_release_generic_tr,
9209 	.llseek		= default_llseek,
9210 };
9211 
9212 static ssize_t
9213 buffer_percent_read(struct file *filp, char __user *ubuf,
9214 		    size_t cnt, loff_t *ppos)
9215 {
9216 	struct trace_array *tr = filp->private_data;
9217 	char buf[64];
9218 	int r;
9219 
9220 	r = tr->buffer_percent;
9221 	r = sprintf(buf, "%d\n", r);
9222 
9223 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9224 }
9225 
9226 static ssize_t
9227 buffer_percent_write(struct file *filp, const char __user *ubuf,
9228 		     size_t cnt, loff_t *ppos)
9229 {
9230 	struct trace_array *tr = filp->private_data;
9231 	unsigned long val;
9232 	int ret;
9233 
9234 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9235 	if (ret)
9236 		return ret;
9237 
9238 	if (val > 100)
9239 		return -EINVAL;
9240 
9241 	tr->buffer_percent = val;
9242 
9243 	(*ppos)++;
9244 
9245 	return cnt;
9246 }
9247 
9248 static const struct file_operations buffer_percent_fops = {
9249 	.open		= tracing_open_generic_tr,
9250 	.read		= buffer_percent_read,
9251 	.write		= buffer_percent_write,
9252 	.release	= tracing_release_generic_tr,
9253 	.llseek		= default_llseek,
9254 };
9255 
9256 static ssize_t
9257 buffer_subbuf_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
9258 {
9259 	struct trace_array *tr = filp->private_data;
9260 	size_t size;
9261 	char buf[64];
9262 	int order;
9263 	int r;
9264 
9265 	order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9266 	size = (PAGE_SIZE << order) / 1024;
9267 
9268 	r = sprintf(buf, "%zd\n", size);
9269 
9270 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9271 }
9272 
9273 static ssize_t
9274 buffer_subbuf_size_write(struct file *filp, const char __user *ubuf,
9275 			 size_t cnt, loff_t *ppos)
9276 {
9277 	struct trace_array *tr = filp->private_data;
9278 	unsigned long val;
9279 	int old_order;
9280 	int order;
9281 	int pages;
9282 	int ret;
9283 
9284 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9285 	if (ret)
9286 		return ret;
9287 
9288 	val *= 1024; /* value passed in is in KB */
9289 
9290 	pages = DIV_ROUND_UP(val, PAGE_SIZE);
9291 	order = fls(pages - 1);
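	/*
	 * Worked example (assuming 4K pages): writing "8" means 8 KB, so
	 * val = 8192, pages = 2 and order = fls(1) = 1, i.e. a sub-buffer
	 * of two system pages.
	 */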
9292 
9293 	/* limit between 1 and 128 system pages */
9294 	if (order < 0 || order > 7)
9295 		return -EINVAL;
9296 
9297 	/* Do not allow tracing while changing the order of the ring buffer */
9298 	tracing_stop_tr(tr);
9299 
9300 	old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9301 	if (old_order == order)
9302 		goto out;
9303 
9304 	ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order);
9305 	if (ret)
9306 		goto out;
9307 
9308 #ifdef CONFIG_TRACER_MAX_TRACE
9309 
9310 	if (!tr->allocated_snapshot)
9311 		goto out_max;
9312 
9313 	ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
9314 	if (ret) {
9315 		/* Put back the old order */
9316 		cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
9317 		if (WARN_ON_ONCE(cnt)) {
9318 			/*
9319 			 * AARGH! We are left with different orders!
9320 			 * The max buffer is our "snapshot" buffer.
9321 			 * When a tracer needs a snapshot (one of the
9322 			 * latency tracers), it swaps the max buffer
9323 			 * with the saved snapshot. We succeeded in
9324 			 * updating the order of the main buffer, but failed to
9325 			 * update the order of the max buffer. And when we tried
9326 			 * to reset the main buffer to the original order, we
9327 			 * failed there too. This is very unlikely to
9328 			 * happen, but if it does, warn and kill all
9329 			 * tracing.
9330 			 */
9331 			tracing_disabled = 1;
9332 		}
9333 		goto out;
9334 	}
9335  out_max:
9336 #endif
9337 	(*ppos)++;
9338  out:
9339 	if (ret)
9340 		cnt = ret;
9341 	tracing_start_tr(tr);
9342 	return cnt;
9343 }
9344 
9345 static const struct file_operations buffer_subbuf_size_fops = {
9346 	.open		= tracing_open_generic_tr,
9347 	.read		= buffer_subbuf_size_read,
9348 	.write		= buffer_subbuf_size_write,
9349 	.release	= tracing_release_generic_tr,
9350 	.llseek		= default_llseek,
9351 };
9352 
9353 static struct dentry *trace_instance_dir;
9354 
9355 static void
9356 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9357 
9358 static void setup_trace_scratch(struct trace_array *tr,
9359 				struct trace_scratch *tscratch, unsigned int size)
9360 {
9361 	struct trace_mod_entry *entry;
9362 
9363 	if (!tscratch)
9364 		return;
9365 
9366 	tr->scratch = tscratch;
9367 	tr->scratch_size = size;
9368 
9369 #ifdef CONFIG_RANDOMIZE_BASE
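	/*
	 * A persistent (boot-mapped) buffer records the KASLR offset of
	 * the boot that wrote it; the delta against the current offset is
	 * later used to adjust the text addresses stored in that buffer.
	 */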
9370 	if (tscratch->kaslr_addr)
9371 		tr->text_delta = kaslr_offset() - tscratch->kaslr_addr;
9372 #endif
9373 
9374 	if (struct_size(tscratch, entries, tscratch->nr_entries) > size)
9375 		goto reset;
9376 
9377 	/* Check if each module name is a valid string */
9378 	for (int i = 0; i < tscratch->nr_entries; i++) {
9379 		int n;
9380 
9381 		entry = &tscratch->entries[i];
9382 
9383 		for (n = 0; n < MODULE_NAME_LEN; n++) {
9384 			if (entry->mod_name[n] == '\0')
9385 				break;
9386 			if (!isprint(entry->mod_name[n]))
9387 				goto reset;
9388 		}
9389 		if (n == MODULE_NAME_LEN)
9390 			goto reset;
9391 	}
9392 	return;
9393  reset:
9394 	/* Invalid trace modules */
9395 	memset(tscratch, 0, size);
9396 }
9397 
9398 static int
9399 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9400 {
9401 	enum ring_buffer_flags rb_flags;
9402 	struct trace_scratch *tscratch;
9403 	unsigned int scratch_size = 0;
9404 
9405 	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9406 
9407 	buf->tr = tr;
9408 
9409 	if (tr->range_addr_start && tr->range_addr_size) {
9410 		/* Add scratch buffer to handle 128 modules */
9411 		buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0,
9412 						      tr->range_addr_start,
9413 						      tr->range_addr_size,
9414 						      struct_size(tscratch, entries, 128));
9415 
9416 		tscratch = ring_buffer_meta_scratch(buf->buffer, &scratch_size);
9417 		setup_trace_scratch(tr, tscratch, scratch_size);
9418 
9419 		/*
9420 		 * This is basically the same as a mapped buffer,
9421 		 * with the same restrictions.
9422 		 */
9423 		tr->mapped++;
9424 	} else {
9425 		buf->buffer = ring_buffer_alloc(size, rb_flags);
9426 	}
9427 	if (!buf->buffer)
9428 		return -ENOMEM;
9429 
9430 	buf->data = alloc_percpu(struct trace_array_cpu);
9431 	if (!buf->data) {
9432 		ring_buffer_free(buf->buffer);
9433 		buf->buffer = NULL;
9434 		return -ENOMEM;
9435 	}
9436 
9437 	/* Allocate the first page for all buffers */
9438 	set_buffer_entries(&tr->array_buffer,
9439 			   ring_buffer_size(tr->array_buffer.buffer, 0));
9440 
9441 	return 0;
9442 }
9443 
9444 static void free_trace_buffer(struct array_buffer *buf)
9445 {
9446 	if (buf->buffer) {
9447 		ring_buffer_free(buf->buffer);
9448 		buf->buffer = NULL;
9449 		free_percpu(buf->data);
9450 		buf->data = NULL;
9451 	}
9452 }
9453 
9454 static int allocate_trace_buffers(struct trace_array *tr, int size)
9455 {
9456 	int ret;
9457 
9458 	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9459 	if (ret)
9460 		return ret;
9461 
9462 #ifdef CONFIG_TRACER_MAX_TRACE
9463 	/* Fixed-address (boot mapped) buffer trace arrays do not have snapshot buffers */
9464 	if (tr->range_addr_start)
9465 		return 0;
9466 
9467 	ret = allocate_trace_buffer(tr, &tr->max_buffer,
9468 				    allocate_snapshot ? size : 1);
9469 	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9470 		free_trace_buffer(&tr->array_buffer);
9471 		return -ENOMEM;
9472 	}
9473 	tr->allocated_snapshot = allocate_snapshot;
9474 
9475 	allocate_snapshot = false;
9476 #endif
9477 
9478 	return 0;
9479 }
9480 
9481 static void free_trace_buffers(struct trace_array *tr)
9482 {
9483 	if (!tr)
9484 		return;
9485 
9486 	free_trace_buffer(&tr->array_buffer);
9487 
9488 #ifdef CONFIG_TRACER_MAX_TRACE
9489 	free_trace_buffer(&tr->max_buffer);
9490 #endif
9491 
9492 	if (tr->range_addr_start)
9493 		vunmap((void *)tr->range_addr_start);
9494 }
9495 
9496 static void init_trace_flags_index(struct trace_array *tr)
9497 {
9498 	int i;
9499 
9500 	/* Used by the trace options files */
9501 	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9502 		tr->trace_flags_index[i] = i;
9503 }
9504 
9505 static void __update_tracer_options(struct trace_array *tr)
9506 {
9507 	struct tracer *t;
9508 
9509 	for (t = trace_types; t; t = t->next)
9510 		add_tracer_options(tr, t);
9511 }
9512 
9513 static void update_tracer_options(struct trace_array *tr)
9514 {
9515 	mutex_lock(&trace_types_lock);
9516 	tracer_options_updated = true;
9517 	__update_tracer_options(tr);
9518 	mutex_unlock(&trace_types_lock);
9519 }
9520 
9521 /* Must have trace_types_lock held */
9522 struct trace_array *trace_array_find(const char *instance)
9523 {
9524 	struct trace_array *tr, *found = NULL;
9525 
9526 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9527 		if (tr->name && strcmp(tr->name, instance) == 0) {
9528 			found = tr;
9529 			break;
9530 		}
9531 	}
9532 
9533 	return found;
9534 }
9535 
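/*
 * Like trace_array_find(), but takes trace_types_lock itself and bumps
 * the reference count of the array it returns. The caller must drop the
 * reference with trace_array_put() when done.
 */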
9536 struct trace_array *trace_array_find_get(const char *instance)
9537 {
9538 	struct trace_array *tr;
9539 
9540 	mutex_lock(&trace_types_lock);
9541 	tr = trace_array_find(instance);
9542 	if (tr)
9543 		tr->ref++;
9544 	mutex_unlock(&trace_types_lock);
9545 
9546 	return tr;
9547 }
9548 
9549 static int trace_array_create_dir(struct trace_array *tr)
9550 {
9551 	int ret;
9552 
9553 	tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9554 	if (!tr->dir)
9555 		return -EINVAL;
9556 
9557 	ret = event_trace_add_tracer(tr->dir, tr);
9558 	if (ret) {
9559 		tracefs_remove(tr->dir);
9560 		return ret;
9561 	}
9562 
9563 	init_tracer_tracefs(tr, tr->dir);
9564 	__update_tracer_options(tr);
9565 
9566 	return ret;
9567 }
9568 
9569 static struct trace_array *
9570 trace_array_create_systems(const char *name, const char *systems,
9571 			   unsigned long range_addr_start,
9572 			   unsigned long range_addr_size)
9573 {
9574 	struct trace_array *tr;
9575 	int ret;
9576 
9577 	ret = -ENOMEM;
9578 	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9579 	if (!tr)
9580 		return ERR_PTR(ret);
9581 
9582 	tr->name = kstrdup(name, GFP_KERNEL);
9583 	if (!tr->name)
9584 		goto out_free_tr;
9585 
9586 	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9587 		goto out_free_tr;
9588 
9589 	if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9590 		goto out_free_tr;
9591 
9592 	if (systems) {
9593 		tr->system_names = kstrdup_const(systems, GFP_KERNEL);
9594 		if (!tr->system_names)
9595 			goto out_free_tr;
9596 	}
9597 
9598 	/* Only for boot up memory mapped ring buffers */
9599 	tr->range_addr_start = range_addr_start;
9600 	tr->range_addr_size = range_addr_size;
9601 
9602 	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9603 
9604 	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9605 
9606 	raw_spin_lock_init(&tr->start_lock);
9607 
9608 	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9609 #ifdef CONFIG_TRACER_MAX_TRACE
9610 	spin_lock_init(&tr->snapshot_trigger_lock);
9611 #endif
9612 	tr->current_trace = &nop_trace;
9613 
9614 	INIT_LIST_HEAD(&tr->systems);
9615 	INIT_LIST_HEAD(&tr->events);
9616 	INIT_LIST_HEAD(&tr->hist_vars);
9617 	INIT_LIST_HEAD(&tr->err_log);
9618 
9619 #ifdef CONFIG_MODULES
9620 	INIT_LIST_HEAD(&tr->mod_events);
9621 #endif
9622 
9623 	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9624 		goto out_free_tr;
9625 
9626 	/* The ring buffer is expanded by default */
9627 	trace_set_ring_buffer_expanded(tr);
9628 
9629 	if (ftrace_allocate_ftrace_ops(tr) < 0)
9630 		goto out_free_tr;
9631 
9632 	ftrace_init_trace_array(tr);
9633 
9634 	init_trace_flags_index(tr);
9635 
9636 	if (trace_instance_dir) {
9637 		ret = trace_array_create_dir(tr);
9638 		if (ret)
9639 			goto out_free_tr;
9640 	} else
9641 		__trace_early_add_events(tr);
9642 
9643 	list_add(&tr->list, &ftrace_trace_arrays);
9644 
9645 	tr->ref++;
9646 
9647 	return tr;
9648 
9649  out_free_tr:
9650 	ftrace_free_ftrace_ops(tr);
9651 	free_trace_buffers(tr);
9652 	free_cpumask_var(tr->pipe_cpumask);
9653 	free_cpumask_var(tr->tracing_cpumask);
9654 	kfree_const(tr->system_names);
9655 	kfree(tr->range_name);
9656 	kfree(tr->name);
9657 	kfree(tr);
9658 
9659 	return ERR_PTR(ret);
9660 }
9661 
9662 static struct trace_array *trace_array_create(const char *name)
9663 {
9664 	return trace_array_create_systems(name, NULL, 0, 0);
9665 }
9666 
9667 static int instance_mkdir(const char *name)
9668 {
9669 	struct trace_array *tr;
9670 	int ret;
9671 
9672 	guard(mutex)(&event_mutex);
9673 	guard(mutex)(&trace_types_lock);
9674 
9675 	ret = -EEXIST;
9676 	if (trace_array_find(name))
9677 		return -EEXIST;
9678 
9679 	tr = trace_array_create(name);
9680 
9681 	ret = PTR_ERR_OR_ZERO(tr);
9682 
9683 	return ret;
9684 }
9685 
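/*
 * Map a physically contiguous region (e.g. memory set aside with the
 * reserve_mem= boot parameter) into kernel virtual address space so it
 * can back a boot mapped ring buffer. Returns the virtual address, or 0
 * if the page array allocation or the vmap() fails.
 */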
9686 static u64 map_pages(u64 start, u64 size)
9687 {
9688 	struct page **pages;
9689 	phys_addr_t page_start;
9690 	unsigned int page_count;
9691 	unsigned int i;
9692 	void *vaddr;
9693 
9694 	page_count = DIV_ROUND_UP(size, PAGE_SIZE);
9695 
9696 	page_start = start;
9697 	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
9698 	if (!pages)
9699 		return 0;
9700 
9701 	for (i = 0; i < page_count; i++) {
9702 		phys_addr_t addr = page_start + i * PAGE_SIZE;
9703 		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
9704 	}
9705 	vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
9706 	kfree(pages);
9707 
9708 	return (u64)(unsigned long)vaddr;
9709 }
9710 
9711 /**
9712  * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9713  * @name: The name of the trace array to be looked up/created.
9714  * @systems: A list of systems to create event directories for (NULL for all)
9715  *
9716  * Returns a pointer to the trace array with the given name, or
9717  * NULL if it cannot be created.
9718  *
9719  * NOTE: This function increments the reference counter associated with the
9720  * trace array returned. This makes sure it cannot be freed while in use.
9721  * Use trace_array_put() once the trace array is no longer needed.
9722  * If the trace_array is to be freed, trace_array_destroy() needs to
9723  * be called after the trace_array_put(), or simply let user space delete
9724  * it from the tracefs instances directory. But until the
9725  * trace_array_put() is called, user space cannot delete it.
9726  *
9727  */
9728 struct trace_array *trace_array_get_by_name(const char *name, const char *systems)
9729 {
9730 	struct trace_array *tr;
9731 
9732 	guard(mutex)(&event_mutex);
9733 	guard(mutex)(&trace_types_lock);
9734 
9735 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9736 		if (tr->name && strcmp(tr->name, name) == 0) {
9737 			tr->ref++;
9738 			return tr;
9739 		}
9740 	}
9741 
9742 	tr = trace_array_create_systems(name, systems, 0, 0);
9743 
9744 	if (IS_ERR(tr))
9745 		tr = NULL;
9746 	else
9747 		tr->ref++;
9748 
9749 	return tr;
9750 }
9751 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
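/*
 * A minimal usage sketch for a module-created instance (the instance name
 * and error handling are illustrative only):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance", NULL);
 *	if (!tr)
 *		return -ENOMEM;
 *	...
 *	trace_array_put(tr);
 *	trace_array_destroy(tr);
 */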
9752 
9753 static int __remove_instance(struct trace_array *tr)
9754 {
9755 	int i;
9756 
9757 	/* Reference counter for a newly created trace array = 1. */
9758 	if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9759 		return -EBUSY;
9760 
9761 	list_del(&tr->list);
9762 
9763 	/* Disable all the flags that were enabled coming in */
9764 	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9765 		if ((1 << i) & ZEROED_TRACE_FLAGS)
9766 			set_tracer_flag(tr, 1 << i, 0);
9767 	}
9768 
9769 	if (printk_trace == tr)
9770 		update_printk_trace(&global_trace);
9771 
9772 	tracing_set_nop(tr);
9773 	clear_ftrace_function_probes(tr);
9774 	event_trace_del_tracer(tr);
9775 	ftrace_clear_pids(tr);
9776 	ftrace_destroy_function_files(tr);
9777 	tracefs_remove(tr->dir);
9778 	free_percpu(tr->last_func_repeats);
9779 	free_trace_buffers(tr);
9780 	clear_tracing_err_log(tr);
9781 
9782 	if (tr->range_name) {
9783 		reserve_mem_release_by_name(tr->range_name);
9784 		kfree(tr->range_name);
9785 	}
9786 
9787 	for (i = 0; i < tr->nr_topts; i++) {
9788 		kfree(tr->topts[i].topts);
9789 	}
9790 	kfree(tr->topts);
9791 
9792 	free_cpumask_var(tr->pipe_cpumask);
9793 	free_cpumask_var(tr->tracing_cpumask);
9794 	kfree_const(tr->system_names);
9795 	kfree(tr->name);
9796 	kfree(tr);
9797 
9798 	return 0;
9799 }
9800 
9801 int trace_array_destroy(struct trace_array *this_tr)
9802 {
9803 	struct trace_array *tr;
9804 
9805 	if (!this_tr)
9806 		return -EINVAL;
9807 
9808 	guard(mutex)(&event_mutex);
9809 	guard(mutex)(&trace_types_lock);
9810 
9811 
9812 	/* Make sure the trace array exists before destroying it. */
9813 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9814 		if (tr == this_tr)
9815 			return __remove_instance(tr);
9816 	}
9817 
9818 	return -ENODEV;
9819 }
9820 EXPORT_SYMBOL_GPL(trace_array_destroy);
9821 
9822 static int instance_rmdir(const char *name)
9823 {
9824 	struct trace_array *tr;
9825 
9826 	guard(mutex)(&event_mutex);
9827 	guard(mutex)(&trace_types_lock);
9828 
9829 	tr = trace_array_find(name);
9830 	if (!tr)
9831 		return -ENODEV;
9832 
9833 	return __remove_instance(tr);
9834 }
9835 
9836 static __init void create_trace_instances(struct dentry *d_tracer)
9837 {
9838 	struct trace_array *tr;
9839 
9840 	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9841 							 instance_mkdir,
9842 							 instance_rmdir);
9843 	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9844 		return;
9845 
9846 	guard(mutex)(&event_mutex);
9847 	guard(mutex)(&trace_types_lock);
9848 
9849 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9850 		if (!tr->name)
9851 			continue;
9852 		if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9853 			     "Failed to create instance directory\n"))
9854 			return;
9855 	}
9856 }
9857 
9858 static void
9859 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9860 {
9861 	int cpu;
9862 
9863 	trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9864 			tr, &show_traces_fops);
9865 
9866 	trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9867 			tr, &set_tracer_fops);
9868 
9869 	trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9870 			  tr, &tracing_cpumask_fops);
9871 
9872 	trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9873 			  tr, &tracing_iter_fops);
9874 
9875 	trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9876 			  tr, &tracing_fops);
9877 
9878 	trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9879 			  tr, &tracing_pipe_fops);
9880 
9881 	trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9882 			  tr, &tracing_entries_fops);
9883 
9884 	trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9885 			  tr, &tracing_total_entries_fops);
9886 
9887 	trace_create_file("free_buffer", 0200, d_tracer,
9888 			  tr, &tracing_free_buffer_fops);
9889 
9890 	trace_create_file("trace_marker", 0220, d_tracer,
9891 			  tr, &tracing_mark_fops);
9892 
9893 	tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
9894 
9895 	trace_create_file("trace_marker_raw", 0220, d_tracer,
9896 			  tr, &tracing_mark_raw_fops);
9897 
9898 	trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9899 			  &trace_clock_fops);
9900 
9901 	trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9902 			  tr, &rb_simple_fops);
9903 
9904 	trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9905 			  &trace_time_stamp_mode_fops);
9906 
9907 	tr->buffer_percent = 50;
9908 
9909 	trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
9910 			tr, &buffer_percent_fops);
9911 
9912 	trace_create_file("buffer_subbuf_size_kb", TRACE_MODE_WRITE, d_tracer,
9913 			  tr, &buffer_subbuf_size_fops);
9914 
9915 	create_trace_options_dir(tr);
9916 
9917 #ifdef CONFIG_TRACER_MAX_TRACE
9918 	trace_create_maxlat_file(tr, d_tracer);
9919 #endif
9920 
9921 	if (ftrace_create_function_files(tr, d_tracer))
9922 		MEM_FAIL(1, "Could not allocate function filter files");
9923 
9924 	if (tr->range_addr_start) {
9925 		trace_create_file("last_boot_info", TRACE_MODE_READ, d_tracer,
9926 				  tr, &last_boot_fops);
9927 #ifdef CONFIG_TRACER_SNAPSHOT
9928 	} else {
9929 		trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9930 				  tr, &snapshot_fops);
9931 #endif
9932 	}
9933 
9934 	trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9935 			  tr, &tracing_err_log_fops);
9936 
9937 	for_each_tracing_cpu(cpu)
9938 		tracing_init_tracefs_percpu(tr, cpu);
9939 
9940 	ftrace_init_tracefs(tr, d_tracer);
9941 }
9942 
9943 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9944 {
9945 	struct vfsmount *mnt;
9946 	struct file_system_type *type;
9947 
9948 	/*
9949 	 * To maintain backward compatibility for tools that mount
9950 	 * debugfs to get to the tracing facility, tracefs is automatically
9951 	 * mounted to the debugfs/tracing directory.
9952 	 */
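	/*
	 * E.g. a tool that only knows about debugfs can still run
	 * "cd /sys/kernel/debug/tracing"; that directory is an automount
	 * point for tracefs, whose canonical mount is /sys/kernel/tracing.
	 */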
9953 	type = get_fs_type("tracefs");
9954 	if (!type)
9955 		return NULL;
9956 	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9957 	put_filesystem(type);
9958 	if (IS_ERR(mnt))
9959 		return NULL;
9960 	mntget(mnt);
9961 
9962 	return mnt;
9963 }
9964 
9965 /**
9966  * tracing_init_dentry - initialize top level trace array
9967  *
9968  * This is called when creating files or directories in the tracing
9969  * directory. It is called via fs_initcall() by any of the boot up code
9970  * and returns 0 on success, or a negative error if tracing is unavailable.
9971  */
9972 int tracing_init_dentry(void)
9973 {
9974 	struct trace_array *tr = &global_trace;
9975 
9976 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
9977 		pr_warn("Tracing disabled due to lockdown\n");
9978 		return -EPERM;
9979 	}
9980 
9981 	/* The top level trace array uses NULL as parent */
9982 	if (tr->dir)
9983 		return 0;
9984 
9985 	if (WARN_ON(!tracefs_initialized()))
9986 		return -ENODEV;
9987 
9988 	/*
9989 	 * As there may still be users that expect the tracing
9990 	 * files to exist in debugfs/tracing, we must automount
9991 	 * the tracefs file system there, so older tools still
9992 	 * work with the newer kernel.
9993 	 */
9994 	tr->dir = debugfs_create_automount("tracing", NULL,
9995 					   trace_automount, NULL);
9996 
9997 	return 0;
9998 }
9999 
10000 extern struct trace_eval_map *__start_ftrace_eval_maps[];
10001 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
10002 
10003 static struct workqueue_struct *eval_map_wq __initdata;
10004 static struct work_struct eval_map_work __initdata;
10005 static struct work_struct tracerfs_init_work __initdata;
10006 
10007 static void __init eval_map_work_func(struct work_struct *work)
10008 {
10009 	int len;
10010 
10011 	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
10012 	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
10013 }
10014 
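/*
 * Converting the compiled-in eval maps can take a while at boot, so the
 * work is punted to an unbound workqueue here and synchronized against in
 * trace_eval_sync() at late_initcall time (presumably to keep early boot
 * fast); if the workqueue cannot be allocated, the work is done inline.
 */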
10015 static int __init trace_eval_init(void)
10016 {
10017 	INIT_WORK(&eval_map_work, eval_map_work_func);
10018 
10019 	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
10020 	if (!eval_map_wq) {
10021 		pr_err("Unable to allocate eval_map_wq\n");
10022 		/* Do work here */
10023 		eval_map_work_func(&eval_map_work);
10024 		return -ENOMEM;
10025 	}
10026 
10027 	queue_work(eval_map_wq, &eval_map_work);
10028 	return 0;
10029 }
10030 
10031 subsys_initcall(trace_eval_init);
10032 
10033 static int __init trace_eval_sync(void)
10034 {
10035 	/* Make sure the eval map updates are finished */
10036 	if (eval_map_wq)
10037 		destroy_workqueue(eval_map_wq);
10038 	return 0;
10039 }
10040 
10041 late_initcall_sync(trace_eval_sync);
10042 
10043 
10044 #ifdef CONFIG_MODULES
10045 
10046 bool module_exists(const char *module)
10047 {
10048 	/* All modules have the symbol __this_module */
10049 	static const char this_mod[] = "__this_module";
10050 	char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
10051 	unsigned long val;
10052 	int n;
10053 
10054 	n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
10055 
10056 	if (n > sizeof(modname) - 1)
10057 		return false;
10058 
10059 	val = module_kallsyms_lookup_name(modname);
10060 	return val != 0;
10061 }
10062 
10063 static void trace_module_add_evals(struct module *mod)
10064 {
10065 	if (!mod->num_trace_evals)
10066 		return;
10067 
10068 	/*
10069 	 * Modules with bad taint do not have events created, do
10070 	 * not bother with enums either.
10071 	 */
10072 	if (trace_module_has_bad_taint(mod))
10073 		return;
10074 
10075 	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
10076 }
10077 
10078 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
10079 static void trace_module_remove_evals(struct module *mod)
10080 {
10081 	union trace_eval_map_item *map;
10082 	union trace_eval_map_item **last = &trace_eval_maps;
10083 
10084 	if (!mod->num_trace_evals)
10085 		return;
10086 
10087 	guard(mutex)(&trace_eval_mutex);
10088 
10089 	map = trace_eval_maps;
10090 
10091 	while (map) {
10092 		if (map->head.mod == mod)
10093 			break;
10094 		map = trace_eval_jmp_to_tail(map);
10095 		last = &map->tail.next;
10096 		map = map->tail.next;
10097 	}
10098 	if (!map)
10099 		return;
10100 
10101 	*last = trace_eval_jmp_to_tail(map)->tail.next;
10102 	kfree(map);
10103 }
10104 #else
10105 static inline void trace_module_remove_evals(struct module *mod) { }
10106 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
10107 
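/*
 * Record a newly loaded module in the scratch area of any persistent
 * (boot mapped) trace array that is currently active, so that a later
 * boot can resolve addresses from this module in the saved trace data.
 */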
10108 static void trace_module_record(struct module *mod)
10109 {
10110 	struct trace_array *tr;
10111 
10112 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
10113 		/* Update any persistent trace array that has already been started */
10114 		if ((tr->flags & (TRACE_ARRAY_FL_BOOT | TRACE_ARRAY_FL_LAST_BOOT)) ==
10115 		    TRACE_ARRAY_FL_BOOT) {
10116 			guard(mutex)(&scratch_mutex);
10117 			save_mod(mod, tr);
10118 		}
10119 	}
10120 }
10121 
10122 static int trace_module_notify(struct notifier_block *self,
10123 			       unsigned long val, void *data)
10124 {
10125 	struct module *mod = data;
10126 
10127 	switch (val) {
10128 	case MODULE_STATE_COMING:
10129 		trace_module_add_evals(mod);
10130 		trace_module_record(mod);
10131 		break;
10132 	case MODULE_STATE_GOING:
10133 		trace_module_remove_evals(mod);
10134 		break;
10135 	}
10136 
10137 	return NOTIFY_OK;
10138 }
10139 
10140 static struct notifier_block trace_module_nb = {
10141 	.notifier_call = trace_module_notify,
10142 	.priority = 0,
10143 };
10144 #endif /* CONFIG_MODULES */
10145 
10146 static __init void tracer_init_tracefs_work_func(struct work_struct *work)
10147 {
10148 
10149 	event_trace_init();
10150 
10151 	init_tracer_tracefs(&global_trace, NULL);
10152 	ftrace_init_tracefs_toplevel(&global_trace, NULL);
10153 
10154 	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
10155 			&global_trace, &tracing_thresh_fops);
10156 
10157 	trace_create_file("README", TRACE_MODE_READ, NULL,
10158 			NULL, &tracing_readme_fops);
10159 
10160 	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
10161 			NULL, &tracing_saved_cmdlines_fops);
10162 
10163 	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
10164 			  NULL, &tracing_saved_cmdlines_size_fops);
10165 
10166 	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
10167 			NULL, &tracing_saved_tgids_fops);
10168 
10169 	trace_create_eval_file(NULL);
10170 
10171 #ifdef CONFIG_MODULES
10172 	register_module_notifier(&trace_module_nb);
10173 #endif
10174 
10175 #ifdef CONFIG_DYNAMIC_FTRACE
10176 	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
10177 			NULL, &tracing_dyn_info_fops);
10178 #endif
10179 
10180 	create_trace_instances(NULL);
10181 
10182 	update_tracer_options(&global_trace);
10183 }
10184 
10185 static __init int tracer_init_tracefs(void)
10186 {
10187 	int ret;
10188 
10189 	trace_access_lock_init();
10190 
10191 	ret = tracing_init_dentry();
10192 	if (ret)
10193 		return 0;
10194 
10195 	if (eval_map_wq) {
10196 		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
10197 		queue_work(eval_map_wq, &tracerfs_init_work);
10198 	} else {
10199 		tracer_init_tracefs_work_func(NULL);
10200 	}
10201 
10202 	rv_init_interface();
10203 
10204 	return 0;
10205 }
10206 
10207 fs_initcall(tracer_init_tracefs);
10208 
10209 static int trace_die_panic_handler(struct notifier_block *self,
10210 				unsigned long ev, void *unused);
10211 
10212 static struct notifier_block trace_panic_notifier = {
10213 	.notifier_call = trace_die_panic_handler,
10214 	.priority = INT_MAX - 1,
10215 };
10216 
10217 static struct notifier_block trace_die_notifier = {
10218 	.notifier_call = trace_die_panic_handler,
10219 	.priority = INT_MAX - 1,
10220 };
10221 
10222 /*
10223  * The idea is to execute the following die/panic callback early, in order
10224  * to avoid showing irrelevant information in the trace (like other panic
10225  * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
10226  * warnings get disabled (to prevent potential log flooding).
10227  */
10228 static int trace_die_panic_handler(struct notifier_block *self,
10229 				unsigned long ev, void *unused)
10230 {
10231 	if (!ftrace_dump_on_oops_enabled())
10232 		return NOTIFY_DONE;
10233 
10234 	/* The die notifier requires DIE_OOPS to trigger */
10235 	if (self == &trace_die_notifier && ev != DIE_OOPS)
10236 		return NOTIFY_DONE;
10237 
10238 	ftrace_dump(DUMP_PARAM);
10239 
10240 	return NOTIFY_DONE;
10241 }
10242 
10243 /*
10244  * printk is limited to a max of 1024 characters; we really don't need it that big.
10245  * Nothing should be printing 1000 characters anyway.
10246  */
10247 #define TRACE_MAX_PRINT		1000
10248 
10249 /*
10250  * Define here KERN_TRACE so that we have one place to modify
10251  * it if we decide to change what log level the ftrace dump
10252  * should be at.
10253  */
10254 #define KERN_TRACE		KERN_EMERG
10255 
10256 void
10257 trace_printk_seq(struct trace_seq *s)
10258 {
10259 	/* Probably should print a warning here. */
10260 	if (s->seq.len >= TRACE_MAX_PRINT)
10261 		s->seq.len = TRACE_MAX_PRINT;
10262 
10263 	/*
10264 	 * More paranoid code. Although the buffer size is set to
10265 	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
10266 	 * an extra layer of protection.
10267 	 */
10268 	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
10269 		s->seq.len = s->seq.size - 1;
10270 
10271 	/* Should be zero terminated, but we are paranoid. */
10272 	s->buffer[s->seq.len] = 0;
10273 
10274 	printk(KERN_TRACE "%s", s->buffer);
10275 
10276 	trace_seq_init(s);
10277 }
10278 
10279 static void trace_init_iter(struct trace_iterator *iter, struct trace_array *tr)
10280 {
10281 	iter->tr = tr;
10282 	iter->trace = iter->tr->current_trace;
10283 	iter->cpu_file = RING_BUFFER_ALL_CPUS;
10284 	iter->array_buffer = &tr->array_buffer;
10285 
10286 	if (iter->trace && iter->trace->open)
10287 		iter->trace->open(iter);
10288 
10289 	/* Annotate start of buffers if we had overruns */
10290 	if (ring_buffer_overruns(iter->array_buffer->buffer))
10291 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
10292 
10293 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
10294 	if (trace_clocks[iter->tr->clock_id].in_ns)
10295 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
10296 
10297 	/* Can not use kmalloc for iter.temp and iter.fmt */
10298 	iter->temp = static_temp_buf;
10299 	iter->temp_size = STATIC_TEMP_BUF_SIZE;
10300 	iter->fmt = static_fmt_buf;
10301 	iter->fmt_size = STATIC_FMT_BUF_SIZE;
10302 }
10303 
10304 void trace_init_global_iter(struct trace_iterator *iter)
10305 {
10306 	trace_init_iter(iter, &global_trace);
10307 }
10308 
10309 static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_mode)
10310 {
10311 	/* use static because iter can be a bit big for the stack */
10312 	static struct trace_iterator iter;
10313 	unsigned int old_userobj;
10314 	unsigned long flags;
10315 	int cnt = 0, cpu;
10316 
10317 	/*
10318 	 * Always turn off tracing when we dump.
10319 	 * We don't need to show trace output of what happens
10320 	 * between multiple crashes.
10321 	 *
10322 	 * If the user does a sysrq-z, then they can re-enable
10323 	 * tracing with echo 1 > tracing_on.
10324 	 */
10325 	tracer_tracing_off(tr);
10326 
10327 	local_irq_save(flags);
10328 
10329 	/* Simulate the iterator */
10330 	trace_init_iter(&iter, tr);
10331 
10332 	for_each_tracing_cpu(cpu) {
10333 		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10334 	}
10335 
10336 	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
10337 
10338 	/* don't look at user memory in panic mode */
10339 	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
10340 
10341 	if (dump_mode == DUMP_ORIG)
10342 		iter.cpu_file = raw_smp_processor_id();
10343 	else
10344 		iter.cpu_file = RING_BUFFER_ALL_CPUS;
10345 
10346 	if (tr == &global_trace)
10347 		printk(KERN_TRACE "Dumping ftrace buffer:\n");
10348 	else
10349 		printk(KERN_TRACE "Dumping ftrace instance %s buffer:\n", tr->name);
10350 
10351 	/* Did function tracer already get disabled? */
10352 	if (ftrace_is_dead()) {
10353 		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
10354 		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
10355 	}
10356 
10357 	/*
10358 	 * We need to stop all tracing on all CPUs to read
10359 	 * the next buffer. This is a bit expensive, but is
10360 	 * not done often. We print all that we can read,
10361 	 * and then release the locks again.
10362 	 */
10363 
10364 	while (!trace_empty(&iter)) {
10365 
10366 		if (!cnt)
10367 			printk(KERN_TRACE "---------------------------------\n");
10368 
10369 		cnt++;
10370 
10371 		trace_iterator_reset(&iter);
10372 		iter.iter_flags |= TRACE_FILE_LAT_FMT;
10373 
10374 		if (trace_find_next_entry_inc(&iter) != NULL) {
10375 			int ret;
10376 
10377 			ret = print_trace_line(&iter);
10378 			if (ret != TRACE_TYPE_NO_CONSUME)
10379 				trace_consume(&iter);
10380 		}
10381 		touch_nmi_watchdog();
10382 
10383 		trace_printk_seq(&iter.seq);
10384 	}
10385 
10386 	if (!cnt)
10387 		printk(KERN_TRACE "   (ftrace buffer empty)\n");
10388 	else
10389 		printk(KERN_TRACE "---------------------------------\n");
10390 
10391 	tr->trace_flags |= old_userobj;
10392 
10393 	for_each_tracing_cpu(cpu) {
10394 		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10395 	}
10396 	local_irq_restore(flags);
10397 }
10398 
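/*
 * Dump according to the ftrace_dump_on_oops= parameter, a comma separated
 * list parsed below. Roughly:
 *
 *	ftrace_dump_on_oops=1			dump the global buffer, all CPUs
 *	ftrace_dump_on_oops=orig_cpu		dump only the CPU that oopsed
 *	ftrace_dump_on_oops=1,foo=orig_cpu	also dump instance "foo"
 *
 * (the instance name "foo" above is just an example)
 */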
10399 static void ftrace_dump_by_param(void)
10400 {
10401 	bool first_param = true;
10402 	char dump_param[MAX_TRACER_SIZE];
10403 	char *buf, *token, *inst_name;
10404 	struct trace_array *tr;
10405 
10406 	strscpy(dump_param, ftrace_dump_on_oops, MAX_TRACER_SIZE);
10407 	buf = dump_param;
10408 
10409 	while ((token = strsep(&buf, ",")) != NULL) {
10410 		if (first_param) {
10411 			first_param = false;
10412 			if (!strcmp("0", token))
10413 				continue;
10414 			else if (!strcmp("1", token)) {
10415 				ftrace_dump_one(&global_trace, DUMP_ALL);
10416 				continue;
10417 			}
10418 			else if (!strcmp("2", token) ||
10419 			  !strcmp("orig_cpu", token)) {
10420 				ftrace_dump_one(&global_trace, DUMP_ORIG);
10421 				continue;
10422 			}
10423 		}
10424 
10425 		inst_name = strsep(&token, "=");
10426 		tr = trace_array_find(inst_name);
10427 		if (!tr) {
10428 			printk(KERN_TRACE "Instance %s not found\n", inst_name);
10429 			continue;
10430 		}
10431 
10432 		if (token && (!strcmp("2", token) ||
10433 			  !strcmp("orig_cpu", token)))
10434 			ftrace_dump_one(tr, DUMP_ORIG);
10435 		else
10436 			ftrace_dump_one(tr, DUMP_ALL);
10437 	}
10438 }
10439 
10440 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
10441 {
10442 	static atomic_t dump_running;
10443 
10444 	/* Only allow one dump user at a time. */
10445 	if (atomic_inc_return(&dump_running) != 1) {
10446 		atomic_dec(&dump_running);
10447 		return;
10448 	}
10449 
10450 	switch (oops_dump_mode) {
10451 	case DUMP_ALL:
10452 		ftrace_dump_one(&global_trace, DUMP_ALL);
10453 		break;
10454 	case DUMP_ORIG:
10455 		ftrace_dump_one(&global_trace, DUMP_ORIG);
10456 		break;
10457 	case DUMP_PARAM:
10458 		ftrace_dump_by_param();
10459 		break;
10460 	case DUMP_NONE:
10461 		break;
10462 	default:
10463 		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
10464 		ftrace_dump_one(&global_trace, DUMP_ALL);
10465 	}
10466 
10467 	atomic_dec(&dump_running);
10468 }
10469 EXPORT_SYMBOL_GPL(ftrace_dump);
10470 
10471 #define WRITE_BUFSIZE  4096
10472 
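/*
 * Copy a user supplied buffer of newline separated commands, strip '#'
 * comments, and feed each line to @createfn (used for dynamic events,
 * kprobe/uprobe definitions, and similar command files). A single line
 * must fit in WRITE_BUFSIZE - 2 bytes or -EINVAL is returned.
 */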
10473 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
10474 				size_t count, loff_t *ppos,
10475 				int (*createfn)(const char *))
10476 {
10477 	char *kbuf, *buf, *tmp;
10478 	int ret = 0;
10479 	size_t done = 0;
10480 	size_t size;
10481 
10482 	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
10483 	if (!kbuf)
10484 		return -ENOMEM;
10485 
10486 	while (done < count) {
10487 		size = count - done;
10488 
10489 		if (size >= WRITE_BUFSIZE)
10490 			size = WRITE_BUFSIZE - 1;
10491 
10492 		if (copy_from_user(kbuf, buffer + done, size)) {
10493 			ret = -EFAULT;
10494 			goto out;
10495 		}
10496 		kbuf[size] = '\0';
10497 		buf = kbuf;
10498 		do {
10499 			tmp = strchr(buf, '\n');
10500 			if (tmp) {
10501 				*tmp = '\0';
10502 				size = tmp - buf + 1;
10503 			} else {
10504 				size = strlen(buf);
10505 				if (done + size < count) {
10506 					if (buf != kbuf)
10507 						break;
10508 					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
10509 					pr_warn("Line length is too long: Should be less than %d\n",
10510 						WRITE_BUFSIZE - 2);
10511 					ret = -EINVAL;
10512 					goto out;
10513 				}
10514 			}
10515 			done += size;
10516 
10517 			/* Remove comments */
10518 			tmp = strchr(buf, '#');
10519 
10520 			if (tmp)
10521 				*tmp = '\0';
10522 
10523 			ret = createfn(buf);
10524 			if (ret)
10525 				goto out;
10526 			buf += size;
10527 
10528 		} while (done < count);
10529 	}
10530 	ret = done;
10531 
10532 out:
10533 	kfree(kbuf);
10534 
10535 	return ret;
10536 }
10537 
10538 #ifdef CONFIG_TRACER_MAX_TRACE
10539 __init static bool tr_needs_alloc_snapshot(const char *name)
10540 {
10541 	char *test;
10542 	int len = strlen(name);
10543 	bool ret;
10544 
10545 	if (!boot_snapshot_index)
10546 		return false;
10547 
10548 	if (strncmp(name, boot_snapshot_info, len) == 0 &&
10549 	    boot_snapshot_info[len] == '\t')
10550 		return true;
10551 
10552 	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
10553 	if (!test)
10554 		return false;
10555 
10556 	sprintf(test, "\t%s\t", name);
10557 	ret = strstr(boot_snapshot_info, test) != NULL;
10558 	kfree(test);
10559 	return ret;
10560 }
10561 
10562 __init static void do_allocate_snapshot(const char *name)
10563 {
10564 	if (!tr_needs_alloc_snapshot(name))
10565 		return;
10566 
10567 	/*
10568 	 * When allocate_snapshot is set, the next call to
10569 	 * allocate_trace_buffers() (called by trace_array_get_by_name())
10570 	 * will allocate the snapshot buffer. That will also clear
10571 	 * this flag.
10572 	 */
10573 	allocate_snapshot = true;
10574 }
10575 #else
10576 static inline void do_allocate_snapshot(const char *name) { }
10577 #endif
10578 
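/*
 * Create the trace instances requested on the kernel command line with
 * trace_instance=. Each tab separated entry is, roughly:
 *
 *	name[^traceoff][^traceprintk][@<address|reserve_mem name>[:size]][,event...]
 *
 * e.g. "trace_instance=boot_map^traceoff@myreserve,sched:sched_switch"
 * (where "myreserve" names a reserve_mem= region) creates a persistent
 * instance backed by that memory, starts it with tracing off, and enables
 * the sched:sched_switch event. These names are illustrative only.
 */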
10579 __init static void enable_instances(void)
10580 {
10581 	struct trace_array *tr;
10582 	char *curr_str;
10583 	char *name;
10584 	char *str;
10585 	char *tok;
10586 
10587 	/* A tab is always appended */
10588 	boot_instance_info[boot_instance_index - 1] = '\0';
10589 	str = boot_instance_info;
10590 
10591 	while ((curr_str = strsep(&str, "\t"))) {
10592 		phys_addr_t start = 0;
10593 		phys_addr_t size = 0;
10594 		unsigned long addr = 0;
10595 		bool traceprintk = false;
10596 		bool traceoff = false;
10597 		char *flag_delim;
10598 		char *addr_delim;
10599 		char *rname __free(kfree) = NULL;
10600 
10601 		tok = strsep(&curr_str, ",");
10602 
10603 		flag_delim = strchr(tok, '^');
10604 		addr_delim = strchr(tok, '@');
10605 
10606 		if (addr_delim)
10607 			*addr_delim++ = '\0';
10608 
10609 		if (flag_delim)
10610 			*flag_delim++ = '\0';
10611 
10612 		name = tok;
10613 
10614 		if (flag_delim) {
10615 			char *flag;
10616 
10617 			while ((flag = strsep(&flag_delim, "^"))) {
10618 				if (strcmp(flag, "traceoff") == 0) {
10619 					traceoff = true;
10620 				} else if ((strcmp(flag, "printk") == 0) ||
10621 					   (strcmp(flag, "traceprintk") == 0) ||
10622 					   (strcmp(flag, "trace_printk") == 0)) {
10623 					traceprintk = true;
10624 				} else {
10625 					pr_info("Tracing: Invalid instance flag '%s' for %s\n",
10626 						flag, name);
10627 				}
10628 			}
10629 		}
10630 
10631 		tok = addr_delim;
10632 		if (tok && isdigit(*tok)) {
10633 			start = memparse(tok, &tok);
10634 			if (!start) {
10635 				pr_warn("Tracing: Invalid boot instance address for %s\n",
10636 					name);
10637 				continue;
10638 			}
10639 			if (*tok != ':') {
10640 				pr_warn("Tracing: No size specified for instance %s\n", name);
10641 				continue;
10642 			}
10643 			tok++;
10644 			size = memparse(tok, &tok);
10645 			if (!size) {
10646 				pr_warn("Tracing: Invalid boot instance size for %s\n",
10647 					name);
10648 				continue;
10649 			}
10650 		} else if (tok) {
10651 			if (!reserve_mem_find_by_name(tok, &start, &size)) {
10652 				start = 0;
10653 				pr_warn("Failed to map boot instance %s to %s\n", name, tok);
10654 				continue;
10655 			}
10656 			rname = kstrdup(tok, GFP_KERNEL);
10657 		}
10658 
10659 		if (start) {
10660 			addr = map_pages(start, size);
10661 			if (addr) {
10662 				pr_info("Tracing: mapped boot instance %s at physical memory %pa of size 0x%lx\n",
10663 					name, &start, (unsigned long)size);
10664 			} else {
10665 				pr_warn("Tracing: Failed to map boot instance %s\n", name);
10666 				continue;
10667 			}
10668 		} else {
10669 			/* Only non-mapped buffers have snapshot buffers */
10670 			if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
10671 				do_allocate_snapshot(name);
10672 		}
10673 
10674 		tr = trace_array_create_systems(name, NULL, addr, size);
10675 		if (IS_ERR(tr)) {
10676 			pr_warn("Tracing: Failed to create instance buffer %s\n", curr_str);
10677 			continue;
10678 		}
10679 
10680 		if (traceoff)
10681 			tracer_tracing_off(tr);
10682 
10683 		if (traceprintk)
10684 			update_printk_trace(tr);
10685 
10686 		/*
10687 		 * If start is set, then this is a mapped buffer, and
10688 		 * cannot be deleted by user space, so keep the reference
10689 		 * to it.
10690 		 */
10691 		if (start) {
10692 			tr->flags |= TRACE_ARRAY_FL_BOOT | TRACE_ARRAY_FL_LAST_BOOT;
10693 			tr->range_name = no_free_ptr(rname);
10694 		}
10695 
10696 		while ((tok = strsep(&curr_str, ","))) {
10697 			early_enable_events(tr, tok, true);
10698 		}
10699 	}
10700 }
10701 
10702 __init static int tracer_alloc_buffers(void)
10703 {
10704 	int ring_buf_size;
10705 	int ret = -ENOMEM;
10706 
10707 
10708 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
10709 		pr_warn("Tracing disabled due to lockdown\n");
10710 		return -EPERM;
10711 	}
10712 
10713 	/*
10714 	 * Make sure we don't accidentally add more trace options
10715 	 * than we have bits for.
10716 	 */
10717 	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
10718 
10719 	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
10720 		goto out;
10721 
10722 	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
10723 		goto out_free_buffer_mask;
10724 
10725 	/* Only allocate trace_printk buffers if a trace_printk exists */
10726 	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
10727 		/* Must be called before global_trace.buffer is allocated */
10728 		trace_printk_init_buffers();
10729 
10730 	/* To save memory, keep the ring buffer size to its minimum */
10731 	if (global_trace.ring_buffer_expanded)
10732 		ring_buf_size = trace_buf_size;
10733 	else
10734 		ring_buf_size = 1;
10735 
10736 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
10737 	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
10738 
10739 	raw_spin_lock_init(&global_trace.start_lock);
10740 
10741 	/*
10742 	 * The prepare callback allocates some memory for the ring buffer. We
10743 	 * don't free the buffer if the CPU goes down. If we were to free
10744 	 * the buffer, then the user would lose any trace that was in the
10745 	 * buffer. The memory will be removed once the "instance" is removed.
10746 	 */
10747 	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
10748 				      "trace/RB:prepare", trace_rb_cpu_prepare,
10749 				      NULL);
10750 	if (ret < 0)
10751 		goto out_free_cpumask;
10752 	/* Used for event triggers */
10753 	ret = -ENOMEM;
10754 	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
10755 	if (!temp_buffer)
10756 		goto out_rm_hp_state;
10757 
10758 	if (trace_create_savedcmd() < 0)
10759 		goto out_free_temp_buffer;
10760 
10761 	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
10762 		goto out_free_savedcmd;
10763 
10764 	/* TODO: make the number of buffers hot pluggable with CPUS */
10765 	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
10766 		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
10767 		goto out_free_pipe_cpumask;
10768 	}
10769 	if (global_trace.buffer_disabled)
10770 		tracing_off();
10771 
10772 	if (trace_boot_clock) {
10773 		ret = tracing_set_clock(&global_trace, trace_boot_clock);
10774 		if (ret < 0)
10775 			pr_warn("Trace clock %s not defined, going back to default\n",
10776 				trace_boot_clock);
10777 	}
10778 
10779 	/*
10780 	 * register_tracer() might reference current_trace, so it
10781 	 * needs to be set before we register anything. This is
10782 	 * just a bootstrap of current_trace anyway.
10783 	 */
10784 	global_trace.current_trace = &nop_trace;
10785 
10786 	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
10787 #ifdef CONFIG_TRACER_MAX_TRACE
10788 	spin_lock_init(&global_trace.snapshot_trigger_lock);
10789 #endif
10790 	ftrace_init_global_array_ops(&global_trace);
10791 
10792 #ifdef CONFIG_MODULES
10793 	INIT_LIST_HEAD(&global_trace.mod_events);
10794 #endif
10795 
10796 	init_trace_flags_index(&global_trace);
10797 
10798 	register_tracer(&nop_trace);
10799 
10800 	/* Function tracing may start here (via kernel command line) */
10801 	init_function_trace();
10802 
10803 	/* All seems OK, enable tracing */
10804 	tracing_disabled = 0;
10805 
10806 	atomic_notifier_chain_register(&panic_notifier_list,
10807 				       &trace_panic_notifier);
10808 
10809 	register_die_notifier(&trace_die_notifier);
10810 
10811 	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
10812 
10813 	INIT_LIST_HEAD(&global_trace.systems);
10814 	INIT_LIST_HEAD(&global_trace.events);
10815 	INIT_LIST_HEAD(&global_trace.hist_vars);
10816 	INIT_LIST_HEAD(&global_trace.err_log);
10817 	list_add(&global_trace.list, &ftrace_trace_arrays);
10818 
10819 	apply_trace_boot_options();
10820 
10821 	register_snapshot_cmd();
10822 
10823 	return 0;
10824 
10825 out_free_pipe_cpumask:
10826 	free_cpumask_var(global_trace.pipe_cpumask);
10827 out_free_savedcmd:
10828 	trace_free_saved_cmdlines_buffer();
10829 out_free_temp_buffer:
10830 	ring_buffer_free(temp_buffer);
10831 out_rm_hp_state:
10832 	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
10833 out_free_cpumask:
10834 	free_cpumask_var(global_trace.tracing_cpumask);
10835 out_free_buffer_mask:
10836 	free_cpumask_var(tracing_buffer_mask);
10837 out:
10838 	return ret;
10839 }
10840 
10841 #ifdef CONFIG_FUNCTION_TRACER
10842 /* Used to set module cached ftrace filtering at boot up */
10843 __init struct trace_array *trace_get_global_array(void)
10844 {
10845 	return &global_trace;
10846 }
10847 #endif
10848 
10849 void __init ftrace_boot_snapshot(void)
10850 {
10851 #ifdef CONFIG_TRACER_MAX_TRACE
10852 	struct trace_array *tr;
10853 
10854 	if (!snapshot_at_boot)
10855 		return;
10856 
10857 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
10858 		if (!tr->allocated_snapshot)
10859 			continue;
10860 
10861 		tracing_snapshot_instance(tr);
10862 		trace_array_puts(tr, "** Boot snapshot taken **\n");
10863 	}
10864 #endif
10865 }
10866 
10867 void __init early_trace_init(void)
10868 {
10869 	if (tracepoint_printk) {
10870 		tracepoint_print_iter =
10871 			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
10872 		if (MEM_FAIL(!tracepoint_print_iter,
10873 			     "Failed to allocate trace iterator\n"))
10874 			tracepoint_printk = 0;
10875 		else
10876 			static_key_enable(&tracepoint_printk_key.key);
10877 	}
10878 	tracer_alloc_buffers();
10879 
10880 	init_events();
10881 }
10882 
10883 void __init trace_init(void)
10884 {
10885 	trace_event_init();
10886 
10887 	if (boot_instance_index)
10888 		enable_instances();
10889 }
10890 
10891 __init static void clear_boot_tracer(void)
10892 {
10893 	/*
10894 	 * The default bootup tracer name points into an init section.
10895 	 * This function is called at late_initcall time. If the boot
10896 	 * tracer was never found, clear it out to prevent a later
10897 	 * registration from accessing memory that is about to be
10898 	 * freed.
10899 	 */
10900 	if (!default_bootup_tracer)
10901 		return;
10902 
10903 	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
10904 	       default_bootup_tracer);
10905 	default_bootup_tracer = NULL;
10906 }
10907 
10908 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
10909 __init static void tracing_set_default_clock(void)
10910 {
10911 	/* sched_clock_stable() is determined in late_initcall */
10912 	if (!trace_boot_clock && !sched_clock_stable()) {
10913 		if (security_locked_down(LOCKDOWN_TRACEFS)) {
10914 			pr_warn("Can not set tracing clock due to lockdown\n");
10915 			return;
10916 		}
10917 
10918 		printk(KERN_WARNING
10919 		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
10920 		       "If you want to keep using the local clock, then add:\n"
10921 		       "  \"trace_clock=local\"\n"
10922 		       "on the kernel command line\n");
10923 		tracing_set_clock(&global_trace, "global");
10924 	}
10925 }
10926 #else
10927 static inline void tracing_set_default_clock(void) { }
10928 #endif
10929 
10930 __init static int late_trace_init(void)
10931 {
10932 	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
10933 		static_key_disable(&tracepoint_printk_key.key);
10934 		tracepoint_printk = 0;
10935 	}
10936 
10937 	tracing_set_default_clock();
10938 	clear_boot_tracer();
10939 	return 0;
10940 }
10941 
10942 late_initcall_sync(late_trace_init);
10943