xref: /linux-6.15/kernel/trace/trace.c (revision 34ea8fa0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * ring buffer based function tracer
4  *
5  * Copyright (C) 2007-2012 Steven Rostedt <[email protected]>
6  * Copyright (C) 2008 Ingo Molnar <[email protected]>
7  *
8  * Originally taken from the RT patch by:
9  *    Arnaldo Carvalho de Melo <[email protected]>
10  *
11  * Based on code from the latency_tracer, that is:
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15 #include <linux/ring_buffer.h>
16 #include <linux/utsname.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/cleanup.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/panic_notifier.h>
43 #include <linux/poll.h>
44 #include <linux/nmi.h>
45 #include <linux/fs.h>
46 #include <linux/trace.h>
47 #include <linux/sched/clock.h>
48 #include <linux/sched/rt.h>
49 #include <linux/fsnotify.h>
50 #include <linux/irq_work.h>
51 #include <linux/workqueue.h>
52 #include <linux/sort.h>
53 
54 #include <asm/setup.h> /* COMMAND_LINE_SIZE */
55 
56 #include "trace.h"
57 #include "trace_output.h"
58 
59 #ifdef CONFIG_FTRACE_STARTUP_TEST
60 /*
61  * We need to change this state when a selftest is running.
62  * A selftest will look into the ring buffer to count the
63  * entries inserted during the selftest, although concurrent
64  * insertions into the ring buffer, such as trace_printk(), could occur
65  * at the same time, giving false positive or negative results.
66  */
67 static bool __read_mostly tracing_selftest_running;
68 
69 /*
70  * If boot-time tracing including tracers/events via kernel cmdline
71  * is running, we do not want to run SELFTEST.
72  */
73 bool __read_mostly tracing_selftest_disabled;
74 
75 void __init disable_tracing_selftest(const char *reason)
76 {
77 	if (!tracing_selftest_disabled) {
78 		tracing_selftest_disabled = true;
79 		pr_info("Ftrace startup test is disabled due to %s\n", reason);
80 	}
81 }
82 #else
83 #define tracing_selftest_running	0
84 #define tracing_selftest_disabled	0
85 #endif
86 
87 /* Pipe tracepoints to printk */
88 static struct trace_iterator *tracepoint_print_iter;
89 int tracepoint_printk;
90 static bool tracepoint_printk_stop_on_boot __initdata;
91 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
92 
93 /* For tracers that don't implement custom flags */
94 static struct tracer_opt dummy_tracer_opt[] = {
95 	{ }
96 };
97 
98 static int
99 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
100 {
101 	return 0;
102 }
103 
104 /*
105  * To prevent the comm cache from being overwritten when no
106  * tracing is active, only save the comm when a trace event
107  * occurs.
108  */
109 DEFINE_PER_CPU(bool, trace_taskinfo_save);
110 
111 /*
112  * Kill all tracing for good (never come back).
113  * It is initialized to 1 but will turn to zero if the initialization
114  * of the tracer is successful. But that is the only place that sets
115  * this back to zero.
116  */
117 static int tracing_disabled = 1;
118 
119 cpumask_var_t __read_mostly	tracing_buffer_mask;
120 
121 /*
122  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
123  *
124  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
125  * is set, then ftrace_dump is called. This will output the contents
126  * of the ftrace buffers to the console.  This is very useful for
127  * capturing traces that lead to crashes and outputing it to a
128  * serial console.
129  *
130  * It is default off, but you can enable it with either specifying
131  * "ftrace_dump_on_oops" in the kernel command line, or setting
132  * /proc/sys/kernel/ftrace_dump_on_oops
133  * Set 1 if you want to dump buffers of all CPUs
134  * Set 2 if you want to dump the buffer of the CPU that triggered oops
135  * Set instance name if you want to dump the specific trace instance
136  * Multiple instance dump is also supported, and instances are seperated
137  * by commas.
138  */
139 /* Set to the string "0" to disable dumping by default */
140 char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0";
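/*
 * Examples (illustrative only, the instance names are made up): booting
 * with "ftrace_dump_on_oops" dumps the buffers of all CPUs on an oops,
 * "ftrace_dump_on_oops=2" dumps only the buffer of the CPU that
 * triggered the oops, and "ftrace_dump_on_oops=foo,bar" dumps the
 * trace instances named "foo" and "bar".
 */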
141 
142 /* When set, tracing will stop when a WARN*() is hit */
143 int __disable_trace_on_warning;
144 
145 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
146 /* Map of enums to their values, for "eval_map" file */
147 struct trace_eval_map_head {
148 	struct module			*mod;
149 	unsigned long			length;
150 };
151 
152 union trace_eval_map_item;
153 
154 struct trace_eval_map_tail {
155 	/*
156 	 * "end" is first and points to NULL as it must be different
157 	 * than "mod" or "eval_string"
158 	 */
159 	union trace_eval_map_item	*next;
160 	const char			*end;	/* points to NULL */
161 };
162 
163 static DEFINE_MUTEX(trace_eval_mutex);
164 
165 /*
166  * The trace_eval_maps are saved in an array with two extra elements,
167  * one at the beginning, and one at the end. The beginning item contains
168  * the count of the saved maps (head.length), and the module they
169  * belong to if not built in (head.mod). The ending item contains a
170  * pointer to the next array of saved eval_map items.
171  */
172 union trace_eval_map_item {
173 	struct trace_eval_map		map;
174 	struct trace_eval_map_head	head;
175 	struct trace_eval_map_tail	tail;
176 };
177 
178 static union trace_eval_map_item *trace_eval_maps;
179 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
180 
181 int tracing_set_tracer(struct trace_array *tr, const char *buf);
182 static void ftrace_trace_userstack(struct trace_array *tr,
183 				   struct trace_buffer *buffer,
184 				   unsigned int trace_ctx);
185 
186 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
187 static char *default_bootup_tracer;
188 
189 static bool allocate_snapshot;
190 static bool snapshot_at_boot;
191 
192 static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
193 static int boot_instance_index;
194 
195 static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
196 static int boot_snapshot_index;
197 
198 static int __init set_cmdline_ftrace(char *str)
199 {
200 	strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
201 	default_bootup_tracer = bootup_tracer_buf;
202 	/* We are using ftrace early, expand it */
203 	trace_set_ring_buffer_expanded(NULL);
204 	return 1;
205 }
206 __setup("ftrace=", set_cmdline_ftrace);
207 
208 int ftrace_dump_on_oops_enabled(void)
209 {
210 	if (!strcmp("0", ftrace_dump_on_oops))
211 		return 0;
212 	else
213 		return 1;
214 }
215 
216 static int __init set_ftrace_dump_on_oops(char *str)
217 {
218 	if (!*str) {
219 		strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
220 		return 1;
221 	}
222 
223 	if (*str == ',') {
224 		strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
225 		strscpy(ftrace_dump_on_oops + 1, str, MAX_TRACER_SIZE - 1);
226 		return 1;
227 	}
228 
229 	if (*str++ == '=') {
230 		strscpy(ftrace_dump_on_oops, str, MAX_TRACER_SIZE);
231 		return 1;
232 	}
233 
234 	return 0;
235 }
236 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
237 
238 static int __init stop_trace_on_warning(char *str)
239 {
240 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
241 		__disable_trace_on_warning = 1;
242 	return 1;
243 }
244 __setup("traceoff_on_warning", stop_trace_on_warning);
245 
246 static int __init boot_alloc_snapshot(char *str)
247 {
248 	char *slot = boot_snapshot_info + boot_snapshot_index;
249 	int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
250 	int ret;
251 
252 	if (str[0] == '=') {
253 		str++;
254 		if (strlen(str) >= left)
255 			return -1;
256 
257 		ret = snprintf(slot, left, "%s\t", str);
258 		boot_snapshot_index += ret;
259 	} else {
260 		allocate_snapshot = true;
261 		/* We also need the main ring buffer expanded */
262 		trace_set_ring_buffer_expanded(NULL);
263 	}
264 	return 1;
265 }
266 __setup("alloc_snapshot", boot_alloc_snapshot);
267 
268 
269 static int __init boot_snapshot(char *str)
270 {
271 	snapshot_at_boot = true;
272 	boot_alloc_snapshot(str);
273 	return 1;
274 }
275 __setup("ftrace_boot_snapshot", boot_snapshot);
276 
277 
278 static int __init boot_instance(char *str)
279 {
280 	char *slot = boot_instance_info + boot_instance_index;
281 	int left = sizeof(boot_instance_info) - boot_instance_index;
282 	int ret;
283 
284 	if (strlen(str) >= left)
285 		return -1;
286 
287 	ret = snprintf(slot, left, "%s\t", str);
288 	boot_instance_index += ret;
289 
290 	return 1;
291 }
292 __setup("trace_instance=", boot_instance);
293 
294 
295 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
296 
297 static int __init set_trace_boot_options(char *str)
298 {
299 	strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
300 	return 1;
301 }
302 __setup("trace_options=", set_trace_boot_options);
303 
304 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
305 static char *trace_boot_clock __initdata;
306 
307 static int __init set_trace_boot_clock(char *str)
308 {
309 	strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
310 	trace_boot_clock = trace_boot_clock_buf;
311 	return 1;
312 }
313 __setup("trace_clock=", set_trace_boot_clock);
314 
315 static int __init set_tracepoint_printk(char *str)
316 {
317 	/* Ignore the "tp_printk_stop_on_boot" param */
318 	if (*str == '_')
319 		return 0;
320 
321 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
322 		tracepoint_printk = 1;
323 	return 1;
324 }
325 __setup("tp_printk", set_tracepoint_printk);
326 
327 static int __init set_tracepoint_printk_stop(char *str)
328 {
329 	tracepoint_printk_stop_on_boot = true;
330 	return 1;
331 }
332 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
333 
334 unsigned long long ns2usecs(u64 nsec)
335 {
336 	nsec += 500;
337 	do_div(nsec, 1000);
338 	return nsec;
339 }
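/*
 * The +500 rounds to the nearest microsecond before the divide:
 * e.g. ns2usecs(1499) == 1 and ns2usecs(1500) == 2.
 */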
340 
341 static void
342 trace_process_export(struct trace_export *export,
343 	       struct ring_buffer_event *event, int flag)
344 {
345 	struct trace_entry *entry;
346 	unsigned int size = 0;
347 
348 	if (export->flags & flag) {
349 		entry = ring_buffer_event_data(event);
350 		size = ring_buffer_event_length(event);
351 		export->write(export, entry, size);
352 	}
353 }
354 
355 static DEFINE_MUTEX(ftrace_export_lock);
356 
357 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
358 
359 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
360 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
361 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
362 
363 static inline void ftrace_exports_enable(struct trace_export *export)
364 {
365 	if (export->flags & TRACE_EXPORT_FUNCTION)
366 		static_branch_inc(&trace_function_exports_enabled);
367 
368 	if (export->flags & TRACE_EXPORT_EVENT)
369 		static_branch_inc(&trace_event_exports_enabled);
370 
371 	if (export->flags & TRACE_EXPORT_MARKER)
372 		static_branch_inc(&trace_marker_exports_enabled);
373 }
374 
375 static inline void ftrace_exports_disable(struct trace_export *export)
376 {
377 	if (export->flags & TRACE_EXPORT_FUNCTION)
378 		static_branch_dec(&trace_function_exports_enabled);
379 
380 	if (export->flags & TRACE_EXPORT_EVENT)
381 		static_branch_dec(&trace_event_exports_enabled);
382 
383 	if (export->flags & TRACE_EXPORT_MARKER)
384 		static_branch_dec(&trace_marker_exports_enabled);
385 }
386 
387 static void ftrace_exports(struct ring_buffer_event *event, int flag)
388 {
389 	struct trace_export *export;
390 
391 	preempt_disable_notrace();
392 
393 	export = rcu_dereference_raw_check(ftrace_exports_list);
394 	while (export) {
395 		trace_process_export(export, event, flag);
396 		export = rcu_dereference_raw_check(export->next);
397 	}
398 
399 	preempt_enable_notrace();
400 }
401 
402 static inline void
403 add_trace_export(struct trace_export **list, struct trace_export *export)
404 {
405 	rcu_assign_pointer(export->next, *list);
406 	/*
407 	 * We are adding the export to the list, but another
408 	 * CPU might be walking that list. We need to make sure
409 	 * the export->next pointer is valid before another CPU sees
410 	 * the export pointer inserted into the list.
411 	 */
412 	rcu_assign_pointer(*list, export);
413 }
414 
415 static inline int
416 rm_trace_export(struct trace_export **list, struct trace_export *export)
417 {
418 	struct trace_export **p;
419 
420 	for (p = list; *p != NULL; p = &(*p)->next)
421 		if (*p == export)
422 			break;
423 
424 	if (*p != export)
425 		return -1;
426 
427 	rcu_assign_pointer(*p, (*p)->next);
428 
429 	return 0;
430 }
431 
432 static inline void
433 add_ftrace_export(struct trace_export **list, struct trace_export *export)
434 {
435 	ftrace_exports_enable(export);
436 
437 	add_trace_export(list, export);
438 }
439 
440 static inline int
441 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
442 {
443 	int ret;
444 
445 	ret = rm_trace_export(list, export);
446 	ftrace_exports_disable(export);
447 
448 	return ret;
449 }
450 
451 int register_ftrace_export(struct trace_export *export)
452 {
453 	if (WARN_ON_ONCE(!export->write))
454 		return -1;
455 
456 	mutex_lock(&ftrace_export_lock);
457 
458 	add_ftrace_export(&ftrace_exports_list, export);
459 
460 	mutex_unlock(&ftrace_export_lock);
461 
462 	return 0;
463 }
464 EXPORT_SYMBOL_GPL(register_ftrace_export);
465 
466 int unregister_ftrace_export(struct trace_export *export)
467 {
468 	int ret;
469 
470 	mutex_lock(&ftrace_export_lock);
471 
472 	ret = rm_ftrace_export(&ftrace_exports_list, export);
473 
474 	mutex_unlock(&ftrace_export_lock);
475 
476 	return ret;
477 }
478 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
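/*
 * A minimal usage sketch (hypothetical module code, not part of this
 * file), assuming the struct trace_export definition from <linux/trace.h>:
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int len)
 *	{
 *		... forward the raw trace entry to some other sink ...
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_EVENT,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */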
479 
480 /* trace_flags holds trace_options default values */
481 #define TRACE_DEFAULT_FLAGS						\
482 	(FUNCTION_DEFAULT_FLAGS |					\
483 	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
484 	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
485 	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
486 	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
487 	 TRACE_ITER_HASH_PTR | TRACE_ITER_TRACE_PRINTK)
488 
489 /* trace_options that are only supported by global_trace */
490 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
491 	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
492 
493 /* trace_flags that are default zero for instances */
494 #define ZEROED_TRACE_FLAGS \
495 	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK | TRACE_ITER_TRACE_PRINTK)
496 
497 /*
498  * The global_trace is the descriptor that holds the top-level tracing
499  * buffers for the live tracing.
500  */
501 static struct trace_array global_trace = {
502 	.trace_flags = TRACE_DEFAULT_FLAGS,
503 };
504 
505 static struct trace_array *printk_trace = &global_trace;
506 
507 static __always_inline bool printk_binsafe(struct trace_array *tr)
508 {
509 	/*
510 	 * The binary format of trace_printk() can cause a crash if used
511 	 * by a buffer from another boot. Force the use of the
512 	 * non-binary version of trace_printk() if the trace_printk
513 	 * buffer is a boot-mapped ring buffer.
514 	 */
515 	return !(tr->flags & TRACE_ARRAY_FL_BOOT);
516 }
517 
518 static void update_printk_trace(struct trace_array *tr)
519 {
520 	if (printk_trace == tr)
521 		return;
522 
523 	printk_trace->trace_flags &= ~TRACE_ITER_TRACE_PRINTK;
524 	printk_trace = tr;
525 	tr->trace_flags |= TRACE_ITER_TRACE_PRINTK;
526 }
527 
528 void trace_set_ring_buffer_expanded(struct trace_array *tr)
529 {
530 	if (!tr)
531 		tr = &global_trace;
532 	tr->ring_buffer_expanded = true;
533 }
534 
535 LIST_HEAD(ftrace_trace_arrays);
536 
537 int trace_array_get(struct trace_array *this_tr)
538 {
539 	struct trace_array *tr;
540 
541 	guard(mutex)(&trace_types_lock);
542 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
543 		if (tr == this_tr) {
544 			tr->ref++;
545 			return 0;
546 		}
547 	}
548 
549 	return -ENODEV;
550 }
551 
552 static void __trace_array_put(struct trace_array *this_tr)
553 {
554 	WARN_ON(!this_tr->ref);
555 	this_tr->ref--;
556 }
557 
558 /**
559  * trace_array_put - Decrement the reference counter for this trace array.
560  * @this_tr : pointer to the trace array
561  *
562  * NOTE: Use this when we no longer need the trace array returned by
563  * trace_array_get_by_name(). This ensures the trace array can be later
564  * destroyed.
565  *
566  */
567 void trace_array_put(struct trace_array *this_tr)
568 {
569 	if (!this_tr)
570 		return;
571 
572 	mutex_lock(&trace_types_lock);
573 	__trace_array_put(this_tr);
574 	mutex_unlock(&trace_types_lock);
575 }
576 EXPORT_SYMBOL_GPL(trace_array_put);
577 
578 int tracing_check_open_get_tr(struct trace_array *tr)
579 {
580 	int ret;
581 
582 	ret = security_locked_down(LOCKDOWN_TRACEFS);
583 	if (ret)
584 		return ret;
585 
586 	if (tracing_disabled)
587 		return -ENODEV;
588 
589 	if (tr && trace_array_get(tr) < 0)
590 		return -ENODEV;
591 
592 	return 0;
593 }
594 
595 /**
596  * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
597  * @filtered_pids: The list of pids to check
598  * @search_pid: The PID to find in @filtered_pids
599  *
600  * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
601  */
602 bool
603 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
604 {
605 	return trace_pid_list_is_set(filtered_pids, search_pid);
606 }
607 
608 /**
609  * trace_ignore_this_task - should a task be ignored for tracing
610  * @filtered_pids: The list of pids to check
611  * @filtered_no_pids: The list of pids not to be traced
612  * @task: The task that should be ignored if not filtered
613  *
614  * Checks if @task should be traced or not from @filtered_pids.
615  * Returns true if @task should *NOT* be traced.
616  * Returns false if @task should be traced.
617  */
618 bool
619 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
620 		       struct trace_pid_list *filtered_no_pids,
621 		       struct task_struct *task)
622 {
623 	/*
624 	 * If filtered_no_pids is not empty, and the task's pid is listed
625 	 * in filtered_no_pids, then return true.
626 	 * Otherwise, if filtered_pids is empty, that means we can
627 	 * trace all tasks. If it has content, then only trace pids
628 	 * within filtered_pids.
629 	 */
630 
631 	return (filtered_pids &&
632 		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
633 		(filtered_no_pids &&
634 		 trace_find_filtered_pid(filtered_no_pids, task->pid));
635 }
636 
637 /**
638  * trace_filter_add_remove_task - Add or remove a task from a pid_list
639  * @pid_list: The list to modify
640  * @self: The current task for fork or NULL for exit
641  * @task: The task to add or remove
642  *
643  * If adding a task, if @self is defined, the task is only added if @self
644  * is also included in @pid_list. This happens on fork and tasks should
645  * only be added when the parent is listed. If @self is NULL, then the
646  * @task pid will be removed from the list, which would happen on exit
647  * of a task.
648  */
649 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
650 				  struct task_struct *self,
651 				  struct task_struct *task)
652 {
653 	if (!pid_list)
654 		return;
655 
656 	/* For forks, we only add if the forking task is listed */
657 	if (self) {
658 		if (!trace_find_filtered_pid(pid_list, self->pid))
659 			return;
660 	}
661 
662 	/* "self" is set for forks, and NULL for exits */
663 	if (self)
664 		trace_pid_list_set(pid_list, task->pid);
665 	else
666 		trace_pid_list_clear(pid_list, task->pid);
667 }
668 
669 /**
670  * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
671  * @pid_list: The pid list to show
672  * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
673  * @pos: The position of the file
674  *
675  * This is used by the seq_file "next" operation to iterate the pids
676  * listed in a trace_pid_list structure.
677  *
678  * Returns the pid+1 as we want to display pid of zero, but NULL would
679  * stop the iteration.
680  */
681 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
682 {
683 	long pid = (unsigned long)v;
684 	unsigned int next;
685 
686 	(*pos)++;
687 
688 	/* pid already is +1 of the actual previous bit */
689 	if (trace_pid_list_next(pid_list, pid, &next) < 0)
690 		return NULL;
691 
692 	pid = next;
693 
694 	/* Return pid + 1 to allow zero to be represented */
695 	return (void *)(pid + 1);
696 }
697 
698 /**
699  * trace_pid_start - Used for seq_file to start reading pid lists
700  * @pid_list: The pid list to show
701  * @pos: The position of the file
702  *
703  * This is used by seq_file "start" operation to start the iteration
704  * of listing pids.
705  *
706  * Returns the pid+1 as we want to display pid of zero, but NULL would
707  * stop the iteration.
708  */
709 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
710 {
711 	unsigned long pid;
712 	unsigned int first;
713 	loff_t l = 0;
714 
715 	if (trace_pid_list_first(pid_list, &first) < 0)
716 		return NULL;
717 
718 	pid = first;
719 
720 	/* Return pid + 1 so that zero can be the exit value */
721 	for (pid++; pid && l < *pos;
722 	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
723 		;
724 	return (void *)pid;
725 }
726 
727 /**
728  * trace_pid_show - show the current pid in seq_file processing
729  * @m: The seq_file structure to write into
730  * @v: A void pointer of the pid (+1) value to display
731  *
732  * Can be directly used by seq_file operations to display the current
733  * pid value.
734  */
735 int trace_pid_show(struct seq_file *m, void *v)
736 {
737 	unsigned long pid = (unsigned long)v - 1;
738 
739 	seq_printf(m, "%lu\n", pid);
740 	return 0;
741 }
742 
743 /* 128 should be much more than enough */
744 #define PID_BUF_SIZE		127
745 
746 int trace_pid_write(struct trace_pid_list *filtered_pids,
747 		    struct trace_pid_list **new_pid_list,
748 		    const char __user *ubuf, size_t cnt)
749 {
750 	struct trace_pid_list *pid_list;
751 	struct trace_parser parser;
752 	unsigned long val;
753 	int nr_pids = 0;
754 	ssize_t read = 0;
755 	ssize_t ret;
756 	loff_t pos;
757 	pid_t pid;
758 
759 	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
760 		return -ENOMEM;
761 
762 	/*
763 	 * Always create a new list. The write is an all-or-nothing
764 	 * operation: a new list is built whenever the user adds pids,
765 	 * and if the operation fails, the current list is
766 	 * not modified.
767 	 */
768 	pid_list = trace_pid_list_alloc();
769 	if (!pid_list) {
770 		trace_parser_put(&parser);
771 		return -ENOMEM;
772 	}
773 
774 	if (filtered_pids) {
775 		/* copy the current bits to the new max */
776 		ret = trace_pid_list_first(filtered_pids, &pid);
777 		while (!ret) {
778 			trace_pid_list_set(pid_list, pid);
779 			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
780 			nr_pids++;
781 		}
782 	}
783 
784 	ret = 0;
785 	while (cnt > 0) {
786 
787 		pos = 0;
788 
789 		ret = trace_get_user(&parser, ubuf, cnt, &pos);
790 		if (ret < 0)
791 			break;
792 
793 		read += ret;
794 		ubuf += ret;
795 		cnt -= ret;
796 
797 		if (!trace_parser_loaded(&parser))
798 			break;
799 
800 		ret = -EINVAL;
801 		if (kstrtoul(parser.buffer, 0, &val))
802 			break;
803 
804 		pid = (pid_t)val;
805 
806 		if (trace_pid_list_set(pid_list, pid) < 0) {
807 			ret = -1;
808 			break;
809 		}
810 		nr_pids++;
811 
812 		trace_parser_clear(&parser);
813 		ret = 0;
814 	}
815 	trace_parser_put(&parser);
816 
817 	if (ret < 0) {
818 		trace_pid_list_free(pid_list);
819 		return ret;
820 	}
821 
822 	if (!nr_pids) {
823 		/* Cleared the list of pids */
824 		trace_pid_list_free(pid_list);
825 		pid_list = NULL;
826 	}
827 
828 	*new_pid_list = pid_list;
829 
830 	return read;
831 }
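/*
 * This helper backs the pid-filter files in tracefs (e.g. "set_event_pid"
 * and "set_ftrace_pid"): the user-supplied buffer is parsed as a
 * whitespace-separated list of pids, e.g. "123 456", and the freshly
 * built list replaces the old one only if the whole write succeeds.
 */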
832 
833 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
834 {
835 	u64 ts;
836 
837 	/* Early boot up does not have a buffer yet */
838 	if (!buf->buffer)
839 		return trace_clock_local();
840 
841 	ts = ring_buffer_time_stamp(buf->buffer);
842 	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
843 
844 	return ts;
845 }
846 
847 u64 ftrace_now(int cpu)
848 {
849 	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
850 }
851 
852 /**
853  * tracing_is_enabled - Show if global_trace has been enabled
854  *
855  * Shows if the global trace has been enabled or not. It uses the
856  * mirror flag "buffer_disabled", which is meant for fast paths such as
857  * the irqsoff tracer, but it may be inaccurate due to races. If you
858  * need to know the accurate state, use tracing_is_on() which is a little
859  * slower, but accurate.
860  */
861 int tracing_is_enabled(void)
862 {
863 	/*
864 	 * For quick access (irqsoff uses this in fast path), just
865 	 * return the mirror variable of the state of the ring buffer.
866 	 * It's a little racy, but we don't really care.
867 	 */
868 	smp_rmb();
869 	return !global_trace.buffer_disabled;
870 }
871 
872 /*
873  * trace_buf_size is the size in bytes that is allocated
874  * for a buffer. Note, the number of bytes is always rounded
875  * to page size.
876  *
877  * This number is purposely set to a low number of 16384.
878  * If a dump on oops happens, keeping the buffer small avoids having
879  * to wait for a huge amount of output. This is configurable at
880  * both boot time and run time anyway.
881  */
882 #define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
883 
884 static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
885 
886 /* trace_types holds a link list of available tracers. */
887 static struct tracer		*trace_types __read_mostly;
888 
889 /*
890  * trace_types_lock is used to protect the trace_types list.
891  */
892 DEFINE_MUTEX(trace_types_lock);
893 
894 /*
895  * Serialize access to the ring buffer.
896  *
897  * The ring buffer serializes readers, but that is only low-level protection.
898  * The validity of the events (returned by ring_buffer_peek() etc.)
899  * is not protected by the ring buffer.
900  *
901  * The content of events may become garbage if we allow other processes to
902  * consume these events concurrently:
903  *   A) the page of the consumed events may become a normal page
904  *      (not a reader page) in the ring buffer, and this page will be
905  *      rewritten by the event producer.
906  *   B) the page of the consumed events may become a page for splice_read,
907  *      and this page will be returned to the system.
908  *
909  * These primitives allow multiple processes to access different CPU ring
910  * buffers concurrently.
911  *
912  * These primitives don't distinguish read-only and read-consume access.
913  * Multiple read-only accesses are also serialized.
914  */
915 
916 #ifdef CONFIG_SMP
917 static DECLARE_RWSEM(all_cpu_access_lock);
918 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
919 
920 static inline void trace_access_lock(int cpu)
921 {
922 	if (cpu == RING_BUFFER_ALL_CPUS) {
923 		/* gain it for accessing the whole ring buffer. */
924 		down_write(&all_cpu_access_lock);
925 	} else {
926 		/* gain it for accessing a cpu ring buffer. */
927 
928 		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
929 		down_read(&all_cpu_access_lock);
930 
931 		/* Secondly block other access to this @cpu ring buffer. */
932 		mutex_lock(&per_cpu(cpu_access_lock, cpu));
933 	}
934 }
935 
936 static inline void trace_access_unlock(int cpu)
937 {
938 	if (cpu == RING_BUFFER_ALL_CPUS) {
939 		up_write(&all_cpu_access_lock);
940 	} else {
941 		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
942 		up_read(&all_cpu_access_lock);
943 	}
944 }
945 
946 static inline void trace_access_lock_init(void)
947 {
948 	int cpu;
949 
950 	for_each_possible_cpu(cpu)
951 		mutex_init(&per_cpu(cpu_access_lock, cpu));
952 }
953 
954 #else
955 
956 static DEFINE_MUTEX(access_lock);
957 
958 static inline void trace_access_lock(int cpu)
959 {
960 	(void)cpu;
961 	mutex_lock(&access_lock);
962 }
963 
964 static inline void trace_access_unlock(int cpu)
965 {
966 	(void)cpu;
967 	mutex_unlock(&access_lock);
968 }
969 
970 static inline void trace_access_lock_init(void)
971 {
972 }
973 
974 #endif
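/*
 * Typical usage sketch for the primitives above: a reader of a single
 * CPU buffer does
 *
 *	trace_access_lock(cpu);
 *	... peek at or consume events from that cpu's buffer ...
 *	trace_access_unlock(cpu);
 *
 * while passing RING_BUFFER_ALL_CPUS takes exclusive access across all
 * per-cpu buffers (e.g. for a global read of the trace).
 */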
975 
976 #ifdef CONFIG_STACKTRACE
977 static void __ftrace_trace_stack(struct trace_array *tr,
978 				 struct trace_buffer *buffer,
979 				 unsigned int trace_ctx,
980 				 int skip, struct pt_regs *regs);
981 static inline void ftrace_trace_stack(struct trace_array *tr,
982 				      struct trace_buffer *buffer,
983 				      unsigned int trace_ctx,
984 				      int skip, struct pt_regs *regs);
985 
986 #else
987 static inline void __ftrace_trace_stack(struct trace_array *tr,
988 					struct trace_buffer *buffer,
989 					unsigned int trace_ctx,
990 					int skip, struct pt_regs *regs)
991 {
992 }
993 static inline void ftrace_trace_stack(struct trace_array *tr,
994 				      struct trace_buffer *buffer,
995 				      unsigned long trace_ctx,
996 				      int skip, struct pt_regs *regs)
997 {
998 }
999 
1000 #endif
1001 
1002 static __always_inline void
1003 trace_event_setup(struct ring_buffer_event *event,
1004 		  int type, unsigned int trace_ctx)
1005 {
1006 	struct trace_entry *ent = ring_buffer_event_data(event);
1007 
1008 	tracing_generic_entry_update(ent, type, trace_ctx);
1009 }
1010 
1011 static __always_inline struct ring_buffer_event *
1012 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
1013 			  int type,
1014 			  unsigned long len,
1015 			  unsigned int trace_ctx)
1016 {
1017 	struct ring_buffer_event *event;
1018 
1019 	event = ring_buffer_lock_reserve(buffer, len);
1020 	if (event != NULL)
1021 		trace_event_setup(event, type, trace_ctx);
1022 
1023 	return event;
1024 }
1025 
1026 void tracer_tracing_on(struct trace_array *tr)
1027 {
1028 	if (tr->array_buffer.buffer)
1029 		ring_buffer_record_on(tr->array_buffer.buffer);
1030 	/*
1031 	 * This flag is looked at when buffers haven't been allocated
1032 	 * yet, or by some tracers (like irqsoff), that just want to
1033 	 * know if the ring buffer has been disabled, but it can handle
1034 	 * races where it gets disabled while we still do a record.
1035 	 * As the check is in the fast path of the tracers, it is more
1036 	 * important to be fast than accurate.
1037 	 */
1038 	tr->buffer_disabled = 0;
1039 	/* Make the flag seen by readers */
1040 	smp_wmb();
1041 }
1042 
1043 /**
1044  * tracing_on - enable tracing buffers
1045  *
1046  * This function enables tracing buffers that may have been
1047  * disabled with tracing_off.
1048  */
1049 void tracing_on(void)
1050 {
1051 	tracer_tracing_on(&global_trace);
1052 }
1053 EXPORT_SYMBOL_GPL(tracing_on);
1054 
1055 
1056 static __always_inline void
1057 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
1058 {
1059 	__this_cpu_write(trace_taskinfo_save, true);
1060 
1061 	/* If this is the temp buffer, we need to commit fully */
1062 	if (this_cpu_read(trace_buffered_event) == event) {
1063 		/* Length is in event->array[0] */
1064 		ring_buffer_write(buffer, event->array[0], &event->array[1]);
1065 		/* Release the temp buffer */
1066 		this_cpu_dec(trace_buffered_event_cnt);
1067 		/* ring_buffer_unlock_commit() enables preemption */
1068 		preempt_enable_notrace();
1069 	} else
1070 		ring_buffer_unlock_commit(buffer);
1071 }
1072 
1073 int __trace_array_puts(struct trace_array *tr, unsigned long ip,
1074 		       const char *str, int size)
1075 {
1076 	struct ring_buffer_event *event;
1077 	struct trace_buffer *buffer;
1078 	struct print_entry *entry;
1079 	unsigned int trace_ctx;
1080 	int alloc;
1081 
1082 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1083 		return 0;
1084 
1085 	if (unlikely(tracing_selftest_running && tr == &global_trace))
1086 		return 0;
1087 
1088 	if (unlikely(tracing_disabled))
1089 		return 0;
1090 
1091 	alloc = sizeof(*entry) + size + 2; /* possible \n added */
1092 
1093 	trace_ctx = tracing_gen_ctx();
1094 	buffer = tr->array_buffer.buffer;
1095 	ring_buffer_nest_start(buffer);
1096 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1097 					    trace_ctx);
1098 	if (!event) {
1099 		size = 0;
1100 		goto out;
1101 	}
1102 
1103 	entry = ring_buffer_event_data(event);
1104 	entry->ip = ip;
1105 
1106 	memcpy(&entry->buf, str, size);
1107 
1108 	/* Add a newline if necessary */
1109 	if (entry->buf[size - 1] != '\n') {
1110 		entry->buf[size] = '\n';
1111 		entry->buf[size + 1] = '\0';
1112 	} else
1113 		entry->buf[size] = '\0';
1114 
1115 	__buffer_unlock_commit(buffer, event);
1116 	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1117  out:
1118 	ring_buffer_nest_end(buffer);
1119 	return size;
1120 }
1121 EXPORT_SYMBOL_GPL(__trace_array_puts);
1122 
1123 /**
1124  * __trace_puts - write a constant string into the trace buffer.
1125  * @ip:	   The address of the caller
1126  * @str:   The constant string to write
1127  * @size:  The size of the string.
1128  */
1129 int __trace_puts(unsigned long ip, const char *str, int size)
1130 {
1131 	return __trace_array_puts(printk_trace, ip, str, size);
1132 }
1133 EXPORT_SYMBOL_GPL(__trace_puts);
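/*
 * Callers normally go through the trace_puts(str) macro rather than
 * calling this directly; the macro picks __trace_bputs() for build-time
 * constant strings and falls back to __trace_puts() otherwise.
 */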
1134 
1135 /**
1136  * __trace_bputs - write the pointer to a constant string into trace buffer
1137  * @ip:	   The address of the caller
1138  * @str:   The constant string to write to the buffer
1139  */
1140 int __trace_bputs(unsigned long ip, const char *str)
1141 {
1142 	struct trace_array *tr = READ_ONCE(printk_trace);
1143 	struct ring_buffer_event *event;
1144 	struct trace_buffer *buffer;
1145 	struct bputs_entry *entry;
1146 	unsigned int trace_ctx;
1147 	int size = sizeof(struct bputs_entry);
1148 	int ret = 0;
1149 
1150 	if (!printk_binsafe(tr))
1151 		return __trace_puts(ip, str, strlen(str));
1152 
1153 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1154 		return 0;
1155 
1156 	if (unlikely(tracing_selftest_running || tracing_disabled))
1157 		return 0;
1158 
1159 	trace_ctx = tracing_gen_ctx();
1160 	buffer = tr->array_buffer.buffer;
1161 
1162 	ring_buffer_nest_start(buffer);
1163 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1164 					    trace_ctx);
1165 	if (!event)
1166 		goto out;
1167 
1168 	entry = ring_buffer_event_data(event);
1169 	entry->ip			= ip;
1170 	entry->str			= str;
1171 
1172 	__buffer_unlock_commit(buffer, event);
1173 	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1174 
1175 	ret = 1;
1176  out:
1177 	ring_buffer_nest_end(buffer);
1178 	return ret;
1179 }
1180 EXPORT_SYMBOL_GPL(__trace_bputs);
1181 
1182 #ifdef CONFIG_TRACER_SNAPSHOT
1183 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1184 					   void *cond_data)
1185 {
1186 	struct tracer *tracer = tr->current_trace;
1187 	unsigned long flags;
1188 
1189 	if (in_nmi()) {
1190 		trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1191 		trace_array_puts(tr, "*** snapshot is being ignored        ***\n");
1192 		return;
1193 	}
1194 
1195 	if (!tr->allocated_snapshot) {
1196 		trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
1197 		trace_array_puts(tr, "*** stopping trace here!   ***\n");
1198 		tracer_tracing_off(tr);
1199 		return;
1200 	}
1201 
1202 	/* Note, snapshot can not be used when the tracer uses it */
1203 	if (tracer->use_max_tr) {
1204 		trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
1205 		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1206 		return;
1207 	}
1208 
1209 	if (tr->mapped) {
1210 		trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n");
1211 		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1212 		return;
1213 	}
1214 
1215 	local_irq_save(flags);
1216 	update_max_tr(tr, current, smp_processor_id(), cond_data);
1217 	local_irq_restore(flags);
1218 }
1219 
1220 void tracing_snapshot_instance(struct trace_array *tr)
1221 {
1222 	tracing_snapshot_instance_cond(tr, NULL);
1223 }
1224 
1225 /**
1226  * tracing_snapshot - take a snapshot of the current buffer.
1227  *
1228  * This causes a swap between the snapshot buffer and the current live
1229  * tracing buffer. You can use this to take snapshots of the live
1230  * trace when some condition is triggered, but continue to trace.
1231  *
1232  * Note, make sure to allocate the snapshot either with
1233  * tracing_snapshot_alloc(), or manually
1234  * with: echo 1 > /sys/kernel/tracing/snapshot
1235  *
1236  * If the snapshot buffer is not allocated, this will stop tracing,
1237  * basically making a permanent snapshot.
1238  */
1239 void tracing_snapshot(void)
1240 {
1241 	struct trace_array *tr = &global_trace;
1242 
1243 	tracing_snapshot_instance(tr);
1244 }
1245 EXPORT_SYMBOL_GPL(tracing_snapshot);
1246 
1247 /**
1248  * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1249  * @tr:		The tracing instance to snapshot
1250  * @cond_data:	The data to be tested conditionally, and possibly saved
1251  *
1252  * This is the same as tracing_snapshot() except that the snapshot is
1253  * conditional - the snapshot will only happen if the
1254  * cond_snapshot.update() implementation receiving the cond_data
1255  * returns true, which means that the trace array's cond_snapshot
1256  * update() operation used the cond_data to determine whether the
1257  * snapshot should be taken, and if it was, presumably saved it along
1258  * with the snapshot.
1259  */
1260 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1261 {
1262 	tracing_snapshot_instance_cond(tr, cond_data);
1263 }
1264 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1265 
1266 /**
1267  * tracing_cond_snapshot_data - get the user data associated with a snapshot
1268  * @tr:		The tracing instance
1269  *
1270  * When the user enables a conditional snapshot using
1271  * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1272  * with the snapshot.  This accessor is used to retrieve it.
1273  *
1274  * Should not be called from cond_snapshot.update(), since it takes
1275  * the tr->max_lock lock, which the code calling
1276  * cond_snapshot.update() has already done.
1277  *
1278  * Returns the cond_data associated with the trace array's snapshot.
1279  */
1280 void *tracing_cond_snapshot_data(struct trace_array *tr)
1281 {
1282 	void *cond_data = NULL;
1283 
1284 	local_irq_disable();
1285 	arch_spin_lock(&tr->max_lock);
1286 
1287 	if (tr->cond_snapshot)
1288 		cond_data = tr->cond_snapshot->cond_data;
1289 
1290 	arch_spin_unlock(&tr->max_lock);
1291 	local_irq_enable();
1292 
1293 	return cond_data;
1294 }
1295 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1296 
1297 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1298 					struct array_buffer *size_buf, int cpu_id);
1299 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1300 
1301 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1302 {
1303 	int order;
1304 	int ret;
1305 
1306 	if (!tr->allocated_snapshot) {
1307 
1308 		/* Make the snapshot buffer have the same order as main buffer */
1309 		order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
1310 		ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
1311 		if (ret < 0)
1312 			return ret;
1313 
1314 		/* allocate spare buffer */
1315 		ret = resize_buffer_duplicate_size(&tr->max_buffer,
1316 				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1317 		if (ret < 0)
1318 			return ret;
1319 
1320 		tr->allocated_snapshot = true;
1321 	}
1322 
1323 	return 0;
1324 }
1325 
1326 static void free_snapshot(struct trace_array *tr)
1327 {
1328 	/*
1329 	 * We don't free the ring buffer; instead, we resize it because
1330 	 * the max_tr ring buffer has some state (e.g. ring->clock) and
1331 	 * we want to preserve it.
1332 	 */
1333 	ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
1334 	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1335 	set_buffer_entries(&tr->max_buffer, 1);
1336 	tracing_reset_online_cpus(&tr->max_buffer);
1337 	tr->allocated_snapshot = false;
1338 }
1339 
1340 static int tracing_arm_snapshot_locked(struct trace_array *tr)
1341 {
1342 	int ret;
1343 
1344 	lockdep_assert_held(&trace_types_lock);
1345 
1346 	spin_lock(&tr->snapshot_trigger_lock);
1347 	if (tr->snapshot == UINT_MAX || tr->mapped) {
1348 		spin_unlock(&tr->snapshot_trigger_lock);
1349 		return -EBUSY;
1350 	}
1351 
1352 	tr->snapshot++;
1353 	spin_unlock(&tr->snapshot_trigger_lock);
1354 
1355 	ret = tracing_alloc_snapshot_instance(tr);
1356 	if (ret) {
1357 		spin_lock(&tr->snapshot_trigger_lock);
1358 		tr->snapshot--;
1359 		spin_unlock(&tr->snapshot_trigger_lock);
1360 	}
1361 
1362 	return ret;
1363 }
1364 
1365 int tracing_arm_snapshot(struct trace_array *tr)
1366 {
1367 	int ret;
1368 
1369 	mutex_lock(&trace_types_lock);
1370 	ret = tracing_arm_snapshot_locked(tr);
1371 	mutex_unlock(&trace_types_lock);
1372 
1373 	return ret;
1374 }
1375 
1376 void tracing_disarm_snapshot(struct trace_array *tr)
1377 {
1378 	spin_lock(&tr->snapshot_trigger_lock);
1379 	if (!WARN_ON(!tr->snapshot))
1380 		tr->snapshot--;
1381 	spin_unlock(&tr->snapshot_trigger_lock);
1382 }
1383 
1384 /**
1385  * tracing_alloc_snapshot - allocate snapshot buffer.
1386  *
1387  * This only allocates the snapshot buffer if it isn't already
1388  * allocated - it doesn't also take a snapshot.
1389  *
1390  * This is meant to be used in cases where the snapshot buffer needs
1391  * to be set up for events that can't sleep but need to be able to
1392  * trigger a snapshot.
1393  */
1394 int tracing_alloc_snapshot(void)
1395 {
1396 	struct trace_array *tr = &global_trace;
1397 	int ret;
1398 
1399 	ret = tracing_alloc_snapshot_instance(tr);
1400 	WARN_ON(ret < 0);
1401 
1402 	return ret;
1403 }
1404 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1405 
1406 /**
1407  * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1408  *
1409  * This is similar to tracing_snapshot(), but it will allocate the
1410  * snapshot buffer if it isn't already allocated. Use this only
1411  * where it is safe to sleep, as the allocation may sleep.
1412  *
1413  * This causes a swap between the snapshot buffer and the current live
1414  * tracing buffer. You can use this to take snapshots of the live
1415  * trace when some condition is triggered, but continue to trace.
1416  */
1417 void tracing_snapshot_alloc(void)
1418 {
1419 	int ret;
1420 
1421 	ret = tracing_alloc_snapshot();
1422 	if (ret < 0)
1423 		return;
1424 
1425 	tracing_snapshot();
1426 }
1427 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
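/*
 * A typical (hypothetical) use from kernel code: call
 * tracing_snapshot_alloc() once from a context that may sleep, then call
 * tracing_snapshot() from the point of interest (even in atomic context,
 * though calls from NMI are ignored) to preserve the trace leading up to
 * that point.
 */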
1428 
1429 /**
1430  * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1431  * @tr:		The tracing instance
1432  * @cond_data:	User data to associate with the snapshot
1433  * @update:	Implementation of the cond_snapshot update function
1434  *
1435  * Check whether the conditional snapshot for the given instance has
1436  * already been enabled, or if the current tracer is already using a
1437  * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1438  * save the cond_data and update function inside.
1439  *
1440  * Returns 0 if successful, error otherwise.
1441  */
1442 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1443 				 cond_update_fn_t update)
1444 {
1445 	struct cond_snapshot *cond_snapshot __free(kfree) =
1446 		kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1447 	int ret;
1448 
1449 	if (!cond_snapshot)
1450 		return -ENOMEM;
1451 
1452 	cond_snapshot->cond_data = cond_data;
1453 	cond_snapshot->update = update;
1454 
1455 	guard(mutex)(&trace_types_lock);
1456 
1457 	if (tr->current_trace->use_max_tr)
1458 		return -EBUSY;
1459 
1460 	/*
1461 	 * The cond_snapshot can only change to NULL without the
1462 	 * trace_types_lock. We don't care if we race with it going
1463 	 * to NULL, but we want to make sure that it's not set to
1464 	 * something other than NULL when we get here, which we can
1465 	 * do safely with only holding the trace_types_lock and not
1466 	 * having to take the max_lock.
1467 	 */
1468 	if (tr->cond_snapshot)
1469 		return -EBUSY;
1470 
1471 	ret = tracing_arm_snapshot_locked(tr);
1472 	if (ret)
1473 		return ret;
1474 
1475 	local_irq_disable();
1476 	arch_spin_lock(&tr->max_lock);
1477 	tr->cond_snapshot = no_free_ptr(cond_snapshot);
1478 	arch_spin_unlock(&tr->max_lock);
1479 	local_irq_enable();
1480 
1481 	return 0;
1482 }
1483 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1484 
1485 /**
1486  * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1487  * @tr:		The tracing instance
1488  *
1489  * Check whether the conditional snapshot for the given instance is
1490  * enabled; if so, free the cond_snapshot associated with it,
1491  * otherwise return -EINVAL.
1492  *
1493  * Returns 0 if successful, error otherwise.
1494  */
1495 int tracing_snapshot_cond_disable(struct trace_array *tr)
1496 {
1497 	int ret = 0;
1498 
1499 	local_irq_disable();
1500 	arch_spin_lock(&tr->max_lock);
1501 
1502 	if (!tr->cond_snapshot)
1503 		ret = -EINVAL;
1504 	else {
1505 		kfree(tr->cond_snapshot);
1506 		tr->cond_snapshot = NULL;
1507 	}
1508 
1509 	arch_spin_unlock(&tr->max_lock);
1510 	local_irq_enable();
1511 
1512 	tracing_disarm_snapshot(tr);
1513 
1514 	return ret;
1515 }
1516 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1517 #else
1518 void tracing_snapshot(void)
1519 {
1520 	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1521 }
1522 EXPORT_SYMBOL_GPL(tracing_snapshot);
1523 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1524 {
1525 	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1526 }
1527 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1528 int tracing_alloc_snapshot(void)
1529 {
1530 	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1531 	return -ENODEV;
1532 }
1533 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1534 void tracing_snapshot_alloc(void)
1535 {
1536 	/* Give warning */
1537 	tracing_snapshot();
1538 }
1539 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1540 void *tracing_cond_snapshot_data(struct trace_array *tr)
1541 {
1542 	return NULL;
1543 }
1544 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1545 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1546 {
1547 	return -ENODEV;
1548 }
1549 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1550 int tracing_snapshot_cond_disable(struct trace_array *tr)
1551 {
1552 	return false;
1553 }
1554 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1555 #define free_snapshot(tr)	do { } while (0)
1556 #define tracing_arm_snapshot_locked(tr) ({ -EBUSY; })
1557 #endif /* CONFIG_TRACER_SNAPSHOT */
1558 
1559 void tracer_tracing_off(struct trace_array *tr)
1560 {
1561 	if (tr->array_buffer.buffer)
1562 		ring_buffer_record_off(tr->array_buffer.buffer);
1563 	/*
1564 	 * This flag is looked at when buffers haven't been allocated
1565 	 * yet, or by some tracers (like irqsoff), that just want to
1566 	 * know if the ring buffer has been disabled, but it can handle
1567 	 * races where it gets disabled while we still do a record.
1568 	 * As the check is in the fast path of the tracers, it is more
1569 	 * important to be fast than accurate.
1570 	 */
1571 	tr->buffer_disabled = 1;
1572 	/* Make the flag seen by readers */
1573 	smp_wmb();
1574 }
1575 
1576 /**
1577  * tracing_off - turn off tracing buffers
1578  *
1579  * This function stops the tracing buffers from recording data.
1580  * It does not disable any overhead the tracers themselves may
1581  * be causing. This function simply causes all recording to
1582  * the ring buffers to fail.
1583  */
1584 void tracing_off(void)
1585 {
1586 	tracer_tracing_off(&global_trace);
1587 }
1588 EXPORT_SYMBOL_GPL(tracing_off);
1589 
1590 void disable_trace_on_warning(void)
1591 {
1592 	if (__disable_trace_on_warning) {
1593 		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1594 			"Disabling tracing due to warning\n");
1595 		tracing_off();
1596 	}
1597 }
1598 
1599 /**
1600  * tracer_tracing_is_on - show real state of ring buffer enabled
1601  * @tr : the trace array to know if ring buffer is enabled
1602  *
1603  * Shows real state of the ring buffer if it is enabled or not.
1604  */
1605 bool tracer_tracing_is_on(struct trace_array *tr)
1606 {
1607 	if (tr->array_buffer.buffer)
1608 		return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
1609 	return !tr->buffer_disabled;
1610 }
1611 
1612 /**
1613  * tracing_is_on - show state of ring buffers enabled
1614  */
1615 int tracing_is_on(void)
1616 {
1617 	return tracer_tracing_is_on(&global_trace);
1618 }
1619 EXPORT_SYMBOL_GPL(tracing_is_on);
1620 
1621 static int __init set_buf_size(char *str)
1622 {
1623 	unsigned long buf_size;
1624 
1625 	if (!str)
1626 		return 0;
1627 	buf_size = memparse(str, &str);
1628 	/*
1629 	 * nr_entries can not be zero and the startup
1630 	 * tests require some buffer space. Therefore
1631 	 * ensure we have at least 4096 bytes of buffer.
1632 	 */
1633 	trace_buf_size = max(4096UL, buf_size);
1634 	return 1;
1635 }
1636 __setup("trace_buf_size=", set_buf_size);
1637 
1638 static int __init set_tracing_thresh(char *str)
1639 {
1640 	unsigned long threshold;
1641 	int ret;
1642 
1643 	if (!str)
1644 		return 0;
1645 	ret = kstrtoul(str, 0, &threshold);
1646 	if (ret < 0)
1647 		return 0;
1648 	tracing_thresh = threshold * 1000;
1649 	return 1;
1650 }
1651 __setup("tracing_thresh=", set_tracing_thresh);
1652 
1653 unsigned long nsecs_to_usecs(unsigned long nsecs)
1654 {
1655 	return nsecs / 1000;
1656 }
1657 
1658 /*
1659  * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1660  * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1661  * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1662  * of strings in the order that the evals (enum) were defined.
1663  */
1664 #undef C
1665 #define C(a, b) b
1666 
1667 /* These must match the bit positions in trace_iterator_flags */
1668 static const char *trace_options[] = {
1669 	TRACE_FLAGS
1670 	NULL
1671 };
1672 
1673 static struct {
1674 	u64 (*func)(void);
1675 	const char *name;
1676 	int in_ns;		/* is this clock in nanoseconds? */
1677 } trace_clocks[] = {
1678 	{ trace_clock_local,		"local",	1 },
1679 	{ trace_clock_global,		"global",	1 },
1680 	{ trace_clock_counter,		"counter",	0 },
1681 	{ trace_clock_jiffies,		"uptime",	0 },
1682 	{ trace_clock,			"perf",		1 },
1683 	{ ktime_get_mono_fast_ns,	"mono",		1 },
1684 	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
1685 	{ ktime_get_boot_fast_ns,	"boot",		1 },
1686 	{ ktime_get_tai_fast_ns,	"tai",		1 },
1687 	ARCH_TRACE_CLOCKS
1688 };
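/*
 * The names above are what appear in the tracefs "trace_clock" file; for
 * example "echo mono > /sys/kernel/tracing/trace_clock" switches new
 * events to ktime_get_mono_fast_ns().
 */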
1689 
1690 bool trace_clock_in_ns(struct trace_array *tr)
1691 {
1692 	if (trace_clocks[tr->clock_id].in_ns)
1693 		return true;
1694 
1695 	return false;
1696 }
1697 
1698 /*
1699  * trace_parser_get_init - gets the buffer for trace parser
1700  */
1701 int trace_parser_get_init(struct trace_parser *parser, int size)
1702 {
1703 	memset(parser, 0, sizeof(*parser));
1704 
1705 	parser->buffer = kmalloc(size, GFP_KERNEL);
1706 	if (!parser->buffer)
1707 		return 1;
1708 
1709 	parser->size = size;
1710 	return 0;
1711 }
1712 
1713 /*
1714  * trace_parser_put - frees the buffer for trace parser
1715  */
1716 void trace_parser_put(struct trace_parser *parser)
1717 {
1718 	kfree(parser->buffer);
1719 	parser->buffer = NULL;
1720 }
1721 
1722 /*
1723  * trace_get_user - reads the user input string separated by space
1724  * (matched by isspace(ch))
1725  *
1726  * For each string found the 'struct trace_parser' is updated,
1727  * and the function returns.
1728  *
1729  * Returns number of bytes read.
1730  *
1731  * See kernel/trace/trace.h for 'struct trace_parser' details.
1732  */
1733 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1734 	size_t cnt, loff_t *ppos)
1735 {
1736 	char ch;
1737 	size_t read = 0;
1738 	ssize_t ret;
1739 
1740 	if (!*ppos)
1741 		trace_parser_clear(parser);
1742 
1743 	ret = get_user(ch, ubuf++);
1744 	if (ret)
1745 		goto out;
1746 
1747 	read++;
1748 	cnt--;
1749 
1750 	/*
1751 	 * The parser is not finished with the last write,
1752 	 * continue reading the user input without skipping spaces.
1753 	 */
1754 	if (!parser->cont) {
1755 		/* skip white space */
1756 		while (cnt && isspace(ch)) {
1757 			ret = get_user(ch, ubuf++);
1758 			if (ret)
1759 				goto out;
1760 			read++;
1761 			cnt--;
1762 		}
1763 
1764 		parser->idx = 0;
1765 
1766 		/* only spaces were written */
1767 		if (isspace(ch) || !ch) {
1768 			*ppos += read;
1769 			ret = read;
1770 			goto out;
1771 		}
1772 	}
1773 
1774 	/* read the non-space input */
1775 	while (cnt && !isspace(ch) && ch) {
1776 		if (parser->idx < parser->size - 1)
1777 			parser->buffer[parser->idx++] = ch;
1778 		else {
1779 			ret = -EINVAL;
1780 			goto out;
1781 		}
1782 		ret = get_user(ch, ubuf++);
1783 		if (ret)
1784 			goto out;
1785 		read++;
1786 		cnt--;
1787 	}
1788 
1789 	/* We either got finished input or we have to wait for another call. */
1790 	if (isspace(ch) || !ch) {
1791 		parser->buffer[parser->idx] = 0;
1792 		parser->cont = false;
1793 	} else if (parser->idx < parser->size - 1) {
1794 		parser->cont = true;
1795 		parser->buffer[parser->idx++] = ch;
1796 		/* Make sure the parsed string always terminates with '\0'. */
1797 		parser->buffer[parser->idx] = 0;
1798 	} else {
1799 		ret = -EINVAL;
1800 		goto out;
1801 	}
1802 
1803 	*ppos += read;
1804 	ret = read;
1805 
1806 out:
1807 	return ret;
1808 }
1809 
1810 /* TODO add a seq_buf_to_buffer() */
1811 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1812 {
1813 	int len;
1814 
1815 	if (trace_seq_used(s) <= s->readpos)
1816 		return -EBUSY;
1817 
1818 	len = trace_seq_used(s) - s->readpos;
1819 	if (cnt > len)
1820 		cnt = len;
1821 	memcpy(buf, s->buffer + s->readpos, cnt);
1822 
1823 	s->readpos += cnt;
1824 	return cnt;
1825 }
1826 
1827 unsigned long __read_mostly	tracing_thresh;
1828 
1829 #ifdef CONFIG_TRACER_MAX_TRACE
1830 static const struct file_operations tracing_max_lat_fops;
1831 
1832 #ifdef LATENCY_FS_NOTIFY
1833 
1834 static struct workqueue_struct *fsnotify_wq;
1835 
1836 static void latency_fsnotify_workfn(struct work_struct *work)
1837 {
1838 	struct trace_array *tr = container_of(work, struct trace_array,
1839 					      fsnotify_work);
1840 	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1841 }
1842 
1843 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1844 {
1845 	struct trace_array *tr = container_of(iwork, struct trace_array,
1846 					      fsnotify_irqwork);
1847 	queue_work(fsnotify_wq, &tr->fsnotify_work);
1848 }
1849 
1850 static void trace_create_maxlat_file(struct trace_array *tr,
1851 				     struct dentry *d_tracer)
1852 {
1853 	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1854 	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1855 	tr->d_max_latency = trace_create_file("tracing_max_latency",
1856 					      TRACE_MODE_WRITE,
1857 					      d_tracer, tr,
1858 					      &tracing_max_lat_fops);
1859 }
1860 
1861 __init static int latency_fsnotify_init(void)
1862 {
1863 	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1864 				      WQ_UNBOUND | WQ_HIGHPRI, 0);
1865 	if (!fsnotify_wq) {
1866 		pr_err("Unable to allocate tr_max_lat_wq\n");
1867 		return -ENOMEM;
1868 	}
1869 	return 0;
1870 }
1871 
1872 late_initcall_sync(latency_fsnotify_init);
1873 
1874 void latency_fsnotify(struct trace_array *tr)
1875 {
1876 	if (!fsnotify_wq)
1877 		return;
1878 	/*
1879 	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1880 	 * possible that we are called from __schedule() or do_idle(), which
1881 	 * could cause a deadlock.
1882 	 */
1883 	irq_work_queue(&tr->fsnotify_irqwork);
1884 }
1885 
1886 #else /* !LATENCY_FS_NOTIFY */
1887 
1888 #define trace_create_maxlat_file(tr, d_tracer)				\
1889 	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
1890 			  d_tracer, tr, &tracing_max_lat_fops)
1891 
1892 #endif
1893 
1894 /*
1895  * Copy the new maximum trace into the separate maximum-trace
1896  * structure. (This way the maximum trace is permanently saved
1897  * for later retrieval via /sys/kernel/tracing/tracing_max_latency.)
1898  */
1899 static void
1900 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1901 {
1902 	struct array_buffer *trace_buf = &tr->array_buffer;
1903 	struct array_buffer *max_buf = &tr->max_buffer;
1904 	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1905 	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1906 
1907 	max_buf->cpu = cpu;
1908 	max_buf->time_start = data->preempt_timestamp;
1909 
1910 	max_data->saved_latency = tr->max_latency;
1911 	max_data->critical_start = data->critical_start;
1912 	max_data->critical_end = data->critical_end;
1913 
1914 	strscpy(max_data->comm, tsk->comm);
1915 	max_data->pid = tsk->pid;
1916 	/*
1917 	 * If tsk == current, then use current_uid(), as that does not use
1918 	 * RCU. The irq tracer can be called out of RCU scope.
1919 	 */
1920 	if (tsk == current)
1921 		max_data->uid = current_uid();
1922 	else
1923 		max_data->uid = task_uid(tsk);
1924 
1925 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1926 	max_data->policy = tsk->policy;
1927 	max_data->rt_priority = tsk->rt_priority;
1928 
1929 	/* record this task's comm */
1930 	tracing_record_cmdline(tsk);
1931 	latency_fsnotify(tr);
1932 }
1933 
1934 /**
1935  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1936  * @tr: tracer
1937  * @tsk: the task with the latency
1938  * @cpu: The cpu that initiated the trace.
1939  * @cond_data: User data associated with a conditional snapshot
1940  *
1941  * Flip the buffers between the @tr and the max_tr and record information
1942  * about which task was the cause of this latency.
1943  */
1944 void
1945 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1946 	      void *cond_data)
1947 {
1948 	if (tr->stop_count)
1949 		return;
1950 
1951 	WARN_ON_ONCE(!irqs_disabled());
1952 
1953 	if (!tr->allocated_snapshot) {
1954 		/* Only the nop tracer should hit this when disabling */
1955 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1956 		return;
1957 	}
1958 
1959 	arch_spin_lock(&tr->max_lock);
1960 
1961 	/* Inherit the recordable setting from array_buffer */
1962 	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1963 		ring_buffer_record_on(tr->max_buffer.buffer);
1964 	else
1965 		ring_buffer_record_off(tr->max_buffer.buffer);
1966 
1967 #ifdef CONFIG_TRACER_SNAPSHOT
1968 	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1969 		arch_spin_unlock(&tr->max_lock);
1970 		return;
1971 	}
1972 #endif
1973 	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1974 
1975 	__update_max_tr(tr, tsk, cpu);
1976 
1977 	arch_spin_unlock(&tr->max_lock);
1978 
1979 	/* Any waiters on the old snapshot buffer need to wake up */
1980 	ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
1981 }
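
/*
 * A hedged usage sketch (not taken verbatim from any tracer): a latency
 * tracer that observes a new worst-case latency updates tr->max_latency
 * and then snapshots the buffers, roughly as the irqsoff and wakeup
 * tracers do. "delta" is the measured latency and is assumed to be
 * computed by the caller with preemption disabled.
 *
 *	if (delta > tr->max_latency && likely(!is_tracing_stopped())) {
 *		tr->max_latency = delta;
 *		update_max_tr(tr, current, smp_processor_id(), NULL);
 *	}
 */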
1982 
1983 /**
1984  * update_max_tr_single - only copy one trace over, and reset the rest
1985  * @tr: tracer
1986  * @tsk: task with the latency
1987  * @cpu: the cpu of the buffer to copy.
1988  *
1989  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1990  */
1991 void
1992 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1993 {
1994 	int ret;
1995 
1996 	if (tr->stop_count)
1997 		return;
1998 
1999 	WARN_ON_ONCE(!irqs_disabled());
2000 	if (!tr->allocated_snapshot) {
2001 		/* Only the nop tracer should hit this when disabling */
2002 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
2003 		return;
2004 	}
2005 
2006 	arch_spin_lock(&tr->max_lock);
2007 
2008 	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
2009 
2010 	if (ret == -EBUSY) {
2011 		/*
2012 		 * We failed to swap the buffer due to a commit taking
2013 		 * place on this CPU. We fail to record, but we reset
2014 		 * the max trace buffer (no one writes directly to it)
2015 		 * and flag that it failed.
2016 		 * Another reason is that a resize is in progress.
2017 		 */
2018 		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
2019 			"Failed to swap buffers due to commit or resize in progress\n");
2020 	}
2021 
2022 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
2023 
2024 	__update_max_tr(tr, tsk, cpu);
2025 	arch_spin_unlock(&tr->max_lock);
2026 }
2027 
2028 #endif /* CONFIG_TRACER_MAX_TRACE */
2029 
2030 struct pipe_wait {
2031 	struct trace_iterator		*iter;
2032 	int				wait_index;
2033 };
2034 
2035 static bool wait_pipe_cond(void *data)
2036 {
2037 	struct pipe_wait *pwait = data;
2038 	struct trace_iterator *iter = pwait->iter;
2039 
2040 	if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index)
2041 		return true;
2042 
2043 	return iter->closed;
2044 }
2045 
2046 static int wait_on_pipe(struct trace_iterator *iter, int full)
2047 {
2048 	struct pipe_wait pwait;
2049 	int ret;
2050 
2051 	/* Iterators are static, they should be filled or empty */
2052 	if (trace_buffer_iter(iter, iter->cpu_file))
2053 		return 0;
2054 
2055 	pwait.wait_index = atomic_read_acquire(&iter->wait_index);
2056 	pwait.iter = iter;
2057 
2058 	ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full,
2059 			       wait_pipe_cond, &pwait);
2060 
2061 #ifdef CONFIG_TRACER_MAX_TRACE
2062 	/*
2063 	 * Make sure this is still the snapshot buffer, as if a snapshot were
2064 	 * to happen, this would now be the main buffer.
2065 	 */
2066 	if (iter->snapshot)
2067 		iter->array_buffer = &iter->tr->max_buffer;
2068 #endif
2069 	return ret;
2070 }
2071 
2072 #ifdef CONFIG_FTRACE_STARTUP_TEST
2073 static bool selftests_can_run;
2074 
2075 struct trace_selftests {
2076 	struct list_head		list;
2077 	struct tracer			*type;
2078 };
2079 
2080 static LIST_HEAD(postponed_selftests);
2081 
2082 static int save_selftest(struct tracer *type)
2083 {
2084 	struct trace_selftests *selftest;
2085 
2086 	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
2087 	if (!selftest)
2088 		return -ENOMEM;
2089 
2090 	selftest->type = type;
2091 	list_add(&selftest->list, &postponed_selftests);
2092 	return 0;
2093 }
2094 
2095 static int run_tracer_selftest(struct tracer *type)
2096 {
2097 	struct trace_array *tr = &global_trace;
2098 	struct tracer *saved_tracer = tr->current_trace;
2099 	int ret;
2100 
2101 	if (!type->selftest || tracing_selftest_disabled)
2102 		return 0;
2103 
2104 	/*
2105 	 * If a tracer registers early in boot up (before scheduling is
2106 	 * initialized and such), then do not run its selftests yet.
2107 	 * Instead, run it a little later in the boot process.
2108 	 */
2109 	if (!selftests_can_run)
2110 		return save_selftest(type);
2111 
2112 	if (!tracing_is_on()) {
2113 		pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
2114 			type->name);
2115 		return 0;
2116 	}
2117 
2118 	/*
2119 	 * Run a selftest on this tracer.
2120 	 * Here we reset the trace buffer, and set the current
2121 	 * tracer to be this tracer. The tracer can then run some
2122 	 * internal tracing to verify that everything is in order.
2123 	 * If we fail, we do not register this tracer.
2124 	 */
2125 	tracing_reset_online_cpus(&tr->array_buffer);
2126 
2127 	tr->current_trace = type;
2128 
2129 #ifdef CONFIG_TRACER_MAX_TRACE
2130 	if (type->use_max_tr) {
2131 		/* If we expanded the buffers, make sure the max is expanded too */
2132 		if (tr->ring_buffer_expanded)
2133 			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2134 					   RING_BUFFER_ALL_CPUS);
2135 		tr->allocated_snapshot = true;
2136 	}
2137 #endif
2138 
2139 	/* the test is responsible for initializing and enabling */
2140 	pr_info("Testing tracer %s: ", type->name);
2141 	ret = type->selftest(type, tr);
2142 	/* the test is responsible for resetting too */
2143 	tr->current_trace = saved_tracer;
2144 	if (ret) {
2145 		printk(KERN_CONT "FAILED!\n");
2146 		/* Add the warning after printing 'FAILED' */
2147 		WARN_ON(1);
2148 		return -1;
2149 	}
2150 	/* Only reset on passing, to avoid touching corrupted buffers */
2151 	tracing_reset_online_cpus(&tr->array_buffer);
2152 
2153 #ifdef CONFIG_TRACER_MAX_TRACE
2154 	if (type->use_max_tr) {
2155 		tr->allocated_snapshot = false;
2156 
2157 		/* Shrink the max buffer again */
2158 		if (tr->ring_buffer_expanded)
2159 			ring_buffer_resize(tr->max_buffer.buffer, 1,
2160 					   RING_BUFFER_ALL_CPUS);
2161 	}
2162 #endif
2163 
2164 	printk(KERN_CONT "PASSED\n");
2165 	return 0;
2166 }
2167 
2168 static int do_run_tracer_selftest(struct tracer *type)
2169 {
2170 	int ret;
2171 
2172 	/*
2173 	 * Tests can take a long time, especially if they are run one after the
2174 	 * other, as does happen during bootup when all the tracers are
2175 	 * registered. This could cause the soft lockup watchdog to trigger.
2176 	 */
2177 	cond_resched();
2178 
2179 	tracing_selftest_running = true;
2180 	ret = run_tracer_selftest(type);
2181 	tracing_selftest_running = false;
2182 
2183 	return ret;
2184 }
2185 
2186 static __init int init_trace_selftests(void)
2187 {
2188 	struct trace_selftests *p, *n;
2189 	struct tracer *t, **last;
2190 	int ret;
2191 
2192 	selftests_can_run = true;
2193 
2194 	guard(mutex)(&trace_types_lock);
2195 
2196 	if (list_empty(&postponed_selftests))
2197 		return 0;
2198 
2199 	pr_info("Running postponed tracer tests:\n");
2200 
2201 	tracing_selftest_running = true;
2202 	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2203 		/* This loop can take minutes when sanitizers are enabled, so
2204 		 * let's make sure we allow RCU processing.
2205 		 */
2206 		cond_resched();
2207 		ret = run_tracer_selftest(p->type);
2208 		/* If the test fails, then warn and remove from available_tracers */
2209 		if (ret < 0) {
2210 			WARN(1, "tracer: %s failed selftest, disabling\n",
2211 			     p->type->name);
2212 			last = &trace_types;
2213 			for (t = trace_types; t; t = t->next) {
2214 				if (t == p->type) {
2215 					*last = t->next;
2216 					break;
2217 				}
2218 				last = &t->next;
2219 			}
2220 		}
2221 		list_del(&p->list);
2222 		kfree(p);
2223 	}
2224 	tracing_selftest_running = false;
2225 
2226 	return 0;
2227 }
2228 core_initcall(init_trace_selftests);
2229 #else
2230 static inline int do_run_tracer_selftest(struct tracer *type)
2231 {
2232 	return 0;
2233 }
2234 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2235 
2236 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2237 
2238 static void __init apply_trace_boot_options(void);
2239 
2240 /**
2241  * register_tracer - register a tracer with the ftrace system.
2242  * @type: the plugin for the tracer
2243  *
2244  * Register a new plugin tracer.
2245  */
2246 int __init register_tracer(struct tracer *type)
2247 {
2248 	struct tracer *t;
2249 	int ret = 0;
2250 
2251 	if (!type->name) {
2252 		pr_info("Tracer must have a name\n");
2253 		return -1;
2254 	}
2255 
2256 	if (strlen(type->name) >= MAX_TRACER_SIZE) {
2257 		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2258 		return -1;
2259 	}
2260 
2261 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
2262 		pr_warn("Can not register tracer %s due to lockdown\n",
2263 			   type->name);
2264 		return -EPERM;
2265 	}
2266 
2267 	mutex_lock(&trace_types_lock);
2268 
2269 	for (t = trace_types; t; t = t->next) {
2270 		if (strcmp(type->name, t->name) == 0) {
2271 			/* already found */
2272 			pr_info("Tracer %s already registered\n",
2273 				type->name);
2274 			ret = -1;
2275 			goto out;
2276 		}
2277 	}
2278 
2279 	if (!type->set_flag)
2280 		type->set_flag = &dummy_set_flag;
2281 	if (!type->flags) {
2282 		/* allocate a dummy tracer_flags */
2283 		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2284 		if (!type->flags) {
2285 			ret = -ENOMEM;
2286 			goto out;
2287 		}
2288 		type->flags->val = 0;
2289 		type->flags->opts = dummy_tracer_opt;
2290 	} else if (!type->flags->opts)
2291 		type->flags->opts = dummy_tracer_opt;
2293 
2294 	/* store the tracer for __set_tracer_option */
2295 	type->flags->trace = type;
2296 
2297 	ret = do_run_tracer_selftest(type);
2298 	if (ret < 0)
2299 		goto out;
2300 
2301 	type->next = trace_types;
2302 	trace_types = type;
2303 	add_tracer_options(&global_trace, type);
2304 
2305  out:
2306 	mutex_unlock(&trace_types_lock);
2307 
2308 	if (ret || !default_bootup_tracer)
2309 		goto out_unlock;
2310 
2311 	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2312 		goto out_unlock;
2313 
2314 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2315 	/* Do we want this tracer to start on bootup? */
2316 	tracing_set_tracer(&global_trace, type->name);
2317 	default_bootup_tracer = NULL;
2318 
2319 	apply_trace_boot_options();
2320 
2321 	/* disable other selftests, since this will break them. */
2322 	disable_tracing_selftest("running a tracer");
2323 
2324  out_unlock:
2325 	return ret;
2326 }
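
/*
 * A minimal registration sketch (assumed, not from this file): a tracer
 * plugin fills in a struct tracer and registers it from an __init
 * function. The "mytracer" name and callbacks are hypothetical; only
 * .name is strictly required by the checks above.
 *
 *	static int mytracer_init(struct trace_array *tr) { return 0; }
 *	static void mytracer_reset(struct trace_array *tr) { }
 *
 *	static struct tracer mytracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= mytracer_init,
 *		.reset	= mytracer_reset,
 *	};
 *
 *	static __init int mytracer_register(void)
 *	{
 *		return register_tracer(&mytracer);
 *	}
 *	core_initcall(mytracer_register);
 */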
2327 
2328 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2329 {
2330 	struct trace_buffer *buffer = buf->buffer;
2331 
2332 	if (!buffer)
2333 		return;
2334 
2335 	ring_buffer_record_disable(buffer);
2336 
2337 	/* Make sure all commits have finished */
2338 	synchronize_rcu();
2339 	ring_buffer_reset_cpu(buffer, cpu);
2340 
2341 	ring_buffer_record_enable(buffer);
2342 }
2343 
2344 void tracing_reset_online_cpus(struct array_buffer *buf)
2345 {
2346 	struct trace_buffer *buffer = buf->buffer;
2347 
2348 	if (!buffer)
2349 		return;
2350 
2351 	ring_buffer_record_disable(buffer);
2352 
2353 	/* Make sure all commits have finished */
2354 	synchronize_rcu();
2355 
2356 	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2357 
2358 	ring_buffer_reset_online_cpus(buffer);
2359 
2360 	ring_buffer_record_enable(buffer);
2361 }
2362 
2363 static void tracing_reset_all_cpus(struct array_buffer *buf)
2364 {
2365 	struct trace_buffer *buffer = buf->buffer;
2366 
2367 	if (!buffer)
2368 		return;
2369 
2370 	ring_buffer_record_disable(buffer);
2371 
2372 	/* Make sure all commits have finished */
2373 	synchronize_rcu();
2374 
2375 	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2376 
2377 	ring_buffer_reset(buffer);
2378 
2379 	ring_buffer_record_enable(buffer);
2380 }
2381 
2382 /* Must have trace_types_lock held */
2383 void tracing_reset_all_online_cpus_unlocked(void)
2384 {
2385 	struct trace_array *tr;
2386 
2387 	lockdep_assert_held(&trace_types_lock);
2388 
2389 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2390 		if (!tr->clear_trace)
2391 			continue;
2392 		tr->clear_trace = false;
2393 		tracing_reset_online_cpus(&tr->array_buffer);
2394 #ifdef CONFIG_TRACER_MAX_TRACE
2395 		tracing_reset_online_cpus(&tr->max_buffer);
2396 #endif
2397 	}
2398 }
2399 
2400 void tracing_reset_all_online_cpus(void)
2401 {
2402 	mutex_lock(&trace_types_lock);
2403 	tracing_reset_all_online_cpus_unlocked();
2404 	mutex_unlock(&trace_types_lock);
2405 }
2406 
2407 int is_tracing_stopped(void)
2408 {
2409 	return global_trace.stop_count;
2410 }
2411 
2412 static void tracing_start_tr(struct trace_array *tr)
2413 {
2414 	struct trace_buffer *buffer;
2415 	unsigned long flags;
2416 
2417 	if (tracing_disabled)
2418 		return;
2419 
2420 	raw_spin_lock_irqsave(&tr->start_lock, flags);
2421 	if (--tr->stop_count) {
2422 		if (WARN_ON_ONCE(tr->stop_count < 0)) {
2423 			/* Someone screwed up their debugging */
2424 			tr->stop_count = 0;
2425 		}
2426 		goto out;
2427 	}
2428 
2429 	/* Prevent the buffers from switching */
2430 	arch_spin_lock(&tr->max_lock);
2431 
2432 	buffer = tr->array_buffer.buffer;
2433 	if (buffer)
2434 		ring_buffer_record_enable(buffer);
2435 
2436 #ifdef CONFIG_TRACER_MAX_TRACE
2437 	buffer = tr->max_buffer.buffer;
2438 	if (buffer)
2439 		ring_buffer_record_enable(buffer);
2440 #endif
2441 
2442 	arch_spin_unlock(&tr->max_lock);
2443 
2444  out:
2445 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2446 }
2447 
2448 /**
2449  * tracing_start - quick start of the tracer
2450  *
2451  * If tracing is enabled but was stopped by tracing_stop,
2452  * this will start the tracer back up.
2453  */
2454 void tracing_start(void)
2456 {
2457 	return tracing_start_tr(&global_trace);
2458 }
2459 
2460 static void tracing_stop_tr(struct trace_array *tr)
2461 {
2462 	struct trace_buffer *buffer;
2463 	unsigned long flags;
2464 
2465 	raw_spin_lock_irqsave(&tr->start_lock, flags);
2466 	if (tr->stop_count++)
2467 		goto out;
2468 
2469 	/* Prevent the buffers from switching */
2470 	arch_spin_lock(&tr->max_lock);
2471 
2472 	buffer = tr->array_buffer.buffer;
2473 	if (buffer)
2474 		ring_buffer_record_disable(buffer);
2475 
2476 #ifdef CONFIG_TRACER_MAX_TRACE
2477 	buffer = tr->max_buffer.buffer;
2478 	if (buffer)
2479 		ring_buffer_record_disable(buffer);
2480 #endif
2481 
2482 	arch_spin_unlock(&tr->max_lock);
2483 
2484  out:
2485 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2486 }
2487 
2488 /**
2489  * tracing_stop - quick stop of the tracer
2490  *
2491  * Light weight way to stop tracing. Use in conjunction with
2492  * tracing_start.
2493  */
2494 void tracing_stop(void)
2495 {
2496 	return tracing_stop_tr(&global_trace);
2497 }
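
/*
 * Usage sketch (assumed caller): code that wants the trace to stay intact
 * around a diagnostic section can bracket it with the lightweight pair:
 *
 *	tracing_stop();
 *	inspect_failure_state();	// hypothetical
 *	tracing_start();
 */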
2498 
2499 /*
2500  * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2501  * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2502  * simplifies those functions and keeps them in sync.
2503  */
2504 enum print_line_t trace_handle_return(struct trace_seq *s)
2505 {
2506 	return trace_seq_has_overflowed(s) ?
2507 		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2508 }
2509 EXPORT_SYMBOL_GPL(trace_handle_return);
2510 
2511 static unsigned short migration_disable_value(void)
2512 {
2513 #if defined(CONFIG_SMP)
2514 	return current->migration_disabled;
2515 #else
2516 	return 0;
2517 #endif
2518 }
2519 
2520 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2521 {
2522 	unsigned int trace_flags = irqs_status;
2523 	unsigned int pc;
2524 
2525 	pc = preempt_count();
2526 
2527 	if (pc & NMI_MASK)
2528 		trace_flags |= TRACE_FLAG_NMI;
2529 	if (pc & HARDIRQ_MASK)
2530 		trace_flags |= TRACE_FLAG_HARDIRQ;
2531 	if (in_serving_softirq())
2532 		trace_flags |= TRACE_FLAG_SOFTIRQ;
2533 	if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2534 		trace_flags |= TRACE_FLAG_BH_OFF;
2535 
2536 	if (tif_need_resched())
2537 		trace_flags |= TRACE_FLAG_NEED_RESCHED;
2538 	if (test_preempt_need_resched())
2539 		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2540 	if (IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY) && tif_test_bit(TIF_NEED_RESCHED_LAZY))
2541 		trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
2542 	return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2543 		(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
2544 }
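
/*
 * Layout of the returned trace_ctx value, as implied by the shifts above:
 *
 *	bits  0- 3: preempt_count() preemption-disable depth, clamped to 15
 *	bits  4- 7: migration-disable depth, clamped to 15
 *	bits 16-31: TRACE_FLAG_* bits (irq/softirq/NMI/need-resched state)
 *
 * For example, an event generated from hard interrupt context with one
 * level of preempt_disable() yields (TRACE_FLAG_HARDIRQ << 16) | 0x1,
 * plus whatever bits were already set in @irqs_status.
 */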
2545 
2546 struct ring_buffer_event *
2547 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2548 			  int type,
2549 			  unsigned long len,
2550 			  unsigned int trace_ctx)
2551 {
2552 	return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2553 }
2554 
2555 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2556 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2557 static int trace_buffered_event_ref;
2558 
2559 /**
2560  * trace_buffered_event_enable - enable buffering events
2561  *
2562  * When events are being filtered, it is quicker to use a temporary
2563  * buffer to write the event data into if there's a likely chance
2564  * that it will not be committed. Discarding an event from the ring
2565  * buffer is not as fast as committing it, and is much slower than
2566  * copying the data and then committing.
2567  *
2568  * When an event is to be filtered, allocate per-CPU buffers to
2569  * write the event data into. If the event is filtered and discarded,
2570  * it is simply dropped; otherwise, the entire data is committed
2571  * in one shot.
2572  */
2573 void trace_buffered_event_enable(void)
2574 {
2575 	struct ring_buffer_event *event;
2576 	struct page *page;
2577 	int cpu;
2578 
2579 	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2580 
2581 	if (trace_buffered_event_ref++)
2582 		return;
2583 
2584 	for_each_tracing_cpu(cpu) {
2585 		page = alloc_pages_node(cpu_to_node(cpu),
2586 					GFP_KERNEL | __GFP_NORETRY, 0);
2587 		/* This is just an optimization and can handle failures */
2588 		if (!page) {
2589 			pr_err("Failed to allocate event buffer\n");
2590 			break;
2591 		}
2592 
2593 		event = page_address(page);
2594 		memset(event, 0, sizeof(*event));
2595 
2596 		per_cpu(trace_buffered_event, cpu) = event;
2597 
2598 		preempt_disable();
2599 		if (cpu == smp_processor_id() &&
2600 		    __this_cpu_read(trace_buffered_event) !=
2601 		    per_cpu(trace_buffered_event, cpu))
2602 			WARN_ON_ONCE(1);
2603 		preempt_enable();
2604 	}
2605 }
2606 
2607 static void enable_trace_buffered_event(void *data)
2608 {
2609 	/* Probably not needed, but do it anyway */
2610 	smp_rmb();
2611 	this_cpu_dec(trace_buffered_event_cnt);
2612 }
2613 
2614 static void disable_trace_buffered_event(void *data)
2615 {
2616 	this_cpu_inc(trace_buffered_event_cnt);
2617 }
2618 
2619 /**
2620  * trace_buffered_event_disable - disable buffering events
2621  *
2622  * When a filter is removed, it is faster to not use the buffered
2623  * events, and to commit directly into the ring buffer. Free up
2624  * the temp buffers when there are no more users. This requires
2625  * special synchronization with current events.
2626  */
2627 void trace_buffered_event_disable(void)
2628 {
2629 	int cpu;
2630 
2631 	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2632 
2633 	if (WARN_ON_ONCE(!trace_buffered_event_ref))
2634 		return;
2635 
2636 	if (--trace_buffered_event_ref)
2637 		return;
2638 
2639 	/* For each CPU, set the buffer as used. */
2640 	on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
2641 			 NULL, true);
2642 
2643 	/* Wait for all current users to finish */
2644 	synchronize_rcu();
2645 
2646 	for_each_tracing_cpu(cpu) {
2647 		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2648 		per_cpu(trace_buffered_event, cpu) = NULL;
2649 	}
2650 
2651 	/*
2652 	 * Wait for all CPUs that potentially started checking if they can use
2653 	 * their event buffer only after the previous synchronize_rcu() call and
2654 	 * they still read a valid pointer from trace_buffered_event. It must be
2655 	 * ensured they don't see a cleared trace_buffered_event_cnt, else they
2656 	 * could wrongly decide to use the pointed-to buffer, which is now freed.
2657 	 */
2658 	synchronize_rcu();
2659 
2660 	/* For each CPU, relinquish the buffer */
2661 	on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
2662 			 true);
2663 }
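
/*
 * Usage sketch (assumed, mirroring how the event filter code drives this
 * pair): enable/disable are reference counted and must be called under
 * event_mutex.
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	// ... install the filter on the event files ...
 *	mutex_unlock(&event_mutex);
 *
 *	// later, when the filter is removed:
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_disable();
 *	mutex_unlock(&event_mutex);
 */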
2664 
2665 static struct trace_buffer *temp_buffer;
2666 
2667 struct ring_buffer_event *
2668 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2669 			  struct trace_event_file *trace_file,
2670 			  int type, unsigned long len,
2671 			  unsigned int trace_ctx)
2672 {
2673 	struct ring_buffer_event *entry;
2674 	struct trace_array *tr = trace_file->tr;
2675 	int val;
2676 
2677 	*current_rb = tr->array_buffer.buffer;
2678 
2679 	if (!tr->no_filter_buffering_ref &&
2680 	    (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2681 		preempt_disable_notrace();
2682 		/*
2683 		 * Filtering is on, so try to use the per cpu buffer first.
2684 		 * This buffer will simulate a ring_buffer_event,
2685 		 * where the type_len is zero and the array[0] will
2686 		 * hold the full length.
2687 		 * (see include/linux/ring_buffer.h for details on
2688 		 *  how the ring_buffer_event is structured).
2689 		 *
2690 		 * Using a temp buffer during filtering and copying it
2691 		 * on a matched filter is quicker than writing directly
2692 		 * into the ring buffer and then discarding it when
2693 		 * it doesn't match. That is because the discard
2694 		 * requires several atomic operations to get right.
2695 		 * Copying on a match and doing nothing on a failed match
2696 		 * is still quicker than writing directly with no copy, but
2697 		 * then having to discard out of the ring buffer on a failed match.
2698 		 */
2699 		if ((entry = __this_cpu_read(trace_buffered_event))) {
2700 			int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2701 
2702 			val = this_cpu_inc_return(trace_buffered_event_cnt);
2703 
2704 			/*
2705 			 * Preemption is disabled, but interrupts and NMIs
2706 			 * can still come in now. If that happens after
2707 			 * the above increment, then it will have to go
2708 			 * back to the old method of allocating the event
2709 			 * on the ring buffer, and if the filter fails, it
2710 			 * will have to call ring_buffer_discard_commit()
2711 			 * to remove it.
2712 			 *
2713 			 * Need to also check the unlikely case that the
2714 			 * length is bigger than the temp buffer size.
2715 			 * If that happens, then the reserve is pretty much
2716 			 * guaranteed to fail, as the ring buffer currently
2717 			 * only allows events less than a page. But that may
2718 			 * change in the future, so let the ring buffer reserve
2719 			 * handle the failure in that case.
2720 			 */
2721 			if (val == 1 && likely(len <= max_len)) {
2722 				trace_event_setup(entry, type, trace_ctx);
2723 				entry->array[0] = len;
2724 				/* Return with preemption disabled */
2725 				return entry;
2726 			}
2727 			this_cpu_dec(trace_buffered_event_cnt);
2728 		}
2729 		/* __trace_buffer_lock_reserve() disables preemption */
2730 		preempt_enable_notrace();
2731 	}
2732 
2733 	entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2734 					    trace_ctx);
2735 	/*
2736 	 * If tracing is off, but we have triggers enabled
2737 	 * we still need to look at the event data. Use the temp_buffer
2738 	 * to store the trace event for the trigger to use. It's recursion
2739 	 * safe and will not be recorded anywhere.
2740 	 */
2741 	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2742 		*current_rb = temp_buffer;
2743 		entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2744 						    trace_ctx);
2745 	}
2746 	return entry;
2747 }
2748 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
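
/*
 * A hedged sketch of the reserve/fill/commit sequence that generated
 * TRACE_EVENT() probes perform around the helper above, going through
 * trace_event_buffer_reserve() and trace_event_buffer_commit(). The
 * my_entry type and its "value" field are hypothetical.
 *
 *	struct trace_event_buffer fbuffer;
 *	struct my_entry *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->value = value;
 *	trace_event_buffer_commit(&fbuffer);
 */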
2749 
2750 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2751 static DEFINE_MUTEX(tracepoint_printk_mutex);
2752 
2753 static void output_printk(struct trace_event_buffer *fbuffer)
2754 {
2755 	struct trace_event_call *event_call;
2756 	struct trace_event_file *file;
2757 	struct trace_event *event;
2758 	unsigned long flags;
2759 	struct trace_iterator *iter = tracepoint_print_iter;
2760 
2761 	/* We should never get here if iter is NULL */
2762 	if (WARN_ON_ONCE(!iter))
2763 		return;
2764 
2765 	event_call = fbuffer->trace_file->event_call;
2766 	if (!event_call || !event_call->event.funcs ||
2767 	    !event_call->event.funcs->trace)
2768 		return;
2769 
2770 	file = fbuffer->trace_file;
2771 	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2772 	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2773 	     !filter_match_preds(file->filter, fbuffer->entry)))
2774 		return;
2775 
2776 	event = &fbuffer->trace_file->event_call->event;
2777 
2778 	raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2779 	trace_seq_init(&iter->seq);
2780 	iter->ent = fbuffer->entry;
2781 	event_call->event.funcs->trace(iter, 0, event);
2782 	trace_seq_putc(&iter->seq, 0);
2783 	printk("%s", iter->seq.buffer);
2784 
2785 	raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2786 }
2787 
2788 int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
2789 			     void *buffer, size_t *lenp,
2790 			     loff_t *ppos)
2791 {
2792 	int save_tracepoint_printk;
2793 	int ret;
2794 
2795 	guard(mutex)(&tracepoint_printk_mutex);
2796 	save_tracepoint_printk = tracepoint_printk;
2797 
2798 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2799 
2800 	/*
2801 	 * This will force exiting early, as tracepoint_printk
2802 	 * is always zero when tracepoint_print_iter is not allocated.
2803 	 */
2804 	if (!tracepoint_print_iter)
2805 		tracepoint_printk = 0;
2806 
2807 	if (save_tracepoint_printk == tracepoint_printk)
2808 		return ret;
2809 
2810 	if (tracepoint_printk)
2811 		static_key_enable(&tracepoint_printk_key.key);
2812 	else
2813 		static_key_disable(&tracepoint_printk_key.key);
2814 
2815 	return ret;
2816 }
2817 
2818 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2819 {
2820 	enum event_trigger_type tt = ETT_NONE;
2821 	struct trace_event_file *file = fbuffer->trace_file;
2822 
2823 	if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2824 			fbuffer->entry, &tt))
2825 		goto discard;
2826 
2827 	if (static_key_false(&tracepoint_printk_key.key))
2828 		output_printk(fbuffer);
2829 
2830 	if (static_branch_unlikely(&trace_event_exports_enabled))
2831 		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2832 
2833 	trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2834 			fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2835 
2836 discard:
2837 	if (tt)
2838 		event_triggers_post_call(file, tt);
2839 
2840 }
2841 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2842 
2843 /*
2844  * Skip 3:
2845  *
2846  *   trace_buffer_unlock_commit_regs()
2847  *   trace_event_buffer_commit()
2848  *   trace_event_raw_event_xxx()
2849  */
2850 # define STACK_SKIP 3
2851 
2852 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2853 				     struct trace_buffer *buffer,
2854 				     struct ring_buffer_event *event,
2855 				     unsigned int trace_ctx,
2856 				     struct pt_regs *regs)
2857 {
2858 	__buffer_unlock_commit(buffer, event);
2859 
2860 	/*
2861 	 * If regs is not set, then skip the necessary functions.
2862 	 * Note, we can still get here via blktrace, wakeup tracer
2863 	 * and mmiotrace, but that's ok if they lose a function or
2864 	 * two. They are not that meaningful.
2865 	 */
2866 	ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2867 	ftrace_trace_userstack(tr, buffer, trace_ctx);
2868 }
2869 
2870 /*
2871  * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2872  */
2873 void
2874 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2875 				   struct ring_buffer_event *event)
2876 {
2877 	__buffer_unlock_commit(buffer, event);
2878 }
2879 
2880 void
2881 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2882 	       parent_ip, unsigned int trace_ctx)
2883 {
2884 	struct trace_buffer *buffer = tr->array_buffer.buffer;
2885 	struct ring_buffer_event *event;
2886 	struct ftrace_entry *entry;
2887 
2888 	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2889 					    trace_ctx);
2890 	if (!event)
2891 		return;
2892 	entry	= ring_buffer_event_data(event);
2893 	entry->ip			= ip;
2894 	entry->parent_ip		= parent_ip;
2895 
2896 	if (static_branch_unlikely(&trace_function_exports_enabled))
2897 		ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2898 	__buffer_unlock_commit(buffer, event);
2899 }
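
/*
 * Usage sketch (simplified and assumed): the function tracer reaches
 * trace_function() from its ftrace callback with the traced ip and its
 * caller, roughly like the hypothetical probe below.
 *
 *	static void my_func_probe(unsigned long ip, unsigned long parent_ip,
 *				  struct ftrace_ops *op,
 *				  struct ftrace_regs *fregs)
 *	{
 *		struct trace_array *tr = op->private;
 *
 *		trace_function(tr, ip, parent_ip, tracing_gen_ctx());
 *	}
 */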
2900 
2901 #ifdef CONFIG_STACKTRACE
2902 
2903 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2904 #define FTRACE_KSTACK_NESTING	4
2905 
2906 #define FTRACE_KSTACK_ENTRIES	(SZ_4K / FTRACE_KSTACK_NESTING)
2907 
2908 struct ftrace_stack {
2909 	unsigned long		calls[FTRACE_KSTACK_ENTRIES];
2910 };
2911 
2913 struct ftrace_stacks {
2914 	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];
2915 };
2916 
2917 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2918 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2919 
2920 static void __ftrace_trace_stack(struct trace_array *tr,
2921 				 struct trace_buffer *buffer,
2922 				 unsigned int trace_ctx,
2923 				 int skip, struct pt_regs *regs)
2924 {
2925 	struct ring_buffer_event *event;
2926 	unsigned int size, nr_entries;
2927 	struct ftrace_stack *fstack;
2928 	struct stack_entry *entry;
2929 	int stackidx;
2930 
2931 	/*
2932 	 * Add one, for this function and the call to stack_trace_save().
2933 	 * If regs is set, then these functions will not be in the way.
2934 	 */
2935 #ifndef CONFIG_UNWINDER_ORC
2936 	if (!regs)
2937 		skip++;
2938 #endif
2939 
2940 	preempt_disable_notrace();
2941 
2942 	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2943 
2944 	/* This should never happen. If it does, yell once and skip */
2945 	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2946 		goto out;
2947 
2948 	/*
2949 	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2950 	 * interrupt will either see the value pre increment or post
2951 	 * increment. If the interrupt happens pre increment it will have
2952 	 * restored the counter when it returns.  We just need a barrier to
2953 	 * keep gcc from moving things around.
2954 	 */
2955 	barrier();
2956 
2957 	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2958 	size = ARRAY_SIZE(fstack->calls);
2959 
2960 	if (regs) {
2961 		nr_entries = stack_trace_save_regs(regs, fstack->calls,
2962 						   size, skip);
2963 	} else {
2964 		nr_entries = stack_trace_save(fstack->calls, size, skip);
2965 	}
2966 
2967 #ifdef CONFIG_DYNAMIC_FTRACE
2968 	/* Mark entry of stack trace as trampoline code */
2969 	if (tr->ops && tr->ops->trampoline) {
2970 		unsigned long tramp_start = tr->ops->trampoline;
2971 		unsigned long tramp_end = tramp_start + tr->ops->trampoline_size;
2972 		unsigned long *calls = fstack->calls;
2973 
2974 		for (int i = 0; i < nr_entries; i++) {
2975 			if (calls[i] >= tramp_start && calls[i] < tramp_end)
2976 				calls[i] = FTRACE_TRAMPOLINE_MARKER;
2977 		}
2978 	}
2979 #endif
2980 
2981 	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2982 				    struct_size(entry, caller, nr_entries),
2983 				    trace_ctx);
2984 	if (!event)
2985 		goto out;
2986 	entry = ring_buffer_event_data(event);
2987 
2988 	entry->size = nr_entries;
2989 	memcpy(&entry->caller, fstack->calls,
2990 	       flex_array_size(entry, caller, nr_entries));
2991 
2992 	__buffer_unlock_commit(buffer, event);
2993 
2994  out:
2995 	/* Again, don't let gcc optimize things here */
2996 	barrier();
2997 	__this_cpu_dec(ftrace_stack_reserve);
2998 	preempt_enable_notrace();
2999 
3000 }
3001 
3002 static inline void ftrace_trace_stack(struct trace_array *tr,
3003 				      struct trace_buffer *buffer,
3004 				      unsigned int trace_ctx,
3005 				      int skip, struct pt_regs *regs)
3006 {
3007 	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3008 		return;
3009 
3010 	__ftrace_trace_stack(tr, buffer, trace_ctx, skip, regs);
3011 }
3012 
3013 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3014 		   int skip)
3015 {
3016 	struct trace_buffer *buffer = tr->array_buffer.buffer;
3017 
3018 	if (rcu_is_watching()) {
3019 		__ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL);
3020 		return;
3021 	}
3022 
3023 	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
3024 		return;
3025 
3026 	/*
3027 	 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3028 	 * but if the above rcu_is_watching() failed, then the NMI
3029 	 * triggered someplace critical, and ct_irq_enter() should
3030 	 * not be called from NMI.
3031 	 */
3032 	if (unlikely(in_nmi()))
3033 		return;
3034 
3035 	ct_irq_enter_irqson();
3036 	__ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL);
3037 	ct_irq_exit_irqson();
3038 }
3039 
3040 /**
3041  * trace_dump_stack - record a stack back trace in the trace buffer
3042  * @skip: Number of functions to skip (helper handlers)
3043  */
3044 void trace_dump_stack(int skip)
3045 {
3046 	if (tracing_disabled || tracing_selftest_running)
3047 		return;
3048 
3049 #ifndef CONFIG_UNWINDER_ORC
3050 	/* Skip 1 to skip this function. */
3051 	skip++;
3052 #endif
3053 	__ftrace_trace_stack(printk_trace, printk_trace->array_buffer.buffer,
3054 				tracing_gen_ctx(), skip, NULL);
3055 }
3056 EXPORT_SYMBOL_GPL(trace_dump_stack);
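
/*
 * Usage sketch (assumed caller): code debugging an unexpected path can
 * record a backtrace into the trace buffer instead of the console.
 *
 *	if (unexpected_condition)	// hypothetical
 *		trace_dump_stack(0);
 */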
3057 
3058 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3059 static DEFINE_PER_CPU(int, user_stack_count);
3060 
3061 static void
3062 ftrace_trace_userstack(struct trace_array *tr,
3063 		       struct trace_buffer *buffer, unsigned int trace_ctx)
3064 {
3065 	struct ring_buffer_event *event;
3066 	struct userstack_entry *entry;
3067 
3068 	if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3069 		return;
3070 
3071 	/*
3072 	 * NMIs can not handle page faults, even with fixups.
3073 	 * Saving the user stack can (and often does) fault.
3074 	 */
3075 	if (unlikely(in_nmi()))
3076 		return;
3077 
3078 	/*
3079 	 * prevent recursion, since the user stack tracing may
3080 	 * trigger other kernel events.
3081 	 */
3082 	preempt_disable();
3083 	if (__this_cpu_read(user_stack_count))
3084 		goto out;
3085 
3086 	__this_cpu_inc(user_stack_count);
3087 
3088 	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3089 					    sizeof(*entry), trace_ctx);
3090 	if (!event)
3091 		goto out_drop_count;
3092 	entry	= ring_buffer_event_data(event);
3093 
3094 	entry->tgid		= current->tgid;
3095 	memset(&entry->caller, 0, sizeof(entry->caller));
3096 
3097 	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3098 	__buffer_unlock_commit(buffer, event);
3099 
3100  out_drop_count:
3101 	__this_cpu_dec(user_stack_count);
3102  out:
3103 	preempt_enable();
3104 }
3105 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3106 static void ftrace_trace_userstack(struct trace_array *tr,
3107 				   struct trace_buffer *buffer,
3108 				   unsigned int trace_ctx)
3109 {
3110 }
3111 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3112 
3113 #endif /* CONFIG_STACKTRACE */
3114 
3115 static inline void
3116 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3117 			  unsigned long long delta)
3118 {
3119 	entry->bottom_delta_ts = delta & U32_MAX;
3120 	entry->top_delta_ts = (delta >> 32);
3121 }
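
/*
 * The 64-bit delta is split into two 32-bit halves so it fits the packed
 * func_repeats_entry. A reader reassembles it with the inverse operation:
 *
 *	u64 delta = ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
 */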
3122 
3123 void trace_last_func_repeats(struct trace_array *tr,
3124 			     struct trace_func_repeats *last_info,
3125 			     unsigned int trace_ctx)
3126 {
3127 	struct trace_buffer *buffer = tr->array_buffer.buffer;
3128 	struct func_repeats_entry *entry;
3129 	struct ring_buffer_event *event;
3130 	u64 delta;
3131 
3132 	event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3133 					    sizeof(*entry), trace_ctx);
3134 	if (!event)
3135 		return;
3136 
3137 	delta = ring_buffer_event_time_stamp(buffer, event) -
3138 		last_info->ts_last_call;
3139 
3140 	entry = ring_buffer_event_data(event);
3141 	entry->ip = last_info->ip;
3142 	entry->parent_ip = last_info->parent_ip;
3143 	entry->count = last_info->count;
3144 	func_repeats_set_delta_ts(entry, delta);
3145 
3146 	__buffer_unlock_commit(buffer, event);
3147 }
3148 
3149 /* created for use with alloc_percpu */
3150 struct trace_buffer_struct {
3151 	int nesting;
3152 	char buffer[4][TRACE_BUF_SIZE];
3153 };
3154 
3155 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3156 
3157 /*
3158  * This allows for lockless recording.  If we're nested too deeply, then
3159  * this returns NULL.
3160  */
3161 static char *get_trace_buf(void)
3162 {
3163 	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3164 
3165 	if (!trace_percpu_buffer || buffer->nesting >= 4)
3166 		return NULL;
3167 
3168 	buffer->nesting++;
3169 
3170 	/* Interrupts must see nesting incremented before we use the buffer */
3171 	barrier();
3172 	return &buffer->buffer[buffer->nesting - 1][0];
3173 }
3174 
3175 static void put_trace_buf(void)
3176 {
3177 	/* Don't let the decrement of nesting leak before this */
3178 	barrier();
3179 	this_cpu_dec(trace_percpu_buffer->nesting);
3180 }
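
/*
 * Usage sketch (assumed, mirroring trace_vbprintk() below): callers pair
 * get_trace_buf()/put_trace_buf() with preemption disabled so the per-CPU
 * nesting counter stays consistent.
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (tbuffer) {
 *		len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 *		// ... copy tbuffer into a ring buffer event ...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 */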
3181 
3182 static int alloc_percpu_trace_buffer(void)
3183 {
3184 	struct trace_buffer_struct __percpu *buffers;
3185 
3186 	if (trace_percpu_buffer)
3187 		return 0;
3188 
3189 	buffers = alloc_percpu(struct trace_buffer_struct);
3190 	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3191 		return -ENOMEM;
3192 
3193 	trace_percpu_buffer = buffers;
3194 	return 0;
3195 }
3196 
3197 static int buffers_allocated;
3198 
3199 void trace_printk_init_buffers(void)
3200 {
3201 	if (buffers_allocated)
3202 		return;
3203 
3204 	if (alloc_percpu_trace_buffer())
3205 		return;
3206 
3207 	/* trace_printk() is for debug use only. Don't use it in production. */
3208 
3209 	pr_warn("\n");
3210 	pr_warn("**********************************************************\n");
3211 	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3212 	pr_warn("**                                                      **\n");
3213 	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
3214 	pr_warn("**                                                      **\n");
3215 	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
3216 	pr_warn("** unsafe for production use.                           **\n");
3217 	pr_warn("**                                                      **\n");
3218 	pr_warn("** If you see this message and you are not debugging    **\n");
3219 	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
3220 	pr_warn("**                                                      **\n");
3221 	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3222 	pr_warn("**********************************************************\n");
3223 
3224 	/* Expand the buffers to set size */
3225 	tracing_update_buffers(&global_trace);
3226 
3227 	buffers_allocated = 1;
3228 
3229 	/*
3230 	 * trace_printk_init_buffers() can be called by modules.
3231 	 * If that happens, then we need to start cmdline recording
3232 	 * directly here. If the global_trace.buffer is already
3233 	 * allocated here, then this was called by module code.
3234 	 */
3235 	if (global_trace.array_buffer.buffer)
3236 		tracing_start_cmdline_record();
3237 }
3238 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3239 
3240 void trace_printk_start_comm(void)
3241 {
3242 	/* Start tracing comms if trace printk is set */
3243 	if (!buffers_allocated)
3244 		return;
3245 	tracing_start_cmdline_record();
3246 }
3247 
3248 static void trace_printk_start_stop_comm(int enabled)
3249 {
3250 	if (!buffers_allocated)
3251 		return;
3252 
3253 	if (enabled)
3254 		tracing_start_cmdline_record();
3255 	else
3256 		tracing_stop_cmdline_record();
3257 }
3258 
3259 /**
3260  * trace_vbprintk - write binary msg to tracing buffer
3261  * @ip:    The address of the caller
3262  * @fmt:   The string format to write to the buffer
3263  * @args:  Arguments for @fmt
3264  */
3265 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3266 {
3267 	struct ring_buffer_event *event;
3268 	struct trace_buffer *buffer;
3269 	struct trace_array *tr = READ_ONCE(printk_trace);
3270 	struct bprint_entry *entry;
3271 	unsigned int trace_ctx;
3272 	char *tbuffer;
3273 	int len = 0, size;
3274 
3275 	if (!printk_binsafe(tr))
3276 		return trace_vprintk(ip, fmt, args);
3277 
3278 	if (unlikely(tracing_selftest_running || tracing_disabled))
3279 		return 0;
3280 
3281 	/* Don't pollute graph traces with trace_vprintk internals */
3282 	pause_graph_tracing();
3283 
3284 	trace_ctx = tracing_gen_ctx();
3285 	preempt_disable_notrace();
3286 
3287 	tbuffer = get_trace_buf();
3288 	if (!tbuffer) {
3289 		len = 0;
3290 		goto out_nobuffer;
3291 	}
3292 
3293 	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3294 
3295 	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3296 		goto out_put;
3297 
3298 	size = sizeof(*entry) + sizeof(u32) * len;
3299 	buffer = tr->array_buffer.buffer;
3300 	ring_buffer_nest_start(buffer);
3301 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3302 					    trace_ctx);
3303 	if (!event)
3304 		goto out;
3305 	entry = ring_buffer_event_data(event);
3306 	entry->ip			= ip;
3307 	entry->fmt			= fmt;
3308 
3309 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3310 	__buffer_unlock_commit(buffer, event);
3311 	ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3312 
3313 out:
3314 	ring_buffer_nest_end(buffer);
3315 out_put:
3316 	put_trace_buf();
3317 
3318 out_nobuffer:
3319 	preempt_enable_notrace();
3320 	unpause_graph_tracing();
3321 
3322 	return len;
3323 }
3324 EXPORT_SYMBOL_GPL(trace_vbprintk);
3325 
3326 __printf(3, 0)
3327 static int
3328 __trace_array_vprintk(struct trace_buffer *buffer,
3329 		      unsigned long ip, const char *fmt, va_list args)
3330 {
3331 	struct ring_buffer_event *event;
3332 	int len = 0, size;
3333 	struct print_entry *entry;
3334 	unsigned int trace_ctx;
3335 	char *tbuffer;
3336 
3337 	if (tracing_disabled)
3338 		return 0;
3339 
3340 	/* Don't pollute graph traces with trace_vprintk internals */
3341 	pause_graph_tracing();
3342 
3343 	trace_ctx = tracing_gen_ctx();
3344 	preempt_disable_notrace();
3345 
3347 	tbuffer = get_trace_buf();
3348 	if (!tbuffer) {
3349 		len = 0;
3350 		goto out_nobuffer;
3351 	}
3352 
3353 	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3354 
3355 	size = sizeof(*entry) + len + 1;
3356 	ring_buffer_nest_start(buffer);
3357 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3358 					    trace_ctx);
3359 	if (!event)
3360 		goto out;
3361 	entry = ring_buffer_event_data(event);
3362 	entry->ip = ip;
3363 
3364 	memcpy(&entry->buf, tbuffer, len + 1);
3365 	__buffer_unlock_commit(buffer, event);
3366 	ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
3367 
3368 out:
3369 	ring_buffer_nest_end(buffer);
3370 	put_trace_buf();
3371 
3372 out_nobuffer:
3373 	preempt_enable_notrace();
3374 	unpause_graph_tracing();
3375 
3376 	return len;
3377 }
3378 
3379 __printf(3, 0)
3380 int trace_array_vprintk(struct trace_array *tr,
3381 			unsigned long ip, const char *fmt, va_list args)
3382 {
3383 	if (tracing_selftest_running && tr == &global_trace)
3384 		return 0;
3385 
3386 	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3387 }
3388 
3389 /**
3390  * trace_array_printk - Print a message to a specific instance
3391  * @tr: The instance trace_array descriptor
3392  * @ip: The instruction pointer that this is called from.
3393  * @fmt: The format to print (printf format)
3394  *
3395  * If a subsystem sets up its own instance, it has the right to
3396  * printk strings into its tracing instance buffer using this
3397  * function. Note, this function will not write into the top level
3398  * buffer (use trace_printk() for that), as the top level buffer
3399  * should only contain events that can be individually disabled.
3400  * trace_printk() is only used for debugging a kernel, and should
3401  * never be incorporated into normal use.
3402  *
3403  * trace_array_printk() can be used, as it will not add noise to the
3404  * top level tracing buffer.
3405  *
3406  * Note, trace_array_init_printk() must be called on @tr before this
3407  * can be used.
3408  */
3409 __printf(3, 0)
3410 int trace_array_printk(struct trace_array *tr,
3411 		       unsigned long ip, const char *fmt, ...)
3412 {
3413 	int ret;
3414 	va_list ap;
3415 
3416 	if (!tr)
3417 		return -ENOENT;
3418 
3419 	/* This is only allowed for created instances */
3420 	if (tr == &global_trace)
3421 		return 0;
3422 
3423 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3424 		return 0;
3425 
3426 	va_start(ap, fmt);
3427 	ret = trace_array_vprintk(tr, ip, fmt, ap);
3428 	va_end(ap);
3429 	return ret;
3430 }
3431 EXPORT_SYMBOL_GPL(trace_array_printk);
3432 
3433 /**
3434  * trace_array_init_printk - Initialize buffers for trace_array_printk()
3435  * @tr: The trace array to initialize the buffers for
3436  *
3437  * As trace_array_printk() only writes into instances, calls to it are OK
3438  * to have in the kernel (unlike trace_printk()). This needs to be called
3439  * before trace_array_printk() can be used on a trace_array.
3440  */
3441 int trace_array_init_printk(struct trace_array *tr)
3442 {
3443 	if (!tr)
3444 		return -ENOENT;
3445 
3446 	/* This is only allowed for created instances */
3447 	if (tr == &global_trace)
3448 		return -EINVAL;
3449 
3450 	return alloc_percpu_trace_buffer();
3451 }
3452 EXPORT_SYMBOL_GPL(trace_array_init_printk);
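
/*
 * A hedged usage sketch: a subsystem that owns an instance initializes the
 * printk buffers once and may then print into that instance. Obtaining
 * "tr" (e.g. via trace_array_get_by_name()) and the message itself are
 * assumptions of this example.
 *
 *	if (!trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "probe read %d\n", val);
 */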
3453 
3454 __printf(3, 4)
3455 int trace_array_printk_buf(struct trace_buffer *buffer,
3456 			   unsigned long ip, const char *fmt, ...)
3457 {
3458 	int ret;
3459 	va_list ap;
3460 
3461 	if (!(printk_trace->trace_flags & TRACE_ITER_PRINTK))
3462 		return 0;
3463 
3464 	va_start(ap, fmt);
3465 	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3466 	va_end(ap);
3467 	return ret;
3468 }
3469 
3470 __printf(2, 0)
3471 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3472 {
3473 	return trace_array_vprintk(printk_trace, ip, fmt, args);
3474 }
3475 EXPORT_SYMBOL_GPL(trace_vprintk);
3476 
3477 static void trace_iterator_increment(struct trace_iterator *iter)
3478 {
3479 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3480 
3481 	iter->idx++;
3482 	if (buf_iter)
3483 		ring_buffer_iter_advance(buf_iter);
3484 }
3485 
3486 static struct trace_entry *
3487 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3488 		unsigned long *lost_events)
3489 {
3490 	struct ring_buffer_event *event;
3491 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3492 
3493 	if (buf_iter) {
3494 		event = ring_buffer_iter_peek(buf_iter, ts);
3495 		if (lost_events)
3496 			*lost_events = ring_buffer_iter_dropped(buf_iter) ?
3497 				(unsigned long)-1 : 0;
3498 	} else {
3499 		event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3500 					 lost_events);
3501 	}
3502 
3503 	if (event) {
3504 		iter->ent_size = ring_buffer_event_length(event);
3505 		return ring_buffer_event_data(event);
3506 	}
3507 	iter->ent_size = 0;
3508 	return NULL;
3509 }
3510 
3511 static struct trace_entry *
3512 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3513 		  unsigned long *missing_events, u64 *ent_ts)
3514 {
3515 	struct trace_buffer *buffer = iter->array_buffer->buffer;
3516 	struct trace_entry *ent, *next = NULL;
3517 	unsigned long lost_events = 0, next_lost = 0;
3518 	int cpu_file = iter->cpu_file;
3519 	u64 next_ts = 0, ts;
3520 	int next_cpu = -1;
3521 	int next_size = 0;
3522 	int cpu;
3523 
3524 	/*
3525 	 * If we are in a per_cpu trace file, don't bother iterating over
3526 	 * all CPUs; just peek at that CPU directly.
3527 	 */
3528 	if (cpu_file > RING_BUFFER_ALL_CPUS) {
3529 		if (ring_buffer_empty_cpu(buffer, cpu_file))
3530 			return NULL;
3531 		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3532 		if (ent_cpu)
3533 			*ent_cpu = cpu_file;
3534 
3535 		return ent;
3536 	}
3537 
3538 	for_each_tracing_cpu(cpu) {
3539 
3540 		if (ring_buffer_empty_cpu(buffer, cpu))
3541 			continue;
3542 
3543 		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3544 
3545 		/*
3546 		 * Pick the entry with the smallest timestamp:
3547 		 */
3548 		if (ent && (!next || ts < next_ts)) {
3549 			next = ent;
3550 			next_cpu = cpu;
3551 			next_ts = ts;
3552 			next_lost = lost_events;
3553 			next_size = iter->ent_size;
3554 		}
3555 	}
3556 
3557 	iter->ent_size = next_size;
3558 
3559 	if (ent_cpu)
3560 		*ent_cpu = next_cpu;
3561 
3562 	if (ent_ts)
3563 		*ent_ts = next_ts;
3564 
3565 	if (missing_events)
3566 		*missing_events = next_lost;
3567 
3568 	return next;
3569 }
3570 
3571 #define STATIC_FMT_BUF_SIZE	128
3572 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3573 
3574 char *trace_iter_expand_format(struct trace_iterator *iter)
3575 {
3576 	char *tmp;
3577 
3578 	/*
3579 	 * iter->tr is NULL when used with tp_printk, which makes
3580 	 * this get called where it is not safe to call krealloc().
3581 	 */
3582 	if (!iter->tr || iter->fmt == static_fmt_buf)
3583 		return NULL;
3584 
3585 	tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3586 		       GFP_KERNEL);
3587 	if (tmp) {
3588 		iter->fmt_size += STATIC_FMT_BUF_SIZE;
3589 		iter->fmt = tmp;
3590 	}
3591 
3592 	return tmp;
3593 }
3594 
3595 /* Returns true if the string is safe to dereference from an event */
3596 static bool trace_safe_str(struct trace_iterator *iter, const char *str)
3597 {
3598 	unsigned long addr = (unsigned long)str;
3599 	struct trace_event *trace_event;
3600 	struct trace_event_call *event;
3601 
3602 	/* OK if part of the event data */
3603 	if ((addr >= (unsigned long)iter->ent) &&
3604 	    (addr < (unsigned long)iter->ent + iter->ent_size))
3605 		return true;
3606 
3607 	/* OK if part of the temp seq buffer */
3608 	if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3609 	    (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE))
3610 		return true;
3611 
3612 	/* Core rodata can not be freed */
3613 	if (is_kernel_rodata(addr))
3614 		return true;
3615 
3616 	if (trace_is_tracepoint_string(str))
3617 		return true;
3618 
3619 	/*
3620 	 * Now this could be a module event, referencing core module
3621 	 * data, which is OK.
3622 	 */
3623 	if (!iter->ent)
3624 		return false;
3625 
3626 	trace_event = ftrace_find_event(iter->ent->type);
3627 	if (!trace_event)
3628 		return false;
3629 
3630 	event = container_of(trace_event, struct trace_event_call, event);
3631 	if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3632 		return false;
3633 
3634 	/* Would rather have rodata, but this will suffice */
3635 	if (within_module_core(addr, event->module))
3636 		return true;
3637 
3638 	return false;
3639 }
3640 
3641 /**
3642  * ignore_event - Check dereferenced fields while writing to the seq buffer
3643  * @iter: The iterator that holds the seq buffer and the event being printed
3644  *
3645  * At boot up, test_event_printk() will flag any event that dereferences
3646  * a string with "%s" that does not exist in the ring buffer. It may still
3647  * be valid, as the string may point to a static string in the kernel
3648  * rodata that never gets freed. But if the string pointer is pointing
3649  * to something that was allocated, there's a chance that it can be freed
3650  * by the time the user reads the trace. This would cause a bad memory
3651  * access by the kernel and possibly crash the system.
3652  *
3653  * This function will check if the event has any fields flagged as needing
3654  * to be checked at runtime and perform those checks.
3655  *
3656  * If it is found that a field is unsafe, it will write into the @iter->seq
3657  * a message stating what was found to be unsafe.
3658  *
3659  * @return: true if the event is unsafe and should be ignored,
3660  *          false otherwise.
3661  */
3662 bool ignore_event(struct trace_iterator *iter)
3663 {
3664 	struct ftrace_event_field *field;
3665 	struct trace_event *trace_event;
3666 	struct trace_event_call *event;
3667 	struct list_head *head;
3668 	struct trace_seq *seq;
3669 	const void *ptr;
3670 
3671 	trace_event = ftrace_find_event(iter->ent->type);
3672 
3673 	seq = &iter->seq;
3674 
3675 	if (!trace_event) {
3676 		trace_seq_printf(seq, "EVENT ID %d NOT FOUND?\n", iter->ent->type);
3677 		return true;
3678 	}
3679 
3680 	event = container_of(trace_event, struct trace_event_call, event);
3681 	if (!(event->flags & TRACE_EVENT_FL_TEST_STR))
3682 		return false;
3683 
3684 	head = trace_get_fields(event);
3685 	if (!head) {
3686 		trace_seq_printf(seq, "FIELDS FOR EVENT '%s' NOT FOUND?\n",
3687 				 trace_event_name(event));
3688 		return true;
3689 	}
3690 
3691 	/* Offsets are from the iter->ent that points to the raw event */
3692 	ptr = iter->ent;
3693 
3694 	list_for_each_entry(field, head, link) {
3695 		const char *str;
3696 		bool good;
3697 
3698 		if (!field->needs_test)
3699 			continue;
3700 
3701 		str = *(const char **)(ptr + field->offset);
3702 
3703 		good = trace_safe_str(iter, str);
3704 
3705 		/*
3706 		 * If you hit this warning, it is likely that the
3707 		 * trace event in question used %s on a string that
3708 		 * was saved at the time of the event, but may not be
3709 		 * around when the trace is read. Use __string(),
3710 		 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3711 		 * instead. See samples/trace_events/trace-events-sample.h
3712 		 * for reference.
3713 		 */
3714 		if (WARN_ONCE(!good, "event '%s' has unsafe pointer field '%s'",
3715 			      trace_event_name(event), field->name)) {
3716 			trace_seq_printf(seq, "EVENT %s: HAS UNSAFE POINTER FIELD '%s'\n",
3717 					 trace_event_name(event), field->name);
3718 			return true;
3719 		}
3720 	}
3721 	return false;
3722 }
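
/*
 * A hedged sketch of the safe pattern the warning above points to: copy
 * the string into the event with the __string()/__assign_str()/__get_str()
 * helpers instead of recording a bare pointer. The event name and argument
 * are hypothetical; see samples/trace_events/trace-events-sample.h for the
 * authoritative form.
 *
 *	TRACE_EVENT(my_event,
 *		TP_PROTO(const char *name),
 *		TP_ARGS(name),
 *		TP_STRUCT__entry(
 *			__string(name, name)
 *		),
 *		TP_fast_assign(
 *			__assign_str(name);
 *		),
 *		TP_printk("name=%s", __get_str(name))
 *	);
 */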
3723 
3724 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3725 {
3726 	const char *p, *new_fmt;
3727 	char *q;
3728 
3729 	if (WARN_ON_ONCE(!fmt))
3730 		return fmt;
3731 
3732 	if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3733 		return fmt;
3734 
3735 	p = fmt;
3736 	new_fmt = q = iter->fmt;
3737 	while (*p) {
3738 		if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3739 			if (!trace_iter_expand_format(iter))
3740 				return fmt;
3741 
3742 			q += iter->fmt - new_fmt;
3743 			new_fmt = iter->fmt;
3744 		}
3745 
3746 		*q++ = *p++;
3747 
3748 		/* Replace %p with %px */
3749 		if (p[-1] == '%') {
3750 			if (p[0] == '%') {
3751 				*q++ = *p++;
3752 			} else if (p[0] == 'p' && !isalnum(p[1])) {
3753 				*q++ = *p++;
3754 				*q++ = 'x';
3755 			}
3756 		}
3757 	}
3758 	*q = '\0';
3759 
3760 	return new_fmt;
3761 }
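
/*
 * For example, with hash-ptr disabled on the instance, a print format of
 *
 *	"comm=%s target=%p func=%pS"
 *
 * is rewritten into iter->fmt as
 *
 *	"comm=%s target=%px func=%pS"
 *
 * Only a bare "%p" gains the 'x'; extended specifiers such as "%pS" and
 * escaped "%%" sequences pass through unchanged.
 */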
3762 
3763 #define STATIC_TEMP_BUF_SIZE	128
3764 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3765 
3766 /* Find the next real entry, without updating the iterator itself */
3767 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3768 					  int *ent_cpu, u64 *ent_ts)
3769 {
3770 	/* __find_next_entry will reset ent_size */
3771 	int ent_size = iter->ent_size;
3772 	struct trace_entry *entry;
3773 
3774 	/*
3775 	 * If called from ftrace_dump(), then the iter->temp buffer
3776 	 * will be the static_temp_buf and not created from kmalloc.
3777 	 * If the entry size is greater than the buffer, we cannot
3778 	 * save it. Just return NULL in that case. This is only
3779 	 * used to add markers when two consecutive events' time
3780 	 * stamps have a large delta. See trace_print_lat_context().
3781 	 */
3782 	if (iter->temp == static_temp_buf &&
3783 	    STATIC_TEMP_BUF_SIZE < ent_size)
3784 		return NULL;
3785 
3786 	/*
3787 	 * The __find_next_entry() may call peek_next_entry(), which may
3788 	 * call ring_buffer_peek() that may make the contents of iter->ent
3789 	 * undefined. Need to copy iter->ent now.
3790 	 */
3791 	if (iter->ent && iter->ent != iter->temp) {
3792 		if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3793 		    !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3794 			void *temp;
3795 			temp = kmalloc(iter->ent_size, GFP_KERNEL);
3796 			if (!temp)
3797 				return NULL;
3798 			kfree(iter->temp);
3799 			iter->temp = temp;
3800 			iter->temp_size = iter->ent_size;
3801 		}
3802 		memcpy(iter->temp, iter->ent, iter->ent_size);
3803 		iter->ent = iter->temp;
3804 	}
3805 	entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3806 	/* Put back the original ent_size */
3807 	iter->ent_size = ent_size;
3808 
3809 	return entry;
3810 }
3811 
3812 /* Find the next real entry, and increment the iterator to the next entry */
3813 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3814 {
3815 	iter->ent = __find_next_entry(iter, &iter->cpu,
3816 				      &iter->lost_events, &iter->ts);
3817 
3818 	if (iter->ent)
3819 		trace_iterator_increment(iter);
3820 
3821 	return iter->ent ? iter : NULL;
3822 }
3823 
3824 static void trace_consume(struct trace_iterator *iter)
3825 {
3826 	ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3827 			    &iter->lost_events);
3828 }
3829 
3830 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3831 {
3832 	struct trace_iterator *iter = m->private;
3833 	int i = (int)*pos;
3834 	void *ent;
3835 
3836 	WARN_ON_ONCE(iter->leftover);
3837 
3838 	(*pos)++;
3839 
3840 	/* can't go backwards */
3841 	if (iter->idx > i)
3842 		return NULL;
3843 
3844 	if (iter->idx < 0)
3845 		ent = trace_find_next_entry_inc(iter);
3846 	else
3847 		ent = iter;
3848 
3849 	while (ent && iter->idx < i)
3850 		ent = trace_find_next_entry_inc(iter);
3851 
3852 	iter->pos = *pos;
3853 
3854 	return ent;
3855 }
3856 
3857 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3858 {
3859 	struct ring_buffer_iter *buf_iter;
3860 	unsigned long entries = 0;
3861 	u64 ts;
3862 
3863 	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3864 
3865 	buf_iter = trace_buffer_iter(iter, cpu);
3866 	if (!buf_iter)
3867 		return;
3868 
3869 	ring_buffer_iter_reset(buf_iter);
3870 
3871 	/*
3872 	 * We could have the case with the max latency tracers
3873 	 * that a reset never took place on a cpu. This is evident
3874 	 * by the timestamp being before the start of the buffer.
3875 	 */
3876 	while (ring_buffer_iter_peek(buf_iter, &ts)) {
3877 		if (ts >= iter->array_buffer->time_start)
3878 			break;
3879 		entries++;
3880 		ring_buffer_iter_advance(buf_iter);
3881 		/* This could be a big loop */
3882 		cond_resched();
3883 	}
3884 
3885 	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3886 }
3887 
3888 /*
3889  * The current tracer is copied into the iterator so that readers do
3890  * not need to hold the global trace_types_lock all around.
3891  */
3892 static void *s_start(struct seq_file *m, loff_t *pos)
3893 {
3894 	struct trace_iterator *iter = m->private;
3895 	struct trace_array *tr = iter->tr;
3896 	int cpu_file = iter->cpu_file;
3897 	void *p = NULL;
3898 	loff_t l = 0;
3899 	int cpu;
3900 
3901 	mutex_lock(&trace_types_lock);
3902 	if (unlikely(tr->current_trace != iter->trace)) {
3903 		/* Close iter->trace before switching to the new current tracer */
3904 		if (iter->trace->close)
3905 			iter->trace->close(iter);
3906 		iter->trace = tr->current_trace;
3907 		/* Reopen the new current tracer */
3908 		if (iter->trace->open)
3909 			iter->trace->open(iter);
3910 	}
3911 	mutex_unlock(&trace_types_lock);
3912 
3913 #ifdef CONFIG_TRACER_MAX_TRACE
3914 	if (iter->snapshot && iter->trace->use_max_tr)
3915 		return ERR_PTR(-EBUSY);
3916 #endif
3917 
3918 	if (*pos != iter->pos) {
3919 		iter->ent = NULL;
3920 		iter->cpu = 0;
3921 		iter->idx = -1;
3922 
3923 		if (cpu_file == RING_BUFFER_ALL_CPUS) {
3924 			for_each_tracing_cpu(cpu)
3925 				tracing_iter_reset(iter, cpu);
3926 		} else
3927 			tracing_iter_reset(iter, cpu_file);
3928 
3929 		iter->leftover = 0;
3930 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3931 			;
3932 
3933 	} else {
3934 		/*
3935 		 * If we overflowed the seq_file before, then we want
3936 		 * to just reuse the trace_seq buffer again.
3937 		 */
3938 		if (iter->leftover)
3939 			p = iter;
3940 		else {
3941 			l = *pos - 1;
3942 			p = s_next(m, p, &l);
3943 		}
3944 	}
3945 
3946 	trace_event_read_lock();
3947 	trace_access_lock(cpu_file);
3948 	return p;
3949 }
3950 
3951 static void s_stop(struct seq_file *m, void *p)
3952 {
3953 	struct trace_iterator *iter = m->private;
3954 
3955 #ifdef CONFIG_TRACER_MAX_TRACE
3956 	if (iter->snapshot && iter->trace->use_max_tr)
3957 		return;
3958 #endif
3959 
3960 	trace_access_unlock(iter->cpu_file);
3961 	trace_event_read_unlock();
3962 }
3963 
3964 static void
3965 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
3966 		      unsigned long *entries, int cpu)
3967 {
3968 	unsigned long count;
3969 
3970 	count = ring_buffer_entries_cpu(buf->buffer, cpu);
3971 	/*
3972 	 * If this buffer has skipped entries, then we hold all
3973 	 * entries for the trace and we need to ignore the
3974 	 * ones before the time stamp.
3975 	 */
3976 	if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3977 		count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3978 		/* total is the same as the entries */
3979 		*total = count;
3980 	} else
3981 		*total = count +
3982 			ring_buffer_overrun_cpu(buf->buffer, cpu);
3983 	*entries = count;
3984 }
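
/*
 * Worked example: if ring_buffer_entries_cpu() reports 100 events still
 * present in a CPU's buffer and ring_buffer_overrun_cpu() reports 40
 * overwritten ones, then *entries is 100 and *total is 140. If the buffer
 * was reset mid-trace (skipped_entries is set), the skipped events are
 * subtracted instead and total equals entries.
 */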
3985 
3986 static void
3987 get_total_entries(struct array_buffer *buf,
3988 		  unsigned long *total, unsigned long *entries)
3989 {
3990 	unsigned long t, e;
3991 	int cpu;
3992 
3993 	*total = 0;
3994 	*entries = 0;
3995 
3996 	for_each_tracing_cpu(cpu) {
3997 		get_total_entries_cpu(buf, &t, &e, cpu);
3998 		*total += t;
3999 		*entries += e;
4000 	}
4001 }
4002 
4003 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4004 {
4005 	unsigned long total, entries;
4006 
4007 	if (!tr)
4008 		tr = &global_trace;
4009 
4010 	get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4011 
4012 	return entries;
4013 }
4014 
4015 unsigned long trace_total_entries(struct trace_array *tr)
4016 {
4017 	unsigned long total, entries;
4018 
4019 	if (!tr)
4020 		tr = &global_trace;
4021 
4022 	get_total_entries(&tr->array_buffer, &total, &entries);
4023 
4024 	return entries;
4025 }
4026 
4027 static void print_lat_help_header(struct seq_file *m)
4028 {
4029 	seq_puts(m, "#                    _------=> CPU#            \n"
4030 		    "#                   / _-----=> irqs-off/BH-disabled\n"
4031 		    "#                  | / _----=> need-resched    \n"
4032 		    "#                  || / _---=> hardirq/softirq \n"
4033 		    "#                  ||| / _--=> preempt-depth   \n"
4034 		    "#                  |||| / _-=> migrate-disable \n"
4035 		    "#                  ||||| /     delay           \n"
4036 		    "#  cmd     pid     |||||| time  |   caller     \n"
4037 		    "#     \\   /        ||||||  \\    |    /       \n");
4038 }
4039 
4040 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4041 {
4042 	unsigned long total;
4043 	unsigned long entries;
4044 
4045 	get_total_entries(buf, &total, &entries);
4046 	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
4047 		   entries, total, num_online_cpus());
4048 	seq_puts(m, "#\n");
4049 }
4050 
4051 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4052 				   unsigned int flags)
4053 {
4054 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
4055 
4056 	print_event_info(buf, m);
4057 
4058 	seq_printf(m, "#           TASK-PID    %s CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : "");
4059 	seq_printf(m, "#              | |      %s   |         |         |\n",      tgid ? "     |    " : "");
4060 }
4061 
4062 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4063 				       unsigned int flags)
4064 {
4065 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
4066 	static const char space[] = "            ";
4067 	int prec = tgid ? 12 : 2;
4068 
4069 	print_event_info(buf, m);
4070 
4071 	seq_printf(m, "#                            %.*s  _-----=> irqs-off/BH-disabled\n", prec, space);
4072 	seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
4073 	seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
4074 	seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
4075 	seq_printf(m, "#                            %.*s||| / _-=> migrate-disable\n", prec, space);
4076 	seq_printf(m, "#                            %.*s|||| /     delay\n", prec, space);
4077 	seq_printf(m, "#           TASK-PID  %.*s CPU#  |||||  TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
4078 	seq_printf(m, "#              | |    %.*s   |   |||||     |         |\n", prec, "       |    ");
4079 }
4080 
4081 void
4082 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4083 {
4084 	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4085 	struct array_buffer *buf = iter->array_buffer;
4086 	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4087 	struct tracer *type = iter->trace;
4088 	unsigned long entries;
4089 	unsigned long total;
4090 	const char *name = type->name;
4091 
4092 	get_total_entries(buf, &total, &entries);
4093 
4094 	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4095 		   name, init_utsname()->release);
4096 	seq_puts(m, "# -----------------------------------"
4097 		 "---------------------------------\n");
4098 	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4099 		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4100 		   nsecs_to_usecs(data->saved_latency),
4101 		   entries,
4102 		   total,
4103 		   buf->cpu,
4104 		   preempt_model_none()      ? "server" :
4105 		   preempt_model_voluntary() ? "desktop" :
4106 		   preempt_model_full()      ? "preempt" :
4107 		   preempt_model_lazy()	     ? "lazy"    :
4108 		   preempt_model_rt()        ? "preempt_rt" :
4109 		   "unknown",
4110 		   /* These are reserved for later use */
4111 		   0, 0, 0, 0);
4112 #ifdef CONFIG_SMP
4113 	seq_printf(m, " #P:%d)\n", num_online_cpus());
4114 #else
4115 	seq_puts(m, ")\n");
4116 #endif
4117 	seq_puts(m, "#    -----------------\n");
4118 	seq_printf(m, "#    | task: %.16s-%d "
4119 		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4120 		   data->comm, data->pid,
4121 		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4122 		   data->policy, data->rt_priority);
4123 	seq_puts(m, "#    -----------------\n");
4124 
4125 	if (data->critical_start) {
4126 		seq_puts(m, "#  => started at: ");
4127 		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4128 		trace_print_seq(m, &iter->seq);
4129 		seq_puts(m, "\n#  => ended at:   ");
4130 		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4131 		trace_print_seq(m, &iter->seq);
4132 		seq_puts(m, "\n#\n");
4133 	}
4134 
4135 	seq_puts(m, "#\n");
4136 }
4137 
4138 static void test_cpu_buff_start(struct trace_iterator *iter)
4139 {
4140 	struct trace_seq *s = &iter->seq;
4141 	struct trace_array *tr = iter->tr;
4142 
4143 	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4144 		return;
4145 
4146 	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4147 		return;
4148 
4149 	if (cpumask_available(iter->started) &&
4150 	    cpumask_test_cpu(iter->cpu, iter->started))
4151 		return;
4152 
4153 	if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4154 		return;
4155 
4156 	if (cpumask_available(iter->started))
4157 		cpumask_set_cpu(iter->cpu, iter->started);
4158 
4159 	/* Don't print started cpu buffer for the first entry of the trace */
4160 	if (iter->idx > 1)
4161 		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4162 				iter->cpu);
4163 }
4164 
4165 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4166 {
4167 	struct trace_array *tr = iter->tr;
4168 	struct trace_seq *s = &iter->seq;
4169 	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4170 	struct trace_entry *entry;
4171 	struct trace_event *event;
4172 
4173 	entry = iter->ent;
4174 
4175 	test_cpu_buff_start(iter);
4176 
4177 	event = ftrace_find_event(entry->type);
4178 
4179 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4180 		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4181 			trace_print_lat_context(iter);
4182 		else
4183 			trace_print_context(iter);
4184 	}
4185 
4186 	if (trace_seq_has_overflowed(s))
4187 		return TRACE_TYPE_PARTIAL_LINE;
4188 
4189 	if (event) {
4190 		if (tr->trace_flags & TRACE_ITER_FIELDS)
4191 			return print_event_fields(iter, event);
4192 		/*
4193 		 * For TRACE_EVENT() events, the print_fmt is not
4194 		 * safe to use if the array has delta offsets.
4195 		 * Force printing via the fields.
4196 		 */
4197 		if ((tr->text_delta) &&
4198 		    event->type > __TRACE_LAST_TYPE)
4199 			return print_event_fields(iter, event);
4200 
4201 		return event->funcs->trace(iter, sym_flags, event);
4202 	}
4203 
4204 	trace_seq_printf(s, "Unknown type %d\n", entry->type);
4205 
4206 	return trace_handle_return(s);
4207 }
4208 
4209 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4210 {
4211 	struct trace_array *tr = iter->tr;
4212 	struct trace_seq *s = &iter->seq;
4213 	struct trace_entry *entry;
4214 	struct trace_event *event;
4215 
4216 	entry = iter->ent;
4217 
4218 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4219 		trace_seq_printf(s, "%d %d %llu ",
4220 				 entry->pid, iter->cpu, iter->ts);
4221 
4222 	if (trace_seq_has_overflowed(s))
4223 		return TRACE_TYPE_PARTIAL_LINE;
4224 
4225 	event = ftrace_find_event(entry->type);
4226 	if (event)
4227 		return event->funcs->raw(iter, 0, event);
4228 
4229 	trace_seq_printf(s, "%d ?\n", entry->type);
4230 
4231 	return trace_handle_return(s);
4232 }
4233 
4234 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4235 {
4236 	struct trace_array *tr = iter->tr;
4237 	struct trace_seq *s = &iter->seq;
4238 	unsigned char newline = '\n';
4239 	struct trace_entry *entry;
4240 	struct trace_event *event;
4241 
4242 	entry = iter->ent;
4243 
4244 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4245 		SEQ_PUT_HEX_FIELD(s, entry->pid);
4246 		SEQ_PUT_HEX_FIELD(s, iter->cpu);
4247 		SEQ_PUT_HEX_FIELD(s, iter->ts);
4248 		if (trace_seq_has_overflowed(s))
4249 			return TRACE_TYPE_PARTIAL_LINE;
4250 	}
4251 
4252 	event = ftrace_find_event(entry->type);
4253 	if (event) {
4254 		enum print_line_t ret = event->funcs->hex(iter, 0, event);
4255 		if (ret != TRACE_TYPE_HANDLED)
4256 			return ret;
4257 	}
4258 
4259 	SEQ_PUT_FIELD(s, newline);
4260 
4261 	return trace_handle_return(s);
4262 }
4263 
4264 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4265 {
4266 	struct trace_array *tr = iter->tr;
4267 	struct trace_seq *s = &iter->seq;
4268 	struct trace_entry *entry;
4269 	struct trace_event *event;
4270 
4271 	entry = iter->ent;
4272 
4273 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4274 		SEQ_PUT_FIELD(s, entry->pid);
4275 		SEQ_PUT_FIELD(s, iter->cpu);
4276 		SEQ_PUT_FIELD(s, iter->ts);
4277 		if (trace_seq_has_overflowed(s))
4278 			return TRACE_TYPE_PARTIAL_LINE;
4279 	}
4280 
4281 	event = ftrace_find_event(entry->type);
4282 	return event ? event->funcs->binary(iter, 0, event) :
4283 		TRACE_TYPE_HANDLED;
4284 }
4285 
4286 int trace_empty(struct trace_iterator *iter)
4287 {
4288 	struct ring_buffer_iter *buf_iter;
4289 	int cpu;
4290 
4291 	/* If we are looking at one CPU buffer, only check that one */
4292 	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4293 		cpu = iter->cpu_file;
4294 		buf_iter = trace_buffer_iter(iter, cpu);
4295 		if (buf_iter) {
4296 			if (!ring_buffer_iter_empty(buf_iter))
4297 				return 0;
4298 		} else {
4299 			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4300 				return 0;
4301 		}
4302 		return 1;
4303 	}
4304 
4305 	for_each_tracing_cpu(cpu) {
4306 		buf_iter = trace_buffer_iter(iter, cpu);
4307 		if (buf_iter) {
4308 			if (!ring_buffer_iter_empty(buf_iter))
4309 				return 0;
4310 		} else {
4311 			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4312 				return 0;
4313 		}
4314 	}
4315 
4316 	return 1;
4317 }
4318 
4319 /*  Called with trace_event_read_lock() held. */
4320 enum print_line_t print_trace_line(struct trace_iterator *iter)
4321 {
4322 	struct trace_array *tr = iter->tr;
4323 	unsigned long trace_flags = tr->trace_flags;
4324 	enum print_line_t ret;
4325 
4326 	if (iter->lost_events) {
4327 		if (iter->lost_events == (unsigned long)-1)
4328 			trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4329 					 iter->cpu);
4330 		else
4331 			trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4332 					 iter->cpu, iter->lost_events);
4333 		if (trace_seq_has_overflowed(&iter->seq))
4334 			return TRACE_TYPE_PARTIAL_LINE;
4335 	}
4336 
4337 	if (iter->trace && iter->trace->print_line) {
4338 		ret = iter->trace->print_line(iter);
4339 		if (ret != TRACE_TYPE_UNHANDLED)
4340 			return ret;
4341 	}
4342 
4343 	if (iter->ent->type == TRACE_BPUTS &&
4344 			trace_flags & TRACE_ITER_PRINTK &&
4345 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4346 		return trace_print_bputs_msg_only(iter);
4347 
4348 	if (iter->ent->type == TRACE_BPRINT &&
4349 			trace_flags & TRACE_ITER_PRINTK &&
4350 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4351 		return trace_print_bprintk_msg_only(iter);
4352 
4353 	if (iter->ent->type == TRACE_PRINT &&
4354 			trace_flags & TRACE_ITER_PRINTK &&
4355 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4356 		return trace_print_printk_msg_only(iter);
4357 
4358 	if (trace_flags & TRACE_ITER_BIN)
4359 		return print_bin_fmt(iter);
4360 
4361 	if (trace_flags & TRACE_ITER_HEX)
4362 		return print_hex_fmt(iter);
4363 
4364 	if (trace_flags & TRACE_ITER_RAW)
4365 		return print_raw_fmt(iter);
4366 
4367 	return print_trace_fmt(iter);
4368 }
4369 
4370 void trace_latency_header(struct seq_file *m)
4371 {
4372 	struct trace_iterator *iter = m->private;
4373 	struct trace_array *tr = iter->tr;
4374 
4375 	/* print nothing if the buffers are empty */
4376 	if (trace_empty(iter))
4377 		return;
4378 
4379 	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4380 		print_trace_header(m, iter);
4381 
4382 	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4383 		print_lat_help_header(m);
4384 }
4385 
4386 void trace_default_header(struct seq_file *m)
4387 {
4388 	struct trace_iterator *iter = m->private;
4389 	struct trace_array *tr = iter->tr;
4390 	unsigned long trace_flags = tr->trace_flags;
4391 
4392 	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4393 		return;
4394 
4395 	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4396 		/* print nothing if the buffers are empty */
4397 		if (trace_empty(iter))
4398 			return;
4399 		print_trace_header(m, iter);
4400 		if (!(trace_flags & TRACE_ITER_VERBOSE))
4401 			print_lat_help_header(m);
4402 	} else {
4403 		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4404 			if (trace_flags & TRACE_ITER_IRQ_INFO)
4405 				print_func_help_header_irq(iter->array_buffer,
4406 							   m, trace_flags);
4407 			else
4408 				print_func_help_header(iter->array_buffer, m,
4409 						       trace_flags);
4410 		}
4411 	}
4412 }
4413 
4414 static void test_ftrace_alive(struct seq_file *m)
4415 {
4416 	if (!ftrace_is_dead())
4417 		return;
4418 	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4419 		    "#          MAY BE MISSING FUNCTION EVENTS\n");
4420 }
4421 
4422 #ifdef CONFIG_TRACER_MAX_TRACE
4423 static void show_snapshot_main_help(struct seq_file *m)
4424 {
4425 	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4426 		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4427 		    "#                      Takes a snapshot of the main buffer.\n"
4428 		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4429 		    "#                      (Doesn't have to be '2'; works with any number that\n"
4430 		    "#                       is not a '0' or '1')\n");
4431 }
4432 
4433 static void show_snapshot_percpu_help(struct seq_file *m)
4434 {
4435 	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4436 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4437 	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4438 		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
4439 #else
4440 	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4441 		    "#                     Must use main snapshot file to allocate.\n");
4442 #endif
4443 	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4444 		    "#                      (Doesn't have to be '2'; works with any number that\n"
4445 		    "#                       is not a '0' or '1')\n");
4446 }
4447 
4448 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4449 {
4450 	if (iter->tr->allocated_snapshot)
4451 		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4452 	else
4453 		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4454 
4455 	seq_puts(m, "# Snapshot commands:\n");
4456 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4457 		show_snapshot_main_help(m);
4458 	else
4459 		show_snapshot_percpu_help(m);
4460 }
4461 #else
4462 /* Should never be called */
4463 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4464 #endif
4465 
4466 static int s_show(struct seq_file *m, void *v)
4467 {
4468 	struct trace_iterator *iter = v;
4469 	int ret;
4470 
4471 	if (iter->ent == NULL) {
4472 		if (iter->tr) {
4473 			seq_printf(m, "# tracer: %s\n", iter->trace->name);
4474 			seq_puts(m, "#\n");
4475 			test_ftrace_alive(m);
4476 		}
4477 		if (iter->snapshot && trace_empty(iter))
4478 			print_snapshot_help(m, iter);
4479 		else if (iter->trace && iter->trace->print_header)
4480 			iter->trace->print_header(m);
4481 		else
4482 			trace_default_header(m);
4483 
4484 	} else if (iter->leftover) {
4485 		/*
4486 		 * If we filled the seq_file buffer earlier, we
4487 		 * want to just show it now.
4488 		 */
4489 		ret = trace_print_seq(m, &iter->seq);
4490 
4491 		/* ret should this time be zero, but you never know */
4492 		iter->leftover = ret;
4493 
4494 	} else {
4495 		ret = print_trace_line(iter);
4496 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
4497 			iter->seq.full = 0;
4498 			trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
4499 		}
4500 		ret = trace_print_seq(m, &iter->seq);
4501 		/*
4502 		 * If we overflow the seq_file buffer, then it will
4503 		 * ask us for this data again at start up.
4504 		 * Use that instead.
4505 		 *  ret is 0 if seq_file write succeeded.
4506 		 *        -1 otherwise.
4507 		 */
4508 		iter->leftover = ret;
4509 	}
4510 
4511 	return 0;
4512 }
4513 
4514 /*
4515  * Should be used after trace_array_get(); trace_types_lock
4516  * ensures that i_cdev was already initialized.
4517  */
4518 static inline int tracing_get_cpu(struct inode *inode)
4519 {
4520 	if (inode->i_cdev) /* See trace_create_cpu_file() */
4521 		return (long)inode->i_cdev - 1;
4522 	return RING_BUFFER_ALL_CPUS;
4523 }
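
/*
 * Per-CPU trace files store "cpu + 1" in i_cdev when created (see
 * trace_create_cpu_file()), so top-level files, which leave i_cdev NULL,
 * decode to RING_BUFFER_ALL_CPUS, while e.g. the per_cpu/cpu2/trace inode
 * decodes to CPU 2 here.
 */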
4524 
4525 static const struct seq_operations tracer_seq_ops = {
4526 	.start		= s_start,
4527 	.next		= s_next,
4528 	.stop		= s_stop,
4529 	.show		= s_show,
4530 };
4531 
4532 /*
4533  * Note, as iter itself can be allocated and freed in different
4534  * ways, this function is only used to free its content, and not
4535  * the iterator itself. The only requirement for all the allocations
4536  * is that they must zero all fields (kzalloc), as freeing works with
4537  * either allocated content or NULL.
4538  */
4539 static void free_trace_iter_content(struct trace_iterator *iter)
4540 {
4541 	/* The fmt is either NULL, allocated or points to static_fmt_buf */
4542 	if (iter->fmt != static_fmt_buf)
4543 		kfree(iter->fmt);
4544 
4545 	kfree(iter->temp);
4546 	kfree(iter->buffer_iter);
4547 	mutex_destroy(&iter->mutex);
4548 	free_cpumask_var(iter->started);
4549 }
4550 
4551 static struct trace_iterator *
4552 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4553 {
4554 	struct trace_array *tr = inode->i_private;
4555 	struct trace_iterator *iter;
4556 	int cpu;
4557 
4558 	if (tracing_disabled)
4559 		return ERR_PTR(-ENODEV);
4560 
4561 	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4562 	if (!iter)
4563 		return ERR_PTR(-ENOMEM);
4564 
4565 	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4566 				    GFP_KERNEL);
4567 	if (!iter->buffer_iter)
4568 		goto release;
4569 
4570 	/*
4571 	 * trace_find_next_entry() may need to save off iter->ent.
4572 	 * It will place it into the iter->temp buffer. As most
4573 	 * events are less than 128 bytes, allocate a buffer of that size.
4574 	 * If one is greater, then trace_find_next_entry() will
4575 	 * allocate a new buffer to adjust for the bigger iter->ent.
4576 	 * It's not critical if it fails to get allocated here.
4577 	 */
4578 	iter->temp = kmalloc(128, GFP_KERNEL);
4579 	if (iter->temp)
4580 		iter->temp_size = 128;
4581 
4582 	/*
4583 	 * trace_event_printf() may need to modify the given format
4584 	 * string to replace %p with %px so that it shows the real address
4585 	 * instead of a hash value. However, that is only needed for event
4586 	 * tracing; other tracers may not need it. Defer the allocation
4587 	 * until it is needed.
4588 	 */
4589 	iter->fmt = NULL;
4590 	iter->fmt_size = 0;
4591 
4592 	mutex_lock(&trace_types_lock);
4593 	iter->trace = tr->current_trace;
4594 
4595 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4596 		goto fail;
4597 
4598 	iter->tr = tr;
4599 
4600 #ifdef CONFIG_TRACER_MAX_TRACE
4601 	/* Currently only the top directory has a snapshot */
4602 	if (tr->current_trace->print_max || snapshot)
4603 		iter->array_buffer = &tr->max_buffer;
4604 	else
4605 #endif
4606 		iter->array_buffer = &tr->array_buffer;
4607 	iter->snapshot = snapshot;
4608 	iter->pos = -1;
4609 	iter->cpu_file = tracing_get_cpu(inode);
4610 	mutex_init(&iter->mutex);
4611 
4612 	/* Notify the tracer early; before we stop tracing. */
4613 	if (iter->trace->open)
4614 		iter->trace->open(iter);
4615 
4616 	/* Annotate start of buffers if we had overruns */
4617 	if (ring_buffer_overruns(iter->array_buffer->buffer))
4618 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
4619 
4620 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
4621 	if (trace_clocks[tr->clock_id].in_ns)
4622 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4623 
4624 	/*
4625 	 * If pause-on-trace is enabled, then stop the trace while
4626 	 * dumping, unless this is the "snapshot" file
4627 	 */
4628 	if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4629 		tracing_stop_tr(tr);
4630 
4631 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4632 		for_each_tracing_cpu(cpu) {
4633 			iter->buffer_iter[cpu] =
4634 				ring_buffer_read_prepare(iter->array_buffer->buffer,
4635 							 cpu, GFP_KERNEL);
4636 		}
4637 		ring_buffer_read_prepare_sync();
4638 		for_each_tracing_cpu(cpu) {
4639 			ring_buffer_read_start(iter->buffer_iter[cpu]);
4640 			tracing_iter_reset(iter, cpu);
4641 		}
4642 	} else {
4643 		cpu = iter->cpu_file;
4644 		iter->buffer_iter[cpu] =
4645 			ring_buffer_read_prepare(iter->array_buffer->buffer,
4646 						 cpu, GFP_KERNEL);
4647 		ring_buffer_read_prepare_sync();
4648 		ring_buffer_read_start(iter->buffer_iter[cpu]);
4649 		tracing_iter_reset(iter, cpu);
4650 	}
4651 
4652 	mutex_unlock(&trace_types_lock);
4653 
4654 	return iter;
4655 
4656  fail:
4657 	mutex_unlock(&trace_types_lock);
4658 	free_trace_iter_content(iter);
4659 release:
4660 	seq_release_private(inode, file);
4661 	return ERR_PTR(-ENOMEM);
4662 }
4663 
4664 int tracing_open_generic(struct inode *inode, struct file *filp)
4665 {
4666 	int ret;
4667 
4668 	ret = tracing_check_open_get_tr(NULL);
4669 	if (ret)
4670 		return ret;
4671 
4672 	filp->private_data = inode->i_private;
4673 	return 0;
4674 }
4675 
4676 bool tracing_is_disabled(void)
4677 {
4678 	return tracing_disabled;
4679 }
4680 
4681 /*
4682  * Open and update trace_array ref count.
4683  * Must have the current trace_array passed to it.
4684  */
4685 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4686 {
4687 	struct trace_array *tr = inode->i_private;
4688 	int ret;
4689 
4690 	ret = tracing_check_open_get_tr(tr);
4691 	if (ret)
4692 		return ret;
4693 
4694 	filp->private_data = inode->i_private;
4695 
4696 	return 0;
4697 }
4698 
4699 /*
4700  * The private pointer of the inode is the trace_event_file.
4701  * Update the tr ref count associated to it.
4702  */
4703 int tracing_open_file_tr(struct inode *inode, struct file *filp)
4704 {
4705 	struct trace_event_file *file = inode->i_private;
4706 	int ret;
4707 
4708 	ret = tracing_check_open_get_tr(file->tr);
4709 	if (ret)
4710 		return ret;
4711 
4712 	mutex_lock(&event_mutex);
4713 
4714 	/* Fail if the file is marked for removal */
4715 	if (file->flags & EVENT_FILE_FL_FREED) {
4716 		trace_array_put(file->tr);
4717 		ret = -ENODEV;
4718 	} else {
4719 		event_file_get(file);
4720 	}
4721 
4722 	mutex_unlock(&event_mutex);
4723 	if (ret)
4724 		return ret;
4725 
4726 	filp->private_data = inode->i_private;
4727 
4728 	return 0;
4729 }
4730 
4731 int tracing_release_file_tr(struct inode *inode, struct file *filp)
4732 {
4733 	struct trace_event_file *file = inode->i_private;
4734 
4735 	trace_array_put(file->tr);
4736 	event_file_put(file);
4737 
4738 	return 0;
4739 }
4740 
4741 int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
4742 {
4743 	tracing_release_file_tr(inode, filp);
4744 	return single_release(inode, filp);
4745 }
4746 
4747 static int tracing_mark_open(struct inode *inode, struct file *filp)
4748 {
4749 	stream_open(inode, filp);
4750 	return tracing_open_generic_tr(inode, filp);
4751 }
4752 
4753 static int tracing_release(struct inode *inode, struct file *file)
4754 {
4755 	struct trace_array *tr = inode->i_private;
4756 	struct seq_file *m = file->private_data;
4757 	struct trace_iterator *iter;
4758 	int cpu;
4759 
4760 	if (!(file->f_mode & FMODE_READ)) {
4761 		trace_array_put(tr);
4762 		return 0;
4763 	}
4764 
4765 	/* Writes do not use seq_file */
4766 	iter = m->private;
4767 	mutex_lock(&trace_types_lock);
4768 
4769 	for_each_tracing_cpu(cpu) {
4770 		if (iter->buffer_iter[cpu])
4771 			ring_buffer_read_finish(iter->buffer_iter[cpu]);
4772 	}
4773 
4774 	if (iter->trace && iter->trace->close)
4775 		iter->trace->close(iter);
4776 
4777 	if (!iter->snapshot && tr->stop_count)
4778 		/* reenable tracing if it was previously enabled */
4779 		tracing_start_tr(tr);
4780 
4781 	__trace_array_put(tr);
4782 
4783 	mutex_unlock(&trace_types_lock);
4784 
4785 	free_trace_iter_content(iter);
4786 	seq_release_private(inode, file);
4787 
4788 	return 0;
4789 }
4790 
4791 int tracing_release_generic_tr(struct inode *inode, struct file *file)
4792 {
4793 	struct trace_array *tr = inode->i_private;
4794 
4795 	trace_array_put(tr);
4796 	return 0;
4797 }
4798 
4799 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4800 {
4801 	struct trace_array *tr = inode->i_private;
4802 
4803 	trace_array_put(tr);
4804 
4805 	return single_release(inode, file);
4806 }
4807 
4808 static int tracing_open(struct inode *inode, struct file *file)
4809 {
4810 	struct trace_array *tr = inode->i_private;
4811 	struct trace_iterator *iter;
4812 	int ret;
4813 
4814 	ret = tracing_check_open_get_tr(tr);
4815 	if (ret)
4816 		return ret;
4817 
4818 	/* If this file was open for write, then erase contents */
4819 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4820 		int cpu = tracing_get_cpu(inode);
4821 		struct array_buffer *trace_buf = &tr->array_buffer;
4822 
4823 #ifdef CONFIG_TRACER_MAX_TRACE
4824 		if (tr->current_trace->print_max)
4825 			trace_buf = &tr->max_buffer;
4826 #endif
4827 
4828 		if (cpu == RING_BUFFER_ALL_CPUS)
4829 			tracing_reset_online_cpus(trace_buf);
4830 		else
4831 			tracing_reset_cpu(trace_buf, cpu);
4832 	}
4833 
4834 	if (file->f_mode & FMODE_READ) {
4835 		iter = __tracing_open(inode, file, false);
4836 		if (IS_ERR(iter))
4837 			ret = PTR_ERR(iter);
4838 		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4839 			iter->iter_flags |= TRACE_FILE_LAT_FMT;
4840 	}
4841 
4842 	if (ret < 0)
4843 		trace_array_put(tr);
4844 
4845 	return ret;
4846 }
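
/*
 * Usage note: this truncate-on-write-open path is what makes
 *
 *	# echo > /sys/kernel/tracing/trace
 *
 * clear the buffer: the shell redirection opens the file with FMODE_WRITE
 * and O_TRUNC, so the reset above runs before any iterator is created.
 */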
4847 
4848 /*
4849  * Some tracers are not suitable for instance buffers.
4850  * A tracer is always available for the global array (toplevel)
4851  * or if it explicitly states that it is.
4852  */
4853 static bool
4854 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4855 {
4856 #ifdef CONFIG_TRACER_SNAPSHOT
4857 	/* arrays with mapped buffer range do not have snapshots */
4858 	if (tr->range_addr_start && t->use_max_tr)
4859 		return false;
4860 #endif
4861 	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4862 }
4863 
4864 /* Find the next tracer that this trace array may use */
4865 static struct tracer *
4866 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4867 {
4868 	while (t && !trace_ok_for_array(t, tr))
4869 		t = t->next;
4870 
4871 	return t;
4872 }
4873 
4874 static void *
4875 t_next(struct seq_file *m, void *v, loff_t *pos)
4876 {
4877 	struct trace_array *tr = m->private;
4878 	struct tracer *t = v;
4879 
4880 	(*pos)++;
4881 
4882 	if (t)
4883 		t = get_tracer_for_array(tr, t->next);
4884 
4885 	return t;
4886 }
4887 
4888 static void *t_start(struct seq_file *m, loff_t *pos)
4889 {
4890 	struct trace_array *tr = m->private;
4891 	struct tracer *t;
4892 	loff_t l = 0;
4893 
4894 	mutex_lock(&trace_types_lock);
4895 
4896 	t = get_tracer_for_array(tr, trace_types);
4897 	for (; t && l < *pos; t = t_next(m, t, &l))
4898 			;
4899 
4900 	return t;
4901 }
4902 
4903 static void t_stop(struct seq_file *m, void *p)
4904 {
4905 	mutex_unlock(&trace_types_lock);
4906 }
4907 
4908 static int t_show(struct seq_file *m, void *v)
4909 {
4910 	struct tracer *t = v;
4911 
4912 	if (!t)
4913 		return 0;
4914 
4915 	seq_puts(m, t->name);
4916 	if (t->next)
4917 		seq_putc(m, ' ');
4918 	else
4919 		seq_putc(m, '\n');
4920 
4921 	return 0;
4922 }
4923 
4924 static const struct seq_operations show_traces_seq_ops = {
4925 	.start		= t_start,
4926 	.next		= t_next,
4927 	.stop		= t_stop,
4928 	.show		= t_show,
4929 };
4930 
4931 static int show_traces_open(struct inode *inode, struct file *file)
4932 {
4933 	struct trace_array *tr = inode->i_private;
4934 	struct seq_file *m;
4935 	int ret;
4936 
4937 	ret = tracing_check_open_get_tr(tr);
4938 	if (ret)
4939 		return ret;
4940 
4941 	ret = seq_open(file, &show_traces_seq_ops);
4942 	if (ret) {
4943 		trace_array_put(tr);
4944 		return ret;
4945 	}
4946 
4947 	m = file->private_data;
4948 	m->private = tr;
4949 
4950 	return 0;
4951 }
4952 
4953 static int tracing_seq_release(struct inode *inode, struct file *file)
4954 {
4955 	struct trace_array *tr = inode->i_private;
4956 
4957 	trace_array_put(tr);
4958 	return seq_release(inode, file);
4959 }
4960 
4961 static ssize_t
4962 tracing_write_stub(struct file *filp, const char __user *ubuf,
4963 		   size_t count, loff_t *ppos)
4964 {
4965 	return count;
4966 }
4967 
4968 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4969 {
4970 	int ret;
4971 
4972 	if (file->f_mode & FMODE_READ)
4973 		ret = seq_lseek(file, offset, whence);
4974 	else
4975 		file->f_pos = ret = 0;
4976 
4977 	return ret;
4978 }
4979 
4980 static const struct file_operations tracing_fops = {
4981 	.open		= tracing_open,
4982 	.read		= seq_read,
4983 	.read_iter	= seq_read_iter,
4984 	.splice_read	= copy_splice_read,
4985 	.write		= tracing_write_stub,
4986 	.llseek		= tracing_lseek,
4987 	.release	= tracing_release,
4988 };
4989 
4990 static const struct file_operations show_traces_fops = {
4991 	.open		= show_traces_open,
4992 	.read		= seq_read,
4993 	.llseek		= seq_lseek,
4994 	.release	= tracing_seq_release,
4995 };
4996 
4997 static ssize_t
4998 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4999 		     size_t count, loff_t *ppos)
5000 {
5001 	struct trace_array *tr = file_inode(filp)->i_private;
5002 	char *mask_str;
5003 	int len;
5004 
5005 	len = snprintf(NULL, 0, "%*pb\n",
5006 		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
5007 	mask_str = kmalloc(len, GFP_KERNEL);
5008 	if (!mask_str)
5009 		return -ENOMEM;
5010 
5011 	len = snprintf(mask_str, len, "%*pb\n",
5012 		       cpumask_pr_args(tr->tracing_cpumask));
5013 	if (len >= count) {
5014 		count = -EINVAL;
5015 		goto out_err;
5016 	}
5017 	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5018 
5019 out_err:
5020 	kfree(mask_str);
5021 
5022 	return count;
5023 }
5024 
5025 int tracing_set_cpumask(struct trace_array *tr,
5026 			cpumask_var_t tracing_cpumask_new)
5027 {
5028 	int cpu;
5029 
5030 	if (!tr)
5031 		return -EINVAL;
5032 
5033 	local_irq_disable();
5034 	arch_spin_lock(&tr->max_lock);
5035 	for_each_tracing_cpu(cpu) {
5036 		/*
5037 		 * Increase/decrease the disabled counter if we are
5038 		 * about to flip a bit in the cpumask:
5039 		 */
5040 		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5041 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5042 			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5043 			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5044 #ifdef CONFIG_TRACER_MAX_TRACE
5045 			ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5046 #endif
5047 		}
5048 		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5049 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5050 			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5051 			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5052 #ifdef CONFIG_TRACER_MAX_TRACE
5053 			ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5054 #endif
5055 		}
5056 	}
5057 	arch_spin_unlock(&tr->max_lock);
5058 	local_irq_enable();
5059 
5060 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5061 
5062 	return 0;
5063 }
5064 
5065 static ssize_t
5066 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5067 		      size_t count, loff_t *ppos)
5068 {
5069 	struct trace_array *tr = file_inode(filp)->i_private;
5070 	cpumask_var_t tracing_cpumask_new;
5071 	int err;
5072 
5073 	if (count == 0 || count > KMALLOC_MAX_SIZE)
5074 		return -EINVAL;
5075 
5076 	if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5077 		return -ENOMEM;
5078 
5079 	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5080 	if (err)
5081 		goto err_free;
5082 
5083 	err = tracing_set_cpumask(tr, tracing_cpumask_new);
5084 	if (err)
5085 		goto err_free;
5086 
5087 	free_cpumask_var(tracing_cpumask_new);
5088 
5089 	return count;
5090 
5091 err_free:
5092 	free_cpumask_var(tracing_cpumask_new);
5093 
5094 	return err;
5095 }
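
/*
 * Usage sketch: the mask is parsed by cpumask_parse_user(), i.e. as the
 * same hexadecimal bitmask that tracing_cpumask_read() prints. On an
 * 8-CPU system, for example,
 *
 *	# echo 0f > /sys/kernel/tracing/tracing_cpumask
 *
 * limits tracing to CPUs 0-3; recording on CPUs 4-7 is disabled by
 * tracing_set_cpumask() above.
 */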
5096 
5097 static const struct file_operations tracing_cpumask_fops = {
5098 	.open		= tracing_open_generic_tr,
5099 	.read		= tracing_cpumask_read,
5100 	.write		= tracing_cpumask_write,
5101 	.release	= tracing_release_generic_tr,
5102 	.llseek		= generic_file_llseek,
5103 };
5104 
5105 static int tracing_trace_options_show(struct seq_file *m, void *v)
5106 {
5107 	struct tracer_opt *trace_opts;
5108 	struct trace_array *tr = m->private;
5109 	u32 tracer_flags;
5110 	int i;
5111 
5112 	guard(mutex)(&trace_types_lock);
5113 
5114 	tracer_flags = tr->current_trace->flags->val;
5115 	trace_opts = tr->current_trace->flags->opts;
5116 
5117 	for (i = 0; trace_options[i]; i++) {
5118 		if (tr->trace_flags & (1 << i))
5119 			seq_printf(m, "%s\n", trace_options[i]);
5120 		else
5121 			seq_printf(m, "no%s\n", trace_options[i]);
5122 	}
5123 
5124 	for (i = 0; trace_opts[i].name; i++) {
5125 		if (tracer_flags & trace_opts[i].bit)
5126 			seq_printf(m, "%s\n", trace_opts[i].name);
5127 		else
5128 			seq_printf(m, "no%s\n", trace_opts[i].name);
5129 	}
5130 
5131 	return 0;
5132 }
5133 
5134 static int __set_tracer_option(struct trace_array *tr,
5135 			       struct tracer_flags *tracer_flags,
5136 			       struct tracer_opt *opts, int neg)
5137 {
5138 	struct tracer *trace = tracer_flags->trace;
5139 	int ret;
5140 
5141 	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5142 	if (ret)
5143 		return ret;
5144 
5145 	if (neg)
5146 		tracer_flags->val &= ~opts->bit;
5147 	else
5148 		tracer_flags->val |= opts->bit;
5149 	return 0;
5150 }
5151 
5152 /* Try to assign a tracer specific option */
5153 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5154 {
5155 	struct tracer *trace = tr->current_trace;
5156 	struct tracer_flags *tracer_flags = trace->flags;
5157 	struct tracer_opt *opts = NULL;
5158 	int i;
5159 
5160 	for (i = 0; tracer_flags->opts[i].name; i++) {
5161 		opts = &tracer_flags->opts[i];
5162 
5163 		if (strcmp(cmp, opts->name) == 0)
5164 			return __set_tracer_option(tr, trace->flags, opts, neg);
5165 	}
5166 
5167 	return -EINVAL;
5168 }
5169 
5170 /* Some tracers require overwrite to stay enabled */
5171 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5172 {
5173 	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5174 		return -1;
5175 
5176 	return 0;
5177 }
5178 
5179 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5180 {
5181 	if ((mask == TRACE_ITER_RECORD_TGID) ||
5182 	    (mask == TRACE_ITER_RECORD_CMD) ||
5183 	    (mask == TRACE_ITER_TRACE_PRINTK))
5184 		lockdep_assert_held(&event_mutex);
5185 
5186 	/* do nothing if flag is already set */
5187 	if (!!(tr->trace_flags & mask) == !!enabled)
5188 		return 0;
5189 
5190 	/* Give the tracer a chance to approve the change */
5191 	if (tr->current_trace->flag_changed)
5192 		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5193 			return -EINVAL;
5194 
5195 	if (mask == TRACE_ITER_TRACE_PRINTK) {
5196 		if (enabled) {
5197 			update_printk_trace(tr);
5198 		} else {
5199 			/*
5200 			 * The global_trace cannot clear this.
5201 			 * Its flag only gets cleared if another instance sets it.
5202 			 */
5203 			if (printk_trace == &global_trace)
5204 				return -EINVAL;
5205 			/*
5206 			 * An instance must always have it set;
5207 			 * by default, that is the global_trace instance.
5208 			 */
5209 			if (printk_trace == tr)
5210 				update_printk_trace(&global_trace);
5211 		}
5212 	}
5213 
5214 	if (enabled)
5215 		tr->trace_flags |= mask;
5216 	else
5217 		tr->trace_flags &= ~mask;
5218 
5219 	if (mask == TRACE_ITER_RECORD_CMD)
5220 		trace_event_enable_cmd_record(enabled);
5221 
5222 	if (mask == TRACE_ITER_RECORD_TGID) {
5223 
5224 		if (trace_alloc_tgid_map() < 0) {
5225 			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5226 			return -ENOMEM;
5227 		}
5228 
5229 		trace_event_enable_tgid_record(enabled);
5230 	}
5231 
5232 	if (mask == TRACE_ITER_EVENT_FORK)
5233 		trace_event_follow_fork(tr, enabled);
5234 
5235 	if (mask == TRACE_ITER_FUNC_FORK)
5236 		ftrace_pid_follow_fork(tr, enabled);
5237 
5238 	if (mask == TRACE_ITER_OVERWRITE) {
5239 		ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5240 #ifdef CONFIG_TRACER_MAX_TRACE
5241 		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5242 #endif
5243 	}
5244 
5245 	if (mask == TRACE_ITER_PRINTK) {
5246 		trace_printk_start_stop_comm(enabled);
5247 		trace_printk_control(enabled);
5248 	}
5249 
5250 	return 0;
5251 }
5252 
5253 int trace_set_options(struct trace_array *tr, char *option)
5254 {
5255 	char *cmp;
5256 	int neg = 0;
5257 	int ret;
5258 	size_t orig_len = strlen(option);
5259 	int len;
5260 
5261 	cmp = strstrip(option);
5262 
5263 	len = str_has_prefix(cmp, "no");
5264 	if (len)
5265 		neg = 1;
5266 
5267 	cmp += len;
5268 
5269 	mutex_lock(&event_mutex);
5270 	mutex_lock(&trace_types_lock);
5271 
5272 	ret = match_string(trace_options, -1, cmp);
5273 	/* If no option could be set, test the specific tracer options */
5274 	if (ret < 0)
5275 		ret = set_tracer_option(tr, cmp, neg);
5276 	else
5277 		ret = set_tracer_flag(tr, 1 << ret, !neg);
5278 
5279 	mutex_unlock(&trace_types_lock);
5280 	mutex_unlock(&event_mutex);
5281 
5282 	/*
5283 	 * If the first trailing whitespace is replaced with '\0' by strstrip,
5284 	 * turn it back into a space.
5285 	 */
5286 	if (orig_len > strlen(option))
5287 		option[strlen(option)] = ' ';
5288 
5289 	return ret;
5290 }
5291 
5292 static void __init apply_trace_boot_options(void)
5293 {
5294 	char *buf = trace_boot_options_buf;
5295 	char *option;
5296 
5297 	while (true) {
5298 		option = strsep(&buf, ",");
5299 
5300 		if (!option)
5301 			break;
5302 
5303 		if (*option)
5304 			trace_set_options(&global_trace, option);
5305 
5306 		/* Put back the comma to allow this to be called again */
5307 		if (buf)
5308 			*(buf - 1) = ',';
5309 	}
5310 }
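
/*
 * For example, booting with
 *
 *	trace_options=sym-offset,noirq-info,stacktrace
 *
 * hands each comma-separated token to trace_set_options(), exactly as if
 * it had been written to the trace_options file after boot.
 */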
5311 
5312 static ssize_t
5313 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5314 			size_t cnt, loff_t *ppos)
5315 {
5316 	struct seq_file *m = filp->private_data;
5317 	struct trace_array *tr = m->private;
5318 	char buf[64];
5319 	int ret;
5320 
5321 	if (cnt >= sizeof(buf))
5322 		return -EINVAL;
5323 
5324 	if (copy_from_user(buf, ubuf, cnt))
5325 		return -EFAULT;
5326 
5327 	buf[cnt] = 0;
5328 
5329 	ret = trace_set_options(tr, buf);
5330 	if (ret < 0)
5331 		return ret;
5332 
5333 	*ppos += cnt;
5334 
5335 	return cnt;
5336 }
5337 
5338 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5339 {
5340 	struct trace_array *tr = inode->i_private;
5341 	int ret;
5342 
5343 	ret = tracing_check_open_get_tr(tr);
5344 	if (ret)
5345 		return ret;
5346 
5347 	ret = single_open(file, tracing_trace_options_show, inode->i_private);
5348 	if (ret < 0)
5349 		trace_array_put(tr);
5350 
5351 	return ret;
5352 }
5353 
5354 static const struct file_operations tracing_iter_fops = {
5355 	.open		= tracing_trace_options_open,
5356 	.read		= seq_read,
5357 	.llseek		= seq_lseek,
5358 	.release	= tracing_single_release_tr,
5359 	.write		= tracing_trace_options_write,
5360 };
5361 
5362 static const char readme_msg[] =
5363 	"tracing mini-HOWTO:\n\n"
5364 	"By default tracefs removes all OTH file permission bits.\n"
5365 	"When mounting tracefs an optional group id can be specified\n"
5366 	"which adds the group to every directory and file in tracefs:\n\n"
5367 	"\t e.g. mount -t tracefs [-o [gid=<gid>]] nodev /sys/kernel/tracing\n\n"
5368 	"# echo 0 > tracing_on : quick way to disable tracing\n"
5369 	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5370 	" Important files:\n"
5371 	"  trace\t\t\t- The static contents of the buffer\n"
5372 	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
5373 	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5374 	"  current_tracer\t- function and latency tracers\n"
5375 	"  available_tracers\t- list of configured tracers for current_tracer\n"
5376 	"  error_log\t- error log for failed commands (that support it)\n"
5377 	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
5378 	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
5379 	"  trace_clock\t\t- change the clock used to order events\n"
5380 	"       local:   Per cpu clock but may not be synced across CPUs\n"
5381 	"      global:   Synced across CPUs but slows tracing down.\n"
5382 	"     counter:   Not a clock, but just an increment\n"
5383 	"      uptime:   Jiffy counter from time of boot\n"
5384 	"        perf:   Same clock that perf events use\n"
5385 #ifdef CONFIG_X86_64
5386 	"     x86-tsc:   TSC cycle counter\n"
5387 #endif
5388 	"\n  timestamp_mode\t- view the mode used to timestamp events\n"
5389 	"       delta:   Delta difference against a buffer-wide timestamp\n"
5390 	"    absolute:   Absolute (standalone) timestamp\n"
5391 	"\n  trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
5392 	"\n  trace_marker_raw\t\t- Writes into this file insert binary data into the kernel buffer\n"
5393 	"  tracing_cpumask\t- Limit which CPUs to trace\n"
5394 	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5395 	"\t\t\t  Remove sub-buffer with rmdir\n"
5396 	"  trace_options\t\t- Set format or modify how tracing happens\n"
5397 	"\t\t\t  Disable an option by prefixing 'no' to the\n"
5398 	"\t\t\t  option name\n"
5399 	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5400 #ifdef CONFIG_DYNAMIC_FTRACE
5401 	"\n  available_filter_functions - list of functions that can be filtered on\n"
5402 	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
5403 	"\t\t\t  functions\n"
5404 	"\t     accepts: func_full_name or glob-matching-pattern\n"
5405 	"\t     modules: Can select a group via module\n"
5406 	"\t      Format: :mod:<module-name>\n"
5407 	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
5408 	"\t    triggers: a command to perform when function is hit\n"
5409 	"\t      Format: <function>:<trigger>[:count]\n"
5410 	"\t     trigger: traceon, traceoff\n"
5411 	"\t\t      enable_event:<system>:<event>\n"
5412 	"\t\t      disable_event:<system>:<event>\n"
5413 #ifdef CONFIG_STACKTRACE
5414 	"\t\t      stacktrace\n"
5415 #endif
5416 #ifdef CONFIG_TRACER_SNAPSHOT
5417 	"\t\t      snapshot\n"
5418 #endif
5419 	"\t\t      dump\n"
5420 	"\t\t      cpudump\n"
5421 	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
5422 	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
5423 	"\t     The first one will disable tracing every time do_fault is hit\n"
5424 	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
5425 	"\t       The first time do trap is hit and it disables tracing, the\n"
5426 	"\t       counter will decrement to 2. If tracing is already disabled,\n"
5427 	"\t       the counter will not decrement. It only decrements when the\n"
5428 	"\t       trigger did work\n"
5429 	"\t     To remove trigger without count:\n"
5430 	"\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
5431 	"\t     To remove trigger with a count:\n"
5432 	"\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5433 	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
5434 	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5435 	"\t    modules: Can select a group via module command :mod:\n"
5436 	"\t    Does not accept triggers\n"
5437 #endif /* CONFIG_DYNAMIC_FTRACE */
5438 #ifdef CONFIG_FUNCTION_TRACER
5439 	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5440 	"\t\t    (function)\n"
5441 	"  set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5442 	"\t\t    (function)\n"
5443 #endif
5444 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5445 	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5446 	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5447 	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5448 #endif
5449 #ifdef CONFIG_TRACER_SNAPSHOT
5450 	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
5451 	"\t\t\t  snapshot buffer. Read the contents for more\n"
5452 	"\t\t\t  information\n"
5453 #endif
5454 #ifdef CONFIG_STACK_TRACER
5455 	"  stack_trace\t\t- Shows the max stack trace when active\n"
5456 	"  stack_max_size\t- Shows current max stack size that was traced\n"
5457 	"\t\t\t  Write into this file to reset the max size (trigger a\n"
5458 	"\t\t\t  new trace)\n"
5459 #ifdef CONFIG_DYNAMIC_FTRACE
5460 	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5461 	"\t\t\t  traces\n"
5462 #endif
5463 #endif /* CONFIG_STACK_TRACER */
5464 #ifdef CONFIG_DYNAMIC_EVENTS
5465 	"  dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5466 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5467 #endif
5468 #ifdef CONFIG_KPROBE_EVENTS
5469 	"  kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5470 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5471 #endif
5472 #ifdef CONFIG_UPROBE_EVENTS
5473 	"  uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5474 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5475 #endif
5476 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
5477     defined(CONFIG_FPROBE_EVENTS)
5478 	"\t  accepts: event-definitions (one definition per line)\n"
5479 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5480 	"\t   Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5481 	"\t           r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5482 #endif
5483 #ifdef CONFIG_FPROBE_EVENTS
5484 	"\t           f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5485 	"\t           t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
5486 #endif
5487 #ifdef CONFIG_HIST_TRIGGERS
5488 	"\t           s:[synthetic/]<event> <field> [<field>]\n"
5489 #endif
5490 	"\t           e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5491 	"\t           -:[<group>/][<event>]\n"
5492 #ifdef CONFIG_KPROBE_EVENTS
5493 	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5494   "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5495 #endif
5496 #ifdef CONFIG_UPROBE_EVENTS
5497   "   place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5498 #endif
5499 	"\t     args: <name>=fetcharg[:type]\n"
5500 	"\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5501 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5502 	"\t           $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5503 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
5504 	"\t           <argname>[->field[->field|.field...]],\n"
5505 #endif
5506 #else
5507 	"\t           $stack<index>, $stack, $retval, $comm,\n"
5508 #endif
5509 	"\t           +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5510 	"\t     kernel return probes support: $retval, $arg<N>, $comm\n"
5511 	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
5512 	"\t           b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5513 	"\t           symstr, %pd/%pD, <type>\\[<array-size>\\]\n"
5514 #ifdef CONFIG_HIST_TRIGGERS
5515 	"\t    field: <stype> <name>;\n"
5516 	"\t    stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5517 	"\t           [unsigned] char/int/long\n"
5518 #endif
5519 	"\t    efield: For event probes ('e' types), the field is one of the fields\n"
5520 	"\t            of the <attached-group>/<attached-event>.\n"
5521 #endif
5522 	"  set_event\t\t- Enables events by name written into it\n"
5523 	"\t\t\t  Can enable module events via: :mod:<module>\n"
5524 	"  events/\t\t- Directory containing all trace event subsystems:\n"
5525 	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5526 	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
5527 	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5528 	"\t\t\t  events\n"
5529 	"      filter\t\t- If set, only events passing filter are traced\n"
5530 	"  events/<system>/<event>/\t- Directory containing control files for\n"
5531 	"\t\t\t  <event>:\n"
5532 	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5533 	"      filter\t\t- If set, only events passing filter are traced\n"
5534 	"      trigger\t\t- If set, a command to perform when event is hit\n"
5535 	"\t    Format: <trigger>[:count][if <filter>]\n"
5536 	"\t   trigger: traceon, traceoff\n"
5537 	"\t            enable_event:<system>:<event>\n"
5538 	"\t            disable_event:<system>:<event>\n"
5539 #ifdef CONFIG_HIST_TRIGGERS
5540 	"\t            enable_hist:<system>:<event>\n"
5541 	"\t            disable_hist:<system>:<event>\n"
5542 #endif
5543 #ifdef CONFIG_STACKTRACE
5544 	"\t\t    stacktrace\n"
5545 #endif
5546 #ifdef CONFIG_TRACER_SNAPSHOT
5547 	"\t\t    snapshot\n"
5548 #endif
5549 #ifdef CONFIG_HIST_TRIGGERS
5550 	"\t\t    hist (see below)\n"
5551 #endif
5552 	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
5553 	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
5554 	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5555 	"\t                  events/block/block_unplug/trigger\n"
5556 	"\t   The first disables tracing every time block_unplug is hit.\n"
5557 	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
5558 	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
5559 	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5560 	"\t   Like function triggers, the counter is only decremented if it\n"
5561 	"\t    enabled or disabled tracing.\n"
5562 	"\t   To remove a trigger without a count:\n"
5563 	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
5564 	"\t   To remove a trigger with a count:\n"
5565 	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
5566 	"\t   Filters can be ignored when removing a trigger.\n"
5567 #ifdef CONFIG_HIST_TRIGGERS
5568 	"      hist trigger\t- If set, event hits are aggregated into a hash table\n"
5569 	"\t    Format: hist:keys=<field1[,field2,...]>\n"
5570 	"\t            [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5571 	"\t            [:values=<field1[,field2,...]>]\n"
5572 	"\t            [:sort=<field1[,field2,...]>]\n"
5573 	"\t            [:size=#entries]\n"
5574 	"\t            [:pause][:continue][:clear]\n"
5575 	"\t            [:name=histname1]\n"
5576 	"\t            [:nohitcount]\n"
5577 	"\t            [:<handler>.<action>]\n"
5578 	"\t            [if <filter>]\n\n"
5579 	"\t    Note, special fields can be used as well:\n"
5580 	"\t            common_timestamp - to record current timestamp\n"
5581 	"\t            common_cpu - to record the CPU the event happened on\n"
5582 	"\n"
5583 	"\t    A hist trigger variable can be:\n"
5584 	"\t        - a reference to a field e.g. x=common_timestamp,\n"
5585 	"\t        - a reference to another variable e.g. y=$x,\n"
5586 	"\t        - a numeric literal: e.g. ms_per_sec=1000,\n"
5587 	"\t        - an arithmetic expression: e.g. time_secs=common_timestamp/1000\n"
5588 	"\n"
5589 	"\t    hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5590 	"\t    multiplication(*) and division(/) operators. An operand can be either a\n"
5591 	"\t    variable reference, field or numeric literal.\n"
5592 	"\n"
5593 	"\t    When a matching event is hit, an entry is added to a hash\n"
5594 	"\t    table using the key(s) and value(s) named, and the value of a\n"
5595 	"\t    sum called 'hitcount' is incremented.  Keys and values\n"
5596 	"\t    correspond to fields in the event's format description.  Keys\n"
5597 	"\t    can be any field, or the special string 'common_stacktrace'.\n"
5598 	"\t    Compound keys consisting of up to two fields can be specified\n"
5599 	"\t    by the 'keys' keyword.  Values must correspond to numeric\n"
5600 	"\t    fields.  Sort keys consisting of up to two fields can be\n"
5601 	"\t    specified using the 'sort' keyword.  The sort direction can\n"
5602 	"\t    be modified by appending '.descending' or '.ascending' to a\n"
5603 	"\t    sort field.  The 'size' parameter can be used to specify more\n"
5604 	"\t    or fewer than the default 2048 entries for the hashtable size.\n"
5605 	"\t    If a hist trigger is given a name using the 'name' parameter,\n"
5606 	"\t    its histogram data will be shared with other triggers of the\n"
5607 	"\t    same name, and trigger hits will update this common data.\n\n"
5608 	"\t    Reading the 'hist' file for the event will dump the hash\n"
5609 	"\t    table in its entirety to stdout.  If there are multiple hist\n"
5610 	"\t    triggers attached to an event, there will be a table for each\n"
5611 	"\t    trigger in the output.  The table displayed for a named\n"
5612 	"\t    trigger will be the same as any other instance having the\n"
5613 	"\t    same name.  The default format used to display a given field\n"
5614 	"\t    can be modified by appending any of the following modifiers\n"
5615 	"\t    to the field name, as applicable:\n\n"
5616 	"\t            .hex        display a number as a hex value\n"
5617 	"\t            .sym        display an address as a symbol\n"
5618 	"\t            .sym-offset display an address as a symbol and offset\n"
5619 	"\t            .execname   display a common_pid as a program name\n"
5620 	"\t            .syscall    display a syscall id as a syscall name\n"
5621 	"\t            .log2       display log2 value rather than raw number\n"
5622 	"\t            .buckets=size  display values in groups of size rather than raw number\n"
5623 	"\t            .usecs      display a common_timestamp in microseconds\n"
5624 	"\t            .percent    display a number as a percentage value\n"
5625 	"\t            .graph      display a bar-graph of a value\n\n"
5626 	"\t    The 'pause' parameter can be used to pause an existing hist\n"
5627 	"\t    trigger or to start a hist trigger but not log any events\n"
5628 	"\t    until told to do so.  'continue' can be used to start or\n"
5629 	"\t    restart a paused hist trigger.\n\n"
5630 	"\t    The 'clear' parameter will clear the contents of a running\n"
5631 	"\t    hist trigger and leave its current paused/active state\n"
5632 	"\t    unchanged.\n\n"
5633 	"\t    The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5634 	"\t    raw hitcount in the histogram.\n\n"
5635 	"\t    The enable_hist and disable_hist triggers can be used to\n"
5636 	"\t    have one event conditionally start and stop another event's\n"
5637 	"\t    already-attached hist trigger.  The syntax is analogous to\n"
5638 	"\t    the enable_event and disable_event triggers.\n\n"
5639 	"\t    Hist trigger handlers and actions are executed whenever a\n"
5640 	"\t    histogram entry is added or updated.  They take the form:\n\n"
5641 	"\t        <handler>.<action>\n\n"
5642 	"\t    The available handlers are:\n\n"
5643 	"\t        onmatch(matching.event)  - invoke on addition or update\n"
5644 	"\t        onmax(var)               - invoke if var exceeds current max\n"
5645 	"\t        onchange(var)            - invoke action if var changes\n\n"
5646 	"\t    The available actions are:\n\n"
5647 	"\t        trace(<synthetic_event>,param list)  - generate synthetic event\n"
5648 	"\t        save(field,...)                      - save current event fields\n"
5649 #ifdef CONFIG_TRACER_SNAPSHOT
5650 	"\t        snapshot()                           - snapshot the trace buffer\n\n"
5651 #endif
5652 #ifdef CONFIG_SYNTH_EVENTS
5653 	"  events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5654 	"\t  Write into this file to define/undefine new synthetic events.\n"
5655 	"\t     example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
5656 #endif
5657 #endif
5658 ;
5659 
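/*
 * The readme_msg text above is what userspace sees when reading the
 * "README" file in tracefs (typically mounted at /sys/kernel/tracing),
 * e.g.:
 *
 *     cat /sys/kernel/tracing/README
 *
 * The file is registered elsewhere in this file with tracing_readme_fops
 * below; the read handler simply copies the static string out.
 */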
5660 static ssize_t
5661 tracing_readme_read(struct file *filp, char __user *ubuf,
5662 		       size_t cnt, loff_t *ppos)
5663 {
5664 	return simple_read_from_buffer(ubuf, cnt, ppos,
5665 					readme_msg, strlen(readme_msg));
5666 }
5667 
5668 static const struct file_operations tracing_readme_fops = {
5669 	.open		= tracing_open_generic,
5670 	.read		= tracing_readme_read,
5671 	.llseek		= generic_file_llseek,
5672 };
5673 
5674 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
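/*
 * The "eval_map" seq_file walks the trace_eval_maps list.  Each block on
 * that list is laid out as a head item (module + length), the real eval
 * map entries, and a tail item that chains to the next block (see
 * trace_insert_eval_map_file() below).  update_eval_map() detects the
 * tail item (its eval_string slot is NULL), follows its next pointer,
 * and skips the following block's head so that only real entries are
 * ever handed to eval_map_show().
 */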
5675 static union trace_eval_map_item *
5676 update_eval_map(union trace_eval_map_item *ptr)
5677 {
5678 	if (!ptr->map.eval_string) {
5679 		if (ptr->tail.next) {
5680 			ptr = ptr->tail.next;
5681 			/* Set ptr to the next real item (skip head) */
5682 			ptr++;
5683 		} else
5684 			return NULL;
5685 	}
5686 	return ptr;
5687 }
5688 
5689 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5690 {
5691 	union trace_eval_map_item *ptr = v;
5692 
5693 	/*
5694 	 * Paranoid! If ptr points to end, we don't want to increment past it.
5695 	 * This really should never happen.
5696 	 */
5697 	(*pos)++;
5698 	ptr = update_eval_map(ptr);
5699 	if (WARN_ON_ONCE(!ptr))
5700 		return NULL;
5701 
5702 	ptr++;
5703 	ptr = update_eval_map(ptr);
5704 
5705 	return ptr;
5706 }
5707 
5708 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5709 {
5710 	union trace_eval_map_item *v;
5711 	loff_t l = 0;
5712 
5713 	mutex_lock(&trace_eval_mutex);
5714 
5715 	v = trace_eval_maps;
5716 	if (v)
5717 		v++;
5718 
5719 	while (v && l < *pos) {
5720 		v = eval_map_next(m, v, &l);
5721 	}
5722 
5723 	return v;
5724 }
5725 
5726 static void eval_map_stop(struct seq_file *m, void *v)
5727 {
5728 	mutex_unlock(&trace_eval_mutex);
5729 }
5730 
5731 static int eval_map_show(struct seq_file *m, void *v)
5732 {
5733 	union trace_eval_map_item *ptr = v;
5734 
5735 	seq_printf(m, "%s %ld (%s)\n",
5736 		   ptr->map.eval_string, ptr->map.eval_value,
5737 		   ptr->map.system);
5738 
5739 	return 0;
5740 }
5741 
5742 static const struct seq_operations tracing_eval_map_seq_ops = {
5743 	.start		= eval_map_start,
5744 	.next		= eval_map_next,
5745 	.stop		= eval_map_stop,
5746 	.show		= eval_map_show,
5747 };
5748 
5749 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5750 {
5751 	int ret;
5752 
5753 	ret = tracing_check_open_get_tr(NULL);
5754 	if (ret)
5755 		return ret;
5756 
5757 	return seq_open(filp, &tracing_eval_map_seq_ops);
5758 }
5759 
5760 static const struct file_operations tracing_eval_map_fops = {
5761 	.open		= tracing_eval_map_open,
5762 	.read		= seq_read,
5763 	.llseek		= seq_lseek,
5764 	.release	= seq_release,
5765 };
5766 
5767 static inline union trace_eval_map_item *
5768 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5769 {
5770 	/* Return tail of array given the head */
5771 	return ptr + ptr->head.length + 1;
5772 }
5773 
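/*
 * Layout of one block allocated by trace_insert_eval_map_file():
 *
 *   [ head (mod, length) ][ map 0 ] ... [ map len-1 ][ tail (next) ]
 *
 * hence the len + 2 allocation below.  The final item is zeroed so that
 * it acts as the tail until another block is chained onto it.
 */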
5774 static void
5775 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5776 			   int len)
5777 {
5778 	struct trace_eval_map **stop;
5779 	struct trace_eval_map **map;
5780 	union trace_eval_map_item *map_array;
5781 	union trace_eval_map_item *ptr;
5782 
5783 	stop = start + len;
5784 
5785 	/*
5786 	 * The trace_eval_maps contains the map plus a head and tail item,
5787 	 * where the head holds the module and length of array, and the
5788 	 * tail holds a pointer to the next list.
5789 	 */
5790 	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5791 	if (!map_array) {
5792 		pr_warn("Unable to allocate trace eval mapping\n");
5793 		return;
5794 	}
5795 
5796 	guard(mutex)(&trace_eval_mutex);
5797 
5798 	if (!trace_eval_maps)
5799 		trace_eval_maps = map_array;
5800 	else {
5801 		ptr = trace_eval_maps;
5802 		for (;;) {
5803 			ptr = trace_eval_jmp_to_tail(ptr);
5804 			if (!ptr->tail.next)
5805 				break;
5806 			ptr = ptr->tail.next;
5807 
5808 		}
5809 		ptr->tail.next = map_array;
5810 	}
5811 	map_array->head.mod = mod;
5812 	map_array->head.length = len;
5813 	map_array++;
5814 
5815 	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5816 		map_array->map = **map;
5817 		map_array++;
5818 	}
5819 	memset(map_array, 0, sizeof(*map_array));
5820 }
5821 
5822 static void trace_create_eval_file(struct dentry *d_tracer)
5823 {
5824 	trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
5825 			  NULL, &tracing_eval_map_fops);
5826 }
5827 
5828 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5829 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5830 static inline void trace_insert_eval_map_file(struct module *mod,
5831 			      struct trace_eval_map **start, int len) { }
5832 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5833 
5834 static void trace_insert_eval_map(struct module *mod,
5835 				  struct trace_eval_map **start, int len)
5836 {
5837 	struct trace_eval_map **map;
5838 
5839 	if (len <= 0)
5840 		return;
5841 
5842 	map = start;
5843 
5844 	trace_event_eval_update(map, len);
5845 
5846 	trace_insert_eval_map_file(mod, start, len);
5847 }
5848 
5849 static ssize_t
5850 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5851 		       size_t cnt, loff_t *ppos)
5852 {
5853 	struct trace_array *tr = filp->private_data;
5854 	char buf[MAX_TRACER_SIZE+2];
5855 	int r;
5856 
5857 	mutex_lock(&trace_types_lock);
5858 	r = sprintf(buf, "%s\n", tr->current_trace->name);
5859 	mutex_unlock(&trace_types_lock);
5860 
5861 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5862 }
5863 
5864 int tracer_init(struct tracer *t, struct trace_array *tr)
5865 {
5866 	tracing_reset_online_cpus(&tr->array_buffer);
5867 	return t->init(tr);
5868 }
5869 
5870 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
5871 {
5872 	int cpu;
5873 
5874 	for_each_tracing_cpu(cpu)
5875 		per_cpu_ptr(buf->data, cpu)->entries = val;
5876 }
5877 
5878 static void update_buffer_entries(struct array_buffer *buf, int cpu)
5879 {
5880 	if (cpu == RING_BUFFER_ALL_CPUS) {
5881 		set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
5882 	} else {
5883 		per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
5884 	}
5885 }
5886 
5887 #ifdef CONFIG_TRACER_MAX_TRACE
5888 /* Resize @trace_buf's buffer to match the per-CPU entry counts of @size_buf */
5889 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5890 					struct array_buffer *size_buf, int cpu_id)
5891 {
5892 	int cpu, ret = 0;
5893 
5894 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
5895 		for_each_tracing_cpu(cpu) {
5896 			ret = ring_buffer_resize(trace_buf->buffer,
5897 				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5898 			if (ret < 0)
5899 				break;
5900 			per_cpu_ptr(trace_buf->data, cpu)->entries =
5901 				per_cpu_ptr(size_buf->data, cpu)->entries;
5902 		}
5903 	} else {
5904 		ret = ring_buffer_resize(trace_buf->buffer,
5905 				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5906 		if (ret == 0)
5907 			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5908 				per_cpu_ptr(size_buf->data, cpu_id)->entries;
5909 	}
5910 
5911 	return ret;
5912 }
5913 #endif /* CONFIG_TRACER_MAX_TRACE */
5914 
5915 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5916 					unsigned long size, int cpu)
5917 {
5918 	int ret;
5919 
5920 	/*
5921 	 * If kernel or user changes the size of the ring buffer
5922 	 * we use the size that was given, and we can forget about
5923 	 * expanding it later.
5924 	 */
5925 	trace_set_ring_buffer_expanded(tr);
5926 
5927 	/* May be called before buffers are initialized */
5928 	if (!tr->array_buffer.buffer)
5929 		return 0;
5930 
5931 	/* Do not allow tracing while resizing ring buffer */
5932 	tracing_stop_tr(tr);
5933 
5934 	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5935 	if (ret < 0)
5936 		goto out_start;
5937 
5938 #ifdef CONFIG_TRACER_MAX_TRACE
5939 	if (!tr->allocated_snapshot)
5940 		goto out;
5941 
5942 	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5943 	if (ret < 0) {
5944 		int r = resize_buffer_duplicate_size(&tr->array_buffer,
5945 						     &tr->array_buffer, cpu);
5946 		if (r < 0) {
5947 			/*
5948 			 * AARGH! We are left with different
5949 			 * size max buffer!!!!
5950 			 * The max buffer is our "snapshot" buffer.
5951 			 * When a tracer needs a snapshot (one of the
5952 			 * latency tracers), it swaps the max buffer
5953 			 * with the saved snap shot. We succeeded to
5954 			 * with the saved snapshot. We succeeded in updating
5955 			 * the size of the main buffer, but failed to
5956 			 * to reset the main buffer to the original size, we
5957 			 * failed there too. This is very unlikely to
5958 			 * happen, but if it does, warn and kill all
5959 			 * tracing.
5960 			 */
5961 			WARN_ON(1);
5962 			tracing_disabled = 1;
5963 		}
5964 		goto out_start;
5965 	}
5966 
5967 	update_buffer_entries(&tr->max_buffer, cpu);
5968 
5969  out:
5970 #endif /* CONFIG_TRACER_MAX_TRACE */
5971 
5972 	update_buffer_entries(&tr->array_buffer, cpu);
5973  out_start:
5974 	tracing_start_tr(tr);
5975 	return ret;
5976 }
5977 
5978 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5979 				  unsigned long size, int cpu_id)
5980 {
5981 	guard(mutex)(&trace_types_lock);
5982 
5983 	if (cpu_id != RING_BUFFER_ALL_CPUS) {
5984 		/* Make sure this CPU is enabled in the mask */
5985 		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask))
5986 			return -EINVAL;
5987 	}
5988 
5989 	return __tracing_resize_ring_buffer(tr, size, cpu_id);
5990 }
5991 
5992 struct trace_mod_entry {
5993 	unsigned long	mod_addr;
5994 	char		mod_name[MODULE_NAME_LEN];
5995 };
5996 
5997 struct trace_scratch {
5998 	unsigned long		text_addr;
5999 	unsigned long		nr_entries;
6000 	struct trace_mod_entry	entries[];
6001 };
6002 
6003 static DEFINE_MUTEX(scratch_mutex);
6004 
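/*
 * __inline_bsearch() comparator used by trace_adjust_address(): entry i
 * matches when the address falls in [entries[i].mod_addr,
 * entries[i+1].mod_addr).  The caller only searches the first
 * nr_entries - 1 elements, so ent[1] is always a valid entry to compare
 * against.
 */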
6005 static int cmp_mod_entry(const void *key, const void *pivot)
6006 {
6007 	unsigned long addr = (unsigned long)key;
6008 	const struct trace_mod_entry *ent = pivot;
6009 
6010 	if (addr >= ent[0].mod_addr && addr < ent[1].mod_addr)
6011 		return 0;
6012 	else
6013 		return addr - ent->mod_addr;
6014 }
6015 
6016 /**
6017  * trace_adjust_address() - Adjust prev boot address to current address.
6018  * @tr: Persistent ring buffer's trace_array.
6019  * @addr: Address recorded in @tr by a previous boot, to be adjusted to the current boot.
6020  */
6021 unsigned long trace_adjust_address(struct trace_array *tr, unsigned long addr)
6022 {
6023 	struct trace_module_delta *module_delta;
6024 	struct trace_scratch *tscratch;
6025 	struct trace_mod_entry *entry;
6026 	int idx = 0, nr_entries;
6027 
6028 	/* If we don't have last boot delta, return the address */
6029 	if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
6030 		return addr;
6031 
6032 	/* tr->module_delta must be protected by rcu. */
6033 	guard(rcu)();
6034 	tscratch = tr->scratch;
6035 	/* If there is no tscratch, module_delta must be NULL. */
6036 	module_delta = READ_ONCE(tr->module_delta);
6037 	if (!module_delta || tscratch->entries[0].mod_addr > addr)
6038 		return addr + tr->text_delta;
6039 
6040 	/* Note that entries must be sorted. */
6041 	nr_entries = tscratch->nr_entries;
6042 	if (nr_entries == 1 ||
6043 	    tscratch->entries[nr_entries - 1].mod_addr < addr)
6044 		idx = nr_entries - 1;
6045 	else {
6046 		entry = __inline_bsearch((void *)addr,
6047 				tscratch->entries,
6048 				nr_entries - 1,
6049 				sizeof(tscratch->entries[0]),
6050 				cmp_mod_entry);
6051 		if (entry)
6052 			idx = entry - tscratch->entries;
6053 	}
6054 
6055 	return addr + module_delta->delta[idx];
6056 }
6057 
6058 #ifdef CONFIG_MODULES
6059 static int save_mod(struct module *mod, void *data)
6060 {
6061 	struct trace_array *tr = data;
6062 	struct trace_scratch *tscratch;
6063 	struct trace_mod_entry *entry;
6064 	unsigned int size;
6065 
6066 	tscratch = tr->scratch;
6067 	if (!tscratch)
6068 		return -1;
6069 	size = tr->scratch_size;
6070 
6071 	if (struct_size(tscratch, entries, tscratch->nr_entries + 1) > size)
6072 		return -1;
6073 
6074 	entry = &tscratch->entries[tscratch->nr_entries];
6075 
6076 	tscratch->nr_entries++;
6077 
6078 	entry->mod_addr = (unsigned long)mod->mem[MOD_TEXT].base;
6079 	strscpy(entry->mod_name, mod->name);
6080 
6081 	return 0;
6082 }
6083 #else
6084 static int save_mod(struct module *mod, void *data)
6085 {
6086 	return 0;
6087 }
6088 #endif
6089 
6090 static void update_last_data(struct trace_array *tr)
6091 {
6092 	struct trace_module_delta *module_delta;
6093 	struct trace_scratch *tscratch;
6094 
6095 	if (!(tr->flags & TRACE_ARRAY_FL_BOOT))
6096 		return;
6097 
6098 	if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
6099 		return;
6100 
6101 	/* Only clear and update the buffer if it holds previous boot data. */
6102 	tr->flags &= ~TRACE_ARRAY_FL_LAST_BOOT;
6103 
6104 	/* Reset the module list and reload them */
6105 	if (tr->scratch) {
6106 		struct trace_scratch *tscratch = tr->scratch;
6107 
6108 		memset(tscratch->entries, 0,
6109 		       flex_array_size(tscratch, entries, tscratch->nr_entries));
6110 		tscratch->nr_entries = 0;
6111 
6112 		guard(mutex)(&scratch_mutex);
6113 		module_for_each_mod(save_mod, tr);
6114 	}
6115 
6116 	/*
6117 	 * All CPU buffers need to be cleared, as events from the previous
6118 	 * boot must not be mixed with events from this boot or the trace
6119 	 * becomes confusing. Clear every CPU buffer, even those that may
6120 	 * currently be offline.
6121 	 */
6122 	tracing_reset_all_cpus(&tr->array_buffer);
6123 
6124 	/* Using current data now */
6125 	tr->text_delta = 0;
6126 
6127 	if (!tr->scratch)
6128 		return;
6129 
6130 	tscratch = tr->scratch;
6131 	module_delta = READ_ONCE(tr->module_delta);
6132 	WRITE_ONCE(tr->module_delta, NULL);
6133 	kfree_rcu(module_delta, rcu);
6134 
6135 	/* Set the persistent ring buffer meta data to this address */
6136 	tscratch->text_addr = (unsigned long)_text;
6137 }
6138 
6139 /**
6140  * tracing_update_buffers - used by tracing facility to expand ring buffers
6141  * @tr: The tracing instance
6142  *
6143  * To save memory on a system where tracing is configured in but never
6144  * used, the ring buffers start out at a minimum size. Once a user
6145  * starts to use the tracing facility, they need to grow to their
6146  * default size.
6147  *
6148  * This function is to be called when a tracer is about to be used.
6149  */
6150 int tracing_update_buffers(struct trace_array *tr)
6151 {
6152 	int ret = 0;
6153 
6154 	mutex_lock(&trace_types_lock);
6155 
6156 	update_last_data(tr);
6157 
6158 	if (!tr->ring_buffer_expanded)
6159 		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6160 						RING_BUFFER_ALL_CPUS);
6161 	mutex_unlock(&trace_types_lock);
6162 
6163 	return ret;
6164 }
6165 
6166 struct trace_option_dentry;
6167 
6168 static void
6169 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6170 
6171 /*
6172  * Used to clear out the tracer before deletion of an instance.
6173  * Must have trace_types_lock held.
6174  */
6175 static void tracing_set_nop(struct trace_array *tr)
6176 {
6177 	if (tr->current_trace == &nop_trace)
6178 		return;
6179 
6180 	tr->current_trace->enabled--;
6181 
6182 	if (tr->current_trace->reset)
6183 		tr->current_trace->reset(tr);
6184 
6185 	tr->current_trace = &nop_trace;
6186 }
6187 
6188 static bool tracer_options_updated;
6189 
6190 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6191 {
6192 	/* Only enable if the directory has been created already. */
6193 	if (!tr->dir)
6194 		return;
6195 
6196 	/* Only create trace option files after update_tracer_options() finishes */
6197 	if (!tracer_options_updated)
6198 		return;
6199 
6200 	create_trace_option_files(tr, t);
6201 }
6202 
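/*
 * tracing_set_tracer() is what ultimately services writes to the
 * per-instance "current_tracer" file (see tracing_set_trace_write()
 * below), e.g.:
 *
 *     echo function_graph > /sys/kernel/tracing/current_tracer
 *
 * The set of accepted tracer names depends on the kernel configuration
 * and is listed in "available_tracers".
 */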
6203 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6204 {
6205 	struct tracer *t;
6206 #ifdef CONFIG_TRACER_MAX_TRACE
6207 	bool had_max_tr;
6208 #endif
6209 	int ret;
6210 
6211 	guard(mutex)(&trace_types_lock);
6212 
6213 	update_last_data(tr);
6214 
6215 	if (!tr->ring_buffer_expanded) {
6216 		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6217 						RING_BUFFER_ALL_CPUS);
6218 		if (ret < 0)
6219 			return ret;
6220 		ret = 0;
6221 	}
6222 
6223 	for (t = trace_types; t; t = t->next) {
6224 		if (strcmp(t->name, buf) == 0)
6225 			break;
6226 	}
6227 	if (!t)
6228 		return -EINVAL;
6229 
6230 	if (t == tr->current_trace)
6231 		return 0;
6232 
6233 #ifdef CONFIG_TRACER_SNAPSHOT
6234 	if (t->use_max_tr) {
6235 		local_irq_disable();
6236 		arch_spin_lock(&tr->max_lock);
6237 		ret = tr->cond_snapshot ? -EBUSY : 0;
6238 		arch_spin_unlock(&tr->max_lock);
6239 		local_irq_enable();
6240 		if (ret)
6241 			return ret;
6242 	}
6243 #endif
6244 	/* Some tracers won't work on kernel command line */
6245 	if (system_state < SYSTEM_RUNNING && t->noboot) {
6246 		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6247 			t->name);
6248 		return -EINVAL;
6249 	}
6250 
6251 	/* Some tracers are only allowed for the top level buffer */
6252 	if (!trace_ok_for_array(t, tr))
6253 		return -EINVAL;
6254 
6255 	/* If trace pipe files are being read, we can't change the tracer */
6256 	if (tr->trace_ref)
6257 		return -EBUSY;
6258 
6259 	trace_branch_disable();
6260 
6261 	tr->current_trace->enabled--;
6262 
6263 	if (tr->current_trace->reset)
6264 		tr->current_trace->reset(tr);
6265 
6266 #ifdef CONFIG_TRACER_MAX_TRACE
6267 	had_max_tr = tr->current_trace->use_max_tr;
6268 
6269 	/* Current trace needs to be nop_trace before synchronize_rcu */
6270 	tr->current_trace = &nop_trace;
6271 
6272 	if (had_max_tr && !t->use_max_tr) {
6273 		/*
6274 		 * We need to make sure that update_max_tr() sees that
6275 		 * current_trace changed to nop_trace to keep it from
6276 		 * swapping the buffers after we resize it.
6277 		 * update_max_tr() is called with interrupts disabled,
6278 		 * so synchronize_rcu() is sufficient.
6279 		 */
6280 		synchronize_rcu();
6281 		free_snapshot(tr);
6282 		tracing_disarm_snapshot(tr);
6283 	}
6284 
6285 	if (!had_max_tr && t->use_max_tr) {
6286 		ret = tracing_arm_snapshot_locked(tr);
6287 		if (ret)
6288 			return ret;
6289 	}
6290 #else
6291 	tr->current_trace = &nop_trace;
6292 #endif
6293 
6294 	if (t->init) {
6295 		ret = tracer_init(t, tr);
6296 		if (ret) {
6297 #ifdef CONFIG_TRACER_MAX_TRACE
6298 			if (t->use_max_tr)
6299 				tracing_disarm_snapshot(tr);
6300 #endif
6301 			return ret;
6302 		}
6303 	}
6304 
6305 	tr->current_trace = t;
6306 	tr->current_trace->enabled++;
6307 	trace_branch_enable(tr);
6308 
6309 	return 0;
6310 }
6311 
6312 static ssize_t
6313 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6314 			size_t cnt, loff_t *ppos)
6315 {
6316 	struct trace_array *tr = filp->private_data;
6317 	char buf[MAX_TRACER_SIZE+1];
6318 	char *name;
6319 	size_t ret;
6320 	int err;
6321 
6322 	ret = cnt;
6323 
6324 	if (cnt > MAX_TRACER_SIZE)
6325 		cnt = MAX_TRACER_SIZE;
6326 
6327 	if (copy_from_user(buf, ubuf, cnt))
6328 		return -EFAULT;
6329 
6330 	buf[cnt] = 0;
6331 
6332 	name = strim(buf);
6333 
6334 	err = tracing_set_tracer(tr, name);
6335 	if (err)
6336 		return err;
6337 
6338 	*ppos += ret;
6339 
6340 	return ret;
6341 }
6342 
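/*
 * Helpers for files that expose a latency value in microseconds but
 * store it internally in nanoseconds (tracing_thresh and, with
 * CONFIG_TRACER_MAX_TRACE, tracing_max_latency).  For example, writing
 * "50" through tracing_thresh_write() stores 50000 in tracing_thresh.
 */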
6343 static ssize_t
6344 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6345 		   size_t cnt, loff_t *ppos)
6346 {
6347 	char buf[64];
6348 	int r;
6349 
6350 	r = snprintf(buf, sizeof(buf), "%ld\n",
6351 		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6352 	if (r > sizeof(buf))
6353 		r = sizeof(buf);
6354 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6355 }
6356 
6357 static ssize_t
6358 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6359 		    size_t cnt, loff_t *ppos)
6360 {
6361 	unsigned long val;
6362 	int ret;
6363 
6364 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6365 	if (ret)
6366 		return ret;
6367 
6368 	*ptr = val * 1000;
6369 
6370 	return cnt;
6371 }
6372 
6373 static ssize_t
6374 tracing_thresh_read(struct file *filp, char __user *ubuf,
6375 		    size_t cnt, loff_t *ppos)
6376 {
6377 	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6378 }
6379 
6380 static ssize_t
6381 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6382 		     size_t cnt, loff_t *ppos)
6383 {
6384 	struct trace_array *tr = filp->private_data;
6385 	int ret;
6386 
6387 	guard(mutex)(&trace_types_lock);
6388 	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6389 	if (ret < 0)
6390 		return ret;
6391 
6392 	if (tr->current_trace->update_thresh) {
6393 		ret = tr->current_trace->update_thresh(tr);
6394 		if (ret < 0)
6395 			return ret;
6396 	}
6397 
6398 	return cnt;
6399 }
6400 
6401 #ifdef CONFIG_TRACER_MAX_TRACE
6402 
6403 static ssize_t
6404 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6405 		     size_t cnt, loff_t *ppos)
6406 {
6407 	struct trace_array *tr = filp->private_data;
6408 
6409 	return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6410 }
6411 
6412 static ssize_t
6413 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6414 		      size_t cnt, loff_t *ppos)
6415 {
6416 	struct trace_array *tr = filp->private_data;
6417 
6418 	return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6419 }
6420 
6421 #endif
6422 
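/*
 * Only one trace_pipe reader is allowed per CPU (or a single reader for
 * all CPUs) within an instance.  pipe_cpumask tracks which CPUs already
 * have a reader; a conflicting open gets -EBUSY.
 */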
6423 static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
6424 {
6425 	if (cpu == RING_BUFFER_ALL_CPUS) {
6426 		if (cpumask_empty(tr->pipe_cpumask)) {
6427 			cpumask_setall(tr->pipe_cpumask);
6428 			return 0;
6429 		}
6430 	} else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6431 		cpumask_set_cpu(cpu, tr->pipe_cpumask);
6432 		return 0;
6433 	}
6434 	return -EBUSY;
6435 }
6436 
6437 static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
6438 {
6439 	if (cpu == RING_BUFFER_ALL_CPUS) {
6440 		WARN_ON(!cpumask_full(tr->pipe_cpumask));
6441 		cpumask_clear(tr->pipe_cpumask);
6442 	} else {
6443 		WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6444 		cpumask_clear_cpu(cpu, tr->pipe_cpumask);
6445 	}
6446 }
6447 
6448 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6449 {
6450 	struct trace_array *tr = inode->i_private;
6451 	struct trace_iterator *iter;
6452 	int cpu;
6453 	int ret;
6454 
6455 	ret = tracing_check_open_get_tr(tr);
6456 	if (ret)
6457 		return ret;
6458 
6459 	mutex_lock(&trace_types_lock);
6460 	cpu = tracing_get_cpu(inode);
6461 	ret = open_pipe_on_cpu(tr, cpu);
6462 	if (ret)
6463 		goto fail_pipe_on_cpu;
6464 
6465 	/* create a buffer to store the information to pass to userspace */
6466 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6467 	if (!iter) {
6468 		ret = -ENOMEM;
6469 		goto fail_alloc_iter;
6470 	}
6471 
6472 	trace_seq_init(&iter->seq);
6473 	iter->trace = tr->current_trace;
6474 
6475 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6476 		ret = -ENOMEM;
6477 		goto fail;
6478 	}
6479 
6480 	/* trace pipe does not show start of buffer */
6481 	cpumask_setall(iter->started);
6482 
6483 	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6484 		iter->iter_flags |= TRACE_FILE_LAT_FMT;
6485 
6486 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
6487 	if (trace_clocks[tr->clock_id].in_ns)
6488 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6489 
6490 	iter->tr = tr;
6491 	iter->array_buffer = &tr->array_buffer;
6492 	iter->cpu_file = cpu;
6493 	mutex_init(&iter->mutex);
6494 	filp->private_data = iter;
6495 
6496 	if (iter->trace->pipe_open)
6497 		iter->trace->pipe_open(iter);
6498 
6499 	nonseekable_open(inode, filp);
6500 
6501 	tr->trace_ref++;
6502 
6503 	mutex_unlock(&trace_types_lock);
6504 	return ret;
6505 
6506 fail:
6507 	kfree(iter);
6508 fail_alloc_iter:
6509 	close_pipe_on_cpu(tr, cpu);
6510 fail_pipe_on_cpu:
6511 	__trace_array_put(tr);
6512 	mutex_unlock(&trace_types_lock);
6513 	return ret;
6514 }
6515 
6516 static int tracing_release_pipe(struct inode *inode, struct file *file)
6517 {
6518 	struct trace_iterator *iter = file->private_data;
6519 	struct trace_array *tr = inode->i_private;
6520 
6521 	mutex_lock(&trace_types_lock);
6522 
6523 	tr->trace_ref--;
6524 
6525 	if (iter->trace->pipe_close)
6526 		iter->trace->pipe_close(iter);
6527 	close_pipe_on_cpu(tr, iter->cpu_file);
6528 	mutex_unlock(&trace_types_lock);
6529 
6530 	free_trace_iter_content(iter);
6531 	kfree(iter);
6532 
6533 	trace_array_put(tr);
6534 
6535 	return 0;
6536 }
6537 
6538 static __poll_t
6539 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6540 {
6541 	struct trace_array *tr = iter->tr;
6542 
6543 	/* Iterators are static, they should be filled or empty */
6544 	if (trace_buffer_iter(iter, iter->cpu_file))
6545 		return EPOLLIN | EPOLLRDNORM;
6546 
6547 	if (tr->trace_flags & TRACE_ITER_BLOCK)
6548 		/*
6549 		 * Always select as readable when in blocking mode
6550 		 */
6551 		return EPOLLIN | EPOLLRDNORM;
6552 	else
6553 		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6554 					     filp, poll_table, iter->tr->buffer_percent);
6555 }
6556 
6557 static __poll_t
6558 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6559 {
6560 	struct trace_iterator *iter = filp->private_data;
6561 
6562 	return trace_poll(iter, filp, poll_table);
6563 }
6564 
6565 /* Must be called with iter->mutex held. */
6566 static int tracing_wait_pipe(struct file *filp)
6567 {
6568 	struct trace_iterator *iter = filp->private_data;
6569 	int ret;
6570 
6571 	while (trace_empty(iter)) {
6572 
6573 		if ((filp->f_flags & O_NONBLOCK)) {
6574 			return -EAGAIN;
6575 		}
6576 
6577 		/*
6578 		 * We block until we read something and tracing is disabled.
6579 		 * We still block if tracing is disabled, but we have never
6580 		 * read anything. This allows a user to cat this file, and
6581 		 * then enable tracing. But after we have read something,
6582 		 * we give an EOF when tracing is again disabled.
6583 		 *
6584 		 * iter->pos will be 0 if we haven't read anything.
6585 		 */
6586 		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6587 			break;
6588 
6589 		mutex_unlock(&iter->mutex);
6590 
6591 		ret = wait_on_pipe(iter, 0);
6592 
6593 		mutex_lock(&iter->mutex);
6594 
6595 		if (ret)
6596 			return ret;
6597 	}
6598 
6599 	return 1;
6600 }
6601 
6602 /*
6603  * Consumer reader.
6604  */
6605 static ssize_t
6606 tracing_read_pipe(struct file *filp, char __user *ubuf,
6607 		  size_t cnt, loff_t *ppos)
6608 {
6609 	struct trace_iterator *iter = filp->private_data;
6610 	ssize_t sret;
6611 
6612 	/*
6613 	 * Avoid more than one consumer on a single file descriptor.
6614 	 * This is just a matter of trace coherency; the ring buffer itself
6615 	 * is protected.
6616 	 */
6617 	guard(mutex)(&iter->mutex);
6618 
6619 	/* return any leftover data */
6620 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6621 	if (sret != -EBUSY)
6622 		return sret;
6623 
6624 	trace_seq_init(&iter->seq);
6625 
6626 	if (iter->trace->read) {
6627 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6628 		if (sret)
6629 			return sret;
6630 	}
6631 
6632 waitagain:
6633 	sret = tracing_wait_pipe(filp);
6634 	if (sret <= 0)
6635 		return sret;
6636 
6637 	/* stop when tracing is finished */
6638 	if (trace_empty(iter))
6639 		return 0;
6640 
6641 	if (cnt >= TRACE_SEQ_BUFFER_SIZE)
6642 		cnt = TRACE_SEQ_BUFFER_SIZE - 1;
6643 
6644 	/* reset all but tr, trace, and overruns */
6645 	trace_iterator_reset(iter);
6646 	cpumask_clear(iter->started);
6647 	trace_seq_init(&iter->seq);
6648 
6649 	trace_event_read_lock();
6650 	trace_access_lock(iter->cpu_file);
6651 	while (trace_find_next_entry_inc(iter) != NULL) {
6652 		enum print_line_t ret;
6653 		int save_len = iter->seq.seq.len;
6654 
6655 		ret = print_trace_line(iter);
6656 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
6657 			/*
6658 			 * If one print_trace_line() fills the entire trace_seq in one shot,
6659 			 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6660 			 * In this case, consume the event; otherwise the loop will peek
6661 			 * at this event again next time, resulting in an infinite loop.
6662 			 */
6663 			if (save_len == 0) {
6664 				iter->seq.full = 0;
6665 				trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6666 				trace_consume(iter);
6667 				break;
6668 			}
6669 
6670 			/* In other cases, don't print partial lines */
6671 			iter->seq.seq.len = save_len;
6672 			break;
6673 		}
6674 		if (ret != TRACE_TYPE_NO_CONSUME)
6675 			trace_consume(iter);
6676 
6677 		if (trace_seq_used(&iter->seq) >= cnt)
6678 			break;
6679 
6680 		/*
6681 		 * The full flag being set means we reached the trace_seq buffer
6682 		 * size and should have left via the partial-line condition above.
6683 		 * One of the trace_seq_* functions is not being used properly.
6684 		 */
6685 		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6686 			  iter->ent->type);
6687 	}
6688 	trace_access_unlock(iter->cpu_file);
6689 	trace_event_read_unlock();
6690 
6691 	/* Now copy what we have to the user */
6692 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6693 	if (iter->seq.readpos >= trace_seq_used(&iter->seq))
6694 		trace_seq_init(&iter->seq);
6695 
6696 	/*
6697 	 * If there was nothing to send to user, in spite of consuming trace
6698 	 * entries, go back to wait for more entries.
6699 	 */
6700 	if (sret == -EBUSY)
6701 		goto waitagain;
6702 
6703 	return sret;
6704 }
6705 
6706 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6707 				     unsigned int idx)
6708 {
6709 	__free_page(spd->pages[idx]);
6710 }
6711 
6712 static size_t
6713 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6714 {
6715 	size_t count;
6716 	int save_len;
6717 	int ret;
6718 
6719 	/* Seq buffer is page-sized, exactly what we need. */
6720 	for (;;) {
6721 		save_len = iter->seq.seq.len;
6722 		ret = print_trace_line(iter);
6723 
6724 		if (trace_seq_has_overflowed(&iter->seq)) {
6725 			iter->seq.seq.len = save_len;
6726 			break;
6727 		}
6728 
6729 		/*
6730 		 * This should not be hit, because it should only
6731 		 * be set if the iter->seq overflowed. But check it
6732 		 * anyway to be safe.
6733 		 */
6734 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
6735 			iter->seq.seq.len = save_len;
6736 			break;
6737 		}
6738 
6739 		count = trace_seq_used(&iter->seq) - save_len;
6740 		if (rem < count) {
6741 			rem = 0;
6742 			iter->seq.seq.len = save_len;
6743 			break;
6744 		}
6745 
6746 		if (ret != TRACE_TYPE_NO_CONSUME)
6747 			trace_consume(iter);
6748 		rem -= count;
6749 		if (!trace_find_next_entry_inc(iter))	{
6750 			rem = 0;
6751 			iter->ent = NULL;
6752 			break;
6753 		}
6754 	}
6755 
6756 	return rem;
6757 }
6758 
6759 static ssize_t tracing_splice_read_pipe(struct file *filp,
6760 					loff_t *ppos,
6761 					struct pipe_inode_info *pipe,
6762 					size_t len,
6763 					unsigned int flags)
6764 {
6765 	struct page *pages_def[PIPE_DEF_BUFFERS];
6766 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
6767 	struct trace_iterator *iter = filp->private_data;
6768 	struct splice_pipe_desc spd = {
6769 		.pages		= pages_def,
6770 		.partial	= partial_def,
6771 		.nr_pages	= 0, /* This gets updated below. */
6772 		.nr_pages_max	= PIPE_DEF_BUFFERS,
6773 		.ops		= &default_pipe_buf_ops,
6774 		.spd_release	= tracing_spd_release_pipe,
6775 	};
6776 	ssize_t ret;
6777 	size_t rem;
6778 	unsigned int i;
6779 
6780 	if (splice_grow_spd(pipe, &spd))
6781 		return -ENOMEM;
6782 
6783 	mutex_lock(&iter->mutex);
6784 
6785 	if (iter->trace->splice_read) {
6786 		ret = iter->trace->splice_read(iter, filp,
6787 					       ppos, pipe, len, flags);
6788 		if (ret)
6789 			goto out_err;
6790 	}
6791 
6792 	ret = tracing_wait_pipe(filp);
6793 	if (ret <= 0)
6794 		goto out_err;
6795 
6796 	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6797 		ret = -EFAULT;
6798 		goto out_err;
6799 	}
6800 
6801 	trace_event_read_lock();
6802 	trace_access_lock(iter->cpu_file);
6803 
6804 	/* Fill as many pages as possible. */
6805 	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6806 		spd.pages[i] = alloc_page(GFP_KERNEL);
6807 		if (!spd.pages[i])
6808 			break;
6809 
6810 		rem = tracing_fill_pipe_page(rem, iter);
6811 
6812 		/* Copy the data into the page, so we can start over. */
6813 		ret = trace_seq_to_buffer(&iter->seq,
6814 					  page_address(spd.pages[i]),
6815 					  trace_seq_used(&iter->seq));
6816 		if (ret < 0) {
6817 			__free_page(spd.pages[i]);
6818 			break;
6819 		}
6820 		spd.partial[i].offset = 0;
6821 		spd.partial[i].len = trace_seq_used(&iter->seq);
6822 
6823 		trace_seq_init(&iter->seq);
6824 	}
6825 
6826 	trace_access_unlock(iter->cpu_file);
6827 	trace_event_read_unlock();
6828 	mutex_unlock(&iter->mutex);
6829 
6830 	spd.nr_pages = i;
6831 
6832 	if (i)
6833 		ret = splice_to_pipe(pipe, &spd);
6834 	else
6835 		ret = 0;
6836 out:
6837 	splice_shrink_spd(&spd);
6838 	return ret;
6839 
6840 out_err:
6841 	mutex_unlock(&iter->mutex);
6842 	goto out;
6843 }
6844 
6845 static ssize_t
6846 tracing_entries_read(struct file *filp, char __user *ubuf,
6847 		     size_t cnt, loff_t *ppos)
6848 {
6849 	struct inode *inode = file_inode(filp);
6850 	struct trace_array *tr = inode->i_private;
6851 	int cpu = tracing_get_cpu(inode);
6852 	char buf[64];
6853 	int r = 0;
6854 	ssize_t ret;
6855 
6856 	mutex_lock(&trace_types_lock);
6857 
6858 	if (cpu == RING_BUFFER_ALL_CPUS) {
6859 		int cpu, buf_size_same;
6860 		unsigned long size;
6861 
6862 		size = 0;
6863 		buf_size_same = 1;
6864 		/* Check if all per-CPU buffer sizes are the same */
6865 		for_each_tracing_cpu(cpu) {
6866 			/* Fill in the size from the first enabled CPU */
6867 			if (size == 0)
6868 				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6869 			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6870 				buf_size_same = 0;
6871 				break;
6872 			}
6873 		}
6874 
6875 		if (buf_size_same) {
6876 			if (!tr->ring_buffer_expanded)
6877 				r = sprintf(buf, "%lu (expanded: %lu)\n",
6878 					    size >> 10,
6879 					    trace_buf_size >> 10);
6880 			else
6881 				r = sprintf(buf, "%lu\n", size >> 10);
6882 		} else
6883 			r = sprintf(buf, "X\n");
6884 	} else
6885 		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6886 
6887 	mutex_unlock(&trace_types_lock);
6888 
6889 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6890 	return ret;
6891 }
6892 
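/*
 * Write handler for the per-instance "buffer_size_kb" file (and its
 * per_cpu/cpuN/ counterparts).  The value is taken in KiB, e.g.:
 *
 *     echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *
 * resizes each per-CPU ring buffer of the instance to 4 MiB.
 */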
6893 static ssize_t
6894 tracing_entries_write(struct file *filp, const char __user *ubuf,
6895 		      size_t cnt, loff_t *ppos)
6896 {
6897 	struct inode *inode = file_inode(filp);
6898 	struct trace_array *tr = inode->i_private;
6899 	unsigned long val;
6900 	int ret;
6901 
6902 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6903 	if (ret)
6904 		return ret;
6905 
6906 	/* must have at least 1 entry */
6907 	if (!val)
6908 		return -EINVAL;
6909 
6910 	/* value is in KB */
6911 	val <<= 10;
6912 	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6913 	if (ret < 0)
6914 		return ret;
6915 
6916 	*ppos += cnt;
6917 
6918 	return cnt;
6919 }
6920 
6921 static ssize_t
6922 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6923 				size_t cnt, loff_t *ppos)
6924 {
6925 	struct trace_array *tr = filp->private_data;
6926 	char buf[64];
6927 	int r, cpu;
6928 	unsigned long size = 0, expanded_size = 0;
6929 
6930 	mutex_lock(&trace_types_lock);
6931 	for_each_tracing_cpu(cpu) {
6932 		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6933 		if (!tr->ring_buffer_expanded)
6934 			expanded_size += trace_buf_size >> 10;
6935 	}
6936 	if (tr->ring_buffer_expanded)
6937 		r = sprintf(buf, "%lu\n", size);
6938 	else
6939 		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6940 	mutex_unlock(&trace_types_lock);
6941 
6942 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6943 }
6944 
6945 #define LAST_BOOT_HEADER ((void *)1)
6946 
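/*
 * seq_file iterator for the last-boot info file of a persistent
 * ("previous boot") instance (registered elsewhere in this file).  The
 * first line shows the previous boot's kernel text address (or
 * "# Current" once the buffer has been reused for this boot), followed
 * by one line per recorded module: its load address and name.
 */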
6947 static void *l_next(struct seq_file *m, void *v, loff_t *pos)
6948 {
6949 	struct trace_array *tr = m->private;
6950 	struct trace_scratch *tscratch = tr->scratch;
6951 	unsigned int index = *pos;
6952 
6953 	(*pos)++;
6954 
6955 	if (*pos == 1)
6956 		return LAST_BOOT_HEADER;
6957 
6958 	/* Only show offsets of the last boot data */
6959 	if (!tscratch || !(tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
6960 		return NULL;
6961 
6962 	/* *pos 0 is for the header, 1 is for the first module */
6963 	index--;
6964 
6965 	if (index >= tscratch->nr_entries)
6966 		return NULL;
6967 
6968 	return &tscratch->entries[index];
6969 }
6970 
6971 static void *l_start(struct seq_file *m, loff_t *pos)
6972 {
6973 	mutex_lock(&scratch_mutex);
6974 
6975 	return l_next(m, NULL, pos);
6976 }
6977 
6978 static void l_stop(struct seq_file *m, void *p)
6979 {
6980 	mutex_unlock(&scratch_mutex);
6981 }
6982 
6983 static void show_last_boot_header(struct seq_file *m, struct trace_array *tr)
6984 {
6985 	struct trace_scratch *tscratch = tr->scratch;
6986 
6987 	/*
6988 	 * Do not leak KASLR address. This only shows the KASLR address of
6989 	 * the last boot. When the ring buffer is started, the LAST_BOOT
6990 	 * flag gets cleared, and this should only report "current".
6991 	 * Otherwise it shows the KASLR address from the previous boot which
6992 	 * should not be the same as the current boot.
6993 	 */
6994 	if (tscratch && (tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
6995 		seq_printf(m, "%lx\t[kernel]\n", tscratch->text_addr);
6996 	else
6997 		seq_puts(m, "# Current\n");
6998 }
6999 
7000 static int l_show(struct seq_file *m, void *v)
7001 {
7002 	struct trace_array *tr = m->private;
7003 	struct trace_mod_entry *entry = v;
7004 
7005 	if (v == LAST_BOOT_HEADER) {
7006 		show_last_boot_header(m, tr);
7007 		return 0;
7008 	}
7009 
7010 	seq_printf(m, "%lx\t%s\n", entry->mod_addr, entry->mod_name);
7011 	return 0;
7012 }
7013 
7014 static const struct seq_operations last_boot_seq_ops = {
7015 	.start		= l_start,
7016 	.next		= l_next,
7017 	.stop		= l_stop,
7018 	.show		= l_show,
7019 };
7020 
7021 static int tracing_last_boot_open(struct inode *inode, struct file *file)
7022 {
7023 	struct trace_array *tr = inode->i_private;
7024 	struct seq_file *m;
7025 	int ret;
7026 
7027 	ret = tracing_check_open_get_tr(tr);
7028 	if (ret)
7029 		return ret;
7030 
7031 	ret = seq_open(file, &last_boot_seq_ops);
7032 	if (ret) {
7033 		trace_array_put(tr);
7034 		return ret;
7035 	}
7036 
7037 	m = file->private_data;
7038 	m->private = tr;
7039 
7040 	return 0;
7041 }
7042 
7043 static int tracing_buffer_meta_open(struct inode *inode, struct file *filp)
7044 {
7045 	struct trace_array *tr = inode->i_private;
7046 	int cpu = tracing_get_cpu(inode);
7047 	int ret;
7048 
7049 	ret = tracing_check_open_get_tr(tr);
7050 	if (ret)
7051 		return ret;
7052 
7053 	ret = ring_buffer_meta_seq_init(filp, tr->array_buffer.buffer, cpu);
7054 	if (ret < 0)
7055 		__trace_array_put(tr);
7056 	return ret;
7057 }
7058 
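/*
 * Handlers for the "free_buffer" file.  Writes are accepted but ignored;
 * the real work happens on release: closing the file shrinks the
 * instance's ring buffer to zero and, if TRACE_ITER_STOP_ON_FREE is set,
 * turns tracing off first.
 */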
7059 static ssize_t
7060 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7061 			  size_t cnt, loff_t *ppos)
7062 {
7063 	/*
7064 	 * There is no need to read what the user has written; this function
7065 	 * exists just so that an "echo" into the file does not return an error.
7066 	 */
7067 
7068 	*ppos += cnt;
7069 
7070 	return cnt;
7071 }
7072 
7073 static int
7074 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7075 {
7076 	struct trace_array *tr = inode->i_private;
7077 
7078 	/* Disable tracing if the instance requested stop-on-free */
7079 	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7080 		tracer_tracing_off(tr);
7081 	/* resize the ring buffer to 0 */
7082 	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7083 
7084 	trace_array_put(tr);
7085 
7086 	return 0;
7087 }
7088 
7089 #define TRACE_MARKER_MAX_SIZE		4096
7090 
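/*
 * Write handler for the "trace_marker" file.  Userspace can inject an
 * arbitrary message into the trace as a TRACE_PRINT event, e.g.:
 *
 *     echo "hello from userspace" > /sys/kernel/tracing/trace_marker
 *
 * Writes larger than TRACE_MARKER_MAX_SIZE are truncated.
 */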
7091 static ssize_t
7092 tracing_mark_write(struct file *filp, const char __user *ubuf,
7093 					size_t cnt, loff_t *fpos)
7094 {
7095 	struct trace_array *tr = filp->private_data;
7096 	struct ring_buffer_event *event;
7097 	enum event_trigger_type tt = ETT_NONE;
7098 	struct trace_buffer *buffer;
7099 	struct print_entry *entry;
7100 	int meta_size;
7101 	ssize_t written;
7102 	size_t size;
7103 	int len;
7104 
7105 /* Used in tracing_mark_raw_write() as well */
7106 #define FAULTED_STR "<faulted>"
7107 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7108 
7109 	if (tracing_disabled)
7110 		return -EINVAL;
7111 
7112 	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7113 		return -EINVAL;
7114 
7115 	if ((ssize_t)cnt < 0)
7116 		return -EINVAL;
7117 
7118 	if (cnt > TRACE_MARKER_MAX_SIZE)
7119 		cnt = TRACE_MARKER_MAX_SIZE;
7120 
7121 	meta_size = sizeof(*entry) + 2;  /* add '\0' and possible '\n' */
7122  again:
7123 	size = cnt + meta_size;
7124 
7125 	/* If less than "<faulted>", then make sure we can still add that */
7126 	if (cnt < FAULTED_SIZE)
7127 		size += FAULTED_SIZE - cnt;
7128 
7129 	buffer = tr->array_buffer.buffer;
7130 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7131 					    tracing_gen_ctx());
7132 	if (unlikely(!event)) {
7133 		/*
7134 		 * If the size was greater than what was allowed, then
7135 		 * make it smaller and try again.
7136 		 */
7137 		if (size > ring_buffer_max_event_size(buffer)) {
7138 			/* A size padded for a cnt smaller than FAULTED_SIZE should never exceed the max */
7139 			if (WARN_ON_ONCE(cnt < FAULTED_SIZE))
7140 				return -EBADF;
7141 			cnt = ring_buffer_max_event_size(buffer) - meta_size;
7142 			/* The above should only happen once */
7143 			if (WARN_ON_ONCE(cnt + meta_size == size))
7144 				return -EBADF;
7145 			goto again;
7146 		}
7147 
7148 		/* Ring buffer disabled, return as if not open for write */
7149 		return -EBADF;
7150 	}
7151 
7152 	entry = ring_buffer_event_data(event);
7153 	entry->ip = _THIS_IP_;
7154 
7155 	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7156 	if (len) {
7157 		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7158 		cnt = FAULTED_SIZE;
7159 		written = -EFAULT;
7160 	} else
7161 		written = cnt;
7162 
7163 	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7164 		/* do not add \n before testing triggers, but add \0 */
7165 		entry->buf[cnt] = '\0';
7166 		tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7167 	}
7168 
7169 	if (entry->buf[cnt - 1] != '\n') {
7170 		entry->buf[cnt] = '\n';
7171 		entry->buf[cnt + 1] = '\0';
7172 	} else
7173 		entry->buf[cnt] = '\0';
7174 
7175 	if (static_branch_unlikely(&trace_marker_exports_enabled))
7176 		ftrace_exports(event, TRACE_EXPORT_MARKER);
7177 	__buffer_unlock_commit(buffer, event);
7178 
7179 	if (tt)
7180 		event_triggers_post_call(tr->trace_marker_file, tt);
7181 
7182 	return written;
7183 }
7184 
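/*
 * Write handler for the "trace_marker_raw" file.  The payload must start
 * with an integer id (sizeof(unsigned int) bytes) followed by arbitrary
 * binary data, recorded as a TRACE_RAW_DATA event.  A minimal,
 * illustrative userspace sketch (names are examples only):
 *
 *     struct { unsigned int id; char data[8]; } raw = { 42, "payload" };
 *     write(fd, &raw, sizeof(raw));   // fd opened on trace_marker_raw
 */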
7185 static ssize_t
7186 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7187 					size_t cnt, loff_t *fpos)
7188 {
7189 	struct trace_array *tr = filp->private_data;
7190 	struct ring_buffer_event *event;
7191 	struct trace_buffer *buffer;
7192 	struct raw_data_entry *entry;
7193 	ssize_t written;
7194 	int size;
7195 	int len;
7196 
7197 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7198 
7199 	if (tracing_disabled)
7200 		return -EINVAL;
7201 
7202 	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7203 		return -EINVAL;
7204 
7205 	/* The marker must at least have a tag id */
7206 	if (cnt < sizeof(unsigned int))
7207 		return -EINVAL;
7208 
7209 	size = sizeof(*entry) + cnt;
7210 	if (cnt < FAULT_SIZE_ID)
7211 		size += FAULT_SIZE_ID - cnt;
7212 
7213 	buffer = tr->array_buffer.buffer;
7214 
7215 	if (size > ring_buffer_max_event_size(buffer))
7216 		return -EINVAL;
7217 
7218 	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7219 					    tracing_gen_ctx());
7220 	if (!event)
7221 		/* Ring buffer disabled, return as if not open for write */
7222 		return -EBADF;
7223 
7224 	entry = ring_buffer_event_data(event);
7225 
7226 	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7227 	if (len) {
7228 		entry->id = -1;
7229 		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7230 		written = -EFAULT;
7231 	} else
7232 		written = cnt;
7233 
7234 	__buffer_unlock_commit(buffer, event);
7235 
7236 	return written;
7237 }
7238 
7239 static int tracing_clock_show(struct seq_file *m, void *v)
7240 {
7241 	struct trace_array *tr = m->private;
7242 	int i;
7243 
7244 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7245 		seq_printf(m,
7246 			"%s%s%s%s", i ? " " : "",
7247 			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7248 			i == tr->clock_id ? "]" : "");
7249 	seq_putc(m, '\n');
7250 
7251 	return 0;
7252 }
7253 
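/*
 * Select one of the clocks in trace_clocks[] for an instance.  This
 * backs writes to the "trace_clock" file, e.g.:
 *
 *     echo global > /sys/kernel/tracing/trace_clock
 *
 * Switching clocks clears the ring buffer so that old and new timestamps
 * are never mixed (see the comment in the function body).
 */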
7254 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7255 {
7256 	int i;
7257 
7258 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7259 		if (strcmp(trace_clocks[i].name, clockstr) == 0)
7260 			break;
7261 	}
7262 	if (i == ARRAY_SIZE(trace_clocks))
7263 		return -EINVAL;
7264 
7265 	mutex_lock(&trace_types_lock);
7266 
7267 	tr->clock_id = i;
7268 
7269 	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7270 
7271 	/*
7272 	 * New clock may not be consistent with the previous clock.
7273 	 * Reset the buffer so that it doesn't have incomparable timestamps.
7274 	 */
7275 	tracing_reset_online_cpus(&tr->array_buffer);
7276 
7277 #ifdef CONFIG_TRACER_MAX_TRACE
7278 	if (tr->max_buffer.buffer)
7279 		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7280 	tracing_reset_online_cpus(&tr->max_buffer);
7281 #endif
7282 
7283 	mutex_unlock(&trace_types_lock);
7284 
7285 	return 0;
7286 }
7287 
7288 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7289 				   size_t cnt, loff_t *fpos)
7290 {
7291 	struct seq_file *m = filp->private_data;
7292 	struct trace_array *tr = m->private;
7293 	char buf[64];
7294 	const char *clockstr;
7295 	int ret;
7296 
7297 	if (cnt >= sizeof(buf))
7298 		return -EINVAL;
7299 
7300 	if (copy_from_user(buf, ubuf, cnt))
7301 		return -EFAULT;
7302 
7303 	buf[cnt] = 0;
7304 
7305 	clockstr = strstrip(buf);
7306 
7307 	ret = tracing_set_clock(tr, clockstr);
7308 	if (ret)
7309 		return ret;
7310 
7311 	*fpos += cnt;
7312 
7313 	return cnt;
7314 }
7315 
7316 static int tracing_clock_open(struct inode *inode, struct file *file)
7317 {
7318 	struct trace_array *tr = inode->i_private;
7319 	int ret;
7320 
7321 	ret = tracing_check_open_get_tr(tr);
7322 	if (ret)
7323 		return ret;
7324 
7325 	ret = single_open(file, tracing_clock_show, inode->i_private);
7326 	if (ret < 0)
7327 		trace_array_put(tr);
7328 
7329 	return ret;
7330 }
7331 
7332 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7333 {
7334 	struct trace_array *tr = m->private;
7335 
7336 	mutex_lock(&trace_types_lock);
7337 
7338 	if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7339 		seq_puts(m, "delta [absolute]\n");
7340 	else
7341 		seq_puts(m, "[delta] absolute\n");
7342 
7343 	mutex_unlock(&trace_types_lock);
7344 
7345 	return 0;
7346 }
7347 
7348 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7349 {
7350 	struct trace_array *tr = inode->i_private;
7351 	int ret;
7352 
7353 	ret = tracing_check_open_get_tr(tr);
7354 	if (ret)
7355 		return ret;
7356 
7357 	ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7358 	if (ret < 0)
7359 		trace_array_put(tr);
7360 
7361 	return ret;
7362 }
7363 
7364 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7365 {
7366 	if (rbe == this_cpu_read(trace_buffered_event))
7367 		return ring_buffer_time_stamp(buffer);
7368 
7369 	return ring_buffer_event_time_stamp(buffer, rbe);
7370 }
7371 
7372 /*
7373  * Set or disable using the per-CPU trace_buffered_event when possible.
7374  */
7375 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7376 {
7377 	guard(mutex)(&trace_types_lock);
7378 
7379 	if (set && tr->no_filter_buffering_ref++)
7380 		return 0;
7381 
7382 	if (!set) {
7383 		if (WARN_ON_ONCE(!tr->no_filter_buffering_ref))
7384 			return -EINVAL;
7385 
7386 		--tr->no_filter_buffering_ref;
7387 	}
7388 
7389 	return 0;
7390 }
7391 
7392 struct ftrace_buffer_info {
7393 	struct trace_iterator	iter;
7394 	void			*spare;
7395 	unsigned int		spare_cpu;
7396 	unsigned int		spare_size;
7397 	unsigned int		read;
7398 };
7399 
7400 #ifdef CONFIG_TRACER_SNAPSHOT
7401 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7402 {
7403 	struct trace_array *tr = inode->i_private;
7404 	struct trace_iterator *iter;
7405 	struct seq_file *m;
7406 	int ret;
7407 
7408 	ret = tracing_check_open_get_tr(tr);
7409 	if (ret)
7410 		return ret;
7411 
7412 	if (file->f_mode & FMODE_READ) {
7413 		iter = __tracing_open(inode, file, true);
7414 		if (IS_ERR(iter))
7415 			ret = PTR_ERR(iter);
7416 	} else {
7417 		/* Writes still need the seq_file to hold the private data */
7418 		ret = -ENOMEM;
7419 		m = kzalloc(sizeof(*m), GFP_KERNEL);
7420 		if (!m)
7421 			goto out;
7422 		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7423 		if (!iter) {
7424 			kfree(m);
7425 			goto out;
7426 		}
7427 		ret = 0;
7428 
7429 		iter->tr = tr;
7430 		iter->array_buffer = &tr->max_buffer;
7431 		iter->cpu_file = tracing_get_cpu(inode);
7432 		m->private = iter;
7433 		file->private_data = m;
7434 	}
7435 out:
7436 	if (ret < 0)
7437 		trace_array_put(tr);
7438 
7439 	return ret;
7440 }
7441 
7442 static void tracing_swap_cpu_buffer(void *tr)
7443 {
7444 	update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
7445 }
7446 
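
/*
 * Handle writes to the snapshot file.  The written value selects the action:
 *   0 - free the allocated snapshot (max) buffer (all-CPUs file only)
 *   1 - arm the snapshot if needed and swap the current buffer into it
 *       (per-CPU swap only if the ring buffer supports it)
 *   * - any other value clears the snapshot buffer contents
 * e.g. "echo 1 > snapshot" takes a snapshot of the current trace.
 */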
7447 static ssize_t
7448 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7449 		       loff_t *ppos)
7450 {
7451 	struct seq_file *m = filp->private_data;
7452 	struct trace_iterator *iter = m->private;
7453 	struct trace_array *tr = iter->tr;
7454 	unsigned long val;
7455 	int ret;
7456 
7457 	ret = tracing_update_buffers(tr);
7458 	if (ret < 0)
7459 		return ret;
7460 
7461 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7462 	if (ret)
7463 		return ret;
7464 
7465 	guard(mutex)(&trace_types_lock);
7466 
7467 	if (tr->current_trace->use_max_tr)
7468 		return -EBUSY;
7469 
7470 	local_irq_disable();
7471 	arch_spin_lock(&tr->max_lock);
7472 	if (tr->cond_snapshot)
7473 		ret = -EBUSY;
7474 	arch_spin_unlock(&tr->max_lock);
7475 	local_irq_enable();
7476 	if (ret)
7477 		return ret;
7478 
7479 	switch (val) {
7480 	case 0:
7481 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
7482 			return -EINVAL;
7483 		if (tr->allocated_snapshot)
7484 			free_snapshot(tr);
7485 		break;
7486 	case 1:
7487 /* Only allow per-cpu swap if the ring buffer supports it */
7488 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7489 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
7490 			return -EINVAL;
7491 #endif
7492 		if (tr->allocated_snapshot)
7493 			ret = resize_buffer_duplicate_size(&tr->max_buffer,
7494 					&tr->array_buffer, iter->cpu_file);
7495 
7496 		ret = tracing_arm_snapshot_locked(tr);
7497 		if (ret)
7498 			return ret;
7499 
7500 		/* Now, we're going to swap */
7501 		if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7502 			local_irq_disable();
7503 			update_max_tr(tr, current, smp_processor_id(), NULL);
7504 			local_irq_enable();
7505 		} else {
7506 			smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7507 						 (void *)tr, 1);
7508 		}
7509 		tracing_disarm_snapshot(tr);
7510 		break;
7511 	default:
7512 		if (tr->allocated_snapshot) {
7513 			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7514 				tracing_reset_online_cpus(&tr->max_buffer);
7515 			else
7516 				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7517 		}
7518 		break;
7519 	}
7520 
7521 	if (ret >= 0) {
7522 		*ppos += cnt;
7523 		ret = cnt;
7524 	}
7525 
7526 	return ret;
7527 }
7528 
7529 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7530 {
7531 	struct seq_file *m = file->private_data;
7532 	int ret;
7533 
7534 	ret = tracing_release(inode, file);
7535 
7536 	if (file->f_mode & FMODE_READ)
7537 		return ret;
7538 
7539 	/* If write only, the seq_file is just a stub */
7540 	if (m)
7541 		kfree(m->private);
7542 	kfree(m);
7543 
7544 	return 0;
7545 }
7546 
7547 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7548 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7549 				    size_t count, loff_t *ppos);
7550 static int tracing_buffers_release(struct inode *inode, struct file *file);
7551 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7552 		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7553 
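/*
 * Open the snapshot_raw file: reuse tracing_buffers_open() (which also
 * performs the tracefs lockdown check) and then point the iterator at the
 * max (snapshot) buffer instead of the live buffer.
 */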
7554 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7555 {
7556 	struct ftrace_buffer_info *info;
7557 	int ret;
7558 
7559 	/* The following checks for tracefs lockdown */
7560 	ret = tracing_buffers_open(inode, filp);
7561 	if (ret < 0)
7562 		return ret;
7563 
7564 	info = filp->private_data;
7565 
7566 	if (info->iter.trace->use_max_tr) {
7567 		tracing_buffers_release(inode, filp);
7568 		return -EBUSY;
7569 	}
7570 
7571 	info->iter.snapshot = true;
7572 	info->iter.array_buffer = &info->iter.tr->max_buffer;
7573 
7574 	return ret;
7575 }
7576 
7577 #endif /* CONFIG_TRACER_SNAPSHOT */
7578 
7579 
7580 static const struct file_operations tracing_thresh_fops = {
7581 	.open		= tracing_open_generic,
7582 	.read		= tracing_thresh_read,
7583 	.write		= tracing_thresh_write,
7584 	.llseek		= generic_file_llseek,
7585 };
7586 
7587 #ifdef CONFIG_TRACER_MAX_TRACE
7588 static const struct file_operations tracing_max_lat_fops = {
7589 	.open		= tracing_open_generic_tr,
7590 	.read		= tracing_max_lat_read,
7591 	.write		= tracing_max_lat_write,
7592 	.llseek		= generic_file_llseek,
7593 	.release	= tracing_release_generic_tr,
7594 };
7595 #endif
7596 
7597 static const struct file_operations set_tracer_fops = {
7598 	.open		= tracing_open_generic_tr,
7599 	.read		= tracing_set_trace_read,
7600 	.write		= tracing_set_trace_write,
7601 	.llseek		= generic_file_llseek,
7602 	.release	= tracing_release_generic_tr,
7603 };
7604 
7605 static const struct file_operations tracing_pipe_fops = {
7606 	.open		= tracing_open_pipe,
7607 	.poll		= tracing_poll_pipe,
7608 	.read		= tracing_read_pipe,
7609 	.splice_read	= tracing_splice_read_pipe,
7610 	.release	= tracing_release_pipe,
7611 };
7612 
7613 static const struct file_operations tracing_entries_fops = {
7614 	.open		= tracing_open_generic_tr,
7615 	.read		= tracing_entries_read,
7616 	.write		= tracing_entries_write,
7617 	.llseek		= generic_file_llseek,
7618 	.release	= tracing_release_generic_tr,
7619 };
7620 
7621 static const struct file_operations tracing_buffer_meta_fops = {
7622 	.open		= tracing_buffer_meta_open,
7623 	.read		= seq_read,
7624 	.llseek		= seq_lseek,
7625 	.release	= tracing_seq_release,
7626 };
7627 
7628 static const struct file_operations tracing_total_entries_fops = {
7629 	.open		= tracing_open_generic_tr,
7630 	.read		= tracing_total_entries_read,
7631 	.llseek		= generic_file_llseek,
7632 	.release	= tracing_release_generic_tr,
7633 };
7634 
7635 static const struct file_operations tracing_free_buffer_fops = {
7636 	.open		= tracing_open_generic_tr,
7637 	.write		= tracing_free_buffer_write,
7638 	.release	= tracing_free_buffer_release,
7639 };
7640 
7641 static const struct file_operations tracing_mark_fops = {
7642 	.open		= tracing_mark_open,
7643 	.write		= tracing_mark_write,
7644 	.release	= tracing_release_generic_tr,
7645 };
7646 
7647 static const struct file_operations tracing_mark_raw_fops = {
7648 	.open		= tracing_mark_open,
7649 	.write		= tracing_mark_raw_write,
7650 	.release	= tracing_release_generic_tr,
7651 };
7652 
7653 static const struct file_operations trace_clock_fops = {
7654 	.open		= tracing_clock_open,
7655 	.read		= seq_read,
7656 	.llseek		= seq_lseek,
7657 	.release	= tracing_single_release_tr,
7658 	.write		= tracing_clock_write,
7659 };
7660 
7661 static const struct file_operations trace_time_stamp_mode_fops = {
7662 	.open		= tracing_time_stamp_mode_open,
7663 	.read		= seq_read,
7664 	.llseek		= seq_lseek,
7665 	.release	= tracing_single_release_tr,
7666 };
7667 
7668 static const struct file_operations last_boot_fops = {
7669 	.open		= tracing_last_boot_open,
7670 	.read		= seq_read,
7671 	.llseek		= seq_lseek,
7672 	.release	= tracing_seq_release,
7673 };
7674 
7675 #ifdef CONFIG_TRACER_SNAPSHOT
7676 static const struct file_operations snapshot_fops = {
7677 	.open		= tracing_snapshot_open,
7678 	.read		= seq_read,
7679 	.write		= tracing_snapshot_write,
7680 	.llseek		= tracing_lseek,
7681 	.release	= tracing_snapshot_release,
7682 };
7683 
7684 static const struct file_operations snapshot_raw_fops = {
7685 	.open		= snapshot_raw_open,
7686 	.read		= tracing_buffers_read,
7687 	.release	= tracing_buffers_release,
7688 	.splice_read	= tracing_buffers_splice_read,
7689 };
7690 
7691 #endif /* CONFIG_TRACER_SNAPSHOT */
7692 
7693 /*
7694  * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7695  * @filp: The active open file structure
7696  * @ubuf: The userspace provided buffer to read the value from
7697  * @cnt: The maximum number of bytes to read
7698  * @ppos: The current "file" position
7699  *
7700  * This function implements the write interface for a struct trace_min_max_param.
7701  * The filp->private_data must point to a trace_min_max_param structure that
7702  * defines where to write the value, the min and the max acceptable values,
7703  * and a lock to protect the write.
7704  */
7705 static ssize_t
7706 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7707 {
7708 	struct trace_min_max_param *param = filp->private_data;
7709 	u64 val;
7710 	int err;
7711 
7712 	if (!param)
7713 		return -EFAULT;
7714 
7715 	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7716 	if (err)
7717 		return err;
7718 
7719 	if (param->lock)
7720 		mutex_lock(param->lock);
7721 
7722 	if (param->min && val < *param->min)
7723 		err = -EINVAL;
7724 
7725 	if (param->max && val > *param->max)
7726 		err = -EINVAL;
7727 
7728 	if (!err)
7729 		*param->val = val;
7730 
7731 	if (param->lock)
7732 		mutex_unlock(param->lock);
7733 
7734 	if (err)
7735 		return err;
7736 
7737 	return cnt;
7738 }
7739 
7740 /*
7741  * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7742  * @filp: The active open file structure
7743  * @ubuf: The userspace provided buffer to read value into
7744  * @cnt: The maximum number of bytes to read
7745  * @ppos: The current "file" position
7746  *
7747  * This function implements the read interface for a struct trace_min_max_param.
7748  * The filp->private_data must point to a trace_min_max_param struct with valid
7749  * data.
7750  */
7751 static ssize_t
7752 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7753 {
7754 	struct trace_min_max_param *param = filp->private_data;
7755 	char buf[U64_STR_SIZE];
7756 	int len;
7757 	u64 val;
7758 
7759 	if (!param)
7760 		return -EFAULT;
7761 
7762 	val = *param->val;
7763 
7764 	if (cnt > sizeof(buf))
7765 		cnt = sizeof(buf);
7766 
7767 	len = snprintf(buf, sizeof(buf), "%llu\n", val);
7768 
7769 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7770 }
7771 
7772 const struct file_operations trace_min_max_fops = {
7773 	.open		= tracing_open_generic,
7774 	.read		= trace_min_max_read,
7775 	.write		= trace_min_max_write,
7776 };
7777 
7778 #define TRACING_LOG_ERRS_MAX	8
7779 #define TRACING_LOG_LOC_MAX	128
7780 
7781 #define CMD_PREFIX "  Command: "
7782 
7783 struct err_info {
7784 	const char	**errs;	/* ptr to loc-specific array of err strings */
7785 	u8		type;	/* index into errs -> specific err string */
7786 	u16		pos;	/* caret position */
7787 	u64		ts;
7788 };
7789 
7790 struct tracing_log_err {
7791 	struct list_head	list;
7792 	struct err_info		info;
7793 	char			loc[TRACING_LOG_LOC_MAX]; /* err location */
7794 	char			*cmd;                     /* what caused err */
7795 };
7796 
7797 static DEFINE_MUTEX(tracing_err_log_lock);
7798 
7799 static struct tracing_log_err *alloc_tracing_log_err(int len)
7800 {
7801 	struct tracing_log_err *err;
7802 
7803 	err = kzalloc(sizeof(*err), GFP_KERNEL);
7804 	if (!err)
7805 		return ERR_PTR(-ENOMEM);
7806 
7807 	err->cmd = kzalloc(len, GFP_KERNEL);
7808 	if (!err->cmd) {
7809 		kfree(err);
7810 		return ERR_PTR(-ENOMEM);
7811 	}
7812 
7813 	return err;
7814 }
7815 
7816 static void free_tracing_log_err(struct tracing_log_err *err)
7817 {
7818 	kfree(err->cmd);
7819 	kfree(err);
7820 }
7821 
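/*
 * Get a log entry to fill in.  While fewer than TRACING_LOG_ERRS_MAX
 * entries exist a new one is allocated; after that the oldest entry on
 * tr->err_log is recycled and its cmd buffer replaced with one of @len.
 */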
7822 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7823 						   int len)
7824 {
7825 	struct tracing_log_err *err;
7826 	char *cmd;
7827 
7828 	if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7829 		err = alloc_tracing_log_err(len);
7830 		if (PTR_ERR(err) != -ENOMEM)
7831 			tr->n_err_log_entries++;
7832 
7833 		return err;
7834 	}
7835 	cmd = kzalloc(len, GFP_KERNEL);
7836 	if (!cmd)
7837 		return ERR_PTR(-ENOMEM);
7838 	err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7839 	kfree(err->cmd);
7840 	err->cmd = cmd;
7841 	list_del(&err->list);
7842 
7843 	return err;
7844 }
7845 
7846 /**
7847  * err_pos - find the position of a string within a command for error careting
7848  * @cmd: The tracing command that caused the error
7849  * @str: The string to position the caret at within @cmd
7850  *
7851  * Finds the position of the first occurrence of @str within @cmd.  The
7852  * return value can be passed to tracing_log_err() for caret placement
7853  * within @cmd.
7854  *
7855  * Returns the index within @cmd of the first occurrence of @str or 0
7856  * if @str was not found.
7857  */
7858 unsigned int err_pos(char *cmd, const char *str)
7859 {
7860 	char *found;
7861 
7862 	if (WARN_ON(!strlen(cmd)))
7863 		return 0;
7864 
7865 	found = strstr(cmd, str);
7866 	if (found)
7867 		return found - cmd;
7868 
7869 	return 0;
7870 }
7871 
7872 /**
7873  * tracing_log_err - write an error to the tracing error log
7874  * @tr: The associated trace array for the error (NULL for top level array)
7875  * @loc: A string describing where the error occurred
7876  * @cmd: The tracing command that caused the error
7877  * @errs: The array of loc-specific static error strings
7878  * @type: The index into errs[], which produces the specific static err string
7879  * @pos: The position the caret should be placed in the cmd
7880  *
7881  * Writes an error into tracing/error_log of the form:
7882  *
7883  * <loc>: error: <text>
7884  *   Command: <cmd>
7885  *              ^
7886  *
7887  * tracing/error_log is a small log file containing the last
7888  * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
7889  * unless there has been a tracing error, and the error log can be
7890  * cleared and have its memory freed by writing the empty string in
7891  * truncation mode to it i.e. echo > tracing/error_log.
7892  *
7893  * NOTE: the @errs array along with the @type param are used to
7894  * produce a static error string - this string is not copied and saved
7895  * when the error is logged - only a pointer to it is saved.  See
7896  * existing callers for examples of how static strings are typically
7897  * defined for use with tracing_log_err().
7898  */
7899 void tracing_log_err(struct trace_array *tr,
7900 		     const char *loc, const char *cmd,
7901 		     const char **errs, u8 type, u16 pos)
7902 {
7903 	struct tracing_log_err *err;
7904 	int len = 0;
7905 
7906 	if (!tr)
7907 		tr = &global_trace;
7908 
7909 	len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
7910 
7911 	guard(mutex)(&tracing_err_log_lock);
7912 
7913 	err = get_tracing_log_err(tr, len);
7914 	if (PTR_ERR(err) == -ENOMEM)
7915 		return;
7916 
7917 	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7918 	snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
7919 
7920 	err->info.errs = errs;
7921 	err->info.type = type;
7922 	err->info.pos = pos;
7923 	err->info.ts = local_clock();
7924 
7925 	list_add_tail(&err->list, &tr->err_log);
7926 }
7927 
7928 static void clear_tracing_err_log(struct trace_array *tr)
7929 {
7930 	struct tracing_log_err *err, *next;
7931 
7932 	mutex_lock(&tracing_err_log_lock);
7933 	list_for_each_entry_safe(err, next, &tr->err_log, list) {
7934 		list_del(&err->list);
7935 		free_tracing_log_err(err);
7936 	}
7937 
7938 	tr->n_err_log_entries = 0;
7939 	mutex_unlock(&tracing_err_log_lock);
7940 }
7941 
7942 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7943 {
7944 	struct trace_array *tr = m->private;
7945 
7946 	mutex_lock(&tracing_err_log_lock);
7947 
7948 	return seq_list_start(&tr->err_log, *pos);
7949 }
7950 
7951 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7952 {
7953 	struct trace_array *tr = m->private;
7954 
7955 	return seq_list_next(v, &tr->err_log, pos);
7956 }
7957 
7958 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7959 {
7960 	mutex_unlock(&tracing_err_log_lock);
7961 }
7962 
7963 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
7964 {
7965 	u16 i;
7966 
7967 	for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7968 		seq_putc(m, ' ');
7969 	for (i = 0; i < pos; i++)
7970 		seq_putc(m, ' ');
7971 	seq_puts(m, "^\n");
7972 }
7973 
7974 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7975 {
7976 	struct tracing_log_err *err = v;
7977 
7978 	if (err) {
7979 		const char *err_text = err->info.errs[err->info.type];
7980 		u64 sec = err->info.ts;
7981 		u32 nsec;
7982 
7983 		nsec = do_div(sec, NSEC_PER_SEC);
7984 		seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7985 			   err->loc, err_text);
7986 		seq_printf(m, "%s", err->cmd);
7987 		tracing_err_log_show_pos(m, err->info.pos);
7988 	}
7989 
7990 	return 0;
7991 }
7992 
7993 static const struct seq_operations tracing_err_log_seq_ops = {
7994 	.start  = tracing_err_log_seq_start,
7995 	.next   = tracing_err_log_seq_next,
7996 	.stop   = tracing_err_log_seq_stop,
7997 	.show   = tracing_err_log_seq_show
7998 };
7999 
8000 static int tracing_err_log_open(struct inode *inode, struct file *file)
8001 {
8002 	struct trace_array *tr = inode->i_private;
8003 	int ret = 0;
8004 
8005 	ret = tracing_check_open_get_tr(tr);
8006 	if (ret)
8007 		return ret;
8008 
8009 	/* If this file was opened for write, then erase contents */
8010 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
8011 		clear_tracing_err_log(tr);
8012 
8013 	if (file->f_mode & FMODE_READ) {
8014 		ret = seq_open(file, &tracing_err_log_seq_ops);
8015 		if (!ret) {
8016 			struct seq_file *m = file->private_data;
8017 			m->private = tr;
8018 		} else {
8019 			trace_array_put(tr);
8020 		}
8021 	}
8022 	return ret;
8023 }
8024 
8025 static ssize_t tracing_err_log_write(struct file *file,
8026 				     const char __user *buffer,
8027 				     size_t count, loff_t *ppos)
8028 {
8029 	return count;
8030 }
8031 
8032 static int tracing_err_log_release(struct inode *inode, struct file *file)
8033 {
8034 	struct trace_array *tr = inode->i_private;
8035 
8036 	trace_array_put(tr);
8037 
8038 	if (file->f_mode & FMODE_READ)
8039 		seq_release(inode, file);
8040 
8041 	return 0;
8042 }
8043 
8044 static const struct file_operations tracing_err_log_fops = {
8045 	.open           = tracing_err_log_open,
8046 	.write		= tracing_err_log_write,
8047 	.read           = seq_read,
8048 	.llseek         = tracing_lseek,
8049 	.release        = tracing_err_log_release,
8050 };
8051 
8052 static int tracing_buffers_open(struct inode *inode, struct file *filp)
8053 {
8054 	struct trace_array *tr = inode->i_private;
8055 	struct ftrace_buffer_info *info;
8056 	int ret;
8057 
8058 	ret = tracing_check_open_get_tr(tr);
8059 	if (ret)
8060 		return ret;
8061 
8062 	info = kvzalloc(sizeof(*info), GFP_KERNEL);
8063 	if (!info) {
8064 		trace_array_put(tr);
8065 		return -ENOMEM;
8066 	}
8067 
8068 	mutex_lock(&trace_types_lock);
8069 
8070 	info->iter.tr		= tr;
8071 	info->iter.cpu_file	= tracing_get_cpu(inode);
8072 	info->iter.trace	= tr->current_trace;
8073 	info->iter.array_buffer = &tr->array_buffer;
8074 	info->spare		= NULL;
8075 	/* Force reading ring buffer for first read */
8076 	info->read		= (unsigned int)-1;
8077 
8078 	filp->private_data = info;
8079 
8080 	tr->trace_ref++;
8081 
8082 	mutex_unlock(&trace_types_lock);
8083 
8084 	ret = nonseekable_open(inode, filp);
8085 	if (ret < 0)
8086 		trace_array_put(tr);
8087 
8088 	return ret;
8089 }
8090 
8091 static __poll_t
8092 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8093 {
8094 	struct ftrace_buffer_info *info = filp->private_data;
8095 	struct trace_iterator *iter = &info->iter;
8096 
8097 	return trace_poll(iter, filp, poll_table);
8098 }
8099 
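/*
 * Read raw sub-buffer data (trace_pipe_raw).  A "spare" sub-buffer page is
 * allocated on first use (and re-allocated if the sub-buffer size changes),
 * filled via ring_buffer_read_page(), and copied out to user space.
 * info->read tracks how much of the current spare page has been consumed.
 */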
8100 static ssize_t
8101 tracing_buffers_read(struct file *filp, char __user *ubuf,
8102 		     size_t count, loff_t *ppos)
8103 {
8104 	struct ftrace_buffer_info *info = filp->private_data;
8105 	struct trace_iterator *iter = &info->iter;
8106 	void *trace_data;
8107 	int page_size;
8108 	ssize_t ret = 0;
8109 	ssize_t size;
8110 
8111 	if (!count)
8112 		return 0;
8113 
8114 #ifdef CONFIG_TRACER_MAX_TRACE
8115 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8116 		return -EBUSY;
8117 #endif
8118 
8119 	page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
8120 
8121 	/* Make sure the spare matches the current sub buffer size */
8122 	if (info->spare) {
8123 		if (page_size != info->spare_size) {
8124 			ring_buffer_free_read_page(iter->array_buffer->buffer,
8125 						   info->spare_cpu, info->spare);
8126 			info->spare = NULL;
8127 		}
8128 	}
8129 
8130 	if (!info->spare) {
8131 		info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8132 							  iter->cpu_file);
8133 		if (IS_ERR(info->spare)) {
8134 			ret = PTR_ERR(info->spare);
8135 			info->spare = NULL;
8136 		} else {
8137 			info->spare_cpu = iter->cpu_file;
8138 			info->spare_size = page_size;
8139 		}
8140 	}
8141 	if (!info->spare)
8142 		return ret;
8143 
8144 	/* Do we have previous read data to read? */
8145 	if (info->read < page_size)
8146 		goto read;
8147 
8148  again:
8149 	trace_access_lock(iter->cpu_file);
8150 	ret = ring_buffer_read_page(iter->array_buffer->buffer,
8151 				    info->spare,
8152 				    count,
8153 				    iter->cpu_file, 0);
8154 	trace_access_unlock(iter->cpu_file);
8155 
8156 	if (ret < 0) {
8157 		if (trace_empty(iter) && !iter->closed) {
8158 			if ((filp->f_flags & O_NONBLOCK))
8159 				return -EAGAIN;
8160 
8161 			ret = wait_on_pipe(iter, 0);
8162 			if (ret)
8163 				return ret;
8164 
8165 			goto again;
8166 		}
8167 		return 0;
8168 	}
8169 
8170 	info->read = 0;
8171  read:
8172 	size = page_size - info->read;
8173 	if (size > count)
8174 		size = count;
8175 	trace_data = ring_buffer_read_page_data(info->spare);
8176 	ret = copy_to_user(ubuf, trace_data + info->read, size);
8177 	if (ret == size)
8178 		return -EFAULT;
8179 
8180 	size -= ret;
8181 
8182 	*ppos += size;
8183 	info->read += size;
8184 
8185 	return size;
8186 }
8187 
8188 static int tracing_buffers_flush(struct file *file, fl_owner_t id)
8189 {
8190 	struct ftrace_buffer_info *info = file->private_data;
8191 	struct trace_iterator *iter = &info->iter;
8192 
8193 	iter->closed = true;
8194 	/* Make sure the waiters see the new wait_index */
8195 	(void)atomic_fetch_inc_release(&iter->wait_index);
8196 
8197 	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8198 
8199 	return 0;
8200 }
8201 
8202 static int tracing_buffers_release(struct inode *inode, struct file *file)
8203 {
8204 	struct ftrace_buffer_info *info = file->private_data;
8205 	struct trace_iterator *iter = &info->iter;
8206 
8207 	mutex_lock(&trace_types_lock);
8208 
8209 	iter->tr->trace_ref--;
8210 
8211 	__trace_array_put(iter->tr);
8212 
8213 	if (info->spare)
8214 		ring_buffer_free_read_page(iter->array_buffer->buffer,
8215 					   info->spare_cpu, info->spare);
8216 	kvfree(info);
8217 
8218 	mutex_unlock(&trace_types_lock);
8219 
8220 	return 0;
8221 }
8222 
8223 struct buffer_ref {
8224 	struct trace_buffer	*buffer;
8225 	void			*page;
8226 	int			cpu;
8227 	refcount_t		refcount;
8228 };
8229 
8230 static void buffer_ref_release(struct buffer_ref *ref)
8231 {
8232 	if (!refcount_dec_and_test(&ref->refcount))
8233 		return;
8234 	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8235 	kfree(ref);
8236 }
8237 
8238 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8239 				    struct pipe_buffer *buf)
8240 {
8241 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8242 
8243 	buffer_ref_release(ref);
8244 	buf->private = 0;
8245 }
8246 
8247 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8248 				struct pipe_buffer *buf)
8249 {
8250 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8251 
8252 	if (refcount_read(&ref->refcount) > INT_MAX/2)
8253 		return false;
8254 
8255 	refcount_inc(&ref->refcount);
8256 	return true;
8257 }
8258 
8259 /* Pipe buffer operations for a buffer. */
8260 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8261 	.release		= buffer_pipe_buf_release,
8262 	.get			= buffer_pipe_buf_get,
8263 };
8264 
8265 /*
8266  * Callback from splice_to_pipe(), if we need to release some pages
8267  * at the end of the spd in case we error'ed out in filling the pipe.
8268  * at the end of the spd in case we errored out while filling the pipe.
8269 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8270 {
8271 	struct buffer_ref *ref =
8272 		(struct buffer_ref *)spd->partial[i].private;
8273 
8274 	buffer_ref_release(ref);
8275 	spd->partial[i].private = 0;
8276 }
8277 
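/*
 * Splice raw sub-buffer pages into a pipe without copying.  Each page is
 * wrapped in a refcounted buffer_ref so it is returned to the ring buffer
 * only when the last pipe reference is released (see buffer_ref_release()).
 */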
8278 static ssize_t
8279 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8280 			    struct pipe_inode_info *pipe, size_t len,
8281 			    unsigned int flags)
8282 {
8283 	struct ftrace_buffer_info *info = file->private_data;
8284 	struct trace_iterator *iter = &info->iter;
8285 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
8286 	struct page *pages_def[PIPE_DEF_BUFFERS];
8287 	struct splice_pipe_desc spd = {
8288 		.pages		= pages_def,
8289 		.partial	= partial_def,
8290 		.nr_pages_max	= PIPE_DEF_BUFFERS,
8291 		.ops		= &buffer_pipe_buf_ops,
8292 		.spd_release	= buffer_spd_release,
8293 	};
8294 	struct buffer_ref *ref;
8295 	bool woken = false;
8296 	int page_size;
8297 	int entries, i;
8298 	ssize_t ret = 0;
8299 
8300 #ifdef CONFIG_TRACER_MAX_TRACE
8301 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8302 		return -EBUSY;
8303 #endif
8304 
8305 	page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
8306 	if (*ppos & (page_size - 1))
8307 		return -EINVAL;
8308 
8309 	if (len & (page_size - 1)) {
8310 		if (len < page_size)
8311 			return -EINVAL;
8312 		len &= (~(page_size - 1));
8313 	}
8314 
8315 	if (splice_grow_spd(pipe, &spd))
8316 		return -ENOMEM;
8317 
8318  again:
8319 	trace_access_lock(iter->cpu_file);
8320 	entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8321 
8322 	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) {
8323 		struct page *page;
8324 		int r;
8325 
8326 		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8327 		if (!ref) {
8328 			ret = -ENOMEM;
8329 			break;
8330 		}
8331 
8332 		refcount_set(&ref->refcount, 1);
8333 		ref->buffer = iter->array_buffer->buffer;
8334 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8335 		if (IS_ERR(ref->page)) {
8336 			ret = PTR_ERR(ref->page);
8337 			ref->page = NULL;
8338 			kfree(ref);
8339 			break;
8340 		}
8341 		ref->cpu = iter->cpu_file;
8342 
8343 		r = ring_buffer_read_page(ref->buffer, ref->page,
8344 					  len, iter->cpu_file, 1);
8345 		if (r < 0) {
8346 			ring_buffer_free_read_page(ref->buffer, ref->cpu,
8347 						   ref->page);
8348 			kfree(ref);
8349 			break;
8350 		}
8351 
8352 		page = virt_to_page(ring_buffer_read_page_data(ref->page));
8353 
8354 		spd.pages[i] = page;
8355 		spd.partial[i].len = page_size;
8356 		spd.partial[i].offset = 0;
8357 		spd.partial[i].private = (unsigned long)ref;
8358 		spd.nr_pages++;
8359 		*ppos += page_size;
8360 
8361 		entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8362 	}
8363 
8364 	trace_access_unlock(iter->cpu_file);
8365 	spd.nr_pages = i;
8366 
8367 	/* did we read anything? */
8368 	if (!spd.nr_pages) {
8369 
8370 		if (ret)
8371 			goto out;
8372 
8373 		if (woken)
8374 			goto out;
8375 
8376 		ret = -EAGAIN;
8377 		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8378 			goto out;
8379 
8380 		ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
8381 		if (ret)
8382 			goto out;
8383 
8384 		/* No need to wait after waking up when tracing is off */
8385 		if (!tracer_tracing_is_on(iter->tr))
8386 			goto out;
8387 
8388 		/* Iterate one more time to collect any new data then exit */
8389 		woken = true;
8390 
8391 		goto again;
8392 	}
8393 
8394 	ret = splice_to_pipe(pipe, &spd);
8395 out:
8396 	splice_shrink_spd(&spd);
8397 
8398 	return ret;
8399 }
8400 
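/*
 * ioctl for the ring buffer file: TRACE_MMAP_IOCTL_GET_READER (optionally
 * waiting for data on blocking opens) advances the reader page of a
 * memory-mapped buffer; a cmd of 0 just wakes up all waiters.
 */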
8401 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8402 {
8403 	struct ftrace_buffer_info *info = file->private_data;
8404 	struct trace_iterator *iter = &info->iter;
8405 	int err;
8406 
8407 	if (cmd == TRACE_MMAP_IOCTL_GET_READER) {
8408 		if (!(file->f_flags & O_NONBLOCK)) {
8409 			err = ring_buffer_wait(iter->array_buffer->buffer,
8410 					       iter->cpu_file,
8411 					       iter->tr->buffer_percent,
8412 					       NULL, NULL);
8413 			if (err)
8414 				return err;
8415 		}
8416 
8417 		return ring_buffer_map_get_reader(iter->array_buffer->buffer,
8418 						  iter->cpu_file);
8419 	} else if (cmd) {
8420 		return -ENOTTY;
8421 	}
8422 
8423 	/*
8424 	 * An ioctl call with cmd 0 to the ring buffer file will wake up all
8425 	 * waiters.
8426 	 */
8427 	mutex_lock(&trace_types_lock);
8428 
8429 	/* Make sure the waiters see the new wait_index */
8430 	(void)atomic_fetch_inc_release(&iter->wait_index);
8431 
8432 	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8433 
8434 	mutex_unlock(&trace_types_lock);
8435 	return 0;
8436 }
8437 
8438 #ifdef CONFIG_TRACER_MAX_TRACE
8439 static int get_snapshot_map(struct trace_array *tr)
8440 {
8441 	int err = 0;
8442 
8443 	/*
8444 	 * Called with mmap_lock held. lockdep would be unhappy if we now
8445 	 * took trace_types_lock. Instead use the specific
8446 	 * snapshot_trigger_lock.
8447 	 */
8448 	spin_lock(&tr->snapshot_trigger_lock);
8449 
8450 	if (tr->snapshot || tr->mapped == UINT_MAX)
8451 		err = -EBUSY;
8452 	else
8453 		tr->mapped++;
8454 
8455 	spin_unlock(&tr->snapshot_trigger_lock);
8456 
8457 	/* Wait for update_max_tr() to observe iter->tr->mapped */
8458 	if (tr->mapped == 1)
8459 		synchronize_rcu();
8460 
8461 	return err;
8462 
8463 }
8464 static void put_snapshot_map(struct trace_array *tr)
8465 {
8466 	spin_lock(&tr->snapshot_trigger_lock);
8467 	if (!WARN_ON(!tr->mapped))
8468 		tr->mapped--;
8469 	spin_unlock(&tr->snapshot_trigger_lock);
8470 }
8471 #else
8472 static inline int get_snapshot_map(struct trace_array *tr) { return 0; }
8473 static inline void put_snapshot_map(struct trace_array *tr) { }
8474 #endif
8475 
8476 static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
8477 {
8478 	struct ftrace_buffer_info *info = vma->vm_file->private_data;
8479 	struct trace_iterator *iter = &info->iter;
8480 
8481 	WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file));
8482 	put_snapshot_map(iter->tr);
8483 }
8484 
8485 static const struct vm_operations_struct tracing_buffers_vmops = {
8486 	.close		= tracing_buffers_mmap_close,
8487 };
8488 
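/*
 * mmap() a per-CPU ring buffer into user space.  Not supported for
 * memmap'ed or boot-mapped buffers.  get_snapshot_map() takes a reference
 * (tr->mapped) so snapshot swaps are refused while a mapping exists; the
 * vm close callback drops that reference and unmaps the buffer.
 */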
8489 static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
8490 {
8491 	struct ftrace_buffer_info *info = filp->private_data;
8492 	struct trace_iterator *iter = &info->iter;
8493 	int ret = 0;
8494 
8495 	/* A memmap'ed buffer is not supported for user space mmap */
8496 	if (iter->tr->flags & TRACE_ARRAY_FL_MEMMAP)
8497 		return -ENODEV;
8498 
8499 	/* Currently the boot mapped buffer is not supported for mmap */
8500 	if (iter->tr->flags & TRACE_ARRAY_FL_BOOT)
8501 		return -ENODEV;
8502 
8503 	ret = get_snapshot_map(iter->tr);
8504 	if (ret)
8505 		return ret;
8506 
8507 	ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file, vma);
8508 	if (ret)
8509 		put_snapshot_map(iter->tr);
8510 
8511 	vma->vm_ops = &tracing_buffers_vmops;
8512 
8513 	return ret;
8514 }
8515 
8516 static const struct file_operations tracing_buffers_fops = {
8517 	.open		= tracing_buffers_open,
8518 	.read		= tracing_buffers_read,
8519 	.poll		= tracing_buffers_poll,
8520 	.release	= tracing_buffers_release,
8521 	.flush		= tracing_buffers_flush,
8522 	.splice_read	= tracing_buffers_splice_read,
8523 	.unlocked_ioctl = tracing_buffers_ioctl,
8524 	.mmap		= tracing_buffers_mmap,
8525 };
8526 
8527 static ssize_t
8528 tracing_stats_read(struct file *filp, char __user *ubuf,
8529 		   size_t count, loff_t *ppos)
8530 {
8531 	struct inode *inode = file_inode(filp);
8532 	struct trace_array *tr = inode->i_private;
8533 	struct array_buffer *trace_buf = &tr->array_buffer;
8534 	int cpu = tracing_get_cpu(inode);
8535 	struct trace_seq *s;
8536 	unsigned long cnt;
8537 	unsigned long long t;
8538 	unsigned long usec_rem;
8539 
8540 	s = kmalloc(sizeof(*s), GFP_KERNEL);
8541 	if (!s)
8542 		return -ENOMEM;
8543 
8544 	trace_seq_init(s);
8545 
8546 	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8547 	trace_seq_printf(s, "entries: %ld\n", cnt);
8548 
8549 	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8550 	trace_seq_printf(s, "overrun: %ld\n", cnt);
8551 
8552 	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8553 	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8554 
8555 	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8556 	trace_seq_printf(s, "bytes: %ld\n", cnt);
8557 
8558 	if (trace_clocks[tr->clock_id].in_ns) {
8559 		/* local or global for trace_clock */
8560 		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8561 		usec_rem = do_div(t, USEC_PER_SEC);
8562 		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8563 								t, usec_rem);
8564 
8565 		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8566 		usec_rem = do_div(t, USEC_PER_SEC);
8567 		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8568 	} else {
8569 		/* counter or tsc mode for trace_clock */
8570 		trace_seq_printf(s, "oldest event ts: %llu\n",
8571 				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8572 
8573 		trace_seq_printf(s, "now ts: %llu\n",
8574 				ring_buffer_time_stamp(trace_buf->buffer));
8575 	}
8576 
8577 	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8578 	trace_seq_printf(s, "dropped events: %ld\n", cnt);
8579 
8580 	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8581 	trace_seq_printf(s, "read events: %ld\n", cnt);
8582 
8583 	count = simple_read_from_buffer(ubuf, count, ppos,
8584 					s->buffer, trace_seq_used(s));
8585 
8586 	kfree(s);
8587 
8588 	return count;
8589 }
8590 
8591 static const struct file_operations tracing_stats_fops = {
8592 	.open		= tracing_open_generic_tr,
8593 	.read		= tracing_stats_read,
8594 	.llseek		= generic_file_llseek,
8595 	.release	= tracing_release_generic_tr,
8596 };
8597 
8598 #ifdef CONFIG_DYNAMIC_FTRACE
8599 
8600 static ssize_t
8601 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8602 		  size_t cnt, loff_t *ppos)
8603 {
8604 	ssize_t ret;
8605 	char *buf;
8606 	int r;
8607 
8608 	/* 512 should be plenty to hold the amount needed */
8609 #define DYN_INFO_BUF_SIZE	512
8610 
8611 	buf = kmalloc(DYN_INFO_BUF_SIZE, GFP_KERNEL);
8612 	if (!buf)
8613 		return -ENOMEM;
8614 
8615 	r = scnprintf(buf, DYN_INFO_BUF_SIZE,
8616 		      "%ld pages:%ld groups: %ld\n"
8617 		      "ftrace boot update time = %llu (ns)\n"
8618 		      "ftrace module total update time = %llu (ns)\n",
8619 		      ftrace_update_tot_cnt,
8620 		      ftrace_number_of_pages,
8621 		      ftrace_number_of_groups,
8622 		      ftrace_update_time,
8623 		      ftrace_total_mod_time);
8624 
8625 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8626 	kfree(buf);
8627 	return ret;
8628 }
8629 
8630 static const struct file_operations tracing_dyn_info_fops = {
8631 	.open		= tracing_open_generic,
8632 	.read		= tracing_read_dyn_info,
8633 	.llseek		= generic_file_llseek,
8634 };
8635 #endif /* CONFIG_DYNAMIC_FTRACE */
8636 
8637 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8638 static void
8639 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8640 		struct trace_array *tr, struct ftrace_probe_ops *ops,
8641 		void *data)
8642 {
8643 	tracing_snapshot_instance(tr);
8644 }
8645 
8646 static void
8647 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8648 		      struct trace_array *tr, struct ftrace_probe_ops *ops,
8649 		      void *data)
8650 {
8651 	struct ftrace_func_mapper *mapper = data;
8652 	long *count = NULL;
8653 
8654 	if (mapper)
8655 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8656 
8657 	if (count) {
8658 
8659 		if (*count <= 0)
8660 			return;
8661 
8662 		(*count)--;
8663 	}
8664 
8665 	tracing_snapshot_instance(tr);
8666 }
8667 
8668 static int
8669 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8670 		      struct ftrace_probe_ops *ops, void *data)
8671 {
8672 	struct ftrace_func_mapper *mapper = data;
8673 	long *count = NULL;
8674 
8675 	seq_printf(m, "%ps:", (void *)ip);
8676 
8677 	seq_puts(m, "snapshot");
8678 
8679 	if (mapper)
8680 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8681 
8682 	if (count)
8683 		seq_printf(m, ":count=%ld\n", *count);
8684 	else
8685 		seq_puts(m, ":unlimited\n");
8686 
8687 	return 0;
8688 }
8689 
8690 static int
8691 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8692 		     unsigned long ip, void *init_data, void **data)
8693 {
8694 	struct ftrace_func_mapper *mapper = *data;
8695 
8696 	if (!mapper) {
8697 		mapper = allocate_ftrace_func_mapper();
8698 		if (!mapper)
8699 			return -ENOMEM;
8700 		*data = mapper;
8701 	}
8702 
8703 	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8704 }
8705 
8706 static void
8707 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8708 		     unsigned long ip, void *data)
8709 {
8710 	struct ftrace_func_mapper *mapper = data;
8711 
8712 	if (!ip) {
8713 		if (!mapper)
8714 			return;
8715 		free_ftrace_func_mapper(mapper, NULL);
8716 		return;
8717 	}
8718 
8719 	ftrace_func_mapper_remove_ip(mapper, ip);
8720 }
8721 
8722 static struct ftrace_probe_ops snapshot_probe_ops = {
8723 	.func			= ftrace_snapshot,
8724 	.print			= ftrace_snapshot_print,
8725 };
8726 
8727 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8728 	.func			= ftrace_count_snapshot,
8729 	.print			= ftrace_snapshot_print,
8730 	.init			= ftrace_snapshot_init,
8731 	.free			= ftrace_snapshot_free,
8732 };
8733 
8734 static int
8735 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8736 			       char *glob, char *cmd, char *param, int enable)
8737 {
8738 	struct ftrace_probe_ops *ops;
8739 	void *count = (void *)-1;
8740 	char *number;
8741 	int ret;
8742 
8743 	if (!tr)
8744 		return -ENODEV;
8745 
8746 	/* hash funcs only work with set_ftrace_filter */
8747 	if (!enable)
8748 		return -EINVAL;
8749 
8750 	ops = param ? &snapshot_count_probe_ops :  &snapshot_probe_ops;
8751 
8752 	if (glob[0] == '!') {
8753 		ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
8754 		if (!ret)
8755 			tracing_disarm_snapshot(tr);
8756 
8757 		return ret;
8758 	}
8759 
8760 	if (!param)
8761 		goto out_reg;
8762 
8763 	number = strsep(&param, ":");
8764 
8765 	if (!strlen(number))
8766 		goto out_reg;
8767 
8768 	/*
8769 	 * We use the callback data field (which is a pointer)
8770 	 * as our counter.
8771 	 */
8772 	ret = kstrtoul(number, 0, (unsigned long *)&count);
8773 	if (ret)
8774 		return ret;
8775 
8776  out_reg:
8777 	ret = tracing_arm_snapshot(tr);
8778 	if (ret < 0)
8779 		goto out;
8780 
8781 	ret = register_ftrace_function_probe(glob, tr, ops, count);
8782 	if (ret < 0)
8783 		tracing_disarm_snapshot(tr);
8784  out:
8785 	return ret < 0 ? ret : 0;
8786 }
8787 
8788 static struct ftrace_func_command ftrace_snapshot_cmd = {
8789 	.name			= "snapshot",
8790 	.func			= ftrace_trace_snapshot_callback,
8791 };
8792 
8793 static __init int register_snapshot_cmd(void)
8794 {
8795 	return register_ftrace_command(&ftrace_snapshot_cmd);
8796 }
8797 #else
8798 static inline __init int register_snapshot_cmd(void) { return 0; }
8799 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8800 
8801 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8802 {
8803 	if (WARN_ON(!tr->dir))
8804 		return ERR_PTR(-ENODEV);
8805 
8806 	/* Top directory uses NULL as the parent */
8807 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8808 		return NULL;
8809 
8810 	/* All sub buffers have a descriptor */
8811 	return tr->dir;
8812 }
8813 
8814 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8815 {
8816 	struct dentry *d_tracer;
8817 
8818 	if (tr->percpu_dir)
8819 		return tr->percpu_dir;
8820 
8821 	d_tracer = tracing_get_dentry(tr);
8822 	if (IS_ERR(d_tracer))
8823 		return NULL;
8824 
8825 	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8826 
8827 	MEM_FAIL(!tr->percpu_dir,
8828 		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8829 
8830 	return tr->percpu_dir;
8831 }
8832 
8833 static struct dentry *
8834 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8835 		      void *data, long cpu, const struct file_operations *fops)
8836 {
8837 	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8838 
8839 	if (ret) /* See tracing_get_cpu() */
8840 		d_inode(ret)->i_cdev = (void *)(cpu + 1);
8841 	return ret;
8842 }
8843 
8844 static void
8845 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8846 {
8847 	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8848 	struct dentry *d_cpu;
8849 	char cpu_dir[30]; /* 30 characters should be more than enough */
8850 
8851 	if (!d_percpu)
8852 		return;
8853 
8854 	snprintf(cpu_dir, 30, "cpu%ld", cpu);
8855 	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8856 	if (!d_cpu) {
8857 		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8858 		return;
8859 	}
8860 
8861 	/* per cpu trace_pipe */
8862 	trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8863 				tr, cpu, &tracing_pipe_fops);
8864 
8865 	/* per cpu trace */
8866 	trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8867 				tr, cpu, &tracing_fops);
8868 
8869 	trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8870 				tr, cpu, &tracing_buffers_fops);
8871 
8872 	trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8873 				tr, cpu, &tracing_stats_fops);
8874 
8875 	trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8876 				tr, cpu, &tracing_entries_fops);
8877 
8878 	if (tr->range_addr_start)
8879 		trace_create_cpu_file("buffer_meta", TRACE_MODE_READ, d_cpu,
8880 				      tr, cpu, &tracing_buffer_meta_fops);
8881 #ifdef CONFIG_TRACER_SNAPSHOT
8882 	if (!tr->range_addr_start) {
8883 		trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8884 				      tr, cpu, &snapshot_fops);
8885 
8886 		trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8887 				      tr, cpu, &snapshot_raw_fops);
8888 	}
8889 #endif
8890 }
8891 
8892 #ifdef CONFIG_FTRACE_SELFTEST
8893 /* Let selftest have access to static functions in this file */
8894 #include "trace_selftest.c"
8895 #endif
8896 
8897 static ssize_t
8898 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8899 			loff_t *ppos)
8900 {
8901 	struct trace_option_dentry *topt = filp->private_data;
8902 	char *buf;
8903 
8904 	if (topt->flags->val & topt->opt->bit)
8905 		buf = "1\n";
8906 	else
8907 		buf = "0\n";
8908 
8909 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8910 }
8911 
8912 static ssize_t
8913 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8914 			 loff_t *ppos)
8915 {
8916 	struct trace_option_dentry *topt = filp->private_data;
8917 	unsigned long val;
8918 	int ret;
8919 
8920 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8921 	if (ret)
8922 		return ret;
8923 
8924 	if (val != 0 && val != 1)
8925 		return -EINVAL;
8926 
8927 	if (!!(topt->flags->val & topt->opt->bit) != val) {
8928 		mutex_lock(&trace_types_lock);
8929 		ret = __set_tracer_option(topt->tr, topt->flags,
8930 					  topt->opt, !val);
8931 		mutex_unlock(&trace_types_lock);
8932 		if (ret)
8933 			return ret;
8934 	}
8935 
8936 	*ppos += cnt;
8937 
8938 	return cnt;
8939 }
8940 
8941 static int tracing_open_options(struct inode *inode, struct file *filp)
8942 {
8943 	struct trace_option_dentry *topt = inode->i_private;
8944 	int ret;
8945 
8946 	ret = tracing_check_open_get_tr(topt->tr);
8947 	if (ret)
8948 		return ret;
8949 
8950 	filp->private_data = inode->i_private;
8951 	return 0;
8952 }
8953 
8954 static int tracing_release_options(struct inode *inode, struct file *file)
8955 {
8956 	struct trace_option_dentry *topt = file->private_data;
8957 
8958 	trace_array_put(topt->tr);
8959 	return 0;
8960 }
8961 
8962 static const struct file_operations trace_options_fops = {
8963 	.open = tracing_open_options,
8964 	.read = trace_options_read,
8965 	.write = trace_options_write,
8966 	.llseek	= generic_file_llseek,
8967 	.release = tracing_release_options,
8968 };
8969 
8970 /*
8971  * In order to pass in both the trace_array descriptor as well as the index
8972  * to the flag that the trace option file represents, the trace_array
8973  * has a character array of trace_flags_index[], which holds the index
8974  * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8975  * The address of this character array is passed to the flag option file
8976  * read/write callbacks.
8977  *
8978  * In order to extract both the index and the trace_array descriptor,
8979  * get_tr_index() uses the following algorithm.
8980  *
8981  *   idx = *ptr;
8982  *
8983  * Since each element of the array holds its own index value (remember
8984  * index[1] == 1), dereferencing the pointer yields the flag's index.
8985  *
8986  * Then to get the trace_array descriptor, by subtracting that index
8987  * from the ptr, we get to the start of the index itself.
8988  *
8989  *   ptr - idx == &index[0]
8990  *
8991  * Then a simple container_of() from that pointer gets us to the
8992  * trace_array descriptor.
8993  */
8994 static void get_tr_index(void *data, struct trace_array **ptr,
8995 			 unsigned int *pindex)
8996 {
8997 	*pindex = *(unsigned char *)data;
8998 
8999 	*ptr = container_of(data - *pindex, struct trace_array,
9000 			    trace_flags_index);
9001 }
9002 
9003 static ssize_t
9004 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
9005 			loff_t *ppos)
9006 {
9007 	void *tr_index = filp->private_data;
9008 	struct trace_array *tr;
9009 	unsigned int index;
9010 	char *buf;
9011 
9012 	get_tr_index(tr_index, &tr, &index);
9013 
9014 	if (tr->trace_flags & (1 << index))
9015 		buf = "1\n";
9016 	else
9017 		buf = "0\n";
9018 
9019 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
9020 }
9021 
9022 static ssize_t
9023 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
9024 			 loff_t *ppos)
9025 {
9026 	void *tr_index = filp->private_data;
9027 	struct trace_array *tr;
9028 	unsigned int index;
9029 	unsigned long val;
9030 	int ret;
9031 
9032 	get_tr_index(tr_index, &tr, &index);
9033 
9034 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9035 	if (ret)
9036 		return ret;
9037 
9038 	if (val != 0 && val != 1)
9039 		return -EINVAL;
9040 
9041 	mutex_lock(&event_mutex);
9042 	mutex_lock(&trace_types_lock);
9043 	ret = set_tracer_flag(tr, 1 << index, val);
9044 	mutex_unlock(&trace_types_lock);
9045 	mutex_unlock(&event_mutex);
9046 
9047 	if (ret < 0)
9048 		return ret;
9049 
9050 	*ppos += cnt;
9051 
9052 	return cnt;
9053 }
9054 
9055 static const struct file_operations trace_options_core_fops = {
9056 	.open = tracing_open_generic,
9057 	.read = trace_options_core_read,
9058 	.write = trace_options_core_write,
9059 	.llseek = generic_file_llseek,
9060 };
9061 
9062 struct dentry *trace_create_file(const char *name,
9063 				 umode_t mode,
9064 				 struct dentry *parent,
9065 				 void *data,
9066 				 const struct file_operations *fops)
9067 {
9068 	struct dentry *ret;
9069 
9070 	ret = tracefs_create_file(name, mode, parent, data, fops);
9071 	if (!ret)
9072 		pr_warn("Could not create tracefs '%s' entry\n", name);
9073 
9074 	return ret;
9075 }
9076 
9077 
9078 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
9079 {
9080 	struct dentry *d_tracer;
9081 
9082 	if (tr->options)
9083 		return tr->options;
9084 
9085 	d_tracer = tracing_get_dentry(tr);
9086 	if (IS_ERR(d_tracer))
9087 		return NULL;
9088 
9089 	tr->options = tracefs_create_dir("options", d_tracer);
9090 	if (!tr->options) {
9091 		pr_warn("Could not create tracefs directory 'options'\n");
9092 		return NULL;
9093 	}
9094 
9095 	return tr->options;
9096 }
9097 
9098 static void
9099 create_trace_option_file(struct trace_array *tr,
9100 			 struct trace_option_dentry *topt,
9101 			 struct tracer_flags *flags,
9102 			 struct tracer_opt *opt)
9103 {
9104 	struct dentry *t_options;
9105 
9106 	t_options = trace_options_init_dentry(tr);
9107 	if (!t_options)
9108 		return;
9109 
9110 	topt->flags = flags;
9111 	topt->opt = opt;
9112 	topt->tr = tr;
9113 
9114 	topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
9115 					t_options, topt, &trace_options_fops);
9116 
9117 }
9118 
9119 static void
9120 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
9121 {
9122 	struct trace_option_dentry *topts;
9123 	struct trace_options *tr_topts;
9124 	struct tracer_flags *flags;
9125 	struct tracer_opt *opts;
9126 	int cnt;
9127 	int i;
9128 
9129 	if (!tracer)
9130 		return;
9131 
9132 	flags = tracer->flags;
9133 
9134 	if (!flags || !flags->opts)
9135 		return;
9136 
9137 	/*
9138 	 * If this is an instance, only create flags for tracers
9139 	 * the instance may have.
9140 	 */
9141 	if (!trace_ok_for_array(tracer, tr))
9142 		return;
9143 
9144 	for (i = 0; i < tr->nr_topts; i++) {
9145 		/* Make sure there are no duplicate flags. */
9146 		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
9147 			return;
9148 	}
9149 
9150 	opts = flags->opts;
9151 
9152 	for (cnt = 0; opts[cnt].name; cnt++)
9153 		;
9154 
9155 	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
9156 	if (!topts)
9157 		return;
9158 
9159 	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
9160 			    GFP_KERNEL);
9161 	if (!tr_topts) {
9162 		kfree(topts);
9163 		return;
9164 	}
9165 
9166 	tr->topts = tr_topts;
9167 	tr->topts[tr->nr_topts].tracer = tracer;
9168 	tr->topts[tr->nr_topts].topts = topts;
9169 	tr->nr_topts++;
9170 
9171 	for (cnt = 0; opts[cnt].name; cnt++) {
9172 		create_trace_option_file(tr, &topts[cnt], flags,
9173 					 &opts[cnt]);
9174 		MEM_FAIL(topts[cnt].entry == NULL,
9175 			  "Failed to create trace option: %s",
9176 			  opts[cnt].name);
9177 	}
9178 }
9179 
9180 static struct dentry *
9181 create_trace_option_core_file(struct trace_array *tr,
9182 			      const char *option, long index)
9183 {
9184 	struct dentry *t_options;
9185 
9186 	t_options = trace_options_init_dentry(tr);
9187 	if (!t_options)
9188 		return NULL;
9189 
9190 	return trace_create_file(option, TRACE_MODE_WRITE, t_options,
9191 				 (void *)&tr->trace_flags_index[index],
9192 				 &trace_options_core_fops);
9193 }
9194 
9195 static void create_trace_options_dir(struct trace_array *tr)
9196 {
9197 	struct dentry *t_options;
9198 	bool top_level = tr == &global_trace;
9199 	int i;
9200 
9201 	t_options = trace_options_init_dentry(tr);
9202 	if (!t_options)
9203 		return;
9204 
9205 	for (i = 0; trace_options[i]; i++) {
9206 		if (top_level ||
9207 		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
9208 			create_trace_option_core_file(tr, trace_options[i], i);
9209 	}
9210 }
9211 
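/*
 * rb_simple_read()/rb_simple_write() back the per-instance on/off switch
 * (the tracing_on file): reads return "0" or "1", writes toggle
 * tracer_tracing_on()/off() and call the tracer's start/stop callbacks,
 * waking any ring buffer waiters when tracing is turned off.
 */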
9212 static ssize_t
9213 rb_simple_read(struct file *filp, char __user *ubuf,
9214 	       size_t cnt, loff_t *ppos)
9215 {
9216 	struct trace_array *tr = filp->private_data;
9217 	char buf[64];
9218 	int r;
9219 
9220 	r = tracer_tracing_is_on(tr);
9221 	r = sprintf(buf, "%d\n", r);
9222 
9223 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9224 }
9225 
9226 static ssize_t
9227 rb_simple_write(struct file *filp, const char __user *ubuf,
9228 		size_t cnt, loff_t *ppos)
9229 {
9230 	struct trace_array *tr = filp->private_data;
9231 	struct trace_buffer *buffer = tr->array_buffer.buffer;
9232 	unsigned long val;
9233 	int ret;
9234 
9235 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9236 	if (ret)
9237 		return ret;
9238 
9239 	if (buffer) {
9240 		mutex_lock(&trace_types_lock);
9241 		if (!!val == tracer_tracing_is_on(tr)) {
9242 			val = 0; /* do nothing */
9243 		} else if (val) {
9244 			tracer_tracing_on(tr);
9245 			if (tr->current_trace->start)
9246 				tr->current_trace->start(tr);
9247 		} else {
9248 			tracer_tracing_off(tr);
9249 			if (tr->current_trace->stop)
9250 				tr->current_trace->stop(tr);
9251 			/* Wake up any waiters */
9252 			ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9253 		}
9254 		mutex_unlock(&trace_types_lock);
9255 	}
9256 
9257 	(*ppos)++;
9258 
9259 	return cnt;
9260 }
9261 
9262 static const struct file_operations rb_simple_fops = {
9263 	.open		= tracing_open_generic_tr,
9264 	.read		= rb_simple_read,
9265 	.write		= rb_simple_write,
9266 	.release	= tracing_release_generic_tr,
9267 	.llseek		= default_llseek,
9268 };
9269 
9270 static ssize_t
9271 buffer_percent_read(struct file *filp, char __user *ubuf,
9272 		    size_t cnt, loff_t *ppos)
9273 {
9274 	struct trace_array *tr = filp->private_data;
9275 	char buf[64];
9276 	int r;
9277 
9278 	r = tr->buffer_percent;
9279 	r = sprintf(buf, "%d\n", r);
9280 
9281 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9282 }
9283 
9284 static ssize_t
9285 buffer_percent_write(struct file *filp, const char __user *ubuf,
9286 		     size_t cnt, loff_t *ppos)
9287 {
9288 	struct trace_array *tr = filp->private_data;
9289 	unsigned long val;
9290 	int ret;
9291 
9292 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9293 	if (ret)
9294 		return ret;
9295 
9296 	if (val > 100)
9297 		return -EINVAL;
9298 
9299 	tr->buffer_percent = val;
9300 
9301 	(*ppos)++;
9302 
9303 	return cnt;
9304 }
9305 
9306 static const struct file_operations buffer_percent_fops = {
9307 	.open		= tracing_open_generic_tr,
9308 	.read		= buffer_percent_read,
9309 	.write		= buffer_percent_write,
9310 	.release	= tracing_release_generic_tr,
9311 	.llseek		= default_llseek,
9312 };
9313 
9314 static ssize_t
9315 buffer_subbuf_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
9316 {
9317 	struct trace_array *tr = filp->private_data;
9318 	size_t size;
9319 	char buf[64];
9320 	int order;
9321 	int r;
9322 
9323 	order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9324 	size = (PAGE_SIZE << order) / 1024;
9325 
9326 	r = sprintf(buf, "%zd\n", size);
9327 
9328 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9329 }
9330 
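/*
 * Set the ring buffer sub-buffer size.  The value is taken in KB and
 * rounded up to a power-of-two number of system pages (order 0-7, i.e.
 * 1 to 128 pages).  With 4 KB pages, for example, writing 8 gives
 * 8 KB -> 2 pages -> order 1.  Tracing is stopped while the order of the
 * main and (if allocated) max buffer is changed.
 */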
9331 static ssize_t
9332 buffer_subbuf_size_write(struct file *filp, const char __user *ubuf,
9333 			 size_t cnt, loff_t *ppos)
9334 {
9335 	struct trace_array *tr = filp->private_data;
9336 	unsigned long val;
9337 	int old_order;
9338 	int order;
9339 	int pages;
9340 	int ret;
9341 
9342 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9343 	if (ret)
9344 		return ret;
9345 
9346 	val *= 1024; /* value passed in is in KB */
9347 
9348 	pages = DIV_ROUND_UP(val, PAGE_SIZE);
9349 	order = fls(pages - 1);
9350 
9351 	/* limit between 1 and 128 system pages */
9352 	if (order < 0 || order > 7)
9353 		return -EINVAL;
9354 
9355 	/* Do not allow tracing while changing the order of the ring buffer */
9356 	tracing_stop_tr(tr);
9357 
9358 	old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9359 	if (old_order == order)
9360 		goto out;
9361 
9362 	ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order);
9363 	if (ret)
9364 		goto out;
9365 
9366 #ifdef CONFIG_TRACER_MAX_TRACE
9367 
9368 	if (!tr->allocated_snapshot)
9369 		goto out_max;
9370 
9371 	ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
9372 	if (ret) {
9373 		/* Put back the old order */
9374 		cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
9375 		if (WARN_ON_ONCE(cnt)) {
9376 			/*
9377 			 * AARGH! We are left with different orders!
9378 			 * The max buffer is our "snapshot" buffer.
9379 			 * When a tracer needs a snapshot (one of the
9380 			 * latency tracers), it swaps the max buffer
9381 			 * with the saved snapshot. We succeeded in
9382 			 * updating the order of the main buffer, but failed to
9383 			 * update the order of the max buffer. But when we tried
9384 			 * to reset the main buffer to the original size, we
9385 			 * failed there too. This is very unlikely to
9386 			 * happen, but if it does, warn and kill all
9387 			 * tracing.
9388 			 */
9389 			tracing_disabled = 1;
9390 		}
9391 		goto out;
9392 	}
9393  out_max:
9394 #endif
9395 	(*ppos)++;
9396  out:
9397 	if (ret)
9398 		cnt = ret;
9399 	tracing_start_tr(tr);
9400 	return cnt;
9401 }
9402 
9403 static const struct file_operations buffer_subbuf_size_fops = {
9404 	.open		= tracing_open_generic_tr,
9405 	.read		= buffer_subbuf_size_read,
9406 	.write		= buffer_subbuf_size_write,
9407 	.release	= tracing_release_generic_tr,
9408 	.llseek		= default_llseek,
9409 };
9410 
9411 static struct dentry *trace_instance_dir;
9412 
9413 static void
9414 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9415 
9416 #ifdef CONFIG_MODULES
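/*
 * For a persistent (previous boot) buffer, the scratch area records each
 * module's name and the address it was loaded at in that earlier boot.
 * make_mod_delta() computes, per entry, the offset between the module's
 * current text base and its recorded address so that saved addresses can
 * be translated; a delta of 0 marks a module that is going away.
 */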
9417 static int make_mod_delta(struct module *mod, void *data)
9418 {
9419 	struct trace_module_delta *module_delta;
9420 	struct trace_scratch *tscratch;
9421 	struct trace_mod_entry *entry;
9422 	struct trace_array *tr = data;
9423 	int i;
9424 
9425 	tscratch = tr->scratch;
9426 	module_delta = READ_ONCE(tr->module_delta);
9427 	for (i = 0; i < tscratch->nr_entries; i++) {
9428 		entry = &tscratch->entries[i];
9429 		if (strcmp(mod->name, entry->mod_name))
9430 			continue;
9431 		if (mod->state == MODULE_STATE_GOING)
9432 			module_delta->delta[i] = 0;
9433 		else
9434 			module_delta->delta[i] = (unsigned long)mod->mem[MOD_TEXT].base
9435 						 - entry->mod_addr;
9436 		break;
9437 	}
9438 	return 0;
9439 }
9440 #else
9441 static int make_mod_delta(struct module *mod, void *data)
9442 {
9443 	return 0;
9444 }
9445 #endif
9446 
9447 static int mod_addr_comp(const void *a, const void *b, const void *data)
9448 {
9449 	const struct trace_mod_entry *e1 = a;
9450 	const struct trace_mod_entry *e2 = b;
9451 
9452 	return e1->mod_addr > e2->mod_addr ? 1 : -1;
9453 }
9454 
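/*
 * Validate and adopt the scratch area of a memory-mapped ring buffer:
 * record the kernel text delta, check that every stored module name is a
 * printable, NUL-terminated string, sort the module entries by address,
 * and allocate the per-module delta array that make_mod_delta() fills in.
 * Anything suspicious wipes the scratch area instead.
 */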
9455 static void setup_trace_scratch(struct trace_array *tr,
9456 				struct trace_scratch *tscratch, unsigned int size)
9457 {
9458 	struct trace_module_delta *module_delta;
9459 	struct trace_mod_entry *entry;
9460 	int i, nr_entries;
9461 
9462 	if (!tscratch)
9463 		return;
9464 
9465 	tr->scratch = tscratch;
9466 	tr->scratch_size = size;
9467 
9468 	if (tscratch->text_addr)
9469 		tr->text_delta = (unsigned long)_text - tscratch->text_addr;
9470 
9471 	if (struct_size(tscratch, entries, tscratch->nr_entries) > size)
9472 		goto reset;
9473 
9474 	/* Check if each module name is a valid string */
9475 	for (i = 0; i < tscratch->nr_entries; i++) {
9476 		int n;
9477 
9478 		entry = &tscratch->entries[i];
9479 
9480 		for (n = 0; n < MODULE_NAME_LEN; n++) {
9481 			if (entry->mod_name[n] == '\0')
9482 				break;
9483 			if (!isprint(entry->mod_name[n]))
9484 				goto reset;
9485 		}
9486 		if (n == MODULE_NAME_LEN)
9487 			goto reset;
9488 	}
9489 
9490 	/* Sort the entries so that we can find the appropriate module by address. */
9491 	nr_entries = tscratch->nr_entries;
9492 	sort_r(tscratch->entries, nr_entries, sizeof(struct trace_mod_entry),
9493 	       mod_addr_comp, NULL, NULL);
9494 
9495 	if (IS_ENABLED(CONFIG_MODULES)) {
9496 		module_delta = kzalloc(struct_size(module_delta, delta, nr_entries), GFP_KERNEL);
9497 		if (!module_delta) {
9498 			pr_info("module_delta allocation failed. Not able to decode module address.\n");
9499 			goto reset;
9500 		}
9501 		init_rcu_head(&module_delta->rcu);
9502 	} else
9503 		module_delta = NULL;
9504 	WRITE_ONCE(tr->module_delta, module_delta);
9505 
9506 	/* Scan the loaded modules to compute their text deltas. */
9507 	module_for_each_mod(make_mod_delta, tr);
9508 	return;
9509  reset:
9510 	/* Invalid trace modules */
9511 	memset(tscratch, 0, size);
9512 }
9513 
9514 static int
9515 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9516 {
9517 	enum ring_buffer_flags rb_flags;
9518 	struct trace_scratch *tscratch;
9519 	unsigned int scratch_size = 0;
9520 
9521 	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9522 
9523 	buf->tr = tr;
9524 
9525 	if (tr->range_addr_start && tr->range_addr_size) {
9526 		/* Add scratch buffer to handle 128 modules */
9527 		buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0,
9528 						      tr->range_addr_start,
9529 						      tr->range_addr_size,
9530 						      struct_size(tscratch, entries, 128));
9531 
9532 		tscratch = ring_buffer_meta_scratch(buf->buffer, &scratch_size);
9533 		setup_trace_scratch(tr, tscratch, scratch_size);
9534 
9535 		/*
9536 		 * This is basically the same as a mapped buffer,
9537 		 * with the same restrictions.
9538 		 */
9539 		tr->mapped++;
9540 	} else {
9541 		buf->buffer = ring_buffer_alloc(size, rb_flags);
9542 	}
9543 	if (!buf->buffer)
9544 		return -ENOMEM;
9545 
9546 	buf->data = alloc_percpu(struct trace_array_cpu);
9547 	if (!buf->data) {
9548 		ring_buffer_free(buf->buffer);
9549 		buf->buffer = NULL;
9550 		return -ENOMEM;
9551 	}
9552 
9553 	/* Allocate the first page for all buffers */
9554 	set_buffer_entries(&tr->array_buffer,
9555 			   ring_buffer_size(tr->array_buffer.buffer, 0));
9556 
9557 	return 0;
9558 }
9559 
9560 static void free_trace_buffer(struct array_buffer *buf)
9561 {
9562 	if (buf->buffer) {
9563 		ring_buffer_free(buf->buffer);
9564 		buf->buffer = NULL;
9565 		free_percpu(buf->data);
9566 		buf->data = NULL;
9567 	}
9568 }
9569 
9570 static int allocate_trace_buffers(struct trace_array *tr, int size)
9571 {
9572 	int ret;
9573 
9574 	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9575 	if (ret)
9576 		return ret;
9577 
9578 #ifdef CONFIG_TRACER_MAX_TRACE
9579 	/* Trace arrays backed by a fixed memory range do not have snapshot buffers */
9580 	if (tr->range_addr_start)
9581 		return 0;
9582 
9583 	ret = allocate_trace_buffer(tr, &tr->max_buffer,
9584 				    allocate_snapshot ? size : 1);
9585 	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9586 		free_trace_buffer(&tr->array_buffer);
9587 		return -ENOMEM;
9588 	}
9589 	tr->allocated_snapshot = allocate_snapshot;
9590 
9591 	allocate_snapshot = false;
9592 #endif
9593 
9594 	return 0;
9595 }
9596 
9597 static void free_trace_buffers(struct trace_array *tr)
9598 {
9599 	if (!tr)
9600 		return;
9601 
9602 	free_trace_buffer(&tr->array_buffer);
9603 
9604 #ifdef CONFIG_TRACER_MAX_TRACE
9605 	free_trace_buffer(&tr->max_buffer);
9606 #endif
9607 }
9608 
9609 static void init_trace_flags_index(struct trace_array *tr)
9610 {
9611 	int i;
9612 
9613 	/* Used by the trace options files */
9614 	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9615 		tr->trace_flags_index[i] = i;
9616 }
9617 
9618 static void __update_tracer_options(struct trace_array *tr)
9619 {
9620 	struct tracer *t;
9621 
9622 	for (t = trace_types; t; t = t->next)
9623 		add_tracer_options(tr, t);
9624 }
9625 
9626 static void update_tracer_options(struct trace_array *tr)
9627 {
9628 	mutex_lock(&trace_types_lock);
9629 	tracer_options_updated = true;
9630 	__update_tracer_options(tr);
9631 	mutex_unlock(&trace_types_lock);
9632 }
9633 
9634 /* Must have trace_types_lock held */
9635 struct trace_array *trace_array_find(const char *instance)
9636 {
9637 	struct trace_array *tr, *found = NULL;
9638 
9639 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9640 		if (tr->name && strcmp(tr->name, instance) == 0) {
9641 			found = tr;
9642 			break;
9643 		}
9644 	}
9645 
9646 	return found;
9647 }
9648 
9649 struct trace_array *trace_array_find_get(const char *instance)
9650 {
9651 	struct trace_array *tr;
9652 
9653 	mutex_lock(&trace_types_lock);
9654 	tr = trace_array_find(instance);
9655 	if (tr)
9656 		tr->ref++;
9657 	mutex_unlock(&trace_types_lock);
9658 
9659 	return tr;
9660 }
9661 
9662 static int trace_array_create_dir(struct trace_array *tr)
9663 {
9664 	int ret;
9665 
9666 	tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9667 	if (!tr->dir)
9668 		return -EINVAL;
9669 
9670 	ret = event_trace_add_tracer(tr->dir, tr);
9671 	if (ret) {
9672 		tracefs_remove(tr->dir);
9673 		return ret;
9674 	}
9675 
9676 	init_tracer_tracefs(tr, tr->dir);
9677 	__update_tracer_options(tr);
9678 
9679 	return ret;
9680 }
9681 
9682 static struct trace_array *
9683 trace_array_create_systems(const char *name, const char *systems,
9684 			   unsigned long range_addr_start,
9685 			   unsigned long range_addr_size)
9686 {
9687 	struct trace_array *tr;
9688 	int ret;
9689 
9690 	ret = -ENOMEM;
9691 	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9692 	if (!tr)
9693 		return ERR_PTR(ret);
9694 
9695 	tr->name = kstrdup(name, GFP_KERNEL);
9696 	if (!tr->name)
9697 		goto out_free_tr;
9698 
9699 	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9700 		goto out_free_tr;
9701 
9702 	if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9703 		goto out_free_tr;
9704 
9705 	if (systems) {
9706 		tr->system_names = kstrdup_const(systems, GFP_KERNEL);
9707 		if (!tr->system_names)
9708 			goto out_free_tr;
9709 	}
9710 
9711 	/* Only for boot up memory mapped ring buffers */
9712 	tr->range_addr_start = range_addr_start;
9713 	tr->range_addr_size = range_addr_size;
9714 
9715 	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9716 
9717 	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9718 
9719 	raw_spin_lock_init(&tr->start_lock);
9720 
9721 	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9722 #ifdef CONFIG_TRACER_MAX_TRACE
9723 	spin_lock_init(&tr->snapshot_trigger_lock);
9724 #endif
9725 	tr->current_trace = &nop_trace;
9726 
9727 	INIT_LIST_HEAD(&tr->systems);
9728 	INIT_LIST_HEAD(&tr->events);
9729 	INIT_LIST_HEAD(&tr->hist_vars);
9730 	INIT_LIST_HEAD(&tr->err_log);
9731 
9732 #ifdef CONFIG_MODULES
9733 	INIT_LIST_HEAD(&tr->mod_events);
9734 #endif
9735 
9736 	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9737 		goto out_free_tr;
9738 
9739 	/* The ring buffer is expanded by default */
9740 	trace_set_ring_buffer_expanded(tr);
9741 
9742 	if (ftrace_allocate_ftrace_ops(tr) < 0)
9743 		goto out_free_tr;
9744 
9745 	ftrace_init_trace_array(tr);
9746 
9747 	init_trace_flags_index(tr);
9748 
9749 	if (trace_instance_dir) {
9750 		ret = trace_array_create_dir(tr);
9751 		if (ret)
9752 			goto out_free_tr;
9753 	} else
9754 		__trace_early_add_events(tr);
9755 
9756 	list_add(&tr->list, &ftrace_trace_arrays);
9757 
9758 	tr->ref++;
9759 
9760 	return tr;
9761 
9762  out_free_tr:
9763 	ftrace_free_ftrace_ops(tr);
9764 	free_trace_buffers(tr);
9765 	free_cpumask_var(tr->pipe_cpumask);
9766 	free_cpumask_var(tr->tracing_cpumask);
9767 	kfree_const(tr->system_names);
9768 	kfree(tr->range_name);
9769 	kfree(tr->name);
9770 	kfree(tr);
9771 
9772 	return ERR_PTR(ret);
9773 }
9774 
9775 static struct trace_array *trace_array_create(const char *name)
9776 {
9777 	return trace_array_create_systems(name, NULL, 0, 0);
9778 }
9779 
9780 static int instance_mkdir(const char *name)
9781 {
9782 	struct trace_array *tr;
9783 	int ret;
9784 
9785 	guard(mutex)(&event_mutex);
9786 	guard(mutex)(&trace_types_lock);
9787 
9788 	ret = -EEXIST;
9789 	if (trace_array_find(name))
9790 		return -EEXIST;
9791 
9792 	tr = trace_array_create(name);
9793 
9794 	ret = PTR_ERR_OR_ZERO(tr);
9795 
9796 	return ret;
9797 }
9798 
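/*
 * Map a physically contiguous region (e.g. one handed in via the
 * trace_instance boot parameter) into kernel virtual address space by
 * building a struct page array and vmap()ing it. Returns the virtual
 * address as a u64, or 0 if the temporary page array cannot be allocated.
 */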
9799 static u64 map_pages(u64 start, u64 size)
9800 {
9801 	struct page **pages;
9802 	phys_addr_t page_start;
9803 	unsigned int page_count;
9804 	unsigned int i;
9805 	void *vaddr;
9806 
9807 	page_count = DIV_ROUND_UP(size, PAGE_SIZE);
9808 
9809 	page_start = start;
9810 	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
9811 	if (!pages)
9812 		return 0;
9813 
9814 	for (i = 0; i < page_count; i++) {
9815 		phys_addr_t addr = page_start + i * PAGE_SIZE;
9816 		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
9817 	}
9818 	vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
9819 	kfree(pages);
9820 
9821 	return (u64)(unsigned long)vaddr;
9822 }
9823 
9824 /**
9825  * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9826  * @name: The name of the trace array to be looked up/created.
9827  * @systems: A list of systems to create event directories for (NULL for all)
9828  *
9829  * Returns pointer to trace array with given name.
9830  * NULL, if it cannot be created.
9831  *
9832  * NOTE: This function increments the reference counter associated with the
9833  * trace array returned. This makes sure it cannot be freed while in use.
9834  * Use trace_array_put() once the trace array is no longer needed.
9835  * If the trace_array is to be freed, trace_array_destroy() needs to
9836  * be called after the trace_array_put(), or simply let user space delete
9837  * it from the tracefs instances directory. But until the
9838  * trace_array_put() is called, user space can not delete it.
9839  *
9840  */
9841 struct trace_array *trace_array_get_by_name(const char *name, const char *systems)
9842 {
9843 	struct trace_array *tr;
9844 
9845 	guard(mutex)(&event_mutex);
9846 	guard(mutex)(&trace_types_lock);
9847 
9848 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9849 		if (tr->name && strcmp(tr->name, name) == 0) {
9850 			tr->ref++;
9851 			return tr;
9852 		}
9853 	}
9854 
9855 	tr = trace_array_create_systems(name, systems, 0, 0);
9856 
9857 	if (IS_ERR(tr))
9858 		tr = NULL;
9859 	else
9860 		tr->ref++;
9861 
9862 	return tr;
9863 }
9864 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
9865 
9866 static int __remove_instance(struct trace_array *tr)
9867 {
9868 	int i;
9869 
9870 	/* Reference counter for a newly created trace array = 1. */
9871 	if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9872 		return -EBUSY;
9873 
9874 	list_del(&tr->list);
9875 
9876 	/* Disable all the flags that were enabled coming in */
9877 	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9878 		if ((1 << i) & ZEROED_TRACE_FLAGS)
9879 			set_tracer_flag(tr, 1 << i, 0);
9880 	}
9881 
9882 	if (printk_trace == tr)
9883 		update_printk_trace(&global_trace);
9884 
9885 	tracing_set_nop(tr);
9886 	clear_ftrace_function_probes(tr);
9887 	event_trace_del_tracer(tr);
9888 	ftrace_clear_pids(tr);
9889 	ftrace_destroy_function_files(tr);
9890 	tracefs_remove(tr->dir);
9891 	free_percpu(tr->last_func_repeats);
9892 	free_trace_buffers(tr);
9893 	clear_tracing_err_log(tr);
9894 
9895 	if (tr->range_name) {
9896 		reserve_mem_release_by_name(tr->range_name);
9897 		kfree(tr->range_name);
9898 	}
9899 
9900 	for (i = 0; i < tr->nr_topts; i++) {
9901 		kfree(tr->topts[i].topts);
9902 	}
9903 	kfree(tr->topts);
9904 
9905 	free_cpumask_var(tr->pipe_cpumask);
9906 	free_cpumask_var(tr->tracing_cpumask);
9907 	kfree_const(tr->system_names);
9908 	kfree(tr->name);
9909 	kfree(tr);
9910 
9911 	return 0;
9912 }
9913 
9914 int trace_array_destroy(struct trace_array *this_tr)
9915 {
9916 	struct trace_array *tr;
9917 
9918 	if (!this_tr)
9919 		return -EINVAL;
9920 
9921 	guard(mutex)(&event_mutex);
9922 	guard(mutex)(&trace_types_lock);
9923 
9924 
9925 	/* Making sure trace array exists before destroying it. */
9926 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9927 		if (tr == this_tr)
9928 			return __remove_instance(tr);
9929 	}
9930 
9931 	return -ENODEV;
9932 }
9933 EXPORT_SYMBOL_GPL(trace_array_destroy);
9934 
9935 static int instance_rmdir(const char *name)
9936 {
9937 	struct trace_array *tr;
9938 
9939 	guard(mutex)(&event_mutex);
9940 	guard(mutex)(&trace_types_lock);
9941 
9942 	tr = trace_array_find(name);
9943 	if (!tr)
9944 		return -ENODEV;
9945 
9946 	return __remove_instance(tr);
9947 }
9948 
9949 static __init void create_trace_instances(struct dentry *d_tracer)
9950 {
9951 	struct trace_array *tr;
9952 
9953 	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9954 							 instance_mkdir,
9955 							 instance_rmdir);
9956 	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9957 		return;
9958 
9959 	guard(mutex)(&event_mutex);
9960 	guard(mutex)(&trace_types_lock);
9961 
9962 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9963 		if (!tr->name)
9964 			continue;
9965 		if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9966 			     "Failed to create instance directory\n"))
9967 			return;
9968 	}
9969 }
9970 
9971 static void
9972 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9973 {
9974 	int cpu;
9975 
9976 	trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9977 			tr, &show_traces_fops);
9978 
9979 	trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9980 			tr, &set_tracer_fops);
9981 
9982 	trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9983 			  tr, &tracing_cpumask_fops);
9984 
9985 	trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9986 			  tr, &tracing_iter_fops);
9987 
9988 	trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9989 			  tr, &tracing_fops);
9990 
9991 	trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9992 			  tr, &tracing_pipe_fops);
9993 
9994 	trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9995 			  tr, &tracing_entries_fops);
9996 
9997 	trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9998 			  tr, &tracing_total_entries_fops);
9999 
10000 	trace_create_file("free_buffer", 0200, d_tracer,
10001 			  tr, &tracing_free_buffer_fops);
10002 
10003 	trace_create_file("trace_marker", 0220, d_tracer,
10004 			  tr, &tracing_mark_fops);
10005 
10006 	tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
10007 
10008 	trace_create_file("trace_marker_raw", 0220, d_tracer,
10009 			  tr, &tracing_mark_raw_fops);
10010 
10011 	trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
10012 			  &trace_clock_fops);
10013 
10014 	trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
10015 			  tr, &rb_simple_fops);
10016 
10017 	trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
10018 			  &trace_time_stamp_mode_fops);
10019 
10020 	tr->buffer_percent = 50;
10021 
10022 	trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
10023 			tr, &buffer_percent_fops);
10024 
10025 	trace_create_file("buffer_subbuf_size_kb", TRACE_MODE_WRITE, d_tracer,
10026 			  tr, &buffer_subbuf_size_fops);
10027 
10028 	create_trace_options_dir(tr);
10029 
10030 #ifdef CONFIG_TRACER_MAX_TRACE
10031 	trace_create_maxlat_file(tr, d_tracer);
10032 #endif
10033 
10034 	if (ftrace_create_function_files(tr, d_tracer))
10035 		MEM_FAIL(1, "Could not allocate function filter files");
10036 
10037 	if (tr->range_addr_start) {
10038 		trace_create_file("last_boot_info", TRACE_MODE_READ, d_tracer,
10039 				  tr, &last_boot_fops);
10040 #ifdef CONFIG_TRACER_SNAPSHOT
10041 	} else {
10042 		trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
10043 				  tr, &snapshot_fops);
10044 #endif
10045 	}
10046 
10047 	trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
10048 			  tr, &tracing_err_log_fops);
10049 
10050 	for_each_tracing_cpu(cpu)
10051 		tracing_init_tracefs_percpu(tr, cpu);
10052 
10053 	ftrace_init_tracefs(tr, d_tracer);
10054 }
10055 
10056 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
10057 {
10058 	struct vfsmount *mnt;
10059 	struct file_system_type *type;
10060 
10061 	/*
10062 	 * To maintain backward compatibility for tools that mount
10063 	 * debugfs to get to the tracing facility, tracefs is automatically
10064 	 * mounted to the debugfs/tracing directory.
10065 	 */
10066 	type = get_fs_type("tracefs");
10067 	if (!type)
10068 		return NULL;
10069 	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
10070 	put_filesystem(type);
10071 	if (IS_ERR(mnt))
10072 		return NULL;
10073 	mntget(mnt);
10074 
10075 	return mnt;
10076 }
10077 
10078 /**
10079  * tracing_init_dentry - initialize top level trace array
10080  *
10081  * This is called when creating files or directories in the tracing
10082  * directory. It is called via fs_initcall() by any of the boot up code
10083  * and returns 0 on success (the tracing directory is set up as needed).
10084  */
10085 int tracing_init_dentry(void)
10086 {
10087 	struct trace_array *tr = &global_trace;
10088 
10089 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
10090 		pr_warn("Tracing disabled due to lockdown\n");
10091 		return -EPERM;
10092 	}
10093 
10094 	/* The top level trace array uses NULL as parent */
10095 	if (tr->dir)
10096 		return 0;
10097 
10098 	if (WARN_ON(!tracefs_initialized()))
10099 		return -ENODEV;
10100 
10101 	/*
10102 	 * As there may still be users that expect the tracing
10103 	 * files to exist in debugfs/tracing, we must automount
10104 	 * the tracefs file system there, so older tools still
10105 	 * work with the newer kernel.
10106 	 */
10107 	tr->dir = debugfs_create_automount("tracing", NULL,
10108 					   trace_automount, NULL);
10109 
10110 	return 0;
10111 }
10112 
10113 extern struct trace_eval_map *__start_ftrace_eval_maps[];
10114 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
10115 
10116 static struct workqueue_struct *eval_map_wq __initdata;
10117 static struct work_struct eval_map_work __initdata;
10118 static struct work_struct tracerfs_init_work __initdata;
10119 
10120 static void __init eval_map_work_func(struct work_struct *work)
10121 {
10122 	int len;
10123 
10124 	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
10125 	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
10126 }
10127 
10128 static int __init trace_eval_init(void)
10129 {
10130 	INIT_WORK(&eval_map_work, eval_map_work_func);
10131 
10132 	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
10133 	if (!eval_map_wq) {
10134 		pr_err("Unable to allocate eval_map_wq\n");
10135 		/* Fall back to doing the work synchronously */
10136 		eval_map_work_func(&eval_map_work);
10137 		return -ENOMEM;
10138 	}
10139 
10140 	queue_work(eval_map_wq, &eval_map_work);
10141 	return 0;
10142 }
10143 
10144 subsys_initcall(trace_eval_init);
10145 
10146 static int __init trace_eval_sync(void)
10147 {
10148 	/* Make sure the eval map updates are finished */
10149 	if (eval_map_wq)
10150 		destroy_workqueue(eval_map_wq);
10151 	return 0;
10152 }
10153 
10154 late_initcall_sync(trace_eval_sync);
10155 
10156 
10157 #ifdef CONFIG_MODULES
10158 
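/*
 * As a sketch, module_exists("kvm") looks up the symbol "kvm:__this_module"
 * via kallsyms; the lookup only succeeds while a module with that name is
 * actually loaded.
 */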
10159 bool module_exists(const char *module)
10160 {
10161 	/* All modules have the symbol __this_module */
10162 	static const char this_mod[] = "__this_module";
10163 	char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
10164 	unsigned long val;
10165 	int n;
10166 
10167 	n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
10168 
10169 	if (n > sizeof(modname) - 1)
10170 		return false;
10171 
10172 	val = module_kallsyms_lookup_name(modname);
10173 	return val != 0;
10174 }
10175 
10176 static void trace_module_add_evals(struct module *mod)
10177 {
10178 	if (!mod->num_trace_evals)
10179 		return;
10180 
10181 	/*
10182 	 * Modules with bad taint do not have events created, do
10183 	 * not bother with enums either.
10184 	 */
10185 	if (trace_module_has_bad_taint(mod))
10186 		return;
10187 
10188 	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
10189 }
10190 
10191 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
10192 static void trace_module_remove_evals(struct module *mod)
10193 {
10194 	union trace_eval_map_item *map;
10195 	union trace_eval_map_item **last = &trace_eval_maps;
10196 
10197 	if (!mod->num_trace_evals)
10198 		return;
10199 
10200 	guard(mutex)(&trace_eval_mutex);
10201 
10202 	map = trace_eval_maps;
10203 
10204 	while (map) {
10205 		if (map->head.mod == mod)
10206 			break;
10207 		map = trace_eval_jmp_to_tail(map);
10208 		last = &map->tail.next;
10209 		map = map->tail.next;
10210 	}
10211 	if (!map)
10212 		return;
10213 
10214 	*last = trace_eval_jmp_to_tail(map)->tail.next;
10215 	kfree(map);
10216 }
10217 #else
10218 static inline void trace_module_remove_evals(struct module *mod) { }
10219 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
10220 
10221 static void trace_module_record(struct module *mod, bool add)
10222 {
10223 	struct trace_array *tr;
10224 	unsigned long flags;
10225 
10226 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
10227 		flags = tr->flags & (TRACE_ARRAY_FL_BOOT | TRACE_ARRAY_FL_LAST_BOOT);
10228 		/* Update any persistent trace array that has already been started */
10229 		if (flags == TRACE_ARRAY_FL_BOOT && add) {
10230 			guard(mutex)(&scratch_mutex);
10231 			save_mod(mod, tr);
10232 		} else if (flags & TRACE_ARRAY_FL_LAST_BOOT) {
10233 			/* Update delta if the module loaded in previous boot */
10234 			make_mod_delta(mod, tr);
10235 		}
10236 	}
10237 }
10238 
10239 static int trace_module_notify(struct notifier_block *self,
10240 			       unsigned long val, void *data)
10241 {
10242 	struct module *mod = data;
10243 
10244 	switch (val) {
10245 	case MODULE_STATE_COMING:
10246 		trace_module_add_evals(mod);
10247 		trace_module_record(mod, true);
10248 		break;
10249 	case MODULE_STATE_GOING:
10250 		trace_module_remove_evals(mod);
10251 		trace_module_record(mod, false);
10252 		break;
10253 	}
10254 
10255 	return NOTIFY_OK;
10256 }
10257 
10258 static struct notifier_block trace_module_nb = {
10259 	.notifier_call = trace_module_notify,
10260 	.priority = 0,
10261 };
10262 #endif /* CONFIG_MODULES */
10263 
10264 static __init void tracer_init_tracefs_work_func(struct work_struct *work)
10265 {
10266 
10267 	event_trace_init();
10268 
10269 	init_tracer_tracefs(&global_trace, NULL);
10270 	ftrace_init_tracefs_toplevel(&global_trace, NULL);
10271 
10272 	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
10273 			&global_trace, &tracing_thresh_fops);
10274 
10275 	trace_create_file("README", TRACE_MODE_READ, NULL,
10276 			NULL, &tracing_readme_fops);
10277 
10278 	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
10279 			NULL, &tracing_saved_cmdlines_fops);
10280 
10281 	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
10282 			  NULL, &tracing_saved_cmdlines_size_fops);
10283 
10284 	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
10285 			NULL, &tracing_saved_tgids_fops);
10286 
10287 	trace_create_eval_file(NULL);
10288 
10289 #ifdef CONFIG_MODULES
10290 	register_module_notifier(&trace_module_nb);
10291 #endif
10292 
10293 #ifdef CONFIG_DYNAMIC_FTRACE
10294 	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
10295 			NULL, &tracing_dyn_info_fops);
10296 #endif
10297 
10298 	create_trace_instances(NULL);
10299 
10300 	update_tracer_options(&global_trace);
10301 }
10302 
10303 static __init int tracer_init_tracefs(void)
10304 {
10305 	int ret;
10306 
10307 	trace_access_lock_init();
10308 
10309 	ret = tracing_init_dentry();
10310 	if (ret)
10311 		return 0;
10312 
10313 	if (eval_map_wq) {
10314 		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
10315 		queue_work(eval_map_wq, &tracerfs_init_work);
10316 	} else {
10317 		tracer_init_tracefs_work_func(NULL);
10318 	}
10319 
10320 	rv_init_interface();
10321 
10322 	return 0;
10323 }
10324 
10325 fs_initcall(tracer_init_tracefs);
10326 
10327 static int trace_die_panic_handler(struct notifier_block *self,
10328 				unsigned long ev, void *unused);
10329 
10330 static struct notifier_block trace_panic_notifier = {
10331 	.notifier_call = trace_die_panic_handler,
10332 	.priority = INT_MAX - 1,
10333 };
10334 
10335 static struct notifier_block trace_die_notifier = {
10336 	.notifier_call = trace_die_panic_handler,
10337 	.priority = INT_MAX - 1,
10338 };
10339 
10340 /*
10341  * The idea is to execute the following die/panic callback early, in order
10342  * to avoid showing irrelevant information in the trace (like other panic
10343  * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
10344  * warnings get disabled (to prevent potential log flooding).
10345  */
10346 static int trace_die_panic_handler(struct notifier_block *self,
10347 				unsigned long ev, void *unused)
10348 {
10349 	if (!ftrace_dump_on_oops_enabled())
10350 		return NOTIFY_DONE;
10351 
10352 	/* The die notifier requires DIE_OOPS to trigger */
10353 	if (self == &trace_die_notifier && ev != DIE_OOPS)
10354 		return NOTIFY_DONE;
10355 
10356 	ftrace_dump(DUMP_PARAM);
10357 
10358 	return NOTIFY_DONE;
10359 }
10360 
10361 /*
10362  * printk is set to a max of 1024; we really don't need it that big.
10363  * Nothing should be printing 1000 characters anyway.
10364  */
10365 #define TRACE_MAX_PRINT		1000
10366 
10367 /*
10368  * Define here KERN_TRACE so that we have one place to modify
10369  * it if we decide to change what log level the ftrace dump
10370  * should be at.
10371  */
10372 #define KERN_TRACE		KERN_EMERG
10373 
10374 void
10375 trace_printk_seq(struct trace_seq *s)
10376 {
10377 	/* Probably should print a warning here. */
10378 	if (s->seq.len >= TRACE_MAX_PRINT)
10379 		s->seq.len = TRACE_MAX_PRINT;
10380 
10381 	/*
10382 	 * More paranoid code. Although the buffer size is set to
10383 	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
10384 	 * an extra layer of protection.
10385 	 */
10386 	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
10387 		s->seq.len = s->seq.size - 1;
10388 
10389 	/* Should be NUL terminated, but we are paranoid. */
10390 	s->buffer[s->seq.len] = 0;
10391 
10392 	printk(KERN_TRACE "%s", s->buffer);
10393 
10394 	trace_seq_init(s);
10395 }
10396 
10397 static void trace_init_iter(struct trace_iterator *iter, struct trace_array *tr)
10398 {
10399 	iter->tr = tr;
10400 	iter->trace = iter->tr->current_trace;
10401 	iter->cpu_file = RING_BUFFER_ALL_CPUS;
10402 	iter->array_buffer = &tr->array_buffer;
10403 
10404 	if (iter->trace && iter->trace->open)
10405 		iter->trace->open(iter);
10406 
10407 	/* Annotate start of buffers if we had overruns */
10408 	if (ring_buffer_overruns(iter->array_buffer->buffer))
10409 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
10410 
10411 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
10412 	if (trace_clocks[iter->tr->clock_id].in_ns)
10413 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
10414 
10415 	/* Can not use kmalloc for iter.temp and iter.fmt */
10416 	iter->temp = static_temp_buf;
10417 	iter->temp_size = STATIC_TEMP_BUF_SIZE;
10418 	iter->fmt = static_fmt_buf;
10419 	iter->fmt_size = STATIC_FMT_BUF_SIZE;
10420 }
10421 
10422 void trace_init_global_iter(struct trace_iterator *iter)
10423 {
10424 	trace_init_iter(iter, &global_trace);
10425 }
10426 
10427 static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_mode)
10428 {
10429 	/* use static because iter can be a bit big for the stack */
10430 	static struct trace_iterator iter;
10431 	unsigned int old_userobj;
10432 	unsigned long flags;
10433 	int cnt = 0, cpu;
10434 
10435 	/*
10436 	 * Always turn off tracing when we dump.
10437 	 * We don't need to show trace output of what happens
10438 	 * between multiple crashes.
10439 	 *
10440 	 * If the user does a sysrq-z, then they can re-enable
10441 	 * tracing with echo 1 > tracing_on.
10442 	 */
10443 	tracer_tracing_off(tr);
10444 
10445 	local_irq_save(flags);
10446 
10447 	/* Simulate the iterator */
10448 	trace_init_iter(&iter, tr);
10449 
10450 	for_each_tracing_cpu(cpu) {
10451 		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10452 	}
10453 
10454 	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
10455 
10456 	/* don't look at user memory in panic mode */
10457 	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
10458 
10459 	if (dump_mode == DUMP_ORIG)
10460 		iter.cpu_file = raw_smp_processor_id();
10461 	else
10462 		iter.cpu_file = RING_BUFFER_ALL_CPUS;
10463 
10464 	if (tr == &global_trace)
10465 		printk(KERN_TRACE "Dumping ftrace buffer:\n");
10466 	else
10467 		printk(KERN_TRACE "Dumping ftrace instance %s buffer:\n", tr->name);
10468 
10469 	/* Did function tracer already get disabled? */
10470 	if (ftrace_is_dead()) {
10471 		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
10472 		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
10473 	}
10474 
10475 	/*
10476 	 * We need to stop all tracing on all CPUs to read
10477 	 * the next buffer. This is a bit expensive, but is
10478 	 * not done often. We read everything we can,
10479 	 * and then release the locks again.
10480 	 */
10481 
10482 	while (!trace_empty(&iter)) {
10483 
10484 		if (!cnt)
10485 			printk(KERN_TRACE "---------------------------------\n");
10486 
10487 		cnt++;
10488 
10489 		trace_iterator_reset(&iter);
10490 		iter.iter_flags |= TRACE_FILE_LAT_FMT;
10491 
10492 		if (trace_find_next_entry_inc(&iter) != NULL) {
10493 			int ret;
10494 
10495 			ret = print_trace_line(&iter);
10496 			if (ret != TRACE_TYPE_NO_CONSUME)
10497 				trace_consume(&iter);
10498 		}
10499 		touch_nmi_watchdog();
10500 
10501 		trace_printk_seq(&iter.seq);
10502 	}
10503 
10504 	if (!cnt)
10505 		printk(KERN_TRACE "   (ftrace buffer empty)\n");
10506 	else
10507 		printk(KERN_TRACE "---------------------------------\n");
10508 
10509 	tr->trace_flags |= old_userobj;
10510 
10511 	for_each_tracing_cpu(cpu) {
10512 		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10513 	}
10514 	local_irq_restore(flags);
10515 }
10516 
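/*
 * Parse the ftrace_dump_on_oops string. A sketch of the accepted form,
 * derived from the parsing below: the first comma-separated token may be
 * "0", "1" or "2"/"orig_cpu" and applies to the global buffer; each later
 * token names an instance, optionally as "<instance>=2" or
 * "<instance>=orig_cpu" to dump only the originating CPU.
 */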
10517 static void ftrace_dump_by_param(void)
10518 {
10519 	bool first_param = true;
10520 	char dump_param[MAX_TRACER_SIZE];
10521 	char *buf, *token, *inst_name;
10522 	struct trace_array *tr;
10523 
10524 	strscpy(dump_param, ftrace_dump_on_oops, MAX_TRACER_SIZE);
10525 	buf = dump_param;
10526 
10527 	while ((token = strsep(&buf, ",")) != NULL) {
10528 		if (first_param) {
10529 			first_param = false;
10530 			if (!strcmp("0", token))
10531 				continue;
10532 			else if (!strcmp("1", token)) {
10533 				ftrace_dump_one(&global_trace, DUMP_ALL);
10534 				continue;
10535 			}
10536 			else if (!strcmp("2", token) ||
10537 			  !strcmp("orig_cpu", token)) {
10538 				ftrace_dump_one(&global_trace, DUMP_ORIG);
10539 				continue;
10540 			}
10541 		}
10542 
10543 		inst_name = strsep(&token, "=");
10544 		tr = trace_array_find(inst_name);
10545 		if (!tr) {
10546 			printk(KERN_TRACE "Instance %s not found\n", inst_name);
10547 			continue;
10548 		}
10549 
10550 		if (token && (!strcmp("2", token) ||
10551 			  !strcmp("orig_cpu", token)))
10552 			ftrace_dump_one(tr, DUMP_ORIG);
10553 		else
10554 			ftrace_dump_one(tr, DUMP_ALL);
10555 	}
10556 }
10557 
10558 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
10559 {
10560 	static atomic_t dump_running;
10561 
10562 	/* Only allow one dump user at a time. */
10563 	if (atomic_inc_return(&dump_running) != 1) {
10564 		atomic_dec(&dump_running);
10565 		return;
10566 	}
10567 
10568 	switch (oops_dump_mode) {
10569 	case DUMP_ALL:
10570 		ftrace_dump_one(&global_trace, DUMP_ALL);
10571 		break;
10572 	case DUMP_ORIG:
10573 		ftrace_dump_one(&global_trace, DUMP_ORIG);
10574 		break;
10575 	case DUMP_PARAM:
10576 		ftrace_dump_by_param();
10577 		break;
10578 	case DUMP_NONE:
10579 		break;
10580 	default:
10581 		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
10582 		ftrace_dump_one(&global_trace, DUMP_ALL);
10583 	}
10584 
10585 	atomic_dec(&dump_running);
10586 }
10587 EXPORT_SYMBOL_GPL(ftrace_dump);
10588 
10589 #define WRITE_BUFSIZE  4096
10590 
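/*
 * Helper for write() handlers that accept a command language (callers such
 * as the dynamic event interfaces): the user buffer is consumed in chunks
 * of up to WRITE_BUFSIZE, split into newline-terminated commands, stripped
 * of "#" comments, and each resulting line is passed to createfn().
 */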
10591 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
10592 				size_t count, loff_t *ppos,
10593 				int (*createfn)(const char *))
10594 {
10595 	char *kbuf, *buf, *tmp;
10596 	int ret = 0;
10597 	size_t done = 0;
10598 	size_t size;
10599 
10600 	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
10601 	if (!kbuf)
10602 		return -ENOMEM;
10603 
10604 	while (done < count) {
10605 		size = count - done;
10606 
10607 		if (size >= WRITE_BUFSIZE)
10608 			size = WRITE_BUFSIZE - 1;
10609 
10610 		if (copy_from_user(kbuf, buffer + done, size)) {
10611 			ret = -EFAULT;
10612 			goto out;
10613 		}
10614 		kbuf[size] = '\0';
10615 		buf = kbuf;
10616 		do {
10617 			tmp = strchr(buf, '\n');
10618 			if (tmp) {
10619 				*tmp = '\0';
10620 				size = tmp - buf + 1;
10621 			} else {
10622 				size = strlen(buf);
10623 				if (done + size < count) {
10624 					if (buf != kbuf)
10625 						break;
10626 					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
10627 					pr_warn("Line length is too long: Should be less than %d\n",
10628 						WRITE_BUFSIZE - 2);
10629 					ret = -EINVAL;
10630 					goto out;
10631 				}
10632 			}
10633 			done += size;
10634 
10635 			/* Remove comments */
10636 			tmp = strchr(buf, '#');
10637 
10638 			if (tmp)
10639 				*tmp = '\0';
10640 
10641 			ret = createfn(buf);
10642 			if (ret)
10643 				goto out;
10644 			buf += size;
10645 
10646 		} while (done < count);
10647 	}
10648 	ret = done;
10649 
10650 out:
10651 	kfree(kbuf);
10652 
10653 	return ret;
10654 }
10655 
10656 #ifdef CONFIG_TRACER_MAX_TRACE
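/*
 * boot_snapshot_info is a list of instance names, each terminated by a
 * tab, built from the snapshot-related boot parameters. An instance needs
 * a snapshot buffer when its name appears in that list, either at the very
 * start or as an embedded "\t<name>\t" entry.
 */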
10657 __init static bool tr_needs_alloc_snapshot(const char *name)
10658 {
10659 	char *test;
10660 	int len = strlen(name);
10661 	bool ret;
10662 
10663 	if (!boot_snapshot_index)
10664 		return false;
10665 
10666 	if (strncmp(name, boot_snapshot_info, len) == 0 &&
10667 	    boot_snapshot_info[len] == '\t')
10668 		return true;
10669 
10670 	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
10671 	if (!test)
10672 		return false;
10673 
10674 	sprintf(test, "\t%s\t", name);
10675 	ret = strstr(boot_snapshot_info, test) != NULL;
10676 	kfree(test);
10677 	return ret;
10678 }
10679 
10680 __init static void do_allocate_snapshot(const char *name)
10681 {
10682 	if (!tr_needs_alloc_snapshot(name))
10683 		return;
10684 
10685 	/*
10686 	 * When allocate_snapshot is set, the next call to
10687 	 * allocate_trace_buffers() (called by trace_array_get_by_name())
10688 	 * will allocate the snapshot buffer. That will also clear
10689 	 * this flag.
10690 	 */
10691 	allocate_snapshot = true;
10692 }
10693 #else
10694 static inline void do_allocate_snapshot(const char *name) { }
10695 #endif
10696 
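/*
 * Sketch of the boot string consumed here (one tab-separated entry per
 * instance, as assembled from the trace_instance= parameter):
 *
 *   <name>[^traceoff][^traceprintk][@<start:size | reserve_mem name>][,<event>...]
 *
 * e.g. "foo^traceoff@0x1000000:2M,sched:sched_switch" would create a
 * memory-mapped instance "foo", leave it turned off, and enable the
 * sched_switch event in it.
 */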
10697 __init static void enable_instances(void)
10698 {
10699 	struct trace_array *tr;
10700 	bool memmap_area = false;
10701 	char *curr_str;
10702 	char *name;
10703 	char *str;
10704 	char *tok;
10705 
10706 	/* A tab is always appended */
10707 	boot_instance_info[boot_instance_index - 1] = '\0';
10708 	str = boot_instance_info;
10709 
10710 	while ((curr_str = strsep(&str, "\t"))) {
10711 		phys_addr_t start = 0;
10712 		phys_addr_t size = 0;
10713 		unsigned long addr = 0;
10714 		bool traceprintk = false;
10715 		bool traceoff = false;
10716 		char *flag_delim;
10717 		char *addr_delim;
10718 		char *rname __free(kfree) = NULL;
10719 
10720 		tok = strsep(&curr_str, ",");
10721 
10722 		flag_delim = strchr(tok, '^');
10723 		addr_delim = strchr(tok, '@');
10724 
10725 		if (addr_delim)
10726 			*addr_delim++ = '\0';
10727 
10728 		if (flag_delim)
10729 			*flag_delim++ = '\0';
10730 
10731 		name = tok;
10732 
10733 		if (flag_delim) {
10734 			char *flag;
10735 
10736 			while ((flag = strsep(&flag_delim, "^"))) {
10737 				if (strcmp(flag, "traceoff") == 0) {
10738 					traceoff = true;
10739 				} else if ((strcmp(flag, "printk") == 0) ||
10740 					   (strcmp(flag, "traceprintk") == 0) ||
10741 					   (strcmp(flag, "trace_printk") == 0)) {
10742 					traceprintk = true;
10743 				} else {
10744 					pr_info("Tracing: Invalid instance flag '%s' for %s\n",
10745 						flag, name);
10746 				}
10747 			}
10748 		}
10749 
10750 		tok = addr_delim;
10751 		if (tok && isdigit(*tok)) {
10752 			start = memparse(tok, &tok);
10753 			if (!start) {
10754 				pr_warn("Tracing: Invalid boot instance address for %s\n",
10755 					name);
10756 				continue;
10757 			}
10758 			if (*tok != ':') {
10759 				pr_warn("Tracing: No size specified for instance %s\n", name);
10760 				continue;
10761 			}
10762 			tok++;
10763 			size = memparse(tok, &tok);
10764 			if (!size) {
10765 				pr_warn("Tracing: Invalid boot instance size for %s\n",
10766 					name);
10767 				continue;
10768 			}
10769 			memmap_area = true;
10770 		} else if (tok) {
10771 			if (!reserve_mem_find_by_name(tok, &start, &size)) {
10772 				start = 0;
10773 				pr_warn("Failed to map boot instance %s to %s\n", name, tok);
10774 				continue;
10775 			}
10776 			rname = kstrdup(tok, GFP_KERNEL);
10777 		}
10778 
10779 		if (start) {
10780 			/* Start and size must be page aligned */
10781 			if (start & ~PAGE_MASK) {
10782 				pr_warn("Tracing: mapping start addr %pa is not page aligned\n", &start);
10783 				continue;
10784 			}
10785 			if (size & ~PAGE_MASK) {
10786 				pr_warn("Tracing: mapping size %pa is not page aligned\n", &size);
10787 				continue;
10788 			}
10789 
10790 			if (memmap_area)
10791 				addr = map_pages(start, size);
10792 			else
10793 				addr = (unsigned long)phys_to_virt(start);
10794 			if (addr) {
10795 				pr_info("Tracing: mapped boot instance %s at physical memory %pa of size 0x%lx\n",
10796 					name, &start, (unsigned long)size);
10797 			} else {
10798 				pr_warn("Tracing: Failed to map boot instance %s\n", name);
10799 				continue;
10800 			}
10801 		} else {
10802 			/* Only non-mapped buffers have snapshot buffers */
10803 			if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
10804 				do_allocate_snapshot(name);
10805 		}
10806 
10807 		tr = trace_array_create_systems(name, NULL, addr, size);
10808 		if (IS_ERR(tr)) {
10809 			pr_warn("Tracing: Failed to create instance buffer %s\n", curr_str);
10810 			continue;
10811 		}
10812 
10813 		if (traceoff)
10814 			tracer_tracing_off(tr);
10815 
10816 		if (traceprintk)
10817 			update_printk_trace(tr);
10818 
10819 		/*
10820 		 * memmap'd buffers can not be freed.
10821 		 */
10822 		if (memmap_area) {
10823 			tr->flags |= TRACE_ARRAY_FL_MEMMAP;
10824 			tr->ref++;
10825 		}
10826 
10827 		if (start) {
10828 			tr->flags |= TRACE_ARRAY_FL_BOOT | TRACE_ARRAY_FL_LAST_BOOT;
10829 			tr->range_name = no_free_ptr(rname);
10830 		}
10831 
10832 		while ((tok = strsep(&curr_str, ","))) {
10833 			early_enable_events(tr, tok, true);
10834 		}
10835 	}
10836 }
10837 
10838 __init static int tracer_alloc_buffers(void)
10839 {
10840 	int ring_buf_size;
10841 	int ret = -ENOMEM;
10842 
10843 
10844 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
10845 		pr_warn("Tracing disabled due to lockdown\n");
10846 		return -EPERM;
10847 	}
10848 
10849 	/*
10850 	 * Make sure we don't accidentally add more trace options
10851 	 * than we have bits for.
10852 	 */
10853 	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
10854 
10855 	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
10856 		goto out;
10857 
10858 	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
10859 		goto out_free_buffer_mask;
10860 
10861 	/* Only allocate trace_printk buffers if a trace_printk exists */
10862 	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
10863 		/* Must be called before global_trace.buffer is allocated */
10864 		trace_printk_init_buffers();
10865 
10866 	/* To save memory, keep the ring buffer size to its minimum */
10867 	if (global_trace.ring_buffer_expanded)
10868 		ring_buf_size = trace_buf_size;
10869 	else
10870 		ring_buf_size = 1;
10871 
10872 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
10873 	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
10874 
10875 	raw_spin_lock_init(&global_trace.start_lock);
10876 
10877 	/*
10878 	 * The prepare callback allocates some memory for the ring buffer. We
10879 	 * don't free the buffer if the CPU goes down. If we were to free
10880 	 * the buffer, then the user would lose any trace that was in the
10881 	 * buffer. The memory will be removed once the "instance" is removed.
10882 	 */
10883 	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
10884 				      "trace/RB:prepare", trace_rb_cpu_prepare,
10885 				      NULL);
10886 	if (ret < 0)
10887 		goto out_free_cpumask;
10888 	/* Used for event triggers */
10889 	ret = -ENOMEM;
10890 	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
10891 	if (!temp_buffer)
10892 		goto out_rm_hp_state;
10893 
10894 	if (trace_create_savedcmd() < 0)
10895 		goto out_free_temp_buffer;
10896 
10897 	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
10898 		goto out_free_savedcmd;
10899 
10900 	/* TODO: make the number of buffers hot pluggable with CPUs */
10901 	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
10902 		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
10903 		goto out_free_pipe_cpumask;
10904 	}
10905 	if (global_trace.buffer_disabled)
10906 		tracing_off();
10907 
10908 	if (trace_boot_clock) {
10909 		ret = tracing_set_clock(&global_trace, trace_boot_clock);
10910 		if (ret < 0)
10911 			pr_warn("Trace clock %s not defined, going back to default\n",
10912 				trace_boot_clock);
10913 	}
10914 
10915 	/*
10916 	 * register_tracer() might reference current_trace, so it
10917 	 * needs to be set before we register anything. This is
10918 	 * just a bootstrap of current_trace anyway.
10919 	 */
10920 	global_trace.current_trace = &nop_trace;
10921 
10922 	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
10923 #ifdef CONFIG_TRACER_MAX_TRACE
10924 	spin_lock_init(&global_trace.snapshot_trigger_lock);
10925 #endif
10926 	ftrace_init_global_array_ops(&global_trace);
10927 
10928 #ifdef CONFIG_MODULES
10929 	INIT_LIST_HEAD(&global_trace.mod_events);
10930 #endif
10931 
10932 	init_trace_flags_index(&global_trace);
10933 
10934 	register_tracer(&nop_trace);
10935 
10936 	/* Function tracing may start here (via kernel command line) */
10937 	init_function_trace();
10938 
10939 	/* All seems OK, enable tracing */
10940 	tracing_disabled = 0;
10941 
10942 	atomic_notifier_chain_register(&panic_notifier_list,
10943 				       &trace_panic_notifier);
10944 
10945 	register_die_notifier(&trace_die_notifier);
10946 
10947 	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
10948 
10949 	INIT_LIST_HEAD(&global_trace.systems);
10950 	INIT_LIST_HEAD(&global_trace.events);
10951 	INIT_LIST_HEAD(&global_trace.hist_vars);
10952 	INIT_LIST_HEAD(&global_trace.err_log);
10953 	list_add(&global_trace.list, &ftrace_trace_arrays);
10954 
10955 	apply_trace_boot_options();
10956 
10957 	register_snapshot_cmd();
10958 
10959 	return 0;
10960 
10961 out_free_pipe_cpumask:
10962 	free_cpumask_var(global_trace.pipe_cpumask);
10963 out_free_savedcmd:
10964 	trace_free_saved_cmdlines_buffer();
10965 out_free_temp_buffer:
10966 	ring_buffer_free(temp_buffer);
10967 out_rm_hp_state:
10968 	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
10969 out_free_cpumask:
10970 	free_cpumask_var(global_trace.tracing_cpumask);
10971 out_free_buffer_mask:
10972 	free_cpumask_var(tracing_buffer_mask);
10973 out:
10974 	return ret;
10975 }
10976 
10977 #ifdef CONFIG_FUNCTION_TRACER
10978 /* Used to set module cached ftrace filtering at boot up */
10979 __init struct trace_array *trace_get_global_array(void)
10980 {
10981 	return &global_trace;
10982 }
10983 #endif
10984 
10985 void __init ftrace_boot_snapshot(void)
10986 {
10987 #ifdef CONFIG_TRACER_MAX_TRACE
10988 	struct trace_array *tr;
10989 
10990 	if (!snapshot_at_boot)
10991 		return;
10992 
10993 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
10994 		if (!tr->allocated_snapshot)
10995 			continue;
10996 
10997 		tracing_snapshot_instance(tr);
10998 		trace_array_puts(tr, "** Boot snapshot taken **\n");
10999 	}
11000 #endif
11001 }
11002 
11003 void __init early_trace_init(void)
11004 {
11005 	if (tracepoint_printk) {
11006 		tracepoint_print_iter =
11007 			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
11008 		if (MEM_FAIL(!tracepoint_print_iter,
11009 			     "Failed to allocate trace iterator\n"))
11010 			tracepoint_printk = 0;
11011 		else
11012 			static_key_enable(&tracepoint_printk_key.key);
11013 	}
11014 	tracer_alloc_buffers();
11015 
11016 	init_events();
11017 }
11018 
11019 void __init trace_init(void)
11020 {
11021 	trace_event_init();
11022 
11023 	if (boot_instance_index)
11024 		enable_instances();
11025 }
11026 
11027 __init static void clear_boot_tracer(void)
11028 {
11029 	/*
11030 	 * The buffer holding the default boot tracer name lives in an
11031 	 * init section. This function is called at late_initcall time. If
11032 	 * the boot tracer was never registered, clear the pointer so a
11033 	 * later registration does not access the buffer that is about to
11034 	 * be freed.
11035 	 */
11036 	if (!default_bootup_tracer)
11037 		return;
11038 
11039 	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
11040 	       default_bootup_tracer);
11041 	default_bootup_tracer = NULL;
11042 }
11043 
11044 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
11045 __init static void tracing_set_default_clock(void)
11046 {
11047 	/* sched_clock_stable() is determined in late_initcall */
11048 	if (!trace_boot_clock && !sched_clock_stable()) {
11049 		if (security_locked_down(LOCKDOWN_TRACEFS)) {
11050 			pr_warn("Can not set tracing clock due to lockdown\n");
11051 			return;
11052 		}
11053 
11054 		printk(KERN_WARNING
11055 		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
11056 		       "If you want to keep using the local clock, then add:\n"
11057 		       "  \"trace_clock=local\"\n"
11058 		       "on the kernel command line\n");
11059 		tracing_set_clock(&global_trace, "global");
11060 	}
11061 }
11062 #else
11063 static inline void tracing_set_default_clock(void) { }
11064 #endif
11065 
11066 __init static int late_trace_init(void)
11067 {
11068 	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
11069 		static_key_disable(&tracepoint_printk_key.key);
11070 		tracepoint_printk = 0;
11071 	}
11072 
11073 	tracing_set_default_clock();
11074 	clear_boot_tracer();
11075 	return 0;
11076 }
11077 
11078 late_initcall_sync(late_trace_init);
11079