xref: /linux-6.15/kernel/trace/trace.c (revision b6533482)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * ring buffer based function tracer
4  *
5  * Copyright (C) 2007-2012 Steven Rostedt <[email protected]>
6  * Copyright (C) 2008 Ingo Molnar <[email protected]>
7  *
8  * Originally taken from the RT patch by:
9  *    Arnaldo Carvalho de Melo <[email protected]>
10  *
11  * Based on code from the latency_tracer, that is:
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15 #include <linux/ring_buffer.h>
16 #include <linux/utsname.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/cleanup.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/panic_notifier.h>
43 #include <linux/poll.h>
44 #include <linux/nmi.h>
45 #include <linux/fs.h>
46 #include <linux/trace.h>
47 #include <linux/sched/clock.h>
48 #include <linux/sched/rt.h>
49 #include <linux/fsnotify.h>
50 #include <linux/irq_work.h>
51 #include <linux/workqueue.h>
52 
53 #include <asm/setup.h> /* COMMAND_LINE_SIZE and kaslr_offset() */
54 
55 #include "trace.h"
56 #include "trace_output.h"
57 
58 #ifdef CONFIG_FTRACE_STARTUP_TEST
59 /*
60  * We need to change this state when a selftest is running.
 61  * A selftest will peek into the ring buffer to count the
 62  * entries inserted during the selftest, although concurrent
 63  * insertions into the ring buffer, such as trace_printk(), could occur
 64  * at the same time, giving false positive or negative results.
65  */
66 static bool __read_mostly tracing_selftest_running;
67 
68 /*
 69  * If boot-time tracing (including tracers/events set via the kernel cmdline)
 70  * is running, we do not want to run the selftests.
71  */
72 bool __read_mostly tracing_selftest_disabled;
73 
74 void __init disable_tracing_selftest(const char *reason)
75 {
76 	if (!tracing_selftest_disabled) {
77 		tracing_selftest_disabled = true;
78 		pr_info("Ftrace startup test is disabled due to %s\n", reason);
79 	}
80 }
81 #else
82 #define tracing_selftest_running	0
83 #define tracing_selftest_disabled	0
84 #endif
85 
86 /* Pipe tracepoints to printk */
87 static struct trace_iterator *tracepoint_print_iter;
88 int tracepoint_printk;
89 static bool tracepoint_printk_stop_on_boot __initdata;
90 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
91 
92 /* For tracers that don't implement custom flags */
93 static struct tracer_opt dummy_tracer_opt[] = {
94 	{ }
95 };
96 
97 static int
98 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
99 {
100 	return 0;
101 }
102 
103 /*
104  * To prevent the comm cache from being overwritten when no
105  * tracing is active, only save the comm when a trace event
106  * occurred.
107  */
108 DEFINE_PER_CPU(bool, trace_taskinfo_save);
109 
110 /*
111  * Kill all tracing for good (never come back).
112  * It is initialized to 1 but will turn to zero if the initialization
113  * of the tracer is successful. But that is the only place that sets
114  * this back to zero.
115  */
116 static int tracing_disabled = 1;
117 
118 cpumask_var_t __read_mostly	tracing_buffer_mask;
119 
120 /*
121  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
122  *
123  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
124  * is set, then ftrace_dump is called. This will output the contents
125  * of the ftrace buffers to the console.  This is very useful for
 126  * capturing traces that lead to crashes and outputting them to a
127  * serial console.
128  *
 129  * It is off by default. You can enable it either by specifying
 130  * "ftrace_dump_on_oops" on the kernel command line, or by setting
 131  * /proc/sys/kernel/ftrace_dump_on_oops.
 132  * Set it to 1 to dump the buffers of all CPUs.
 133  * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 134  * Set it to an instance name to dump that specific trace instance.
 135  * Dumping multiple instances is also supported; instance names are
 136  * separated by commas.
137  */
 138 /* Set to the string "0" so it is disabled by default */
139 char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0";
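/*
 * Illustrative examples (a sketch, not part of this file) of the forms
 * described above; "foo" and "bar" are made-up instance names:
 *
 *	ftrace_dump_on_oops			(boot: dump all CPU buffers)
 *	ftrace_dump_on_oops=2			(boot: dump only the oopsing CPU)
 *	ftrace_dump_on_oops=foo,bar		(boot: dump instances "foo" and "bar")
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops	(enable at run time)
 */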
140 
141 /* When set, tracing will stop when a WARN*() is hit */
142 int __disable_trace_on_warning;
143 
144 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
145 /* Map of enums to their values, for "eval_map" file */
146 struct trace_eval_map_head {
147 	struct module			*mod;
148 	unsigned long			length;
149 };
150 
151 union trace_eval_map_item;
152 
153 struct trace_eval_map_tail {
154 	/*
155 	 * "end" is first and points to NULL as it must be different
156 	 * than "mod" or "eval_string"
157 	 */
158 	union trace_eval_map_item	*next;
159 	const char			*end;	/* points to NULL */
160 };
161 
162 static DEFINE_MUTEX(trace_eval_mutex);
163 
164 /*
165  * The trace_eval_maps are saved in an array with two extra elements,
166  * one at the beginning, and one at the end. The beginning item contains
167  * the count of the saved maps (head.length), and the module they
168  * belong to if not built in (head.mod). The ending item contains a
169  * pointer to the next array of saved eval_map items.
170  */
171 union trace_eval_map_item {
172 	struct trace_eval_map		map;
173 	struct trace_eval_map_head	head;
174 	struct trace_eval_map_tail	tail;
175 };
176 
177 static union trace_eval_map_item *trace_eval_maps;
178 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
179 
180 int tracing_set_tracer(struct trace_array *tr, const char *buf);
181 static void ftrace_trace_userstack(struct trace_array *tr,
182 				   struct trace_buffer *buffer,
183 				   unsigned int trace_ctx);
184 
185 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
186 static char *default_bootup_tracer;
187 
188 static bool allocate_snapshot;
189 static bool snapshot_at_boot;
190 
191 static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
192 static int boot_instance_index;
193 
194 static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
195 static int boot_snapshot_index;
196 
197 static int __init set_cmdline_ftrace(char *str)
198 {
199 	strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
200 	default_bootup_tracer = bootup_tracer_buf;
201 	/* We are using ftrace early, expand it */
202 	trace_set_ring_buffer_expanded(NULL);
203 	return 1;
204 }
205 __setup("ftrace=", set_cmdline_ftrace);
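/*
 * Example (illustrative): booting with "ftrace=function_graph" selects the
 * function_graph tracer before user space is up, which is why the ring
 * buffer is expanded early here.
 */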
206 
207 int ftrace_dump_on_oops_enabled(void)
208 {
209 	if (!strcmp("0", ftrace_dump_on_oops))
210 		return 0;
211 	else
212 		return 1;
213 }
214 
215 static int __init set_ftrace_dump_on_oops(char *str)
216 {
217 	if (!*str) {
218 		strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
219 		return 1;
220 	}
221 
222 	if (*str == ',') {
223 		strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
224 		strscpy(ftrace_dump_on_oops + 1, str, MAX_TRACER_SIZE - 1);
225 		return 1;
226 	}
227 
228 	if (*str++ == '=') {
229 		strscpy(ftrace_dump_on_oops, str, MAX_TRACER_SIZE);
230 		return 1;
231 	}
232 
233 	return 0;
234 }
235 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
236 
237 static int __init stop_trace_on_warning(char *str)
238 {
239 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
240 		__disable_trace_on_warning = 1;
241 	return 1;
242 }
243 __setup("traceoff_on_warning", stop_trace_on_warning);
244 
245 static int __init boot_alloc_snapshot(char *str)
246 {
247 	char *slot = boot_snapshot_info + boot_snapshot_index;
248 	int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
249 	int ret;
250 
251 	if (str[0] == '=') {
252 		str++;
253 		if (strlen(str) >= left)
254 			return -1;
255 
256 		ret = snprintf(slot, left, "%s\t", str);
257 		boot_snapshot_index += ret;
258 	} else {
259 		allocate_snapshot = true;
260 		/* We also need the main ring buffer expanded */
261 		trace_set_ring_buffer_expanded(NULL);
262 	}
263 	return 1;
264 }
265 __setup("alloc_snapshot", boot_alloc_snapshot);
266 
267 
268 static int __init boot_snapshot(char *str)
269 {
270 	snapshot_at_boot = true;
271 	boot_alloc_snapshot(str);
272 	return 1;
273 }
274 __setup("ftrace_boot_snapshot", boot_snapshot);
275 
276 
277 static int __init boot_instance(char *str)
278 {
279 	char *slot = boot_instance_info + boot_instance_index;
280 	int left = sizeof(boot_instance_info) - boot_instance_index;
281 	int ret;
282 
283 	if (strlen(str) >= left)
284 		return -1;
285 
286 	ret = snprintf(slot, left, "%s\t", str);
287 	boot_instance_index += ret;
288 
289 	return 1;
290 }
291 __setup("trace_instance=", boot_instance);
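/*
 * Example (illustrative): booting with "trace_instance=foo" creates a
 * boot-time trace instance named "foo" ("foo" is a made-up name here).
 */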
292 
293 
294 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
295 
296 static int __init set_trace_boot_options(char *str)
297 {
298 	strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
299 	return 1;
300 }
301 __setup("trace_options=", set_trace_boot_options);
302 
303 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
304 static char *trace_boot_clock __initdata;
305 
306 static int __init set_trace_boot_clock(char *str)
307 {
308 	strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
309 	trace_boot_clock = trace_boot_clock_buf;
310 	return 1;
311 }
312 __setup("trace_clock=", set_trace_boot_clock);
313 
314 static int __init set_tracepoint_printk(char *str)
315 {
316 	/* Ignore the "tp_printk_stop_on_boot" param */
317 	if (*str == '_')
318 		return 0;
319 
320 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
321 		tracepoint_printk = 1;
322 	return 1;
323 }
324 __setup("tp_printk", set_tracepoint_printk);
325 
326 static int __init set_tracepoint_printk_stop(char *str)
327 {
328 	tracepoint_printk_stop_on_boot = true;
329 	return 1;
330 }
331 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
332 
333 unsigned long long ns2usecs(u64 nsec)
334 {
335 	nsec += 500;
336 	do_div(nsec, 1000);
337 	return nsec;
338 }
339 
340 static void
341 trace_process_export(struct trace_export *export,
342 	       struct ring_buffer_event *event, int flag)
343 {
344 	struct trace_entry *entry;
345 	unsigned int size = 0;
346 
347 	if (export->flags & flag) {
348 		entry = ring_buffer_event_data(event);
349 		size = ring_buffer_event_length(event);
350 		export->write(export, entry, size);
351 	}
352 }
353 
354 static DEFINE_MUTEX(ftrace_export_lock);
355 
356 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
357 
358 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
359 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
360 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
361 
362 static inline void ftrace_exports_enable(struct trace_export *export)
363 {
364 	if (export->flags & TRACE_EXPORT_FUNCTION)
365 		static_branch_inc(&trace_function_exports_enabled);
366 
367 	if (export->flags & TRACE_EXPORT_EVENT)
368 		static_branch_inc(&trace_event_exports_enabled);
369 
370 	if (export->flags & TRACE_EXPORT_MARKER)
371 		static_branch_inc(&trace_marker_exports_enabled);
372 }
373 
374 static inline void ftrace_exports_disable(struct trace_export *export)
375 {
376 	if (export->flags & TRACE_EXPORT_FUNCTION)
377 		static_branch_dec(&trace_function_exports_enabled);
378 
379 	if (export->flags & TRACE_EXPORT_EVENT)
380 		static_branch_dec(&trace_event_exports_enabled);
381 
382 	if (export->flags & TRACE_EXPORT_MARKER)
383 		static_branch_dec(&trace_marker_exports_enabled);
384 }
385 
386 static void ftrace_exports(struct ring_buffer_event *event, int flag)
387 {
388 	struct trace_export *export;
389 
390 	preempt_disable_notrace();
391 
392 	export = rcu_dereference_raw_check(ftrace_exports_list);
393 	while (export) {
394 		trace_process_export(export, event, flag);
395 		export = rcu_dereference_raw_check(export->next);
396 	}
397 
398 	preempt_enable_notrace();
399 }
400 
401 static inline void
402 add_trace_export(struct trace_export **list, struct trace_export *export)
403 {
404 	rcu_assign_pointer(export->next, *list);
405 	/*
406 	 * We are entering export into the list but another
407 	 * CPU might be walking that list. We need to make sure
408 	 * the export->next pointer is valid before another CPU sees
409 	 * the export pointer included into the list.
410 	 */
411 	rcu_assign_pointer(*list, export);
412 }
413 
414 static inline int
415 rm_trace_export(struct trace_export **list, struct trace_export *export)
416 {
417 	struct trace_export **p;
418 
419 	for (p = list; *p != NULL; p = &(*p)->next)
420 		if (*p == export)
421 			break;
422 
423 	if (*p != export)
424 		return -1;
425 
426 	rcu_assign_pointer(*p, (*p)->next);
427 
428 	return 0;
429 }
430 
431 static inline void
432 add_ftrace_export(struct trace_export **list, struct trace_export *export)
433 {
434 	ftrace_exports_enable(export);
435 
436 	add_trace_export(list, export);
437 }
438 
439 static inline int
440 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
441 {
442 	int ret;
443 
444 	ret = rm_trace_export(list, export);
445 	ftrace_exports_disable(export);
446 
447 	return ret;
448 }
449 
450 int register_ftrace_export(struct trace_export *export)
451 {
452 	if (WARN_ON_ONCE(!export->write))
453 		return -1;
454 
455 	mutex_lock(&ftrace_export_lock);
456 
457 	add_ftrace_export(&ftrace_exports_list, export);
458 
459 	mutex_unlock(&ftrace_export_lock);
460 
461 	return 0;
462 }
463 EXPORT_SYMBOL_GPL(register_ftrace_export);
464 
465 int unregister_ftrace_export(struct trace_export *export)
466 {
467 	int ret;
468 
469 	mutex_lock(&ftrace_export_lock);
470 
471 	ret = rm_ftrace_export(&ftrace_exports_list, export);
472 
473 	mutex_unlock(&ftrace_export_lock);
474 
475 	return ret;
476 }
477 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
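/*
 * Illustrative sketch (not a quote of any in-tree user; my_write and
 * my_export are made-up names) of how a module can hook the export list:
 *
 *	static void my_write(struct trace_export *export, const void *entry,
 *			     unsigned int size)
 *	{
 *		// forward the raw trace entry to an external sink
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_write,
 *		.flags	= TRACE_EXPORT_FUNCTION | TRACE_EXPORT_EVENT,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */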
478 
479 /* trace_flags holds trace_options default values */
480 #define TRACE_DEFAULT_FLAGS						\
481 	(FUNCTION_DEFAULT_FLAGS |					\
482 	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
483 	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
484 	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
485 	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
486 	 TRACE_ITER_HASH_PTR | TRACE_ITER_TRACE_PRINTK)
487 
488 /* trace_options that are only supported by global_trace */
489 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
490 	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
491 
492 /* trace_flags that are default zero for instances */
493 #define ZEROED_TRACE_FLAGS \
494 	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK | TRACE_ITER_TRACE_PRINTK)
495 
496 /*
497  * The global_trace is the descriptor that holds the top-level tracing
498  * buffers for the live tracing.
499  */
500 static struct trace_array global_trace = {
501 	.trace_flags = TRACE_DEFAULT_FLAGS,
502 };
503 
504 static struct trace_array *printk_trace = &global_trace;
505 
506 static __always_inline bool printk_binsafe(struct trace_array *tr)
507 {
508 	/*
 509 	 * The binary format of trace_printk() can cause a crash if used
 510 	 * by a buffer from another boot. Force the use of the
 511 	 * non-binary version of trace_printk() if the trace_printk
 512 	 * buffer is a boot-mapped ring buffer.
513 	 */
514 	return !(tr->flags & TRACE_ARRAY_FL_BOOT);
515 }
516 
517 static void update_printk_trace(struct trace_array *tr)
518 {
519 	if (printk_trace == tr)
520 		return;
521 
522 	printk_trace->trace_flags &= ~TRACE_ITER_TRACE_PRINTK;
523 	printk_trace = tr;
524 	tr->trace_flags |= TRACE_ITER_TRACE_PRINTK;
525 }
526 
527 void trace_set_ring_buffer_expanded(struct trace_array *tr)
528 {
529 	if (!tr)
530 		tr = &global_trace;
531 	tr->ring_buffer_expanded = true;
532 }
533 
534 LIST_HEAD(ftrace_trace_arrays);
535 
536 int trace_array_get(struct trace_array *this_tr)
537 {
538 	struct trace_array *tr;
539 
540 	guard(mutex)(&trace_types_lock);
541 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
542 		if (tr == this_tr) {
543 			tr->ref++;
544 			return 0;
545 		}
546 	}
547 
548 	return -ENODEV;
549 }
550 
551 static void __trace_array_put(struct trace_array *this_tr)
552 {
553 	WARN_ON(!this_tr->ref);
554 	this_tr->ref--;
555 }
556 
557 /**
558  * trace_array_put - Decrement the reference counter for this trace array.
 559  * @this_tr: pointer to the trace array
560  *
561  * NOTE: Use this when we no longer need the trace array returned by
562  * trace_array_get_by_name(). This ensures the trace array can be later
563  * destroyed.
564  *
565  */
566 void trace_array_put(struct trace_array *this_tr)
567 {
568 	if (!this_tr)
569 		return;
570 
571 	mutex_lock(&trace_types_lock);
572 	__trace_array_put(this_tr);
573 	mutex_unlock(&trace_types_lock);
574 }
575 EXPORT_SYMBOL_GPL(trace_array_put);
576 
577 int tracing_check_open_get_tr(struct trace_array *tr)
578 {
579 	int ret;
580 
581 	ret = security_locked_down(LOCKDOWN_TRACEFS);
582 	if (ret)
583 		return ret;
584 
585 	if (tracing_disabled)
586 		return -ENODEV;
587 
588 	if (tr && trace_array_get(tr) < 0)
589 		return -ENODEV;
590 
591 	return 0;
592 }
593 
594 /**
595  * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
596  * @filtered_pids: The list of pids to check
597  * @search_pid: The PID to find in @filtered_pids
598  *
599  * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
600  */
601 bool
602 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
603 {
604 	return trace_pid_list_is_set(filtered_pids, search_pid);
605 }
606 
607 /**
608  * trace_ignore_this_task - should a task be ignored for tracing
609  * @filtered_pids: The list of pids to check
610  * @filtered_no_pids: The list of pids not to be traced
611  * @task: The task that should be ignored if not filtered
612  *
613  * Checks if @task should be traced or not from @filtered_pids.
614  * Returns true if @task should *NOT* be traced.
615  * Returns false if @task should be traced.
616  */
617 bool
618 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
619 		       struct trace_pid_list *filtered_no_pids,
620 		       struct task_struct *task)
621 {
622 	/*
623 	 * If filtered_no_pids is not empty, and the task's pid is listed
624 	 * in filtered_no_pids, then return true.
625 	 * Otherwise, if filtered_pids is empty, that means we can
626 	 * trace all tasks. If it has content, then only trace pids
627 	 * within filtered_pids.
628 	 */
629 
630 	return (filtered_pids &&
631 		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
632 		(filtered_no_pids &&
633 		 trace_find_filtered_pid(filtered_no_pids, task->pid));
634 }
635 
636 /**
637  * trace_filter_add_remove_task - Add or remove a task from a pid_list
638  * @pid_list: The list to modify
639  * @self: The current task for fork or NULL for exit
640  * @task: The task to add or remove
641  *
642  * If adding a task, if @self is defined, the task is only added if @self
643  * is also included in @pid_list. This happens on fork and tasks should
644  * only be added when the parent is listed. If @self is NULL, then the
645  * @task pid will be removed from the list, which would happen on exit
646  * of a task.
647  */
648 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
649 				  struct task_struct *self,
650 				  struct task_struct *task)
651 {
652 	if (!pid_list)
653 		return;
654 
655 	/* For forks, we only add if the forking task is listed */
656 	if (self) {
657 		if (!trace_find_filtered_pid(pid_list, self->pid))
658 			return;
659 	}
660 
661 	/* "self" is set for forks, and NULL for exits */
662 	if (self)
663 		trace_pid_list_set(pid_list, task->pid);
664 	else
665 		trace_pid_list_clear(pid_list, task->pid);
666 }
667 
668 /**
669  * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
670  * @pid_list: The pid list to show
 671  * @v: The last pid that was shown (the actual pid + 1, so that zero can be displayed)
672  * @pos: The position of the file
673  *
674  * This is used by the seq_file "next" operation to iterate the pids
675  * listed in a trace_pid_list structure.
676  *
677  * Returns the pid+1 as we want to display pid of zero, but NULL would
678  * stop the iteration.
679  */
680 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
681 {
682 	long pid = (unsigned long)v;
683 	unsigned int next;
684 
685 	(*pos)++;
686 
687 	/* pid already is +1 of the actual previous bit */
688 	if (trace_pid_list_next(pid_list, pid, &next) < 0)
689 		return NULL;
690 
691 	pid = next;
692 
693 	/* Return pid + 1 to allow zero to be represented */
694 	return (void *)(pid + 1);
695 }
696 
697 /**
698  * trace_pid_start - Used for seq_file to start reading pid lists
699  * @pid_list: The pid list to show
700  * @pos: The position of the file
701  *
702  * This is used by seq_file "start" operation to start the iteration
703  * of listing pids.
704  *
705  * Returns the pid+1 as we want to display pid of zero, but NULL would
706  * stop the iteration.
707  */
708 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
709 {
710 	unsigned long pid;
711 	unsigned int first;
712 	loff_t l = 0;
713 
714 	if (trace_pid_list_first(pid_list, &first) < 0)
715 		return NULL;
716 
717 	pid = first;
718 
719 	/* Return pid + 1 so that zero can be the exit value */
720 	for (pid++; pid && l < *pos;
721 	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
722 		;
723 	return (void *)pid;
724 }
725 
726 /**
727  * trace_pid_show - show the current pid in seq_file processing
728  * @m: The seq_file structure to write into
729  * @v: A void pointer of the pid (+1) value to display
730  *
731  * Can be directly used by seq_file operations to display the current
732  * pid value.
733  */
734 int trace_pid_show(struct seq_file *m, void *v)
735 {
736 	unsigned long pid = (unsigned long)v - 1;
737 
738 	seq_printf(m, "%lu\n", pid);
739 	return 0;
740 }
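/*
 * Illustrative sketch (all names are made up; p_stop and the pid_list
 * lookup are omitted) of how the three helpers above back a seq_file:
 *
 *	static void *p_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list, pos);
 *	}
 *
 *	static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations my_pid_seq_ops = {
 *		.start	= p_start,
 *		.next	= p_next,
 *		.stop	= p_stop,
 *		.show	= trace_pid_show,
 *	};
 */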
741 
742 /* 128 should be much more than enough */
743 #define PID_BUF_SIZE		127
744 
745 int trace_pid_write(struct trace_pid_list *filtered_pids,
746 		    struct trace_pid_list **new_pid_list,
747 		    const char __user *ubuf, size_t cnt)
748 {
749 	struct trace_pid_list *pid_list;
750 	struct trace_parser parser;
751 	unsigned long val;
752 	int nr_pids = 0;
753 	ssize_t read = 0;
754 	ssize_t ret;
755 	loff_t pos;
756 	pid_t pid;
757 
758 	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
759 		return -ENOMEM;
760 
761 	/*
762 	 * Always recreate a new array. The write is an all or nothing
763 	 * operation. Always create a new array when adding new pids by
764 	 * the user. If the operation fails, then the current list is
765 	 * not modified.
766 	 */
767 	pid_list = trace_pid_list_alloc();
768 	if (!pid_list) {
769 		trace_parser_put(&parser);
770 		return -ENOMEM;
771 	}
772 
773 	if (filtered_pids) {
774 		/* copy the current bits to the new max */
775 		ret = trace_pid_list_first(filtered_pids, &pid);
776 		while (!ret) {
777 			trace_pid_list_set(pid_list, pid);
778 			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
779 			nr_pids++;
780 		}
781 	}
782 
783 	ret = 0;
784 	while (cnt > 0) {
785 
786 		pos = 0;
787 
788 		ret = trace_get_user(&parser, ubuf, cnt, &pos);
789 		if (ret < 0)
790 			break;
791 
792 		read += ret;
793 		ubuf += ret;
794 		cnt -= ret;
795 
796 		if (!trace_parser_loaded(&parser))
797 			break;
798 
799 		ret = -EINVAL;
800 		if (kstrtoul(parser.buffer, 0, &val))
801 			break;
802 
803 		pid = (pid_t)val;
804 
805 		if (trace_pid_list_set(pid_list, pid) < 0) {
806 			ret = -1;
807 			break;
808 		}
809 		nr_pids++;
810 
811 		trace_parser_clear(&parser);
812 		ret = 0;
813 	}
814 	trace_parser_put(&parser);
815 
816 	if (ret < 0) {
817 		trace_pid_list_free(pid_list);
818 		return ret;
819 	}
820 
821 	if (!nr_pids) {
822 		/* Cleared the list of pids */
823 		trace_pid_list_free(pid_list);
824 		pid_list = NULL;
825 	}
826 
827 	*new_pid_list = pid_list;
828 
829 	return read;
830 }
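/*
 * Usage note (illustrative, not part of this file): files such as
 * set_event_pid feed their write() straight into trace_pid_write(), so
 * from user space
 *
 *	echo 123 456 > set_event_pid	(filter on pids 123 and 456)
 *	echo > set_event_pid		(no pids parsed: clear the filter)
 *
 * and a failed write leaves the previous pid list untouched.
 */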
831 
832 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
833 {
834 	u64 ts;
835 
836 	/* Early boot up does not have a buffer yet */
837 	if (!buf->buffer)
838 		return trace_clock_local();
839 
840 	ts = ring_buffer_time_stamp(buf->buffer);
841 	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
842 
843 	return ts;
844 }
845 
846 u64 ftrace_now(int cpu)
847 {
848 	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
849 }
850 
851 /**
852  * tracing_is_enabled - Show if global_trace has been enabled
853  *
854  * Shows if the global trace has been enabled or not. It uses the
855  * mirror flag "buffer_disabled" to be used in fast paths such as for
856  * the irqsoff tracer. But it may be inaccurate due to races. If you
857  * need to know the accurate state, use tracing_is_on() which is a little
858  * slower, but accurate.
859  */
860 int tracing_is_enabled(void)
861 {
862 	/*
863 	 * For quick access (irqsoff uses this in fast path), just
864 	 * return the mirror variable of the state of the ring buffer.
865 	 * It's a little racy, but we don't really care.
866 	 */
867 	smp_rmb();
868 	return !global_trace.buffer_disabled;
869 }
870 
871 /*
872  * trace_buf_size is the size in bytes that is allocated
873  * for a buffer. Note, the number of bytes is always rounded
874  * to page size.
875  *
876  * This number is purposely set to a low number of 16384.
 877  * If the dump on oops happens, it is much appreciated not to
 878  * have to wait for all that output. Anyway, this is configurable
 879  * both at boot time and at run time.
880  */
881 #define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
882 
883 static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
884 
885 /* trace_types holds a link list of available tracers. */
886 static struct tracer		*trace_types __read_mostly;
887 
888 /*
889  * trace_types_lock is used to protect the trace_types list.
890  */
891 DEFINE_MUTEX(trace_types_lock);
892 
893 /*
 894  * Serialize access to the ring buffer
 895  *
 896  * The ring buffer serializes readers, but that is only low-level protection.
 897  * The validity of the events (returned by ring_buffer_peek(), etc.)
 898  * is not protected by the ring buffer.
 899  *
 900  * The content of events may become garbage if we allow other processes to
 901  * consume these events concurrently:
 902  *   A) the page of the consumed events may become a normal page
 903  *      (not a reader page) in the ring buffer, and this page will be
 904  *      rewritten by the events producer.
 905  *   B) the page of the consumed events may become a page for splice_read,
 906  *      and this page will be returned to the system.
 907  *
 908  * These primitives allow multiple processes to access different CPU
 909  * ring buffers concurrently.
 910  *
 911  * These primitives don't distinguish between read-only and read-consume
 912  * access. Multiple read-only accesses are also serialized.
913  */
914 
915 #ifdef CONFIG_SMP
916 static DECLARE_RWSEM(all_cpu_access_lock);
917 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
918 
919 static inline void trace_access_lock(int cpu)
920 {
921 	if (cpu == RING_BUFFER_ALL_CPUS) {
922 		/* gain it for accessing the whole ring buffer. */
923 		down_write(&all_cpu_access_lock);
924 	} else {
925 		/* gain it for accessing a cpu ring buffer. */
926 
927 		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
928 		down_read(&all_cpu_access_lock);
929 
930 		/* Secondly block other access to this @cpu ring buffer. */
931 		mutex_lock(&per_cpu(cpu_access_lock, cpu));
932 	}
933 }
934 
935 static inline void trace_access_unlock(int cpu)
936 {
937 	if (cpu == RING_BUFFER_ALL_CPUS) {
938 		up_write(&all_cpu_access_lock);
939 	} else {
940 		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
941 		up_read(&all_cpu_access_lock);
942 	}
943 }
944 
945 static inline void trace_access_lock_init(void)
946 {
947 	int cpu;
948 
949 	for_each_possible_cpu(cpu)
950 		mutex_init(&per_cpu(cpu_access_lock, cpu));
951 }
952 
953 #else
954 
955 static DEFINE_MUTEX(access_lock);
956 
957 static inline void trace_access_lock(int cpu)
958 {
959 	(void)cpu;
960 	mutex_lock(&access_lock);
961 }
962 
963 static inline void trace_access_unlock(int cpu)
964 {
965 	(void)cpu;
966 	mutex_unlock(&access_lock);
967 }
968 
969 static inline void trace_access_lock_init(void)
970 {
971 }
972 
973 #endif
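/*
 * Illustrative usage pattern (a sketch, not a quote of any caller): readers
 * bracket their ring-buffer consumption with these helpers, e.g.
 *
 *	trace_access_lock(iter->cpu_file);
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	...
 *	trace_access_unlock(iter->cpu_file);
 */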
974 
975 #ifdef CONFIG_STACKTRACE
976 static void __ftrace_trace_stack(struct trace_array *tr,
977 				 struct trace_buffer *buffer,
978 				 unsigned int trace_ctx,
979 				 int skip, struct pt_regs *regs);
980 static inline void ftrace_trace_stack(struct trace_array *tr,
981 				      struct trace_buffer *buffer,
982 				      unsigned int trace_ctx,
983 				      int skip, struct pt_regs *regs);
984 
985 #else
986 static inline void __ftrace_trace_stack(struct trace_array *tr,
987 					struct trace_buffer *buffer,
988 					unsigned int trace_ctx,
989 					int skip, struct pt_regs *regs)
990 {
991 }
992 static inline void ftrace_trace_stack(struct trace_array *tr,
993 				      struct trace_buffer *buffer,
994 				      unsigned long trace_ctx,
995 				      int skip, struct pt_regs *regs)
996 {
997 }
998 
999 #endif
1000 
1001 static __always_inline void
1002 trace_event_setup(struct ring_buffer_event *event,
1003 		  int type, unsigned int trace_ctx)
1004 {
1005 	struct trace_entry *ent = ring_buffer_event_data(event);
1006 
1007 	tracing_generic_entry_update(ent, type, trace_ctx);
1008 }
1009 
1010 static __always_inline struct ring_buffer_event *
1011 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
1012 			  int type,
1013 			  unsigned long len,
1014 			  unsigned int trace_ctx)
1015 {
1016 	struct ring_buffer_event *event;
1017 
1018 	event = ring_buffer_lock_reserve(buffer, len);
1019 	if (event != NULL)
1020 		trace_event_setup(event, type, trace_ctx);
1021 
1022 	return event;
1023 }
1024 
1025 void tracer_tracing_on(struct trace_array *tr)
1026 {
1027 	if (tr->array_buffer.buffer)
1028 		ring_buffer_record_on(tr->array_buffer.buffer);
1029 	/*
1030 	 * This flag is looked at when buffers haven't been allocated
1031 	 * yet, or by some tracers (like irqsoff), that just want to
1032 	 * know if the ring buffer has been disabled, but it can handle
 1033 	 * races where it gets disabled while we still do a record.
1034 	 * As the check is in the fast path of the tracers, it is more
1035 	 * important to be fast than accurate.
1036 	 */
1037 	tr->buffer_disabled = 0;
1038 	/* Make the flag seen by readers */
1039 	smp_wmb();
1040 }
1041 
1042 /**
1043  * tracing_on - enable tracing buffers
1044  *
1045  * This function enables tracing buffers that may have been
1046  * disabled with tracing_off.
1047  */
1048 void tracing_on(void)
1049 {
1050 	tracer_tracing_on(&global_trace);
1051 }
1052 EXPORT_SYMBOL_GPL(tracing_on);
1053 
1054 
1055 static __always_inline void
1056 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
1057 {
1058 	__this_cpu_write(trace_taskinfo_save, true);
1059 
1060 	/* If this is the temp buffer, we need to commit fully */
1061 	if (this_cpu_read(trace_buffered_event) == event) {
1062 		/* Length is in event->array[0] */
1063 		ring_buffer_write(buffer, event->array[0], &event->array[1]);
1064 		/* Release the temp buffer */
1065 		this_cpu_dec(trace_buffered_event_cnt);
1066 		/* ring_buffer_unlock_commit() enables preemption */
1067 		preempt_enable_notrace();
1068 	} else
1069 		ring_buffer_unlock_commit(buffer);
1070 }
1071 
1072 int __trace_array_puts(struct trace_array *tr, unsigned long ip,
1073 		       const char *str, int size)
1074 {
1075 	struct ring_buffer_event *event;
1076 	struct trace_buffer *buffer;
1077 	struct print_entry *entry;
1078 	unsigned int trace_ctx;
1079 	int alloc;
1080 
1081 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1082 		return 0;
1083 
1084 	if (unlikely(tracing_selftest_running && tr == &global_trace))
1085 		return 0;
1086 
1087 	if (unlikely(tracing_disabled))
1088 		return 0;
1089 
1090 	alloc = sizeof(*entry) + size + 2; /* possible \n added */
1091 
1092 	trace_ctx = tracing_gen_ctx();
1093 	buffer = tr->array_buffer.buffer;
1094 	ring_buffer_nest_start(buffer);
1095 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1096 					    trace_ctx);
1097 	if (!event) {
1098 		size = 0;
1099 		goto out;
1100 	}
1101 
1102 	entry = ring_buffer_event_data(event);
1103 	entry->ip = ip;
1104 
1105 	memcpy(&entry->buf, str, size);
1106 
1107 	/* Add a newline if necessary */
1108 	if (entry->buf[size - 1] != '\n') {
1109 		entry->buf[size] = '\n';
1110 		entry->buf[size + 1] = '\0';
1111 	} else
1112 		entry->buf[size] = '\0';
1113 
1114 	__buffer_unlock_commit(buffer, event);
1115 	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1116  out:
1117 	ring_buffer_nest_end(buffer);
1118 	return size;
1119 }
1120 EXPORT_SYMBOL_GPL(__trace_array_puts);
1121 
1122 /**
1123  * __trace_puts - write a constant string into the trace buffer.
1124  * @ip:	   The address of the caller
1125  * @str:   The constant string to write
1126  * @size:  The size of the string.
1127  */
1128 int __trace_puts(unsigned long ip, const char *str, int size)
1129 {
1130 	return __trace_array_puts(printk_trace, ip, str, size);
1131 }
1132 EXPORT_SYMBOL_GPL(__trace_puts);
1133 
1134 /**
1135  * __trace_bputs - write the pointer to a constant string into trace buffer
1136  * @ip:	   The address of the caller
1137  * @str:   The constant string to write to the buffer to
1138  */
1139 int __trace_bputs(unsigned long ip, const char *str)
1140 {
1141 	struct trace_array *tr = READ_ONCE(printk_trace);
1142 	struct ring_buffer_event *event;
1143 	struct trace_buffer *buffer;
1144 	struct bputs_entry *entry;
1145 	unsigned int trace_ctx;
1146 	int size = sizeof(struct bputs_entry);
1147 	int ret = 0;
1148 
1149 	if (!printk_binsafe(tr))
1150 		return __trace_puts(ip, str, strlen(str));
1151 
1152 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
1153 		return 0;
1154 
1155 	if (unlikely(tracing_selftest_running || tracing_disabled))
1156 		return 0;
1157 
1158 	trace_ctx = tracing_gen_ctx();
1159 	buffer = tr->array_buffer.buffer;
1160 
1161 	ring_buffer_nest_start(buffer);
1162 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1163 					    trace_ctx);
1164 	if (!event)
1165 		goto out;
1166 
1167 	entry = ring_buffer_event_data(event);
1168 	entry->ip			= ip;
1169 	entry->str			= str;
1170 
1171 	__buffer_unlock_commit(buffer, event);
1172 	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
1173 
1174 	ret = 1;
1175  out:
1176 	ring_buffer_nest_end(buffer);
1177 	return ret;
1178 }
1179 EXPORT_SYMBOL_GPL(__trace_bputs);
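/*
 * Example (illustrative): kernel code normally reaches the two functions
 * above through the trace_puts() macro rather than calling them directly:
 *
 *	trace_puts("reached the slow path\n");
 *
 * The macro picks __trace_bputs() for build-time constant strings and
 * falls back to __trace_puts() otherwise.
 */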
1180 
1181 #ifdef CONFIG_TRACER_SNAPSHOT
1182 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1183 					   void *cond_data)
1184 {
1185 	struct tracer *tracer = tr->current_trace;
1186 	unsigned long flags;
1187 
1188 	if (in_nmi()) {
1189 		trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1190 		trace_array_puts(tr, "*** snapshot is being ignored        ***\n");
1191 		return;
1192 	}
1193 
1194 	if (!tr->allocated_snapshot) {
1195 		trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
1196 		trace_array_puts(tr, "*** stopping trace here!   ***\n");
1197 		tracer_tracing_off(tr);
1198 		return;
1199 	}
1200 
 1201 	/* Note, the snapshot cannot be used while the tracer itself uses it */
1202 	if (tracer->use_max_tr) {
1203 		trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
1204 		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1205 		return;
1206 	}
1207 
1208 	if (tr->mapped) {
1209 		trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n");
1210 		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
1211 		return;
1212 	}
1213 
1214 	local_irq_save(flags);
1215 	update_max_tr(tr, current, smp_processor_id(), cond_data);
1216 	local_irq_restore(flags);
1217 }
1218 
1219 void tracing_snapshot_instance(struct trace_array *tr)
1220 {
1221 	tracing_snapshot_instance_cond(tr, NULL);
1222 }
1223 
1224 /**
1225  * tracing_snapshot - take a snapshot of the current buffer.
1226  *
1227  * This causes a swap between the snapshot buffer and the current live
1228  * tracing buffer. You can use this to take snapshots of the live
1229  * trace when some condition is triggered, but continue to trace.
1230  *
1231  * Note, make sure to allocate the snapshot with either
1232  * a tracing_snapshot_alloc(), or by doing it manually
1233  * with: echo 1 > /sys/kernel/tracing/snapshot
1234  *
1235  * If the snapshot buffer is not allocated, it will stop tracing.
1236  * Basically making a permanent snapshot.
1237  */
1238 void tracing_snapshot(void)
1239 {
1240 	struct trace_array *tr = &global_trace;
1241 
1242 	tracing_snapshot_instance(tr);
1243 }
1244 EXPORT_SYMBOL_GPL(tracing_snapshot);
1245 
1246 /**
1247  * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1248  * @tr:		The tracing instance to snapshot
1249  * @cond_data:	The data to be tested conditionally, and possibly saved
1250  *
1251  * This is the same as tracing_snapshot() except that the snapshot is
1252  * conditional - the snapshot will only happen if the
1253  * cond_snapshot.update() implementation receiving the cond_data
1254  * returns true, which means that the trace array's cond_snapshot
1255  * update() operation used the cond_data to determine whether the
1256  * snapshot should be taken, and if it was, presumably saved it along
1257  * with the snapshot.
1258  */
1259 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1260 {
1261 	tracing_snapshot_instance_cond(tr, cond_data);
1262 }
1263 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1264 
1265 /**
1266  * tracing_cond_snapshot_data - get the user data associated with a snapshot
1267  * @tr:		The tracing instance
1268  *
1269  * When the user enables a conditional snapshot using
1270  * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1271  * with the snapshot.  This accessor is used to retrieve it.
1272  *
1273  * Should not be called from cond_snapshot.update(), since it takes
1274  * the tr->max_lock lock, which the code calling
1275  * cond_snapshot.update() has already done.
1276  *
1277  * Returns the cond_data associated with the trace array's snapshot.
1278  */
1279 void *tracing_cond_snapshot_data(struct trace_array *tr)
1280 {
1281 	void *cond_data = NULL;
1282 
1283 	local_irq_disable();
1284 	arch_spin_lock(&tr->max_lock);
1285 
1286 	if (tr->cond_snapshot)
1287 		cond_data = tr->cond_snapshot->cond_data;
1288 
1289 	arch_spin_unlock(&tr->max_lock);
1290 	local_irq_enable();
1291 
1292 	return cond_data;
1293 }
1294 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1295 
1296 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1297 					struct array_buffer *size_buf, int cpu_id);
1298 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1299 
1300 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1301 {
1302 	int order;
1303 	int ret;
1304 
1305 	if (!tr->allocated_snapshot) {
1306 
1307 		/* Make the snapshot buffer have the same order as main buffer */
1308 		order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
1309 		ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
1310 		if (ret < 0)
1311 			return ret;
1312 
1313 		/* allocate spare buffer */
1314 		ret = resize_buffer_duplicate_size(&tr->max_buffer,
1315 				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1316 		if (ret < 0)
1317 			return ret;
1318 
1319 		tr->allocated_snapshot = true;
1320 	}
1321 
1322 	return 0;
1323 }
1324 
1325 static void free_snapshot(struct trace_array *tr)
1326 {
1327 	/*
 1328 	 * We don't free the ring buffer. Instead, we resize it because
 1329 	 * the max_tr ring buffer has some state (e.g. ring->clock) and
 1330 	 * we want to preserve it.
1331 	 */
1332 	ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
1333 	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1334 	set_buffer_entries(&tr->max_buffer, 1);
1335 	tracing_reset_online_cpus(&tr->max_buffer);
1336 	tr->allocated_snapshot = false;
1337 }
1338 
1339 static int tracing_arm_snapshot_locked(struct trace_array *tr)
1340 {
1341 	int ret;
1342 
1343 	lockdep_assert_held(&trace_types_lock);
1344 
1345 	spin_lock(&tr->snapshot_trigger_lock);
1346 	if (tr->snapshot == UINT_MAX || tr->mapped) {
1347 		spin_unlock(&tr->snapshot_trigger_lock);
1348 		return -EBUSY;
1349 	}
1350 
1351 	tr->snapshot++;
1352 	spin_unlock(&tr->snapshot_trigger_lock);
1353 
1354 	ret = tracing_alloc_snapshot_instance(tr);
1355 	if (ret) {
1356 		spin_lock(&tr->snapshot_trigger_lock);
1357 		tr->snapshot--;
1358 		spin_unlock(&tr->snapshot_trigger_lock);
1359 	}
1360 
1361 	return ret;
1362 }
1363 
1364 int tracing_arm_snapshot(struct trace_array *tr)
1365 {
1366 	int ret;
1367 
1368 	mutex_lock(&trace_types_lock);
1369 	ret = tracing_arm_snapshot_locked(tr);
1370 	mutex_unlock(&trace_types_lock);
1371 
1372 	return ret;
1373 }
1374 
1375 void tracing_disarm_snapshot(struct trace_array *tr)
1376 {
1377 	spin_lock(&tr->snapshot_trigger_lock);
1378 	if (!WARN_ON(!tr->snapshot))
1379 		tr->snapshot--;
1380 	spin_unlock(&tr->snapshot_trigger_lock);
1381 }
1382 
1383 /**
1384  * tracing_alloc_snapshot - allocate snapshot buffer.
1385  *
1386  * This only allocates the snapshot buffer if it isn't already
1387  * allocated - it doesn't also take a snapshot.
1388  *
1389  * This is meant to be used in cases where the snapshot buffer needs
1390  * to be set up for events that can't sleep but need to be able to
1391  * trigger a snapshot.
1392  */
1393 int tracing_alloc_snapshot(void)
1394 {
1395 	struct trace_array *tr = &global_trace;
1396 	int ret;
1397 
1398 	ret = tracing_alloc_snapshot_instance(tr);
1399 	WARN_ON(ret < 0);
1400 
1401 	return ret;
1402 }
1403 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1404 
1405 /**
1406  * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1407  *
1408  * This is similar to tracing_snapshot(), but it will allocate the
1409  * snapshot buffer if it isn't already allocated. Use this only
1410  * where it is safe to sleep, as the allocation may sleep.
1411  *
1412  * This causes a swap between the snapshot buffer and the current live
1413  * tracing buffer. You can use this to take snapshots of the live
1414  * trace when some condition is triggered, but continue to trace.
1415  */
1416 void tracing_snapshot_alloc(void)
1417 {
1418 	int ret;
1419 
1420 	ret = tracing_alloc_snapshot();
1421 	if (ret < 0)
1422 		return;
1423 
1424 	tracing_snapshot();
1425 }
1426 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
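/*
 * Illustrative sketch (not part of this file): a debugging hook could set
 * up and use the snapshot from kernel code with
 *
 *	tracing_snapshot_alloc();	// may sleep: call from process context
 *	...
 *	tracing_snapshot();		// later, swap the live buffer away
 *
 * mirroring "echo 1 > /sys/kernel/tracing/snapshot" from user space.
 */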
1427 
1428 /**
1429  * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1430  * @tr:		The tracing instance
1431  * @cond_data:	User data to associate with the snapshot
1432  * @update:	Implementation of the cond_snapshot update function
1433  *
1434  * Check whether the conditional snapshot for the given instance has
1435  * already been enabled, or if the current tracer is already using a
1436  * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1437  * save the cond_data and update function inside.
1438  *
1439  * Returns 0 if successful, error otherwise.
1440  */
1441 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1442 				 cond_update_fn_t update)
1443 {
1444 	struct cond_snapshot *cond_snapshot __free(kfree) =
1445 		kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1446 	int ret;
1447 
1448 	if (!cond_snapshot)
1449 		return -ENOMEM;
1450 
1451 	cond_snapshot->cond_data = cond_data;
1452 	cond_snapshot->update = update;
1453 
1454 	guard(mutex)(&trace_types_lock);
1455 
1456 	if (tr->current_trace->use_max_tr)
1457 		return -EBUSY;
1458 
1459 	/*
1460 	 * The cond_snapshot can only change to NULL without the
1461 	 * trace_types_lock. We don't care if we race with it going
1462 	 * to NULL, but we want to make sure that it's not set to
1463 	 * something other than NULL when we get here, which we can
1464 	 * do safely with only holding the trace_types_lock and not
1465 	 * having to take the max_lock.
1466 	 */
1467 	if (tr->cond_snapshot)
1468 		return -EBUSY;
1469 
1470 	ret = tracing_arm_snapshot_locked(tr);
1471 	if (ret)
1472 		return ret;
1473 
1474 	local_irq_disable();
1475 	arch_spin_lock(&tr->max_lock);
1476 	tr->cond_snapshot = no_free_ptr(cond_snapshot);
1477 	arch_spin_unlock(&tr->max_lock);
1478 	local_irq_enable();
1479 
1480 	return 0;
1481 }
1482 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1483 
1484 /**
1485  * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1486  * @tr:		The tracing instance
1487  *
1488  * Check whether the conditional snapshot for the given instance is
1489  * enabled; if so, free the cond_snapshot associated with it,
1490  * otherwise return -EINVAL.
1491  *
1492  * Returns 0 if successful, error otherwise.
1493  */
1494 int tracing_snapshot_cond_disable(struct trace_array *tr)
1495 {
1496 	int ret = 0;
1497 
1498 	local_irq_disable();
1499 	arch_spin_lock(&tr->max_lock);
1500 
1501 	if (!tr->cond_snapshot)
1502 		ret = -EINVAL;
1503 	else {
1504 		kfree(tr->cond_snapshot);
1505 		tr->cond_snapshot = NULL;
1506 	}
1507 
1508 	arch_spin_unlock(&tr->max_lock);
1509 	local_irq_enable();
1510 
1511 	tracing_disarm_snapshot(tr);
1512 
1513 	return ret;
1514 }
1515 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
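/*
 * Illustrative sketch (my_update and my_data are made-up names) of the
 * conditional snapshot API above:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		// return true if the snapshot should actually be taken
 *		return READ_ONCE(*(bool *)cond_data);
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_data, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &my_data);	// swaps only if my_update() agrees
 *	...
 *	tracing_snapshot_cond_disable(tr);
 */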
1516 #else
1517 void tracing_snapshot(void)
1518 {
1519 	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1520 }
1521 EXPORT_SYMBOL_GPL(tracing_snapshot);
1522 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1523 {
1524 	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1525 }
1526 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1527 int tracing_alloc_snapshot(void)
1528 {
1529 	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1530 	return -ENODEV;
1531 }
1532 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1533 void tracing_snapshot_alloc(void)
1534 {
1535 	/* Give warning */
1536 	tracing_snapshot();
1537 }
1538 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1539 void *tracing_cond_snapshot_data(struct trace_array *tr)
1540 {
1541 	return NULL;
1542 }
1543 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1544 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1545 {
1546 	return -ENODEV;
1547 }
1548 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1549 int tracing_snapshot_cond_disable(struct trace_array *tr)
1550 {
1551 	return false;
1552 }
1553 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1554 #define free_snapshot(tr)	do { } while (0)
1555 #define tracing_arm_snapshot_locked(tr) ({ -EBUSY; })
1556 #endif /* CONFIG_TRACER_SNAPSHOT */
1557 
1558 void tracer_tracing_off(struct trace_array *tr)
1559 {
1560 	if (tr->array_buffer.buffer)
1561 		ring_buffer_record_off(tr->array_buffer.buffer);
1562 	/*
1563 	 * This flag is looked at when buffers haven't been allocated
1564 	 * yet, or by some tracers (like irqsoff), that just want to
1565 	 * know if the ring buffer has been disabled, but it can handle
 1566 	 * races where it gets disabled while we still do a record.
1567 	 * As the check is in the fast path of the tracers, it is more
1568 	 * important to be fast than accurate.
1569 	 */
1570 	tr->buffer_disabled = 1;
1571 	/* Make the flag seen by readers */
1572 	smp_wmb();
1573 }
1574 
1575 /**
1576  * tracing_off - turn off tracing buffers
1577  *
1578  * This function stops the tracing buffers from recording data.
1579  * It does not disable any overhead the tracers themselves may
1580  * be causing. This function simply causes all recording to
1581  * the ring buffers to fail.
1582  */
1583 void tracing_off(void)
1584 {
1585 	tracer_tracing_off(&global_trace);
1586 }
1587 EXPORT_SYMBOL_GPL(tracing_off);
1588 
1589 void disable_trace_on_warning(void)
1590 {
1591 	if (__disable_trace_on_warning) {
1592 		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1593 			"Disabling tracing due to warning\n");
1594 		tracing_off();
1595 	}
1596 }
1597 
1598 /**
1599  * tracer_tracing_is_on - show real state of ring buffer enabled
1600  * @tr : the trace array to know if ring buffer is enabled
1601  *
1602  * Shows real state of the ring buffer if it is enabled or not.
1603  */
1604 bool tracer_tracing_is_on(struct trace_array *tr)
1605 {
1606 	if (tr->array_buffer.buffer)
1607 		return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
1608 	return !tr->buffer_disabled;
1609 }
1610 
1611 /**
1612  * tracing_is_on - show state of ring buffers enabled
1613  */
1614 int tracing_is_on(void)
1615 {
1616 	return tracer_tracing_is_on(&global_trace);
1617 }
1618 EXPORT_SYMBOL_GPL(tracing_is_on);
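/*
 * Example (an illustrative debugging idiom, not a quote from this file):
 *
 *	if (unlikely(hit_the_bad_case))
 *		tracing_off();	// freeze the buffers for post-mortem reading
 *
 * The recorded data stays readable via /sys/kernel/tracing/trace.
 */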
1619 
1620 static int __init set_buf_size(char *str)
1621 {
1622 	unsigned long buf_size;
1623 
1624 	if (!str)
1625 		return 0;
1626 	buf_size = memparse(str, &str);
1627 	/*
 1628 	 * nr_entries cannot be zero and the startup
1629 	 * tests require some buffer space. Therefore
1630 	 * ensure we have at least 4096 bytes of buffer.
1631 	 */
1632 	trace_buf_size = max(4096UL, buf_size);
1633 	return 1;
1634 }
1635 __setup("trace_buf_size=", set_buf_size);
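/*
 * Example (illustrative): "trace_buf_size=16M" on the kernel command line
 * requests a 16 MiB per-CPU buffer; memparse() accepts K/M/G suffixes.
 */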
1636 
1637 static int __init set_tracing_thresh(char *str)
1638 {
1639 	unsigned long threshold;
1640 	int ret;
1641 
1642 	if (!str)
1643 		return 0;
1644 	ret = kstrtoul(str, 0, &threshold);
1645 	if (ret < 0)
1646 		return 0;
1647 	tracing_thresh = threshold * 1000;
1648 	return 1;
1649 }
1650 __setup("tracing_thresh=", set_tracing_thresh);
1651 
1652 unsigned long nsecs_to_usecs(unsigned long nsecs)
1653 {
1654 	return nsecs / 1000;
1655 }
1656 
1657 /*
1658  * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1659  * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1660  * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1661  * of strings in the order that the evals (enum) were defined.
1662  */
1663 #undef C
1664 #define C(a, b) b
1665 
1666 /* These must match the bit positions in trace_iterator_flags */
1667 static const char *trace_options[] = {
1668 	TRACE_FLAGS
1669 	NULL
1670 };
1671 
1672 static struct {
1673 	u64 (*func)(void);
1674 	const char *name;
1675 	int in_ns;		/* is this clock in nanoseconds? */
1676 } trace_clocks[] = {
1677 	{ trace_clock_local,		"local",	1 },
1678 	{ trace_clock_global,		"global",	1 },
1679 	{ trace_clock_counter,		"counter",	0 },
1680 	{ trace_clock_jiffies,		"uptime",	0 },
1681 	{ trace_clock,			"perf",		1 },
1682 	{ ktime_get_mono_fast_ns,	"mono",		1 },
1683 	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
1684 	{ ktime_get_boot_fast_ns,	"boot",		1 },
1685 	{ ktime_get_tai_fast_ns,	"tai",		1 },
1686 	ARCH_TRACE_CLOCKS
1687 };
1688 
1689 bool trace_clock_in_ns(struct trace_array *tr)
1690 {
1691 	if (trace_clocks[tr->clock_id].in_ns)
1692 		return true;
1693 
1694 	return false;
1695 }
1696 
1697 /*
1698  * trace_parser_get_init - gets the buffer for trace parser
1699  */
1700 int trace_parser_get_init(struct trace_parser *parser, int size)
1701 {
1702 	memset(parser, 0, sizeof(*parser));
1703 
1704 	parser->buffer = kmalloc(size, GFP_KERNEL);
1705 	if (!parser->buffer)
1706 		return 1;
1707 
1708 	parser->size = size;
1709 	return 0;
1710 }
1711 
1712 /*
1713  * trace_parser_put - frees the buffer for trace parser
1714  */
1715 void trace_parser_put(struct trace_parser *parser)
1716 {
1717 	kfree(parser->buffer);
1718 	parser->buffer = NULL;
1719 }
1720 
1721 /*
 1722  * trace_get_user - reads the user input string separated by space
1723  * (matched by isspace(ch))
1724  *
1725  * For each string found the 'struct trace_parser' is updated,
1726  * and the function returns.
1727  *
1728  * Returns number of bytes read.
1729  *
1730  * See kernel/trace/trace.h for 'struct trace_parser' details.
1731  */
1732 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1733 	size_t cnt, loff_t *ppos)
1734 {
1735 	char ch;
1736 	size_t read = 0;
1737 	ssize_t ret;
1738 
1739 	if (!*ppos)
1740 		trace_parser_clear(parser);
1741 
1742 	ret = get_user(ch, ubuf++);
1743 	if (ret)
1744 		goto out;
1745 
1746 	read++;
1747 	cnt--;
1748 
1749 	/*
 1750 	 * If the parser did not finish with the last write,
 1751 	 * continue reading the user input without skipping spaces.
1752 	 */
1753 	if (!parser->cont) {
1754 		/* skip white space */
1755 		while (cnt && isspace(ch)) {
1756 			ret = get_user(ch, ubuf++);
1757 			if (ret)
1758 				goto out;
1759 			read++;
1760 			cnt--;
1761 		}
1762 
1763 		parser->idx = 0;
1764 
1765 		/* only spaces were written */
1766 		if (isspace(ch) || !ch) {
1767 			*ppos += read;
1768 			ret = read;
1769 			goto out;
1770 		}
1771 	}
1772 
1773 	/* read the non-space input */
1774 	while (cnt && !isspace(ch) && ch) {
1775 		if (parser->idx < parser->size - 1)
1776 			parser->buffer[parser->idx++] = ch;
1777 		else {
1778 			ret = -EINVAL;
1779 			goto out;
1780 		}
1781 		ret = get_user(ch, ubuf++);
1782 		if (ret)
1783 			goto out;
1784 		read++;
1785 		cnt--;
1786 	}
1787 
1788 	/* We either got finished input or we have to wait for another call. */
1789 	if (isspace(ch) || !ch) {
1790 		parser->buffer[parser->idx] = 0;
1791 		parser->cont = false;
1792 	} else if (parser->idx < parser->size - 1) {
1793 		parser->cont = true;
1794 		parser->buffer[parser->idx++] = ch;
1795 		/* Make sure the parsed string always terminates with '\0'. */
1796 		parser->buffer[parser->idx] = 0;
1797 	} else {
1798 		ret = -EINVAL;
1799 		goto out;
1800 	}
1801 
1802 	*ppos += read;
1803 	ret = read;
1804 
1805 out:
1806 	return ret;
1807 }
1808 
1809 /* TODO add a seq_buf_to_buffer() */
1810 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1811 {
1812 	int len;
1813 
1814 	if (trace_seq_used(s) <= s->readpos)
1815 		return -EBUSY;
1816 
1817 	len = trace_seq_used(s) - s->readpos;
1818 	if (cnt > len)
1819 		cnt = len;
1820 	memcpy(buf, s->buffer + s->readpos, cnt);
1821 
1822 	s->readpos += cnt;
1823 	return cnt;
1824 }
1825 
1826 unsigned long __read_mostly	tracing_thresh;
1827 
1828 #ifdef CONFIG_TRACER_MAX_TRACE
1829 static const struct file_operations tracing_max_lat_fops;
1830 
1831 #ifdef LATENCY_FS_NOTIFY
1832 
1833 static struct workqueue_struct *fsnotify_wq;
1834 
1835 static void latency_fsnotify_workfn(struct work_struct *work)
1836 {
1837 	struct trace_array *tr = container_of(work, struct trace_array,
1838 					      fsnotify_work);
1839 	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1840 }
1841 
1842 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1843 {
1844 	struct trace_array *tr = container_of(iwork, struct trace_array,
1845 					      fsnotify_irqwork);
1846 	queue_work(fsnotify_wq, &tr->fsnotify_work);
1847 }
1848 
1849 static void trace_create_maxlat_file(struct trace_array *tr,
1850 				     struct dentry *d_tracer)
1851 {
1852 	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1853 	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1854 	tr->d_max_latency = trace_create_file("tracing_max_latency",
1855 					      TRACE_MODE_WRITE,
1856 					      d_tracer, tr,
1857 					      &tracing_max_lat_fops);
1858 }
1859 
1860 __init static int latency_fsnotify_init(void)
1861 {
1862 	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1863 				      WQ_UNBOUND | WQ_HIGHPRI, 0);
1864 	if (!fsnotify_wq) {
1865 		pr_err("Unable to allocate tr_max_lat_wq\n");
1866 		return -ENOMEM;
1867 	}
1868 	return 0;
1869 }
1870 
1871 late_initcall_sync(latency_fsnotify_init);
1872 
1873 void latency_fsnotify(struct trace_array *tr)
1874 {
1875 	if (!fsnotify_wq)
1876 		return;
1877 	/*
1878 	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1879 	 * possible that we are called from __schedule() or do_idle(), which
1880 	 * could cause a deadlock.
1881 	 */
1882 	irq_work_queue(&tr->fsnotify_irqwork);
1883 }
1884 
1885 #else /* !LATENCY_FS_NOTIFY */
1886 
1887 #define trace_create_maxlat_file(tr, d_tracer)				\
1888 	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
1889 			  d_tracer, tr, &tracing_max_lat_fops)
1890 
1891 #endif
1892 
1893 /*
1894  * Copy the new maximum trace into the separate maximum-trace
1895  * structure. (this way the maximum trace is permanently saved,
1896  * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1897  */
1898 static void
1899 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1900 {
1901 	struct array_buffer *trace_buf = &tr->array_buffer;
1902 	struct array_buffer *max_buf = &tr->max_buffer;
1903 	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1904 	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1905 
1906 	max_buf->cpu = cpu;
1907 	max_buf->time_start = data->preempt_timestamp;
1908 
1909 	max_data->saved_latency = tr->max_latency;
1910 	max_data->critical_start = data->critical_start;
1911 	max_data->critical_end = data->critical_end;
1912 
1913 	strscpy(max_data->comm, tsk->comm);
1914 	max_data->pid = tsk->pid;
1915 	/*
1916 	 * If tsk == current, then use current_uid(), as that does not use
1917 	 * RCU. The irq tracer can be called out of RCU scope.
1918 	 */
1919 	if (tsk == current)
1920 		max_data->uid = current_uid();
1921 	else
1922 		max_data->uid = task_uid(tsk);
1923 
1924 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1925 	max_data->policy = tsk->policy;
1926 	max_data->rt_priority = tsk->rt_priority;
1927 
1928 	/* record this task's comm */
1929 	tracing_record_cmdline(tsk);
1930 	latency_fsnotify(tr);
1931 }
1932 
1933 /**
1934  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1935  * @tr: tracer
1936  * @tsk: the task with the latency
1937  * @cpu: The cpu that initiated the trace.
1938  * @cond_data: User data associated with a conditional snapshot
1939  *
1940  * Flip the buffers between the @tr and the max_tr and record information
1941  * about which task was the cause of this latency.
1942  */
1943 void
1944 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1945 	      void *cond_data)
1946 {
1947 	if (tr->stop_count)
1948 		return;
1949 
1950 	WARN_ON_ONCE(!irqs_disabled());
1951 
1952 	if (!tr->allocated_snapshot) {
1953 		/* Only the nop tracer should hit this when disabling */
1954 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1955 		return;
1956 	}
1957 
1958 	arch_spin_lock(&tr->max_lock);
1959 
1960 	/* Inherit the recordable setting from array_buffer */
1961 	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1962 		ring_buffer_record_on(tr->max_buffer.buffer);
1963 	else
1964 		ring_buffer_record_off(tr->max_buffer.buffer);
1965 
1966 #ifdef CONFIG_TRACER_SNAPSHOT
1967 	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1968 		arch_spin_unlock(&tr->max_lock);
1969 		return;
1970 	}
1971 #endif
1972 	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1973 
1974 	__update_max_tr(tr, tsk, cpu);
1975 
1976 	arch_spin_unlock(&tr->max_lock);
1977 
1978 	/* Any waiters on the old snapshot buffer need to wake up */
1979 	ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
1980 }
1981 
1982 /**
1983  * update_max_tr_single - only copy one trace over, and reset the rest
1984  * @tr: tracer
1985  * @tsk: task with the latency
1986  * @cpu: the cpu of the buffer to copy.
1987  *
1988  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1989  */
1990 void
1991 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1992 {
1993 	int ret;
1994 
1995 	if (tr->stop_count)
1996 		return;
1997 
1998 	WARN_ON_ONCE(!irqs_disabled());
1999 	if (!tr->allocated_snapshot) {
2000 		/* Only the nop tracer should hit this when disabling */
2001 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
2002 		return;
2003 	}
2004 
2005 	arch_spin_lock(&tr->max_lock);
2006 
2007 	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
2008 
2009 	if (ret == -EBUSY) {
2010 		/*
2011 		 * We failed to swap the buffer due to a commit taking
2012 		 * place on this CPU. We fail to record, but we reset
2013 		 * the max trace buffer (no one writes directly to it)
2014 		 * and flag that it failed.
2015 		 * The swap can also fail if a resize is in progress.
2016 		 */
2017 		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
2018 			"Failed to swap buffers due to commit or resize in progress\n");
2019 	}
2020 
2021 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
2022 
2023 	__update_max_tr(tr, tsk, cpu);
2024 	arch_spin_unlock(&tr->max_lock);
2025 }
2026 
2027 #endif /* CONFIG_TRACER_MAX_TRACE */
2028 
2029 struct pipe_wait {
2030 	struct trace_iterator		*iter;
2031 	int				wait_index;
2032 };
2033 
2034 static bool wait_pipe_cond(void *data)
2035 {
2036 	struct pipe_wait *pwait = data;
2037 	struct trace_iterator *iter = pwait->iter;
2038 
2039 	if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index)
2040 		return true;
2041 
2042 	return iter->closed;
2043 }
2044 
2045 static int wait_on_pipe(struct trace_iterator *iter, int full)
2046 {
2047 	struct pipe_wait pwait;
2048 	int ret;
2049 
2050 	/* Iterators are static, they should be filled or empty */
2051 	if (trace_buffer_iter(iter, iter->cpu_file))
2052 		return 0;
2053 
2054 	pwait.wait_index = atomic_read_acquire(&iter->wait_index);
2055 	pwait.iter = iter;
2056 
2057 	ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full,
2058 			       wait_pipe_cond, &pwait);
2059 
2060 #ifdef CONFIG_TRACER_MAX_TRACE
2061 	/*
2062 	 * Make sure this is still the snapshot buffer, as if a snapshot were
2063 	 * to happen, this would now be the main buffer.
2064 	 */
2065 	if (iter->snapshot)
2066 		iter->array_buffer = &iter->tr->max_buffer;
2067 #endif
2068 	return ret;
2069 }
2070 
2071 #ifdef CONFIG_FTRACE_STARTUP_TEST
2072 static bool selftests_can_run;
2073 
2074 struct trace_selftests {
2075 	struct list_head		list;
2076 	struct tracer			*type;
2077 };
2078 
2079 static LIST_HEAD(postponed_selftests);
2080 
2081 static int save_selftest(struct tracer *type)
2082 {
2083 	struct trace_selftests *selftest;
2084 
2085 	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
2086 	if (!selftest)
2087 		return -ENOMEM;
2088 
2089 	selftest->type = type;
2090 	list_add(&selftest->list, &postponed_selftests);
2091 	return 0;
2092 }
2093 
2094 static int run_tracer_selftest(struct tracer *type)
2095 {
2096 	struct trace_array *tr = &global_trace;
2097 	struct tracer *saved_tracer = tr->current_trace;
2098 	int ret;
2099 
2100 	if (!type->selftest || tracing_selftest_disabled)
2101 		return 0;
2102 
2103 	/*
2104 	 * If a tracer registers early in boot up (before scheduling is
2105 	 * initialized and such), then do not run its selftests yet.
2106 	 * Instead, run it a little later in the boot process.
2107 	 */
2108 	if (!selftests_can_run)
2109 		return save_selftest(type);
2110 
2111 	if (!tracing_is_on()) {
2112 		pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
2113 			type->name);
2114 		return 0;
2115 	}
2116 
2117 	/*
2118 	 * Run a selftest on this tracer.
2119 	 * Here we reset the trace buffer, and set the current
2120 	 * tracer to be this tracer. The tracer can then run some
2121 	 * internal tracing to verify that everything is in order.
2122 	 * If we fail, we do not register this tracer.
2123 	 */
2124 	tracing_reset_online_cpus(&tr->array_buffer);
2125 
2126 	tr->current_trace = type;
2127 
2128 #ifdef CONFIG_TRACER_MAX_TRACE
2129 	if (type->use_max_tr) {
2130 		/* If we expanded the buffers, make sure the max is expanded too */
2131 		if (tr->ring_buffer_expanded)
2132 			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
2133 					   RING_BUFFER_ALL_CPUS);
2134 		tr->allocated_snapshot = true;
2135 	}
2136 #endif
2137 
2138 	/* the test is responsible for initializing and enabling */
2139 	pr_info("Testing tracer %s: ", type->name);
2140 	ret = type->selftest(type, tr);
2141 	/* the test is responsible for resetting too */
2142 	tr->current_trace = saved_tracer;
2143 	if (ret) {
2144 		printk(KERN_CONT "FAILED!\n");
2145 		/* Add the warning after printing 'FAILED' */
2146 		WARN_ON(1);
2147 		return -1;
2148 	}
2149 	/* Only reset on passing, to avoid touching corrupted buffers */
2150 	tracing_reset_online_cpus(&tr->array_buffer);
2151 
2152 #ifdef CONFIG_TRACER_MAX_TRACE
2153 	if (type->use_max_tr) {
2154 		tr->allocated_snapshot = false;
2155 
2156 		/* Shrink the max buffer again */
2157 		if (tr->ring_buffer_expanded)
2158 			ring_buffer_resize(tr->max_buffer.buffer, 1,
2159 					   RING_BUFFER_ALL_CPUS);
2160 	}
2161 #endif
2162 
2163 	printk(KERN_CONT "PASSED\n");
2164 	return 0;
2165 }
2166 
2167 static int do_run_tracer_selftest(struct tracer *type)
2168 {
2169 	int ret;
2170 
2171 	/*
2172 	 * Tests can take a long time, especially if they are run one after the
2173 	 * other, as does happen during bootup when all the tracers are
2174 	 * registered. This could cause the soft lockup watchdog to trigger.
2175 	 */
2176 	cond_resched();
2177 
2178 	tracing_selftest_running = true;
2179 	ret = run_tracer_selftest(type);
2180 	tracing_selftest_running = false;
2181 
2182 	return ret;
2183 }
2184 
2185 static __init int init_trace_selftests(void)
2186 {
2187 	struct trace_selftests *p, *n;
2188 	struct tracer *t, **last;
2189 	int ret;
2190 
2191 	selftests_can_run = true;
2192 
2193 	guard(mutex)(&trace_types_lock);
2194 
2195 	if (list_empty(&postponed_selftests))
2196 		return 0;
2197 
2198 	pr_info("Running postponed tracer tests:\n");
2199 
2200 	tracing_selftest_running = true;
2201 	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2202 		/* This loop can take minutes when sanitizers are enabled, so
2203 		 * let's make sure we allow RCU processing.
2204 		 */
2205 		cond_resched();
2206 		ret = run_tracer_selftest(p->type);
2207 		/* If the test fails, then warn and remove from available_tracers */
2208 		if (ret < 0) {
2209 			WARN(1, "tracer: %s failed selftest, disabling\n",
2210 			     p->type->name);
2211 			last = &trace_types;
2212 			for (t = trace_types; t; t = t->next) {
2213 				if (t == p->type) {
2214 					*last = t->next;
2215 					break;
2216 				}
2217 				last = &t->next;
2218 			}
2219 		}
2220 		list_del(&p->list);
2221 		kfree(p);
2222 	}
2223 	tracing_selftest_running = false;
2224 
2225 	return 0;
2226 }
2227 core_initcall(init_trace_selftests);
2228 #else
2229 static inline int do_run_tracer_selftest(struct tracer *type)
2230 {
2231 	return 0;
2232 }
2233 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2234 
2235 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2236 
2237 static void __init apply_trace_boot_options(void);
2238 
2239 /**
2240  * register_tracer - register a tracer with the ftrace system.
2241  * @type: the plugin for the tracer
2242  *
2243  * Register a new plugin tracer.
2244  */
2245 int __init register_tracer(struct tracer *type)
2246 {
2247 	struct tracer *t;
2248 	int ret = 0;
2249 
2250 	if (!type->name) {
2251 		pr_info("Tracer must have a name\n");
2252 		return -1;
2253 	}
2254 
2255 	if (strlen(type->name) >= MAX_TRACER_SIZE) {
2256 		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2257 		return -1;
2258 	}
2259 
2260 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
2261 		pr_warn("Can not register tracer %s due to lockdown\n",
2262 			   type->name);
2263 		return -EPERM;
2264 	}
2265 
2266 	mutex_lock(&trace_types_lock);
2267 
2268 	for (t = trace_types; t; t = t->next) {
2269 		if (strcmp(type->name, t->name) == 0) {
2270 			/* already found */
2271 			pr_info("Tracer %s already registered\n",
2272 				type->name);
2273 			ret = -1;
2274 			goto out;
2275 		}
2276 	}
2277 
2278 	if (!type->set_flag)
2279 		type->set_flag = &dummy_set_flag;
2280 	if (!type->flags) {
2281 		/* allocate a dummy tracer_flags */
2282 		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2283 		if (!type->flags) {
2284 			ret = -ENOMEM;
2285 			goto out;
2286 		}
2287 		type->flags->val = 0;
2288 		type->flags->opts = dummy_tracer_opt;
2289 	} else
2290 		if (!type->flags->opts)
2291 			type->flags->opts = dummy_tracer_opt;
2292 
2293 	/* store the tracer for __set_tracer_option */
2294 	type->flags->trace = type;
2295 
2296 	ret = do_run_tracer_selftest(type);
2297 	if (ret < 0)
2298 		goto out;
2299 
2300 	type->next = trace_types;
2301 	trace_types = type;
2302 	add_tracer_options(&global_trace, type);
2303 
2304  out:
2305 	mutex_unlock(&trace_types_lock);
2306 
2307 	if (ret || !default_bootup_tracer)
2308 		goto out_unlock;
2309 
2310 	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2311 		goto out_unlock;
2312 
2313 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2314 	/* Do we want this tracer to start on bootup? */
2315 	tracing_set_tracer(&global_trace, type->name);
2316 	default_bootup_tracer = NULL;
2317 
2318 	apply_trace_boot_options();
2319 
2320 	/* disable other selftests, since this will break it. */
2321 	/* Disable other selftests, since running this tracer will break them. */
2322 
2323  out_unlock:
2324 	return ret;
2325 }
2326 
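/*
 * Illustrative sketch (editorial addition, not part of this file): the
 * minimal shape of a tracer registration. The name "example" and the
 * empty callbacks are hypothetical; real tracers usually also provide
 * start/stop/selftest hooks and option flags.
 *
 *	static int example_trace_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void example_trace_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer example_tracer = {
 *		.name	= "example",
 *		.init	= example_trace_init,
 *		.reset	= example_trace_reset,
 *	};
 *
 *	static __init int example_tracer_init(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	core_initcall(example_tracer_init);
 */
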
2327 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2328 {
2329 	struct trace_buffer *buffer = buf->buffer;
2330 
2331 	if (!buffer)
2332 		return;
2333 
2334 	ring_buffer_record_disable(buffer);
2335 
2336 	/* Make sure all commits have finished */
2337 	synchronize_rcu();
2338 	ring_buffer_reset_cpu(buffer, cpu);
2339 
2340 	ring_buffer_record_enable(buffer);
2341 }
2342 
2343 void tracing_reset_online_cpus(struct array_buffer *buf)
2344 {
2345 	struct trace_buffer *buffer = buf->buffer;
2346 
2347 	if (!buffer)
2348 		return;
2349 
2350 	ring_buffer_record_disable(buffer);
2351 
2352 	/* Make sure all commits have finished */
2353 	synchronize_rcu();
2354 
2355 	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2356 
2357 	ring_buffer_reset_online_cpus(buffer);
2358 
2359 	ring_buffer_record_enable(buffer);
2360 }
2361 
2362 static void tracing_reset_all_cpus(struct array_buffer *buf)
2363 {
2364 	struct trace_buffer *buffer = buf->buffer;
2365 
2366 	if (!buffer)
2367 		return;
2368 
2369 	ring_buffer_record_disable(buffer);
2370 
2371 	/* Make sure all commits have finished */
2372 	synchronize_rcu();
2373 
2374 	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2375 
2376 	ring_buffer_reset(buffer);
2377 
2378 	ring_buffer_record_enable(buffer);
2379 }
2380 
2381 /* Must have trace_types_lock held */
2382 void tracing_reset_all_online_cpus_unlocked(void)
2383 {
2384 	struct trace_array *tr;
2385 
2386 	lockdep_assert_held(&trace_types_lock);
2387 
2388 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2389 		if (!tr->clear_trace)
2390 			continue;
2391 		tr->clear_trace = false;
2392 		tracing_reset_online_cpus(&tr->array_buffer);
2393 #ifdef CONFIG_TRACER_MAX_TRACE
2394 		tracing_reset_online_cpus(&tr->max_buffer);
2395 #endif
2396 	}
2397 }
2398 
2399 void tracing_reset_all_online_cpus(void)
2400 {
2401 	mutex_lock(&trace_types_lock);
2402 	tracing_reset_all_online_cpus_unlocked();
2403 	mutex_unlock(&trace_types_lock);
2404 }
2405 
2406 int is_tracing_stopped(void)
2407 {
2408 	return global_trace.stop_count;
2409 }
2410 
2411 static void tracing_start_tr(struct trace_array *tr)
2412 {
2413 	struct trace_buffer *buffer;
2414 	unsigned long flags;
2415 
2416 	if (tracing_disabled)
2417 		return;
2418 
2419 	raw_spin_lock_irqsave(&tr->start_lock, flags);
2420 	if (--tr->stop_count) {
2421 		if (WARN_ON_ONCE(tr->stop_count < 0)) {
2422 			/* Someone screwed up their debugging */
2423 			tr->stop_count = 0;
2424 		}
2425 		goto out;
2426 	}
2427 
2428 	/* Prevent the buffers from switching */
2429 	arch_spin_lock(&tr->max_lock);
2430 
2431 	buffer = tr->array_buffer.buffer;
2432 	if (buffer)
2433 		ring_buffer_record_enable(buffer);
2434 
2435 #ifdef CONFIG_TRACER_MAX_TRACE
2436 	buffer = tr->max_buffer.buffer;
2437 	if (buffer)
2438 		ring_buffer_record_enable(buffer);
2439 #endif
2440 
2441 	arch_spin_unlock(&tr->max_lock);
2442 
2443  out:
2444 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2445 }
2446 
2447 /**
2448  * tracing_start - quick start of the tracer
2449  *
2450  * If tracing is enabled but was stopped by tracing_stop,
2451  * this will start the tracer back up.
2452  */
2453 void tracing_start(void)
2454 {
2456 	return tracing_start_tr(&global_trace);
2457 }
2458 
2459 static void tracing_stop_tr(struct trace_array *tr)
2460 {
2461 	struct trace_buffer *buffer;
2462 	unsigned long flags;
2463 
2464 	raw_spin_lock_irqsave(&tr->start_lock, flags);
2465 	if (tr->stop_count++)
2466 		goto out;
2467 
2468 	/* Prevent the buffers from switching */
2469 	arch_spin_lock(&tr->max_lock);
2470 
2471 	buffer = tr->array_buffer.buffer;
2472 	if (buffer)
2473 		ring_buffer_record_disable(buffer);
2474 
2475 #ifdef CONFIG_TRACER_MAX_TRACE
2476 	buffer = tr->max_buffer.buffer;
2477 	if (buffer)
2478 		ring_buffer_record_disable(buffer);
2479 #endif
2480 
2481 	arch_spin_unlock(&tr->max_lock);
2482 
2483  out:
2484 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2485 }
2486 
2487 /**
2488  * tracing_stop - quick stop of the tracer
2489  *
2490  * Lightweight way to stop tracing. Use in conjunction with
2491  * tracing_start.
2492  */
2493 void tracing_stop(void)
2494 {
2495 	return tracing_stop_tr(&global_trace);
2496 }
2497 
2498 /*
2499  * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2500  * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2501  * simplifies those functions and keeps them in sync.
2502  */
2503 enum print_line_t trace_handle_return(struct trace_seq *s)
2504 {
2505 	return trace_seq_has_overflowed(s) ?
2506 		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2507 }
2508 EXPORT_SYMBOL_GPL(trace_handle_return);
2509 
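/*
 * Illustrative usage (editorial addition, not part of this file): event
 * output callbacks typically end with trace_handle_return() once they
 * have written to the trace_seq. The callback name below is hypothetical.
 *
 *	static enum print_line_t
 *	trace_example_print(struct trace_iterator *iter, int flags,
 *			    struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "example event\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 */
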
2510 static unsigned short migration_disable_value(void)
2511 {
2512 #if defined(CONFIG_SMP)
2513 	return current->migration_disabled;
2514 #else
2515 	return 0;
2516 #endif
2517 }
2518 
2519 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2520 {
2521 	unsigned int trace_flags = irqs_status;
2522 	unsigned int pc;
2523 
2524 	pc = preempt_count();
2525 
2526 	if (pc & NMI_MASK)
2527 		trace_flags |= TRACE_FLAG_NMI;
2528 	if (pc & HARDIRQ_MASK)
2529 		trace_flags |= TRACE_FLAG_HARDIRQ;
2530 	if (in_serving_softirq())
2531 		trace_flags |= TRACE_FLAG_SOFTIRQ;
2532 	if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2533 		trace_flags |= TRACE_FLAG_BH_OFF;
2534 
2535 	if (tif_need_resched())
2536 		trace_flags |= TRACE_FLAG_NEED_RESCHED;
2537 	if (test_preempt_need_resched())
2538 		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2539 	if (IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY) && tif_test_bit(TIF_NEED_RESCHED_LAZY))
2540 		trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
2541 	return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2542 		(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
2543 }
2544 
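/*
 * Illustrative note (editorial addition, not part of this file): the
 * returned trace_ctx word packs the TRACE_FLAG_* bits into bits 16 and
 * above, the (clamped) preemption depth into bits 0-3 and the (clamped)
 * migration-disable depth into bits 4-7. A hypothetical decoder:
 *
 *	unsigned int flags     = trace_ctx >> 16;
 *	unsigned int preempt   = trace_ctx & 0xf;
 *	unsigned int migration = (trace_ctx >> 4) & 0xf;
 */
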
2545 struct ring_buffer_event *
2546 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2547 			  int type,
2548 			  unsigned long len,
2549 			  unsigned int trace_ctx)
2550 {
2551 	return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2552 }
2553 
2554 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2555 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2556 static int trace_buffered_event_ref;
2557 
2558 /**
2559  * trace_buffered_event_enable - enable buffering events
2560  *
2561  * When events are being filtered, it is quicker to write the event
2562  * data into a temporary buffer when there is a good chance that it
2563  * will not be committed. Discarding an event from the ring buffer
2564  * is not as fast as committing one, and is much slower than copying
2565  * the data and committing the copy.
2566  *
2567  * When an event is to be filtered, allocate per-CPU buffers to
2568  * write the event data into. If the event is filtered and discarded,
2569  * it is simply dropped; otherwise, the entire data is committed
2570  * in one shot.
2571  */
2572 void trace_buffered_event_enable(void)
2573 {
2574 	struct ring_buffer_event *event;
2575 	struct page *page;
2576 	int cpu;
2577 
2578 	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2579 
2580 	if (trace_buffered_event_ref++)
2581 		return;
2582 
2583 	for_each_tracing_cpu(cpu) {
2584 		page = alloc_pages_node(cpu_to_node(cpu),
2585 					GFP_KERNEL | __GFP_NORETRY, 0);
2586 		/* This is just an optimization and can handle failures */
2587 		if (!page) {
2588 			pr_err("Failed to allocate event buffer\n");
2589 			break;
2590 		}
2591 
2592 		event = page_address(page);
2593 		memset(event, 0, sizeof(*event));
2594 
2595 		per_cpu(trace_buffered_event, cpu) = event;
2596 
2597 		preempt_disable();
2598 		if (cpu == smp_processor_id() &&
2599 		    __this_cpu_read(trace_buffered_event) !=
2600 		    per_cpu(trace_buffered_event, cpu))
2601 			WARN_ON_ONCE(1);
2602 		preempt_enable();
2603 	}
2604 }
2605 
2606 static void enable_trace_buffered_event(void *data)
2607 {
2608 	/* Probably not needed, but do it anyway */
2609 	smp_rmb();
2610 	this_cpu_dec(trace_buffered_event_cnt);
2611 }
2612 
2613 static void disable_trace_buffered_event(void *data)
2614 {
2615 	this_cpu_inc(trace_buffered_event_cnt);
2616 }
2617 
2618 /**
2619  * trace_buffered_event_disable - disable buffering events
2620  *
2621  * When a filter is removed, it is faster to not use the buffered
2622  * events, and to commit directly into the ring buffer. Free up
2623  * the temp buffers when there are no more users. This requires
2624  * special synchronization with current events.
2625  */
2626 void trace_buffered_event_disable(void)
2627 {
2628 	int cpu;
2629 
2630 	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2631 
2632 	if (WARN_ON_ONCE(!trace_buffered_event_ref))
2633 		return;
2634 
2635 	if (--trace_buffered_event_ref)
2636 		return;
2637 
2638 	/* For each CPU, set the buffer as used. */
2639 	on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
2640 			 NULL, true);
2641 
2642 	/* Wait for all current users to finish */
2643 	synchronize_rcu();
2644 
2645 	for_each_tracing_cpu(cpu) {
2646 		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2647 		per_cpu(trace_buffered_event, cpu) = NULL;
2648 	}
2649 
2650 	/*
2651 	 * Wait for all CPUs that may have started checking whether they can
2652 	 * use their event buffer only after the previous synchronize_rcu()
2653 	 * call, and that still read a valid pointer from trace_buffered_event.
2654 	 * They must not see the cleared trace_buffered_event_cnt, or they could
2655 	 * wrongly decide to use the pointed-to buffer, which has now been freed.
2656 	 */
2657 	synchronize_rcu();
2658 
2659 	/* For each CPU, relinquish the buffer */
2660 	on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
2661 			 true);
2662 }
2663 
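/*
 * Illustrative usage (editorial addition, not part of this file): the
 * enable/disable pair is reference counted, and each call must be made
 * with event_mutex held:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	mutex_unlock(&event_mutex);
 *
 *	...
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_disable();
 *	mutex_unlock(&event_mutex);
 */
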
2664 static struct trace_buffer *temp_buffer;
2665 
2666 struct ring_buffer_event *
2667 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2668 			  struct trace_event_file *trace_file,
2669 			  int type, unsigned long len,
2670 			  unsigned int trace_ctx)
2671 {
2672 	struct ring_buffer_event *entry;
2673 	struct trace_array *tr = trace_file->tr;
2674 	int val;
2675 
2676 	*current_rb = tr->array_buffer.buffer;
2677 
2678 	if (!tr->no_filter_buffering_ref &&
2679 	    (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2680 		preempt_disable_notrace();
2681 		/*
2682 		 * Filtering is on, so try to use the per cpu buffer first.
2683 		 * This buffer will simulate a ring_buffer_event,
2684 		 * where the type_len is zero and the array[0] will
2685 		 * hold the full length.
2686 		 * (see include/linux/ring_buffer.h for details on
2687 		 *  how the ring_buffer_event is structured).
2688 		 *
2689 		 * Using a temp buffer during filtering and copying it
2690 		 * on a matched filter is quicker than writing directly
2691 		 * into the ring buffer and then discarding it when
2692 		 * it doesn't match. That is because the discard
2693 		 * requires several atomic operations to get right.
2694 		 * Copying on match and doing nothing on a failed match
2695 		 * is still quicker than no copy on match, but having
2696 		 * to discard out of the ring buffer on a failed match.
2697 		 */
2698 		if ((entry = __this_cpu_read(trace_buffered_event))) {
2699 			int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2700 
2701 			val = this_cpu_inc_return(trace_buffered_event_cnt);
2702 
2703 			/*
2704 			 * Preemption is disabled, but interrupts and NMIs
2705 			 * can still come in now. If that happens after
2706 			 * the above increment, then it will have to go
2707 			 * back to the old method of allocating the event
2708 			 * on the ring buffer, and if the filter fails, it
2709 			 * will have to call ring_buffer_discard_commit()
2710 			 * to remove it.
2711 			 *
2712 			 * Need to also check the unlikely case that the
2713 			 * length is bigger than the temp buffer size.
2714 			 * If that happens, then the reserve is pretty much
2715 			 * guaranteed to fail, as the ring buffer currently
2716 			 * only allows events less than a page. But that may
2717 			 * change in the future, so let the ring buffer reserve
2718 			 * handle the failure in that case.
2719 			 */
2720 			if (val == 1 && likely(len <= max_len)) {
2721 				trace_event_setup(entry, type, trace_ctx);
2722 				entry->array[0] = len;
2723 				/* Return with preemption disabled */
2724 				return entry;
2725 			}
2726 			this_cpu_dec(trace_buffered_event_cnt);
2727 		}
2728 		/* __trace_buffer_lock_reserve() disables preemption */
2729 		preempt_enable_notrace();
2730 	}
2731 
2732 	entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2733 					    trace_ctx);
2734 	/*
2735 	 * If tracing is off, but we have triggers enabled,
2736 	 * we still need to look at the event data. Use the temp_buffer
2737 	 * to store the trace event for the trigger to use. It's recursion
2738 	 * safe and will not be recorded anywhere.
2739 	 */
2740 	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2741 		*current_rb = temp_buffer;
2742 		entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2743 						    trace_ctx);
2744 	}
2745 	return entry;
2746 }
2747 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2748 
2749 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2750 static DEFINE_MUTEX(tracepoint_printk_mutex);
2751 
2752 static void output_printk(struct trace_event_buffer *fbuffer)
2753 {
2754 	struct trace_event_call *event_call;
2755 	struct trace_event_file *file;
2756 	struct trace_event *event;
2757 	unsigned long flags;
2758 	struct trace_iterator *iter = tracepoint_print_iter;
2759 
2760 	/* We should never get here if iter is NULL */
2761 	if (WARN_ON_ONCE(!iter))
2762 		return;
2763 
2764 	event_call = fbuffer->trace_file->event_call;
2765 	if (!event_call || !event_call->event.funcs ||
2766 	    !event_call->event.funcs->trace)
2767 		return;
2768 
2769 	file = fbuffer->trace_file;
2770 	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2771 	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2772 	     !filter_match_preds(file->filter, fbuffer->entry)))
2773 		return;
2774 
2775 	event = &fbuffer->trace_file->event_call->event;
2776 
2777 	raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2778 	trace_seq_init(&iter->seq);
2779 	iter->ent = fbuffer->entry;
2780 	event_call->event.funcs->trace(iter, 0, event);
2781 	trace_seq_putc(&iter->seq, 0);
2782 	printk("%s", iter->seq.buffer);
2783 
2784 	raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2785 }
2786 
2787 int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
2788 			     void *buffer, size_t *lenp,
2789 			     loff_t *ppos)
2790 {
2791 	int save_tracepoint_printk;
2792 	int ret;
2793 
2794 	guard(mutex)(&tracepoint_printk_mutex);
2795 	save_tracepoint_printk = tracepoint_printk;
2796 
2797 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2798 
2799 	/*
2800 	 * This will force exiting early, as tracepoint_printk
2801 	 * is always zero when tracepoint_print_iter is not allocated.
2802 	 */
2803 	if (!tracepoint_print_iter)
2804 		tracepoint_printk = 0;
2805 
2806 	if (save_tracepoint_printk == tracepoint_printk)
2807 		return ret;
2808 
2809 	if (tracepoint_printk)
2810 		static_key_enable(&tracepoint_printk_key.key);
2811 	else
2812 		static_key_disable(&tracepoint_printk_key.key);
2813 
2814 	return ret;
2815 }
2816 
2817 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2818 {
2819 	enum event_trigger_type tt = ETT_NONE;
2820 	struct trace_event_file *file = fbuffer->trace_file;
2821 
2822 	if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2823 			fbuffer->entry, &tt))
2824 		goto discard;
2825 
2826 	if (static_key_false(&tracepoint_printk_key.key))
2827 		output_printk(fbuffer);
2828 
2829 	if (static_branch_unlikely(&trace_event_exports_enabled))
2830 		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2831 
2832 	trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2833 			fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2834 
2835 discard:
2836 	if (tt)
2837 		event_triggers_post_call(file, tt);
2838 
2839 }
2840 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2841 
2842 /*
2843  * Skip 3:
2844  *
2845  *   trace_buffer_unlock_commit_regs()
2846  *   trace_event_buffer_commit()
2847  *   trace_event_raw_event_xxx()
2848  */
2849 # define STACK_SKIP 3
2850 
2851 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2852 				     struct trace_buffer *buffer,
2853 				     struct ring_buffer_event *event,
2854 				     unsigned int trace_ctx,
2855 				     struct pt_regs *regs)
2856 {
2857 	__buffer_unlock_commit(buffer, event);
2858 
2859 	/*
2860 	 * If regs is not set, then skip the necessary functions.
2861 	 * Note, we can still get here via blktrace, wakeup tracer
2862 	 * and mmiotrace, but that's ok if they lose a function or
2863 	 * two. They are not that meaningful.
2864 	 */
2865 	ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2866 	ftrace_trace_userstack(tr, buffer, trace_ctx);
2867 }
2868 
2869 /*
2870  * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2871  */
2872 void
2873 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2874 				   struct ring_buffer_event *event)
2875 {
2876 	__buffer_unlock_commit(buffer, event);
2877 }
2878 
2879 void
2880 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2881 	       parent_ip, unsigned int trace_ctx)
2882 {
2883 	struct trace_buffer *buffer = tr->array_buffer.buffer;
2884 	struct ring_buffer_event *event;
2885 	struct ftrace_entry *entry;
2886 
2887 	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2888 					    trace_ctx);
2889 	if (!event)
2890 		return;
2891 	entry	= ring_buffer_event_data(event);
2892 	entry->ip			= ip;
2893 	entry->parent_ip		= parent_ip;
2894 
2895 	if (static_branch_unlikely(&trace_function_exports_enabled))
2896 		ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2897 	__buffer_unlock_commit(buffer, event);
2898 }
2899 
2900 #ifdef CONFIG_STACKTRACE
2901 
2902 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2903 #define FTRACE_KSTACK_NESTING	4
2904 
2905 #define FTRACE_KSTACK_ENTRIES	(SZ_4K / FTRACE_KSTACK_NESTING)
2906 
2907 struct ftrace_stack {
2908 	unsigned long		calls[FTRACE_KSTACK_ENTRIES];
2909 };
2910 
2911 
2912 struct ftrace_stacks {
2913 	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];
2914 };
2915 
2916 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2917 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2918 
2919 static void __ftrace_trace_stack(struct trace_array *tr,
2920 				 struct trace_buffer *buffer,
2921 				 unsigned int trace_ctx,
2922 				 int skip, struct pt_regs *regs)
2923 {
2924 	struct ring_buffer_event *event;
2925 	unsigned int size, nr_entries;
2926 	struct ftrace_stack *fstack;
2927 	struct stack_entry *entry;
2928 	int stackidx;
2929 
2930 	/*
2931 	 * Add one, for this function and the call to stack_trace_save().
2932 	 * If regs is set, then these functions will not be in the way.
2933 	 */
2934 #ifndef CONFIG_UNWINDER_ORC
2935 	if (!regs)
2936 		skip++;
2937 #endif
2938 
2939 	preempt_disable_notrace();
2940 
2941 	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2942 
2943 	/* This should never happen. If it does, yell once and skip */
2944 	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2945 		goto out;
2946 
2947 	/*
2948 	 * The above __this_cpu_inc_return() is 'atomic' CPU local. An
2949 	 * interrupt will either see the value pre-increment or
2950 	 * post-increment. If the interrupt happens pre-increment, it will
2951 	 * have restored the counter when it returns. We just need a barrier
2952 	 * to keep gcc from moving things around.
2953 	 */
2954 	barrier();
2955 
2956 	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2957 	size = ARRAY_SIZE(fstack->calls);
2958 
2959 	if (regs) {
2960 		nr_entries = stack_trace_save_regs(regs, fstack->calls,
2961 						   size, skip);
2962 	} else {
2963 		nr_entries = stack_trace_save(fstack->calls, size, skip);
2964 	}
2965 
2966 #ifdef CONFIG_DYNAMIC_FTRACE
2967 	/* Mark entry of stack trace as trampoline code */
2968 	if (tr->ops && tr->ops->trampoline) {
2969 		unsigned long tramp_start = tr->ops->trampoline;
2970 		unsigned long tramp_end = tramp_start + tr->ops->trampoline_size;
2971 		unsigned long *calls = fstack->calls;
2972 
2973 		for (int i = 0; i < nr_entries; i++) {
2974 			if (calls[i] >= tramp_start && calls[i] < tramp_end)
2975 				calls[i] = FTRACE_TRAMPOLINE_MARKER;
2976 		}
2977 	}
2978 #endif
2979 
2980 	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2981 				    struct_size(entry, caller, nr_entries),
2982 				    trace_ctx);
2983 	if (!event)
2984 		goto out;
2985 	entry = ring_buffer_event_data(event);
2986 
2987 	entry->size = nr_entries;
2988 	memcpy(&entry->caller, fstack->calls,
2989 	       flex_array_size(entry, caller, nr_entries));
2990 
2991 	__buffer_unlock_commit(buffer, event);
2992 
2993  out:
2994 	/* Again, don't let gcc optimize things here */
2995 	barrier();
2996 	__this_cpu_dec(ftrace_stack_reserve);
2997 	preempt_enable_notrace();
2998 
2999 }
3000 
3001 static inline void ftrace_trace_stack(struct trace_array *tr,
3002 				      struct trace_buffer *buffer,
3003 				      unsigned int trace_ctx,
3004 				      int skip, struct pt_regs *regs)
3005 {
3006 	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3007 		return;
3008 
3009 	__ftrace_trace_stack(tr, buffer, trace_ctx, skip, regs);
3010 }
3011 
3012 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3013 		   int skip)
3014 {
3015 	struct trace_buffer *buffer = tr->array_buffer.buffer;
3016 
3017 	if (rcu_is_watching()) {
3018 		__ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL);
3019 		return;
3020 	}
3021 
3022 	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
3023 		return;
3024 
3025 	/*
3026 	 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3027 	 * but if the above rcu_is_watching() failed, then the NMI
3028 	 * triggered someplace critical, and ct_irq_enter() should
3029 	 * not be called from NMI.
3030 	 */
3031 	if (unlikely(in_nmi()))
3032 		return;
3033 
3034 	ct_irq_enter_irqson();
3035 	__ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL);
3036 	ct_irq_exit_irqson();
3037 }
3038 
3039 /**
3040  * trace_dump_stack - record a stack back trace in the trace buffer
3041  * @skip: Number of functions to skip (helper handlers)
3042  */
3043 void trace_dump_stack(int skip)
3044 {
3045 	if (tracing_disabled || tracing_selftest_running)
3046 		return;
3047 
3048 #ifndef CONFIG_UNWINDER_ORC
3049 	/* Skip 1 to skip this function. */
3050 	skip++;
3051 #endif
3052 	__ftrace_trace_stack(printk_trace, printk_trace->array_buffer.buffer,
3053 				tracing_gen_ctx(), skip, NULL);
3054 }
3055 EXPORT_SYMBOL_GPL(trace_dump_stack);
3056 
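/*
 * Illustrative usage (editorial addition, not part of this file): any
 * kernel code can record its current backtrace into the trace buffer:
 *
 *	trace_dump_stack(0);
 *
 * A non-zero argument skips that many additional callers.
 */
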
3057 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3058 static DEFINE_PER_CPU(int, user_stack_count);
3059 
3060 static void
3061 ftrace_trace_userstack(struct trace_array *tr,
3062 		       struct trace_buffer *buffer, unsigned int trace_ctx)
3063 {
3064 	struct ring_buffer_event *event;
3065 	struct userstack_entry *entry;
3066 
3067 	if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3068 		return;
3069 
3070 	/*
3071 	 * NMIs cannot handle page faults, even with fixups.
3072 	 * Saving the user stack can (and often does) fault.
3073 	 */
3074 	if (unlikely(in_nmi()))
3075 		return;
3076 
3077 	/*
3078 	 * prevent recursion, since the user stack tracing may
3079 	 * trigger other kernel events.
3080 	 */
3081 	preempt_disable();
3082 	if (__this_cpu_read(user_stack_count))
3083 		goto out;
3084 
3085 	__this_cpu_inc(user_stack_count);
3086 
3087 	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3088 					    sizeof(*entry), trace_ctx);
3089 	if (!event)
3090 		goto out_drop_count;
3091 	entry	= ring_buffer_event_data(event);
3092 
3093 	entry->tgid		= current->tgid;
3094 	memset(&entry->caller, 0, sizeof(entry->caller));
3095 
3096 	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3097 	__buffer_unlock_commit(buffer, event);
3098 
3099  out_drop_count:
3100 	__this_cpu_dec(user_stack_count);
3101  out:
3102 	preempt_enable();
3103 }
3104 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3105 static void ftrace_trace_userstack(struct trace_array *tr,
3106 				   struct trace_buffer *buffer,
3107 				   unsigned int trace_ctx)
3108 {
3109 }
3110 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3111 
3112 #endif /* CONFIG_STACKTRACE */
3113 
3114 static inline void
3115 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3116 			  unsigned long long delta)
3117 {
3118 	entry->bottom_delta_ts = delta & U32_MAX;
3119 	entry->top_delta_ts = (delta >> 32);
3120 }
3121 
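/*
 * Illustrative note (editorial addition, not part of this file): readers
 * of a func_repeats_entry reconstruct the 64-bit delta from the two
 * 32-bit halves set above:
 *
 *	u64 delta = ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
 */
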
3122 void trace_last_func_repeats(struct trace_array *tr,
3123 			     struct trace_func_repeats *last_info,
3124 			     unsigned int trace_ctx)
3125 {
3126 	struct trace_buffer *buffer = tr->array_buffer.buffer;
3127 	struct func_repeats_entry *entry;
3128 	struct ring_buffer_event *event;
3129 	u64 delta;
3130 
3131 	event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3132 					    sizeof(*entry), trace_ctx);
3133 	if (!event)
3134 		return;
3135 
3136 	delta = ring_buffer_event_time_stamp(buffer, event) -
3137 		last_info->ts_last_call;
3138 
3139 	entry = ring_buffer_event_data(event);
3140 	entry->ip = last_info->ip;
3141 	entry->parent_ip = last_info->parent_ip;
3142 	entry->count = last_info->count;
3143 	func_repeats_set_delta_ts(entry, delta);
3144 
3145 	__buffer_unlock_commit(buffer, event);
3146 }
3147 
3148 /* created for use with alloc_percpu */
3149 struct trace_buffer_struct {
3150 	int nesting;
3151 	char buffer[4][TRACE_BUF_SIZE];
3152 };
3153 
3154 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3155 
3156 /*
3157  * This allows for lockless recording.  If we're nested too deeply, then
3158  * this returns NULL.
3159  */
3160 static char *get_trace_buf(void)
3161 {
3162 	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3163 
3164 	if (!trace_percpu_buffer || buffer->nesting >= 4)
3165 		return NULL;
3166 
3167 	buffer->nesting++;
3168 
3169 	/* Interrupts must see nesting incremented before we use the buffer */
3170 	barrier();
3171 	return &buffer->buffer[buffer->nesting - 1][0];
3172 }
3173 
3174 static void put_trace_buf(void)
3175 {
3176 	/* Don't let the decrement of nesting leak before this */
3177 	barrier();
3178 	this_cpu_dec(trace_percpu_buffer->nesting);
3179 }
3180 
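/*
 * Illustrative usage pattern (editorial addition, not part of this file):
 * callers bracket the buffer use with preemption disabled so the per-CPU
 * nesting counter stays consistent, mirroring trace_vbprintk() below:
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (tbuffer) {
 *		... format at most TRACE_BUF_SIZE bytes into tbuffer ...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 */
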
3181 static int alloc_percpu_trace_buffer(void)
3182 {
3183 	struct trace_buffer_struct __percpu *buffers;
3184 
3185 	if (trace_percpu_buffer)
3186 		return 0;
3187 
3188 	buffers = alloc_percpu(struct trace_buffer_struct);
3189 	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3190 		return -ENOMEM;
3191 
3192 	trace_percpu_buffer = buffers;
3193 	return 0;
3194 }
3195 
3196 static int buffers_allocated;
3197 
3198 void trace_printk_init_buffers(void)
3199 {
3200 	if (buffers_allocated)
3201 		return;
3202 
3203 	if (alloc_percpu_trace_buffer())
3204 		return;
3205 
3206 	/* trace_printk() is for debug use only. Don't use it in production. */
3207 
3208 	pr_warn("\n");
3209 	pr_warn("**********************************************************\n");
3210 	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3211 	pr_warn("**                                                      **\n");
3212 	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
3213 	pr_warn("**                                                      **\n");
3214 	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
3215 	pr_warn("** unsafe for production use.                           **\n");
3216 	pr_warn("**                                                      **\n");
3217 	pr_warn("** If you see this message and you are not debugging    **\n");
3218 	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
3219 	pr_warn("**                                                      **\n");
3220 	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3221 	pr_warn("**********************************************************\n");
3222 
3223 	/* Expand the buffers to set size */
3224 	tracing_update_buffers(&global_trace);
3225 
3226 	buffers_allocated = 1;
3227 
3228 	/*
3229 	 * trace_printk_init_buffers() can be called by modules.
3230 	 * If that happens, then we need to start cmdline recording
3231 	 * directly here. If the global_trace.buffer is already
3232 	 * allocated here, then this was called by module code.
3233 	 */
3234 	if (global_trace.array_buffer.buffer)
3235 		tracing_start_cmdline_record();
3236 }
3237 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3238 
3239 void trace_printk_start_comm(void)
3240 {
3241 	/* Start tracing comms if trace printk is set */
3242 	if (!buffers_allocated)
3243 		return;
3244 	tracing_start_cmdline_record();
3245 }
3246 
3247 static void trace_printk_start_stop_comm(int enabled)
3248 {
3249 	if (!buffers_allocated)
3250 		return;
3251 
3252 	if (enabled)
3253 		tracing_start_cmdline_record();
3254 	else
3255 		tracing_stop_cmdline_record();
3256 }
3257 
3258 /**
3259  * trace_vbprintk - write binary msg to tracing buffer
3260  * @ip:    The address of the caller
3261  * @fmt:   The string format to write to the buffer
3262  * @args:  Arguments for @fmt
3263  */
3264 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3265 {
3266 	struct ring_buffer_event *event;
3267 	struct trace_buffer *buffer;
3268 	struct trace_array *tr = READ_ONCE(printk_trace);
3269 	struct bprint_entry *entry;
3270 	unsigned int trace_ctx;
3271 	char *tbuffer;
3272 	int len = 0, size;
3273 
3274 	if (!printk_binsafe(tr))
3275 		return trace_vprintk(ip, fmt, args);
3276 
3277 	if (unlikely(tracing_selftest_running || tracing_disabled))
3278 		return 0;
3279 
3280 	/* Don't pollute graph traces with trace_vprintk internals */
3281 	pause_graph_tracing();
3282 
3283 	trace_ctx = tracing_gen_ctx();
3284 	preempt_disable_notrace();
3285 
3286 	tbuffer = get_trace_buf();
3287 	if (!tbuffer) {
3288 		len = 0;
3289 		goto out_nobuffer;
3290 	}
3291 
3292 	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3293 
3294 	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3295 		goto out_put;
3296 
3297 	size = sizeof(*entry) + sizeof(u32) * len;
3298 	buffer = tr->array_buffer.buffer;
3299 	ring_buffer_nest_start(buffer);
3300 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3301 					    trace_ctx);
3302 	if (!event)
3303 		goto out;
3304 	entry = ring_buffer_event_data(event);
3305 	entry->ip			= ip;
3306 	entry->fmt			= fmt;
3307 
3308 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3309 	__buffer_unlock_commit(buffer, event);
3310 	ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3311 
3312 out:
3313 	ring_buffer_nest_end(buffer);
3314 out_put:
3315 	put_trace_buf();
3316 
3317 out_nobuffer:
3318 	preempt_enable_notrace();
3319 	unpause_graph_tracing();
3320 
3321 	return len;
3322 }
3323 EXPORT_SYMBOL_GPL(trace_vbprintk);
3324 
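/*
 * Illustrative note (editorial addition, not part of this file): a
 * trace_printk() call whose format is a built-in constant string, e.g.
 *
 *	trace_printk("count=%d\n", count);
 *
 * is routed through this binary path: only the format pointer and the
 * binary arguments are stored in the ring buffer, and the string is
 * formatted later, when the trace is read.
 */
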
3325 __printf(3, 0)
3326 static int
3327 __trace_array_vprintk(struct trace_buffer *buffer,
3328 		      unsigned long ip, const char *fmt, va_list args)
3329 {
3330 	struct ring_buffer_event *event;
3331 	int len = 0, size;
3332 	struct print_entry *entry;
3333 	unsigned int trace_ctx;
3334 	char *tbuffer;
3335 
3336 	if (tracing_disabled)
3337 		return 0;
3338 
3339 	/* Don't pollute graph traces with trace_vprintk internals */
3340 	pause_graph_tracing();
3341 
3342 	trace_ctx = tracing_gen_ctx();
3343 	preempt_disable_notrace();
3344 
3345 
3346 	tbuffer = get_trace_buf();
3347 	if (!tbuffer) {
3348 		len = 0;
3349 		goto out_nobuffer;
3350 	}
3351 
3352 	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3353 
3354 	size = sizeof(*entry) + len + 1;
3355 	ring_buffer_nest_start(buffer);
3356 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3357 					    trace_ctx);
3358 	if (!event)
3359 		goto out;
3360 	entry = ring_buffer_event_data(event);
3361 	entry->ip = ip;
3362 
3363 	memcpy(&entry->buf, tbuffer, len + 1);
3364 	__buffer_unlock_commit(buffer, event);
3365 	ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
3366 
3367 out:
3368 	ring_buffer_nest_end(buffer);
3369 	put_trace_buf();
3370 
3371 out_nobuffer:
3372 	preempt_enable_notrace();
3373 	unpause_graph_tracing();
3374 
3375 	return len;
3376 }
3377 
3378 __printf(3, 0)
3379 int trace_array_vprintk(struct trace_array *tr,
3380 			unsigned long ip, const char *fmt, va_list args)
3381 {
3382 	if (tracing_selftest_running && tr == &global_trace)
3383 		return 0;
3384 
3385 	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3386 }
3387 
3388 /**
3389  * trace_array_printk - Print a message to a specific instance
3390  * @tr: The instance trace_array descriptor
3391  * @ip: The instruction pointer that this is called from.
3392  * @fmt: The format to print (printf format)
3393  *
3394  * If a subsystem sets up its own instance, they have the right to
3395  * printk strings into their tracing instance buffer using this
3396  * function. Note, this function will not write into the top level
3397  * buffer (use trace_printk() for that), as writing into the top level
3398  * buffer should only have events that can be individually disabled.
3399  * trace_printk() is only used for debugging a kernel, and should never
3400  * be incorporated into normal use.
3401  *
3402  * trace_array_printk() can be used, as it will not add noise to the
3403  * top level tracing buffer.
3404  *
3405  * Note, trace_array_init_printk() must be called on @tr before this
3406  * can be used.
3407  */
3408 __printf(3, 0)
3409 int trace_array_printk(struct trace_array *tr,
3410 		       unsigned long ip, const char *fmt, ...)
3411 {
3412 	int ret;
3413 	va_list ap;
3414 
3415 	if (!tr)
3416 		return -ENOENT;
3417 
3418 	/* This is only allowed for created instances */
3419 	if (tr == &global_trace)
3420 		return 0;
3421 
3422 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3423 		return 0;
3424 
3425 	va_start(ap, fmt);
3426 	ret = trace_array_vprintk(tr, ip, fmt, ap);
3427 	va_end(ap);
3428 	return ret;
3429 }
3430 EXPORT_SYMBOL_GPL(trace_array_printk);
3431 
3432 /**
3433  * trace_array_init_printk - Initialize buffers for trace_array_printk()
3434  * @tr: The trace array to initialize the buffers for
3435  *
3436  * As trace_array_printk() only writes into instances, they are OK to
3437  * have in the kernel (unlike trace_printk()). This needs to be called
3438  * before trace_array_printk() can be used on a trace_array.
3439  */
3440 int trace_array_init_printk(struct trace_array *tr)
3441 {
3442 	if (!tr)
3443 		return -ENOENT;
3444 
3445 	/* This is only allowed for created instances */
3446 	if (tr == &global_trace)
3447 		return -EINVAL;
3448 
3449 	return alloc_percpu_trace_buffer();
3450 }
3451 EXPORT_SYMBOL_GPL(trace_array_init_printk);
3452 
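/*
 * Illustrative usage (editorial addition, not part of this file): a
 * subsystem that owns a trace_array instance ("tr" below, obtained
 * elsewhere, e.g. via trace_array_get_by_name()) initializes the
 * printk buffers once and may then write into its own buffer:
 *
 *	if (!trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "hello from %s\n", "example");
 */
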
3453 __printf(3, 4)
3454 int trace_array_printk_buf(struct trace_buffer *buffer,
3455 			   unsigned long ip, const char *fmt, ...)
3456 {
3457 	int ret;
3458 	va_list ap;
3459 
3460 	if (!(printk_trace->trace_flags & TRACE_ITER_PRINTK))
3461 		return 0;
3462 
3463 	va_start(ap, fmt);
3464 	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3465 	va_end(ap);
3466 	return ret;
3467 }
3468 
3469 __printf(2, 0)
3470 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3471 {
3472 	return trace_array_vprintk(printk_trace, ip, fmt, args);
3473 }
3474 EXPORT_SYMBOL_GPL(trace_vprintk);
3475 
3476 static void trace_iterator_increment(struct trace_iterator *iter)
3477 {
3478 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3479 
3480 	iter->idx++;
3481 	if (buf_iter)
3482 		ring_buffer_iter_advance(buf_iter);
3483 }
3484 
3485 static struct trace_entry *
3486 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3487 		unsigned long *lost_events)
3488 {
3489 	struct ring_buffer_event *event;
3490 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3491 
3492 	if (buf_iter) {
3493 		event = ring_buffer_iter_peek(buf_iter, ts);
3494 		if (lost_events)
3495 			*lost_events = ring_buffer_iter_dropped(buf_iter) ?
3496 				(unsigned long)-1 : 0;
3497 	} else {
3498 		event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3499 					 lost_events);
3500 	}
3501 
3502 	if (event) {
3503 		iter->ent_size = ring_buffer_event_length(event);
3504 		return ring_buffer_event_data(event);
3505 	}
3506 	iter->ent_size = 0;
3507 	return NULL;
3508 }
3509 
3510 static struct trace_entry *
3511 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3512 		  unsigned long *missing_events, u64 *ent_ts)
3513 {
3514 	struct trace_buffer *buffer = iter->array_buffer->buffer;
3515 	struct trace_entry *ent, *next = NULL;
3516 	unsigned long lost_events = 0, next_lost = 0;
3517 	int cpu_file = iter->cpu_file;
3518 	u64 next_ts = 0, ts;
3519 	int next_cpu = -1;
3520 	int next_size = 0;
3521 	int cpu;
3522 
3523 	/*
3524 	 * If we are in a per_cpu trace file, don't bother iterating over
3525 	 * all CPUs; just peek directly.
3526 	 */
3527 	if (cpu_file > RING_BUFFER_ALL_CPUS) {
3528 		if (ring_buffer_empty_cpu(buffer, cpu_file))
3529 			return NULL;
3530 		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3531 		if (ent_cpu)
3532 			*ent_cpu = cpu_file;
3533 
3534 		return ent;
3535 	}
3536 
3537 	for_each_tracing_cpu(cpu) {
3538 
3539 		if (ring_buffer_empty_cpu(buffer, cpu))
3540 			continue;
3541 
3542 		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3543 
3544 		/*
3545 		 * Pick the entry with the smallest timestamp:
3546 		 */
3547 		if (ent && (!next || ts < next_ts)) {
3548 			next = ent;
3549 			next_cpu = cpu;
3550 			next_ts = ts;
3551 			next_lost = lost_events;
3552 			next_size = iter->ent_size;
3553 		}
3554 	}
3555 
3556 	iter->ent_size = next_size;
3557 
3558 	if (ent_cpu)
3559 		*ent_cpu = next_cpu;
3560 
3561 	if (ent_ts)
3562 		*ent_ts = next_ts;
3563 
3564 	if (missing_events)
3565 		*missing_events = next_lost;
3566 
3567 	return next;
3568 }
3569 
3570 #define STATIC_FMT_BUF_SIZE	128
3571 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3572 
3573 char *trace_iter_expand_format(struct trace_iterator *iter)
3574 {
3575 	char *tmp;
3576 
3577 	/*
3578 	 * iter->tr is NULL when used with tp_printk, which means
3579 	 * this can be called where it is not safe to call krealloc().
3580 	 */
3581 	if (!iter->tr || iter->fmt == static_fmt_buf)
3582 		return NULL;
3583 
3584 	tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3585 		       GFP_KERNEL);
3586 	if (tmp) {
3587 		iter->fmt_size += STATIC_FMT_BUF_SIZE;
3588 		iter->fmt = tmp;
3589 	}
3590 
3591 	return tmp;
3592 }
3593 
3594 /* Returns true if the string is safe to dereference from an event */
3595 static bool trace_safe_str(struct trace_iterator *iter, const char *str)
3596 {
3597 	unsigned long addr = (unsigned long)str;
3598 	struct trace_event *trace_event;
3599 	struct trace_event_call *event;
3600 
3601 	/* OK if part of the event data */
3602 	if ((addr >= (unsigned long)iter->ent) &&
3603 	    (addr < (unsigned long)iter->ent + iter->ent_size))
3604 		return true;
3605 
3606 	/* OK if part of the temp seq buffer */
3607 	if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3608 	    (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE))
3609 		return true;
3610 
3611 	/* Core rodata can not be freed */
3612 	if (is_kernel_rodata(addr))
3613 		return true;
3614 
3615 	if (trace_is_tracepoint_string(str))
3616 		return true;
3617 
3618 	/*
3619 	 * Now this could be a module event, referencing core module
3620 	 * data, which is OK.
3621 	 */
3622 	if (!iter->ent)
3623 		return false;
3624 
3625 	trace_event = ftrace_find_event(iter->ent->type);
3626 	if (!trace_event)
3627 		return false;
3628 
3629 	event = container_of(trace_event, struct trace_event_call, event);
3630 	if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3631 		return false;
3632 
3633 	/* Would rather have rodata, but this will suffice */
3634 	if (within_module_core(addr, event->module))
3635 		return true;
3636 
3637 	return false;
3638 }
3639 
3640 /**
3641  * ignore_event - Check dereferenced fields while writing to the seq buffer
3642  * @iter: The iterator that holds the seq buffer and the event being printed
3643  *
3644  * At boot up, test_event_printk() will flag any event that dereferences
3645  * a string with "%s" that does not exist in the ring buffer. It may still
3646  * be valid, as the string may point to a static string in the kernel
3647  * rodata that never gets freed. But if the string pointer is pointing
3648  * to something that was allocated, there's a chance that it can be freed
3649  * by the time the user reads the trace. This would cause a bad memory
3650  * access by the kernel and possibly crash the system.
3651  *
3652  * This function will check if the event has any fields flagged as needing
3653  * to be checked at runtime and perform those checks.
3654  *
3655  * If it is found that a field is unsafe, it will write into the @iter->seq
3656  * a message stating what was found to be unsafe.
3657  *
3658  * @return: true if the event is unsafe and should be ignored,
3659  *          false otherwise.
3660  */
3661 bool ignore_event(struct trace_iterator *iter)
3662 {
3663 	struct ftrace_event_field *field;
3664 	struct trace_event *trace_event;
3665 	struct trace_event_call *event;
3666 	struct list_head *head;
3667 	struct trace_seq *seq;
3668 	const void *ptr;
3669 
3670 	trace_event = ftrace_find_event(iter->ent->type);
3671 
3672 	seq = &iter->seq;
3673 
3674 	if (!trace_event) {
3675 		trace_seq_printf(seq, "EVENT ID %d NOT FOUND?\n", iter->ent->type);
3676 		return true;
3677 	}
3678 
3679 	event = container_of(trace_event, struct trace_event_call, event);
3680 	if (!(event->flags & TRACE_EVENT_FL_TEST_STR))
3681 		return false;
3682 
3683 	head = trace_get_fields(event);
3684 	if (!head) {
3685 		trace_seq_printf(seq, "FIELDS FOR EVENT '%s' NOT FOUND?\n",
3686 				 trace_event_name(event));
3687 		return true;
3688 	}
3689 
3690 	/* Offsets are from the iter->ent that points to the raw event */
3691 	ptr = iter->ent;
3692 
3693 	list_for_each_entry(field, head, link) {
3694 		const char *str;
3695 		bool good;
3696 
3697 		if (!field->needs_test)
3698 			continue;
3699 
3700 		str = *(const char **)(ptr + field->offset);
3701 
3702 		good = trace_safe_str(iter, str);
3703 
3704 		/*
3705 		 * If you hit this warning, it is likely that the
3706 		 * trace event in question used %s on a string that
3707 		 * was saved at the time of the event, but may not be
3708 		 * around when the trace is read. Use __string(),
3709 		 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3710 		 * instead. See samples/trace_events/trace-events-sample.h
3711 		 * for reference.
3712 		 */
3713 		if (WARN_ONCE(!good, "event '%s' has unsafe pointer field '%s'",
3714 			      trace_event_name(event), field->name)) {
3715 			trace_seq_printf(seq, "EVENT %s: HAS UNSAFE POINTER FIELD '%s'\n",
3716 					 trace_event_name(event), field->name);
3717 			return true;
3718 		}
3719 	}
3720 	return false;
3721 }
3722 
3723 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3724 {
3725 	const char *p, *new_fmt;
3726 	char *q;
3727 
3728 	if (WARN_ON_ONCE(!fmt))
3729 		return fmt;
3730 
3731 	if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3732 		return fmt;
3733 
3734 	p = fmt;
3735 	new_fmt = q = iter->fmt;
3736 	while (*p) {
3737 		if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3738 			if (!trace_iter_expand_format(iter))
3739 				return fmt;
3740 
3741 			q += iter->fmt - new_fmt;
3742 			new_fmt = iter->fmt;
3743 		}
3744 
3745 		*q++ = *p++;
3746 
3747 		/* Replace %p with %px */
3748 		if (p[-1] == '%') {
3749 			if (p[0] == '%') {
3750 				*q++ = *p++;
3751 			} else if (p[0] == 'p' && !isalnum(p[1])) {
3752 				*q++ = *p++;
3753 				*q++ = 'x';
3754 			}
3755 		}
3756 	}
3757 	*q = '\0';
3758 
3759 	return new_fmt;
3760 }
3761 
3762 #define STATIC_TEMP_BUF_SIZE	128
3763 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3764 
3765 /* Find the next real entry, without updating the iterator itself */
3766 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3767 					  int *ent_cpu, u64 *ent_ts)
3768 {
3769 	/* __find_next_entry will reset ent_size */
3770 	int ent_size = iter->ent_size;
3771 	struct trace_entry *entry;
3772 
3773 	/*
3774 	 * If called from ftrace_dump(), then the iter->temp buffer
3775 	 * will be the static_temp_buf and not created from kmalloc.
3776 	 * If the entry size is greater than the buffer, we cannot
3777 	 * save it. Just return NULL in that case. This is only
3778 	 * used to add markers when two consecutive events' time
3779 	 * stamps have a large delta. See trace_print_lat_context().
3780 	 */
3781 	if (iter->temp == static_temp_buf &&
3782 	    STATIC_TEMP_BUF_SIZE < ent_size)
3783 		return NULL;
3784 
3785 	/*
3786 	 * The __find_next_entry() may call peek_next_entry(), which may
3787 	 * call ring_buffer_peek() that may make the contents of iter->ent
3788 	 * undefined. Need to copy iter->ent now.
3789 	 */
3790 	if (iter->ent && iter->ent != iter->temp) {
3791 		if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3792 		    !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3793 			void *temp;
3794 			temp = kmalloc(iter->ent_size, GFP_KERNEL);
3795 			if (!temp)
3796 				return NULL;
3797 			kfree(iter->temp);
3798 			iter->temp = temp;
3799 			iter->temp_size = iter->ent_size;
3800 		}
3801 		memcpy(iter->temp, iter->ent, iter->ent_size);
3802 		iter->ent = iter->temp;
3803 	}
3804 	entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3805 	/* Put back the original ent_size */
3806 	iter->ent_size = ent_size;
3807 
3808 	return entry;
3809 }
3810 
3811 /* Find the next real entry, and increment the iterator to the next entry */
3812 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3813 {
3814 	iter->ent = __find_next_entry(iter, &iter->cpu,
3815 				      &iter->lost_events, &iter->ts);
3816 
3817 	if (iter->ent)
3818 		trace_iterator_increment(iter);
3819 
3820 	return iter->ent ? iter : NULL;
3821 }
3822 
3823 static void trace_consume(struct trace_iterator *iter)
3824 {
3825 	ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3826 			    &iter->lost_events);
3827 }
3828 
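/*
 * seq_file .next operation for the trace file. Advances the iterator
 * until it reaches the entry at *pos. The iterator can only move
 * forward, so a position behind the current index returns NULL.
 */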
3829 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3830 {
3831 	struct trace_iterator *iter = m->private;
3832 	int i = (int)*pos;
3833 	void *ent;
3834 
3835 	WARN_ON_ONCE(iter->leftover);
3836 
3837 	(*pos)++;
3838 
3839 	/* can't go backwards */
3840 	if (iter->idx > i)
3841 		return NULL;
3842 
3843 	if (iter->idx < 0)
3844 		ent = trace_find_next_entry_inc(iter);
3845 	else
3846 		ent = iter;
3847 
3848 	while (ent && iter->idx < i)
3849 		ent = trace_find_next_entry_inc(iter);
3850 
3851 	iter->pos = *pos;
3852 
3853 	return ent;
3854 }
3855 
3856 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3857 {
3858 	struct ring_buffer_iter *buf_iter;
3859 	unsigned long entries = 0;
3860 	u64 ts;
3861 
3862 	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3863 
3864 	buf_iter = trace_buffer_iter(iter, cpu);
3865 	if (!buf_iter)
3866 		return;
3867 
3868 	ring_buffer_iter_reset(buf_iter);
3869 
3870 	/*
3871 	 * We could have the case with the max latency tracers
3872 	 * that a reset never took place on a cpu. This is evident
3873 	 * by the timestamp being before the start of the buffer.
3874 	 */
3875 	while (ring_buffer_iter_peek(buf_iter, &ts)) {
3876 		if (ts >= iter->array_buffer->time_start)
3877 			break;
3878 		entries++;
3879 		ring_buffer_iter_advance(buf_iter);
3880 		/* This could be a big loop */
3881 		cond_resched();
3882 	}
3883 
3884 	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3885 }
3886 
3887 /*
3888  * The current tracer is copied into the iterator to avoid
3889  * taking a global lock all around.
3890  */
3891 static void *s_start(struct seq_file *m, loff_t *pos)
3892 {
3893 	struct trace_iterator *iter = m->private;
3894 	struct trace_array *tr = iter->tr;
3895 	int cpu_file = iter->cpu_file;
3896 	void *p = NULL;
3897 	loff_t l = 0;
3898 	int cpu;
3899 
3900 	mutex_lock(&trace_types_lock);
3901 	if (unlikely(tr->current_trace != iter->trace)) {
3902 		/* Close iter->trace before switching to the new current tracer */
3903 		if (iter->trace->close)
3904 			iter->trace->close(iter);
3905 		iter->trace = tr->current_trace;
3906 		/* Reopen the new current tracer */
3907 		if (iter->trace->open)
3908 			iter->trace->open(iter);
3909 	}
3910 	mutex_unlock(&trace_types_lock);
3911 
3912 #ifdef CONFIG_TRACER_MAX_TRACE
3913 	if (iter->snapshot && iter->trace->use_max_tr)
3914 		return ERR_PTR(-EBUSY);
3915 #endif
3916 
3917 	if (*pos != iter->pos) {
3918 		iter->ent = NULL;
3919 		iter->cpu = 0;
3920 		iter->idx = -1;
3921 
3922 		if (cpu_file == RING_BUFFER_ALL_CPUS) {
3923 			for_each_tracing_cpu(cpu)
3924 				tracing_iter_reset(iter, cpu);
3925 		} else
3926 			tracing_iter_reset(iter, cpu_file);
3927 
3928 		iter->leftover = 0;
3929 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3930 			;
3931 
3932 	} else {
3933 		/*
3934 		 * If we overflowed the seq_file before, then we want
3935 		 * to just reuse the trace_seq buffer again.
3936 		 */
3937 		if (iter->leftover)
3938 			p = iter;
3939 		else {
3940 			l = *pos - 1;
3941 			p = s_next(m, p, &l);
3942 		}
3943 	}
3944 
3945 	trace_event_read_lock();
3946 	trace_access_lock(cpu_file);
3947 	return p;
3948 }
3949 
3950 static void s_stop(struct seq_file *m, void *p)
3951 {
3952 	struct trace_iterator *iter = m->private;
3953 
3954 #ifdef CONFIG_TRACER_MAX_TRACE
3955 	if (iter->snapshot && iter->trace->use_max_tr)
3956 		return;
3957 #endif
3958 
3959 	trace_access_unlock(iter->cpu_file);
3960 	trace_event_read_unlock();
3961 }
3962 
3963 static void
3964 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
3965 		      unsigned long *entries, int cpu)
3966 {
3967 	unsigned long count;
3968 
3969 	count = ring_buffer_entries_cpu(buf->buffer, cpu);
3970 	/*
3971 	 * If this buffer has skipped entries, then we hold all
3972 	 * entries for the trace and we need to ignore the
3973 	 * ones before the time stamp.
3974 	 */
3975 	if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3976 		count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3977 		/* total is the same as the entries */
3978 		*total = count;
3979 	} else
3980 		*total = count +
3981 			ring_buffer_overrun_cpu(buf->buffer, cpu);
3982 	*entries = count;
3983 }
3984 
3985 static void
3986 get_total_entries(struct array_buffer *buf,
3987 		  unsigned long *total, unsigned long *entries)
3988 {
3989 	unsigned long t, e;
3990 	int cpu;
3991 
3992 	*total = 0;
3993 	*entries = 0;
3994 
3995 	for_each_tracing_cpu(cpu) {
3996 		get_total_entries_cpu(buf, &t, &e, cpu);
3997 		*total += t;
3998 		*entries += e;
3999 	}
4000 }
4001 
4002 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4003 {
4004 	unsigned long total, entries;
4005 
4006 	if (!tr)
4007 		tr = &global_trace;
4008 
4009 	get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4010 
4011 	return entries;
4012 }
4013 
4014 unsigned long trace_total_entries(struct trace_array *tr)
4015 {
4016 	unsigned long total, entries;
4017 
4018 	if (!tr)
4019 		tr = &global_trace;
4020 
4021 	get_total_entries(&tr->array_buffer, &total, &entries);
4022 
4023 	return entries;
4024 }
4025 
4026 static void print_lat_help_header(struct seq_file *m)
4027 {
4028 	seq_puts(m, "#                    _------=> CPU#            \n"
4029 		    "#                   / _-----=> irqs-off/BH-disabled\n"
4030 		    "#                  | / _----=> need-resched    \n"
4031 		    "#                  || / _---=> hardirq/softirq \n"
4032 		    "#                  ||| / _--=> preempt-depth   \n"
4033 		    "#                  |||| / _-=> migrate-disable \n"
4034 		    "#                  ||||| /     delay           \n"
4035 		    "#  cmd     pid     |||||| time  |   caller     \n"
4036 		    "#     \\   /        ||||||  \\    |    /       \n");
4037 }
4038 
4039 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4040 {
4041 	unsigned long total;
4042 	unsigned long entries;
4043 
4044 	get_total_entries(buf, &total, &entries);
4045 	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
4046 		   entries, total, num_online_cpus());
4047 	seq_puts(m, "#\n");
4048 }
4049 
4050 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4051 				   unsigned int flags)
4052 {
4053 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
4054 
4055 	print_event_info(buf, m);
4056 
4057 	seq_printf(m, "#           TASK-PID    %s CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : "");
4058 	seq_printf(m, "#              | |      %s   |         |         |\n",      tgid ? "     |    " : "");
4059 }
4060 
4061 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4062 				       unsigned int flags)
4063 {
4064 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
4065 	static const char space[] = "            ";
4066 	int prec = tgid ? 12 : 2;
4067 
4068 	print_event_info(buf, m);
4069 
4070 	seq_printf(m, "#                            %.*s  _-----=> irqs-off/BH-disabled\n", prec, space);
4071 	seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
4072 	seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
4073 	seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
4074 	seq_printf(m, "#                            %.*s||| / _-=> migrate-disable\n", prec, space);
4075 	seq_printf(m, "#                            %.*s|||| /     delay\n", prec, space);
4076 	seq_printf(m, "#           TASK-PID  %.*s CPU#  |||||  TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
4077 	seq_printf(m, "#              | |    %.*s   |   |||||     |         |\n", prec, "       |    ");
4078 }
4079 
4080 void
4081 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4082 {
4083 	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4084 	struct array_buffer *buf = iter->array_buffer;
4085 	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4086 	struct tracer *type = iter->trace;
4087 	unsigned long entries;
4088 	unsigned long total;
4089 	const char *name = type->name;
4090 
4091 	get_total_entries(buf, &total, &entries);
4092 
4093 	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4094 		   name, init_utsname()->release);
4095 	seq_puts(m, "# -----------------------------------"
4096 		 "---------------------------------\n");
4097 	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4098 		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4099 		   nsecs_to_usecs(data->saved_latency),
4100 		   entries,
4101 		   total,
4102 		   buf->cpu,
4103 		   preempt_model_none()      ? "server" :
4104 		   preempt_model_voluntary() ? "desktop" :
4105 		   preempt_model_full()      ? "preempt" :
4106 		   preempt_model_lazy()	     ? "lazy"    :
4107 		   preempt_model_rt()        ? "preempt_rt" :
4108 		   "unknown",
4109 		   /* These are reserved for later use */
4110 		   0, 0, 0, 0);
4111 #ifdef CONFIG_SMP
4112 	seq_printf(m, " #P:%d)\n", num_online_cpus());
4113 #else
4114 	seq_puts(m, ")\n");
4115 #endif
4116 	seq_puts(m, "#    -----------------\n");
4117 	seq_printf(m, "#    | task: %.16s-%d "
4118 		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4119 		   data->comm, data->pid,
4120 		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4121 		   data->policy, data->rt_priority);
4122 	seq_puts(m, "#    -----------------\n");
4123 
4124 	if (data->critical_start) {
4125 		seq_puts(m, "#  => started at: ");
4126 		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4127 		trace_print_seq(m, &iter->seq);
4128 		seq_puts(m, "\n#  => ended at:   ");
4129 		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4130 		trace_print_seq(m, &iter->seq);
4131 		seq_puts(m, "\n#\n");
4132 	}
4133 
4134 	seq_puts(m, "#\n");
4135 }
4136 
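/*
 * When annotations are enabled and the buffers had overruns, print a
 * "CPU buffer started" marker the first time an entry from a given CPU
 * is seen, marking where that CPU's remaining data begins.
 */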
4137 static void test_cpu_buff_start(struct trace_iterator *iter)
4138 {
4139 	struct trace_seq *s = &iter->seq;
4140 	struct trace_array *tr = iter->tr;
4141 
4142 	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4143 		return;
4144 
4145 	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4146 		return;
4147 
4148 	if (cpumask_available(iter->started) &&
4149 	    cpumask_test_cpu(iter->cpu, iter->started))
4150 		return;
4151 
4152 	if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4153 		return;
4154 
4155 	if (cpumask_available(iter->started))
4156 		cpumask_set_cpu(iter->cpu, iter->started);
4157 
4158 	/* Don't print started cpu buffer for the first entry of the trace */
4159 	if (iter->idx > 1)
4160 		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4161 				iter->cpu);
4162 }
4163 
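/*
 * Default output format for an entry: print the context (comm, PID,
 * CPU, flags, timestamp), then hand off to the event's trace()
 * callback, falling back to printing raw fields when requested or when
 * a text delta makes the event's print_fmt unsafe to use.
 */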
4164 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4165 {
4166 	struct trace_array *tr = iter->tr;
4167 	struct trace_seq *s = &iter->seq;
4168 	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4169 	struct trace_entry *entry;
4170 	struct trace_event *event;
4171 
4172 	entry = iter->ent;
4173 
4174 	test_cpu_buff_start(iter);
4175 
4176 	event = ftrace_find_event(entry->type);
4177 
4178 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4179 		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4180 			trace_print_lat_context(iter);
4181 		else
4182 			trace_print_context(iter);
4183 	}
4184 
4185 	if (trace_seq_has_overflowed(s))
4186 		return TRACE_TYPE_PARTIAL_LINE;
4187 
4188 	if (event) {
4189 		if (tr->trace_flags & TRACE_ITER_FIELDS)
4190 			return print_event_fields(iter, event);
4191 		/*
4192 		 * For TRACE_EVENT() events, the print_fmt is not
4193 		 * safe to use if the array has delta offsets.
4194 		 * Force printing via the fields.
4195 		 */
4196 		if ((tr->text_delta) &&
4197 		    event->type > __TRACE_LAST_TYPE)
4198 			return print_event_fields(iter, event);
4199 
4200 		return event->funcs->trace(iter, sym_flags, event);
4201 	}
4202 
4203 	trace_seq_printf(s, "Unknown type %d\n", entry->type);
4204 
4205 	return trace_handle_return(s);
4206 }
4207 
4208 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4209 {
4210 	struct trace_array *tr = iter->tr;
4211 	struct trace_seq *s = &iter->seq;
4212 	struct trace_entry *entry;
4213 	struct trace_event *event;
4214 
4215 	entry = iter->ent;
4216 
4217 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4218 		trace_seq_printf(s, "%d %d %llu ",
4219 				 entry->pid, iter->cpu, iter->ts);
4220 
4221 	if (trace_seq_has_overflowed(s))
4222 		return TRACE_TYPE_PARTIAL_LINE;
4223 
4224 	event = ftrace_find_event(entry->type);
4225 	if (event)
4226 		return event->funcs->raw(iter, 0, event);
4227 
4228 	trace_seq_printf(s, "%d ?\n", entry->type);
4229 
4230 	return trace_handle_return(s);
4231 }
4232 
4233 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4234 {
4235 	struct trace_array *tr = iter->tr;
4236 	struct trace_seq *s = &iter->seq;
4237 	unsigned char newline = '\n';
4238 	struct trace_entry *entry;
4239 	struct trace_event *event;
4240 
4241 	entry = iter->ent;
4242 
4243 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4244 		SEQ_PUT_HEX_FIELD(s, entry->pid);
4245 		SEQ_PUT_HEX_FIELD(s, iter->cpu);
4246 		SEQ_PUT_HEX_FIELD(s, iter->ts);
4247 		if (trace_seq_has_overflowed(s))
4248 			return TRACE_TYPE_PARTIAL_LINE;
4249 	}
4250 
4251 	event = ftrace_find_event(entry->type);
4252 	if (event) {
4253 		enum print_line_t ret = event->funcs->hex(iter, 0, event);
4254 		if (ret != TRACE_TYPE_HANDLED)
4255 			return ret;
4256 	}
4257 
4258 	SEQ_PUT_FIELD(s, newline);
4259 
4260 	return trace_handle_return(s);
4261 }
4262 
4263 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4264 {
4265 	struct trace_array *tr = iter->tr;
4266 	struct trace_seq *s = &iter->seq;
4267 	struct trace_entry *entry;
4268 	struct trace_event *event;
4269 
4270 	entry = iter->ent;
4271 
4272 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4273 		SEQ_PUT_FIELD(s, entry->pid);
4274 		SEQ_PUT_FIELD(s, iter->cpu);
4275 		SEQ_PUT_FIELD(s, iter->ts);
4276 		if (trace_seq_has_overflowed(s))
4277 			return TRACE_TYPE_PARTIAL_LINE;
4278 	}
4279 
4280 	event = ftrace_find_event(entry->type);
4281 	return event ? event->funcs->binary(iter, 0, event) :
4282 		TRACE_TYPE_HANDLED;
4283 }
4284 
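/*
 * Return 1 if there is nothing left to read in the buffer(s) this
 * iterator covers, 0 otherwise. Honors iter->cpu_file so per-CPU
 * trace files only check their own buffer.
 */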
4285 int trace_empty(struct trace_iterator *iter)
4286 {
4287 	struct ring_buffer_iter *buf_iter;
4288 	int cpu;
4289 
4290 	/* If we are looking at one CPU buffer, only check that one */
4291 	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4292 		cpu = iter->cpu_file;
4293 		buf_iter = trace_buffer_iter(iter, cpu);
4294 		if (buf_iter) {
4295 			if (!ring_buffer_iter_empty(buf_iter))
4296 				return 0;
4297 		} else {
4298 			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4299 				return 0;
4300 		}
4301 		return 1;
4302 	}
4303 
4304 	for_each_tracing_cpu(cpu) {
4305 		buf_iter = trace_buffer_iter(iter, cpu);
4306 		if (buf_iter) {
4307 			if (!ring_buffer_iter_empty(buf_iter))
4308 				return 0;
4309 		} else {
4310 			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4311 				return 0;
4312 		}
4313 	}
4314 
4315 	return 1;
4316 }
4317 
4318 /*  Called with trace_event_read_lock() held. */
4319 enum print_line_t print_trace_line(struct trace_iterator *iter)
4320 {
4321 	struct trace_array *tr = iter->tr;
4322 	unsigned long trace_flags = tr->trace_flags;
4323 	enum print_line_t ret;
4324 
4325 	if (iter->lost_events) {
4326 		if (iter->lost_events == (unsigned long)-1)
4327 			trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4328 					 iter->cpu);
4329 		else
4330 			trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4331 					 iter->cpu, iter->lost_events);
4332 		if (trace_seq_has_overflowed(&iter->seq))
4333 			return TRACE_TYPE_PARTIAL_LINE;
4334 	}
4335 
4336 	if (iter->trace && iter->trace->print_line) {
4337 		ret = iter->trace->print_line(iter);
4338 		if (ret != TRACE_TYPE_UNHANDLED)
4339 			return ret;
4340 	}
4341 
4342 	if (iter->ent->type == TRACE_BPUTS &&
4343 			trace_flags & TRACE_ITER_PRINTK &&
4344 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4345 		return trace_print_bputs_msg_only(iter);
4346 
4347 	if (iter->ent->type == TRACE_BPRINT &&
4348 			trace_flags & TRACE_ITER_PRINTK &&
4349 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4350 		return trace_print_bprintk_msg_only(iter);
4351 
4352 	if (iter->ent->type == TRACE_PRINT &&
4353 			trace_flags & TRACE_ITER_PRINTK &&
4354 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4355 		return trace_print_printk_msg_only(iter);
4356 
4357 	if (trace_flags & TRACE_ITER_BIN)
4358 		return print_bin_fmt(iter);
4359 
4360 	if (trace_flags & TRACE_ITER_HEX)
4361 		return print_hex_fmt(iter);
4362 
4363 	if (trace_flags & TRACE_ITER_RAW)
4364 		return print_raw_fmt(iter);
4365 
4366 	return print_trace_fmt(iter);
4367 }
4368 
4369 void trace_latency_header(struct seq_file *m)
4370 {
4371 	struct trace_iterator *iter = m->private;
4372 	struct trace_array *tr = iter->tr;
4373 
4374 	/* print nothing if the buffers are empty */
4375 	if (trace_empty(iter))
4376 		return;
4377 
4378 	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4379 		print_trace_header(m, iter);
4380 
4381 	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4382 		print_lat_help_header(m);
4383 }
4384 
4385 void trace_default_header(struct seq_file *m)
4386 {
4387 	struct trace_iterator *iter = m->private;
4388 	struct trace_array *tr = iter->tr;
4389 	unsigned long trace_flags = tr->trace_flags;
4390 
4391 	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4392 		return;
4393 
4394 	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4395 		/* print nothing if the buffers are empty */
4396 		if (trace_empty(iter))
4397 			return;
4398 		print_trace_header(m, iter);
4399 		if (!(trace_flags & TRACE_ITER_VERBOSE))
4400 			print_lat_help_header(m);
4401 	} else {
4402 		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4403 			if (trace_flags & TRACE_ITER_IRQ_INFO)
4404 				print_func_help_header_irq(iter->array_buffer,
4405 							   m, trace_flags);
4406 			else
4407 				print_func_help_header(iter->array_buffer, m,
4408 						       trace_flags);
4409 		}
4410 	}
4411 }
4412 
4413 static void test_ftrace_alive(struct seq_file *m)
4414 {
4415 	if (!ftrace_is_dead())
4416 		return;
4417 	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4418 		    "#          MAY BE MISSING FUNCTION EVENTS\n");
4419 }
4420 
4421 #ifdef CONFIG_TRACER_MAX_TRACE
4422 static void show_snapshot_main_help(struct seq_file *m)
4423 {
4424 	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4425 		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4426 		    "#                      Takes a snapshot of the main buffer.\n"
4427 		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4428 		    "#                      (Doesn't have to be '2'; works with any number that\n"
4429 		    "#                       is not a '0' or '1')\n");
4430 }
4431 
4432 static void show_snapshot_percpu_help(struct seq_file *m)
4433 {
4434 	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4435 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4436 	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4437 		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
4438 #else
4439 	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4440 		    "#                     Must use main snapshot file to allocate.\n");
4441 #endif
4442 	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4443 		    "#                      (Doesn't have to be '2'; works with any number that\n"
4444 		    "#                       is not a '0' or '1')\n");
4445 }
4446 
4447 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4448 {
4449 	if (iter->tr->allocated_snapshot)
4450 		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4451 	else
4452 		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4453 
4454 	seq_puts(m, "# Snapshot commands:\n");
4455 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4456 		show_snapshot_main_help(m);
4457 	else
4458 		show_snapshot_percpu_help(m);
4459 }
4460 #else
4461 /* Should never be called */
4462 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4463 #endif
4464 
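/*
 * seq_file .show operation for the trace file. With no pending entry,
 * print the appropriate header (or snapshot help); if a line was left
 * over from a previous overflow of the seq_file buffer, replay it;
 * otherwise format the current entry into the output.
 */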
4465 static int s_show(struct seq_file *m, void *v)
4466 {
4467 	struct trace_iterator *iter = v;
4468 	int ret;
4469 
4470 	if (iter->ent == NULL) {
4471 		if (iter->tr) {
4472 			seq_printf(m, "# tracer: %s\n", iter->trace->name);
4473 			seq_puts(m, "#\n");
4474 			test_ftrace_alive(m);
4475 		}
4476 		if (iter->snapshot && trace_empty(iter))
4477 			print_snapshot_help(m, iter);
4478 		else if (iter->trace && iter->trace->print_header)
4479 			iter->trace->print_header(m);
4480 		else
4481 			trace_default_header(m);
4482 
4483 	} else if (iter->leftover) {
4484 		/*
4485 		 * If we filled the seq_file buffer earlier, we
4486 		 * want to just show it now.
4487 		 */
4488 		ret = trace_print_seq(m, &iter->seq);
4489 
4490 		/* ret should this time be zero, but you never know */
4491 		iter->leftover = ret;
4492 
4493 	} else {
4494 		ret = print_trace_line(iter);
4495 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
4496 			iter->seq.full = 0;
4497 			trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
4498 		}
4499 		ret = trace_print_seq(m, &iter->seq);
4500 		/*
4501 		 * If we overflow the seq_file buffer, then it will
4502 		 * ask us for this data again at start up.
4503 		 * Use that instead.
4504 		 *  ret is 0 if seq_file write succeeded.
4505 		 *        -1 otherwise.
4506 		 */
4507 		iter->leftover = ret;
4508 	}
4509 
4510 	return 0;
4511 }
4512 
4513 /*
4514  * Should be used after trace_array_get(); trace_types_lock
4515  * ensures that i_cdev was already initialized.
4516  */
4517 static inline int tracing_get_cpu(struct inode *inode)
4518 {
4519 	if (inode->i_cdev) /* See trace_create_cpu_file() */
4520 		return (long)inode->i_cdev - 1;
4521 	return RING_BUFFER_ALL_CPUS;
4522 }
4523 
4524 static const struct seq_operations tracer_seq_ops = {
4525 	.start		= s_start,
4526 	.next		= s_next,
4527 	.stop		= s_stop,
4528 	.show		= s_show,
4529 };
4530 
4531 /*
4532  * Note, as iter itself can be allocated and freed in different
4533  * ways, this function is only used to free its content, and not
4534  * the iterator itself. The only requirement for all the allocations
4535  * is that they must zero all fields (kzalloc), as freeing works with
4536  * either allocated content or NULL.
4537  */
4538 static void free_trace_iter_content(struct trace_iterator *iter)
4539 {
4540 	/* The fmt is either NULL, allocated or points to static_fmt_buf */
4541 	if (iter->fmt != static_fmt_buf)
4542 		kfree(iter->fmt);
4543 
4544 	kfree(iter->temp);
4545 	kfree(iter->buffer_iter);
4546 	mutex_destroy(&iter->mutex);
4547 	free_cpumask_var(iter->started);
4548 }
4549 
4550 static struct trace_iterator *
4551 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4552 {
4553 	struct trace_array *tr = inode->i_private;
4554 	struct trace_iterator *iter;
4555 	int cpu;
4556 
4557 	if (tracing_disabled)
4558 		return ERR_PTR(-ENODEV);
4559 
4560 	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4561 	if (!iter)
4562 		return ERR_PTR(-ENOMEM);
4563 
4564 	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4565 				    GFP_KERNEL);
4566 	if (!iter->buffer_iter)
4567 		goto release;
4568 
4569 	/*
4570 	 * trace_find_next_entry() may need to save off iter->ent.
4571 	 * It will place it into the iter->temp buffer. As most
4572 	 * events are less than 128 bytes, allocate a buffer of that size.
4573 	 * If one is greater, then trace_find_next_entry() will
4574 	 * allocate a new buffer to adjust for the bigger iter->ent.
4575 	 * It's not critical if it fails to get allocated here.
4576 	 */
4577 	iter->temp = kmalloc(128, GFP_KERNEL);
4578 	if (iter->temp)
4579 		iter->temp_size = 128;
4580 
4581 	/*
4582 	 * trace_event_printf() may need to modify the given format
4583 	 * string to replace %p with %px so that it shows the real address
4584 	 * instead of a hash value. However, that is only needed for event
4585 	 * tracing; other tracers may not need it. Defer the allocation
4586 	 * until it is needed.
4587 	 */
4588 	iter->fmt = NULL;
4589 	iter->fmt_size = 0;
4590 
4591 	mutex_lock(&trace_types_lock);
4592 	iter->trace = tr->current_trace;
4593 
4594 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4595 		goto fail;
4596 
4597 	iter->tr = tr;
4598 
4599 #ifdef CONFIG_TRACER_MAX_TRACE
4600 	/* Currently only the top directory has a snapshot */
4601 	if (tr->current_trace->print_max || snapshot)
4602 		iter->array_buffer = &tr->max_buffer;
4603 	else
4604 #endif
4605 		iter->array_buffer = &tr->array_buffer;
4606 	iter->snapshot = snapshot;
4607 	iter->pos = -1;
4608 	iter->cpu_file = tracing_get_cpu(inode);
4609 	mutex_init(&iter->mutex);
4610 
4611 	/* Notify the tracer early, before we stop tracing. */
4612 	if (iter->trace->open)
4613 		iter->trace->open(iter);
4614 
4615 	/* Annotate start of buffers if we had overruns */
4616 	if (ring_buffer_overruns(iter->array_buffer->buffer))
4617 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
4618 
4619 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
4620 	if (trace_clocks[tr->clock_id].in_ns)
4621 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4622 
4623 	/*
4624 	 * If pause-on-trace is enabled, then stop the trace while
4625 	 * dumping, unless this is the "snapshot" file
4626 	 */
4627 	if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4628 		tracing_stop_tr(tr);
4629 
4630 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4631 		for_each_tracing_cpu(cpu) {
4632 			iter->buffer_iter[cpu] =
4633 				ring_buffer_read_prepare(iter->array_buffer->buffer,
4634 							 cpu, GFP_KERNEL);
4635 		}
4636 		ring_buffer_read_prepare_sync();
4637 		for_each_tracing_cpu(cpu) {
4638 			ring_buffer_read_start(iter->buffer_iter[cpu]);
4639 			tracing_iter_reset(iter, cpu);
4640 		}
4641 	} else {
4642 		cpu = iter->cpu_file;
4643 		iter->buffer_iter[cpu] =
4644 			ring_buffer_read_prepare(iter->array_buffer->buffer,
4645 						 cpu, GFP_KERNEL);
4646 		ring_buffer_read_prepare_sync();
4647 		ring_buffer_read_start(iter->buffer_iter[cpu]);
4648 		tracing_iter_reset(iter, cpu);
4649 	}
4650 
4651 	mutex_unlock(&trace_types_lock);
4652 
4653 	return iter;
4654 
4655  fail:
4656 	mutex_unlock(&trace_types_lock);
4657 	free_trace_iter_content(iter);
4658 release:
4659 	seq_release_private(inode, file);
4660 	return ERR_PTR(-ENOMEM);
4661 }
4662 
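/*
 * Generic open for tracing files that do not take a trace_array
 * reference. Only checks permissions and that tracing is not disabled.
 */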
4663 int tracing_open_generic(struct inode *inode, struct file *filp)
4664 {
4665 	int ret;
4666 
4667 	ret = tracing_check_open_get_tr(NULL);
4668 	if (ret)
4669 		return ret;
4670 
4671 	filp->private_data = inode->i_private;
4672 	return 0;
4673 }
4674 
4675 bool tracing_is_disabled(void)
4676 {
4677 	return tracing_disabled ? true : false;
4678 }
4679 
4680 /*
4681  * Open and update trace_array ref count.
4682  * Must have the current trace_array passed to it.
4683  */
4684 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4685 {
4686 	struct trace_array *tr = inode->i_private;
4687 	int ret;
4688 
4689 	ret = tracing_check_open_get_tr(tr);
4690 	if (ret)
4691 		return ret;
4692 
4693 	filp->private_data = inode->i_private;
4694 
4695 	return 0;
4696 }
4697 
4698 /*
4699  * The private pointer of the inode is the trace_event_file.
4700  * Update the tr ref count associated with it.
4701  */
4702 int tracing_open_file_tr(struct inode *inode, struct file *filp)
4703 {
4704 	struct trace_event_file *file = inode->i_private;
4705 	int ret;
4706 
4707 	ret = tracing_check_open_get_tr(file->tr);
4708 	if (ret)
4709 		return ret;
4710 
4711 	mutex_lock(&event_mutex);
4712 
4713 	/* Fail if the file is marked for removal */
4714 	if (file->flags & EVENT_FILE_FL_FREED) {
4715 		trace_array_put(file->tr);
4716 		ret = -ENODEV;
4717 	} else {
4718 		event_file_get(file);
4719 	}
4720 
4721 	mutex_unlock(&event_mutex);
4722 	if (ret)
4723 		return ret;
4724 
4725 	filp->private_data = inode->i_private;
4726 
4727 	return 0;
4728 }
4729 
4730 int tracing_release_file_tr(struct inode *inode, struct file *filp)
4731 {
4732 	struct trace_event_file *file = inode->i_private;
4733 
4734 	trace_array_put(file->tr);
4735 	event_file_put(file);
4736 
4737 	return 0;
4738 }
4739 
4740 int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
4741 {
4742 	tracing_release_file_tr(inode, filp);
4743 	return single_release(inode, filp);
4744 }
4745 
4746 static int tracing_mark_open(struct inode *inode, struct file *filp)
4747 {
4748 	stream_open(inode, filp);
4749 	return tracing_open_generic_tr(inode, filp);
4750 }
4751 
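/*
 * Release for the trace file. Finishes the per-CPU ring buffer
 * iterators, lets the tracer close its state, restarts tracing if the
 * open had paused it, and frees the iterator contents.
 */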
4752 static int tracing_release(struct inode *inode, struct file *file)
4753 {
4754 	struct trace_array *tr = inode->i_private;
4755 	struct seq_file *m = file->private_data;
4756 	struct trace_iterator *iter;
4757 	int cpu;
4758 
4759 	if (!(file->f_mode & FMODE_READ)) {
4760 		trace_array_put(tr);
4761 		return 0;
4762 	}
4763 
4764 	/* Writes do not use seq_file */
4765 	iter = m->private;
4766 	mutex_lock(&trace_types_lock);
4767 
4768 	for_each_tracing_cpu(cpu) {
4769 		if (iter->buffer_iter[cpu])
4770 			ring_buffer_read_finish(iter->buffer_iter[cpu]);
4771 	}
4772 
4773 	if (iter->trace && iter->trace->close)
4774 		iter->trace->close(iter);
4775 
4776 	if (!iter->snapshot && tr->stop_count)
4777 		/* reenable tracing if it was previously enabled */
4778 		tracing_start_tr(tr);
4779 
4780 	__trace_array_put(tr);
4781 
4782 	mutex_unlock(&trace_types_lock);
4783 
4784 	free_trace_iter_content(iter);
4785 	seq_release_private(inode, file);
4786 
4787 	return 0;
4788 }
4789 
4790 int tracing_release_generic_tr(struct inode *inode, struct file *file)
4791 {
4792 	struct trace_array *tr = inode->i_private;
4793 
4794 	trace_array_put(tr);
4795 	return 0;
4796 }
4797 
4798 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4799 {
4800 	struct trace_array *tr = inode->i_private;
4801 
4802 	trace_array_put(tr);
4803 
4804 	return single_release(inode, file);
4805 }
4806 
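/*
 * Open for the trace file. An open for writing with O_TRUNC clears the
 * buffer (all CPUs or just the one backing a per_cpu file); an open for
 * reading sets up the full iterator via __tracing_open().
 */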
4807 static int tracing_open(struct inode *inode, struct file *file)
4808 {
4809 	struct trace_array *tr = inode->i_private;
4810 	struct trace_iterator *iter;
4811 	int ret;
4812 
4813 	ret = tracing_check_open_get_tr(tr);
4814 	if (ret)
4815 		return ret;
4816 
4817 	/* If this file was open for write, then erase contents */
4818 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4819 		int cpu = tracing_get_cpu(inode);
4820 		struct array_buffer *trace_buf = &tr->array_buffer;
4821 
4822 #ifdef CONFIG_TRACER_MAX_TRACE
4823 		if (tr->current_trace->print_max)
4824 			trace_buf = &tr->max_buffer;
4825 #endif
4826 
4827 		if (cpu == RING_BUFFER_ALL_CPUS)
4828 			tracing_reset_online_cpus(trace_buf);
4829 		else
4830 			tracing_reset_cpu(trace_buf, cpu);
4831 	}
4832 
4833 	if (file->f_mode & FMODE_READ) {
4834 		iter = __tracing_open(inode, file, false);
4835 		if (IS_ERR(iter))
4836 			ret = PTR_ERR(iter);
4837 		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4838 			iter->iter_flags |= TRACE_FILE_LAT_FMT;
4839 	}
4840 
4841 	if (ret < 0)
4842 		trace_array_put(tr);
4843 
4844 	return ret;
4845 }
4846 
4847 /*
4848  * Some tracers are not suitable for instance buffers.
4849  * A tracer is always available for the global array (toplevel)
4850  * or if it explicitly states that it is.
4851  */
4852 static bool
4853 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4854 {
4855 #ifdef CONFIG_TRACER_SNAPSHOT
4856 	/* arrays with mapped buffer range do not have snapshots */
4857 	if (tr->range_addr_start && t->use_max_tr)
4858 		return false;
4859 #endif
4860 	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4861 }
4862 
4863 /* Find the next tracer that this trace array may use */
4864 static struct tracer *
4865 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4866 {
4867 	while (t && !trace_ok_for_array(t, tr))
4868 		t = t->next;
4869 
4870 	return t;
4871 }
4872 
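/*
 * seq_file iteration for listing the available tracers: walk the global
 * tracer list, skipping tracers this trace array is not allowed to use.
 */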
4873 static void *
4874 t_next(struct seq_file *m, void *v, loff_t *pos)
4875 {
4876 	struct trace_array *tr = m->private;
4877 	struct tracer *t = v;
4878 
4879 	(*pos)++;
4880 
4881 	if (t)
4882 		t = get_tracer_for_array(tr, t->next);
4883 
4884 	return t;
4885 }
4886 
4887 static void *t_start(struct seq_file *m, loff_t *pos)
4888 {
4889 	struct trace_array *tr = m->private;
4890 	struct tracer *t;
4891 	loff_t l = 0;
4892 
4893 	mutex_lock(&trace_types_lock);
4894 
4895 	t = get_tracer_for_array(tr, trace_types);
4896 	for (; t && l < *pos; t = t_next(m, t, &l))
4897 			;
4898 
4899 	return t;
4900 }
4901 
4902 static void t_stop(struct seq_file *m, void *p)
4903 {
4904 	mutex_unlock(&trace_types_lock);
4905 }
4906 
4907 static int t_show(struct seq_file *m, void *v)
4908 {
4909 	struct tracer *t = v;
4910 
4911 	if (!t)
4912 		return 0;
4913 
4914 	seq_puts(m, t->name);
4915 	if (t->next)
4916 		seq_putc(m, ' ');
4917 	else
4918 		seq_putc(m, '\n');
4919 
4920 	return 0;
4921 }
4922 
4923 static const struct seq_operations show_traces_seq_ops = {
4924 	.start		= t_start,
4925 	.next		= t_next,
4926 	.stop		= t_stop,
4927 	.show		= t_show,
4928 };
4929 
4930 static int show_traces_open(struct inode *inode, struct file *file)
4931 {
4932 	struct trace_array *tr = inode->i_private;
4933 	struct seq_file *m;
4934 	int ret;
4935 
4936 	ret = tracing_check_open_get_tr(tr);
4937 	if (ret)
4938 		return ret;
4939 
4940 	ret = seq_open(file, &show_traces_seq_ops);
4941 	if (ret) {
4942 		trace_array_put(tr);
4943 		return ret;
4944 	}
4945 
4946 	m = file->private_data;
4947 	m->private = tr;
4948 
4949 	return 0;
4950 }
4951 
4952 static int tracing_seq_release(struct inode *inode, struct file *file)
4953 {
4954 	struct trace_array *tr = inode->i_private;
4955 
4956 	trace_array_put(tr);
4957 	return seq_release(inode, file);
4958 }
4959 
4960 static ssize_t
4961 tracing_write_stub(struct file *filp, const char __user *ubuf,
4962 		   size_t count, loff_t *ppos)
4963 {
4964 	return count;
4965 }
4966 
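/*
 * lseek for tracing files: defer to seq_lseek() when the file was
 * opened for reading, otherwise simply reset the position to zero.
 */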
4967 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4968 {
4969 	int ret;
4970 
4971 	if (file->f_mode & FMODE_READ)
4972 		ret = seq_lseek(file, offset, whence);
4973 	else
4974 		file->f_pos = ret = 0;
4975 
4976 	return ret;
4977 }
4978 
4979 static const struct file_operations tracing_fops = {
4980 	.open		= tracing_open,
4981 	.read		= seq_read,
4982 	.read_iter	= seq_read_iter,
4983 	.splice_read	= copy_splice_read,
4984 	.write		= tracing_write_stub,
4985 	.llseek		= tracing_lseek,
4986 	.release	= tracing_release,
4987 };
4988 
4989 static const struct file_operations show_traces_fops = {
4990 	.open		= show_traces_open,
4991 	.read		= seq_read,
4992 	.llseek		= seq_lseek,
4993 	.release	= tracing_seq_release,
4994 };
4995 
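/* Read handler for tracing_cpumask: print the current mask as a hex bitmap */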
4996 static ssize_t
4997 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4998 		     size_t count, loff_t *ppos)
4999 {
5000 	struct trace_array *tr = file_inode(filp)->i_private;
5001 	char *mask_str;
5002 	int len;
5003 
5004 	len = snprintf(NULL, 0, "%*pb\n",
5005 		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
5006 	mask_str = kmalloc(len, GFP_KERNEL);
5007 	if (!mask_str)
5008 		return -ENOMEM;
5009 
5010 	len = snprintf(mask_str, len, "%*pb\n",
5011 		       cpumask_pr_args(tr->tracing_cpumask));
5012 	if (len >= count) {
5013 		count = -EINVAL;
5014 		goto out_err;
5015 	}
5016 	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5017 
5018 out_err:
5019 	kfree(mask_str);
5020 
5021 	return count;
5022 }
5023 
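/*
 * Install a new tracing CPU mask on @tr. CPUs removed from the mask get
 * their ring buffer recording disabled (and their disabled counts
 * bumped); CPUs added get recording re-enabled. Done under tr->max_lock
 * with interrupts off so the switch is atomic with respect to tracing.
 */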
5024 int tracing_set_cpumask(struct trace_array *tr,
5025 			cpumask_var_t tracing_cpumask_new)
5026 {
5027 	int cpu;
5028 
5029 	if (!tr)
5030 		return -EINVAL;
5031 
5032 	local_irq_disable();
5033 	arch_spin_lock(&tr->max_lock);
5034 	for_each_tracing_cpu(cpu) {
5035 		/*
5036 		 * Increase/decrease the disabled counter if we are
5037 		 * about to flip a bit in the cpumask:
5038 		 */
5039 		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5040 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5041 			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5042 			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5043 #ifdef CONFIG_TRACER_MAX_TRACE
5044 			ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5045 #endif
5046 		}
5047 		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5048 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5049 			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5050 			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5051 #ifdef CONFIG_TRACER_MAX_TRACE
5052 			ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5053 #endif
5054 		}
5055 	}
5056 	arch_spin_unlock(&tr->max_lock);
5057 	local_irq_enable();
5058 
5059 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5060 
5061 	return 0;
5062 }
5063 
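/*
 * Write handler for tracing_cpumask. Accepts a standard cpumask string,
 * e.g. "echo 3 > tracing_cpumask" limits tracing to CPUs 0 and 1.
 */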
5064 static ssize_t
5065 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5066 		      size_t count, loff_t *ppos)
5067 {
5068 	struct trace_array *tr = file_inode(filp)->i_private;
5069 	cpumask_var_t tracing_cpumask_new;
5070 	int err;
5071 
5072 	if (count == 0 || count > KMALLOC_MAX_SIZE)
5073 		return -EINVAL;
5074 
5075 	if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5076 		return -ENOMEM;
5077 
5078 	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5079 	if (err)
5080 		goto err_free;
5081 
5082 	err = tracing_set_cpumask(tr, tracing_cpumask_new);
5083 	if (err)
5084 		goto err_free;
5085 
5086 	free_cpumask_var(tracing_cpumask_new);
5087 
5088 	return count;
5089 
5090 err_free:
5091 	free_cpumask_var(tracing_cpumask_new);
5092 
5093 	return err;
5094 }
5095 
5096 static const struct file_operations tracing_cpumask_fops = {
5097 	.open		= tracing_open_generic_tr,
5098 	.read		= tracing_cpumask_read,
5099 	.write		= tracing_cpumask_write,
5100 	.release	= tracing_release_generic_tr,
5101 	.llseek		= generic_file_llseek,
5102 };
5103 
5104 static int tracing_trace_options_show(struct seq_file *m, void *v)
5105 {
5106 	struct tracer_opt *trace_opts;
5107 	struct trace_array *tr = m->private;
5108 	u32 tracer_flags;
5109 	int i;
5110 
5111 	guard(mutex)(&trace_types_lock);
5112 
5113 	tracer_flags = tr->current_trace->flags->val;
5114 	trace_opts = tr->current_trace->flags->opts;
5115 
5116 	for (i = 0; trace_options[i]; i++) {
5117 		if (tr->trace_flags & (1 << i))
5118 			seq_printf(m, "%s\n", trace_options[i]);
5119 		else
5120 			seq_printf(m, "no%s\n", trace_options[i]);
5121 	}
5122 
5123 	for (i = 0; trace_opts[i].name; i++) {
5124 		if (tracer_flags & trace_opts[i].bit)
5125 			seq_printf(m, "%s\n", trace_opts[i].name);
5126 		else
5127 			seq_printf(m, "no%s\n", trace_opts[i].name);
5128 	}
5129 
5130 	return 0;
5131 }
5132 
5133 static int __set_tracer_option(struct trace_array *tr,
5134 			       struct tracer_flags *tracer_flags,
5135 			       struct tracer_opt *opts, int neg)
5136 {
5137 	struct tracer *trace = tracer_flags->trace;
5138 	int ret;
5139 
5140 	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5141 	if (ret)
5142 		return ret;
5143 
5144 	if (neg)
5145 		tracer_flags->val &= ~opts->bit;
5146 	else
5147 		tracer_flags->val |= opts->bit;
5148 	return 0;
5149 }
5150 
5151 /* Try to assign a tracer specific option */
5152 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5153 {
5154 	struct tracer *trace = tr->current_trace;
5155 	struct tracer_flags *tracer_flags = trace->flags;
5156 	struct tracer_opt *opts = NULL;
5157 	int i;
5158 
5159 	for (i = 0; tracer_flags->opts[i].name; i++) {
5160 		opts = &tracer_flags->opts[i];
5161 
5162 		if (strcmp(cmp, opts->name) == 0)
5163 			return __set_tracer_option(tr, trace->flags, opts, neg);
5164 	}
5165 
5166 	return -EINVAL;
5167 }
5168 
5169 /* Some tracers require overwrite to stay enabled */
5170 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5171 {
5172 	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5173 		return -1;
5174 
5175 	return 0;
5176 }
5177 
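/*
 * Set or clear a single trace option flag on @tr. The current tracer
 * may veto the change, and side effects (cmdline/tgid recording, fork
 * following, overwrite mode, trace_printk routing) are applied here.
 */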
5178 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5179 {
5180 	if ((mask == TRACE_ITER_RECORD_TGID) ||
5181 	    (mask == TRACE_ITER_RECORD_CMD) ||
5182 	    (mask == TRACE_ITER_TRACE_PRINTK))
5183 		lockdep_assert_held(&event_mutex);
5184 
5185 	/* do nothing if the flag is already in the requested state */
5186 	if (!!(tr->trace_flags & mask) == !!enabled)
5187 		return 0;
5188 
5189 	/* Give the tracer a chance to approve the change */
5190 	if (tr->current_trace->flag_changed)
5191 		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5192 			return -EINVAL;
5193 
5194 	if (mask == TRACE_ITER_TRACE_PRINTK) {
5195 		if (enabled) {
5196 			update_printk_trace(tr);
5197 		} else {
5198 			/*
5199 			 * The global_trace cannot clear this.
5200 			 * Its flag only gets cleared if another instance sets it.
5201 			 */
5202 			if (printk_trace == &global_trace)
5203 				return -EINVAL;
5204 			/*
5205 			 * An instance must always have it set.
5206 			 * By default, that's the global_trace instance.
5207 			 */
5208 			if (printk_trace == tr)
5209 				update_printk_trace(&global_trace);
5210 		}
5211 	}
5212 
5213 	if (enabled)
5214 		tr->trace_flags |= mask;
5215 	else
5216 		tr->trace_flags &= ~mask;
5217 
5218 	if (mask == TRACE_ITER_RECORD_CMD)
5219 		trace_event_enable_cmd_record(enabled);
5220 
5221 	if (mask == TRACE_ITER_RECORD_TGID) {
5222 
5223 		if (trace_alloc_tgid_map() < 0) {
5224 			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5225 			return -ENOMEM;
5226 		}
5227 
5228 		trace_event_enable_tgid_record(enabled);
5229 	}
5230 
5231 	if (mask == TRACE_ITER_EVENT_FORK)
5232 		trace_event_follow_fork(tr, enabled);
5233 
5234 	if (mask == TRACE_ITER_FUNC_FORK)
5235 		ftrace_pid_follow_fork(tr, enabled);
5236 
5237 	if (mask == TRACE_ITER_OVERWRITE) {
5238 		ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5239 #ifdef CONFIG_TRACER_MAX_TRACE
5240 		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5241 #endif
5242 	}
5243 
5244 	if (mask == TRACE_ITER_PRINTK) {
5245 		trace_printk_start_stop_comm(enabled);
5246 		trace_printk_control(enabled);
5247 	}
5248 
5249 	return 0;
5250 }
5251 
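/*
 * Parse one option token, optionally prefixed with "no" to clear it,
 * and apply it to @tr. Falls back to tracer-specific options when the
 * token is not one of the core trace flags.
 */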
5252 int trace_set_options(struct trace_array *tr, char *option)
5253 {
5254 	char *cmp;
5255 	int neg = 0;
5256 	int ret;
5257 	size_t orig_len = strlen(option);
5258 	int len;
5259 
5260 	cmp = strstrip(option);
5261 
5262 	len = str_has_prefix(cmp, "no");
5263 	if (len)
5264 		neg = 1;
5265 
5266 	cmp += len;
5267 
5268 	mutex_lock(&event_mutex);
5269 	mutex_lock(&trace_types_lock);
5270 
5271 	ret = match_string(trace_options, -1, cmp);
5272 	/* If no option could be set, test the specific tracer options */
5273 	if (ret < 0)
5274 		ret = set_tracer_option(tr, cmp, neg);
5275 	else
5276 		ret = set_tracer_flag(tr, 1 << ret, !neg);
5277 
5278 	mutex_unlock(&trace_types_lock);
5279 	mutex_unlock(&event_mutex);
5280 
5281 	/*
5282 	 * If the first trailing whitespace is replaced with '\0' by strstrip,
5283 	 * turn it back into a space.
5284 	 */
5285 	if (orig_len > strlen(option))
5286 		option[strlen(option)] = ' ';
5287 
5288 	return ret;
5289 }
5290 
5291 static void __init apply_trace_boot_options(void)
5292 {
5293 	char *buf = trace_boot_options_buf;
5294 	char *option;
5295 
5296 	while (true) {
5297 		option = strsep(&buf, ",");
5298 
5299 		if (!option)
5300 			break;
5301 
5302 		if (*option)
5303 			trace_set_options(&global_trace, option);
5304 
5305 		/* Put back the comma to allow this to be called again */
5306 		if (buf)
5307 			*(buf - 1) = ',';
5308 	}
5309 }
5310 
5311 static ssize_t
5312 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5313 			size_t cnt, loff_t *ppos)
5314 {
5315 	struct seq_file *m = filp->private_data;
5316 	struct trace_array *tr = m->private;
5317 	char buf[64];
5318 	int ret;
5319 
5320 	if (cnt >= sizeof(buf))
5321 		return -EINVAL;
5322 
5323 	if (copy_from_user(buf, ubuf, cnt))
5324 		return -EFAULT;
5325 
5326 	buf[cnt] = 0;
5327 
5328 	ret = trace_set_options(tr, buf);
5329 	if (ret < 0)
5330 		return ret;
5331 
5332 	*ppos += cnt;
5333 
5334 	return cnt;
5335 }
5336 
5337 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5338 {
5339 	struct trace_array *tr = inode->i_private;
5340 	int ret;
5341 
5342 	ret = tracing_check_open_get_tr(tr);
5343 	if (ret)
5344 		return ret;
5345 
5346 	ret = single_open(file, tracing_trace_options_show, inode->i_private);
5347 	if (ret < 0)
5348 		trace_array_put(tr);
5349 
5350 	return ret;
5351 }
5352 
5353 static const struct file_operations tracing_iter_fops = {
5354 	.open		= tracing_trace_options_open,
5355 	.read		= seq_read,
5356 	.llseek		= seq_lseek,
5357 	.release	= tracing_single_release_tr,
5358 	.write		= tracing_trace_options_write,
5359 };
5360 
5361 static const char readme_msg[] =
5362 	"tracing mini-HOWTO:\n\n"
5363 	"By default tracefs removes all OTH file permission bits.\n"
5364 	"When mounting tracefs an optional group id can be specified\n"
5365 	"which adds the group to every directory and file in tracefs:\n\n"
5366 	"\t e.g. mount -t tracefs [-o [gid=<gid>]] nodev /sys/kernel/tracing\n\n"
5367 	"# echo 0 > tracing_on : quick way to disable tracing\n"
5368 	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5369 	" Important files:\n"
5370 	"  trace\t\t\t- The static contents of the buffer\n"
5371 	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
5372 	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5373 	"  current_tracer\t- function and latency tracers\n"
5374 	"  available_tracers\t- list of configured tracers for current_tracer\n"
5375 	"  error_log\t- error log for failed commands (that support it)\n"
5376 	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
5377 	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
5378 	"  trace_clock\t\t- change the clock used to order events\n"
5379 	"       local:   Per cpu clock but may not be synced across CPUs\n"
5380 	"      global:   Synced across CPUs but slows tracing down.\n"
5381 	"     counter:   Not a clock, but just an increment\n"
5382 	"      uptime:   Jiffy counter from time of boot\n"
5383 	"        perf:   Same clock that perf events use\n"
5384 #ifdef CONFIG_X86_64
5385 	"     x86-tsc:   TSC cycle counter\n"
5386 #endif
5387 	"\n  timestamp_mode\t- view the mode used to timestamp events\n"
5388 	"       delta:   Delta difference against a buffer-wide timestamp\n"
5389 	"    absolute:   Absolute (standalone) timestamp\n"
5390 	"\n  trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
5391 	"\n  trace_marker_raw\t\t- Writes into this file are written as binary data into the kernel buffer\n"
5392 	"  tracing_cpumask\t- Limit which CPUs to trace\n"
5393 	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5394 	"\t\t\t  Remove sub-buffer with rmdir\n"
5395 	"  trace_options\t\t- Set format or modify how tracing happens\n"
5396 	"\t\t\t  Disable an option by prefixing 'no' to the\n"
5397 	"\t\t\t  option name\n"
5398 	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5399 #ifdef CONFIG_DYNAMIC_FTRACE
5400 	"\n  available_filter_functions - list of functions that can be filtered on\n"
5401 	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
5402 	"\t\t\t  functions\n"
5403 	"\t     accepts: func_full_name or glob-matching-pattern\n"
5404 	"\t     modules: Can select a group via module\n"
5405 	"\t      Format: :mod:<module-name>\n"
5406 	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
5407 	"\t    triggers: a command to perform when function is hit\n"
5408 	"\t      Format: <function>:<trigger>[:count]\n"
5409 	"\t     trigger: traceon, traceoff\n"
5410 	"\t\t      enable_event:<system>:<event>\n"
5411 	"\t\t      disable_event:<system>:<event>\n"
5412 #ifdef CONFIG_STACKTRACE
5413 	"\t\t      stacktrace\n"
5414 #endif
5415 #ifdef CONFIG_TRACER_SNAPSHOT
5416 	"\t\t      snapshot\n"
5417 #endif
5418 	"\t\t      dump\n"
5419 	"\t\t      cpudump\n"
5420 	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
5421 	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
5422 	"\t     The first one will disable tracing every time do_fault is hit\n"
5423 	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
5424 	"\t       The first time do_trap is hit and it disables tracing, the\n"
5425 	"\t       counter will decrement to 2. If tracing is already disabled,\n"
5426 	"\t       the counter will not decrement. It only decrements when the\n"
5427 	"\t       trigger did work\n"
5428 	"\t     To remove trigger without count:\n"
5429 	"\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
5430 	"\t     To remove trigger with a count:\n"
5431 	"\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5432 	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
5433 	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5434 	"\t    modules: Can select a group via module command :mod:\n"
5435 	"\t    Does not accept triggers\n"
5436 #endif /* CONFIG_DYNAMIC_FTRACE */
5437 #ifdef CONFIG_FUNCTION_TRACER
5438 	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5439 	"\t\t    (function)\n"
5440 	"  set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5441 	"\t\t    (function)\n"
5442 #endif
5443 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5444 	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5445 	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5446 	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5447 #endif
5448 #ifdef CONFIG_TRACER_SNAPSHOT
5449 	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
5450 	"\t\t\t  snapshot buffer. Read the contents for more\n"
5451 	"\t\t\t  information\n"
5452 #endif
5453 #ifdef CONFIG_STACK_TRACER
5454 	"  stack_trace\t\t- Shows the max stack trace when active\n"
5455 	"  stack_max_size\t- Shows current max stack size that was traced\n"
5456 	"\t\t\t  Write into this file to reset the max size (trigger a\n"
5457 	"\t\t\t  new trace)\n"
5458 #ifdef CONFIG_DYNAMIC_FTRACE
5459 	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5460 	"\t\t\t  traces\n"
5461 #endif
5462 #endif /* CONFIG_STACK_TRACER */
5463 #ifdef CONFIG_DYNAMIC_EVENTS
5464 	"  dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5465 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5466 #endif
5467 #ifdef CONFIG_KPROBE_EVENTS
5468 	"  kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5469 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5470 #endif
5471 #ifdef CONFIG_UPROBE_EVENTS
5472 	"  uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5473 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5474 #endif
5475 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
5476     defined(CONFIG_FPROBE_EVENTS)
5477 	"\t  accepts: event-definitions (one definition per line)\n"
5478 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5479 	"\t   Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5480 	"\t           r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5481 #endif
5482 #ifdef CONFIG_FPROBE_EVENTS
5483 	"\t           f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5484 	"\t           t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
5485 #endif
5486 #ifdef CONFIG_HIST_TRIGGERS
5487 	"\t           s:[synthetic/]<event> <field> [<field>]\n"
5488 #endif
5489 	"\t           e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5490 	"\t           -:[<group>/][<event>]\n"
5491 #ifdef CONFIG_KPROBE_EVENTS
5492 	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5493   "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5494 #endif
5495 #ifdef CONFIG_UPROBE_EVENTS
5496   "   place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5497 #endif
5498 	"\t     args: <name>=fetcharg[:type]\n"
5499 	"\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5500 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5501 	"\t           $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5502 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
5503 	"\t           <argname>[->field[->field|.field...]],\n"
5504 #endif
5505 #else
5506 	"\t           $stack<index>, $stack, $retval, $comm,\n"
5507 #endif
5508 	"\t           +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5509 	"\t     kernel return probes support: $retval, $arg<N>, $comm\n"
5510 	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
5511 	"\t           b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5512 	"\t           symstr, %pd/%pD, <type>\\[<array-size>\\]\n"
5513 #ifdef CONFIG_HIST_TRIGGERS
5514 	"\t    field: <stype> <name>;\n"
5515 	"\t    stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5516 	"\t           [unsigned] char/int/long\n"
5517 #endif
5518 	"\t    efield: For event probes ('e' types), the field is one of the fields\n"
5519 	"\t            of the <attached-group>/<attached-event>.\n"
5520 #endif
5521 	"  set_event\t\t- Enables events by name written into it\n"
5522 	"\t\t\t  Can enable module events via: :mod:<module>\n"
5523 	"  events/\t\t- Directory containing all trace event subsystems:\n"
5524 	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5525 	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
5526 	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5527 	"\t\t\t  events\n"
5528 	"      filter\t\t- If set, only events passing filter are traced\n"
5529 	"  events/<system>/<event>/\t- Directory containing control files for\n"
5530 	"\t\t\t  <event>:\n"
5531 	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5532 	"      filter\t\t- If set, only events passing filter are traced\n"
5533 	"      trigger\t\t- If set, a command to perform when event is hit\n"
5534 	"\t    Format: <trigger>[:count][if <filter>]\n"
5535 	"\t   trigger: traceon, traceoff\n"
5536 	"\t            enable_event:<system>:<event>\n"
5537 	"\t            disable_event:<system>:<event>\n"
5538 #ifdef CONFIG_HIST_TRIGGERS
5539 	"\t            enable_hist:<system>:<event>\n"
5540 	"\t            disable_hist:<system>:<event>\n"
5541 #endif
5542 #ifdef CONFIG_STACKTRACE
5543 	"\t\t    stacktrace\n"
5544 #endif
5545 #ifdef CONFIG_TRACER_SNAPSHOT
5546 	"\t\t    snapshot\n"
5547 #endif
5548 #ifdef CONFIG_HIST_TRIGGERS
5549 	"\t\t    hist (see below)\n"
5550 #endif
5551 	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
5552 	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
5553 	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5554 	"\t                  events/block/block_unplug/trigger\n"
5555 	"\t   The first disables tracing every time block_unplug is hit.\n"
5556 	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
5557 	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
5558 	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5559 	"\t   Like function triggers, the counter is only decremented if it\n"
5560 	"\t    enabled or disabled tracing.\n"
5561 	"\t   To remove a trigger without a count:\n"
5562 	"\t     echo '!<trigger> > <system>/<event>/trigger\n"
5563 	"\t   To remove a trigger with a count:\n"
5564 	"\t     echo '!<trigger>:0 > <system>/<event>/trigger\n"
5565 	"\t   Filters can be ignored when removing a trigger.\n"
5566 #ifdef CONFIG_HIST_TRIGGERS
5567 	"      hist trigger\t- If set, event hits are aggregated into a hash table\n"
5568 	"\t    Format: hist:keys=<field1[,field2,...]>\n"
5569 	"\t            [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5570 	"\t            [:values=<field1[,field2,...]>]\n"
5571 	"\t            [:sort=<field1[,field2,...]>]\n"
5572 	"\t            [:size=#entries]\n"
5573 	"\t            [:pause][:continue][:clear]\n"
5574 	"\t            [:name=histname1]\n"
5575 	"\t            [:nohitcount]\n"
5576 	"\t            [:<handler>.<action>]\n"
5577 	"\t            [if <filter>]\n\n"
5578 	"\t    Note, special fields can be used as well:\n"
5579 	"\t            common_timestamp - to record current timestamp\n"
5580 	"\t            common_cpu - to record the CPU the event happened on\n"
5581 	"\n"
5582 	"\t    A hist trigger variable can be:\n"
5583 	"\t        - a reference to a field e.g. x=current_timestamp,\n"
5584 	"\t        - a reference to another variable e.g. y=$x,\n"
5585 	"\t        - a numeric literal: e.g. ms_per_sec=1000,\n"
5586 	"\t        - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5587 	"\n"
5588 	"\t    hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5589 	"\t    multiplication(*) and division(/) operators. An operand can be either a\n"
5590 	"\t    variable reference, field or numeric literal.\n"
5591 	"\n"
5592 	"\t    When a matching event is hit, an entry is added to a hash\n"
5593 	"\t    table using the key(s) and value(s) named, and the value of a\n"
5594 	"\t    sum called 'hitcount' is incremented.  Keys and values\n"
5595 	"\t    correspond to fields in the event's format description.  Keys\n"
5596 	"\t    can be any field, or the special string 'common_stacktrace'.\n"
5597 	"\t    Compound keys consisting of up to two fields can be specified\n"
5598 	"\t    by the 'keys' keyword.  Values must correspond to numeric\n"
5599 	"\t    fields.  Sort keys consisting of up to two fields can be\n"
5600 	"\t    specified using the 'sort' keyword.  The sort direction can\n"
5601 	"\t    be modified by appending '.descending' or '.ascending' to a\n"
5602 	"\t    sort field.  The 'size' parameter can be used to specify more\n"
5603 	"\t    or fewer than the default 2048 entries for the hashtable size.\n"
5604 	"\t    If a hist trigger is given a name using the 'name' parameter,\n"
5605 	"\t    its histogram data will be shared with other triggers of the\n"
5606 	"\t    same name, and trigger hits will update this common data.\n\n"
5607 	"\t    Reading the 'hist' file for the event will dump the hash\n"
5608 	"\t    table in its entirety to stdout.  If there are multiple hist\n"
5609 	"\t    triggers attached to an event, there will be a table for each\n"
5610 	"\t    trigger in the output.  The table displayed for a named\n"
5611 	"\t    trigger will be the same as any other instance having the\n"
5612 	"\t    same name.  The default format used to display a given field\n"
5613 	"\t    can be modified by appending any of the following modifiers\n"
5614 	"\t    to the field name, as applicable:\n\n"
5615 	"\t            .hex        display a number as a hex value\n"
5616 	"\t            .sym        display an address as a symbol\n"
5617 	"\t            .sym-offset display an address as a symbol and offset\n"
5618 	"\t            .execname   display a common_pid as a program name\n"
5619 	"\t            .syscall    display a syscall id as a syscall name\n"
5620 	"\t            .log2       display log2 value rather than raw number\n"
5621 	"\t            .buckets=size  display values in groups of size rather than raw number\n"
5622 	"\t            .usecs      display a common_timestamp in microseconds\n"
5623 	"\t            .percent    display a number of percentage value\n"
5624 	"\t            .graph      display a bar-graph of a value\n\n"
5625 	"\t    The 'pause' parameter can be used to pause an existing hist\n"
5626 	"\t    trigger or to start a hist trigger but not log any events\n"
5627 	"\t    until told to do so.  'continue' can be used to start or\n"
5628 	"\t    restart a paused hist trigger.\n\n"
5629 	"\t    The 'clear' parameter will clear the contents of a running\n"
5630 	"\t    hist trigger and leave its current paused/active state\n"
5631 	"\t    unchanged.\n\n"
5632 	"\t    The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5633 	"\t    raw hitcount in the histogram.\n\n"
5634 	"\t    The enable_hist and disable_hist triggers can be used to\n"
5635 	"\t    have one event conditionally start and stop another event's\n"
5636 	"\t    already-attached hist trigger.  The syntax is analogous to\n"
5637 	"\t    the enable_event and disable_event triggers.\n\n"
5638 	"\t    Hist trigger handlers and actions are executed whenever a\n"
5639 	"\t    a histogram entry is added or updated.  They take the form:\n\n"
5640 	"\t        <handler>.<action>\n\n"
5641 	"\t    The available handlers are:\n\n"
5642 	"\t        onmatch(matching.event)  - invoke on addition or update\n"
5643 	"\t        onmax(var)               - invoke if var exceeds current max\n"
5644 	"\t        onchange(var)            - invoke action if var changes\n\n"
5645 	"\t    The available actions are:\n\n"
5646 	"\t        trace(<synthetic_event>,param list)  - generate synthetic event\n"
5647 	"\t        save(field,...)                      - save current event fields\n"
5648 #ifdef CONFIG_TRACER_SNAPSHOT
5649 	"\t        snapshot()                           - snapshot the trace buffer\n\n"
5650 #endif
5651 #ifdef CONFIG_SYNTH_EVENTS
5652 	"  events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5653 	"\t  Write into this file to define/undefine new synthetic events.\n"
5654 	"\t     example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
5655 #endif
5656 #endif
5657 ;
5658 
5659 static ssize_t
5660 tracing_readme_read(struct file *filp, char __user *ubuf,
5661 		       size_t cnt, loff_t *ppos)
5662 {
5663 	return simple_read_from_buffer(ubuf, cnt, ppos,
5664 					readme_msg, strlen(readme_msg));
5665 }
5666 
5667 static const struct file_operations tracing_readme_fops = {
5668 	.open		= tracing_open_generic,
5669 	.read		= tracing_readme_read,
5670 	.llseek		= generic_file_llseek,
5671 };
5672 
5673 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5674 static union trace_eval_map_item *
5675 update_eval_map(union trace_eval_map_item *ptr)
5676 {
5677 	if (!ptr->map.eval_string) {
5678 		if (ptr->tail.next) {
5679 			ptr = ptr->tail.next;
5680 			/* Set ptr to the next real item (skip head) */
5681 			ptr++;
5682 		} else
5683 			return NULL;
5684 	}
5685 	return ptr;
5686 }
5687 
5688 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5689 {
5690 	union trace_eval_map_item *ptr = v;
5691 
5692 	/*
5693 	 * Paranoid! If ptr points to end, we don't want to increment past it.
5694 	 * This really should never happen.
5695 	 */
5696 	(*pos)++;
5697 	ptr = update_eval_map(ptr);
5698 	if (WARN_ON_ONCE(!ptr))
5699 		return NULL;
5700 
5701 	ptr++;
5702 	ptr = update_eval_map(ptr);
5703 
5704 	return ptr;
5705 }
5706 
5707 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5708 {
5709 	union trace_eval_map_item *v;
5710 	loff_t l = 0;
5711 
5712 	mutex_lock(&trace_eval_mutex);
5713 
5714 	v = trace_eval_maps;
5715 	if (v)
5716 		v++;
5717 
5718 	while (v && l < *pos) {
5719 		v = eval_map_next(m, v, &l);
5720 	}
5721 
5722 	return v;
5723 }
5724 
5725 static void eval_map_stop(struct seq_file *m, void *v)
5726 {
5727 	mutex_unlock(&trace_eval_mutex);
5728 }
5729 
5730 static int eval_map_show(struct seq_file *m, void *v)
5731 {
5732 	union trace_eval_map_item *ptr = v;
5733 
5734 	seq_printf(m, "%s %ld (%s)\n",
5735 		   ptr->map.eval_string, ptr->map.eval_value,
5736 		   ptr->map.system);
5737 
5738 	return 0;
5739 }
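
/*
 * Illustrative "eval_map" output as rendered by eval_map_show() above,
 * one "<eval string> <eval value> (<system>)" line per map entry
 * (the names and values below are made-up examples):
 *
 *   HI_SOFTIRQ 0 (irq)
 *   TIMER_SOFTIRQ 1 (irq)
 */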
5740 
5741 static const struct seq_operations tracing_eval_map_seq_ops = {
5742 	.start		= eval_map_start,
5743 	.next		= eval_map_next,
5744 	.stop		= eval_map_stop,
5745 	.show		= eval_map_show,
5746 };
5747 
5748 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5749 {
5750 	int ret;
5751 
5752 	ret = tracing_check_open_get_tr(NULL);
5753 	if (ret)
5754 		return ret;
5755 
5756 	return seq_open(filp, &tracing_eval_map_seq_ops);
5757 }
5758 
5759 static const struct file_operations tracing_eval_map_fops = {
5760 	.open		= tracing_eval_map_open,
5761 	.read		= seq_read,
5762 	.llseek		= seq_lseek,
5763 	.release	= seq_release,
5764 };
5765 
5766 static inline union trace_eval_map_item *
5767 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5768 {
5769 	/* Return tail of array given the head */
5770 	return ptr + ptr->head.length + 1;
5771 }
5772 
5773 static void
5774 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5775 			   int len)
5776 {
5777 	struct trace_eval_map **stop;
5778 	struct trace_eval_map **map;
5779 	union trace_eval_map_item *map_array;
5780 	union trace_eval_map_item *ptr;
5781 
5782 	stop = start + len;
5783 
5784 	/*
5785 	 * The trace_eval_maps contains the map plus a head and tail item,
5786 	 * where the head holds the module and length of array, and the
5787 	 * tail holds a pointer to the next list.
5788 	 */
5789 	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5790 	if (!map_array) {
5791 		pr_warn("Unable to allocate trace eval mapping\n");
5792 		return;
5793 	}
5794 
5795 	guard(mutex)(&trace_eval_mutex);
5796 
5797 	if (!trace_eval_maps)
5798 		trace_eval_maps = map_array;
5799 	else {
5800 		ptr = trace_eval_maps;
5801 		for (;;) {
5802 			ptr = trace_eval_jmp_to_tail(ptr);
5803 			if (!ptr->tail.next)
5804 				break;
5805 			ptr = ptr->tail.next;
5806 
5807 		}
5808 		ptr->tail.next = map_array;
5809 	}
5810 	map_array->head.mod = mod;
5811 	map_array->head.length = len;
5812 	map_array++;
5813 
5814 	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5815 		map_array->map = **map;
5816 		map_array++;
5817 	}
5818 	memset(map_array, 0, sizeof(*map_array));
5819 }
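
/*
 * Illustrative sketch of the array built above for a module exporting
 * three eval maps (indices are relative to the allocated map_array):
 *
 *   [0]      head: { .mod = mod, .length = 3 }
 *   [1]..[3] map:  copies of the module's trace_eval_map entries
 *   [4]      tail: zeroed; its .next links to the next module's array
 *
 * trace_eval_jmp_to_tail() relies on this layout: tail = head + length + 1.
 */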
5820 
5821 static void trace_create_eval_file(struct dentry *d_tracer)
5822 {
5823 	trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
5824 			  NULL, &tracing_eval_map_fops);
5825 }
5826 
5827 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5828 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5829 static inline void trace_insert_eval_map_file(struct module *mod,
5830 			      struct trace_eval_map **start, int len) { }
5831 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5832 
5833 static void trace_insert_eval_map(struct module *mod,
5834 				  struct trace_eval_map **start, int len)
5835 {
5836 	struct trace_eval_map **map;
5837 
5838 	if (len <= 0)
5839 		return;
5840 
5841 	map = start;
5842 
5843 	trace_event_eval_update(map, len);
5844 
5845 	trace_insert_eval_map_file(mod, start, len);
5846 }
5847 
5848 static ssize_t
5849 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5850 		       size_t cnt, loff_t *ppos)
5851 {
5852 	struct trace_array *tr = filp->private_data;
5853 	char buf[MAX_TRACER_SIZE+2];
5854 	int r;
5855 
5856 	mutex_lock(&trace_types_lock);
5857 	r = sprintf(buf, "%s\n", tr->current_trace->name);
5858 	mutex_unlock(&trace_types_lock);
5859 
5860 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5861 }
5862 
5863 int tracer_init(struct tracer *t, struct trace_array *tr)
5864 {
5865 	tracing_reset_online_cpus(&tr->array_buffer);
5866 	return t->init(tr);
5867 }
5868 
5869 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
5870 {
5871 	int cpu;
5872 
5873 	for_each_tracing_cpu(cpu)
5874 		per_cpu_ptr(buf->data, cpu)->entries = val;
5875 }
5876 
5877 static void update_buffer_entries(struct array_buffer *buf, int cpu)
5878 {
5879 	if (cpu == RING_BUFFER_ALL_CPUS) {
5880 		set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
5881 	} else {
5882 		per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
5883 	}
5884 }
5885 
5886 #ifdef CONFIG_TRACER_MAX_TRACE
5887 /* resize @trace_buf's buffer to the size of @size_buf's entries */
5888 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5889 					struct array_buffer *size_buf, int cpu_id)
5890 {
5891 	int cpu, ret = 0;
5892 
5893 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
5894 		for_each_tracing_cpu(cpu) {
5895 			ret = ring_buffer_resize(trace_buf->buffer,
5896 				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5897 			if (ret < 0)
5898 				break;
5899 			per_cpu_ptr(trace_buf->data, cpu)->entries =
5900 				per_cpu_ptr(size_buf->data, cpu)->entries;
5901 		}
5902 	} else {
5903 		ret = ring_buffer_resize(trace_buf->buffer,
5904 				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5905 		if (ret == 0)
5906 			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5907 				per_cpu_ptr(size_buf->data, cpu_id)->entries;
5908 	}
5909 
5910 	return ret;
5911 }
5912 #endif /* CONFIG_TRACER_MAX_TRACE */
5913 
5914 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5915 					unsigned long size, int cpu)
5916 {
5917 	int ret;
5918 
5919 	/*
5920 	 * If kernel or user changes the size of the ring buffer
5921 	 * we use the size that was given, and we can forget about
5922 	 * expanding it later.
5923 	 */
5924 	trace_set_ring_buffer_expanded(tr);
5925 
5926 	/* May be called before buffers are initialized */
5927 	if (!tr->array_buffer.buffer)
5928 		return 0;
5929 
5930 	/* Do not allow tracing while resizing ring buffer */
5931 	tracing_stop_tr(tr);
5932 
5933 	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5934 	if (ret < 0)
5935 		goto out_start;
5936 
5937 #ifdef CONFIG_TRACER_MAX_TRACE
5938 	if (!tr->allocated_snapshot)
5939 		goto out;
5940 
5941 	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5942 	if (ret < 0) {
5943 		int r = resize_buffer_duplicate_size(&tr->array_buffer,
5944 						     &tr->array_buffer, cpu);
5945 		if (r < 0) {
5946 			/*
5947 			 * AARGH! We are left with different
5948 			 * size max buffer!!!!
5949 			 * The max buffer is our "snapshot" buffer.
5950 			 * When a tracer needs a snapshot (one of the
5951 			 * latency tracers), it swaps the max buffer
5952 			 * with the saved snapshot. We succeeded in
5953 			 * updating the size of the main buffer, but failed to
5954 			 * update the size of the max buffer. But when we tried
5955 			 * to reset the main buffer to the original size, we
5956 			 * failed there too. This is very unlikely to
5957 			 * happen, but if it does, warn and kill all
5958 			 * tracing.
5959 			 */
5960 			WARN_ON(1);
5961 			tracing_disabled = 1;
5962 		}
5963 		goto out_start;
5964 	}
5965 
5966 	update_buffer_entries(&tr->max_buffer, cpu);
5967 
5968  out:
5969 #endif /* CONFIG_TRACER_MAX_TRACE */
5970 
5971 	update_buffer_entries(&tr->array_buffer, cpu);
5972  out_start:
5973 	tracing_start_tr(tr);
5974 	return ret;
5975 }
5976 
5977 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5978 				  unsigned long size, int cpu_id)
5979 {
5980 	guard(mutex)(&trace_types_lock);
5981 
5982 	if (cpu_id != RING_BUFFER_ALL_CPUS) {
5983 		/* make sure this CPU is enabled in the mask */
5984 		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask))
5985 			return -EINVAL;
5986 	}
5987 
5988 	return __tracing_resize_ring_buffer(tr, size, cpu_id);
5989 }
5990 
5991 struct trace_scratch {
5992 	unsigned long		kaslr_addr;
5993 };
5994 
5995 static void update_last_data(struct trace_array *tr)
5996 {
5997 	struct trace_scratch *tscratch;
5998 
5999 	if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
6000 		return;
6001 
6002 	/*
6003 	 * Need to clear all CPU buffers as there cannot be events
6004 	 * from the previous boot mixed with events with this boot
6005 	 * as that will cause a confusing trace. Need to clear all
6006 	 * CPU buffers, even for those that may currently be offline.
6007 	 */
6008 	tracing_reset_all_cpus(&tr->array_buffer);
6009 
6010 	/* Using current data now */
6011 	tr->text_delta = 0;
6012 
6013 	if (!tr->scratch)
6014 		return;
6015 
6016 	tscratch = tr->scratch;
6017 
6018 	/* Set the persistent ring buffer meta data to this address */
6019 #ifdef CONFIG_RANDOMIZE_BASE
6020 	tscratch->kaslr_addr = kaslr_offset();
6021 #else
6022 	tscratch->kaslr_addr = 0;
6023 #endif
6024 	tr->flags &= ~TRACE_ARRAY_FL_LAST_BOOT;
6025 }
6026 
6027 /**
6028  * tracing_update_buffers - used by tracing facility to expand ring buffers
6029  * @tr: The tracing instance
6030  *
6031  * To save memory when tracing is never used on a system that has it
6032  * configured in, the ring buffers are initially set to a minimum size.
6033  * But once a user starts to use the tracing facility, they need to
6034  * grow to their default size.
6035  *
6036  * This function is to be called when a tracer is about to be used.
6037  */
6038 int tracing_update_buffers(struct trace_array *tr)
6039 {
6040 	int ret = 0;
6041 
6042 	mutex_lock(&trace_types_lock);
6043 
6044 	update_last_data(tr);
6045 
6046 	if (!tr->ring_buffer_expanded)
6047 		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6048 						RING_BUFFER_ALL_CPUS);
6049 	mutex_unlock(&trace_types_lock);
6050 
6051 	return ret;
6052 }
6053 
6054 struct trace_option_dentry;
6055 
6056 static void
6057 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6058 
6059 /*
6060  * Used to clear out the tracer before deletion of an instance.
6061  * Must have trace_types_lock held.
6062  */
6063 static void tracing_set_nop(struct trace_array *tr)
6064 {
6065 	if (tr->current_trace == &nop_trace)
6066 		return;
6067 
6068 	tr->current_trace->enabled--;
6069 
6070 	if (tr->current_trace->reset)
6071 		tr->current_trace->reset(tr);
6072 
6073 	tr->current_trace = &nop_trace;
6074 }
6075 
6076 static bool tracer_options_updated;
6077 
6078 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6079 {
6080 	/* Only enable if the directory has been created already. */
6081 	if (!tr->dir)
6082 		return;
6083 
6084 	/* Only create trace option files after update_tracer_options finishes */
6085 	if (!tracer_options_updated)
6086 		return;
6087 
6088 	create_trace_option_files(tr, t);
6089 }
6090 
6091 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6092 {
6093 	struct tracer *t;
6094 #ifdef CONFIG_TRACER_MAX_TRACE
6095 	bool had_max_tr;
6096 #endif
6097 	int ret;
6098 
6099 	guard(mutex)(&trace_types_lock);
6100 
6101 	update_last_data(tr);
6102 
6103 	if (!tr->ring_buffer_expanded) {
6104 		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6105 						RING_BUFFER_ALL_CPUS);
6106 		if (ret < 0)
6107 			return ret;
6108 		ret = 0;
6109 	}
6110 
6111 	for (t = trace_types; t; t = t->next) {
6112 		if (strcmp(t->name, buf) == 0)
6113 			break;
6114 	}
6115 	if (!t)
6116 		return -EINVAL;
6117 
6118 	if (t == tr->current_trace)
6119 		return 0;
6120 
6121 #ifdef CONFIG_TRACER_SNAPSHOT
6122 	if (t->use_max_tr) {
6123 		local_irq_disable();
6124 		arch_spin_lock(&tr->max_lock);
6125 		ret = tr->cond_snapshot ? -EBUSY : 0;
6126 		arch_spin_unlock(&tr->max_lock);
6127 		local_irq_enable();
6128 		if (ret)
6129 			return ret;
6130 	}
6131 #endif
6132 	/* Some tracers won't work when started from the kernel command line */
6133 	if (system_state < SYSTEM_RUNNING && t->noboot) {
6134 		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6135 			t->name);
6136 		return -EINVAL;
6137 	}
6138 
6139 	/* Some tracers are only allowed for the top level buffer */
6140 	if (!trace_ok_for_array(t, tr))
6141 		return -EINVAL;
6142 
6143 	/* If trace pipe files are being read, we can't change the tracer */
6144 	if (tr->trace_ref)
6145 		return -EBUSY;
6146 
6147 	trace_branch_disable();
6148 
6149 	tr->current_trace->enabled--;
6150 
6151 	if (tr->current_trace->reset)
6152 		tr->current_trace->reset(tr);
6153 
6154 #ifdef CONFIG_TRACER_MAX_TRACE
6155 	had_max_tr = tr->current_trace->use_max_tr;
6156 
6157 	/* Current trace needs to be nop_trace before synchronize_rcu */
6158 	tr->current_trace = &nop_trace;
6159 
6160 	if (had_max_tr && !t->use_max_tr) {
6161 		/*
6162 		 * We need to make sure that the update_max_tr sees that
6163 		 * current_trace changed to nop_trace to keep it from
6164 		 * swapping the buffers after we resize it.
6165 		 * update_max_tr() is called with interrupts disabled,
6166 		 * so a synchronize_rcu() is sufficient.
6167 		 */
6168 		synchronize_rcu();
6169 		free_snapshot(tr);
6170 		tracing_disarm_snapshot(tr);
6171 	}
6172 
6173 	if (!had_max_tr && t->use_max_tr) {
6174 		ret = tracing_arm_snapshot_locked(tr);
6175 		if (ret)
6176 			return ret;
6177 	}
6178 #else
6179 	tr->current_trace = &nop_trace;
6180 #endif
6181 
6182 	if (t->init) {
6183 		ret = tracer_init(t, tr);
6184 		if (ret) {
6185 #ifdef CONFIG_TRACER_MAX_TRACE
6186 			if (t->use_max_tr)
6187 				tracing_disarm_snapshot(tr);
6188 #endif
6189 			return ret;
6190 		}
6191 	}
6192 
6193 	tr->current_trace = t;
6194 	tr->current_trace->enabled++;
6195 	trace_branch_enable(tr);
6196 
6197 	return 0;
6198 }
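
/*
 * Illustrative usage of the "current_tracer" file, which ends up in
 * tracing_set_tracer() above:
 *
 *   # echo function > /sys/kernel/tracing/current_tracer
 *
 * Writing a name that matches no registered tracer returns -EINVAL, and
 * writing the already-active tracer is a no-op.
 */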
6199 
6200 static ssize_t
6201 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6202 			size_t cnt, loff_t *ppos)
6203 {
6204 	struct trace_array *tr = filp->private_data;
6205 	char buf[MAX_TRACER_SIZE+1];
6206 	char *name;
6207 	size_t ret;
6208 	int err;
6209 
6210 	ret = cnt;
6211 
6212 	if (cnt > MAX_TRACER_SIZE)
6213 		cnt = MAX_TRACER_SIZE;
6214 
6215 	if (copy_from_user(buf, ubuf, cnt))
6216 		return -EFAULT;
6217 
6218 	buf[cnt] = 0;
6219 
6220 	name = strim(buf);
6221 
6222 	err = tracing_set_tracer(tr, name);
6223 	if (err)
6224 		return err;
6225 
6226 	*ppos += ret;
6227 
6228 	return ret;
6229 }
6230 
6231 static ssize_t
6232 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6233 		   size_t cnt, loff_t *ppos)
6234 {
6235 	char buf[64];
6236 	int r;
6237 
6238 	r = snprintf(buf, sizeof(buf), "%ld\n",
6239 		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6240 	if (r > sizeof(buf))
6241 		r = sizeof(buf);
6242 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6243 }
6244 
6245 static ssize_t
6246 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6247 		    size_t cnt, loff_t *ppos)
6248 {
6249 	unsigned long val;
6250 	int ret;
6251 
6252 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6253 	if (ret)
6254 		return ret;
6255 
6256 	*ptr = val * 1000;
6257 
6258 	return cnt;
6259 }
6260 
6261 static ssize_t
6262 tracing_thresh_read(struct file *filp, char __user *ubuf,
6263 		    size_t cnt, loff_t *ppos)
6264 {
6265 	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6266 }
6267 
6268 static ssize_t
6269 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6270 		     size_t cnt, loff_t *ppos)
6271 {
6272 	struct trace_array *tr = filp->private_data;
6273 	int ret;
6274 
6275 	guard(mutex)(&trace_types_lock);
6276 	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6277 	if (ret < 0)
6278 		return ret;
6279 
6280 	if (tr->current_trace->update_thresh) {
6281 		ret = tr->current_trace->update_thresh(tr);
6282 		if (ret < 0)
6283 			return ret;
6284 	}
6285 
6286 	return cnt;
6287 }
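
/*
 * Illustrative usage of the "tracing_thresh" file: values are written
 * and read in microseconds, while tracing_nsecs_write() stores them
 * internally in nanoseconds:
 *
 *   # echo 100 > /sys/kernel/tracing/tracing_thresh
 *   # cat /sys/kernel/tracing/tracing_thresh
 *   100
 */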
6288 
6289 #ifdef CONFIG_TRACER_MAX_TRACE
6290 
6291 static ssize_t
6292 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6293 		     size_t cnt, loff_t *ppos)
6294 {
6295 	struct trace_array *tr = filp->private_data;
6296 
6297 	return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6298 }
6299 
6300 static ssize_t
6301 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6302 		      size_t cnt, loff_t *ppos)
6303 {
6304 	struct trace_array *tr = filp->private_data;
6305 
6306 	return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6307 }
6308 
6309 #endif
6310 
6311 static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
6312 {
6313 	if (cpu == RING_BUFFER_ALL_CPUS) {
6314 		if (cpumask_empty(tr->pipe_cpumask)) {
6315 			cpumask_setall(tr->pipe_cpumask);
6316 			return 0;
6317 		}
6318 	} else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6319 		cpumask_set_cpu(cpu, tr->pipe_cpumask);
6320 		return 0;
6321 	}
6322 	return -EBUSY;
6323 }
6324 
6325 static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
6326 {
6327 	if (cpu == RING_BUFFER_ALL_CPUS) {
6328 		WARN_ON(!cpumask_full(tr->pipe_cpumask));
6329 		cpumask_clear(tr->pipe_cpumask);
6330 	} else {
6331 		WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6332 		cpumask_clear_cpu(cpu, tr->pipe_cpumask);
6333 	}
6334 }
6335 
6336 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6337 {
6338 	struct trace_array *tr = inode->i_private;
6339 	struct trace_iterator *iter;
6340 	int cpu;
6341 	int ret;
6342 
6343 	ret = tracing_check_open_get_tr(tr);
6344 	if (ret)
6345 		return ret;
6346 
6347 	mutex_lock(&trace_types_lock);
6348 	cpu = tracing_get_cpu(inode);
6349 	ret = open_pipe_on_cpu(tr, cpu);
6350 	if (ret)
6351 		goto fail_pipe_on_cpu;
6352 
6353 	/* create a buffer to store the information to pass to userspace */
6354 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6355 	if (!iter) {
6356 		ret = -ENOMEM;
6357 		goto fail_alloc_iter;
6358 	}
6359 
6360 	trace_seq_init(&iter->seq);
6361 	iter->trace = tr->current_trace;
6362 
6363 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6364 		ret = -ENOMEM;
6365 		goto fail;
6366 	}
6367 
6368 	/* trace pipe does not show start of buffer */
6369 	cpumask_setall(iter->started);
6370 
6371 	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6372 		iter->iter_flags |= TRACE_FILE_LAT_FMT;
6373 
6374 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
6375 	if (trace_clocks[tr->clock_id].in_ns)
6376 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6377 
6378 	iter->tr = tr;
6379 	iter->array_buffer = &tr->array_buffer;
6380 	iter->cpu_file = cpu;
6381 	mutex_init(&iter->mutex);
6382 	filp->private_data = iter;
6383 
6384 	if (iter->trace->pipe_open)
6385 		iter->trace->pipe_open(iter);
6386 
6387 	nonseekable_open(inode, filp);
6388 
6389 	tr->trace_ref++;
6390 
6391 	mutex_unlock(&trace_types_lock);
6392 	return ret;
6393 
6394 fail:
6395 	kfree(iter);
6396 fail_alloc_iter:
6397 	close_pipe_on_cpu(tr, cpu);
6398 fail_pipe_on_cpu:
6399 	__trace_array_put(tr);
6400 	mutex_unlock(&trace_types_lock);
6401 	return ret;
6402 }
6403 
6404 static int tracing_release_pipe(struct inode *inode, struct file *file)
6405 {
6406 	struct trace_iterator *iter = file->private_data;
6407 	struct trace_array *tr = inode->i_private;
6408 
6409 	mutex_lock(&trace_types_lock);
6410 
6411 	tr->trace_ref--;
6412 
6413 	if (iter->trace->pipe_close)
6414 		iter->trace->pipe_close(iter);
6415 	close_pipe_on_cpu(tr, iter->cpu_file);
6416 	mutex_unlock(&trace_types_lock);
6417 
6418 	free_trace_iter_content(iter);
6419 	kfree(iter);
6420 
6421 	trace_array_put(tr);
6422 
6423 	return 0;
6424 }
6425 
6426 static __poll_t
6427 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6428 {
6429 	struct trace_array *tr = iter->tr;
6430 
6431 	/* Iterators are static; they should be either filled or empty */
6432 	if (trace_buffer_iter(iter, iter->cpu_file))
6433 		return EPOLLIN | EPOLLRDNORM;
6434 
6435 	if (tr->trace_flags & TRACE_ITER_BLOCK)
6436 		/*
6437 		 * Always select as readable when in blocking mode
6438 		 */
6439 		return EPOLLIN | EPOLLRDNORM;
6440 	else
6441 		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6442 					     filp, poll_table, iter->tr->buffer_percent);
6443 }
6444 
6445 static __poll_t
6446 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6447 {
6448 	struct trace_iterator *iter = filp->private_data;
6449 
6450 	return trace_poll(iter, filp, poll_table);
6451 }
6452 
6453 /* Must be called with iter->mutex held. */
6454 static int tracing_wait_pipe(struct file *filp)
6455 {
6456 	struct trace_iterator *iter = filp->private_data;
6457 	int ret;
6458 
6459 	while (trace_empty(iter)) {
6460 
6461 		if ((filp->f_flags & O_NONBLOCK)) {
6462 			return -EAGAIN;
6463 		}
6464 
6465 		/*
6466 		 * We block until we read something and tracing is disabled.
6467 		 * We still block if tracing is disabled, but we have never
6468 		 * read anything. This allows a user to cat this file, and
6469 		 * then enable tracing. But after we have read something,
6470 		 * we give an EOF when tracing is again disabled.
6471 		 *
6472 		 * iter->pos will be 0 if we haven't read anything.
6473 		 */
6474 		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6475 			break;
6476 
6477 		mutex_unlock(&iter->mutex);
6478 
6479 		ret = wait_on_pipe(iter, 0);
6480 
6481 		mutex_lock(&iter->mutex);
6482 
6483 		if (ret)
6484 			return ret;
6485 	}
6486 
6487 	return 1;
6488 }
6489 
6490 /*
6491  * Consumer reader.
6492  */
6493 static ssize_t
6494 tracing_read_pipe(struct file *filp, char __user *ubuf,
6495 		  size_t cnt, loff_t *ppos)
6496 {
6497 	struct trace_iterator *iter = filp->private_data;
6498 	ssize_t sret;
6499 
6500 	/*
6501 	 * Avoid more than one consumer on a single file descriptor.
6502 	 * This is just a matter of trace coherency; the ring buffer itself
6503 	 * is protected.
6504 	 */
6505 	guard(mutex)(&iter->mutex);
6506 
6507 	/* return any leftover data */
6508 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6509 	if (sret != -EBUSY)
6510 		return sret;
6511 
6512 	trace_seq_init(&iter->seq);
6513 
6514 	if (iter->trace->read) {
6515 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6516 		if (sret)
6517 			return sret;
6518 	}
6519 
6520 waitagain:
6521 	sret = tracing_wait_pipe(filp);
6522 	if (sret <= 0)
6523 		return sret;
6524 
6525 	/* stop when tracing is finished */
6526 	if (trace_empty(iter))
6527 		return 0;
6528 
6529 	if (cnt >= TRACE_SEQ_BUFFER_SIZE)
6530 		cnt = TRACE_SEQ_BUFFER_SIZE - 1;
6531 
6532 	/* reset all but tr, trace, and overruns */
6533 	trace_iterator_reset(iter);
6534 	cpumask_clear(iter->started);
6535 	trace_seq_init(&iter->seq);
6536 
6537 	trace_event_read_lock();
6538 	trace_access_lock(iter->cpu_file);
6539 	while (trace_find_next_entry_inc(iter) != NULL) {
6540 		enum print_line_t ret;
6541 		int save_len = iter->seq.seq.len;
6542 
6543 		ret = print_trace_line(iter);
6544 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
6545 			/*
6546 			 * If one print_trace_line() fills the entire trace_seq in one shot,
6547 			 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6548 			 * In this case, we need to consume it; otherwise, the loop will peek
6549 			 * this event again next time, resulting in an infinite loop.
6550 			 */
6551 			if (save_len == 0) {
6552 				iter->seq.full = 0;
6553 				trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6554 				trace_consume(iter);
6555 				break;
6556 			}
6557 
6558 			/* In other cases, don't print partial lines */
6559 			iter->seq.seq.len = save_len;
6560 			break;
6561 		}
6562 		if (ret != TRACE_TYPE_NO_CONSUME)
6563 			trace_consume(iter);
6564 
6565 		if (trace_seq_used(&iter->seq) >= cnt)
6566 			break;
6567 
6568 		/*
6569 		 * Setting the full flag means we reached the trace_seq buffer
6570 		 * size and should have left via the partial-output condition above.
6571 		 * One of the trace_seq_* functions is not being used properly.
6572 		 */
6573 		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6574 			  iter->ent->type);
6575 	}
6576 	trace_access_unlock(iter->cpu_file);
6577 	trace_event_read_unlock();
6578 
6579 	/* Now copy what we have to the user */
6580 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6581 	if (iter->seq.readpos >= trace_seq_used(&iter->seq))
6582 		trace_seq_init(&iter->seq);
6583 
6584 	/*
6585 	 * If there was nothing to send to user, in spite of consuming trace
6586 	 * entries, go back to wait for more entries.
6587 	 */
6588 	if (sret == -EBUSY)
6589 		goto waitagain;
6590 
6591 	return sret;
6592 }
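
/*
 * Illustrative consumer usage: "cat /sys/kernel/tracing/trace_pipe"
 * blocks in tracing_read_pipe() until entries are available and
 * consumes them as they are printed, unlike the "trace" file, which
 * leaves the buffer contents in place.
 */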
6593 
6594 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6595 				     unsigned int idx)
6596 {
6597 	__free_page(spd->pages[idx]);
6598 }
6599 
6600 static size_t
6601 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6602 {
6603 	size_t count;
6604 	int save_len;
6605 	int ret;
6606 
6607 	/* Seq buffer is page-sized, exactly what we need. */
6608 	for (;;) {
6609 		save_len = iter->seq.seq.len;
6610 		ret = print_trace_line(iter);
6611 
6612 		if (trace_seq_has_overflowed(&iter->seq)) {
6613 			iter->seq.seq.len = save_len;
6614 			break;
6615 		}
6616 
6617 		/*
6618 		 * This should not be hit, because it should only
6619 		 * be set if the iter->seq overflowed. But check it
6620 		 * anyway to be safe.
6621 		 */
6622 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
6623 			iter->seq.seq.len = save_len;
6624 			break;
6625 		}
6626 
6627 		count = trace_seq_used(&iter->seq) - save_len;
6628 		if (rem < count) {
6629 			rem = 0;
6630 			iter->seq.seq.len = save_len;
6631 			break;
6632 		}
6633 
6634 		if (ret != TRACE_TYPE_NO_CONSUME)
6635 			trace_consume(iter);
6636 		rem -= count;
6637 		if (!trace_find_next_entry_inc(iter))	{
6638 			rem = 0;
6639 			iter->ent = NULL;
6640 			break;
6641 		}
6642 	}
6643 
6644 	return rem;
6645 }
6646 
6647 static ssize_t tracing_splice_read_pipe(struct file *filp,
6648 					loff_t *ppos,
6649 					struct pipe_inode_info *pipe,
6650 					size_t len,
6651 					unsigned int flags)
6652 {
6653 	struct page *pages_def[PIPE_DEF_BUFFERS];
6654 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
6655 	struct trace_iterator *iter = filp->private_data;
6656 	struct splice_pipe_desc spd = {
6657 		.pages		= pages_def,
6658 		.partial	= partial_def,
6659 		.nr_pages	= 0, /* This gets updated below. */
6660 		.nr_pages_max	= PIPE_DEF_BUFFERS,
6661 		.ops		= &default_pipe_buf_ops,
6662 		.spd_release	= tracing_spd_release_pipe,
6663 	};
6664 	ssize_t ret;
6665 	size_t rem;
6666 	unsigned int i;
6667 
6668 	if (splice_grow_spd(pipe, &spd))
6669 		return -ENOMEM;
6670 
6671 	mutex_lock(&iter->mutex);
6672 
6673 	if (iter->trace->splice_read) {
6674 		ret = iter->trace->splice_read(iter, filp,
6675 					       ppos, pipe, len, flags);
6676 		if (ret)
6677 			goto out_err;
6678 	}
6679 
6680 	ret = tracing_wait_pipe(filp);
6681 	if (ret <= 0)
6682 		goto out_err;
6683 
6684 	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6685 		ret = -EFAULT;
6686 		goto out_err;
6687 	}
6688 
6689 	trace_event_read_lock();
6690 	trace_access_lock(iter->cpu_file);
6691 
6692 	/* Fill as many pages as possible. */
6693 	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6694 		spd.pages[i] = alloc_page(GFP_KERNEL);
6695 		if (!spd.pages[i])
6696 			break;
6697 
6698 		rem = tracing_fill_pipe_page(rem, iter);
6699 
6700 		/* Copy the data into the page, so we can start over. */
6701 		ret = trace_seq_to_buffer(&iter->seq,
6702 					  page_address(spd.pages[i]),
6703 					  trace_seq_used(&iter->seq));
6704 		if (ret < 0) {
6705 			__free_page(spd.pages[i]);
6706 			break;
6707 		}
6708 		spd.partial[i].offset = 0;
6709 		spd.partial[i].len = trace_seq_used(&iter->seq);
6710 
6711 		trace_seq_init(&iter->seq);
6712 	}
6713 
6714 	trace_access_unlock(iter->cpu_file);
6715 	trace_event_read_unlock();
6716 	mutex_unlock(&iter->mutex);
6717 
6718 	spd.nr_pages = i;
6719 
6720 	if (i)
6721 		ret = splice_to_pipe(pipe, &spd);
6722 	else
6723 		ret = 0;
6724 out:
6725 	splice_shrink_spd(&spd);
6726 	return ret;
6727 
6728 out_err:
6729 	mutex_unlock(&iter->mutex);
6730 	goto out;
6731 }
6732 
6733 static ssize_t
6734 tracing_entries_read(struct file *filp, char __user *ubuf,
6735 		     size_t cnt, loff_t *ppos)
6736 {
6737 	struct inode *inode = file_inode(filp);
6738 	struct trace_array *tr = inode->i_private;
6739 	int cpu = tracing_get_cpu(inode);
6740 	char buf[64];
6741 	int r = 0;
6742 	ssize_t ret;
6743 
6744 	mutex_lock(&trace_types_lock);
6745 
6746 	if (cpu == RING_BUFFER_ALL_CPUS) {
6747 		int cpu, buf_size_same;
6748 		unsigned long size;
6749 
6750 		size = 0;
6751 		buf_size_same = 1;
6752 		/* check if all CPU sizes are the same */
6753 		for_each_tracing_cpu(cpu) {
6754 			/* fill in the size from first enabled cpu */
6755 			if (size == 0)
6756 				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6757 			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6758 				buf_size_same = 0;
6759 				break;
6760 			}
6761 		}
6762 
6763 		if (buf_size_same) {
6764 			if (!tr->ring_buffer_expanded)
6765 				r = sprintf(buf, "%lu (expanded: %lu)\n",
6766 					    size >> 10,
6767 					    trace_buf_size >> 10);
6768 			else
6769 				r = sprintf(buf, "%lu\n", size >> 10);
6770 		} else
6771 			r = sprintf(buf, "X\n");
6772 	} else
6773 		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6774 
6775 	mutex_unlock(&trace_types_lock);
6776 
6777 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6778 	return ret;
6779 }
6780 
6781 static ssize_t
6782 tracing_entries_write(struct file *filp, const char __user *ubuf,
6783 		      size_t cnt, loff_t *ppos)
6784 {
6785 	struct inode *inode = file_inode(filp);
6786 	struct trace_array *tr = inode->i_private;
6787 	unsigned long val;
6788 	int ret;
6789 
6790 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6791 	if (ret)
6792 		return ret;
6793 
6794 	/* must have at least 1 entry */
6795 	if (!val)
6796 		return -EINVAL;
6797 
6798 	/* value is in KB */
6799 	val <<= 10;
6800 	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6801 	if (ret < 0)
6802 		return ret;
6803 
6804 	*ppos += cnt;
6805 
6806 	return cnt;
6807 }
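
/*
 * Illustrative usage of the "buffer_size_kb" file handled by the two
 * functions above (the value is in KiB and applies per CPU):
 *
 *   # echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *   # cat /sys/kernel/tracing/buffer_size_kb
 *   4096
 *
 * Reading shows "<size> (expanded: <default>)" while the buffer is
 * still at its boot-time minimum, and "X" if the per-CPU sizes differ.
 */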
6808 
6809 static ssize_t
6810 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6811 				size_t cnt, loff_t *ppos)
6812 {
6813 	struct trace_array *tr = filp->private_data;
6814 	char buf[64];
6815 	int r, cpu;
6816 	unsigned long size = 0, expanded_size = 0;
6817 
6818 	mutex_lock(&trace_types_lock);
6819 	for_each_tracing_cpu(cpu) {
6820 		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6821 		if (!tr->ring_buffer_expanded)
6822 			expanded_size += trace_buf_size >> 10;
6823 	}
6824 	if (tr->ring_buffer_expanded)
6825 		r = sprintf(buf, "%lu\n", size);
6826 	else
6827 		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6828 	mutex_unlock(&trace_types_lock);
6829 
6830 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6831 }
6832 
6833 static ssize_t
6834 tracing_last_boot_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
6835 {
6836 	struct trace_array *tr = filp->private_data;
6837 	struct trace_scratch *tscratch = tr->scratch;
6838 	struct seq_buf seq;
6839 	char buf[64];
6840 
6841 	seq_buf_init(&seq, buf, 64);
6842 
6843 	/*
6844 	 * Do not leak the KASLR address. This only shows the KASLR address of
6845 	 * the last boot. When the ring buffer is started, the LAST_BOOT
6846 	 * flag gets cleared, and this should only report "current".
6847 	 * Otherwise it shows the KASLR address from the previous boot which
6848 	 * should not be the same as the current boot.
6849 	 */
6850 	if (tscratch && (tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
6851 		seq_buf_printf(&seq, "%lx\t[kernel]\n", tscratch->kaslr_addr);
6852 	else
6853 		seq_buf_puts(&seq, "# Current\n");
6854 
6855 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, seq_buf_used(&seq));
6856 }
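
/*
 * Illustrative "last_boot_info" output: the KASLR offset of the
 * previous boot while the buffer still holds last-boot data, e.g.
 *
 *   3c000000	[kernel]
 *
 * or "# Current" once the LAST_BOOT flag has been cleared
 * (the offset value above is made up).
 */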
6857 
6858 static int tracing_buffer_meta_open(struct inode *inode, struct file *filp)
6859 {
6860 	struct trace_array *tr = inode->i_private;
6861 	int cpu = tracing_get_cpu(inode);
6862 	int ret;
6863 
6864 	ret = tracing_check_open_get_tr(tr);
6865 	if (ret)
6866 		return ret;
6867 
6868 	ret = ring_buffer_meta_seq_init(filp, tr->array_buffer.buffer, cpu);
6869 	if (ret < 0)
6870 		__trace_array_put(tr);
6871 	return ret;
6872 }
6873 
6874 static ssize_t
6875 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6876 			  size_t cnt, loff_t *ppos)
6877 {
6878 	/*
6879 	 * There is no need to read what the user has written; this function
6880 	 * exists just to make sure that there is no error when "echo" is used.
6881 	 */
6882 
6883 	*ppos += cnt;
6884 
6885 	return cnt;
6886 }
6887 
6888 static int
6889 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6890 {
6891 	struct trace_array *tr = inode->i_private;
6892 
6893 	/* disable tracing ? */
6894 	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6895 		tracer_tracing_off(tr);
6896 	/* resize the ring buffer to 0 */
6897 	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6898 
6899 	trace_array_put(tr);
6900 
6901 	return 0;
6902 }
6903 
6904 #define TRACE_MARKER_MAX_SIZE		4096
6905 
6906 static ssize_t
6907 tracing_mark_write(struct file *filp, const char __user *ubuf,
6908 					size_t cnt, loff_t *fpos)
6909 {
6910 	struct trace_array *tr = filp->private_data;
6911 	struct ring_buffer_event *event;
6912 	enum event_trigger_type tt = ETT_NONE;
6913 	struct trace_buffer *buffer;
6914 	struct print_entry *entry;
6915 	int meta_size;
6916 	ssize_t written;
6917 	size_t size;
6918 	int len;
6919 
6920 /* Used in tracing_mark_raw_write() as well */
6921 #define FAULTED_STR "<faulted>"
6922 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6923 
6924 	if (tracing_disabled)
6925 		return -EINVAL;
6926 
6927 	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6928 		return -EINVAL;
6929 
6930 	if ((ssize_t)cnt < 0)
6931 		return -EINVAL;
6932 
6933 	if (cnt > TRACE_MARKER_MAX_SIZE)
6934 		cnt = TRACE_MARKER_MAX_SIZE;
6935 
6936 	meta_size = sizeof(*entry) + 2;  /* add '\0' and possible '\n' */
6937  again:
6938 	size = cnt + meta_size;
6939 
6940 	/* If less than "<faulted>", then make sure we can still add that */
6941 	if (cnt < FAULTED_SIZE)
6942 		size += FAULTED_SIZE - cnt;
6943 
6944 	buffer = tr->array_buffer.buffer;
6945 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6946 					    tracing_gen_ctx());
6947 	if (unlikely(!event)) {
6948 		/*
6949 		 * If the size was greater than what was allowed, then
6950 		 * make it smaller and try again.
6951 		 */
6952 		if (size > ring_buffer_max_event_size(buffer)) {
6953 			/* With cnt < FAULTED_SIZE, size should never be bigger than max */
6954 			if (WARN_ON_ONCE(cnt < FAULTED_SIZE))
6955 				return -EBADF;
6956 			cnt = ring_buffer_max_event_size(buffer) - meta_size;
6957 			/* The above should only happen once */
6958 			if (WARN_ON_ONCE(cnt + meta_size == size))
6959 				return -EBADF;
6960 			goto again;
6961 		}
6962 
6963 		/* Ring buffer disabled, return as if not open for write */
6964 		return -EBADF;
6965 	}
6966 
6967 	entry = ring_buffer_event_data(event);
6968 	entry->ip = _THIS_IP_;
6969 
6970 	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6971 	if (len) {
6972 		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6973 		cnt = FAULTED_SIZE;
6974 		written = -EFAULT;
6975 	} else
6976 		written = cnt;
6977 
6978 	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6979 		/* do not add \n before testing triggers, but add \0 */
6980 		entry->buf[cnt] = '\0';
6981 		tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
6982 	}
6983 
6984 	if (entry->buf[cnt - 1] != '\n') {
6985 		entry->buf[cnt] = '\n';
6986 		entry->buf[cnt + 1] = '\0';
6987 	} else
6988 		entry->buf[cnt] = '\0';
6989 
6990 	if (static_branch_unlikely(&trace_marker_exports_enabled))
6991 		ftrace_exports(event, TRACE_EXPORT_MARKER);
6992 	__buffer_unlock_commit(buffer, event);
6993 
6994 	if (tt)
6995 		event_triggers_post_call(tr->trace_marker_file, tt);
6996 
6997 	return written;
6998 }
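
/*
 * Illustrative usage of the "trace_marker" file: a string written to it
 * (at most TRACE_MARKER_MAX_SIZE bytes) is recorded as a TRACE_PRINT
 * event in the ring buffer, e.g.
 *
 *   # echo "hello from user space" > /sys/kernel/tracing/trace_marker
 */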
6999 
7000 static ssize_t
7001 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7002 					size_t cnt, loff_t *fpos)
7003 {
7004 	struct trace_array *tr = filp->private_data;
7005 	struct ring_buffer_event *event;
7006 	struct trace_buffer *buffer;
7007 	struct raw_data_entry *entry;
7008 	ssize_t written;
7009 	int size;
7010 	int len;
7011 
7012 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7013 
7014 	if (tracing_disabled)
7015 		return -EINVAL;
7016 
7017 	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7018 		return -EINVAL;
7019 
7020 	/* The marker must at least have a tag id */
7021 	if (cnt < sizeof(unsigned int))
7022 		return -EINVAL;
7023 
7024 	size = sizeof(*entry) + cnt;
7025 	if (cnt < FAULT_SIZE_ID)
7026 		size += FAULT_SIZE_ID - cnt;
7027 
7028 	buffer = tr->array_buffer.buffer;
7029 
7030 	if (size > ring_buffer_max_event_size(buffer))
7031 		return -EINVAL;
7032 
7033 	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7034 					    tracing_gen_ctx());
7035 	if (!event)
7036 		/* Ring buffer disabled, return as if not open for write */
7037 		return -EBADF;
7038 
7039 	entry = ring_buffer_event_data(event);
7040 
7041 	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7042 	if (len) {
7043 		entry->id = -1;
7044 		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7045 		written = -EFAULT;
7046 	} else
7047 		written = cnt;
7048 
7049 	__buffer_unlock_commit(buffer, event);
7050 
7051 	return written;
7052 }
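
/*
 * Illustrative user-space sketch for "trace_marker_raw": the write must
 * begin with a 4-byte tag id, followed by arbitrary payload bytes (the
 * id and path below are just an example):
 *
 *   unsigned int buf[2] = { 0x1234, 42 };
 *   int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *   write(fd, buf, sizeof(buf));
 */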
7053 
7054 static int tracing_clock_show(struct seq_file *m, void *v)
7055 {
7056 	struct trace_array *tr = m->private;
7057 	int i;
7058 
7059 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7060 		seq_printf(m,
7061 			"%s%s%s%s", i ? " " : "",
7062 			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7063 			i == tr->clock_id ? "]" : "");
7064 	seq_putc(m, '\n');
7065 
7066 	return 0;
7067 }
7068 
7069 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7070 {
7071 	int i;
7072 
7073 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7074 		if (strcmp(trace_clocks[i].name, clockstr) == 0)
7075 			break;
7076 	}
7077 	if (i == ARRAY_SIZE(trace_clocks))
7078 		return -EINVAL;
7079 
7080 	mutex_lock(&trace_types_lock);
7081 
7082 	tr->clock_id = i;
7083 
7084 	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7085 
7086 	/*
7087 	 * New clock may not be consistent with the previous clock.
7088 	 * Reset the buffer so that it doesn't have incomparable timestamps.
7089 	 */
7090 	tracing_reset_online_cpus(&tr->array_buffer);
7091 
7092 #ifdef CONFIG_TRACER_MAX_TRACE
7093 	if (tr->max_buffer.buffer)
7094 		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7095 	tracing_reset_online_cpus(&tr->max_buffer);
7096 #endif
7097 
7098 	mutex_unlock(&trace_types_lock);
7099 
7100 	return 0;
7101 }
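
/*
 * Illustrative usage of the "trace_clock" file: reading lists the
 * available clocks with the active one in brackets, and writing a name
 * switches clocks (resetting the buffers as noted above). The exact
 * list depends on the kernel, e.g.
 *
 *   # cat /sys/kernel/tracing/trace_clock
 *   [local] global counter uptime perf mono mono_raw boot
 *   # echo mono > /sys/kernel/tracing/trace_clock
 */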
7102 
7103 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7104 				   size_t cnt, loff_t *fpos)
7105 {
7106 	struct seq_file *m = filp->private_data;
7107 	struct trace_array *tr = m->private;
7108 	char buf[64];
7109 	const char *clockstr;
7110 	int ret;
7111 
7112 	if (cnt >= sizeof(buf))
7113 		return -EINVAL;
7114 
7115 	if (copy_from_user(buf, ubuf, cnt))
7116 		return -EFAULT;
7117 
7118 	buf[cnt] = 0;
7119 
7120 	clockstr = strstrip(buf);
7121 
7122 	ret = tracing_set_clock(tr, clockstr);
7123 	if (ret)
7124 		return ret;
7125 
7126 	*fpos += cnt;
7127 
7128 	return cnt;
7129 }
7130 
7131 static int tracing_clock_open(struct inode *inode, struct file *file)
7132 {
7133 	struct trace_array *tr = inode->i_private;
7134 	int ret;
7135 
7136 	ret = tracing_check_open_get_tr(tr);
7137 	if (ret)
7138 		return ret;
7139 
7140 	ret = single_open(file, tracing_clock_show, inode->i_private);
7141 	if (ret < 0)
7142 		trace_array_put(tr);
7143 
7144 	return ret;
7145 }
7146 
7147 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7148 {
7149 	struct trace_array *tr = m->private;
7150 
7151 	mutex_lock(&trace_types_lock);
7152 
7153 	if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7154 		seq_puts(m, "delta [absolute]\n");
7155 	else
7156 		seq_puts(m, "[delta] absolute\n");
7157 
7158 	mutex_unlock(&trace_types_lock);
7159 
7160 	return 0;
7161 }
7162 
7163 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7164 {
7165 	struct trace_array *tr = inode->i_private;
7166 	int ret;
7167 
7168 	ret = tracing_check_open_get_tr(tr);
7169 	if (ret)
7170 		return ret;
7171 
7172 	ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7173 	if (ret < 0)
7174 		trace_array_put(tr);
7175 
7176 	return ret;
7177 }
7178 
7179 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7180 {
7181 	if (rbe == this_cpu_read(trace_buffered_event))
7182 		return ring_buffer_time_stamp(buffer);
7183 
7184 	return ring_buffer_event_time_stamp(buffer, rbe);
7185 }
7186 
7187 /*
7188  * Set or disable using the per-CPU trace_buffered_event when possible.
7189  */
7190 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7191 {
7192 	guard(mutex)(&trace_types_lock);
7193 
7194 	if (set && tr->no_filter_buffering_ref++)
7195 		return 0;
7196 
7197 	if (!set) {
7198 		if (WARN_ON_ONCE(!tr->no_filter_buffering_ref))
7199 			return -EINVAL;
7200 
7201 		--tr->no_filter_buffering_ref;
7202 	}
7203 
7204 	return 0;
7205 }
7206 
7207 struct ftrace_buffer_info {
7208 	struct trace_iterator	iter;
7209 	void			*spare;
7210 	unsigned int		spare_cpu;
7211 	unsigned int		spare_size;
7212 	unsigned int		read;
7213 };
7214 
7215 #ifdef CONFIG_TRACER_SNAPSHOT
7216 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7217 {
7218 	struct trace_array *tr = inode->i_private;
7219 	struct trace_iterator *iter;
7220 	struct seq_file *m;
7221 	int ret;
7222 
7223 	ret = tracing_check_open_get_tr(tr);
7224 	if (ret)
7225 		return ret;
7226 
7227 	if (file->f_mode & FMODE_READ) {
7228 		iter = __tracing_open(inode, file, true);
7229 		if (IS_ERR(iter))
7230 			ret = PTR_ERR(iter);
7231 	} else {
7232 		/* Writes still need the seq_file to hold the private data */
7233 		ret = -ENOMEM;
7234 		m = kzalloc(sizeof(*m), GFP_KERNEL);
7235 		if (!m)
7236 			goto out;
7237 		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7238 		if (!iter) {
7239 			kfree(m);
7240 			goto out;
7241 		}
7242 		ret = 0;
7243 
7244 		iter->tr = tr;
7245 		iter->array_buffer = &tr->max_buffer;
7246 		iter->cpu_file = tracing_get_cpu(inode);
7247 		m->private = iter;
7248 		file->private_data = m;
7249 	}
7250 out:
7251 	if (ret < 0)
7252 		trace_array_put(tr);
7253 
7254 	return ret;
7255 }
7256 
7257 static void tracing_swap_cpu_buffer(void *tr)
7258 {
7259 	update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
7260 }
7261 
7262 static ssize_t
7263 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7264 		       loff_t *ppos)
7265 {
7266 	struct seq_file *m = filp->private_data;
7267 	struct trace_iterator *iter = m->private;
7268 	struct trace_array *tr = iter->tr;
7269 	unsigned long val;
7270 	int ret;
7271 
7272 	ret = tracing_update_buffers(tr);
7273 	if (ret < 0)
7274 		return ret;
7275 
7276 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7277 	if (ret)
7278 		return ret;
7279 
7280 	guard(mutex)(&trace_types_lock);
7281 
7282 	if (tr->current_trace->use_max_tr)
7283 		return -EBUSY;
7284 
7285 	local_irq_disable();
7286 	arch_spin_lock(&tr->max_lock);
7287 	if (tr->cond_snapshot)
7288 		ret = -EBUSY;
7289 	arch_spin_unlock(&tr->max_lock);
7290 	local_irq_enable();
7291 	if (ret)
7292 		return ret;
7293 
7294 	switch (val) {
7295 	case 0:
7296 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
7297 			return -EINVAL;
7298 		if (tr->allocated_snapshot)
7299 			free_snapshot(tr);
7300 		break;
7301 	case 1:
7302 /* Only allow per-cpu swap if the ring buffer supports it */
7303 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7304 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
7305 			return -EINVAL;
7306 #endif
7307 		if (tr->allocated_snapshot)
7308 			ret = resize_buffer_duplicate_size(&tr->max_buffer,
7309 					&tr->array_buffer, iter->cpu_file);
7310 
7311 		ret = tracing_arm_snapshot_locked(tr);
7312 		if (ret)
7313 			return ret;
7314 
7315 		/* Now, we're going to swap */
7316 		if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7317 			local_irq_disable();
7318 			update_max_tr(tr, current, smp_processor_id(), NULL);
7319 			local_irq_enable();
7320 		} else {
7321 			smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7322 						 (void *)tr, 1);
7323 		}
7324 		tracing_disarm_snapshot(tr);
7325 		break;
7326 	default:
7327 		if (tr->allocated_snapshot) {
7328 			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7329 				tracing_reset_online_cpus(&tr->max_buffer);
7330 			else
7331 				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7332 		}
7333 		break;
7334 	}
7335 
7336 	if (ret >= 0) {
7337 		*ppos += cnt;
7338 		ret = cnt;
7339 	}
7340 
7341 	return ret;
7342 }
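
/*
 * Illustrative usage of the "snapshot" file, matching the switch above:
 *
 *   # echo 0 > snapshot    - free the snapshot buffer
 *   # echo 1 > snapshot    - allocate (if needed) and take a snapshot
 *   # echo 2 > snapshot    - clear the snapshot contents without freeing
 */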
7343 
7344 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7345 {
7346 	struct seq_file *m = file->private_data;
7347 	int ret;
7348 
7349 	ret = tracing_release(inode, file);
7350 
7351 	if (file->f_mode & FMODE_READ)
7352 		return ret;
7353 
7354 	/* If write only, the seq_file is just a stub */
7355 	if (m)
7356 		kfree(m->private);
7357 	kfree(m);
7358 
7359 	return 0;
7360 }
7361 
7362 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7363 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7364 				    size_t count, loff_t *ppos);
7365 static int tracing_buffers_release(struct inode *inode, struct file *file);
7366 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7367 		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7368 
7369 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7370 {
7371 	struct ftrace_buffer_info *info;
7372 	int ret;
7373 
7374 	/* The following checks for tracefs lockdown */
7375 	ret = tracing_buffers_open(inode, filp);
7376 	if (ret < 0)
7377 		return ret;
7378 
7379 	info = filp->private_data;
7380 
7381 	if (info->iter.trace->use_max_tr) {
7382 		tracing_buffers_release(inode, filp);
7383 		return -EBUSY;
7384 	}
7385 
7386 	info->iter.snapshot = true;
7387 	info->iter.array_buffer = &info->iter.tr->max_buffer;
7388 
7389 	return ret;
7390 }
7391 
7392 #endif /* CONFIG_TRACER_SNAPSHOT */
7393 
7394 
7395 static const struct file_operations tracing_thresh_fops = {
7396 	.open		= tracing_open_generic,
7397 	.read		= tracing_thresh_read,
7398 	.write		= tracing_thresh_write,
7399 	.llseek		= generic_file_llseek,
7400 };
7401 
7402 #ifdef CONFIG_TRACER_MAX_TRACE
7403 static const struct file_operations tracing_max_lat_fops = {
7404 	.open		= tracing_open_generic_tr,
7405 	.read		= tracing_max_lat_read,
7406 	.write		= tracing_max_lat_write,
7407 	.llseek		= generic_file_llseek,
7408 	.release	= tracing_release_generic_tr,
7409 };
7410 #endif
7411 
7412 static const struct file_operations set_tracer_fops = {
7413 	.open		= tracing_open_generic_tr,
7414 	.read		= tracing_set_trace_read,
7415 	.write		= tracing_set_trace_write,
7416 	.llseek		= generic_file_llseek,
7417 	.release	= tracing_release_generic_tr,
7418 };
7419 
7420 static const struct file_operations tracing_pipe_fops = {
7421 	.open		= tracing_open_pipe,
7422 	.poll		= tracing_poll_pipe,
7423 	.read		= tracing_read_pipe,
7424 	.splice_read	= tracing_splice_read_pipe,
7425 	.release	= tracing_release_pipe,
7426 };
7427 
7428 static const struct file_operations tracing_entries_fops = {
7429 	.open		= tracing_open_generic_tr,
7430 	.read		= tracing_entries_read,
7431 	.write		= tracing_entries_write,
7432 	.llseek		= generic_file_llseek,
7433 	.release	= tracing_release_generic_tr,
7434 };
7435 
7436 static const struct file_operations tracing_buffer_meta_fops = {
7437 	.open		= tracing_buffer_meta_open,
7438 	.read		= seq_read,
7439 	.llseek		= seq_lseek,
7440 	.release	= tracing_seq_release,
7441 };
7442 
7443 static const struct file_operations tracing_total_entries_fops = {
7444 	.open		= tracing_open_generic_tr,
7445 	.read		= tracing_total_entries_read,
7446 	.llseek		= generic_file_llseek,
7447 	.release	= tracing_release_generic_tr,
7448 };
7449 
7450 static const struct file_operations tracing_free_buffer_fops = {
7451 	.open		= tracing_open_generic_tr,
7452 	.write		= tracing_free_buffer_write,
7453 	.release	= tracing_free_buffer_release,
7454 };
7455 
7456 static const struct file_operations tracing_mark_fops = {
7457 	.open		= tracing_mark_open,
7458 	.write		= tracing_mark_write,
7459 	.release	= tracing_release_generic_tr,
7460 };
7461 
7462 static const struct file_operations tracing_mark_raw_fops = {
7463 	.open		= tracing_mark_open,
7464 	.write		= tracing_mark_raw_write,
7465 	.release	= tracing_release_generic_tr,
7466 };
7467 
7468 static const struct file_operations trace_clock_fops = {
7469 	.open		= tracing_clock_open,
7470 	.read		= seq_read,
7471 	.llseek		= seq_lseek,
7472 	.release	= tracing_single_release_tr,
7473 	.write		= tracing_clock_write,
7474 };
7475 
7476 static const struct file_operations trace_time_stamp_mode_fops = {
7477 	.open		= tracing_time_stamp_mode_open,
7478 	.read		= seq_read,
7479 	.llseek		= seq_lseek,
7480 	.release	= tracing_single_release_tr,
7481 };
7482 
7483 static const struct file_operations last_boot_fops = {
7484 	.open		= tracing_open_generic_tr,
7485 	.read		= tracing_last_boot_read,
7486 	.llseek		= generic_file_llseek,
7487 	.release	= tracing_release_generic_tr,
7488 };
7489 
7490 #ifdef CONFIG_TRACER_SNAPSHOT
7491 static const struct file_operations snapshot_fops = {
7492 	.open		= tracing_snapshot_open,
7493 	.read		= seq_read,
7494 	.write		= tracing_snapshot_write,
7495 	.llseek		= tracing_lseek,
7496 	.release	= tracing_snapshot_release,
7497 };
7498 
7499 static const struct file_operations snapshot_raw_fops = {
7500 	.open		= snapshot_raw_open,
7501 	.read		= tracing_buffers_read,
7502 	.release	= tracing_buffers_release,
7503 	.splice_read	= tracing_buffers_splice_read,
7504 };
7505 
7506 #endif /* CONFIG_TRACER_SNAPSHOT */
7507 
7508 /*
7509  * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7510  * @filp: The active open file structure
7511  * @ubuf: The userspace provided buffer to read the value from
7512  * @cnt: The maximum number of bytes to read from @ubuf
7513  * @ppos: The current "file" position
7514  *
7515  * This function implements the write interface for a struct trace_min_max_param.
7516  * The filp->private_data must point to a trace_min_max_param structure that
7517  * defines where to write the value, the min and the max acceptable values,
7518  * and a lock to protect the write.
7519  */
7520 static ssize_t
7521 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7522 {
7523 	struct trace_min_max_param *param = filp->private_data;
7524 	u64 val;
7525 	int err;
7526 
7527 	if (!param)
7528 		return -EFAULT;
7529 
7530 	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7531 	if (err)
7532 		return err;
7533 
7534 	if (param->lock)
7535 		mutex_lock(param->lock);
7536 
7537 	if (param->min && val < *param->min)
7538 		err = -EINVAL;
7539 
7540 	if (param->max && val > *param->max)
7541 		err = -EINVAL;
7542 
7543 	if (!err)
7544 		*param->val = val;
7545 
7546 	if (param->lock)
7547 		mutex_unlock(param->lock);
7548 
7549 	if (err)
7550 		return err;
7551 
7552 	return cnt;
7553 }
7554 
7555 /*
7556  * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7557  * @filp: The active open file structure
7558  * @ubuf: The userspace provided buffer to read value into
7559  * @cnt: The maximum number of bytes to read
7560  * @ppos: The current "file" position
7561  *
7562  * This function implements the read interface for a struct trace_min_max_param.
7563  * The filp->private_data must point to a trace_min_max_param struct with valid
7564  * data.
7565  */
7566 static ssize_t
7567 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7568 {
7569 	struct trace_min_max_param *param = filp->private_data;
7570 	char buf[U64_STR_SIZE];
7571 	int len;
7572 	u64 val;
7573 
7574 	if (!param)
7575 		return -EFAULT;
7576 
7577 	val = *param->val;
7578 
7579 	if (cnt > sizeof(buf))
7580 		cnt = sizeof(buf);
7581 
7582 	len = snprintf(buf, sizeof(buf), "%llu\n", val);
7583 
7584 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7585 }
7586 
7587 const struct file_operations trace_min_max_fops = {
7588 	.open		= tracing_open_generic,
7589 	.read		= trace_min_max_read,
7590 	.write		= trace_min_max_write,
7591 };
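
/*
 * Illustrative sketch (editorial, not part of the original file): a caller
 * typically points filp->private_data at a trace_min_max_param by passing
 * it to trace_create_file().  All names below (my_val, my_min, my_max,
 * my_lock, my_param, "my_knob", parent) are hypothetical.
 *
 *	static u64 my_val, my_min = 1, my_max = 1000;
 *	static DEFINE_MUTEX(my_lock);
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_knob", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);
 */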
7592 
7593 #define TRACING_LOG_ERRS_MAX	8
7594 #define TRACING_LOG_LOC_MAX	128
7595 
7596 #define CMD_PREFIX "  Command: "
7597 
7598 struct err_info {
7599 	const char	**errs;	/* ptr to loc-specific array of err strings */
7600 	u8		type;	/* index into errs -> specific err string */
7601 	u16		pos;	/* caret position */
7602 	u64		ts;
7603 };
7604 
7605 struct tracing_log_err {
7606 	struct list_head	list;
7607 	struct err_info		info;
7608 	char			loc[TRACING_LOG_LOC_MAX]; /* err location */
7609 	char			*cmd;                     /* what caused err */
7610 };
7611 
7612 static DEFINE_MUTEX(tracing_err_log_lock);
7613 
7614 static struct tracing_log_err *alloc_tracing_log_err(int len)
7615 {
7616 	struct tracing_log_err *err;
7617 
7618 	err = kzalloc(sizeof(*err), GFP_KERNEL);
7619 	if (!err)
7620 		return ERR_PTR(-ENOMEM);
7621 
7622 	err->cmd = kzalloc(len, GFP_KERNEL);
7623 	if (!err->cmd) {
7624 		kfree(err);
7625 		return ERR_PTR(-ENOMEM);
7626 	}
7627 
7628 	return err;
7629 }
7630 
7631 static void free_tracing_log_err(struct tracing_log_err *err)
7632 {
7633 	kfree(err->cmd);
7634 	kfree(err);
7635 }
7636 
7637 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7638 						   int len)
7639 {
7640 	struct tracing_log_err *err;
7641 	char *cmd;
7642 
7643 	if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7644 		err = alloc_tracing_log_err(len);
7645 		if (PTR_ERR(err) != -ENOMEM)
7646 			tr->n_err_log_entries++;
7647 
7648 		return err;
7649 	}
7650 	cmd = kzalloc(len, GFP_KERNEL);
7651 	if (!cmd)
7652 		return ERR_PTR(-ENOMEM);
7653 	err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7654 	kfree(err->cmd);
7655 	err->cmd = cmd;
7656 	list_del(&err->list);
7657 
7658 	return err;
7659 }
7660 
7661 /**
7662  * err_pos - find the position of a string within a command for error careting
7663  * @cmd: The tracing command that caused the error
7664  * @str: The string to position the caret at within @cmd
7665  *
7666  * Finds the position of the first occurrence of @str within @cmd.  The
7667  * return value can be passed to tracing_log_err() for caret placement
7668  * within @cmd.
7669  *
7670  * Returns the index within @cmd of the first occurrence of @str or 0
7671  * if @str was not found.
7672  */
7673 unsigned int err_pos(char *cmd, const char *str)
7674 {
7675 	char *found;
7676 
7677 	if (WARN_ON(!strlen(cmd)))
7678 		return 0;
7679 
7680 	found = strstr(cmd, str);
7681 	if (found)
7682 		return found - cmd;
7683 
7684 	return 0;
7685 }
7686 
7687 /**
7688  * tracing_log_err - write an error to the tracing error log
7689  * @tr: The associated trace array for the error (NULL for top level array)
7690  * @loc: A string describing where the error occurred
7691  * @cmd: The tracing command that caused the error
7692  * @errs: The array of loc-specific static error strings
7693  * @type: The index into errs[], which produces the specific static err string
7694  * @pos: The position the caret should be placed in the cmd
7695  *
7696  * Writes an error into tracing/error_log of the form:
7697  *
7698  * <loc>: error: <text>
7699  *   Command: <cmd>
7700  *              ^
7701  *
7702  * tracing/error_log is a small log file containing the last
7703  * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
7704  * unless there has been a tracing error, and the error log can be
7705  * cleared, and its memory freed, by writing the empty string to it
7706  * in truncation mode, i.e. echo > tracing/error_log.
7707  *
7708  * NOTE: the @errs array along with the @type param are used to
7709  * produce a static error string - this string is not copied and saved
7710  * when the error is logged - only a pointer to it is saved.  See
7711  * existing callers for examples of how static strings are typically
7712  * defined for use with tracing_log_err().
7713  */
7714 void tracing_log_err(struct trace_array *tr,
7715 		     const char *loc, const char *cmd,
7716 		     const char **errs, u8 type, u16 pos)
7717 {
7718 	struct tracing_log_err *err;
7719 	int len = 0;
7720 
7721 	if (!tr)
7722 		tr = &global_trace;
7723 
7724 	len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
7725 
7726 	guard(mutex)(&tracing_err_log_lock);
7727 
7728 	err = get_tracing_log_err(tr, len);
7729 	if (PTR_ERR(err) == -ENOMEM)
7730 		return;
7731 
7732 	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7733 	snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
7734 
7735 	err->info.errs = errs;
7736 	err->info.type = type;
7737 	err->info.pos = pos;
7738 	err->info.ts = local_clock();
7739 
7740 	list_add_tail(&err->list, &tr->err_log);
7741 }
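
/*
 * Illustrative sketch (editorial): a typical caller keeps a static array
 * of error strings, indexes it with an enum, and uses err_pos() to place
 * the caret.  The names below (my_cmd_errs, MY_ERR_BAD_ARG, "myloc",
 * "bad_token") are hypothetical.
 *
 *	static const char *my_cmd_errs[] = {
 *		"Bad argument",
 *		"Duplicate name",
 *	};
 *	enum { MY_ERR_BAD_ARG, MY_ERR_DUP_NAME };
 *
 *	tracing_log_err(tr, "myloc", cmd, my_cmd_errs, MY_ERR_BAD_ARG,
 *			err_pos(cmd, "bad_token"));
 */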
7742 
7743 static void clear_tracing_err_log(struct trace_array *tr)
7744 {
7745 	struct tracing_log_err *err, *next;
7746 
7747 	mutex_lock(&tracing_err_log_lock);
7748 	list_for_each_entry_safe(err, next, &tr->err_log, list) {
7749 		list_del(&err->list);
7750 		free_tracing_log_err(err);
7751 	}
7752 
7753 	tr->n_err_log_entries = 0;
7754 	mutex_unlock(&tracing_err_log_lock);
7755 }
7756 
7757 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7758 {
7759 	struct trace_array *tr = m->private;
7760 
7761 	mutex_lock(&tracing_err_log_lock);
7762 
7763 	return seq_list_start(&tr->err_log, *pos);
7764 }
7765 
7766 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7767 {
7768 	struct trace_array *tr = m->private;
7769 
7770 	return seq_list_next(v, &tr->err_log, pos);
7771 }
7772 
7773 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7774 {
7775 	mutex_unlock(&tracing_err_log_lock);
7776 }
7777 
7778 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
7779 {
7780 	u16 i;
7781 
7782 	for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7783 		seq_putc(m, ' ');
7784 	for (i = 0; i < pos; i++)
7785 		seq_putc(m, ' ');
7786 	seq_puts(m, "^\n");
7787 }
7788 
7789 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7790 {
7791 	struct tracing_log_err *err = v;
7792 
7793 	if (err) {
7794 		const char *err_text = err->info.errs[err->info.type];
7795 		u64 sec = err->info.ts;
7796 		u32 nsec;
7797 
7798 		nsec = do_div(sec, NSEC_PER_SEC);
7799 		seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7800 			   err->loc, err_text);
7801 		seq_printf(m, "%s", err->cmd);
7802 		tracing_err_log_show_pos(m, err->info.pos);
7803 	}
7804 
7805 	return 0;
7806 }
7807 
7808 static const struct seq_operations tracing_err_log_seq_ops = {
7809 	.start  = tracing_err_log_seq_start,
7810 	.next   = tracing_err_log_seq_next,
7811 	.stop   = tracing_err_log_seq_stop,
7812 	.show   = tracing_err_log_seq_show
7813 };
7814 
7815 static int tracing_err_log_open(struct inode *inode, struct file *file)
7816 {
7817 	struct trace_array *tr = inode->i_private;
7818 	int ret = 0;
7819 
7820 	ret = tracing_check_open_get_tr(tr);
7821 	if (ret)
7822 		return ret;
7823 
7824 	/* If this file was opened for write, then erase contents */
7825 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7826 		clear_tracing_err_log(tr);
7827 
7828 	if (file->f_mode & FMODE_READ) {
7829 		ret = seq_open(file, &tracing_err_log_seq_ops);
7830 		if (!ret) {
7831 			struct seq_file *m = file->private_data;
7832 			m->private = tr;
7833 		} else {
7834 			trace_array_put(tr);
7835 		}
7836 	}
7837 	return ret;
7838 }
7839 
7840 static ssize_t tracing_err_log_write(struct file *file,
7841 				     const char __user *buffer,
7842 				     size_t count, loff_t *ppos)
7843 {
7844 	return count;
7845 }
7846 
7847 static int tracing_err_log_release(struct inode *inode, struct file *file)
7848 {
7849 	struct trace_array *tr = inode->i_private;
7850 
7851 	trace_array_put(tr);
7852 
7853 	if (file->f_mode & FMODE_READ)
7854 		seq_release(inode, file);
7855 
7856 	return 0;
7857 }
7858 
7859 static const struct file_operations tracing_err_log_fops = {
7860 	.open           = tracing_err_log_open,
7861 	.write		= tracing_err_log_write,
7862 	.read           = seq_read,
7863 	.llseek         = tracing_lseek,
7864 	.release        = tracing_err_log_release,
7865 };
7866 
7867 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7868 {
7869 	struct trace_array *tr = inode->i_private;
7870 	struct ftrace_buffer_info *info;
7871 	int ret;
7872 
7873 	ret = tracing_check_open_get_tr(tr);
7874 	if (ret)
7875 		return ret;
7876 
7877 	info = kvzalloc(sizeof(*info), GFP_KERNEL);
7878 	if (!info) {
7879 		trace_array_put(tr);
7880 		return -ENOMEM;
7881 	}
7882 
7883 	mutex_lock(&trace_types_lock);
7884 
7885 	info->iter.tr		= tr;
7886 	info->iter.cpu_file	= tracing_get_cpu(inode);
7887 	info->iter.trace	= tr->current_trace;
7888 	info->iter.array_buffer = &tr->array_buffer;
7889 	info->spare		= NULL;
7890 	/* Force reading ring buffer for first read */
7891 	info->read		= (unsigned int)-1;
7892 
7893 	filp->private_data = info;
7894 
7895 	tr->trace_ref++;
7896 
7897 	mutex_unlock(&trace_types_lock);
7898 
7899 	ret = nonseekable_open(inode, filp);
7900 	if (ret < 0)
7901 		trace_array_put(tr);
7902 
7903 	return ret;
7904 }
7905 
7906 static __poll_t
7907 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7908 {
7909 	struct ftrace_buffer_info *info = filp->private_data;
7910 	struct trace_iterator *iter = &info->iter;
7911 
7912 	return trace_poll(iter, filp, poll_table);
7913 }
7914 
7915 static ssize_t
7916 tracing_buffers_read(struct file *filp, char __user *ubuf,
7917 		     size_t count, loff_t *ppos)
7918 {
7919 	struct ftrace_buffer_info *info = filp->private_data;
7920 	struct trace_iterator *iter = &info->iter;
7921 	void *trace_data;
7922 	int page_size;
7923 	ssize_t ret = 0;
7924 	ssize_t size;
7925 
7926 	if (!count)
7927 		return 0;
7928 
7929 #ifdef CONFIG_TRACER_MAX_TRACE
7930 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7931 		return -EBUSY;
7932 #endif
7933 
7934 	page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
7935 
7936 	/* Make sure the spare matches the current sub buffer size */
7937 	if (info->spare) {
7938 		if (page_size != info->spare_size) {
7939 			ring_buffer_free_read_page(iter->array_buffer->buffer,
7940 						   info->spare_cpu, info->spare);
7941 			info->spare = NULL;
7942 		}
7943 	}
7944 
7945 	if (!info->spare) {
7946 		info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
7947 							  iter->cpu_file);
7948 		if (IS_ERR(info->spare)) {
7949 			ret = PTR_ERR(info->spare);
7950 			info->spare = NULL;
7951 		} else {
7952 			info->spare_cpu = iter->cpu_file;
7953 			info->spare_size = page_size;
7954 		}
7955 	}
7956 	if (!info->spare)
7957 		return ret;
7958 
7959 	/* Do we have previous read data to read? */
7960 	if (info->read < page_size)
7961 		goto read;
7962 
7963  again:
7964 	trace_access_lock(iter->cpu_file);
7965 	ret = ring_buffer_read_page(iter->array_buffer->buffer,
7966 				    info->spare,
7967 				    count,
7968 				    iter->cpu_file, 0);
7969 	trace_access_unlock(iter->cpu_file);
7970 
7971 	if (ret < 0) {
7972 		if (trace_empty(iter) && !iter->closed) {
7973 			if ((filp->f_flags & O_NONBLOCK))
7974 				return -EAGAIN;
7975 
7976 			ret = wait_on_pipe(iter, 0);
7977 			if (ret)
7978 				return ret;
7979 
7980 			goto again;
7981 		}
7982 		return 0;
7983 	}
7984 
7985 	info->read = 0;
7986  read:
7987 	size = page_size - info->read;
7988 	if (size > count)
7989 		size = count;
7990 	trace_data = ring_buffer_read_page_data(info->spare);
7991 	ret = copy_to_user(ubuf, trace_data + info->read, size);
7992 	if (ret == size)
7993 		return -EFAULT;
7994 
7995 	size -= ret;
7996 
7997 	*ppos += size;
7998 	info->read += size;
7999 
8000 	return size;
8001 }
8002 
8003 static int tracing_buffers_flush(struct file *file, fl_owner_t id)
8004 {
8005 	struct ftrace_buffer_info *info = file->private_data;
8006 	struct trace_iterator *iter = &info->iter;
8007 
8008 	iter->closed = true;
8009 	/* Make sure the waiters see the new wait_index */
8010 	(void)atomic_fetch_inc_release(&iter->wait_index);
8011 
8012 	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8013 
8014 	return 0;
8015 }
8016 
8017 static int tracing_buffers_release(struct inode *inode, struct file *file)
8018 {
8019 	struct ftrace_buffer_info *info = file->private_data;
8020 	struct trace_iterator *iter = &info->iter;
8021 
8022 	mutex_lock(&trace_types_lock);
8023 
8024 	iter->tr->trace_ref--;
8025 
8026 	__trace_array_put(iter->tr);
8027 
8028 	if (info->spare)
8029 		ring_buffer_free_read_page(iter->array_buffer->buffer,
8030 					   info->spare_cpu, info->spare);
8031 	kvfree(info);
8032 
8033 	mutex_unlock(&trace_types_lock);
8034 
8035 	return 0;
8036 }
8037 
8038 struct buffer_ref {
8039 	struct trace_buffer	*buffer;
8040 	void			*page;
8041 	int			cpu;
8042 	refcount_t		refcount;
8043 };
8044 
8045 static void buffer_ref_release(struct buffer_ref *ref)
8046 {
8047 	if (!refcount_dec_and_test(&ref->refcount))
8048 		return;
8049 	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8050 	kfree(ref);
8051 }
8052 
8053 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8054 				    struct pipe_buffer *buf)
8055 {
8056 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8057 
8058 	buffer_ref_release(ref);
8059 	buf->private = 0;
8060 }
8061 
8062 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8063 				struct pipe_buffer *buf)
8064 {
8065 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8066 
8067 	if (refcount_read(&ref->refcount) > INT_MAX/2)
8068 		return false;
8069 
8070 	refcount_inc(&ref->refcount);
8071 	return true;
8072 }
8073 
8074 /* Pipe buffer operations for a buffer. */
8075 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8076 	.release		= buffer_pipe_buf_release,
8077 	.get			= buffer_pipe_buf_get,
8078 };
8079 
8080 /*
8081  * Callback from splice_to_pipe(), if we need to release some pages
8082  * at the end of the spd in case we error'ed out in filling the pipe.
8083  */
8084 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8085 {
8086 	struct buffer_ref *ref =
8087 		(struct buffer_ref *)spd->partial[i].private;
8088 
8089 	buffer_ref_release(ref);
8090 	spd->partial[i].private = 0;
8091 }
8092 
8093 static ssize_t
8094 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8095 			    struct pipe_inode_info *pipe, size_t len,
8096 			    unsigned int flags)
8097 {
8098 	struct ftrace_buffer_info *info = file->private_data;
8099 	struct trace_iterator *iter = &info->iter;
8100 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
8101 	struct page *pages_def[PIPE_DEF_BUFFERS];
8102 	struct splice_pipe_desc spd = {
8103 		.pages		= pages_def,
8104 		.partial	= partial_def,
8105 		.nr_pages_max	= PIPE_DEF_BUFFERS,
8106 		.ops		= &buffer_pipe_buf_ops,
8107 		.spd_release	= buffer_spd_release,
8108 	};
8109 	struct buffer_ref *ref;
8110 	bool woken = false;
8111 	int page_size;
8112 	int entries, i;
8113 	ssize_t ret = 0;
8114 
8115 #ifdef CONFIG_TRACER_MAX_TRACE
8116 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8117 		return -EBUSY;
8118 #endif
8119 
8120 	page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
8121 	if (*ppos & (page_size - 1))
8122 		return -EINVAL;
8123 
8124 	if (len & (page_size - 1)) {
8125 		if (len < page_size)
8126 			return -EINVAL;
8127 		len &= (~(page_size - 1));
8128 	}
8129 
8130 	if (splice_grow_spd(pipe, &spd))
8131 		return -ENOMEM;
8132 
8133  again:
8134 	trace_access_lock(iter->cpu_file);
8135 	entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8136 
8137 	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) {
8138 		struct page *page;
8139 		int r;
8140 
8141 		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8142 		if (!ref) {
8143 			ret = -ENOMEM;
8144 			break;
8145 		}
8146 
8147 		refcount_set(&ref->refcount, 1);
8148 		ref->buffer = iter->array_buffer->buffer;
8149 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8150 		if (IS_ERR(ref->page)) {
8151 			ret = PTR_ERR(ref->page);
8152 			ref->page = NULL;
8153 			kfree(ref);
8154 			break;
8155 		}
8156 		ref->cpu = iter->cpu_file;
8157 
8158 		r = ring_buffer_read_page(ref->buffer, ref->page,
8159 					  len, iter->cpu_file, 1);
8160 		if (r < 0) {
8161 			ring_buffer_free_read_page(ref->buffer, ref->cpu,
8162 						   ref->page);
8163 			kfree(ref);
8164 			break;
8165 		}
8166 
8167 		page = virt_to_page(ring_buffer_read_page_data(ref->page));
8168 
8169 		spd.pages[i] = page;
8170 		spd.partial[i].len = page_size;
8171 		spd.partial[i].offset = 0;
8172 		spd.partial[i].private = (unsigned long)ref;
8173 		spd.nr_pages++;
8174 		*ppos += page_size;
8175 
8176 		entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8177 	}
8178 
8179 	trace_access_unlock(iter->cpu_file);
8180 	spd.nr_pages = i;
8181 
8182 	/* did we read anything? */
8183 	if (!spd.nr_pages) {
8184 
8185 		if (ret)
8186 			goto out;
8187 
8188 		if (woken)
8189 			goto out;
8190 
8191 		ret = -EAGAIN;
8192 		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8193 			goto out;
8194 
8195 		ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
8196 		if (ret)
8197 			goto out;
8198 
8199 		/* No need to wait after waking up when tracing is off */
8200 		if (!tracer_tracing_is_on(iter->tr))
8201 			goto out;
8202 
8203 		/* Iterate one more time to collect any new data then exit */
8204 		woken = true;
8205 
8206 		goto again;
8207 	}
8208 
8209 	ret = splice_to_pipe(pipe, &spd);
8210 out:
8211 	splice_shrink_spd(&spd);
8212 
8213 	return ret;
8214 }
8215 
8216 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8217 {
8218 	struct ftrace_buffer_info *info = file->private_data;
8219 	struct trace_iterator *iter = &info->iter;
8220 	int err;
8221 
8222 	if (cmd == TRACE_MMAP_IOCTL_GET_READER) {
8223 		if (!(file->f_flags & O_NONBLOCK)) {
8224 			err = ring_buffer_wait(iter->array_buffer->buffer,
8225 					       iter->cpu_file,
8226 					       iter->tr->buffer_percent,
8227 					       NULL, NULL);
8228 			if (err)
8229 				return err;
8230 		}
8231 
8232 		return ring_buffer_map_get_reader(iter->array_buffer->buffer,
8233 						  iter->cpu_file);
8234 	} else if (cmd) {
8235 		return -ENOTTY;
8236 	}
8237 
8238 	/*
8239 	 * An ioctl call with cmd 0 to the ring buffer file will wake up all
8240 	 * waiters
8241 	 */
8242 	mutex_lock(&trace_types_lock);
8243 
8244 	/* Make sure the waiters see the new wait_index */
8245 	(void)atomic_fetch_inc_release(&iter->wait_index);
8246 
8247 	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8248 
8249 	mutex_unlock(&trace_types_lock);
8250 	return 0;
8251 }
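
/*
 * Illustrative user-space sketch (editorial): a reader that mmap()s a
 * per-CPU trace_pipe_raw file uses TRACE_MMAP_IOCTL_GET_READER to advance
 * the reader page before consuming it.  Error handling is omitted and
 * "len" is hypothetical here.
 *
 *	int fd = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY | O_NONBLOCK);
 *	void *map = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *
 *	ioctl(fd, TRACE_MMAP_IOCTL_GET_READER);
 */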
8252 
8253 #ifdef CONFIG_TRACER_MAX_TRACE
8254 static int get_snapshot_map(struct trace_array *tr)
8255 {
8256 	int err = 0;
8257 
8258 	/*
8259 	 * Called with mmap_lock held. lockdep would be unhappy if we would now
8260 	 * take trace_types_lock. Instead use the specific
8261 	 * snapshot_trigger_lock.
8262 	 */
8263 	spin_lock(&tr->snapshot_trigger_lock);
8264 
8265 	if (tr->snapshot || tr->mapped == UINT_MAX)
8266 		err = -EBUSY;
8267 	else
8268 		tr->mapped++;
8269 
8270 	spin_unlock(&tr->snapshot_trigger_lock);
8271 
8272 	/* Wait for update_max_tr() to observe iter->tr->mapped */
8273 	if (tr->mapped == 1)
8274 		synchronize_rcu();
8275 
8276 	return err;
8277 
8278 }
8279 static void put_snapshot_map(struct trace_array *tr)
8280 {
8281 	spin_lock(&tr->snapshot_trigger_lock);
8282 	if (!WARN_ON(!tr->mapped))
8283 		tr->mapped--;
8284 	spin_unlock(&tr->snapshot_trigger_lock);
8285 }
8286 #else
8287 static inline int get_snapshot_map(struct trace_array *tr) { return 0; }
8288 static inline void put_snapshot_map(struct trace_array *tr) { }
8289 #endif
8290 
8291 static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
8292 {
8293 	struct ftrace_buffer_info *info = vma->vm_file->private_data;
8294 	struct trace_iterator *iter = &info->iter;
8295 
8296 	WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file));
8297 	put_snapshot_map(iter->tr);
8298 }
8299 
8300 static const struct vm_operations_struct tracing_buffers_vmops = {
8301 	.close		= tracing_buffers_mmap_close,
8302 };
8303 
8304 static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
8305 {
8306 	struct ftrace_buffer_info *info = filp->private_data;
8307 	struct trace_iterator *iter = &info->iter;
8308 	int ret = 0;
8309 
8310 	/* Currently the boot mapped buffer is not supported for mmap */
8311 	if (iter->tr->flags & TRACE_ARRAY_FL_BOOT)
8312 		return -ENODEV;
8313 
8314 	ret = get_snapshot_map(iter->tr);
8315 	if (ret)
8316 		return ret;
8317 
8318 	ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file, vma);
8319 	if (ret)
8320 		put_snapshot_map(iter->tr);
8321 
8322 	vma->vm_ops = &tracing_buffers_vmops;
8323 
8324 	return ret;
8325 }
8326 
8327 static const struct file_operations tracing_buffers_fops = {
8328 	.open		= tracing_buffers_open,
8329 	.read		= tracing_buffers_read,
8330 	.poll		= tracing_buffers_poll,
8331 	.release	= tracing_buffers_release,
8332 	.flush		= tracing_buffers_flush,
8333 	.splice_read	= tracing_buffers_splice_read,
8334 	.unlocked_ioctl = tracing_buffers_ioctl,
8335 	.mmap		= tracing_buffers_mmap,
8336 };
8337 
8338 static ssize_t
8339 tracing_stats_read(struct file *filp, char __user *ubuf,
8340 		   size_t count, loff_t *ppos)
8341 {
8342 	struct inode *inode = file_inode(filp);
8343 	struct trace_array *tr = inode->i_private;
8344 	struct array_buffer *trace_buf = &tr->array_buffer;
8345 	int cpu = tracing_get_cpu(inode);
8346 	struct trace_seq *s;
8347 	unsigned long cnt;
8348 	unsigned long long t;
8349 	unsigned long usec_rem;
8350 
8351 	s = kmalloc(sizeof(*s), GFP_KERNEL);
8352 	if (!s)
8353 		return -ENOMEM;
8354 
8355 	trace_seq_init(s);
8356 
8357 	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8358 	trace_seq_printf(s, "entries: %ld\n", cnt);
8359 
8360 	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8361 	trace_seq_printf(s, "overrun: %ld\n", cnt);
8362 
8363 	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8364 	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8365 
8366 	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8367 	trace_seq_printf(s, "bytes: %ld\n", cnt);
8368 
8369 	if (trace_clocks[tr->clock_id].in_ns) {
8370 		/* local or global for trace_clock */
8371 		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8372 		usec_rem = do_div(t, USEC_PER_SEC);
8373 		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8374 								t, usec_rem);
8375 
8376 		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8377 		usec_rem = do_div(t, USEC_PER_SEC);
8378 		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8379 	} else {
8380 		/* counter or tsc mode for trace_clock */
8381 		trace_seq_printf(s, "oldest event ts: %llu\n",
8382 				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8383 
8384 		trace_seq_printf(s, "now ts: %llu\n",
8385 				ring_buffer_time_stamp(trace_buf->buffer));
8386 	}
8387 
8388 	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8389 	trace_seq_printf(s, "dropped events: %ld\n", cnt);
8390 
8391 	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8392 	trace_seq_printf(s, "read events: %ld\n", cnt);
8393 
8394 	count = simple_read_from_buffer(ubuf, count, ppos,
8395 					s->buffer, trace_seq_used(s));
8396 
8397 	kfree(s);
8398 
8399 	return count;
8400 }
8401 
8402 static const struct file_operations tracing_stats_fops = {
8403 	.open		= tracing_open_generic_tr,
8404 	.read		= tracing_stats_read,
8405 	.llseek		= generic_file_llseek,
8406 	.release	= tracing_release_generic_tr,
8407 };
8408 
8409 #ifdef CONFIG_DYNAMIC_FTRACE
8410 
8411 static ssize_t
8412 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8413 		  size_t cnt, loff_t *ppos)
8414 {
8415 	ssize_t ret;
8416 	char *buf;
8417 	int r;
8418 
8419 	/* 512 should be plenty to hold the amount needed */
8420 #define DYN_INFO_BUF_SIZE	512
8421 
8422 	buf = kmalloc(DYN_INFO_BUF_SIZE, GFP_KERNEL);
8423 	if (!buf)
8424 		return -ENOMEM;
8425 
8426 	r = scnprintf(buf, DYN_INFO_BUF_SIZE,
8427 		      "%ld pages:%ld groups: %ld\n"
8428 		      "ftrace boot update time = %llu (ns)\n"
8429 		      "ftrace module total update time = %llu (ns)\n",
8430 		      ftrace_update_tot_cnt,
8431 		      ftrace_number_of_pages,
8432 		      ftrace_number_of_groups,
8433 		      ftrace_update_time,
8434 		      ftrace_total_mod_time);
8435 
8436 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8437 	kfree(buf);
8438 	return ret;
8439 }
8440 
8441 static const struct file_operations tracing_dyn_info_fops = {
8442 	.open		= tracing_open_generic,
8443 	.read		= tracing_read_dyn_info,
8444 	.llseek		= generic_file_llseek,
8445 };
8446 #endif /* CONFIG_DYNAMIC_FTRACE */
8447 
8448 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8449 static void
8450 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8451 		struct trace_array *tr, struct ftrace_probe_ops *ops,
8452 		void *data)
8453 {
8454 	tracing_snapshot_instance(tr);
8455 }
8456 
8457 static void
8458 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8459 		      struct trace_array *tr, struct ftrace_probe_ops *ops,
8460 		      void *data)
8461 {
8462 	struct ftrace_func_mapper *mapper = data;
8463 	long *count = NULL;
8464 
8465 	if (mapper)
8466 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8467 
8468 	if (count) {
8469 
8470 		if (*count <= 0)
8471 			return;
8472 
8473 		(*count)--;
8474 	}
8475 
8476 	tracing_snapshot_instance(tr);
8477 }
8478 
8479 static int
8480 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8481 		      struct ftrace_probe_ops *ops, void *data)
8482 {
8483 	struct ftrace_func_mapper *mapper = data;
8484 	long *count = NULL;
8485 
8486 	seq_printf(m, "%ps:", (void *)ip);
8487 
8488 	seq_puts(m, "snapshot");
8489 
8490 	if (mapper)
8491 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8492 
8493 	if (count)
8494 		seq_printf(m, ":count=%ld\n", *count);
8495 	else
8496 		seq_puts(m, ":unlimited\n");
8497 
8498 	return 0;
8499 }
8500 
8501 static int
8502 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8503 		     unsigned long ip, void *init_data, void **data)
8504 {
8505 	struct ftrace_func_mapper *mapper = *data;
8506 
8507 	if (!mapper) {
8508 		mapper = allocate_ftrace_func_mapper();
8509 		if (!mapper)
8510 			return -ENOMEM;
8511 		*data = mapper;
8512 	}
8513 
8514 	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8515 }
8516 
8517 static void
8518 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8519 		     unsigned long ip, void *data)
8520 {
8521 	struct ftrace_func_mapper *mapper = data;
8522 
8523 	if (!ip) {
8524 		if (!mapper)
8525 			return;
8526 		free_ftrace_func_mapper(mapper, NULL);
8527 		return;
8528 	}
8529 
8530 	ftrace_func_mapper_remove_ip(mapper, ip);
8531 }
8532 
8533 static struct ftrace_probe_ops snapshot_probe_ops = {
8534 	.func			= ftrace_snapshot,
8535 	.print			= ftrace_snapshot_print,
8536 };
8537 
8538 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8539 	.func			= ftrace_count_snapshot,
8540 	.print			= ftrace_snapshot_print,
8541 	.init			= ftrace_snapshot_init,
8542 	.free			= ftrace_snapshot_free,
8543 };
8544 
8545 static int
8546 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8547 			       char *glob, char *cmd, char *param, int enable)
8548 {
8549 	struct ftrace_probe_ops *ops;
8550 	void *count = (void *)-1;
8551 	char *number;
8552 	int ret;
8553 
8554 	if (!tr)
8555 		return -ENODEV;
8556 
8557 	/* hash funcs only work with set_ftrace_filter */
8558 	if (!enable)
8559 		return -EINVAL;
8560 
8561 	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8562 
8563 	if (glob[0] == '!') {
8564 		ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
8565 		if (!ret)
8566 			tracing_disarm_snapshot(tr);
8567 
8568 		return ret;
8569 	}
8570 
8571 	if (!param)
8572 		goto out_reg;
8573 
8574 	number = strsep(&param, ":");
8575 
8576 	if (!strlen(number))
8577 		goto out_reg;
8578 
8579 	/*
8580 	 * We use the callback data field (which is a pointer)
8581 	 * as our counter.
8582 	 */
8583 	ret = kstrtoul(number, 0, (unsigned long *)&count);
8584 	if (ret)
8585 		return ret;
8586 
8587  out_reg:
8588 	ret = tracing_arm_snapshot(tr);
8589 	if (ret < 0)
8590 		goto out;
8591 
8592 	ret = register_ftrace_function_probe(glob, tr, ops, count);
8593 	if (ret < 0)
8594 		tracing_disarm_snapshot(tr);
8595  out:
8596 	return ret < 0 ? ret : 0;
8597 }
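
/*
 * Editorial usage note: the callback above implements the "snapshot"
 * command of set_ftrace_filter, e.g. (the function name is just an
 * example):
 *
 *	echo 'kfree:snapshot' > set_ftrace_filter
 *	echo 'kfree:snapshot:5' > set_ftrace_filter
 *	echo '!kfree:snapshot' > set_ftrace_filter
 */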
8598 
8599 static struct ftrace_func_command ftrace_snapshot_cmd = {
8600 	.name			= "snapshot",
8601 	.func			= ftrace_trace_snapshot_callback,
8602 };
8603 
8604 static __init int register_snapshot_cmd(void)
8605 {
8606 	return register_ftrace_command(&ftrace_snapshot_cmd);
8607 }
8608 #else
8609 static inline __init int register_snapshot_cmd(void) { return 0; }
8610 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8611 
8612 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8613 {
8614 	if (WARN_ON(!tr->dir))
8615 		return ERR_PTR(-ENODEV);
8616 
8617 	/* Top directory uses NULL as the parent */
8618 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8619 		return NULL;
8620 
8621 	/* All sub buffers have a descriptor */
8622 	return tr->dir;
8623 }
8624 
8625 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8626 {
8627 	struct dentry *d_tracer;
8628 
8629 	if (tr->percpu_dir)
8630 		return tr->percpu_dir;
8631 
8632 	d_tracer = tracing_get_dentry(tr);
8633 	if (IS_ERR(d_tracer))
8634 		return NULL;
8635 
8636 	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8637 
8638 	MEM_FAIL(!tr->percpu_dir,
8639 		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8640 
8641 	return tr->percpu_dir;
8642 }
8643 
8644 static struct dentry *
8645 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8646 		      void *data, long cpu, const struct file_operations *fops)
8647 {
8648 	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8649 
8650 	if (ret) /* See tracing_get_cpu() */
8651 		d_inode(ret)->i_cdev = (void *)(cpu + 1);
8652 	return ret;
8653 }
8654 
8655 static void
8656 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8657 {
8658 	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8659 	struct dentry *d_cpu;
8660 	char cpu_dir[30]; /* 30 characters should be more than enough */
8661 
8662 	if (!d_percpu)
8663 		return;
8664 
8665 	snprintf(cpu_dir, 30, "cpu%ld", cpu);
8666 	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8667 	if (!d_cpu) {
8668 		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8669 		return;
8670 	}
8671 
8672 	/* per cpu trace_pipe */
8673 	trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8674 				tr, cpu, &tracing_pipe_fops);
8675 
8676 	/* per cpu trace */
8677 	trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8678 				tr, cpu, &tracing_fops);
8679 
8680 	trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8681 				tr, cpu, &tracing_buffers_fops);
8682 
8683 	trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8684 				tr, cpu, &tracing_stats_fops);
8685 
8686 	trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8687 				tr, cpu, &tracing_entries_fops);
8688 
8689 	if (tr->range_addr_start)
8690 		trace_create_cpu_file("buffer_meta", TRACE_MODE_READ, d_cpu,
8691 				      tr, cpu, &tracing_buffer_meta_fops);
8692 #ifdef CONFIG_TRACER_SNAPSHOT
8693 	if (!tr->range_addr_start) {
8694 		trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8695 				      tr, cpu, &snapshot_fops);
8696 
8697 		trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8698 				      tr, cpu, &snapshot_raw_fops);
8699 	}
8700 #endif
8701 }
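
/*
 * Editorial summary of the per-CPU layout created above, e.g. for cpu0:
 *
 *	per_cpu/cpu0/trace_pipe
 *	per_cpu/cpu0/trace
 *	per_cpu/cpu0/trace_pipe_raw
 *	per_cpu/cpu0/stats
 *	per_cpu/cpu0/buffer_size_kb
 *	per_cpu/cpu0/buffer_meta	(boot-mapped instances only)
 *	per_cpu/cpu0/snapshot		(CONFIG_TRACER_SNAPSHOT, not boot-mapped)
 *	per_cpu/cpu0/snapshot_raw	(CONFIG_TRACER_SNAPSHOT, not boot-mapped)
 */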
8702 
8703 #ifdef CONFIG_FTRACE_SELFTEST
8704 /* Let selftest have access to static functions in this file */
8705 #include "trace_selftest.c"
8706 #endif
8707 
8708 static ssize_t
8709 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8710 			loff_t *ppos)
8711 {
8712 	struct trace_option_dentry *topt = filp->private_data;
8713 	char *buf;
8714 
8715 	if (topt->flags->val & topt->opt->bit)
8716 		buf = "1\n";
8717 	else
8718 		buf = "0\n";
8719 
8720 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8721 }
8722 
8723 static ssize_t
8724 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8725 			 loff_t *ppos)
8726 {
8727 	struct trace_option_dentry *topt = filp->private_data;
8728 	unsigned long val;
8729 	int ret;
8730 
8731 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8732 	if (ret)
8733 		return ret;
8734 
8735 	if (val != 0 && val != 1)
8736 		return -EINVAL;
8737 
8738 	if (!!(topt->flags->val & topt->opt->bit) != val) {
8739 		mutex_lock(&trace_types_lock);
8740 		ret = __set_tracer_option(topt->tr, topt->flags,
8741 					  topt->opt, !val);
8742 		mutex_unlock(&trace_types_lock);
8743 		if (ret)
8744 			return ret;
8745 	}
8746 
8747 	*ppos += cnt;
8748 
8749 	return cnt;
8750 }
8751 
8752 static int tracing_open_options(struct inode *inode, struct file *filp)
8753 {
8754 	struct trace_option_dentry *topt = inode->i_private;
8755 	int ret;
8756 
8757 	ret = tracing_check_open_get_tr(topt->tr);
8758 	if (ret)
8759 		return ret;
8760 
8761 	filp->private_data = inode->i_private;
8762 	return 0;
8763 }
8764 
8765 static int tracing_release_options(struct inode *inode, struct file *file)
8766 {
8767 	struct trace_option_dentry *topt = file->private_data;
8768 
8769 	trace_array_put(topt->tr);
8770 	return 0;
8771 }
8772 
8773 static const struct file_operations trace_options_fops = {
8774 	.open = tracing_open_options,
8775 	.read = trace_options_read,
8776 	.write = trace_options_write,
8777 	.llseek	= generic_file_llseek,
8778 	.release = tracing_release_options,
8779 };
8780 
8781 /*
8782  * In order to pass in both the trace_array descriptor as well as the index
8783  * to the flag that the trace option file represents, the trace_array
8784  * has a character array of trace_flags_index[], which holds the index
8785  * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8786  * The address of this character array is passed to the flag option file
8787  * read/write callbacks.
8788  *
8789  * In order to extract both the index and the trace_array descriptor,
8790  * get_tr_index() uses the following algorithm.
8791  *
8792  *   idx = *ptr;
8793  *
8794  * As the pointer itself contains the address of the index (remember
8795  * index[1] == 1).
8796  *
8797  * Then to get the trace_array descriptor, by subtracting that index
8798  * from the ptr, we get to the start of the index itself.
8799  *
8800  *   ptr - idx == &index[0]
8801  *
8802  * Then a simple container_of() from that pointer gets us to the
8803  * trace_array descriptor.
8804  */
8805 static void get_tr_index(void *data, struct trace_array **ptr,
8806 			 unsigned int *pindex)
8807 {
8808 	*pindex = *(unsigned char *)data;
8809 
8810 	*ptr = container_of(data - *pindex, struct trace_array,
8811 			    trace_flags_index);
8812 }
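
/*
 * Editorial worked example: if data points at &tr->trace_flags_index[3],
 * then *data == 3 and data - 3 == &tr->trace_flags_index[0], so the
 * container_of() above recovers tr.
 */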
8813 
8814 static ssize_t
8815 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8816 			loff_t *ppos)
8817 {
8818 	void *tr_index = filp->private_data;
8819 	struct trace_array *tr;
8820 	unsigned int index;
8821 	char *buf;
8822 
8823 	get_tr_index(tr_index, &tr, &index);
8824 
8825 	if (tr->trace_flags & (1 << index))
8826 		buf = "1\n";
8827 	else
8828 		buf = "0\n";
8829 
8830 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8831 }
8832 
8833 static ssize_t
8834 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8835 			 loff_t *ppos)
8836 {
8837 	void *tr_index = filp->private_data;
8838 	struct trace_array *tr;
8839 	unsigned int index;
8840 	unsigned long val;
8841 	int ret;
8842 
8843 	get_tr_index(tr_index, &tr, &index);
8844 
8845 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8846 	if (ret)
8847 		return ret;
8848 
8849 	if (val != 0 && val != 1)
8850 		return -EINVAL;
8851 
8852 	mutex_lock(&event_mutex);
8853 	mutex_lock(&trace_types_lock);
8854 	ret = set_tracer_flag(tr, 1 << index, val);
8855 	mutex_unlock(&trace_types_lock);
8856 	mutex_unlock(&event_mutex);
8857 
8858 	if (ret < 0)
8859 		return ret;
8860 
8861 	*ppos += cnt;
8862 
8863 	return cnt;
8864 }
8865 
8866 static const struct file_operations trace_options_core_fops = {
8867 	.open = tracing_open_generic,
8868 	.read = trace_options_core_read,
8869 	.write = trace_options_core_write,
8870 	.llseek = generic_file_llseek,
8871 };
8872 
8873 struct dentry *trace_create_file(const char *name,
8874 				 umode_t mode,
8875 				 struct dentry *parent,
8876 				 void *data,
8877 				 const struct file_operations *fops)
8878 {
8879 	struct dentry *ret;
8880 
8881 	ret = tracefs_create_file(name, mode, parent, data, fops);
8882 	if (!ret)
8883 		pr_warn("Could not create tracefs '%s' entry\n", name);
8884 
8885 	return ret;
8886 }
8887 
8888 
8889 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8890 {
8891 	struct dentry *d_tracer;
8892 
8893 	if (tr->options)
8894 		return tr->options;
8895 
8896 	d_tracer = tracing_get_dentry(tr);
8897 	if (IS_ERR(d_tracer))
8898 		return NULL;
8899 
8900 	tr->options = tracefs_create_dir("options", d_tracer);
8901 	if (!tr->options) {
8902 		pr_warn("Could not create tracefs directory 'options'\n");
8903 		return NULL;
8904 	}
8905 
8906 	return tr->options;
8907 }
8908 
8909 static void
8910 create_trace_option_file(struct trace_array *tr,
8911 			 struct trace_option_dentry *topt,
8912 			 struct tracer_flags *flags,
8913 			 struct tracer_opt *opt)
8914 {
8915 	struct dentry *t_options;
8916 
8917 	t_options = trace_options_init_dentry(tr);
8918 	if (!t_options)
8919 		return;
8920 
8921 	topt->flags = flags;
8922 	topt->opt = opt;
8923 	topt->tr = tr;
8924 
8925 	topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
8926 					t_options, topt, &trace_options_fops);
8927 
8928 }
8929 
8930 static void
8931 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8932 {
8933 	struct trace_option_dentry *topts;
8934 	struct trace_options *tr_topts;
8935 	struct tracer_flags *flags;
8936 	struct tracer_opt *opts;
8937 	int cnt;
8938 	int i;
8939 
8940 	if (!tracer)
8941 		return;
8942 
8943 	flags = tracer->flags;
8944 
8945 	if (!flags || !flags->opts)
8946 		return;
8947 
8948 	/*
8949 	 * If this is an instance, only create flags for tracers
8950 	 * the instance may have.
8951 	 */
8952 	if (!trace_ok_for_array(tracer, tr))
8953 		return;
8954 
8955 	for (i = 0; i < tr->nr_topts; i++) {
8956 		/* Make sure there's no duplicate flags. */
8957 		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8958 			return;
8959 	}
8960 
8961 	opts = flags->opts;
8962 
8963 	for (cnt = 0; opts[cnt].name; cnt++)
8964 		;
8965 
8966 	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8967 	if (!topts)
8968 		return;
8969 
8970 	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8971 			    GFP_KERNEL);
8972 	if (!tr_topts) {
8973 		kfree(topts);
8974 		return;
8975 	}
8976 
8977 	tr->topts = tr_topts;
8978 	tr->topts[tr->nr_topts].tracer = tracer;
8979 	tr->topts[tr->nr_topts].topts = topts;
8980 	tr->nr_topts++;
8981 
8982 	for (cnt = 0; opts[cnt].name; cnt++) {
8983 		create_trace_option_file(tr, &topts[cnt], flags,
8984 					 &opts[cnt]);
8985 		MEM_FAIL(topts[cnt].entry == NULL,
8986 			  "Failed to create trace option: %s",
8987 			  opts[cnt].name);
8988 	}
8989 }
8990 
8991 static struct dentry *
8992 create_trace_option_core_file(struct trace_array *tr,
8993 			      const char *option, long index)
8994 {
8995 	struct dentry *t_options;
8996 
8997 	t_options = trace_options_init_dentry(tr);
8998 	if (!t_options)
8999 		return NULL;
9000 
9001 	return trace_create_file(option, TRACE_MODE_WRITE, t_options,
9002 				 (void *)&tr->trace_flags_index[index],
9003 				 &trace_options_core_fops);
9004 }
9005 
9006 static void create_trace_options_dir(struct trace_array *tr)
9007 {
9008 	struct dentry *t_options;
9009 	bool top_level = tr == &global_trace;
9010 	int i;
9011 
9012 	t_options = trace_options_init_dentry(tr);
9013 	if (!t_options)
9014 		return;
9015 
9016 	for (i = 0; trace_options[i]; i++) {
9017 		if (top_level ||
9018 		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
9019 			create_trace_option_core_file(tr, trace_options[i], i);
9020 	}
9021 }
9022 
9023 static ssize_t
9024 rb_simple_read(struct file *filp, char __user *ubuf,
9025 	       size_t cnt, loff_t *ppos)
9026 {
9027 	struct trace_array *tr = filp->private_data;
9028 	char buf[64];
9029 	int r;
9030 
9031 	r = tracer_tracing_is_on(tr);
9032 	r = sprintf(buf, "%d\n", r);
9033 
9034 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9035 }
9036 
9037 static ssize_t
9038 rb_simple_write(struct file *filp, const char __user *ubuf,
9039 		size_t cnt, loff_t *ppos)
9040 {
9041 	struct trace_array *tr = filp->private_data;
9042 	struct trace_buffer *buffer = tr->array_buffer.buffer;
9043 	unsigned long val;
9044 	int ret;
9045 
9046 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9047 	if (ret)
9048 		return ret;
9049 
9050 	if (buffer) {
9051 		mutex_lock(&trace_types_lock);
9052 		if (!!val == tracer_tracing_is_on(tr)) {
9053 			val = 0; /* do nothing */
9054 		} else if (val) {
9055 			tracer_tracing_on(tr);
9056 			if (tr->current_trace->start)
9057 				tr->current_trace->start(tr);
9058 		} else {
9059 			tracer_tracing_off(tr);
9060 			if (tr->current_trace->stop)
9061 				tr->current_trace->stop(tr);
9062 			/* Wake up any waiters */
9063 			ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9064 		}
9065 		mutex_unlock(&trace_types_lock);
9066 	}
9067 
9068 	(*ppos)++;
9069 
9070 	return cnt;
9071 }
9072 
9073 static const struct file_operations rb_simple_fops = {
9074 	.open		= tracing_open_generic_tr,
9075 	.read		= rb_simple_read,
9076 	.write		= rb_simple_write,
9077 	.release	= tracing_release_generic_tr,
9078 	.llseek		= default_llseek,
9079 };
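
/*
 * Editorial note: rb_simple_write() is the write handler behind the
 * tracefs "tracing_on" switch; writing 1 (re)starts recording into the
 * ring buffer, writing 0 stops it and wakes any blocked readers, e.g.:
 *
 *	echo 0 > tracing_on
 *	echo 1 > tracing_on
 */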
9080 
9081 static ssize_t
9082 buffer_percent_read(struct file *filp, char __user *ubuf,
9083 		    size_t cnt, loff_t *ppos)
9084 {
9085 	struct trace_array *tr = filp->private_data;
9086 	char buf[64];
9087 	int r;
9088 
9089 	r = tr->buffer_percent;
9090 	r = sprintf(buf, "%d\n", r);
9091 
9092 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9093 }
9094 
9095 static ssize_t
9096 buffer_percent_write(struct file *filp, const char __user *ubuf,
9097 		     size_t cnt, loff_t *ppos)
9098 {
9099 	struct trace_array *tr = filp->private_data;
9100 	unsigned long val;
9101 	int ret;
9102 
9103 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9104 	if (ret)
9105 		return ret;
9106 
9107 	if (val > 100)
9108 		return -EINVAL;
9109 
9110 	tr->buffer_percent = val;
9111 
9112 	(*ppos)++;
9113 
9114 	return cnt;
9115 }
9116 
9117 static const struct file_operations buffer_percent_fops = {
9118 	.open		= tracing_open_generic_tr,
9119 	.read		= buffer_percent_read,
9120 	.write		= buffer_percent_write,
9121 	.release	= tracing_release_generic_tr,
9122 	.llseek		= default_llseek,
9123 };
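
/*
 * Editorial note on the value accepted above: buffer_percent is the fill
 * watermark used when blocking readers wait on trace_pipe_raw (see the
 * wait_on_pipe() callers above), e.g.:
 *
 *	echo 0   > buffer_percent	wake as soon as any data is present
 *	echo 50  > buffer_percent	wake when the buffer is half full
 *	echo 100 > buffer_percent	wake only when the buffer is full
 */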
9124 
9125 static ssize_t
9126 buffer_subbuf_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
9127 {
9128 	struct trace_array *tr = filp->private_data;
9129 	size_t size;
9130 	char buf[64];
9131 	int order;
9132 	int r;
9133 
9134 	order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9135 	size = (PAGE_SIZE << order) / 1024;
9136 
9137 	r = sprintf(buf, "%zd\n", size);
9138 
9139 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9140 }
9141 
9142 static ssize_t
9143 buffer_subbuf_size_write(struct file *filp, const char __user *ubuf,
9144 			 size_t cnt, loff_t *ppos)
9145 {
9146 	struct trace_array *tr = filp->private_data;
9147 	unsigned long val;
9148 	int old_order;
9149 	int order;
9150 	int pages;
9151 	int ret;
9152 
9153 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9154 	if (ret)
9155 		return ret;
9156 
9157 	val *= 1024; /* value passed in is in KB */
9158 
9159 	pages = DIV_ROUND_UP(val, PAGE_SIZE);
9160 	order = fls(pages - 1);
9161 
9162 	/* limit between 1 and 128 system pages */
9163 	if (order < 0 || order > 7)
9164 		return -EINVAL;
9165 
9166 	/* Do not allow tracing while changing the order of the ring buffer */
9167 	tracing_stop_tr(tr);
9168 
9169 	old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
9170 	if (old_order == order)
9171 		goto out;
9172 
9173 	ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order);
9174 	if (ret)
9175 		goto out;
9176 
9177 #ifdef CONFIG_TRACER_MAX_TRACE
9178 
9179 	if (!tr->allocated_snapshot)
9180 		goto out_max;
9181 
9182 	ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
9183 	if (ret) {
9184 		/* Put back the old order */
9185 		cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
9186 		if (WARN_ON_ONCE(cnt)) {
9187 			/*
9188 			 * AARGH! We are left with different orders!
9189 			 * The max buffer is our "snapshot" buffer.
9190 			 * When a tracer needs a snapshot (one of the
9191 			 * latency tracers), it swaps the max buffer
9192 			 * with the saved snapshot. We succeeded in
9193 			 * updating the order of the main buffer, but failed to
9194 			 * update the order of the max buffer. But when we tried
9195 			 * to reset the main buffer to the original size, we
9196 			 * failed there too. This is very unlikely to
9197 			 * happen, but if it does, warn and kill all
9198 			 * tracing.
9199 			 */
9200 			tracing_disabled = 1;
9201 		}
9202 		goto out;
9203 	}
9204  out_max:
9205 #endif
9206 	(*ppos)++;
9207  out:
9208 	if (ret)
9209 		cnt = ret;
9210 	tracing_start_tr(tr);
9211 	return cnt;
9212 }
9213 
9214 static const struct file_operations buffer_subbuf_size_fops = {
9215 	.open		= tracing_open_generic_tr,
9216 	.read		= buffer_subbuf_size_read,
9217 	.write		= buffer_subbuf_size_write,
9218 	.release	= tracing_release_generic_tr,
9219 	.llseek		= default_llseek,
9220 };
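
/*
 * Editorial worked example of the order computation above: writing 16
 * (KB) with 4K pages gives val = 16384, pages = DIV_ROUND_UP(16384, 4096)
 * = 4, order = fls(3) = 2, i.e. 2^2 = 4 system pages (16K) per sub-buffer.
 */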
9221 
9222 static struct dentry *trace_instance_dir;
9223 
9224 static void
9225 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9226 
9227 static int
9228 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9229 {
9230 	enum ring_buffer_flags rb_flags;
9231 	struct trace_scratch *tscratch;
9232 	unsigned int scratch_size;
9233 
9234 	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9235 
9236 	buf->tr = tr;
9237 
9238 	if (tr->range_addr_start && tr->range_addr_size) {
9239 		buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0,
9240 						      tr->range_addr_start,
9241 						      tr->range_addr_size,
9242 						      sizeof(*tscratch));
9243 
9244 		tscratch = ring_buffer_meta_scratch(buf->buffer, &scratch_size);
9245 		if (tscratch) {
9246 			tr->scratch = tscratch;
9247 			tr->scratch_size = scratch_size;
9248 
9249 #ifdef CONFIG_RANDOMIZE_BASE
9250 			if (tscratch->kaslr_addr)
9251 				tr->text_delta = kaslr_offset() - tscratch->kaslr_addr;
9252 #endif
9253 		}
9254 		/*
9255 		 * This is basically the same as a mapped buffer,
9256 		 * with the same restrictions.
9257 		 */
9258 		tr->mapped++;
9259 	} else {
9260 		buf->buffer = ring_buffer_alloc(size, rb_flags);
9261 	}
9262 	if (!buf->buffer)
9263 		return -ENOMEM;
9264 
9265 	buf->data = alloc_percpu(struct trace_array_cpu);
9266 	if (!buf->data) {
9267 		ring_buffer_free(buf->buffer);
9268 		buf->buffer = NULL;
9269 		return -ENOMEM;
9270 	}
9271 
9272 	/* Allocate the first page for all buffers */
9273 	set_buffer_entries(&tr->array_buffer,
9274 			   ring_buffer_size(tr->array_buffer.buffer, 0));
9275 
9276 	return 0;
9277 }
9278 
9279 static void free_trace_buffer(struct array_buffer *buf)
9280 {
9281 	if (buf->buffer) {
9282 		ring_buffer_free(buf->buffer);
9283 		buf->buffer = NULL;
9284 		free_percpu(buf->data);
9285 		buf->data = NULL;
9286 	}
9287 }
9288 
9289 static int allocate_trace_buffers(struct trace_array *tr, int size)
9290 {
9291 	int ret;
9292 
9293 	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9294 	if (ret)
9295 		return ret;
9296 
9297 #ifdef CONFIG_TRACER_MAX_TRACE
9298 	/* Fixed-address mapped buffer trace arrays do not have snapshot buffers */
9299 	if (tr->range_addr_start)
9300 		return 0;
9301 
9302 	ret = allocate_trace_buffer(tr, &tr->max_buffer,
9303 				    allocate_snapshot ? size : 1);
9304 	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9305 		free_trace_buffer(&tr->array_buffer);
9306 		return -ENOMEM;
9307 	}
9308 	tr->allocated_snapshot = allocate_snapshot;
9309 
9310 	allocate_snapshot = false;
9311 #endif
9312 
9313 	return 0;
9314 }
9315 
9316 static void free_trace_buffers(struct trace_array *tr)
9317 {
9318 	if (!tr)
9319 		return;
9320 
9321 	free_trace_buffer(&tr->array_buffer);
9322 
9323 #ifdef CONFIG_TRACER_MAX_TRACE
9324 	free_trace_buffer(&tr->max_buffer);
9325 #endif
9326 }
9327 
9328 static void init_trace_flags_index(struct trace_array *tr)
9329 {
9330 	int i;
9331 
9332 	/* Used by the trace options files */
9333 	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9334 		tr->trace_flags_index[i] = i;
9335 }
9336 
9337 static void __update_tracer_options(struct trace_array *tr)
9338 {
9339 	struct tracer *t;
9340 
9341 	for (t = trace_types; t; t = t->next)
9342 		add_tracer_options(tr, t);
9343 }
9344 
9345 static void update_tracer_options(struct trace_array *tr)
9346 {
9347 	mutex_lock(&trace_types_lock);
9348 	tracer_options_updated = true;
9349 	__update_tracer_options(tr);
9350 	mutex_unlock(&trace_types_lock);
9351 }
9352 
9353 /* Must have trace_types_lock held */
9354 struct trace_array *trace_array_find(const char *instance)
9355 {
9356 	struct trace_array *tr, *found = NULL;
9357 
9358 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9359 		if (tr->name && strcmp(tr->name, instance) == 0) {
9360 			found = tr;
9361 			break;
9362 		}
9363 	}
9364 
9365 	return found;
9366 }
9367 
9368 struct trace_array *trace_array_find_get(const char *instance)
9369 {
9370 	struct trace_array *tr;
9371 
9372 	mutex_lock(&trace_types_lock);
9373 	tr = trace_array_find(instance);
9374 	if (tr)
9375 		tr->ref++;
9376 	mutex_unlock(&trace_types_lock);
9377 
9378 	return tr;
9379 }
9380 
9381 static int trace_array_create_dir(struct trace_array *tr)
9382 {
9383 	int ret;
9384 
9385 	tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9386 	if (!tr->dir)
9387 		return -EINVAL;
9388 
9389 	ret = event_trace_add_tracer(tr->dir, tr);
9390 	if (ret) {
9391 		tracefs_remove(tr->dir);
9392 		return ret;
9393 	}
9394 
9395 	init_tracer_tracefs(tr, tr->dir);
9396 	__update_tracer_options(tr);
9397 
9398 	return ret;
9399 }
9400 
9401 static struct trace_array *
9402 trace_array_create_systems(const char *name, const char *systems,
9403 			   unsigned long range_addr_start,
9404 			   unsigned long range_addr_size)
9405 {
9406 	struct trace_array *tr;
9407 	int ret;
9408 
9409 	ret = -ENOMEM;
9410 	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9411 	if (!tr)
9412 		return ERR_PTR(ret);
9413 
9414 	tr->name = kstrdup(name, GFP_KERNEL);
9415 	if (!tr->name)
9416 		goto out_free_tr;
9417 
9418 	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9419 		goto out_free_tr;
9420 
9421 	if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9422 		goto out_free_tr;
9423 
9424 	if (systems) {
9425 		tr->system_names = kstrdup_const(systems, GFP_KERNEL);
9426 		if (!tr->system_names)
9427 			goto out_free_tr;
9428 	}
9429 
9430 	/* Only for boot up memory mapped ring buffers */
9431 	tr->range_addr_start = range_addr_start;
9432 	tr->range_addr_size = range_addr_size;
9433 
9434 	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9435 
9436 	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9437 
9438 	raw_spin_lock_init(&tr->start_lock);
9439 
9440 	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9441 #ifdef CONFIG_TRACER_MAX_TRACE
9442 	spin_lock_init(&tr->snapshot_trigger_lock);
9443 #endif
9444 	tr->current_trace = &nop_trace;
9445 
9446 	INIT_LIST_HEAD(&tr->systems);
9447 	INIT_LIST_HEAD(&tr->events);
9448 	INIT_LIST_HEAD(&tr->hist_vars);
9449 	INIT_LIST_HEAD(&tr->err_log);
9450 
9451 #ifdef CONFIG_MODULES
9452 	INIT_LIST_HEAD(&tr->mod_events);
9453 #endif
9454 
9455 	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9456 		goto out_free_tr;
9457 
9458 	/* The ring buffer is expanded by default */
9459 	trace_set_ring_buffer_expanded(tr);
9460 
9461 	if (ftrace_allocate_ftrace_ops(tr) < 0)
9462 		goto out_free_tr;
9463 
9464 	ftrace_init_trace_array(tr);
9465 
9466 	init_trace_flags_index(tr);
9467 
9468 	if (trace_instance_dir) {
9469 		ret = trace_array_create_dir(tr);
9470 		if (ret)
9471 			goto out_free_tr;
9472 	} else
9473 		__trace_early_add_events(tr);
9474 
9475 	list_add(&tr->list, &ftrace_trace_arrays);
9476 
9477 	tr->ref++;
9478 
9479 	return tr;
9480 
9481  out_free_tr:
9482 	ftrace_free_ftrace_ops(tr);
9483 	free_trace_buffers(tr);
9484 	free_cpumask_var(tr->pipe_cpumask);
9485 	free_cpumask_var(tr->tracing_cpumask);
9486 	kfree_const(tr->system_names);
9487 	kfree(tr->name);
9488 	kfree(tr);
9489 
9490 	return ERR_PTR(ret);
9491 }
9492 
9493 static struct trace_array *trace_array_create(const char *name)
9494 {
9495 	return trace_array_create_systems(name, NULL, 0, 0);
9496 }
9497 
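/*
 * instance_mkdir() is the tracefs callback invoked when user space makes
 * a directory under "instances", e.g. (the name "foo" is just an example):
 *
 *	mkdir /sys/kernel/tracing/instances/foo
 *
 * It creates a new trace array with its own buffers and control files.
 * event_mutex and trace_types_lock are held (via guard) across the whole
 * operation.
 */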
9498 static int instance_mkdir(const char *name)
9499 {
9500 	struct trace_array *tr;
9501 	int ret;
9502 
9503 	guard(mutex)(&event_mutex);
9504 	guard(mutex)(&trace_types_lock);
9505 
9506 	ret = -EEXIST;
9507 	if (trace_array_find(name))
9508 		return -EEXIST;
9509 
9510 	tr = trace_array_create(name);
9511 
9512 	ret = PTR_ERR_OR_ZERO(tr);
9513 
9514 	return ret;
9515 }
9516 
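/*
 * Map a physically contiguous region (such as memory set aside with the
 * reserve_mem= boot option) into the kernel's virtual address space with
 * vmap().  Returns the new virtual address as a u64, or 0 on failure.
 * The temporary struct page array is only needed while vmap() runs and
 * is freed immediately afterwards.
 */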
9517 static u64 map_pages(u64 start, u64 size)
9518 {
9519 	struct page **pages;
9520 	phys_addr_t page_start;
9521 	unsigned int page_count;
9522 	unsigned int i;
9523 	void *vaddr;
9524 
9525 	page_count = DIV_ROUND_UP(size, PAGE_SIZE);
9526 
9527 	page_start = start;
9528 	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
9529 	if (!pages)
9530 		return 0;
9531 
9532 	for (i = 0; i < page_count; i++) {
9533 		phys_addr_t addr = page_start + i * PAGE_SIZE;
9534 		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
9535 	}
9536 	vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
9537 	kfree(pages);
9538 
9539 	return (u64)(unsigned long)vaddr;
9540 }
9541 
9542 /**
9543  * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9544  * @name: The name of the trace array to be looked up/created.
9545  * @systems: A list of systems to create event directories for (NULL for all)
9546  *
9547  * Returns a pointer to the trace array with the given name, or
9548  * NULL if it cannot be created.
9549  *
9550  * NOTE: This function increments the reference counter associated with the
9551  * trace array returned. This makes sure it cannot be freed while in use.
9552  * Use trace_array_put() once the trace array is no longer needed.
9553  * If the trace_array is to be freed, trace_array_destroy() needs to
9554  * be called after the trace_array_put(), or simply let user space delete
9555  * it from the tracefs instances directory. But until the
9556  * trace_array_put() is called, user space cannot delete it.
9557  *
9558  */
9559 struct trace_array *trace_array_get_by_name(const char *name, const char *systems)
9560 {
9561 	struct trace_array *tr;
9562 
9563 	guard(mutex)(&event_mutex);
9564 	guard(mutex)(&trace_types_lock);
9565 
9566 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9567 		if (tr->name && strcmp(tr->name, name) == 0) {
9568 			tr->ref++;
9569 			return tr;
9570 		}
9571 	}
9572 
9573 	tr = trace_array_create_systems(name, systems, 0, 0);
9574 
9575 	if (IS_ERR(tr))
9576 		tr = NULL;
9577 	else
9578 		tr->ref++;
9579 
9580 	return tr;
9581 }
9582 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
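/*
 * Typical in-kernel usage (an illustrative sketch only; the instance
 * name and the event being enabled are made up):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance", NULL);
 *	if (!tr)
 *		return -ENOMEM;
 *	trace_array_set_clr_event(tr, "sched", "sched_switch", true);
 *	...
 *	trace_array_put(tr);
 */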
9583 
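/*
 * Tear down an instance: unhook it from the tracers, events, function
 * probes and ftrace, remove its tracefs directory and free its buffers.
 * Fails with -EBUSY while the instance still has extra references or
 * buffers in use.  Callers must hold event_mutex and trace_types_lock.
 */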
9584 static int __remove_instance(struct trace_array *tr)
9585 {
9586 	int i;
9587 
9588 	/* Reference counter for a newly created trace array = 1. */
9589 	if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9590 		return -EBUSY;
9591 
9592 	list_del(&tr->list);
9593 
9594 	/* Disable all the flags that were enabled coming in */
9595 	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9596 		if ((1 << i) & ZEROED_TRACE_FLAGS)
9597 			set_tracer_flag(tr, 1 << i, 0);
9598 	}
9599 
9600 	if (printk_trace == tr)
9601 		update_printk_trace(&global_trace);
9602 
9603 	tracing_set_nop(tr);
9604 	clear_ftrace_function_probes(tr);
9605 	event_trace_del_tracer(tr);
9606 	ftrace_clear_pids(tr);
9607 	ftrace_destroy_function_files(tr);
9608 	tracefs_remove(tr->dir);
9609 	free_percpu(tr->last_func_repeats);
9610 	free_trace_buffers(tr);
9611 	clear_tracing_err_log(tr);
9612 
9613 	for (i = 0; i < tr->nr_topts; i++) {
9614 		kfree(tr->topts[i].topts);
9615 	}
9616 	kfree(tr->topts);
9617 
9618 	free_cpumask_var(tr->pipe_cpumask);
9619 	free_cpumask_var(tr->tracing_cpumask);
9620 	kfree_const(tr->system_names);
9621 	kfree(tr->name);
9622 	kfree(tr);
9623 
9624 	return 0;
9625 }
9626 
9627 int trace_array_destroy(struct trace_array *this_tr)
9628 {
9629 	struct trace_array *tr;
9630 
9631 	if (!this_tr)
9632 		return -EINVAL;
9633 
9634 	guard(mutex)(&event_mutex);
9635 	guard(mutex)(&trace_types_lock);
9636 
9637 
9638 	/* Make sure the trace array exists before destroying it. */
9639 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9640 		if (tr == this_tr)
9641 			return __remove_instance(tr);
9642 	}
9643 
9644 	return -ENODEV;
9645 }
9646 EXPORT_SYMBOL_GPL(trace_array_destroy);
9647 
9648 static int instance_rmdir(const char *name)
9649 {
9650 	struct trace_array *tr;
9651 
9652 	guard(mutex)(&event_mutex);
9653 	guard(mutex)(&trace_types_lock);
9654 
9655 	tr = trace_array_find(name);
9656 	if (!tr)
9657 		return -ENODEV;
9658 
9659 	return __remove_instance(tr);
9660 }
9661 
9662 static __init void create_trace_instances(struct dentry *d_tracer)
9663 {
9664 	struct trace_array *tr;
9665 
9666 	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9667 							 instance_mkdir,
9668 							 instance_rmdir);
9669 	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9670 		return;
9671 
9672 	guard(mutex)(&event_mutex);
9673 	guard(mutex)(&trace_types_lock);
9674 
9675 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9676 		if (!tr->name)
9677 			continue;
9678 		if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9679 			     "Failed to create instance directory\n"))
9680 			return;
9681 	}
9682 }
9683 
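/*
 * Create the per-instance control files (current_tracer, trace,
 * trace_pipe, buffer_size_kb, trace_marker, ...) under @d_tracer.  This
 * is used both for the top level tracing directory and for every
 * instance created under instances/.
 */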
9684 static void
9685 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9686 {
9687 	int cpu;
9688 
9689 	trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9690 			tr, &show_traces_fops);
9691 
9692 	trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9693 			tr, &set_tracer_fops);
9694 
9695 	trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9696 			  tr, &tracing_cpumask_fops);
9697 
9698 	trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9699 			  tr, &tracing_iter_fops);
9700 
9701 	trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9702 			  tr, &tracing_fops);
9703 
9704 	trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9705 			  tr, &tracing_pipe_fops);
9706 
9707 	trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9708 			  tr, &tracing_entries_fops);
9709 
9710 	trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9711 			  tr, &tracing_total_entries_fops);
9712 
9713 	trace_create_file("free_buffer", 0200, d_tracer,
9714 			  tr, &tracing_free_buffer_fops);
9715 
9716 	trace_create_file("trace_marker", 0220, d_tracer,
9717 			  tr, &tracing_mark_fops);
9718 
9719 	tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
9720 
9721 	trace_create_file("trace_marker_raw", 0220, d_tracer,
9722 			  tr, &tracing_mark_raw_fops);
9723 
9724 	trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9725 			  &trace_clock_fops);
9726 
9727 	trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9728 			  tr, &rb_simple_fops);
9729 
9730 	trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9731 			  &trace_time_stamp_mode_fops);
9732 
9733 	tr->buffer_percent = 50;
9734 
9735 	trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
9736 			tr, &buffer_percent_fops);
9737 
9738 	trace_create_file("buffer_subbuf_size_kb", TRACE_MODE_WRITE, d_tracer,
9739 			  tr, &buffer_subbuf_size_fops);
9740 
9741 	create_trace_options_dir(tr);
9742 
9743 #ifdef CONFIG_TRACER_MAX_TRACE
9744 	trace_create_maxlat_file(tr, d_tracer);
9745 #endif
9746 
9747 	if (ftrace_create_function_files(tr, d_tracer))
9748 		MEM_FAIL(1, "Could not allocate function filter files");
9749 
9750 	if (tr->range_addr_start) {
9751 		trace_create_file("last_boot_info", TRACE_MODE_READ, d_tracer,
9752 				  tr, &last_boot_fops);
9753 #ifdef CONFIG_TRACER_SNAPSHOT
9754 	} else {
9755 		trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9756 				  tr, &snapshot_fops);
9757 #endif
9758 	}
9759 
9760 	trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9761 			  tr, &tracing_err_log_fops);
9762 
9763 	for_each_tracing_cpu(cpu)
9764 		tracing_init_tracefs_percpu(tr, cpu);
9765 
9766 	ftrace_init_tracefs(tr, d_tracer);
9767 }
9768 
9769 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9770 {
9771 	struct vfsmount *mnt;
9772 	struct file_system_type *type;
9773 
9774 	/*
9775 	 * To maintain backward compatibility for tools that mount
9776 	 * debugfs to get to the tracing facility, tracefs is automatically
9777 	 * mounted to the debugfs/tracing directory.
9778 	 */
9779 	type = get_fs_type("tracefs");
9780 	if (!type)
9781 		return NULL;
9782 	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9783 	put_filesystem(type);
9784 	if (IS_ERR(mnt))
9785 		return NULL;
9786 	mntget(mnt);
9787 
9788 	return mnt;
9789 }
9790 
9791 /**
9792  * tracing_init_dentry - initialize top level trace array
9793  *
9794  * This is called when creating files or directories in the tracing
9795  * directory. It is called via fs_initcall() by any of the boot up code
9796  * and returns 0 once the top level tracing directory is set up (negative errno on failure).
9797  */
9798 int tracing_init_dentry(void)
9799 {
9800 	struct trace_array *tr = &global_trace;
9801 
9802 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
9803 		pr_warn("Tracing disabled due to lockdown\n");
9804 		return -EPERM;
9805 	}
9806 
9807 	/* The top level trace array uses NULL as parent */
9808 	if (tr->dir)
9809 		return 0;
9810 
9811 	if (WARN_ON(!tracefs_initialized()))
9812 		return -ENODEV;
9813 
9814 	/*
9815 	 * As there may still be users that expect the tracing
9816 	 * files to exist in debugfs/tracing, we must automount
9817 	 * the tracefs file system there, so older tools still
9818 	 * work with the newer kernel.
9819 	 */
9820 	tr->dir = debugfs_create_automount("tracing", NULL,
9821 					   trace_automount, NULL);
9822 
9823 	return 0;
9824 }
9825 
9826 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9827 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9828 
9829 static struct workqueue_struct *eval_map_wq __initdata;
9830 static struct work_struct eval_map_work __initdata;
9831 static struct work_struct tracerfs_init_work __initdata;
9832 
9833 static void __init eval_map_work_func(struct work_struct *work)
9834 {
9835 	int len;
9836 
9837 	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9838 	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9839 }
9840 
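/*
 * Inserting the compiled-in eval (enum) maps can take a noticeable
 * amount of time on large configurations, so the work is queued on an
 * unbound workqueue rather than done synchronously in the initcall.
 * trace_eval_sync() below tears the workqueue down, which also waits
 * for any outstanding eval map work to finish before late initcalls
 * complete.
 */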
9841 static int __init trace_eval_init(void)
9842 {
9843 	INIT_WORK(&eval_map_work, eval_map_work_func);
9844 
9845 	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9846 	if (!eval_map_wq) {
9847 		pr_err("Unable to allocate eval_map_wq\n");
9848 		/* Do work here */
9849 		eval_map_work_func(&eval_map_work);
9850 		return -ENOMEM;
9851 	}
9852 
9853 	queue_work(eval_map_wq, &eval_map_work);
9854 	return 0;
9855 }
9856 
9857 subsys_initcall(trace_eval_init);
9858 
9859 static int __init trace_eval_sync(void)
9860 {
9861 	/* Make sure the eval map updates are finished */
9862 	if (eval_map_wq)
9863 		destroy_workqueue(eval_map_wq);
9864 	return 0;
9865 }
9866 
9867 late_initcall_sync(trace_eval_sync);
9868 
9869 
9870 #ifdef CONFIG_MODULES
9871 
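/*
 * Check whether a module is currently loaded by looking up its
 * "<module>:__this_module" symbol, which every loaded module provides.
 */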
9872 bool module_exists(const char *module)
9873 {
9874 	/* All modules have the symbol __this_module */
9875 	static const char this_mod[] = "__this_module";
9876 	char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
9877 	unsigned long val;
9878 	int n;
9879 
9880 	n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
9881 
9882 	if (n > sizeof(modname) - 1)
9883 		return false;
9884 
9885 	val = module_kallsyms_lookup_name(modname);
9886 	return val != 0;
9887 }
9888 
9889 static void trace_module_add_evals(struct module *mod)
9890 {
9891 	if (!mod->num_trace_evals)
9892 		return;
9893 
9894 	/*
9895 	 * Modules with bad taint do not have events created; do
9896 	 * not bother with their eval (enum) maps either.
9897 	 */
9898 	if (trace_module_has_bad_taint(mod))
9899 		return;
9900 
9901 	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9902 }
9903 
9904 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9905 static void trace_module_remove_evals(struct module *mod)
9906 {
9907 	union trace_eval_map_item *map;
9908 	union trace_eval_map_item **last = &trace_eval_maps;
9909 
9910 	if (!mod->num_trace_evals)
9911 		return;
9912 
9913 	guard(mutex)(&trace_eval_mutex);
9914 
9915 	map = trace_eval_maps;
9916 
9917 	while (map) {
9918 		if (map->head.mod == mod)
9919 			break;
9920 		map = trace_eval_jmp_to_tail(map);
9921 		last = &map->tail.next;
9922 		map = map->tail.next;
9923 	}
9924 	if (!map)
9925 		return;
9926 
9927 	*last = trace_eval_jmp_to_tail(map)->tail.next;
9928 	kfree(map);
9929 }
9930 #else
9931 static inline void trace_module_remove_evals(struct module *mod) { }
9932 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9933 
9934 static int trace_module_notify(struct notifier_block *self,
9935 			       unsigned long val, void *data)
9936 {
9937 	struct module *mod = data;
9938 
9939 	switch (val) {
9940 	case MODULE_STATE_COMING:
9941 		trace_module_add_evals(mod);
9942 		break;
9943 	case MODULE_STATE_GOING:
9944 		trace_module_remove_evals(mod);
9945 		break;
9946 	}
9947 
9948 	return NOTIFY_OK;
9949 }
9950 
9951 static struct notifier_block trace_module_nb = {
9952 	.notifier_call = trace_module_notify,
9953 	.priority = 0,
9954 };
9955 #endif /* CONFIG_MODULES */
9956 
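/*
 * Deferred creation of the top level tracefs files and directories.
 * When the eval map workqueue exists, tracer_init_tracefs() queues this
 * function on it so that populating tracefs does not have to finish
 * inside the fs_initcall itself.
 */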
9957 static __init void tracer_init_tracefs_work_func(struct work_struct *work)
9958 {
9959 
9960 	event_trace_init();
9961 
9962 	init_tracer_tracefs(&global_trace, NULL);
9963 	ftrace_init_tracefs_toplevel(&global_trace, NULL);
9964 
9965 	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
9966 			&global_trace, &tracing_thresh_fops);
9967 
9968 	trace_create_file("README", TRACE_MODE_READ, NULL,
9969 			NULL, &tracing_readme_fops);
9970 
9971 	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
9972 			NULL, &tracing_saved_cmdlines_fops);
9973 
9974 	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
9975 			  NULL, &tracing_saved_cmdlines_size_fops);
9976 
9977 	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
9978 			NULL, &tracing_saved_tgids_fops);
9979 
9980 	trace_create_eval_file(NULL);
9981 
9982 #ifdef CONFIG_MODULES
9983 	register_module_notifier(&trace_module_nb);
9984 #endif
9985 
9986 #ifdef CONFIG_DYNAMIC_FTRACE
9987 	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
9988 			NULL, &tracing_dyn_info_fops);
9989 #endif
9990 
9991 	create_trace_instances(NULL);
9992 
9993 	update_tracer_options(&global_trace);
9994 }
9995 
9996 static __init int tracer_init_tracefs(void)
9997 {
9998 	int ret;
9999 
10000 	trace_access_lock_init();
10001 
10002 	ret = tracing_init_dentry();
10003 	if (ret)
10004 		return 0;
10005 
10006 	if (eval_map_wq) {
10007 		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
10008 		queue_work(eval_map_wq, &tracerfs_init_work);
10009 	} else {
10010 		tracer_init_tracefs_work_func(NULL);
10011 	}
10012 
10013 	rv_init_interface();
10014 
10015 	return 0;
10016 }
10017 
10018 fs_initcall(tracer_init_tracefs);
10019 
10020 static int trace_die_panic_handler(struct notifier_block *self,
10021 				unsigned long ev, void *unused);
10022 
10023 static struct notifier_block trace_panic_notifier = {
10024 	.notifier_call = trace_die_panic_handler,
10025 	.priority = INT_MAX - 1,
10026 };
10027 
10028 static struct notifier_block trace_die_notifier = {
10029 	.notifier_call = trace_die_panic_handler,
10030 	.priority = INT_MAX - 1,
10031 };
10032 
10033 /*
10034  * The idea is to execute the following die/panic callback early, in order
10035  * to avoid showing irrelevant information in the trace (like other panic
10036  * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
10037  * warnings get disabled (to prevent potential log flooding).
10038  */
10039 static int trace_die_panic_handler(struct notifier_block *self,
10040 				unsigned long ev, void *unused)
10041 {
10042 	if (!ftrace_dump_on_oops_enabled())
10043 		return NOTIFY_DONE;
10044 
10045 	/* The die notifier requires DIE_OOPS to trigger */
10046 	if (self == &trace_die_notifier && ev != DIE_OOPS)
10047 		return NOTIFY_DONE;
10048 
10049 	ftrace_dump(DUMP_PARAM);
10050 
10051 	return NOTIFY_DONE;
10052 }
10053 
10054 /*
10055  * printk is limited to a maximum of 1024 characters; we really don't
10056  * need it that big. Nothing should be printing 1000 characters anyway.
10057  */
10058 #define TRACE_MAX_PRINT		1000
10059 
10060 /*
10061  * Define here KERN_TRACE so that we have one place to modify
10062  * it if we decide to change what log level the ftrace dump
10063  * should be at.
10064  */
10065 #define KERN_TRACE		KERN_EMERG
10066 
10067 void
10068 trace_printk_seq(struct trace_seq *s)
10069 {
10070 	/* Probably should print a warning here. */
10071 	if (s->seq.len >= TRACE_MAX_PRINT)
10072 		s->seq.len = TRACE_MAX_PRINT;
10073 
10074 	/*
10075 	 * More paranoid code. Although the buffer size is set to
10076 	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
10077 	 * an extra layer of protection.
10078 	 */
10079 	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
10080 		s->seq.len = s->seq.size - 1;
10081 
10082 	/* Should already be zero terminated, but we are paranoid. */
10083 	s->buffer[s->seq.len] = 0;
10084 
10085 	printk(KERN_TRACE "%s", s->buffer);
10086 
10087 	trace_seq_init(s);
10088 }
10089 
10090 static void trace_init_iter(struct trace_iterator *iter, struct trace_array *tr)
10091 {
10092 	iter->tr = tr;
10093 	iter->trace = iter->tr->current_trace;
10094 	iter->cpu_file = RING_BUFFER_ALL_CPUS;
10095 	iter->array_buffer = &tr->array_buffer;
10096 
10097 	if (iter->trace && iter->trace->open)
10098 		iter->trace->open(iter);
10099 
10100 	/* Annotate start of buffers if we had overruns */
10101 	if (ring_buffer_overruns(iter->array_buffer->buffer))
10102 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
10103 
10104 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
10105 	if (trace_clocks[iter->tr->clock_id].in_ns)
10106 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
10107 
10108 	/* Can not use kmalloc for iter.temp and iter.fmt */
10109 	iter->temp = static_temp_buf;
10110 	iter->temp_size = STATIC_TEMP_BUF_SIZE;
10111 	iter->fmt = static_fmt_buf;
10112 	iter->fmt_size = STATIC_FMT_BUF_SIZE;
10113 }
10114 
10115 void trace_init_global_iter(struct trace_iterator *iter)
10116 {
10117 	trace_init_iter(iter, &global_trace);
10118 }
10119 
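/*
 * Dump the contents of one trace array's ring buffer to the console at
 * KERN_EMERG level.  Tracing is turned off first and the per-cpu data
 * is marked disabled so that nothing new is added while the dump walks
 * the existing entries.
 */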
10120 static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_mode)
10121 {
10122 	/* use static because iter can be a bit big for the stack */
10123 	static struct trace_iterator iter;
10124 	unsigned int old_userobj;
10125 	unsigned long flags;
10126 	int cnt = 0, cpu;
10127 
10128 	/*
10129 	 * Always turn off tracing when we dump.
10130 	 * We don't need to show trace output of what happens
10131 	 * between multiple crashes.
10132 	 *
10133 	 * If the user does a sysrq-z, then they can re-enable
10134 	 * tracing with echo 1 > tracing_on.
10135 	 */
10136 	tracer_tracing_off(tr);
10137 
10138 	local_irq_save(flags);
10139 
10140 	/* Simulate the iterator */
10141 	trace_init_iter(&iter, tr);
10142 
10143 	for_each_tracing_cpu(cpu) {
10144 		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10145 	}
10146 
10147 	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
10148 
10149 	/* don't look at user memory in panic mode */
10150 	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
10151 
10152 	if (dump_mode == DUMP_ORIG)
10153 		iter.cpu_file = raw_smp_processor_id();
10154 	else
10155 		iter.cpu_file = RING_BUFFER_ALL_CPUS;
10156 
10157 	if (tr == &global_trace)
10158 		printk(KERN_TRACE "Dumping ftrace buffer:\n");
10159 	else
10160 		printk(KERN_TRACE "Dumping ftrace instance %s buffer:\n", tr->name);
10161 
10162 	/* Did function tracer already get disabled? */
10163 	if (ftrace_is_dead()) {
10164 		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
10165 		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
10166 	}
10167 
10168 	/*
10169 	 * We need to stop all tracing on all CPUs to read
10170 	 * the next buffer. This is a bit expensive, but is
10171 	 * not done often. We fill in all that we can read,
10172 	 * and then release the locks again.
10173 	 */
10174 
10175 	while (!trace_empty(&iter)) {
10176 
10177 		if (!cnt)
10178 			printk(KERN_TRACE "---------------------------------\n");
10179 
10180 		cnt++;
10181 
10182 		trace_iterator_reset(&iter);
10183 		iter.iter_flags |= TRACE_FILE_LAT_FMT;
10184 
10185 		if (trace_find_next_entry_inc(&iter) != NULL) {
10186 			int ret;
10187 
10188 			ret = print_trace_line(&iter);
10189 			if (ret != TRACE_TYPE_NO_CONSUME)
10190 				trace_consume(&iter);
10191 		}
10192 		touch_nmi_watchdog();
10193 
10194 		trace_printk_seq(&iter.seq);
10195 	}
10196 
10197 	if (!cnt)
10198 		printk(KERN_TRACE "   (ftrace buffer empty)\n");
10199 	else
10200 		printk(KERN_TRACE "---------------------------------\n");
10201 
10202 	tr->trace_flags |= old_userobj;
10203 
10204 	for_each_tracing_cpu(cpu) {
10205 		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10206 	}
10207 	local_irq_restore(flags);
10208 }
10209 
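/*
 * Dump buffers according to the ftrace_dump_on_oops parameter.  The
 * first comma separated token may be 0, 1, 2 or orig_cpu and applies to
 * the global buffer; the remaining tokens name instances, optionally
 * suffixed with =2 or =orig_cpu to dump only the originating CPU.
 * For example (the instance names are made up):
 *
 *	ftrace_dump_on_oops=1,foo,bar=orig_cpu
 *
 * dumps the global buffer and instance "foo" in full, and only the
 * current CPU of instance "bar".
 */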
10210 static void ftrace_dump_by_param(void)
10211 {
10212 	bool first_param = true;
10213 	char dump_param[MAX_TRACER_SIZE];
10214 	char *buf, *token, *inst_name;
10215 	struct trace_array *tr;
10216 
10217 	strscpy(dump_param, ftrace_dump_on_oops, MAX_TRACER_SIZE);
10218 	buf = dump_param;
10219 
10220 	while ((token = strsep(&buf, ",")) != NULL) {
10221 		if (first_param) {
10222 			first_param = false;
10223 			if (!strcmp("0", token))
10224 				continue;
10225 			else if (!strcmp("1", token)) {
10226 				ftrace_dump_one(&global_trace, DUMP_ALL);
10227 				continue;
10228 			}
10229 			else if (!strcmp("2", token) ||
10230 			  !strcmp("orig_cpu", token)) {
10231 				ftrace_dump_one(&global_trace, DUMP_ORIG);
10232 				continue;
10233 			}
10234 		}
10235 
10236 		inst_name = strsep(&token, "=");
10237 		tr = trace_array_find(inst_name);
10238 		if (!tr) {
10239 			printk(KERN_TRACE "Instance %s not found\n", inst_name);
10240 			continue;
10241 		}
10242 
10243 		if (token && (!strcmp("2", token) ||
10244 			  !strcmp("orig_cpu", token)))
10245 			ftrace_dump_one(tr, DUMP_ORIG);
10246 		else
10247 			ftrace_dump_one(tr, DUMP_ALL);
10248 	}
10249 }
10250 
10251 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
10252 {
10253 	static atomic_t dump_running;
10254 
10255 	/* Only allow one dump user at a time. */
10256 	if (atomic_inc_return(&dump_running) != 1) {
10257 		atomic_dec(&dump_running);
10258 		return;
10259 	}
10260 
10261 	switch (oops_dump_mode) {
10262 	case DUMP_ALL:
10263 		ftrace_dump_one(&global_trace, DUMP_ALL);
10264 		break;
10265 	case DUMP_ORIG:
10266 		ftrace_dump_one(&global_trace, DUMP_ORIG);
10267 		break;
10268 	case DUMP_PARAM:
10269 		ftrace_dump_by_param();
10270 		break;
10271 	case DUMP_NONE:
10272 		break;
10273 	default:
10274 		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
10275 		ftrace_dump_one(&global_trace, DUMP_ALL);
10276 	}
10277 
10278 	atomic_dec(&dump_running);
10279 }
10280 EXPORT_SYMBOL_GPL(ftrace_dump);
10281 
10282 #define WRITE_BUFSIZE  4096
10283 
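/*
 * Generic write helper for "command" style tracefs files (kprobe_events,
 * uprobe_events, dynamic_events, ...).  The user buffer is copied in
 * chunks of up to WRITE_BUFSIZE, split into newline terminated lines,
 * '#' comments are stripped, and each resulting command is handed to
 * @createfn.  Returns the number of bytes consumed or a negative error.
 */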
10284 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
10285 				size_t count, loff_t *ppos,
10286 				int (*createfn)(const char *))
10287 {
10288 	char *kbuf, *buf, *tmp;
10289 	int ret = 0;
10290 	size_t done = 0;
10291 	size_t size;
10292 
10293 	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
10294 	if (!kbuf)
10295 		return -ENOMEM;
10296 
10297 	while (done < count) {
10298 		size = count - done;
10299 
10300 		if (size >= WRITE_BUFSIZE)
10301 			size = WRITE_BUFSIZE - 1;
10302 
10303 		if (copy_from_user(kbuf, buffer + done, size)) {
10304 			ret = -EFAULT;
10305 			goto out;
10306 		}
10307 		kbuf[size] = '\0';
10308 		buf = kbuf;
10309 		do {
10310 			tmp = strchr(buf, '\n');
10311 			if (tmp) {
10312 				*tmp = '\0';
10313 				size = tmp - buf + 1;
10314 			} else {
10315 				size = strlen(buf);
10316 				if (done + size < count) {
10317 					if (buf != kbuf)
10318 						break;
10319 					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
10320 					pr_warn("Line length is too long: Should be less than %d\n",
10321 						WRITE_BUFSIZE - 2);
10322 					ret = -EINVAL;
10323 					goto out;
10324 				}
10325 			}
10326 			done += size;
10327 
10328 			/* Remove comments */
10329 			tmp = strchr(buf, '#');
10330 
10331 			if (tmp)
10332 				*tmp = '\0';
10333 
10334 			ret = createfn(buf);
10335 			if (ret)
10336 				goto out;
10337 			buf += size;
10338 
10339 		} while (done < count);
10340 	}
10341 	ret = done;
10342 
10343 out:
10344 	kfree(kbuf);
10345 
10346 	return ret;
10347 }
10348 
10349 #ifdef CONFIG_TRACER_MAX_TRACE
10350 __init static bool tr_needs_alloc_snapshot(const char *name)
10351 {
10352 	char *test;
10353 	int len = strlen(name);
10354 	bool ret;
10355 
10356 	if (!boot_snapshot_index)
10357 		return false;
10358 
10359 	if (strncmp(name, boot_snapshot_info, len) == 0 &&
10360 	    boot_snapshot_info[len] == '\t')
10361 		return true;
10362 
10363 	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
10364 	if (!test)
10365 		return false;
10366 
10367 	sprintf(test, "\t%s\t", name);
10368 	ret = strstr(boot_snapshot_info, test) != NULL;
10369 	kfree(test);
10370 	return ret;
10371 }
10372 
10373 __init static void do_allocate_snapshot(const char *name)
10374 {
10375 	if (!tr_needs_alloc_snapshot(name))
10376 		return;
10377 
10378 	/*
10379 	 * When allocate_snapshot is set, the next call to
10380 	 * allocate_trace_buffers() (called by trace_array_get_by_name())
10381 	 * will allocate the snapshot buffer. That will also clear
10382 	 * this flag.
10383 	 */
10384 	allocate_snapshot = true;
10385 }
10386 #else
10387 static inline void do_allocate_snapshot(const char *name) { }
10388 #endif
10389 
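/*
 * Create the trace instances requested with the trace_instance= boot
 * parameter.  Within each instance description the first comma separated
 * token is the instance name, optionally followed by '^<flag>' flags
 * (traceoff, traceprintk) and an '@' suffix naming either a physical
 * start:size for a boot mapped buffer or a reserve_mem= region; any
 * further comma separated tokens are events to enable early.  For
 * example (the name, address, size and event are made up):
 *
 *	trace_instance=foo^traceoff@0x1000000:2M,sched:sched_switch
 */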
10390 __init static void enable_instances(void)
10391 {
10392 	struct trace_array *tr;
10393 	char *curr_str;
10394 	char *name;
10395 	char *str;
10396 	char *tok;
10397 
10398 	/* A tab is always appended */
10399 	boot_instance_info[boot_instance_index - 1] = '\0';
10400 	str = boot_instance_info;
10401 
10402 	while ((curr_str = strsep(&str, "\t"))) {
10403 		phys_addr_t start = 0;
10404 		phys_addr_t size = 0;
10405 		unsigned long addr = 0;
10406 		bool traceprintk = false;
10407 		bool traceoff = false;
10408 		char *flag_delim;
10409 		char *addr_delim;
10410 
10411 		tok = strsep(&curr_str, ",");
10412 
10413 		flag_delim = strchr(tok, '^');
10414 		addr_delim = strchr(tok, '@');
10415 
10416 		if (addr_delim)
10417 			*addr_delim++ = '\0';
10418 
10419 		if (flag_delim)
10420 			*flag_delim++ = '\0';
10421 
10422 		name = tok;
10423 
10424 		if (flag_delim) {
10425 			char *flag;
10426 
10427 			while ((flag = strsep(&flag_delim, "^"))) {
10428 				if (strcmp(flag, "traceoff") == 0) {
10429 					traceoff = true;
10430 				} else if ((strcmp(flag, "printk") == 0) ||
10431 					   (strcmp(flag, "traceprintk") == 0) ||
10432 					   (strcmp(flag, "trace_printk") == 0)) {
10433 					traceprintk = true;
10434 				} else {
10435 					pr_info("Tracing: Invalid instance flag '%s' for %s\n",
10436 						flag, name);
10437 				}
10438 			}
10439 		}
10440 
10441 		tok = addr_delim;
10442 		if (tok && isdigit(*tok)) {
10443 			start = memparse(tok, &tok);
10444 			if (!start) {
10445 				pr_warn("Tracing: Invalid boot instance address for %s\n",
10446 					name);
10447 				continue;
10448 			}
10449 			if (*tok != ':') {
10450 				pr_warn("Tracing: No size specified for instance %s\n", name);
10451 				continue;
10452 			}
10453 			tok++;
10454 			size = memparse(tok, &tok);
10455 			if (!size) {
10456 				pr_warn("Tracing: Invalid boot instance size for %s\n",
10457 					name);
10458 				continue;
10459 			}
10460 		} else if (tok) {
10461 			if (!reserve_mem_find_by_name(tok, &start, &size)) {
10462 				start = 0;
10463 				pr_warn("Failed to map boot instance %s to %s\n", name, tok);
10464 				continue;
10465 			}
10466 		}
10467 
10468 		if (start) {
10469 			addr = map_pages(start, size);
10470 			if (addr) {
10471 				pr_info("Tracing: mapped boot instance %s at physical memory %pa of size 0x%lx\n",
10472 					name, &start, (unsigned long)size);
10473 			} else {
10474 				pr_warn("Tracing: Failed to map boot instance %s\n", name);
10475 				continue;
10476 			}
10477 		} else {
10478 			/* Only non-mapped buffers have snapshot buffers */
10479 			if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
10480 				do_allocate_snapshot(name);
10481 		}
10482 
10483 		tr = trace_array_create_systems(name, NULL, addr, size);
10484 		if (IS_ERR(tr)) {
10485 			pr_warn("Tracing: Failed to create instance buffer %s\n", curr_str);
10486 			continue;
10487 		}
10488 
10489 		if (traceoff)
10490 			tracer_tracing_off(tr);
10491 
10492 		if (traceprintk)
10493 			update_printk_trace(tr);
10494 
10495 		/*
10496 		 * If start is set, then this is a mapped buffer, and
10497 		 * cannot be deleted by user space, so keep the reference
10498 		 * to it.
10499 		 */
10500 		if (start) {
10501 			tr->flags |= TRACE_ARRAY_FL_BOOT | TRACE_ARRAY_FL_LAST_BOOT;
10502 			tr->ref++;
10503 		}
10504 
10505 		while ((tok = strsep(&curr_str, ","))) {
10506 			early_enable_events(tr, tok, true);
10507 		}
10508 	}
10509 }
10510 
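/*
 * Early allocation of the global trace buffers and supporting state
 * (cpumasks, saved cmdlines, the temp buffer used by event triggers,
 * CPU hotplug callbacks, panic/die notifiers).  This runs from
 * early_trace_init(), long before tracefs exists; the tracefs files are
 * created later by tracer_init_tracefs().
 */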
10511 __init static int tracer_alloc_buffers(void)
10512 {
10513 	int ring_buf_size;
10514 	int ret = -ENOMEM;
10515 
10516 
10517 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
10518 		pr_warn("Tracing disabled due to lockdown\n");
10519 		return -EPERM;
10520 	}
10521 
10522 	/*
10523 	 * Make sure we don't accidentally add more trace options
10524 	 * than we have bits for.
10525 	 */
10526 	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
10527 
10528 	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
10529 		goto out;
10530 
10531 	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
10532 		goto out_free_buffer_mask;
10533 
10534 	/* Only allocate trace_printk buffers if a trace_printk exists */
10535 	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
10536 		/* Must be called before global_trace.buffer is allocated */
10537 		trace_printk_init_buffers();
10538 
10539 	/* To save memory, keep the ring buffer size at its minimum */
10540 	if (global_trace.ring_buffer_expanded)
10541 		ring_buf_size = trace_buf_size;
10542 	else
10543 		ring_buf_size = 1;
10544 
10545 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
10546 	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
10547 
10548 	raw_spin_lock_init(&global_trace.start_lock);
10549 
10550 	/*
10551 	 * The prepare callback allocates some memory for the ring buffer. We
10552 	 * don't free the buffer if the CPU goes down. If we were to free
10553 	 * the buffer, then the user would lose any trace that was in the
10554 	 * buffer. The memory will be removed once the "instance" is removed.
10555 	 */
10556 	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
10557 				      "trace/RB:prepare", trace_rb_cpu_prepare,
10558 				      NULL);
10559 	if (ret < 0)
10560 		goto out_free_cpumask;
10561 	/* Used for event triggers */
10562 	ret = -ENOMEM;
10563 	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
10564 	if (!temp_buffer)
10565 		goto out_rm_hp_state;
10566 
10567 	if (trace_create_savedcmd() < 0)
10568 		goto out_free_temp_buffer;
10569 
10570 	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
10571 		goto out_free_savedcmd;
10572 
10573 	/* TODO: make the number of buffers hot pluggable with CPUS */
10574 	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
10575 		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
10576 		goto out_free_pipe_cpumask;
10577 	}
10578 	if (global_trace.buffer_disabled)
10579 		tracing_off();
10580 
10581 	if (trace_boot_clock) {
10582 		ret = tracing_set_clock(&global_trace, trace_boot_clock);
10583 		if (ret < 0)
10584 			pr_warn("Trace clock %s not defined, going back to default\n",
10585 				trace_boot_clock);
10586 	}
10587 
10588 	/*
10589 	 * register_tracer() might reference current_trace, so it
10590 	 * needs to be set before we register anything. This is
10591 	 * just a bootstrap of current_trace anyway.
10592 	 */
10593 	global_trace.current_trace = &nop_trace;
10594 
10595 	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
10596 #ifdef CONFIG_TRACER_MAX_TRACE
10597 	spin_lock_init(&global_trace.snapshot_trigger_lock);
10598 #endif
10599 	ftrace_init_global_array_ops(&global_trace);
10600 
10601 #ifdef CONFIG_MODULES
10602 	INIT_LIST_HEAD(&global_trace.mod_events);
10603 #endif
10604 
10605 	init_trace_flags_index(&global_trace);
10606 
10607 	register_tracer(&nop_trace);
10608 
10609 	/* Function tracing may start here (via kernel command line) */
10610 	init_function_trace();
10611 
10612 	/* All seems OK, enable tracing */
10613 	tracing_disabled = 0;
10614 
10615 	atomic_notifier_chain_register(&panic_notifier_list,
10616 				       &trace_panic_notifier);
10617 
10618 	register_die_notifier(&trace_die_notifier);
10619 
10620 	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
10621 
10622 	INIT_LIST_HEAD(&global_trace.systems);
10623 	INIT_LIST_HEAD(&global_trace.events);
10624 	INIT_LIST_HEAD(&global_trace.hist_vars);
10625 	INIT_LIST_HEAD(&global_trace.err_log);
10626 	list_add(&global_trace.list, &ftrace_trace_arrays);
10627 
10628 	apply_trace_boot_options();
10629 
10630 	register_snapshot_cmd();
10631 
10632 	return 0;
10633 
10634 out_free_pipe_cpumask:
10635 	free_cpumask_var(global_trace.pipe_cpumask);
10636 out_free_savedcmd:
10637 	trace_free_saved_cmdlines_buffer();
10638 out_free_temp_buffer:
10639 	ring_buffer_free(temp_buffer);
10640 out_rm_hp_state:
10641 	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
10642 out_free_cpumask:
10643 	free_cpumask_var(global_trace.tracing_cpumask);
10644 out_free_buffer_mask:
10645 	free_cpumask_var(tracing_buffer_mask);
10646 out:
10647 	return ret;
10648 }
10649 
10650 #ifdef CONFIG_FUNCTION_TRACER
10651 /* Used to set module cached ftrace filtering at boot up */
10652 __init struct trace_array *trace_get_global_array(void)
10653 {
10654 	return &global_trace;
10655 }
10656 #endif
10657 
10658 void __init ftrace_boot_snapshot(void)
10659 {
10660 #ifdef CONFIG_TRACER_MAX_TRACE
10661 	struct trace_array *tr;
10662 
10663 	if (!snapshot_at_boot)
10664 		return;
10665 
10666 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
10667 		if (!tr->allocated_snapshot)
10668 			continue;
10669 
10670 		tracing_snapshot_instance(tr);
10671 		trace_array_puts(tr, "** Boot snapshot taken **\n");
10672 	}
10673 #endif
10674 }
10675 
10676 void __init early_trace_init(void)
10677 {
10678 	if (tracepoint_printk) {
10679 		tracepoint_print_iter =
10680 			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
10681 		if (MEM_FAIL(!tracepoint_print_iter,
10682 			     "Failed to allocate trace iterator\n"))
10683 			tracepoint_printk = 0;
10684 		else
10685 			static_key_enable(&tracepoint_printk_key.key);
10686 	}
10687 	tracer_alloc_buffers();
10688 
10689 	init_events();
10690 }
10691 
10692 void __init trace_init(void)
10693 {
10694 	trace_event_init();
10695 
10696 	if (boot_instance_index)
10697 		enable_instances();
10698 }
10699 
10700 __init static void clear_boot_tracer(void)
10701 {
10702 	/*
10703 	 * The default bootup tracer name is stored in an init section buffer.
10704 	 * This function is called at late_initcall time. If we did not
10705 	 * find the boot tracer, then clear it out, to prevent
10706 	 * later registration from accessing the buffer that is
10707 	 * about to be freed.
10708 	 */
10709 	if (!default_bootup_tracer)
10710 		return;
10711 
10712 	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
10713 	       default_bootup_tracer);
10714 	default_bootup_tracer = NULL;
10715 }
10716 
10717 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
10718 __init static void tracing_set_default_clock(void)
10719 {
10720 	/* sched_clock_stable() is determined in late_initcall */
10721 	if (!trace_boot_clock && !sched_clock_stable()) {
10722 		if (security_locked_down(LOCKDOWN_TRACEFS)) {
10723 			pr_warn("Can not set tracing clock due to lockdown\n");
10724 			return;
10725 		}
10726 
10727 		printk(KERN_WARNING
10728 		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
10729 		       "If you want to keep using the local clock, then add:\n"
10730 		       "  \"trace_clock=local\"\n"
10731 		       "on the kernel command line\n");
10732 		tracing_set_clock(&global_trace, "global");
10733 	}
10734 }
10735 #else
10736 static inline void tracing_set_default_clock(void) { }
10737 #endif
10738 
10739 __init static int late_trace_init(void)
10740 {
10741 	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
10742 		static_key_disable(&tracepoint_printk_key.key);
10743 		tracepoint_printk = 0;
10744 	}
10745 
10746 	tracing_set_default_clock();
10747 	clear_boot_tracer();
10748 	return 0;
10749 }
10750 
10751 late_initcall_sync(late_trace_init);
10752