// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <[email protected]>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <[email protected]>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

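/*
 * Per-CPU state used by the output code below: the last pid printed on
 * this CPU, the current call depth, and the functions seen on entry so
 * that a matching return can be recognized (see print_graph_return()).
 */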
struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	/* Display function return value ? */
	{ TRACER_OPT(funcgraph-retval, TRACE_GRAPH_PRINT_RETVAL) },
	/* Display function return value in hexadecimal format ? */
	{ TRACER_OPT(funcgraph-retval-hex, TRACE_GRAPH_PRINT_RETVAL_HEX) },
#endif
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },

#ifdef CONFIG_FUNCTION_PROFILER
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
#endif

	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq() and others to fill in space in
 * the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_hardirq();
}

struct fgraph_times {
	unsigned long long		calltime;
	unsigned long long		sleeptime; /* may be optional! */
};

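/*
 * Entry handler called by the fgraph core for each traced function.
 * It applies the notrace/task/irq filters, reserves per-call data to
 * remember the entry timestamp (and, when sleep-time accounting is off,
 * the task's accumulated sleep time), and writes a TRACE_GRAPH_ENT event
 * to the ring buffer, unless tracing_thresh is set, in which case only
 * the return events are recorded.
 */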
int trace_graph_entry(struct ftrace_graph_ent *trace,
		      struct fgraph_ops *gops)
{
	unsigned long *task_var = fgraph_get_task_var(gops);
	struct trace_array *tr = gops->private;
	struct trace_array_cpu *data;
	struct fgraph_times *ftimes;
	unsigned long flags;
	unsigned int trace_ctx;
	long disabled;
	int ret;
	int cpu;

	if (*task_var & TRACE_GRAPH_NOTRACE)
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func)) {
		*task_var |= TRACE_GRAPH_NOTRACE_BIT;
		/*
		 * Need to return 1 so that the return handler is called,
		 * which will clear the NOTRACE bit.
		 */
		return 1;
	}

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(gops, trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	if (fgraph_sleep_time) {
		/* Only need to record the calltime */
		ftimes = fgraph_reserve_data(gops->idx, sizeof(ftimes->calltime));
	} else {
		ftimes = fgraph_reserve_data(gops->idx, sizeof(*ftimes));
		if (ftimes)
			ftimes->sleeptime = current->ftrace_sleeptime;
	}
	if (!ftimes)
		return 0;

	ftimes->calltime = trace_clock_local();

	/*
	 * Stop here if tracing_threshold is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		ret = __trace_graph_entry(tr, trace, trace_ctx);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned int trace_ctx)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, trace_ctx);
	__trace_graph_return(tr, &ret, trace_ctx);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned int trace_ctx)
{
	__trace_graph_function(tr, ip, trace_ctx);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

static void handle_nosleeptime(struct ftrace_graph_ret *trace,
			       struct fgraph_times *ftimes,
			       int size)
{
	if (fgraph_sleep_time || size < sizeof(*ftimes))
		return;

	ftimes->calltime += current->ftrace_sleeptime - ftimes->sleeptime;
}

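/*
 * Return handler paired with trace_graph_entry(): it retrieves the data
 * reserved at function entry, folds out any time spent sleeping when
 * sleep-time accounting is disabled, copies the saved calltime into the
 * event and writes a TRACE_GRAPH_RET event to the ring buffer.
 */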
void trace_graph_return(struct ftrace_graph_ret *trace,
			struct fgraph_ops *gops)
{
	unsigned long *task_var = fgraph_get_task_var(gops);
	struct trace_array *tr = gops->private;
	struct trace_array_cpu *data;
	struct fgraph_times *ftimes;
	unsigned long flags;
	unsigned int trace_ctx;
	long disabled;
	int size;
	int cpu;

	ftrace_graph_addr_finish(gops, trace);

	if (*task_var & TRACE_GRAPH_NOTRACE) {
		*task_var &= ~TRACE_GRAPH_NOTRACE;
		return;
	}

	ftimes = fgraph_retrieve_data(gops->idx, &size);
	if (!ftimes)
		return;

	handle_nosleeptime(trace, ftimes, size);

	trace->calltime = ftimes->calltime;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		__trace_graph_return(tr, trace, trace_ctx);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
				      struct fgraph_ops *gops)
{
	struct fgraph_times *ftimes;
	int size;

	ftrace_graph_addr_finish(gops, trace);

	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
		return;
	}

	ftimes = fgraph_retrieve_data(gops->idx, &size);
	if (!ftimes)
		return;

	handle_nosleeptime(trace, ftimes, size);

	trace->calltime = ftimes->calltime;

	if (tracing_thresh &&
	    (trace->rettime - ftimes->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace, gops);
}

static struct fgraph_ops funcgraph_ops = {
	.entryfunc = &trace_graph_entry,
	.retfunc = &trace_graph_return,
};

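/*
 * Each trace_array instance gets its own fgraph_ops so that several
 * instances of the function graph tracer can be registered at once with
 * their own filters; gops->private points back to the owning instance.
 */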
int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
{
	struct fgraph_ops *gops;

	gops = kzalloc(sizeof(*gops), GFP_KERNEL);
	if (!gops)
		return -ENOMEM;

	gops->entryfunc = &trace_graph_entry;
	gops->retfunc = &trace_graph_return;

	tr->gops = gops;
	gops->private = tr;

	fgraph_init_ops(&gops->ops, ops);

	return 0;
}

void free_fgraph_ops(struct trace_array *tr)
{
	kfree(tr->gops);
}

__init void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
{
	tr->gops = &funcgraph_ops;
	funcgraph_ops.private = tr;
	fgraph_init_ops(&tr->gops->ops, ops);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	tr->gops->entryfunc = trace_graph_entry;

	if (tracing_thresh)
		tr->gops->retfunc = trace_graph_thresh_return;
	else
		tr->gops->retfunc = trace_graph_return;

	/* Make sure the gops functions are visible before we start tracing */
	smp_mb();

	ret = register_ftrace_graph(tr->gops);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph(tr->gops);
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

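/*
 * Print the "comm-pid" field, centered in a TRACE_GRAPH_PROCINFO_LENGTH
 * (14) character wide column.  For example, pid 1755 running "sshd" is
 * rendered as "  sshd-1755   ": 9 characters of "comm-pid" padded with
 * 2 leading and 3 trailing spaces (the comm is truncated to 7 chars).
 */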
static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
	trace_seq_puts(s, " | ");
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
	/*
	 * Context-switch trace line:
	 *
	 *  ------------------------------------------
	 *  | 1)  migration/0--1  =>  sshd-1755
	 *  ------------------------------------------
	 *
	 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

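/*
 * A "leaf" is a function entry immediately followed by its own return
 * event (same pid and function, nothing nested in between).  In that
 * case both events are collapsed into a single "func();" line carrying
 * the duration, instead of a "func() {" ... "}" pair.
 */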
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only need to record its type.  Otherwise we
			 * can safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_iter_advance(ring_iter);

	return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
{
	unsigned long long usecs;

	usecs = iter->ts - iter->array_buffer->time_start;
	do_div(usecs, NSEC_PER_USEC);

	trace_seq_printf(s, "%9llu us |  ", usecs);
}

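/*
 * When an irq entry or exit falls inside the graph, it is annotated
 * with "==========>" (irq entry) and "<==========" (irq exit) markers,
 * padded into the DURATION column with the FLAGS_FILL_* values defined
 * above.
 */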
static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	addr += iter->tr->text_delta;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Relative time */
		if (flags & TRACE_GRAPH_PRINT_REL_TIME)
			print_graph_rel_time(iter, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}

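/*
 * The DURATION column produced below looks roughly like this (values
 * are only illustrative):
 *
 *     0.172 us  |
 *   + 12.933 us |
 *
 * where the leading character is the overhead mark returned by
 * trace_find_mark() when funcgraph-overhead is enabled.
 */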
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an execution-time overhead to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

#ifdef CONFIG_FUNCTION_GRAPH_RETVAL

#define __TRACE_GRAPH_PRINT_RETVAL TRACE_GRAPH_PRINT_RETVAL

static void print_graph_retval(struct trace_seq *s, unsigned long retval,
				bool leaf, void *func, bool hex_format)
{
	unsigned long err_code = 0;

	if (retval == 0 || hex_format)
		goto done;

	/* Check if the return value matches the negative format */
	if (IS_ENABLED(CONFIG_64BIT) && (retval & BIT(31)) &&
		(((u64)retval) >> 32) == 0) {
		/* sign extension */
		err_code = (unsigned long)(s32)retval;
	} else {
		err_code = retval;
	}

	if (!IS_ERR_VALUE(err_code))
		err_code = 0;

done:
	if (leaf) {
		if (hex_format || (err_code == 0))
			trace_seq_printf(s, "%ps(); /* = 0x%lx */\n",
					func, retval);
		else
			trace_seq_printf(s, "%ps(); /* = %ld */\n",
					func, err_code);
	} else {
		if (hex_format || (err_code == 0))
			trace_seq_printf(s, "} /* %ps = 0x%lx */\n",
					func, retval);
		else
			trace_seq_printf(s, "} /* %ps = %ld */\n",
					func, err_code);
	}
}

#else

#define __TRACE_GRAPH_PRINT_RETVAL 0

#define print_graph_retval(_seq, _retval, _leaf, _func, _format) do {} while (0)

#endif
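/*
 * With the funcgraph-retval option enabled, print_graph_retval() above
 * appends the return value to a leaf call or to the closing brace of a
 * nested call: decimal for values that look like error codes, and
 * hexadecimal otherwise or when funcgraph-retval-hex is also set.
 */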

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	unsigned long func;
	int cpu = iter->cpu;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	func = call->func + iter->tr->text_delta;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * Write out the function return value if the funcgraph-retval
	 * option is enabled.
	 */
	if (flags & __TRACE_GRAPH_PRINT_RETVAL)
		print_graph_retval(s, graph_ret->retval, true, (void *)func,
				!!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX));
	else
		trace_seq_printf(s, "%ps();\n", (void *)func);

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	unsigned long func;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	func = call->func + iter->tr->text_delta;

	trace_seq_printf(s, "%ps() {\n", (void *)func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Relative time */
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		print_graph_rel_time(iter, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set (interrupts are being displayed)
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	addr += iter->tr->text_delta;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set (interrupts are being displayed)
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the return of the
	 * entry that started it.  Don't trace it and clear the entry
	 * depth, since we are now out of the irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once we
	 * return to or past the entry depth, protecting us from losing
	 * the RETURN entry.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

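/*
 * Print one function entry: either as a self-contained leaf
 * ("func();" plus its duration) when get_return_for_leaf() finds the
 * matching return right behind it, or as an opening "func() {" line
 * otherwise.  A failed write is noted in iter->private so the entry can
 * be replayed by print_graph_function_flags().
 */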
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	unsigned long func;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	func = trace->func + iter->tr->text_delta;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * Always write out the function name and its return value if the
	 * funcgraph-retval option is enabled.
	 */
	if (flags & __TRACE_GRAPH_PRINT_RETVAL) {
		print_graph_retval(s, trace->retval, false, (void *)func,
			!!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX));
	} else {
		/*
		 * If the return function does not have a matching entry,
		 * then the entry was lost. Instead of just printing
		 * the '}' and letting the user guess what function this
		 * belongs to, write out the function name. Always do
		 * that if the funcgraph-tail option is enabled.
		 */
		if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
			trace_seq_puts(s, "}\n");
		else
			trace_seq_printf(s, "} /* %ps */\n", (void *)func);
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %u)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}

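/*
 * Main output entry point: replay a previously failed entry if there is
 * one, then dispatch on the event type - graph entry, graph return, or
 * anything else, which is printed as an indented comment.
 */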
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "   REL TIME     ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

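/*
 * fgraph_max_depth is exposed through the "max_graph_depth" tracefs file
 * created in init_graph_tracefs() below.  For example (assuming tracefs
 * is mounted at /sys/kernel/tracing):
 *
 *	echo 3 > /sys/kernel/tracing/max_graph_depth
 *
 * limits the trace to three levels of nesting, and 0 removes the limit.
 */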
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);