1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Function graph tracer.
5  * Copyright (c) 2008-2009 Frederic Weisbecker <[email protected]>
6  * Mostly borrowed from function tracer which
7  * is Copyright (c) Steven Rostedt <[email protected]>
8  *
9  */
10 #include <linux/uaccess.h>
11 #include <linux/ftrace.h>
12 #include <linux/interrupt.h>
13 #include <linux/slab.h>
14 #include <linux/fs.h>
15 
16 #include "trace.h"
17 #include "trace_output.h"
18 
19 /* When set, irq functions will be ignored */
20 static int ftrace_graph_skip_irqs;
21 
22 struct fgraph_cpu_data {
23 	pid_t		last_pid;
24 	int		depth;
25 	int		depth_irq;
26 	int		ignore;
27 	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
28 };
29 
30 struct fgraph_data {
31 	struct fgraph_cpu_data __percpu *cpu_data;
32 
33 	/* Place to preserve last processed entry. */
34 	struct ftrace_graph_ent_entry	ent;
35 	struct ftrace_graph_ret_entry	ret;
36 	int				failed;
37 	int				cpu;
38 };
39 
40 #define TRACE_GRAPH_INDENT	2
41 
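/*
 * Maximum call depth the graph tracer will follow; 0 means no limit.
 * Exposed through the "max_graph_depth" tracefs file created below.
 */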
42 unsigned int fgraph_max_depth;
43 
44 static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debugging purposes) */
46 	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
47 	/* Display CPU ? */
48 	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
49 	/* Display Overhead ? */
50 	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
51 	/* Display proc name/pid */
52 	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
53 	/* Display duration of execution */
54 	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
55 	/* Display absolute time of an entry */
56 	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
57 	/* Display interrupts */
58 	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
59 	/* Display function name after trailing } */
60 	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
61 #ifdef CONFIG_FUNCTION_GRAPH_RETVAL
62 	/* Display function return value ? */
63 	{ TRACER_OPT(funcgraph-retval, TRACE_GRAPH_PRINT_RETVAL) },
64 	/* Display function return value in hexadecimal format ? */
65 	{ TRACER_OPT(funcgraph-retval-hex, TRACE_GRAPH_PRINT_RETVAL_HEX) },
66 #endif
67 	/* Include sleep time (scheduled out) between entry and return */
68 	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
69 
70 #ifdef CONFIG_FUNCTION_PROFILER
71 	/* Include time within nested functions */
72 	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
73 #endif
74 
75 	{ } /* Empty entry */
76 };
77 
78 static struct tracer_flags tracer_flags = {
79 	/* Don't display overruns, proc, or tail by default */
80 	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
81 	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
82 	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
83 	.opts = trace_opts
84 };
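
/*
 * These options are toggled at run time through the trace_options tracefs
 * file, e.g. (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo function_graph > current_tracer
 *   echo funcgraph-proc > trace_options
 *   echo nofuncgraph-irqs > trace_options
 */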
85 
86 static struct trace_array *graph_array;
87 
/*
 * The DURATION column is also used to display IRQ markers. The
 * following values are used by print_graph_irq and others to fill
 * in space in the DURATION column.
 */
93 enum {
94 	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
95 	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
96 	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
97 };
98 
99 static void
100 print_graph_duration(struct trace_array *tr, unsigned long long duration,
101 		     struct trace_seq *s, u32 flags);
102 
103 int __trace_graph_entry(struct trace_array *tr,
104 				struct ftrace_graph_ent *trace,
105 				unsigned int trace_ctx)
106 {
107 	struct trace_event_call *call = &event_funcgraph_entry;
108 	struct ring_buffer_event *event;
109 	struct trace_buffer *buffer = tr->array_buffer.buffer;
110 	struct ftrace_graph_ent_entry *entry;
111 
112 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
113 					  sizeof(*entry), trace_ctx);
114 	if (!event)
115 		return 0;
116 	entry	= ring_buffer_event_data(event);
117 	entry->graph_ent			= *trace;
118 	if (!call_filter_check_discard(call, entry, buffer, event))
119 		trace_buffer_unlock_commit_nostack(buffer, event);
120 
121 	return 1;
122 }
123 
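/* Skip functions hit in hard interrupt context when funcgraph-irqs is off */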
124 static inline int ftrace_graph_ignore_irqs(void)
125 {
126 	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
127 		return 0;
128 
129 	return in_hardirq();
130 }
131 
132 int trace_graph_entry(struct ftrace_graph_ent *trace,
133 		      struct fgraph_ops *gops)
134 {
135 	struct trace_array *tr = graph_array;
136 	struct trace_array_cpu *data;
137 	unsigned long flags;
138 	unsigned int trace_ctx;
139 	long disabled;
140 	int ret;
141 	int cpu;
142 
143 	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
144 		return 0;
145 
146 	/*
147 	 * Do not trace a function if it's filtered by set_graph_notrace.
148 	 * Make the index of ret stack negative to indicate that it should
149 	 * ignore further functions.  But it needs its own ret stack entry
150 	 * to recover the original index in order to continue tracing after
151 	 * returning from the function.
152 	 */
153 	if (ftrace_graph_notrace_addr(trace->func)) {
154 		trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
		/*
		 * Need to return 1 so that the return handler is still
		 * called, which will clear the NOTRACE bit.
		 */
159 		return 1;
160 	}
161 
162 	if (!ftrace_trace_task(tr))
163 		return 0;
164 
165 	if (ftrace_graph_ignore_func(trace))
166 		return 0;
167 
168 	if (ftrace_graph_ignore_irqs())
169 		return 0;
170 
	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
175 	if (tracing_thresh)
176 		return 1;
177 
178 	local_irq_save(flags);
179 	cpu = raw_smp_processor_id();
180 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
181 	disabled = atomic_inc_return(&data->disabled);
182 	if (likely(disabled == 1)) {
183 		trace_ctx = tracing_gen_ctx_flags(flags);
184 		ret = __trace_graph_entry(tr, trace, trace_ctx);
185 	} else {
186 		ret = 0;
187 	}
188 
189 	atomic_dec(&data->disabled);
190 	local_irq_restore(flags);
191 
192 	return ret;
193 }
194 
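/*
 * Emit a matching entry/return pair with identical timestamps, so that a
 * single address shows up as a zero-duration leaf call in the graph output.
 */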
195 static void
196 __trace_graph_function(struct trace_array *tr,
197 		unsigned long ip, unsigned int trace_ctx)
198 {
199 	u64 time = trace_clock_local();
200 	struct ftrace_graph_ent ent = {
201 		.func  = ip,
202 		.depth = 0,
203 	};
204 	struct ftrace_graph_ret ret = {
205 		.func     = ip,
206 		.depth    = 0,
207 		.calltime = time,
208 		.rettime  = time,
209 	};
210 
211 	__trace_graph_entry(tr, &ent, trace_ctx);
212 	__trace_graph_return(tr, &ret, trace_ctx);
213 }
214 
215 void
216 trace_graph_function(struct trace_array *tr,
217 		unsigned long ip, unsigned long parent_ip,
218 		unsigned int trace_ctx)
219 {
220 	__trace_graph_function(tr, ip, trace_ctx);
221 }
222 
223 void __trace_graph_return(struct trace_array *tr,
224 				struct ftrace_graph_ret *trace,
225 				unsigned int trace_ctx)
226 {
227 	struct trace_event_call *call = &event_funcgraph_exit;
228 	struct ring_buffer_event *event;
229 	struct trace_buffer *buffer = tr->array_buffer.buffer;
230 	struct ftrace_graph_ret_entry *entry;
231 
232 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
233 					  sizeof(*entry), trace_ctx);
234 	if (!event)
235 		return;
236 	entry	= ring_buffer_event_data(event);
237 	entry->ret				= *trace;
238 	if (!call_filter_check_discard(call, entry, buffer, event))
239 		trace_buffer_unlock_commit_nostack(buffer, event);
240 }
241 
242 void trace_graph_return(struct ftrace_graph_ret *trace,
243 			struct fgraph_ops *gops)
244 {
245 	struct trace_array *tr = graph_array;
246 	struct trace_array_cpu *data;
247 	unsigned long flags;
248 	unsigned int trace_ctx;
249 	long disabled;
250 	int cpu;
251 
252 	ftrace_graph_addr_finish(trace);
253 
254 	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
255 		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
256 		return;
257 	}
258 
259 	local_irq_save(flags);
260 	cpu = raw_smp_processor_id();
261 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
262 	disabled = atomic_inc_return(&data->disabled);
263 	if (likely(disabled == 1)) {
264 		trace_ctx = tracing_gen_ctx_flags(flags);
265 		__trace_graph_return(tr, trace, trace_ctx);
266 	}
267 	atomic_dec(&data->disabled);
268 	local_irq_restore(flags);
269 }
270 
271 void set_graph_array(struct trace_array *tr)
272 {
273 	graph_array = tr;
274 
275 	/* Make graph_array visible before we start tracing */
276 
277 	smp_mb();
278 }
279 
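/*
 * Like trace_graph_return(), but drop return events whose duration is
 * below tracing_thresh.
 */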
280 static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
281 				      struct fgraph_ops *gops)
282 {
283 	ftrace_graph_addr_finish(trace);
284 
285 	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
286 		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
287 		return;
288 	}
289 
290 	if (tracing_thresh &&
291 	    (trace->rettime - trace->calltime < tracing_thresh))
292 		return;
293 	else
294 		trace_graph_return(trace, gops);
295 }
296 
297 static struct fgraph_ops funcgraph_thresh_ops = {
298 	.entryfunc = &trace_graph_entry,
299 	.retfunc = &trace_graph_thresh_return,
300 };
301 
302 static struct fgraph_ops funcgraph_ops = {
303 	.entryfunc = &trace_graph_entry,
304 	.retfunc = &trace_graph_return,
305 };
306 
307 static int graph_trace_init(struct trace_array *tr)
308 {
309 	int ret;
310 
311 	set_graph_array(tr);
312 	if (tracing_thresh)
313 		ret = register_ftrace_graph(&funcgraph_thresh_ops);
314 	else
315 		ret = register_ftrace_graph(&funcgraph_ops);
316 	if (ret)
317 		return ret;
318 	tracing_start_cmdline_record();
319 
320 	return 0;
321 }
322 
323 static void graph_trace_reset(struct trace_array *tr)
324 {
325 	tracing_stop_cmdline_record();
326 	if (tracing_thresh)
327 		unregister_ftrace_graph(&funcgraph_thresh_ops);
328 	else
329 		unregister_ftrace_graph(&funcgraph_ops);
330 }
331 
332 static int graph_trace_update_thresh(struct trace_array *tr)
333 {
334 	graph_trace_reset(tr);
335 	return graph_trace_init(tr);
336 }
337 
338 static int max_bytes_for_cpu;
339 
340 static void print_graph_cpu(struct trace_seq *s, int cpu)
341 {
342 	/*
343 	 * Start with a space character - to make it stand out
344 	 * to the right a bit when trace output is pasted into
345 	 * email:
346 	 */
347 	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
348 }
349 
350 #define TRACE_GRAPH_PROCINFO_LENGTH	14
351 
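/* Print "comm-pid", centered in a TRACE_GRAPH_PROCINFO_LENGTH wide column */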
352 static void print_graph_proc(struct trace_seq *s, pid_t pid)
353 {
354 	char comm[TASK_COMM_LEN];
355 	/* sign + log10(MAX_INT) + '\0' */
356 	char pid_str[11];
357 	int spaces = 0;
358 	int len;
359 	int i;
360 
361 	trace_find_cmdline(pid, comm);
362 	comm[7] = '\0';
363 	sprintf(pid_str, "%d", pid);
364 
365 	/* 1 stands for the "-" character */
366 	len = strlen(comm) + strlen(pid_str) + 1;
367 
368 	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
369 		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
370 
371 	/* First spaces to align center */
372 	for (i = 0; i < spaces / 2; i++)
373 		trace_seq_putc(s, ' ');
374 
375 	trace_seq_printf(s, "%s-%s", comm, pid_str);
376 
377 	/* Last spaces to align center */
378 	for (i = 0; i < spaces - (spaces / 2); i++)
379 		trace_seq_putc(s, ' ');
380 }
381 
382 
383 static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
384 {
385 	trace_seq_putc(s, ' ');
386 	trace_print_lat_fmt(s, entry);
387 	trace_seq_puts(s, " | ");
388 }
389 
390 /* If the pid changed since the last trace, output this event */
391 static void
392 verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
393 {
394 	pid_t prev_pid;
395 	pid_t *last_pid;
396 
397 	if (!data)
398 		return;
399 
400 	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
401 
402 	if (*last_pid == pid)
403 		return;
404 
405 	prev_pid = *last_pid;
406 	*last_pid = pid;
407 
408 	if (prev_pid == -1)
409 		return;
/*
 * Context-switch trace line:
 *
 *  ------------------------------------------
 *  | 1)  migration/0--1  =>  sshd-1755
 *  ------------------------------------------
 *
 */
418 	trace_seq_puts(s, " ------------------------------------------\n");
419 	print_graph_cpu(s, cpu);
420 	print_graph_proc(s, prev_pid);
421 	trace_seq_puts(s, " => ");
422 	print_graph_proc(s, pid);
423 	trace_seq_puts(s, "\n ------------------------------------------\n\n");
424 }
425 
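/*
 * Peek at the event following @curr. If it is the matching return event
 * (same pid and function), the call is a leaf: consume the return event
 * and hand it back so the entry and return can be printed on one line.
 * Otherwise return NULL and leave the entry to be printed as a nested call.
 */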
426 static struct ftrace_graph_ret_entry *
427 get_return_for_leaf(struct trace_iterator *iter,
428 		struct ftrace_graph_ent_entry *curr)
429 {
430 	struct fgraph_data *data = iter->private;
431 	struct ring_buffer_iter *ring_iter = NULL;
432 	struct ring_buffer_event *event;
433 	struct ftrace_graph_ret_entry *next;
434 
435 	/*
436 	 * If the previous output failed to write to the seq buffer,
437 	 * then we just reuse the data from before.
438 	 */
439 	if (data && data->failed) {
440 		curr = &data->ent;
441 		next = &data->ret;
442 	} else {
443 
444 		ring_iter = trace_buffer_iter(iter, iter->cpu);
445 
446 		/* First peek to compare current entry and the next one */
447 		if (ring_iter)
448 			event = ring_buffer_iter_peek(ring_iter, NULL);
449 		else {
450 			/*
451 			 * We need to consume the current entry to see
452 			 * the next one.
453 			 */
454 			ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
455 					    NULL, NULL);
456 			event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
457 						 NULL, NULL);
458 		}
459 
460 		if (!event)
461 			return NULL;
462 
463 		next = ring_buffer_event_data(event);
464 
465 		if (data) {
466 			/*
467 			 * Save current and next entries for later reference
468 			 * if the output fails.
469 			 */
470 			data->ent = *curr;
			/*
			 * If the next event is a return type, we can safely
			 * copy the entire event. Otherwise we only care
			 * about what type it is.
			 */
476 			if (next->ent.type == TRACE_GRAPH_RET)
477 				data->ret = *next;
478 			else
479 				data->ret.ent.type = next->ent.type;
480 		}
481 	}
482 
483 	if (next->ent.type != TRACE_GRAPH_RET)
484 		return NULL;
485 
486 	if (curr->ent.pid != next->ent.pid ||
487 			curr->graph_ent.func != next->ret.func)
488 		return NULL;
489 
490 	/* this is a leaf, now advance the iterator */
491 	if (ring_iter)
492 		ring_buffer_iter_advance(ring_iter);
493 
494 	return next;
495 }
496 
497 static void print_graph_abs_time(u64 t, struct trace_seq *s)
498 {
499 	unsigned long usecs_rem;
500 
501 	usecs_rem = do_div(t, NSEC_PER_SEC);
502 	usecs_rem /= 1000;
503 
504 	trace_seq_printf(s, "%5lu.%06lu |  ",
505 			 (unsigned long)t, usecs_rem);
506 }
507 
508 static void
509 print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
510 {
511 	unsigned long long usecs;
512 
513 	usecs = iter->ts - iter->array_buffer->time_start;
514 	do_div(usecs, NSEC_PER_USEC);
515 
516 	trace_seq_printf(s, "%9llu us |  ", usecs);
517 }
518 
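/*
 * Print an "==========>" or "<==========" marker when entering or leaving
 * functions located in the irqentry text section.
 */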
519 static void
520 print_graph_irq(struct trace_iterator *iter, unsigned long addr,
521 		enum trace_type type, int cpu, pid_t pid, u32 flags)
522 {
523 	struct trace_array *tr = iter->tr;
524 	struct trace_seq *s = &iter->seq;
525 	struct trace_entry *ent = iter->ent;
526 
527 	if (addr < (unsigned long)__irqentry_text_start ||
528 		addr >= (unsigned long)__irqentry_text_end)
529 		return;
530 
531 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
532 		/* Absolute time */
533 		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
534 			print_graph_abs_time(iter->ts, s);
535 
536 		/* Relative time */
537 		if (flags & TRACE_GRAPH_PRINT_REL_TIME)
538 			print_graph_rel_time(iter, s);
539 
540 		/* Cpu */
541 		if (flags & TRACE_GRAPH_PRINT_CPU)
542 			print_graph_cpu(s, cpu);
543 
544 		/* Proc */
545 		if (flags & TRACE_GRAPH_PRINT_PROC) {
546 			print_graph_proc(s, pid);
547 			trace_seq_puts(s, " | ");
548 		}
549 
550 		/* Latency format */
551 		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
552 			print_graph_lat_fmt(s, ent);
553 	}
554 
555 	/* No overhead */
556 	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);
557 
558 	if (type == TRACE_GRAPH_ENT)
559 		trace_seq_puts(s, "==========>");
560 	else
561 		trace_seq_puts(s, "<==========");
562 
563 	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
564 	trace_seq_putc(s, '\n');
565 }
566 
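/*
 * Format a duration given in nanoseconds as microseconds with up to three
 * fractional digits, padded to keep the column aligned; e.g. 1234567 ns
 * is printed as "1234.567 us".
 */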
567 void
568 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
569 {
570 	unsigned long nsecs_rem = do_div(duration, 1000);
571 	/* log10(ULONG_MAX) + '\0' */
572 	char usecs_str[21];
573 	char nsecs_str[5];
574 	int len;
575 	int i;
576 
577 	sprintf(usecs_str, "%lu", (unsigned long) duration);
578 
	/* Print usecs */
580 	trace_seq_printf(s, "%s", usecs_str);
581 
582 	len = strlen(usecs_str);
583 
	/* Print nsecs (we don't want to exceed 7 digits) */
585 	if (len < 7) {
586 		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
587 
588 		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
589 		trace_seq_printf(s, ".%s", nsecs_str);
590 		len += strlen(nsecs_str) + 1;
591 	}
592 
593 	trace_seq_puts(s, " us ");
594 
595 	/* Print remaining spaces to fit the row's width */
596 	for (i = len; i < 8; i++)
597 		trace_seq_putc(s, ' ');
598 }
599 
600 static void
601 print_graph_duration(struct trace_array *tr, unsigned long long duration,
602 		     struct trace_seq *s, u32 flags)
603 {
604 	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
605 	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
606 		return;
607 
	/* No real data, just filling the column with spaces */
609 	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
610 	case FLAGS_FILL_FULL:
611 		trace_seq_puts(s, "              |  ");
612 		return;
613 	case FLAGS_FILL_START:
614 		trace_seq_puts(s, "  ");
615 		return;
616 	case FLAGS_FILL_END:
617 		trace_seq_puts(s, " |");
618 		return;
619 	}
620 
	/* Signal the execution-time overhead to the output */
622 	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
623 		trace_seq_printf(s, "%c ", trace_find_mark(duration));
624 	else
625 		trace_seq_puts(s, "  ");
626 
627 	trace_print_graph_duration(duration, s);
628 	trace_seq_puts(s, "|  ");
629 }
630 
631 #ifdef CONFIG_FUNCTION_GRAPH_RETVAL
632 
633 #define __TRACE_GRAPH_PRINT_RETVAL TRACE_GRAPH_PRINT_RETVAL
634 
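/*
 * Print a function's return value. Unless hex output is requested, a
 * non-zero value in the error range (sign-extending a 32-bit negative on
 * 64-bit kernels first) is printed as a signed decimal error code; any
 * other value is printed in hex.
 */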
635 static void print_graph_retval(struct trace_seq *s, unsigned long retval,
636 				bool leaf, void *func, bool hex_format)
637 {
638 	unsigned long err_code = 0;
639 
640 	if (retval == 0 || hex_format)
641 		goto done;
642 
	/* Check if the return value is a 32-bit negative that needs sign extension */
644 	if (IS_ENABLED(CONFIG_64BIT) && (retval & BIT(31)) &&
645 		(((u64)retval) >> 32) == 0) {
646 		/* sign extension */
647 		err_code = (unsigned long)(s32)retval;
648 	} else {
649 		err_code = retval;
650 	}
651 
652 	if (!IS_ERR_VALUE(err_code))
653 		err_code = 0;
654 
655 done:
656 	if (leaf) {
657 		if (hex_format || (err_code == 0))
658 			trace_seq_printf(s, "%ps(); /* = 0x%lx */\n",
659 					func, retval);
660 		else
661 			trace_seq_printf(s, "%ps(); /* = %ld */\n",
662 					func, err_code);
663 	} else {
664 		if (hex_format || (err_code == 0))
665 			trace_seq_printf(s, "} /* %ps = 0x%lx */\n",
666 					func, retval);
667 		else
668 			trace_seq_printf(s, "} /* %ps = %ld */\n",
669 					func, err_code);
670 	}
671 }
672 
673 #else
674 
675 #define __TRACE_GRAPH_PRINT_RETVAL 0
676 
677 #define print_graph_retval(_seq, _retval, _leaf, _func, _format) do {} while (0)
678 
679 #endif
680 
681 /* Case of a leaf function on its call entry */
682 static enum print_line_t
683 print_graph_entry_leaf(struct trace_iterator *iter,
684 		struct ftrace_graph_ent_entry *entry,
685 		struct ftrace_graph_ret_entry *ret_entry,
686 		struct trace_seq *s, u32 flags)
687 {
688 	struct fgraph_data *data = iter->private;
689 	struct trace_array *tr = iter->tr;
690 	struct ftrace_graph_ret *graph_ret;
691 	struct ftrace_graph_ent *call;
692 	unsigned long long duration;
693 	int cpu = iter->cpu;
694 	int i;
695 
696 	graph_ret = &ret_entry->ret;
697 	call = &entry->graph_ent;
698 	duration = graph_ret->rettime - graph_ret->calltime;
699 
700 	if (data) {
701 		struct fgraph_cpu_data *cpu_data;
702 
703 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
704 
		/*
		 * Comments display at depth + 1. Since this is a
		 * leaf function, keep the comments at the same
		 * depth as this function.
		 */
710 		cpu_data->depth = call->depth - 1;
711 
712 		/* No need to keep this function around for this depth */
713 		if (call->depth < FTRACE_RETFUNC_DEPTH &&
714 		    !WARN_ON_ONCE(call->depth < 0))
715 			cpu_data->enter_funcs[call->depth] = 0;
716 	}
717 
718 	/* Overhead and duration */
719 	print_graph_duration(tr, duration, s, flags);
720 
721 	/* Function */
722 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
723 		trace_seq_putc(s, ' ');
724 
	/*
	 * Write out the function return value if the funcgraph-retval
	 * option is enabled.
	 */
729 	if (flags & __TRACE_GRAPH_PRINT_RETVAL)
730 		print_graph_retval(s, graph_ret->retval, true, (void *)call->func,
731 				!!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX));
732 	else
733 		trace_seq_printf(s, "%ps();\n", (void *)call->func);
734 
735 	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
736 			cpu, iter->ent->pid, flags);
737 
738 	return trace_handle_return(s);
739 }
740 
741 static enum print_line_t
742 print_graph_entry_nested(struct trace_iterator *iter,
743 			 struct ftrace_graph_ent_entry *entry,
744 			 struct trace_seq *s, int cpu, u32 flags)
745 {
746 	struct ftrace_graph_ent *call = &entry->graph_ent;
747 	struct fgraph_data *data = iter->private;
748 	struct trace_array *tr = iter->tr;
749 	int i;
750 
751 	if (data) {
752 		struct fgraph_cpu_data *cpu_data;
753 		int cpu = iter->cpu;
754 
755 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
756 		cpu_data->depth = call->depth;
757 
758 		/* Save this function pointer to see if the exit matches */
759 		if (call->depth < FTRACE_RETFUNC_DEPTH &&
760 		    !WARN_ON_ONCE(call->depth < 0))
761 			cpu_data->enter_funcs[call->depth] = call->func;
762 	}
763 
764 	/* No time */
765 	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
766 
767 	/* Function */
768 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
769 		trace_seq_putc(s, ' ');
770 
771 	trace_seq_printf(s, "%ps() {\n", (void *)call->func);
772 
773 	if (trace_seq_has_overflowed(s))
774 		return TRACE_TYPE_PARTIAL_LINE;
775 
776 	/*
777 	 * we already consumed the current entry to check the next one
778 	 * and see if this is a leaf.
779 	 */
780 	return TRACE_TYPE_NO_CONSUME;
781 }
782 
783 static void
784 print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
785 		     int type, unsigned long addr, u32 flags)
786 {
787 	struct fgraph_data *data = iter->private;
788 	struct trace_entry *ent = iter->ent;
789 	struct trace_array *tr = iter->tr;
790 	int cpu = iter->cpu;
791 
792 	/* Pid */
793 	verif_pid(s, ent->pid, cpu, data);
794 
795 	if (type)
796 		/* Interrupt */
797 		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
798 
799 	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
800 		return;
801 
802 	/* Absolute time */
803 	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
804 		print_graph_abs_time(iter->ts, s);
805 
806 	/* Relative time */
807 	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
808 		print_graph_rel_time(iter, s);
809 
810 	/* Cpu */
811 	if (flags & TRACE_GRAPH_PRINT_CPU)
812 		print_graph_cpu(s, cpu);
813 
814 	/* Proc */
815 	if (flags & TRACE_GRAPH_PRINT_PROC) {
816 		print_graph_proc(s, ent->pid);
817 		trace_seq_puts(s, " | ");
818 	}
819 
820 	/* Latency format */
821 	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
822 		print_graph_lat_fmt(s, ent);
823 
824 	return;
825 }
826 
/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set (irqs are displayed)
 *  - we are not inside irq code
 */
838 static int
839 check_irq_entry(struct trace_iterator *iter, u32 flags,
840 		unsigned long addr, int depth)
841 {
842 	int cpu = iter->cpu;
843 	int *depth_irq;
844 	struct fgraph_data *data = iter->private;
845 
846 	/*
847 	 * If we are either displaying irqs, or we got called as
848 	 * a graph event and private data does not exist,
849 	 * then we bypass the irq check.
850 	 */
851 	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
852 	    (!data))
853 		return 0;
854 
855 	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
856 
857 	/*
858 	 * We are inside the irq code
859 	 */
860 	if (*depth_irq >= 0)
861 		return 1;
862 
863 	if ((addr < (unsigned long)__irqentry_text_start) ||
864 	    (addr >= (unsigned long)__irqentry_text_end))
865 		return 0;
866 
867 	/*
868 	 * We are entering irq code.
869 	 */
870 	*depth_irq = depth;
871 	return 1;
872 }
873 
/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set (irqs are displayed)
 *  - we are not inside irq code
 */
885 static int
886 check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
887 {
888 	int cpu = iter->cpu;
889 	int *depth_irq;
890 	struct fgraph_data *data = iter->private;
891 
892 	/*
893 	 * If we are either displaying irqs, or we got called as
894 	 * a graph event and private data does not exist,
895 	 * then we bypass the irq check.
896 	 */
897 	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
898 	    (!data))
899 		return 0;
900 
901 	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
902 
903 	/*
904 	 * We are not inside the irq code.
905 	 */
906 	if (*depth_irq == -1)
907 		return 0;
908 
	/*
	 * We are inside the irq code, and this is the return of the
	 * function that entered it. Don't trace it and clear the entry
	 * depth, since we are now out of the irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once we
	 * return back to (or past) the entry depth, which protects us
	 * if that RETURN entry itself was lost.
	 */
918 	if (*depth_irq >= depth) {
919 		*depth_irq = -1;
920 		return 1;
921 	}
922 
923 	/*
924 	 * We are inside the irq code, and this is not the entry.
925 	 */
926 	return 1;
927 }
928 
929 static enum print_line_t
930 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
931 			struct trace_iterator *iter, u32 flags)
932 {
933 	struct fgraph_data *data = iter->private;
934 	struct ftrace_graph_ent *call = &field->graph_ent;
935 	struct ftrace_graph_ret_entry *leaf_ret;
936 	static enum print_line_t ret;
937 	int cpu = iter->cpu;
938 
939 	if (check_irq_entry(iter, flags, call->func, call->depth))
940 		return TRACE_TYPE_HANDLED;
941 
942 	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
943 
944 	leaf_ret = get_return_for_leaf(iter, field);
945 	if (leaf_ret)
946 		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
947 	else
948 		ret = print_graph_entry_nested(iter, field, s, cpu, flags);
949 
950 	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
955 		if (s->full) {
956 			data->failed = 1;
957 			data->cpu = cpu;
958 		} else
959 			data->failed = 0;
960 	}
961 
962 	return ret;
963 }
964 
965 static enum print_line_t
966 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
967 		   struct trace_entry *ent, struct trace_iterator *iter,
968 		   u32 flags)
969 {
970 	unsigned long long duration = trace->rettime - trace->calltime;
971 	struct fgraph_data *data = iter->private;
972 	struct trace_array *tr = iter->tr;
973 	pid_t pid = ent->pid;
974 	int cpu = iter->cpu;
975 	int func_match = 1;
976 	int i;
977 
978 	if (check_irq_return(iter, flags, trace->depth))
979 		return TRACE_TYPE_HANDLED;
980 
981 	if (data) {
982 		struct fgraph_cpu_data *cpu_data;
983 		int cpu = iter->cpu;
984 
985 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
986 
		/*
		 * Comments display at depth + 1. This is the return
		 * from a function, so we now want the comments to
		 * display at the same level as the closing bracket.
		 */
992 		cpu_data->depth = trace->depth - 1;
993 
994 		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
995 		    !WARN_ON_ONCE(trace->depth < 0)) {
996 			if (cpu_data->enter_funcs[trace->depth] != trace->func)
997 				func_match = 0;
998 			cpu_data->enter_funcs[trace->depth] = 0;
999 		}
1000 	}
1001 
1002 	print_graph_prologue(iter, s, 0, 0, flags);
1003 
1004 	/* Overhead and duration */
1005 	print_graph_duration(tr, duration, s, flags);
1006 
1007 	/* Closing brace */
1008 	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
1009 		trace_seq_putc(s, ' ');
1010 
	/*
	 * Always write out the function name and its return value if the
	 * funcgraph-retval option is enabled.
	 */
1015 	if (flags & __TRACE_GRAPH_PRINT_RETVAL) {
1016 		print_graph_retval(s, trace->retval, false, (void *)trace->func,
1017 			!!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX));
1018 	} else {
1019 		/*
1020 		 * If the return function does not have a matching entry,
1021 		 * then the entry was lost. Instead of just printing
1022 		 * the '}' and letting the user guess what function this
1023 		 * belongs to, write out the function name. Always do
1024 		 * that if the funcgraph-tail option is enabled.
1025 		 */
1026 		if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
1027 			trace_seq_puts(s, "}\n");
1028 		else
1029 			trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
1030 	}
1031 
1032 	/* Overrun */
1033 	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
1034 		trace_seq_printf(s, " (Overruns: %u)\n",
1035 				 trace->overrun);
1036 
1037 	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
1038 			cpu, pid, flags);
1039 
1040 	return trace_handle_return(s);
1041 }
1042 
1043 static enum print_line_t
1044 print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1045 		    struct trace_iterator *iter, u32 flags)
1046 {
1047 	struct trace_array *tr = iter->tr;
1048 	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
1049 	struct fgraph_data *data = iter->private;
1050 	struct trace_event *event;
1051 	int depth = 0;
1052 	int ret;
1053 	int i;
1054 
1055 	if (data)
1056 		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
1057 
1058 	print_graph_prologue(iter, s, 0, 0, flags);
1059 
1060 	/* No time */
1061 	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
1062 
1063 	/* Indentation */
1064 	if (depth > 0)
1065 		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
1066 			trace_seq_putc(s, ' ');
1067 
1068 	/* The comment */
1069 	trace_seq_puts(s, "/* ");
1070 
1071 	switch (iter->ent->type) {
1072 	case TRACE_BPUTS:
1073 		ret = trace_print_bputs_msg_only(iter);
1074 		if (ret != TRACE_TYPE_HANDLED)
1075 			return ret;
1076 		break;
1077 	case TRACE_BPRINT:
1078 		ret = trace_print_bprintk_msg_only(iter);
1079 		if (ret != TRACE_TYPE_HANDLED)
1080 			return ret;
1081 		break;
1082 	case TRACE_PRINT:
1083 		ret = trace_print_printk_msg_only(iter);
1084 		if (ret != TRACE_TYPE_HANDLED)
1085 			return ret;
1086 		break;
1087 	default:
1088 		event = ftrace_find_event(ent->type);
1089 		if (!event)
1090 			return TRACE_TYPE_UNHANDLED;
1091 
1092 		ret = event->funcs->trace(iter, sym_flags, event);
1093 		if (ret != TRACE_TYPE_HANDLED)
1094 			return ret;
1095 	}
1096 
1097 	if (trace_seq_has_overflowed(s))
1098 		goto out;
1099 
1100 	/* Strip ending newline */
1101 	if (s->buffer[s->seq.len - 1] == '\n') {
1102 		s->buffer[s->seq.len - 1] = '\0';
1103 		s->seq.len--;
1104 	}
1105 
1106 	trace_seq_puts(s, " */\n");
1107  out:
1108 	return trace_handle_return(s);
1109 }
1110 
1111 
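/*
 * Top-level graph output routine: dispatch on the trace entry type and
 * print function entries and returns, emitting most other event types as
 * comments embedded in the graph.
 */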
1112 enum print_line_t
1113 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1114 {
1115 	struct ftrace_graph_ent_entry *field;
1116 	struct fgraph_data *data = iter->private;
1117 	struct trace_entry *entry = iter->ent;
1118 	struct trace_seq *s = &iter->seq;
1119 	int cpu = iter->cpu;
1120 	int ret;
1121 
1122 	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1123 		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1124 		return TRACE_TYPE_HANDLED;
1125 	}
1126 
	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry, which would otherwise
	 * never be emitted.
	 */
1131 	if (data && data->failed) {
1132 		field = &data->ent;
1133 		iter->cpu = data->cpu;
1134 		ret = print_graph_entry(field, s, iter, flags);
1135 		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1136 			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1137 			ret = TRACE_TYPE_NO_CONSUME;
1138 		}
1139 		iter->cpu = cpu;
1140 		return ret;
1141 	}
1142 
1143 	switch (entry->type) {
1144 	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can be safely saved on the stack.
		 */
1151 		struct ftrace_graph_ent_entry saved;
1152 		trace_assign_type(field, entry);
1153 		saved = *field;
1154 		return print_graph_entry(&saved, s, iter, flags);
1155 	}
1156 	case TRACE_GRAPH_RET: {
1157 		struct ftrace_graph_ret_entry *field;
1158 		trace_assign_type(field, entry);
1159 		return print_graph_return(&field->ret, s, entry, iter, flags);
1160 	}
1161 	case TRACE_STACK:
1162 	case TRACE_FN:
		/* don't trace stack and functions as comments */
1164 		return TRACE_TYPE_UNHANDLED;
1165 
1166 	default:
1167 		return print_graph_comment(s, entry, iter, flags);
1168 	}
1169 
1170 	return TRACE_TYPE_HANDLED;
1171 }
1172 
1173 static enum print_line_t
1174 print_graph_function(struct trace_iterator *iter)
1175 {
1176 	return print_graph_function_flags(iter, tracer_flags.val);
1177 }
1178 
1179 static enum print_line_t
1180 print_graph_function_event(struct trace_iterator *iter, int flags,
1181 			   struct trace_event *event)
1182 {
1183 	return print_graph_function(iter);
1184 }
1185 
1186 static void print_lat_header(struct seq_file *s, u32 flags)
1187 {
1188 	static const char spaces[] = "                "	/* 16 spaces */
1189 		"    "					/* 4 spaces */
1190 		"                 ";			/* 17 spaces */
1191 	int size = 0;
1192 
1193 	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1194 		size += 16;
1195 	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1196 		size += 16;
1197 	if (flags & TRACE_GRAPH_PRINT_CPU)
1198 		size += 4;
1199 	if (flags & TRACE_GRAPH_PRINT_PROC)
1200 		size += 17;
1201 
1202 	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
1203 	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
1204 	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1205 	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
1206 	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
1207 }
1208 
1209 static void __print_graph_headers_flags(struct trace_array *tr,
1210 					struct seq_file *s, u32 flags)
1211 {
1212 	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;
1213 
1214 	if (lat)
1215 		print_lat_header(s, flags);
1216 
1217 	/* 1st line */
1218 	seq_putc(s, '#');
1219 	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1220 		seq_puts(s, "     TIME       ");
1221 	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1222 		seq_puts(s, "   REL TIME     ");
1223 	if (flags & TRACE_GRAPH_PRINT_CPU)
1224 		seq_puts(s, " CPU");
1225 	if (flags & TRACE_GRAPH_PRINT_PROC)
1226 		seq_puts(s, "  TASK/PID       ");
1227 	if (lat)
1228 		seq_puts(s, "||||   ");
1229 	if (flags & TRACE_GRAPH_PRINT_DURATION)
1230 		seq_puts(s, "  DURATION   ");
1231 	seq_puts(s, "               FUNCTION CALLS\n");
1232 
1233 	/* 2nd line */
1234 	seq_putc(s, '#');
1235 	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1236 		seq_puts(s, "      |         ");
1237 	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1238 		seq_puts(s, "      |         ");
1239 	if (flags & TRACE_GRAPH_PRINT_CPU)
1240 		seq_puts(s, " |  ");
1241 	if (flags & TRACE_GRAPH_PRINT_PROC)
1242 		seq_puts(s, "   |    |        ");
1243 	if (lat)
1244 		seq_puts(s, "||||   ");
1245 	if (flags & TRACE_GRAPH_PRINT_DURATION)
1246 		seq_puts(s, "   |   |      ");
1247 	seq_puts(s, "               |   |   |   |\n");
1248 }
1249 
1250 static void print_graph_headers(struct seq_file *s)
1251 {
1252 	print_graph_headers_flags(s, tracer_flags.val);
1253 }
1254 
1255 void print_graph_headers_flags(struct seq_file *s, u32 flags)
1256 {
1257 	struct trace_iterator *iter = s->private;
1258 	struct trace_array *tr = iter->tr;
1259 
1260 	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
1261 		return;
1262 
1263 	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
1264 		/* print nothing if the buffers are empty */
1265 		if (trace_empty(iter))
1266 			return;
1267 
1268 		print_trace_header(s, iter);
1269 	}
1270 
1271 	__print_graph_headers_flags(tr, s, flags);
1272 }
1273 
1274 void graph_trace_open(struct trace_iterator *iter)
1275 {
1276 	/* pid and depth on the last trace processed */
1277 	struct fgraph_data *data;
1278 	gfp_t gfpflags;
1279 	int cpu;
1280 
1281 	iter->private = NULL;
1282 
1283 	/* We can be called in atomic context via ftrace_dump() */
1284 	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
1285 
1286 	data = kzalloc(sizeof(*data), gfpflags);
1287 	if (!data)
1288 		goto out_err;
1289 
1290 	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
1291 	if (!data->cpu_data)
1292 		goto out_err_free;
1293 
1294 	for_each_possible_cpu(cpu) {
1295 		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1296 		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1297 		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1298 		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1299 
1300 		*pid = -1;
1301 		*depth = 0;
1302 		*ignore = 0;
1303 		*depth_irq = -1;
1304 	}
1305 
1306 	iter->private = data;
1307 
1308 	return;
1309 
1310  out_err_free:
1311 	kfree(data);
1312  out_err:
1313 	pr_warn("function graph tracer: not enough memory\n");
1314 }
1315 
1316 void graph_trace_close(struct trace_iterator *iter)
1317 {
1318 	struct fgraph_data *data = iter->private;
1319 
1320 	if (data) {
1321 		free_percpu(data->cpu_data);
1322 		kfree(data);
1323 	}
1324 }
1325 
1326 static int
1327 func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1328 {
1329 	if (bit == TRACE_GRAPH_PRINT_IRQS)
1330 		ftrace_graph_skip_irqs = !set;
1331 
1332 	if (bit == TRACE_GRAPH_SLEEP_TIME)
1333 		ftrace_graph_sleep_time_control(set);
1334 
1335 	if (bit == TRACE_GRAPH_GRAPH_TIME)
1336 		ftrace_graph_graph_time_control(set);
1337 
1338 	return 0;
1339 }
1340 
1341 static struct trace_event_functions graph_functions = {
1342 	.trace		= print_graph_function_event,
1343 };
1344 
1345 static struct trace_event graph_trace_entry_event = {
1346 	.type		= TRACE_GRAPH_ENT,
1347 	.funcs		= &graph_functions,
1348 };
1349 
1350 static struct trace_event graph_trace_ret_event = {
1351 	.type		= TRACE_GRAPH_RET,
1352 	.funcs		= &graph_functions
1353 };
1354 
1355 static struct tracer graph_trace __tracer_data = {
1356 	.name		= "function_graph",
1357 	.update_thresh	= graph_trace_update_thresh,
1358 	.open		= graph_trace_open,
1359 	.pipe_open	= graph_trace_open,
1360 	.close		= graph_trace_close,
1361 	.pipe_close	= graph_trace_close,
1362 	.init		= graph_trace_init,
1363 	.reset		= graph_trace_reset,
1364 	.print_line	= print_graph_function,
1365 	.print_header	= print_graph_headers,
1366 	.flags		= &tracer_flags,
1367 	.set_flag	= func_graph_set_flag,
1368 #ifdef CONFIG_FTRACE_SELFTEST
1369 	.selftest	= trace_selftest_startup_function_graph,
1370 #endif
1371 };
1372 
1373 
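/*
 * "max_graph_depth" tracefs file: read and write fgraph_max_depth, which
 * limits how deeply nested calls are traced (0 means no limit).
 */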
1374 static ssize_t
1375 graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
1376 		  loff_t *ppos)
1377 {
1378 	unsigned long val;
1379 	int ret;
1380 
1381 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1382 	if (ret)
1383 		return ret;
1384 
1385 	fgraph_max_depth = val;
1386 
1387 	*ppos += cnt;
1388 
1389 	return cnt;
1390 }
1391 
1392 static ssize_t
1393 graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
1394 		 loff_t *ppos)
1395 {
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
1397 	int n;
1398 
1399 	n = sprintf(buf, "%d\n", fgraph_max_depth);
1400 
1401 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
1402 }
1403 
1404 static const struct file_operations graph_depth_fops = {
1405 	.open		= tracing_open_generic,
1406 	.write		= graph_depth_write,
1407 	.read		= graph_depth_read,
1408 	.llseek		= generic_file_llseek,
1409 };
1410 
1411 static __init int init_graph_tracefs(void)
1412 {
1413 	int ret;
1414 
1415 	ret = tracing_init_dentry();
1416 	if (ret)
1417 		return 0;
1418 
1419 	trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
1420 			  NULL, &graph_depth_fops);
1421 
1422 	return 0;
1423 }
1424 fs_initcall(init_graph_tracefs);
1425 
1426 static __init int init_graph_trace(void)
1427 {
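	/* snprintf(NULL, 0, ...) returns the width needed to print the largest CPU id */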
1428 	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);
1429 
1430 	if (!register_trace_event(&graph_trace_entry_event)) {
1431 		pr_warn("Warning: could not register graph trace events\n");
1432 		return 1;
1433 	}
1434 
1435 	if (!register_trace_event(&graph_trace_ret_event)) {
1436 		pr_warn("Warning: could not register graph trace events\n");
1437 		return 1;
1438 	}
1439 
1440 	return register_tracer(&graph_trace);
1441 }
1442 
1443 core_initcall(init_graph_trace);
1444