// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <[email protected]>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <[email protected]>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	union {
		struct ftrace_graph_ent_entry	ent;
		struct fgraph_retaddr_ent_entry	rent;
	} ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	/* Display function return value ? */
	{ TRACER_OPT(funcgraph-retval, TRACE_GRAPH_PRINT_RETVAL) },
	/* Display function return value in hexadecimal format ? */
	{ TRACER_OPT(funcgraph-retval-hex, TRACE_GRAPH_PRINT_RETVAL_HEX) },
#endif
#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
	/* Display function return address ? */
	{ TRACER_OPT(funcgraph-retaddr, TRACE_GRAPH_PRINT_RETADDR) },
#endif
#ifdef CONFIG_FUNCTION_TRACE_ARGS
	/* Display function arguments ? */
	{ TRACER_OPT(funcgraph-args, TRACE_GRAPH_ARGS) },
#endif
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },

#ifdef CONFIG_FUNCTION_PROFILER
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
#endif

	{ } /* Empty entry */
};
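
/*
 * The options above are toggled at runtime through the trace_options
 * file in tracefs, e.g. (assuming tracefs is mounted at its usual
 * /sys/kernel/tracing location):
 *
 *   echo funcgraph-proc   > /sys/kernel/tracing/trace_options
 *   echo nofuncgraph-irqs > /sys/kernel/tracing/trace_options
 *
 * The "no" prefix clears an option; see func_graph_set_flag() below for
 * the few options that need extra work when toggled.
 */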

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

static bool tracer_flags_is_set(u32 flags)
{
	return (tracer_flags.val & flags) == flags;
}

/*
 * The DURATION column is also used to display IRQ signs. The following
 * values are used by print_graph_irq() and others to fill in space in
 * the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

static int __graph_entry(struct trace_array *tr, struct ftrace_graph_ent *trace,
			 unsigned int trace_ctx, struct ftrace_regs *fregs)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;
	int size;

	/* If fregs is defined, add FTRACE_REGS_MAX_ARGS long-sized words */
	size = sizeof(*entry) + (FTRACE_REGS_MAX_ARGS * !!fregs * sizeof(long));

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, size, trace_ctx);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;

#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	if (fregs) {
		for (int i = 0; i < FTRACE_REGS_MAX_ARGS; i++)
			entry->args[i] = ftrace_regs_get_argument(fregs, i);
	}
#endif

	trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned int trace_ctx)
{
	return __graph_entry(tr, trace, trace_ctx, NULL);
}

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
int __trace_graph_retaddr_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned int trace_ctx,
				unsigned long retaddr)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct fgraph_retaddr_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RETADDR_ENT,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent.func = trace->func;
	entry->graph_ent.depth = trace->depth;
	entry->graph_ent.retaddr = retaddr;
	trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}
#else
int __trace_graph_retaddr_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned int trace_ctx,
				unsigned long retaddr)
{
	return 1;
}
#endif

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_hardirq();
}

struct fgraph_times {
	unsigned long long		calltime;
	unsigned long long		sleeptime; /* may be optional! */
};

static int graph_entry(struct ftrace_graph_ent *trace,
		       struct fgraph_ops *gops,
		       struct ftrace_regs *fregs)
{
	unsigned long *task_var = fgraph_get_task_var(gops);
	struct trace_array *tr = gops->private;
	struct trace_array_cpu *data;
	struct fgraph_times *ftimes;
	unsigned int trace_ctx;
	long disabled;
	int ret = 0;
	int cpu;

	if (*task_var & TRACE_GRAPH_NOTRACE)
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func)) {
		*task_var |= TRACE_GRAPH_NOTRACE;
		/*
		 * Need to return 1 to have the return called
		 * that will clear the NOTRACE bit.
		 */
		return 1;
	}

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(gops, trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	if (fgraph_sleep_time) {
		/* Only need to record the calltime */
		ftimes = fgraph_reserve_data(gops->idx, sizeof(ftimes->calltime));
	} else {
		ftimes = fgraph_reserve_data(gops->idx, sizeof(*ftimes));
		if (ftimes)
			ftimes->sleeptime = current->ftrace_sleeptime;
	}
	if (!ftimes)
		return 0;

	ftimes->calltime = trace_clock_local();

	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_read(&data->disabled);
	if (likely(!disabled)) {
		trace_ctx = tracing_gen_ctx();
		if (IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
		    tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR)) {
			unsigned long retaddr = ftrace_graph_top_ret_addr(current);
			ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr);
		} else {
			ret = __graph_entry(tr, trace, trace_ctx, fregs);
		}
	}
	preempt_enable_notrace();

	return ret;
}
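/*
 * Entry callback used when funcgraph-args is off: fregs is deliberately
 * dropped (NULL is passed down) so that __graph_entry() reserves no
 * argument words in the ring buffer event.
 */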
int trace_graph_entry(struct ftrace_graph_ent *trace,
		      struct fgraph_ops *gops,
		      struct ftrace_regs *fregs)
{
	return graph_entry(trace, gops, NULL);
}

static int trace_graph_entry_args(struct ftrace_graph_ent *trace,
				  struct fgraph_ops *gops,
				  struct ftrace_regs *fregs)
{
	return graph_entry(trace, gops, fregs);
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned int trace_ctx)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
	};

	__trace_graph_entry(tr, &ent, trace_ctx);
	__trace_graph_return(tr, &ret, trace_ctx, time, time);
}
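
/*
 * Note that the entry and return events above share a single timestamp,
 * so the function is rendered as a zero-duration leaf in the graph output.
 */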

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned int trace_ctx)
{
	__trace_graph_function(tr, ip, trace_ctx);
}

void __trace_graph_return(struct trace_array *tr,
			  struct ftrace_graph_ret *trace,
			  unsigned int trace_ctx,
			  u64 calltime, u64 rettime)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	entry->calltime				= calltime;
	entry->rettime				= rettime;
	trace_buffer_unlock_commit_nostack(buffer, event);
}

static void handle_nosleeptime(struct ftrace_graph_ret *trace,
			       struct fgraph_times *ftimes,
			       int size)
{
	if (fgraph_sleep_time || size < sizeof(*ftimes))
		return;

	ftimes->calltime += current->ftrace_sleeptime - ftimes->sleeptime;
}

void trace_graph_return(struct ftrace_graph_ret *trace,
			struct fgraph_ops *gops, struct ftrace_regs *fregs)
{
	unsigned long *task_var = fgraph_get_task_var(gops);
	struct trace_array *tr = gops->private;
	struct trace_array_cpu *data;
	struct fgraph_times *ftimes;
	unsigned int trace_ctx;
	u64 calltime, rettime;
	long disabled;
	int size;
	int cpu;

	rettime = trace_clock_local();

	ftrace_graph_addr_finish(gops, trace);

	if (*task_var & TRACE_GRAPH_NOTRACE) {
		*task_var &= ~TRACE_GRAPH_NOTRACE;
		return;
	}

	ftimes = fgraph_retrieve_data(gops->idx, &size);
	if (!ftimes)
		return;

	handle_nosleeptime(trace, ftimes, size);

	calltime = ftimes->calltime;

	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_read(&data->disabled);
	if (likely(!disabled)) {
		trace_ctx = tracing_gen_ctx();
		__trace_graph_return(tr, trace, trace_ctx, calltime, rettime);
	}
	preempt_enable_notrace();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
				      struct fgraph_ops *gops,
				      struct ftrace_regs *fregs)
{
	struct fgraph_times *ftimes;
	int size;

	ftrace_graph_addr_finish(gops, trace);

	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
		return;
	}

	ftimes = fgraph_retrieve_data(gops->idx, &size);
	if (!ftimes)
		return;

	handle_nosleeptime(trace, ftimes, size);

	if (tracing_thresh &&
	    (trace_clock_local() - ftimes->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace, gops, fregs);
}

static struct fgraph_ops funcgraph_ops = {
	.entryfunc = &trace_graph_entry,
	.retfunc = &trace_graph_return,
};

int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
{
	struct fgraph_ops *gops;

	gops = kzalloc(sizeof(*gops), GFP_KERNEL);
	if (!gops)
		return -ENOMEM;

	gops->entryfunc = &trace_graph_entry;
	gops->retfunc = &trace_graph_return;

	tr->gops = gops;
	gops->private = tr;

	fgraph_init_ops(&gops->ops, ops);

	return 0;
}

void free_fgraph_ops(struct trace_array *tr)
{
	kfree(tr->gops);
}

__init void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
{
	tr->gops = &funcgraph_ops;
	funcgraph_ops.private = tr;
	fgraph_init_ops(&tr->gops->ops, ops);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	if (tracer_flags_is_set(TRACE_GRAPH_ARGS))
		tr->gops->entryfunc = trace_graph_entry_args;
	else
		tr->gops->entryfunc = trace_graph_entry;

	if (tracing_thresh)
		tr->gops->retfunc = trace_graph_thresh_return;
	else
		tr->gops->retfunc = trace_graph_return;

	/* Make sure gops functions are visible before we start tracing */
	smp_mb();

	ret = register_ftrace_graph(tr->gops);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph(tr->gops);
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}
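
/*
 * print_graph_cpu() renders e.g. "  3) " or " 12) "; max_bytes_for_cpu
 * keeps the column the same width for every CPU id on the system.
 */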

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}
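
/*
 * print_graph_proc() truncates the comm to 7 characters and centers the
 * "comm-pid" pair in a 14-column field, e.g. "  bash-1755   ".
 */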

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
	trace_seq_puts(s, " | ");
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
	/*
	 * Context-switch trace line:
	 *
	 *  ------------------------------------------
	 *  | 1)  migration/0--1  =>  sshd-1755
	 *  ------------------------------------------
	 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent.ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			if (unlikely(curr->ent.type == TRACE_GRAPH_RETADDR_ENT))
				data->ent.rent = *(struct fgraph_retaddr_ent_entry *)curr;
			else
				data->ent.ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_iter_advance(ring_iter);

	return next;
}
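
/*
 * When an entry event is immediately followed by its matching return
 * event, the two are folded into a single "leaf" line. Illustrative
 * output with default options:
 *
 *  1)   0.535 us    |    kfree();       <- leaf: entry + return folded
 *  1)               |    schedule() {   <- non-leaf: printed as a block
 */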

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}
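
/* E.g. a timestamp of 105.000123456 seconds prints as "  105.000123 |  ". */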

static void
print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
{
	unsigned long long usecs;

	usecs = iter->ts - iter->array_buffer->time_start;
	do_div(usecs, NSEC_PER_USEC);

	trace_seq_printf(s, "%9llu us |  ", usecs);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	addr += iter->tr->text_delta;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Relative time */
		if (flags & TRACE_GRAPH_PRINT_REL_TIME)
			print_graph_rel_time(iter, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}
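
/*
 * The markers land in the DURATION column, so an interrupt shows up as
 * (illustrative):
 *
 *  1)   ==========> |
 *  1)               |    smp_apic_timer_interrupt() {
 *  ...
 *  1)   <========== |
 */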

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}
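
/* E.g. a duration of 12345 ns is printed as "12.345 us " plus padding. */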

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}
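
/*
 * trace_find_mark() (trace_output.c) picks the overhead marker; per
 * Documentation/trace/ftrace.rst: '+' over 10 us, '!' over 100 us,
 * '#' over 1000 us, '*' over 10 ms, '@' over 100 ms, '$' over 1 s.
 */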

#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
#define __TRACE_GRAPH_PRINT_RETVAL TRACE_GRAPH_PRINT_RETVAL
#else
#define __TRACE_GRAPH_PRINT_RETVAL 0
#endif

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
#define __TRACE_GRAPH_PRINT_RETADDR TRACE_GRAPH_PRINT_RETADDR
static void print_graph_retaddr(struct trace_seq *s, struct fgraph_retaddr_ent_entry *entry,
				u32 trace_flags, bool comment)
{
	if (comment)
		trace_seq_puts(s, " /*");

	trace_seq_puts(s, " <-");
	seq_print_ip_sym(s, entry->graph_ent.retaddr, trace_flags | TRACE_ITER_SYM_OFFSET);

	if (comment)
		trace_seq_puts(s, " */");
}
#else
#define __TRACE_GRAPH_PRINT_RETADDR 0
#define print_graph_retaddr(_seq, _entry, _tflags, _comment)		do { } while (0)
#endif

#if defined(CONFIG_FUNCTION_GRAPH_RETVAL) || defined(CONFIG_FUNCTION_GRAPH_RETADDR)

static void print_graph_retval(struct trace_seq *s, struct ftrace_graph_ent_entry *entry,
				struct ftrace_graph_ret *graph_ret, void *func,
				u32 opt_flags, u32 trace_flags, int args_size)
{
	unsigned long err_code = 0;
	unsigned long retval = 0;
	bool print_retaddr = false;
	bool print_retval = false;
	bool hex_format = !!(opt_flags & TRACE_GRAPH_PRINT_RETVAL_HEX);

#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	retval = graph_ret->retval;
	print_retval = !!(opt_flags & TRACE_GRAPH_PRINT_RETVAL);
#endif

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
	print_retaddr = !!(opt_flags & TRACE_GRAPH_PRINT_RETADDR);
#endif

	if (print_retval && retval && !hex_format) {
		/* Check if the return value matches the negative format */
		if (IS_ENABLED(CONFIG_64BIT) && (retval & BIT(31)) &&
			(((u64)retval) >> 32) == 0) {
			err_code = sign_extend64(retval, 31);
		} else {
			err_code = retval;
		}

		if (!IS_ERR_VALUE(err_code))
			err_code = 0;
	}

	if (entry) {
		if (entry->ent.type != TRACE_GRAPH_RETADDR_ENT)
			print_retaddr = false;

		trace_seq_printf(s, "%ps", func);

		if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long)) {
			print_function_args(s, entry->args, (unsigned long)func);
			trace_seq_putc(s, ';');
		} else
			trace_seq_puts(s, "();");

		if (print_retval || print_retaddr)
			trace_seq_puts(s, " /*");
		else
			trace_seq_putc(s, '\n');
	} else {
		print_retaddr = false;
		trace_seq_printf(s, "} /* %ps", func);
	}

	if (print_retaddr)
		print_graph_retaddr(s, (struct fgraph_retaddr_ent_entry *)entry,
				    trace_flags, false);

	if (print_retval) {
		if (hex_format || (err_code == 0))
			trace_seq_printf(s, " ret=0x%lx", retval);
		else
			trace_seq_printf(s, " ret=%ld", err_code);
	}

	if (!entry || print_retval || print_retaddr)
		trace_seq_puts(s, " */\n");
}
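
/*
 * Illustrative funcgraph-retval output (the return value is appended
 * inside the trailing comment of the printed line):
 *
 *  1)   0.802 us    |    vfs_open(); ret=-2
 *  1) + 12.584 us   |  } do_filp_open ret=0x0
 *
 * The second, hexadecimal form is used when funcgraph-retval-hex is set
 * or when the value does not look like an error code.
 */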

#else

#define print_graph_retval(_seq, _ent, _ret, _func, _opt_flags, _trace_flags, args_size) \
	do {} while (0)

#endif

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	unsigned long ret_func;
	unsigned long func;
	int args_size;
	int cpu = iter->cpu;
	int i;

	args_size = iter->ent_size - offsetof(struct ftrace_graph_ent_entry, args);

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = ret_entry->rettime - ret_entry->calltime;

	func = call->func + iter->tr->text_delta;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	ret_func = graph_ret->func + iter->tr->text_delta;

	/*
	 * Write out the function return value or return address
	 */
	if (flags & (__TRACE_GRAPH_PRINT_RETVAL | __TRACE_GRAPH_PRINT_RETADDR)) {
		print_graph_retval(s, entry, graph_ret,
				   (void *)graph_ret->func + iter->tr->text_delta,
				   flags, tr->trace_flags, args_size);
	} else {
		trace_seq_printf(s, "%ps", (void *)ret_func);

		if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long)) {
			print_function_args(s, entry->args, ret_func);
			trace_seq_putc(s, ';');
		} else
			trace_seq_puts(s, "();");
	}
	trace_seq_printf(s, "\n");

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	unsigned long func;
	int args_size;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	func = call->func + iter->tr->text_delta;

	trace_seq_printf(s, "%ps", (void *)func);

	args_size = iter->ent_size - offsetof(struct ftrace_graph_ent_entry, args);

	if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long))
		print_function_args(s, entry->args, func);
	else
		trace_seq_puts(s, "()");

	trace_seq_puts(s, " {");

	if (flags & __TRACE_GRAPH_PRINT_RETADDR  &&
		entry->ent.type == TRACE_GRAPH_RETADDR_ENT)
		print_graph_retaddr(s, (struct fgraph_retaddr_ent_entry *)entry,
			tr->trace_flags, true);
	trace_seq_putc(s, '\n');

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Relative time */
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		print_graph_rel_time(iter, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	addr += iter->tr->text_delta;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the irq entry returning.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}
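
/*
 * With nofuncgraph-irqs set, everything between the irq entry and its
 * matching return (the whole handler subtree) is suppressed from the
 * output rather than printed with the "==========>" markers.
 */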

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;
	/*
	 * print_graph_entry() may consume the current event,
	 * thus @field may become invalid, so we need to save it.
	 * sizeof(struct ftrace_graph_ent_entry) is very small,
	 * so it can safely be saved on the stack.
	 */
	struct ftrace_graph_ent_entry *entry;
	u8 save_buf[sizeof(*entry) + FTRACE_REGS_MAX_ARGS * sizeof(long)];

	/* The ent_size is expected to be as big as the entry */
	if (iter->ent_size > sizeof(save_buf))
		iter->ent_size = sizeof(save_buf);

	entry = (void *)save_buf;
	memcpy(entry, field, iter->ent_size);

	call = &entry->graph_ent;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, entry);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, entry, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, entry, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret_entry *retentry, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	struct ftrace_graph_ret *trace = &retentry->ret;
	u64 calltime = retentry->calltime;
	u64 rettime = retentry->rettime;
	unsigned long long duration = rettime - calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	unsigned long func;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	func = trace->func + iter->tr->text_delta;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * Always write out the function name and its return value if the
	 * funcgraph-retval option is enabled.
	 */
	if (flags & __TRACE_GRAPH_PRINT_RETVAL) {
		print_graph_retval(s, NULL, trace, (void *)func, flags,
				   tr->trace_flags, 0);
	} else {
		/*
		 * If the return function does not have a matching entry,
		 * then the entry was lost. Instead of just printing
		 * the '}' and letting the user guess what function this
		 * belongs to, write out the function name. Always do
		 * that if the funcgraph-tail option is enabled.
		 */
		if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
			trace_seq_puts(s, "}\n");
		else
			trace_seq_printf(s, "} /* %ps */\n", (void *)func);
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %u)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent.ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		trace_assign_type(field, entry);
		return print_graph_entry(field, s, iter, flags);
	}
#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
	case TRACE_GRAPH_RETADDR_ENT: {
		struct fgraph_retaddr_ent_entry saved;
		struct fgraph_retaddr_ent_entry *rfield;

		trace_assign_type(rfield, entry);
		saved = *rfield;
		return print_graph_entry((struct ftrace_graph_ent_entry *)&saved, s, iter, flags);
	}
#endif
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(field, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "   REL TIME     ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}
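
/*
 * With the default flags this produces (illustrative):
 *
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 */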

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
static struct trace_event graph_trace_retaddr_entry_event = {
	.type		= TRACE_GRAPH_RETADDR_ENT,
	.funcs		= &graph_functions,
};
#endif

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}
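
/*
 * max_graph_depth limits how deep the graph tracer follows calls, e.g.
 * (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo 3 > /sys/kernel/tracing/max_graph_depth   # trace 3 levels deep
 *   echo 0 > /sys/kernel/tracing/max_graph_depth   # no limit (default)
 */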

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
	if (!register_trace_event(&graph_trace_retaddr_entry_event)) {
		pr_warn("Warning: could not register graph trace retaddr events\n");
		return 1;
	}
#endif

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);