1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Function graph tracer.
5  * Copyright (c) 2008-2009 Frederic Weisbecker <[email protected]>
6  * Mostly borrowed from function tracer which
7  * is Copyright (c) Steven Rostedt <[email protected]>
8  *
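 * Output looks roughly like the following (illustrative only; the exact
 * columns depend on which of the funcgraph-* options below are enabled):
 *
 *  1)               |  kmem_cache_free() {
 *  1)   0.518 us    |    __slab_free();
 *  1)   1.204 us    |  }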
9  */
10 #include <linux/uaccess.h>
11 #include <linux/ftrace.h>
12 #include <linux/interrupt.h>
13 #include <linux/slab.h>
14 #include <linux/fs.h>
15 
16 #include "trace.h"
17 #include "trace_output.h"
18 
19 /* When set, irq functions will be ignored */
20 static int ftrace_graph_skip_irqs;
21 
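/*
 * Per-cpu state used while rendering the trace output: the last pid
 * printed (to detect context switches), the current call depth, whether
 * the next event on this cpu should be skipped, the depth at which irq
 * code was entered, and the entry functions seen at each depth (used to
 * match a return event against its entry).
 */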
22 struct fgraph_cpu_data {
23 	pid_t		last_pid;
24 	int		depth;
25 	int		depth_irq;
26 	int		ignore;
27 	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
28 };
29 
30 struct fgraph_data {
31 	struct fgraph_cpu_data __percpu *cpu_data;
32 
33 	/* Place to preserve last processed entry. */
34 	struct ftrace_graph_ent_entry	ent;
35 	struct ftrace_graph_ret_entry	ret;
36 	int				failed;
37 	int				cpu;
38 };
39 
40 #define TRACE_GRAPH_INDENT	2
41 
42 unsigned int fgraph_max_depth;
43 
44 static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debugging purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display the CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
51 	/* Display proc name/pid */
52 	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
53 	/* Display duration of execution */
54 	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
55 	/* Display absolute time of an entry */
56 	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
57 	/* Display interrupts */
58 	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
59 	/* Display function name after trailing } */
60 	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
61 #ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	/* Display the function return value? */
	{ TRACER_OPT(funcgraph-retval, TRACE_GRAPH_PRINT_RETVAL) },
	/* Display the function return value in hexadecimal format? */
	{ TRACER_OPT(funcgraph-retval-hex, TRACE_GRAPH_PRINT_RETVAL_HEX) },
66 #endif
67 	/* Include sleep time (scheduled out) between entry and return */
68 	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
69 
70 #ifdef CONFIG_FUNCTION_PROFILER
71 	/* Include time within nested functions */
72 	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
73 #endif
74 
75 	{ } /* Empty entry */
76 };
77 
78 static struct tracer_flags tracer_flags = {
79 	/* Don't display overruns, proc, or tail by default */
80 	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
81 	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
82 	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
83 	.opts = trace_opts
84 };
85 
/*
 * The DURATION column is also used to display IRQ markers.  The
 * following values are used by print_graph_irq() and others to fill
 * in space in the DURATION column.
 */
91 enum {
92 	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
93 	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
94 	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
95 };
96 
97 static void
98 print_graph_duration(struct trace_array *tr, unsigned long long duration,
99 		     struct trace_seq *s, u32 flags);
100 
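/*
 * Write a function-entry event for @trace into the ring buffer of @tr.
 * Returns 1 if the event was reserved (even if it is later discarded by
 * the event filter) and 0 if the ring buffer had no room for it.
 */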
101 int __trace_graph_entry(struct trace_array *tr,
102 				struct ftrace_graph_ent *trace,
103 				unsigned int trace_ctx)
104 {
105 	struct trace_event_call *call = &event_funcgraph_entry;
106 	struct ring_buffer_event *event;
107 	struct trace_buffer *buffer = tr->array_buffer.buffer;
108 	struct ftrace_graph_ent_entry *entry;
109 
110 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
111 					  sizeof(*entry), trace_ctx);
112 	if (!event)
113 		return 0;
114 	entry	= ring_buffer_event_data(event);
115 	entry->graph_ent			= *trace;
116 	if (!call_filter_check_discard(call, entry, buffer, event))
117 		trace_buffer_unlock_commit_nostack(buffer, event);
118 
119 	return 1;
120 }
121 
122 static inline int ftrace_graph_ignore_irqs(void)
123 {
124 	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
125 		return 0;
126 
127 	return in_hardirq();
128 }
129 
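/*
 * Entry callback of the function graph tracer.  Decides whether this
 * call should be recorded, honouring set_graph_notrace, the per-task
 * and per-instance filters, irq skipping and tracing_thresh, and writes
 * the entry event if so.  Returning 0 tells the fgraph core not to hook
 * the return of this function.
 */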
130 int trace_graph_entry(struct ftrace_graph_ent *trace,
131 		      struct fgraph_ops *gops)
132 {
133 	unsigned long *task_var = fgraph_get_task_var(gops);
134 	struct trace_array *tr = gops->private;
135 	struct trace_array_cpu *data;
136 	unsigned long *sleeptime;
137 	unsigned long flags;
138 	unsigned int trace_ctx;
139 	long disabled;
140 	int ret;
141 	int cpu;
142 
143 	if (*task_var & TRACE_GRAPH_NOTRACE)
144 		return 0;
145 
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Set the NOTRACE bit in the task's graph flags so that further
	 * functions are ignored.  The return handler of this function
	 * clears the bit again, so tracing resumes once it returns.
	 */
	if (ftrace_graph_notrace_addr(trace->func)) {
		*task_var |= TRACE_GRAPH_NOTRACE;
		/*
		 * We need to return 1 so that the return handler is called,
		 * which will clear the NOTRACE bit.
		 */
		return 1;
	}
161 
162 	if (!ftrace_trace_task(tr))
163 		return 0;
164 
165 	if (ftrace_graph_ignore_func(gops, trace))
166 		return 0;
167 
168 	if (ftrace_graph_ignore_irqs())
169 		return 0;
170 
171 	/* save the current sleep time if we are to ignore it */
172 	if (!fgraph_sleep_time) {
173 		sleeptime = fgraph_reserve_data(gops->idx, sizeof(*sleeptime));
174 		if (sleeptime)
175 			*sleeptime = current->ftrace_sleeptime;
176 	}
177 
	/*
	 * Stop here if tracing_thresh is set.  In that case we only write
	 * function return events to the ring buffer.
	 */
182 	if (tracing_thresh)
183 		return 1;
184 
185 	local_irq_save(flags);
186 	cpu = raw_smp_processor_id();
187 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
188 	disabled = atomic_inc_return(&data->disabled);
189 	if (likely(disabled == 1)) {
190 		trace_ctx = tracing_gen_ctx_flags(flags);
191 		ret = __trace_graph_entry(tr, trace, trace_ctx);
192 	} else {
193 		ret = 0;
194 	}
195 
196 	atomic_dec(&data->disabled);
197 	local_irq_restore(flags);
198 
199 	return ret;
200 }
201 
202 static void
203 __trace_graph_function(struct trace_array *tr,
204 		unsigned long ip, unsigned int trace_ctx)
205 {
206 	u64 time = trace_clock_local();
207 	struct ftrace_graph_ent ent = {
208 		.func  = ip,
209 		.depth = 0,
210 	};
211 	struct ftrace_graph_ret ret = {
212 		.func     = ip,
213 		.depth    = 0,
214 		.calltime = time,
215 		.rettime  = time,
216 	};
217 
218 	__trace_graph_entry(tr, &ent, trace_ctx);
219 	__trace_graph_return(tr, &ret, trace_ctx);
220 }
221 
222 void
223 trace_graph_function(struct trace_array *tr,
224 		unsigned long ip, unsigned long parent_ip,
225 		unsigned int trace_ctx)
226 {
227 	__trace_graph_function(tr, ip, trace_ctx);
228 }
229 
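/*
 * Write a function-return event for @trace into the ring buffer of @tr,
 * unless the buffer is full or the event is discarded by the event
 * filter.
 */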
230 void __trace_graph_return(struct trace_array *tr,
231 				struct ftrace_graph_ret *trace,
232 				unsigned int trace_ctx)
233 {
234 	struct trace_event_call *call = &event_funcgraph_exit;
235 	struct ring_buffer_event *event;
236 	struct trace_buffer *buffer = tr->array_buffer.buffer;
237 	struct ftrace_graph_ret_entry *entry;
238 
239 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
240 					  sizeof(*entry), trace_ctx);
241 	if (!event)
242 		return;
243 	entry	= ring_buffer_event_data(event);
244 	entry->ret				= *trace;
245 	if (!call_filter_check_discard(call, entry, buffer, event))
246 		trace_buffer_unlock_commit_nostack(buffer, event);
247 }
248 
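/*
 * When sleep time is not being counted (the sleep-time option is off),
 * trace_graph_entry() stashed the task's accumulated sleep time at
 * function entry.  Add the sleep time accumulated since then to
 * calltime so it is excluded from the reported duration.
 */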
249 static void handle_nosleeptime(struct ftrace_graph_ret *trace,
250 			       struct fgraph_ops *gops)
251 {
252 	unsigned long long *sleeptime;
253 	int size;
254 
255 	if (fgraph_sleep_time)
256 		return;
257 
258 	sleeptime = fgraph_retrieve_data(gops->idx, &size);
259 	if (!sleeptime)
260 		return;
261 
262 	trace->calltime += current->ftrace_sleeptime - *sleeptime;
263 }
264 
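/*
 * Return callback of the function graph tracer: adjusts calltime for
 * ignored sleep time and writes the return event for @trace.
 */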
265 void trace_graph_return(struct ftrace_graph_ret *trace,
266 			struct fgraph_ops *gops)
267 {
268 	unsigned long *task_var = fgraph_get_task_var(gops);
269 	struct trace_array *tr = gops->private;
270 	struct trace_array_cpu *data;
271 	unsigned long flags;
272 	unsigned int trace_ctx;
273 	long disabled;
274 	int cpu;
275 
276 	ftrace_graph_addr_finish(gops, trace);
277 
278 	if (*task_var & TRACE_GRAPH_NOTRACE) {
279 		*task_var &= ~TRACE_GRAPH_NOTRACE;
280 		return;
281 	}
282 
283 	handle_nosleeptime(trace, gops);
284 
285 	local_irq_save(flags);
286 	cpu = raw_smp_processor_id();
287 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
288 	disabled = atomic_inc_return(&data->disabled);
289 	if (likely(disabled == 1)) {
290 		trace_ctx = tracing_gen_ctx_flags(flags);
291 		__trace_graph_return(tr, trace, trace_ctx);
292 	}
293 	atomic_dec(&data->disabled);
294 	local_irq_restore(flags);
295 }
296 
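/*
 * Return callback used when tracing_thresh is set: only functions that
 * ran for at least tracing_thresh are written to the ring buffer (the
 * corresponding entry events are suppressed in trace_graph_entry()).
 */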
297 static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
298 				      struct fgraph_ops *gops)
299 {
300 	ftrace_graph_addr_finish(gops, trace);
301 
302 	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
303 		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
304 		return;
305 	}
306 
307 	handle_nosleeptime(trace, gops);
308 
309 	if (tracing_thresh &&
310 	    (trace->rettime - trace->calltime < tracing_thresh))
311 		return;
312 	else
313 		trace_graph_return(trace, gops);
314 }
315 
316 static struct fgraph_ops funcgraph_ops = {
317 	.entryfunc = &trace_graph_entry,
318 	.retfunc = &trace_graph_return,
319 };
320 
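/*
 * Allocate and initialize a per-instance fgraph_ops for @tr, wiring up
 * the default entry/return callbacks.  The structure is released again
 * with free_fgraph_ops().
 */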
321 int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
322 {
323 	struct fgraph_ops *gops;
324 
325 	gops = kzalloc(sizeof(*gops), GFP_KERNEL);
326 	if (!gops)
327 		return -ENOMEM;
328 
329 	gops->entryfunc = &trace_graph_entry;
330 	gops->retfunc = &trace_graph_return;
331 
332 	tr->gops = gops;
333 	gops->private = tr;
334 
335 	fgraph_init_ops(&gops->ops, ops);
336 
337 	return 0;
338 }
339 
340 void free_fgraph_ops(struct trace_array *tr)
341 {
342 	kfree(tr->gops);
343 }
344 
345 __init void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
346 {
347 	tr->gops = &funcgraph_ops;
348 	funcgraph_ops.private = tr;
349 	fgraph_init_ops(&tr->gops->ops, ops);
350 }
351 
352 static int graph_trace_init(struct trace_array *tr)
353 {
354 	int ret;
355 
356 	tr->gops->entryfunc = trace_graph_entry;
357 
358 	if (tracing_thresh)
359 		tr->gops->retfunc = trace_graph_thresh_return;
360 	else
361 		tr->gops->retfunc = trace_graph_return;
362 
	/* Make sure the gops functions are visible before we start tracing */
364 	smp_mb();
365 
366 	ret = register_ftrace_graph(tr->gops);
367 	if (ret)
368 		return ret;
369 	tracing_start_cmdline_record();
370 
371 	return 0;
372 }
373 
374 static void graph_trace_reset(struct trace_array *tr)
375 {
376 	tracing_stop_cmdline_record();
377 	unregister_ftrace_graph(tr->gops);
378 }
379 
380 static int graph_trace_update_thresh(struct trace_array *tr)
381 {
382 	graph_trace_reset(tr);
383 	return graph_trace_init(tr);
384 }
385 
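/* Width of the CPU number column; computed from nr_cpu_ids at init time */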
386 static int max_bytes_for_cpu;
387 
388 static void print_graph_cpu(struct trace_seq *s, int cpu)
389 {
390 	/*
391 	 * Start with a space character - to make it stand out
392 	 * to the right a bit when trace output is pasted into
393 	 * email:
394 	 */
395 	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
396 }
397 
398 #define TRACE_GRAPH_PROCINFO_LENGTH	14
399 
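/*
 * Print "comm-pid" (e.g. "bash-1234") centered in a column of
 * TRACE_GRAPH_PROCINFO_LENGTH characters; the comm is truncated to keep
 * the field short.
 */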
400 static void print_graph_proc(struct trace_seq *s, pid_t pid)
401 {
402 	char comm[TASK_COMM_LEN];
403 	/* sign + log10(MAX_INT) + '\0' */
404 	char pid_str[11];
405 	int spaces = 0;
406 	int len;
407 	int i;
408 
409 	trace_find_cmdline(pid, comm);
410 	comm[7] = '\0';
411 	sprintf(pid_str, "%d", pid);
412 
413 	/* 1 stands for the "-" character */
414 	len = strlen(comm) + strlen(pid_str) + 1;
415 
416 	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
417 		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
418 
419 	/* First spaces to align center */
420 	for (i = 0; i < spaces / 2; i++)
421 		trace_seq_putc(s, ' ');
422 
423 	trace_seq_printf(s, "%s-%s", comm, pid_str);
424 
425 	/* Last spaces to align center */
426 	for (i = 0; i < spaces - (spaces / 2); i++)
427 		trace_seq_putc(s, ' ');
428 }
429 
430 
431 static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
432 {
433 	trace_seq_putc(s, ' ');
434 	trace_print_lat_fmt(s, entry);
435 	trace_seq_puts(s, " | ");
436 }
437 
438 /* If the pid changed since the last trace, output this event */
439 static void
440 verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
441 {
442 	pid_t prev_pid;
443 	pid_t *last_pid;
444 
445 	if (!data)
446 		return;
447 
448 	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
449 
450 	if (*last_pid == pid)
451 		return;
452 
453 	prev_pid = *last_pid;
454 	*last_pid = pid;
455 
456 	if (prev_pid == -1)
457 		return;
458 /*
459  * Context-switch trace line:
460 
461  ------------------------------------------
462  | 1)  migration/0--1  =>  sshd-1755
463  ------------------------------------------
464 
465  */
466 	trace_seq_puts(s, " ------------------------------------------\n");
467 	print_graph_cpu(s, cpu);
468 	print_graph_proc(s, prev_pid);
469 	trace_seq_puts(s, " => ");
470 	print_graph_proc(s, pid);
471 	trace_seq_puts(s, "\n ------------------------------------------\n\n");
472 }
473 
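/*
 * Peek at the next event in the ring buffer to see whether it is the
 * return event matching @curr (same pid and same function).  If it is,
 * return it (advancing the iterator past it) so the pair can be printed
 * as a single "func();" leaf line.  Otherwise return NULL and the entry
 * is printed as a nested "func() {" line.
 */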
474 static struct ftrace_graph_ret_entry *
475 get_return_for_leaf(struct trace_iterator *iter,
476 		struct ftrace_graph_ent_entry *curr)
477 {
478 	struct fgraph_data *data = iter->private;
479 	struct ring_buffer_iter *ring_iter = NULL;
480 	struct ring_buffer_event *event;
481 	struct ftrace_graph_ret_entry *next;
482 
483 	/*
484 	 * If the previous output failed to write to the seq buffer,
485 	 * then we just reuse the data from before.
486 	 */
487 	if (data && data->failed) {
488 		curr = &data->ent;
489 		next = &data->ret;
490 	} else {
491 
492 		ring_iter = trace_buffer_iter(iter, iter->cpu);
493 
494 		/* First peek to compare current entry and the next one */
495 		if (ring_iter)
496 			event = ring_buffer_iter_peek(ring_iter, NULL);
497 		else {
498 			/*
499 			 * We need to consume the current entry to see
500 			 * the next one.
501 			 */
502 			ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
503 					    NULL, NULL);
504 			event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
505 						 NULL, NULL);
506 		}
507 
508 		if (!event)
509 			return NULL;
510 
511 		next = ring_buffer_event_data(event);
512 
513 		if (data) {
514 			/*
515 			 * Save current and next entries for later reference
516 			 * if the output fails.
517 			 */
518 			data->ent = *curr;
519 			/*
520 			 * If the next event is not a return type, then
521 			 * we only care about what type it is. Otherwise we can
522 			 * safely copy the entire event.
523 			 */
524 			if (next->ent.type == TRACE_GRAPH_RET)
525 				data->ret = *next;
526 			else
527 				data->ret.ent.type = next->ent.type;
528 		}
529 	}
530 
531 	if (next->ent.type != TRACE_GRAPH_RET)
532 		return NULL;
533 
534 	if (curr->ent.pid != next->ent.pid ||
535 			curr->graph_ent.func != next->ret.func)
536 		return NULL;
537 
538 	/* this is a leaf, now advance the iterator */
539 	if (ring_iter)
540 		ring_buffer_iter_advance(ring_iter);
541 
542 	return next;
543 }
544 
545 static void print_graph_abs_time(u64 t, struct trace_seq *s)
546 {
547 	unsigned long usecs_rem;
548 
549 	usecs_rem = do_div(t, NSEC_PER_SEC);
550 	usecs_rem /= 1000;
551 
552 	trace_seq_printf(s, "%5lu.%06lu |  ",
553 			 (unsigned long)t, usecs_rem);
554 }
555 
556 static void
557 print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
558 {
559 	unsigned long long usecs;
560 
561 	usecs = iter->ts - iter->array_buffer->time_start;
562 	do_div(usecs, NSEC_PER_USEC);
563 
564 	trace_seq_printf(s, "%9llu us |  ", usecs);
565 }
566 
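/*
 * When @addr falls inside the irq entry text, print a context line with
 * an "==========>" marker on irq entry (TRACE_GRAPH_ENT) or a
 * "<==========" marker on irq exit, so interrupts stand out in the
 * graph.
 */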
567 static void
568 print_graph_irq(struct trace_iterator *iter, unsigned long addr,
569 		enum trace_type type, int cpu, pid_t pid, u32 flags)
570 {
571 	struct trace_array *tr = iter->tr;
572 	struct trace_seq *s = &iter->seq;
573 	struct trace_entry *ent = iter->ent;
574 
575 	addr += iter->tr->text_delta;
576 
577 	if (addr < (unsigned long)__irqentry_text_start ||
578 		addr >= (unsigned long)__irqentry_text_end)
579 		return;
580 
581 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
582 		/* Absolute time */
583 		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
584 			print_graph_abs_time(iter->ts, s);
585 
586 		/* Relative time */
587 		if (flags & TRACE_GRAPH_PRINT_REL_TIME)
588 			print_graph_rel_time(iter, s);
589 
590 		/* Cpu */
591 		if (flags & TRACE_GRAPH_PRINT_CPU)
592 			print_graph_cpu(s, cpu);
593 
594 		/* Proc */
595 		if (flags & TRACE_GRAPH_PRINT_PROC) {
596 			print_graph_proc(s, pid);
597 			trace_seq_puts(s, " | ");
598 		}
599 
600 		/* Latency format */
601 		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
602 			print_graph_lat_fmt(s, ent);
603 	}
604 
605 	/* No overhead */
606 	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);
607 
608 	if (type == TRACE_GRAPH_ENT)
609 		trace_seq_puts(s, "==========>");
610 	else
611 		trace_seq_puts(s, "<==========");
612 
613 	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
614 	trace_seq_putc(s, '\n');
615 }
616 
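/*
 * Format @duration (in nanoseconds) as microseconds with up to three
 * fractional digits, padded to the width of the DURATION column.
 * For example, 1234567 ns is rendered as "1234.567 us".
 */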
617 void
618 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
619 {
620 	unsigned long nsecs_rem = do_div(duration, 1000);
621 	/* log10(ULONG_MAX) + '\0' */
622 	char usecs_str[21];
623 	char nsecs_str[5];
624 	int len;
625 	int i;
626 
627 	sprintf(usecs_str, "%lu", (unsigned long) duration);
628 
	/* Print usecs */
630 	trace_seq_printf(s, "%s", usecs_str);
631 
632 	len = strlen(usecs_str);
633 
	/* Print nsecs (we don't want to exceed 7 digits) */
635 	if (len < 7) {
636 		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
637 
638 		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
639 		trace_seq_printf(s, ".%s", nsecs_str);
640 		len += strlen(nsecs_str) + 1;
641 	}
642 
643 	trace_seq_puts(s, " us ");
644 
645 	/* Print remaining spaces to fit the row's width */
646 	for (i = len; i < 8; i++)
647 		trace_seq_putc(s, ' ');
648 }
649 
650 static void
651 print_graph_duration(struct trace_array *tr, unsigned long long duration,
652 		     struct trace_seq *s, u32 flags)
653 {
654 	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
655 	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
656 		return;
657 
	/* No real data, just filling the column with spaces */
659 	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
660 	case FLAGS_FILL_FULL:
661 		trace_seq_puts(s, "              |  ");
662 		return;
663 	case FLAGS_FILL_START:
664 		trace_seq_puts(s, "  ");
665 		return;
666 	case FLAGS_FILL_END:
667 		trace_seq_puts(s, " |");
668 		return;
669 	}
670 
	/* Signal the execution time overhead in the output */
672 	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
673 		trace_seq_printf(s, "%c ", trace_find_mark(duration));
674 	else
675 		trace_seq_puts(s, "  ");
676 
677 	trace_print_graph_duration(duration, s);
678 	trace_seq_puts(s, "|  ");
679 }
680 
681 #ifdef CONFIG_FUNCTION_GRAPH_RETVAL
682 
683 #define __TRACE_GRAPH_PRINT_RETVAL TRACE_GRAPH_PRINT_RETVAL
684 
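/*
 * Print the return value of a traced function.  Values that look like
 * kernel error codes are printed as signed decimals; everything else,
 * or everything when the funcgraph-retval-hex option is set, is printed
 * in hexadecimal.
 */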
685 static void print_graph_retval(struct trace_seq *s, unsigned long retval,
686 				bool leaf, void *func, bool hex_format)
687 {
688 	unsigned long err_code = 0;
689 
690 	if (retval == 0 || hex_format)
691 		goto done;
692 
693 	/* Check if the return value matches the negative format */
694 	if (IS_ENABLED(CONFIG_64BIT) && (retval & BIT(31)) &&
695 		(((u64)retval) >> 32) == 0) {
696 		/* sign extension */
697 		err_code = (unsigned long)(s32)retval;
698 	} else {
699 		err_code = retval;
700 	}
701 
702 	if (!IS_ERR_VALUE(err_code))
703 		err_code = 0;
704 
705 done:
706 	if (leaf) {
707 		if (hex_format || (err_code == 0))
708 			trace_seq_printf(s, "%ps(); /* = 0x%lx */\n",
709 					func, retval);
710 		else
711 			trace_seq_printf(s, "%ps(); /* = %ld */\n",
712 					func, err_code);
713 	} else {
714 		if (hex_format || (err_code == 0))
715 			trace_seq_printf(s, "} /* %ps = 0x%lx */\n",
716 					func, retval);
717 		else
718 			trace_seq_printf(s, "} /* %ps = %ld */\n",
719 					func, err_code);
720 	}
721 }
722 
723 #else
724 
725 #define __TRACE_GRAPH_PRINT_RETVAL 0
726 
727 #define print_graph_retval(_seq, _retval, _leaf, _func, _format) do {} while (0)
728 
729 #endif
730 
731 /* Case of a leaf function on its call entry */
732 static enum print_line_t
733 print_graph_entry_leaf(struct trace_iterator *iter,
734 		struct ftrace_graph_ent_entry *entry,
735 		struct ftrace_graph_ret_entry *ret_entry,
736 		struct trace_seq *s, u32 flags)
737 {
738 	struct fgraph_data *data = iter->private;
739 	struct trace_array *tr = iter->tr;
740 	struct ftrace_graph_ret *graph_ret;
741 	struct ftrace_graph_ent *call;
742 	unsigned long long duration;
743 	unsigned long func;
744 	int cpu = iter->cpu;
745 	int i;
746 
747 	graph_ret = &ret_entry->ret;
748 	call = &entry->graph_ent;
749 	duration = graph_ret->rettime - graph_ret->calltime;
750 
751 	func = call->func + iter->tr->text_delta;
752 
753 	if (data) {
754 		struct fgraph_cpu_data *cpu_data;
755 
756 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
757 
		/*
		 * Comments are displayed at depth + 1.  Since this is a
		 * leaf function, keep the comments at the same depth as
		 * this function.
		 */
763 		cpu_data->depth = call->depth - 1;
764 
765 		/* No need to keep this function around for this depth */
766 		if (call->depth < FTRACE_RETFUNC_DEPTH &&
767 		    !WARN_ON_ONCE(call->depth < 0))
768 			cpu_data->enter_funcs[call->depth] = 0;
769 	}
770 
771 	/* Overhead and duration */
772 	print_graph_duration(tr, duration, s, flags);
773 
774 	/* Function */
775 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
776 		trace_seq_putc(s, ' ');
777 
	/*
	 * Write out the function return value if the funcgraph-retval
	 * option is enabled.
	 */
782 	if (flags & __TRACE_GRAPH_PRINT_RETVAL)
783 		print_graph_retval(s, graph_ret->retval, true, (void *)func,
784 				!!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX));
785 	else
786 		trace_seq_printf(s, "%ps();\n", (void *)func);
787 
788 	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
789 			cpu, iter->ent->pid, flags);
790 
791 	return trace_handle_return(s);
792 }
793 
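/* Case of a nested function on its call entry (no matching return found) */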
794 static enum print_line_t
795 print_graph_entry_nested(struct trace_iterator *iter,
796 			 struct ftrace_graph_ent_entry *entry,
797 			 struct trace_seq *s, int cpu, u32 flags)
798 {
799 	struct ftrace_graph_ent *call = &entry->graph_ent;
800 	struct fgraph_data *data = iter->private;
801 	struct trace_array *tr = iter->tr;
802 	unsigned long func;
803 	int i;
804 
805 	if (data) {
806 		struct fgraph_cpu_data *cpu_data;
807 		int cpu = iter->cpu;
808 
809 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
810 		cpu_data->depth = call->depth;
811 
812 		/* Save this function pointer to see if the exit matches */
813 		if (call->depth < FTRACE_RETFUNC_DEPTH &&
814 		    !WARN_ON_ONCE(call->depth < 0))
815 			cpu_data->enter_funcs[call->depth] = call->func;
816 	}
817 
818 	/* No time */
819 	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
820 
821 	/* Function */
822 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
823 		trace_seq_putc(s, ' ');
824 
825 	func = call->func + iter->tr->text_delta;
826 
827 	trace_seq_printf(s, "%ps() {\n", (void *)func);
828 
829 	if (trace_seq_has_overflowed(s))
830 		return TRACE_TYPE_PARTIAL_LINE;
831 
832 	/*
833 	 * we already consumed the current entry to check the next one
834 	 * and see if this is a leaf.
835 	 */
836 	return TRACE_TYPE_NO_CONSUME;
837 }
838 
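/*
 * Print everything that precedes the entry itself: the context-switch
 * banner when the pid changed, irq entry/exit markers, and the
 * absolute/relative time, CPU, task and latency columns selected by
 * @flags.
 */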
839 static void
840 print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
841 		     int type, unsigned long addr, u32 flags)
842 {
843 	struct fgraph_data *data = iter->private;
844 	struct trace_entry *ent = iter->ent;
845 	struct trace_array *tr = iter->tr;
846 	int cpu = iter->cpu;
847 
848 	/* Pid */
849 	verif_pid(s, ent->pid, cpu, data);
850 
851 	if (type)
852 		/* Interrupt */
853 		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
854 
855 	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
856 		return;
857 
858 	/* Absolute time */
859 	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
860 		print_graph_abs_time(iter->ts, s);
861 
862 	/* Relative time */
863 	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
864 		print_graph_rel_time(iter, s);
865 
866 	/* Cpu */
867 	if (flags & TRACE_GRAPH_PRINT_CPU)
868 		print_graph_cpu(s, cpu);
869 
870 	/* Proc */
871 	if (flags & TRACE_GRAPH_PRINT_PROC) {
872 		print_graph_proc(s, ent->pid);
873 		trace_seq_puts(s, " | ");
874 	}
875 
876 	/* Latency format */
877 	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
878 		print_graph_lat_fmt(s, ent);
879 
880 	return;
881 }
882 
/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set (irqs are being displayed)
 *  - we are not inside irq code
 */
894 static int
895 check_irq_entry(struct trace_iterator *iter, u32 flags,
896 		unsigned long addr, int depth)
897 {
898 	int cpu = iter->cpu;
899 	int *depth_irq;
900 	struct fgraph_data *data = iter->private;
901 
902 	addr += iter->tr->text_delta;
903 
	/*
	 * If we are displaying irqs, or we were called as a graph event
	 * without private data, then bypass the irq check.
	 */
909 	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
910 	    (!data))
911 		return 0;
912 
913 	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
914 
915 	/*
916 	 * We are inside the irq code
917 	 */
918 	if (*depth_irq >= 0)
919 		return 1;
920 
921 	if ((addr < (unsigned long)__irqentry_text_start) ||
922 	    (addr >= (unsigned long)__irqentry_text_end))
923 		return 0;
924 
925 	/*
926 	 * We are entering irq code.
927 	 */
928 	*depth_irq = depth;
929 	return 1;
930 }
931 
/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set (irqs are being displayed)
 *  - we are not inside irq code
 */
943 static int
944 check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
945 {
946 	int cpu = iter->cpu;
947 	int *depth_irq;
948 	struct fgraph_data *data = iter->private;
949 
	/*
	 * If we are displaying irqs, or we were called as a graph event
	 * without private data, then bypass the irq check.
	 */
955 	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
956 	    (!data))
957 		return 0;
958 
959 	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
960 
961 	/*
962 	 * We are not inside the irq code.
963 	 */
964 	if (*depth_irq == -1)
965 		return 0;
966 
	/*
	 * We are inside the irq code, and this is the return of the entry
	 * function.  Don't trace it and clear the entry depth, since we
	 * are now out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once we are
	 * out of the entry depth, protecting us from losing the RETURN
	 * entry.
	 */
976 	if (*depth_irq >= depth) {
977 		*depth_irq = -1;
978 		return 1;
979 	}
980 
981 	/*
982 	 * We are inside the irq code, and this is not the entry.
983 	 */
984 	return 1;
985 }
986 
987 static enum print_line_t
988 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
989 			struct trace_iterator *iter, u32 flags)
990 {
991 	struct fgraph_data *data = iter->private;
992 	struct ftrace_graph_ent *call = &field->graph_ent;
993 	struct ftrace_graph_ret_entry *leaf_ret;
994 	static enum print_line_t ret;
995 	int cpu = iter->cpu;
996 
997 	if (check_irq_entry(iter, flags, call->func, call->depth))
998 		return TRACE_TYPE_HANDLED;
999 
1000 	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
1001 
1002 	leaf_ret = get_return_for_leaf(iter, field);
1003 	if (leaf_ret)
1004 		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
1005 	else
1006 		ret = print_graph_entry_nested(iter, field, s, cpu, flags);
1007 
1008 	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we have already consumed our entry.
		 */
1013 		if (s->full) {
1014 			data->failed = 1;
1015 			data->cpu = cpu;
1016 		} else
1017 			data->failed = 0;
1018 	}
1019 
1020 	return ret;
1021 }
1022 
1023 static enum print_line_t
1024 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
1025 		   struct trace_entry *ent, struct trace_iterator *iter,
1026 		   u32 flags)
1027 {
1028 	unsigned long long duration = trace->rettime - trace->calltime;
1029 	struct fgraph_data *data = iter->private;
1030 	struct trace_array *tr = iter->tr;
1031 	unsigned long func;
1032 	pid_t pid = ent->pid;
1033 	int cpu = iter->cpu;
1034 	int func_match = 1;
1035 	int i;
1036 
1037 	func = trace->func + iter->tr->text_delta;
1038 
1039 	if (check_irq_return(iter, flags, trace->depth))
1040 		return TRACE_TYPE_HANDLED;
1041 
1042 	if (data) {
1043 		struct fgraph_cpu_data *cpu_data;
1044 		int cpu = iter->cpu;
1045 
1046 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
1047 
		/*
		 * Comments are displayed at depth + 1.  This is the return
		 * from a function, so we now want the comments to display
		 * at the same level as the closing bracket.
		 */
1053 		cpu_data->depth = trace->depth - 1;
1054 
1055 		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
1056 		    !WARN_ON_ONCE(trace->depth < 0)) {
1057 			if (cpu_data->enter_funcs[trace->depth] != trace->func)
1058 				func_match = 0;
1059 			cpu_data->enter_funcs[trace->depth] = 0;
1060 		}
1061 	}
1062 
1063 	print_graph_prologue(iter, s, 0, 0, flags);
1064 
1065 	/* Overhead and duration */
1066 	print_graph_duration(tr, duration, s, flags);
1067 
1068 	/* Closing brace */
1069 	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
1070 		trace_seq_putc(s, ' ');
1071 
	/*
	 * Always write out the function name and its return value if the
	 * funcgraph-retval option is enabled.
	 */
1076 	if (flags & __TRACE_GRAPH_PRINT_RETVAL) {
1077 		print_graph_retval(s, trace->retval, false, (void *)func,
1078 			!!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX));
1079 	} else {
1080 		/*
1081 		 * If the return function does not have a matching entry,
1082 		 * then the entry was lost. Instead of just printing
1083 		 * the '}' and letting the user guess what function this
1084 		 * belongs to, write out the function name. Always do
1085 		 * that if the funcgraph-tail option is enabled.
1086 		 */
1087 		if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
1088 			trace_seq_puts(s, "}\n");
1089 		else
1090 			trace_seq_printf(s, "} /* %ps */\n", (void *)func);
1091 	}
1092 
1093 	/* Overrun */
1094 	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
1095 		trace_seq_printf(s, " (Overruns: %u)\n",
1096 				 trace->overrun);
1097 
1098 	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
1099 			cpu, pid, flags);
1100 
1101 	return trace_handle_return(s);
1102 }
1103 
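/*
 * Render a non-graph event (trace_printk() and friends) as a C-style
 * comment, indented one level deeper than the current function.
 */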
1104 static enum print_line_t
1105 print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1106 		    struct trace_iterator *iter, u32 flags)
1107 {
1108 	struct trace_array *tr = iter->tr;
1109 	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
1110 	struct fgraph_data *data = iter->private;
1111 	struct trace_event *event;
1112 	int depth = 0;
1113 	int ret;
1114 	int i;
1115 
1116 	if (data)
1117 		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
1118 
1119 	print_graph_prologue(iter, s, 0, 0, flags);
1120 
1121 	/* No time */
1122 	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
1123 
1124 	/* Indentation */
1125 	if (depth > 0)
1126 		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
1127 			trace_seq_putc(s, ' ');
1128 
1129 	/* The comment */
1130 	trace_seq_puts(s, "/* ");
1131 
1132 	switch (iter->ent->type) {
1133 	case TRACE_BPUTS:
1134 		ret = trace_print_bputs_msg_only(iter);
1135 		if (ret != TRACE_TYPE_HANDLED)
1136 			return ret;
1137 		break;
1138 	case TRACE_BPRINT:
1139 		ret = trace_print_bprintk_msg_only(iter);
1140 		if (ret != TRACE_TYPE_HANDLED)
1141 			return ret;
1142 		break;
1143 	case TRACE_PRINT:
1144 		ret = trace_print_printk_msg_only(iter);
1145 		if (ret != TRACE_TYPE_HANDLED)
1146 			return ret;
1147 		break;
1148 	default:
1149 		event = ftrace_find_event(ent->type);
1150 		if (!event)
1151 			return TRACE_TYPE_UNHANDLED;
1152 
1153 		ret = event->funcs->trace(iter, sym_flags, event);
1154 		if (ret != TRACE_TYPE_HANDLED)
1155 			return ret;
1156 	}
1157 
1158 	if (trace_seq_has_overflowed(s))
1159 		goto out;
1160 
1161 	/* Strip ending newline */
1162 	if (s->buffer[s->seq.len - 1] == '\n') {
1163 		s->buffer[s->seq.len - 1] = '\0';
1164 		s->seq.len--;
1165 	}
1166 
1167 	trace_seq_puts(s, " */\n");
1168  out:
1169 	return trace_handle_return(s);
1170 }
1171 
1172 
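/*
 * Main output routine of the graph tracer: dispatch the current event
 * to the entry, return or comment printer, replaying a saved entry
 * first if the previous line overflowed the seq buffer.
 */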
1173 enum print_line_t
1174 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1175 {
1176 	struct ftrace_graph_ent_entry *field;
1177 	struct fgraph_data *data = iter->private;
1178 	struct trace_entry *entry = iter->ent;
1179 	struct trace_seq *s = &iter->seq;
1180 	int cpu = iter->cpu;
1181 	int ret;
1182 
1183 	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1184 		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1185 		return TRACE_TYPE_HANDLED;
1186 	}
1187 
	/*
	 * If the last output failed, there's a possibility we need to
	 * print out the missing entry, which would otherwise never be
	 * written out.
	 */
1192 	if (data && data->failed) {
1193 		field = &data->ent;
1194 		iter->cpu = data->cpu;
1195 		ret = print_graph_entry(field, s, iter, flags);
1196 		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1197 			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1198 			ret = TRACE_TYPE_NO_CONSUME;
1199 		}
1200 		iter->cpu = cpu;
1201 		return ret;
1202 	}
1203 
1204 	switch (entry->type) {
1205 	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event (thus
		 * @field may become invalid), so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small, so
		 * it can safely be saved on the stack.
		 */
1212 		struct ftrace_graph_ent_entry saved;
1213 		trace_assign_type(field, entry);
1214 		saved = *field;
1215 		return print_graph_entry(&saved, s, iter, flags);
1216 	}
1217 	case TRACE_GRAPH_RET: {
1218 		struct ftrace_graph_ret_entry *field;
1219 		trace_assign_type(field, entry);
1220 		return print_graph_return(&field->ret, s, entry, iter, flags);
1221 	}
1222 	case TRACE_STACK:
1223 	case TRACE_FN:
		/* don't trace stack and functions as comments */
1225 		return TRACE_TYPE_UNHANDLED;
1226 
1227 	default:
1228 		return print_graph_comment(s, entry, iter, flags);
1229 	}
1230 
1231 	return TRACE_TYPE_HANDLED;
1232 }
1233 
1234 static enum print_line_t
1235 print_graph_function(struct trace_iterator *iter)
1236 {
1237 	return print_graph_function_flags(iter, tracer_flags.val);
1238 }
1239 
1240 static enum print_line_t
1241 print_graph_function_event(struct trace_iterator *iter, int flags,
1242 			   struct trace_event *event)
1243 {
1244 	return print_graph_function(iter);
1245 }
1246 
1247 static void print_lat_header(struct seq_file *s, u32 flags)
1248 {
1249 	static const char spaces[] = "                "	/* 16 spaces */
1250 		"    "					/* 4 spaces */
1251 		"                 ";			/* 17 spaces */
1252 	int size = 0;
1253 
1254 	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1255 		size += 16;
1256 	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1257 		size += 16;
1258 	if (flags & TRACE_GRAPH_PRINT_CPU)
1259 		size += 4;
1260 	if (flags & TRACE_GRAPH_PRINT_PROC)
1261 		size += 17;
1262 
1263 	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
1264 	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
1265 	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1266 	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
1267 	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
1268 }
1269 
1270 static void __print_graph_headers_flags(struct trace_array *tr,
1271 					struct seq_file *s, u32 flags)
1272 {
1273 	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;
1274 
1275 	if (lat)
1276 		print_lat_header(s, flags);
1277 
1278 	/* 1st line */
1279 	seq_putc(s, '#');
1280 	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1281 		seq_puts(s, "     TIME       ");
1282 	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1283 		seq_puts(s, "   REL TIME     ");
1284 	if (flags & TRACE_GRAPH_PRINT_CPU)
1285 		seq_puts(s, " CPU");
1286 	if (flags & TRACE_GRAPH_PRINT_PROC)
1287 		seq_puts(s, "  TASK/PID       ");
1288 	if (lat)
1289 		seq_puts(s, "||||   ");
1290 	if (flags & TRACE_GRAPH_PRINT_DURATION)
1291 		seq_puts(s, "  DURATION   ");
1292 	seq_puts(s, "               FUNCTION CALLS\n");
1293 
1294 	/* 2nd line */
1295 	seq_putc(s, '#');
1296 	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1297 		seq_puts(s, "      |         ");
1298 	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1299 		seq_puts(s, "      |         ");
1300 	if (flags & TRACE_GRAPH_PRINT_CPU)
1301 		seq_puts(s, " |  ");
1302 	if (flags & TRACE_GRAPH_PRINT_PROC)
1303 		seq_puts(s, "   |    |        ");
1304 	if (lat)
1305 		seq_puts(s, "||||   ");
1306 	if (flags & TRACE_GRAPH_PRINT_DURATION)
1307 		seq_puts(s, "   |   |      ");
1308 	seq_puts(s, "               |   |   |   |\n");
1309 }
1310 
1311 static void print_graph_headers(struct seq_file *s)
1312 {
1313 	print_graph_headers_flags(s, tracer_flags.val);
1314 }
1315 
1316 void print_graph_headers_flags(struct seq_file *s, u32 flags)
1317 {
1318 	struct trace_iterator *iter = s->private;
1319 	struct trace_array *tr = iter->tr;
1320 
1321 	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
1322 		return;
1323 
1324 	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
1325 		/* print nothing if the buffers are empty */
1326 		if (trace_empty(iter))
1327 			return;
1328 
1329 		print_trace_header(s, iter);
1330 	}
1331 
1332 	__print_graph_headers_flags(tr, s, flags);
1333 }
1334 
1335 void graph_trace_open(struct trace_iterator *iter)
1336 {
1337 	/* pid and depth on the last trace processed */
1338 	struct fgraph_data *data;
1339 	gfp_t gfpflags;
1340 	int cpu;
1341 
1342 	iter->private = NULL;
1343 
1344 	/* We can be called in atomic context via ftrace_dump() */
1345 	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
1346 
1347 	data = kzalloc(sizeof(*data), gfpflags);
1348 	if (!data)
1349 		goto out_err;
1350 
1351 	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
1352 	if (!data->cpu_data)
1353 		goto out_err_free;
1354 
1355 	for_each_possible_cpu(cpu) {
1356 		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1357 		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1358 		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1359 		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1360 
1361 		*pid = -1;
1362 		*depth = 0;
1363 		*ignore = 0;
1364 		*depth_irq = -1;
1365 	}
1366 
1367 	iter->private = data;
1368 
1369 	return;
1370 
1371  out_err_free:
1372 	kfree(data);
1373  out_err:
1374 	pr_warn("function graph tracer: not enough memory\n");
1375 }
1376 
1377 void graph_trace_close(struct trace_iterator *iter)
1378 {
1379 	struct fgraph_data *data = iter->private;
1380 
1381 	if (data) {
1382 		free_percpu(data->cpu_data);
1383 		kfree(data);
1384 	}
1385 }
1386 
1387 static int
1388 func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1389 {
1390 	if (bit == TRACE_GRAPH_PRINT_IRQS)
1391 		ftrace_graph_skip_irqs = !set;
1392 
1393 	if (bit == TRACE_GRAPH_SLEEP_TIME)
1394 		ftrace_graph_sleep_time_control(set);
1395 
1396 	if (bit == TRACE_GRAPH_GRAPH_TIME)
1397 		ftrace_graph_graph_time_control(set);
1398 
1399 	return 0;
1400 }
1401 
1402 static struct trace_event_functions graph_functions = {
1403 	.trace		= print_graph_function_event,
1404 };
1405 
1406 static struct trace_event graph_trace_entry_event = {
1407 	.type		= TRACE_GRAPH_ENT,
1408 	.funcs		= &graph_functions,
1409 };
1410 
1411 static struct trace_event graph_trace_ret_event = {
1412 	.type		= TRACE_GRAPH_RET,
1413 	.funcs		= &graph_functions
1414 };
1415 
1416 static struct tracer graph_trace __tracer_data = {
1417 	.name		= "function_graph",
1418 	.update_thresh	= graph_trace_update_thresh,
1419 	.open		= graph_trace_open,
1420 	.pipe_open	= graph_trace_open,
1421 	.close		= graph_trace_close,
1422 	.pipe_close	= graph_trace_close,
1423 	.init		= graph_trace_init,
1424 	.reset		= graph_trace_reset,
1425 	.print_line	= print_graph_function,
1426 	.print_header	= print_graph_headers,
1427 	.flags		= &tracer_flags,
1428 	.set_flag	= func_graph_set_flag,
1429 	.allow_instances = true,
1430 #ifdef CONFIG_FTRACE_SELFTEST
1431 	.selftest	= trace_selftest_startup_function_graph,
1432 #endif
1433 };
1434 
1435 
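/*
 * Handlers for the "max_graph_depth" tracefs file (created in
 * init_graph_tracefs() below).  Writing a number sets fgraph_max_depth,
 * which limits how deep the function graph tracer follows nested calls.
 * A sketch of usage, assuming tracefs is mounted in the usual place:
 *
 *	echo 5 > /sys/kernel/tracing/max_graph_depth
 */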
1436 static ssize_t
1437 graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
1438 		  loff_t *ppos)
1439 {
1440 	unsigned long val;
1441 	int ret;
1442 
1443 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1444 	if (ret)
1445 		return ret;
1446 
1447 	fgraph_max_depth = val;
1448 
1449 	*ppos += cnt;
1450 
1451 	return cnt;
1452 }
1453 
1454 static ssize_t
1455 graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
1456 		 loff_t *ppos)
1457 {
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
1459 	int n;
1460 
1461 	n = sprintf(buf, "%d\n", fgraph_max_depth);
1462 
1463 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
1464 }
1465 
1466 static const struct file_operations graph_depth_fops = {
1467 	.open		= tracing_open_generic,
1468 	.write		= graph_depth_write,
1469 	.read		= graph_depth_read,
1470 	.llseek		= generic_file_llseek,
1471 };
1472 
1473 static __init int init_graph_tracefs(void)
1474 {
1475 	int ret;
1476 
1477 	ret = tracing_init_dentry();
1478 	if (ret)
1479 		return 0;
1480 
1481 	trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
1482 			  NULL, &graph_depth_fops);
1483 
1484 	return 0;
1485 }
1486 fs_initcall(init_graph_tracefs);
1487 
1488 static __init int init_graph_trace(void)
1489 {
1490 	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);
1491 
1492 	if (!register_trace_event(&graph_trace_entry_event)) {
1493 		pr_warn("Warning: could not register graph trace events\n");
1494 		return 1;
1495 	}
1496 
1497 	if (!register_trace_event(&graph_trace_ret_event)) {
1498 		pr_warn("Warning: could not register graph trace events\n");
1499 		return 1;
1500 	}
1501 
1502 	return register_tracer(&graph_trace);
1503 }
1504 
1505 core_initcall(init_graph_trace);
1506