// SPDX-License-Identifier: GPL-2.0
/*
 * Fprobe-based tracing events
 * Copyright (C) 2022 Google LLC.
 */
#define pr_fmt(fmt)	"trace_fprobe: " fmt
#include <asm/ptrace.h>

#include <linux/fprobe.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/security.h>
#include <linux/tracepoint.h>
#include <linux/uaccess.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_kernel.h"
#include "trace_probe_tmpl.h"

#define FPROBE_EVENT_SYSTEM "fprobes"
#define TRACEPOINT_EVENT_SYSTEM "tracepoints"
#define RETHOOK_MAXACTIVE_MAX 4096
#define TRACEPOINT_STUB ERR_PTR(-ENOENT)

static int trace_fprobe_create(const char *raw_command);
static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_fprobe_release(struct dyn_event *ev);
static bool trace_fprobe_is_busy(struct dyn_event *ev);
static bool trace_fprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_fprobe_ops = {
	.create = trace_fprobe_create,
	.show = trace_fprobe_show,
	.is_busy = trace_fprobe_is_busy,
	.free = trace_fprobe_release,
	.match = trace_fprobe_match,
};

/*
 * Fprobe event core functions
 */
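/*
 * Note: @tp must remain the last member, since it ends with a flexible
 * array of arguments sized via struct_size() in alloc_trace_fprobe().
 * @tpoint and @mod are used only by tracepoint ('t') events.
 */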
struct trace_fprobe {
	struct dyn_event	devent;
	struct fprobe		fp;
	const char		*symbol;
	struct tracepoint	*tpoint;
	struct module		*mod;
	struct trace_probe	tp;
};

static bool is_trace_fprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_fprobe_ops;
}

static struct trace_fprobe *to_trace_fprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_fprobe, devent);
}

/**
 * for_each_trace_fprobe - iterate over the trace_fprobe list
 * @pos:	the struct trace_fprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_fprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_fprobe(dpos) && (pos = to_trace_fprobe(dpos)))
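
/*
 * Usage sketch (assumes event_mutex is held, as required for walking
 * the dyn_event list):
 *
 *	struct dyn_event *dpos;
 *	struct trace_fprobe *tf;
 *
 *	for_each_trace_fprobe(tf, dpos)
 *		pr_info("fprobe event on %s\n", trace_fprobe_symbol(tf));
 */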

static bool trace_fprobe_is_return(struct trace_fprobe *tf)
{
	return tf->fp.exit_handler != NULL;
}

static bool trace_fprobe_is_tracepoint(struct trace_fprobe *tf)
{
	return tf->tpoint != NULL;
}

static const char *trace_fprobe_symbol(struct trace_fprobe *tf)
{
	return tf->symbol ? tf->symbol : "unknown";
}

static bool trace_fprobe_is_busy(struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);

	return trace_probe_is_enabled(&tf->tp);
}

static bool trace_fprobe_match_command_head(struct trace_fprobe *tf,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];

	if (!argc)
		return true;

	snprintf(buf, sizeof(buf), "%s", trace_fprobe_symbol(tf));
	if (strcmp(buf, argv[0]))
		return false;
	argc--; argv++;

	return trace_probe_match_command_args(&tf->tp, argc, argv);
}

static bool trace_fprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);

	if (event[0] != '\0' && strcmp(trace_probe_name(&tf->tp), event))
		return false;

	if (system && strcmp(trace_probe_group_name(&tf->tp), system))
		return false;

	return trace_fprobe_match_command_head(tf, argc, argv);
}

static bool trace_fprobe_is_registered(struct trace_fprobe *tf)
{
	return fprobe_is_registered(&tf->fp);
}

/*
 * Note that we don't verify the fetch_insn code, since it does not come
 * from user space.
 */
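/*
 * Only the first fetch stage is handled here; the shared
 * process_fetch_insn_bottom() in trace_probe_tmpl.h performs the
 * remaining dereference and store stages.
 */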
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
		   void *dest, void *base)
{
	struct ftrace_regs *fregs = rec;
	unsigned long val;
	int ret;

retry:
	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_STACK:
		val = ftrace_regs_get_kernel_stack_nth(fregs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = ftrace_regs_get_stack_pointer(fregs);
		break;
	case FETCH_OP_RETVAL:
		val = ftrace_regs_get_return_value(fregs);
		break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	case FETCH_OP_ARG:
		val = ftrace_regs_get_argument(fregs, code->param);
		break;
	case FETCH_OP_EDATA:
		val = *(unsigned long *)((unsigned long)edata + code->offset);
		break;
#endif
	case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
		code++;
		goto retry;
	default:
		ret = process_common_fetch_insn(code, &val);
		if (ret < 0)
			return ret;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn);

/* function entry handler */
static nokprobe_inline void
__fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		    struct ftrace_regs *fregs,
		    struct trace_event_file *trace_file)
{
	struct fentry_trace_entry_head *entry;
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	struct trace_event_buffer fbuffer;
	int dsize;

	if (WARN_ON_ONCE(call != trace_file->event_call))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	dsize = __get_data_size(&tf->tp, fregs, NULL);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + tf->tp.size + dsize);
	if (!entry)
		return;

	fbuffer.regs = ftrace_get_regs(fregs);
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	entry->ip = entry_ip;
	store_trace_args(&entry[1], &tf->tp, fregs, NULL, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}

static void
fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		  struct ftrace_regs *fregs)
{
	struct event_file_link *link;

	trace_probe_for_each_link_rcu(link, &tf->tp)
		__fentry_trace_func(tf, entry_ip, fregs, link->file);
}
NOKPROBE_SYMBOL(fentry_trace_func);

static nokprobe_inline
void store_fprobe_entry_data(void *edata, struct trace_probe *tp, struct ftrace_regs *fregs)
{
	struct probe_entry_arg *earg = tp->entry_arg;
	unsigned long val = 0;
	int i;

	if (!earg)
		return;

	for (i = 0; i < earg->size; i++) {
		struct fetch_insn *code = &earg->code[i];

		switch (code->op) {
		case FETCH_OP_ARG:
			val = ftrace_regs_get_argument(fregs, code->param);
			break;
		case FETCH_OP_ST_EDATA:
			*(unsigned long *)((unsigned long)edata + code->offset) = val;
			break;
		case FETCH_OP_END:
			goto end;
		default:
			break;
		}
	}
end:
	return;
}

/* function entry handler used by fexit events to record entry data */
static int trace_fprobe_entry_handler(struct fprobe *fp, unsigned long entry_ip,
				unsigned long ret_ip, struct ftrace_regs *fregs,
				void *entry_data)
{
	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);

	if (tf->tp.entry_arg)
		store_fprobe_entry_data(entry_data, &tf->tp, fregs);

	return 0;
}
NOKPROBE_SYMBOL(trace_fprobe_entry_handler);

static nokprobe_inline void
__fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		   unsigned long ret_ip, struct ftrace_regs *fregs,
		   void *entry_data, struct trace_event_file *trace_file)
{
	struct fexit_trace_entry_head *entry;
	struct trace_event_buffer fbuffer;
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	int dsize;

	if (WARN_ON_ONCE(call != trace_file->event_call))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	dsize = __get_data_size(&tf->tp, fregs, entry_data);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + tf->tp.size + dsize);
	if (!entry)
		return;

	fbuffer.regs = ftrace_get_regs(fregs);
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	entry->func = entry_ip;
	entry->ret_ip = ret_ip;
	store_trace_args(&entry[1], &tf->tp, fregs, entry_data, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}

static void
fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
		 unsigned long ret_ip, struct ftrace_regs *fregs, void *entry_data)
{
	struct event_file_link *link;

	trace_probe_for_each_link_rcu(link, &tf->tp)
		__fexit_trace_func(tf, entry_ip, ret_ip, fregs, entry_data, link->file);
}
NOKPROBE_SYMBOL(fexit_trace_func);

#ifdef CONFIG_PERF_EVENTS

static int fentry_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
			    struct ftrace_regs *fregs)
{
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	struct fentry_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	struct pt_regs *regs;
	int rctx;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return 0;

	dsize = __get_data_size(&tf->tp, fregs, NULL);
	__size = sizeof(*entry) + tf->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, &regs, &rctx);
	if (!entry)
		return 0;

	regs = ftrace_fill_perf_regs(fregs, regs);

	entry->ip = entry_ip;
	memset(&entry[1], 0, dsize);
	store_trace_args(&entry[1], &tf->tp, fregs, NULL, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
	return 0;
}
NOKPROBE_SYMBOL(fentry_perf_func);

static void
fexit_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
		unsigned long ret_ip, struct ftrace_regs *fregs,
		void *entry_data)
{
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);
	struct fexit_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	struct pt_regs *regs;
	int rctx;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tf->tp, fregs, entry_data);
	__size = sizeof(*entry) + tf->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, &regs, &rctx);
	if (!entry)
		return;

	regs = ftrace_fill_perf_regs(fregs, regs);

	entry->func = entry_ip;
	entry->ret_ip = ret_ip;
	store_trace_args(&entry[1], &tf->tp, fregs, entry_data, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(fexit_perf_func);
#endif	/* CONFIG_PERF_EVENTS */

static int fentry_dispatcher(struct fprobe *fp, unsigned long entry_ip,
			     unsigned long ret_ip, struct ftrace_regs *fregs,
			     void *entry_data)
{
	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
	int ret = 0;

	if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
		fentry_trace_func(tf, entry_ip, fregs);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
		ret = fentry_perf_func(tf, entry_ip, fregs);
#endif
	return ret;
}
NOKPROBE_SYMBOL(fentry_dispatcher);

static void fexit_dispatcher(struct fprobe *fp, unsigned long entry_ip,
			     unsigned long ret_ip, struct ftrace_regs *fregs,
			     void *entry_data)
{
	struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);

	if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
		fexit_trace_func(tf, entry_ip, ret_ip, fregs, entry_data);
#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
		fexit_perf_func(tf, entry_ip, ret_ip, fregs, entry_data);
#endif
}
NOKPROBE_SYMBOL(fexit_dispatcher);
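
/*
 * The two dispatchers above are the callbacks attached to the fprobe;
 * each fans out to the ftrace handler (TP_FLAG_TRACE) and, when
 * CONFIG_PERF_EVENTS is set, the perf handler (TP_FLAG_PROFILE), in the
 * same way kprobe events dispatch.
 */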

static void free_trace_fprobe(struct trace_fprobe *tf)
{
	if (tf) {
		trace_probe_cleanup(&tf->tp);
		kfree(tf->symbol);
		kfree(tf);
	}
}

/*
 * Allocate new trace_probe and initialize it (including fprobe).
 */
static struct trace_fprobe *alloc_trace_fprobe(const char *group,
					       const char *event,
					       const char *symbol,
					       struct tracepoint *tpoint,
					       struct module *mod,
					       int maxactive,
					       int nargs, bool is_return)
{
	struct trace_fprobe *tf;
	int ret = -ENOMEM;

	tf = kzalloc(struct_size(tf, tp.args, nargs), GFP_KERNEL);
	if (!tf)
		return ERR_PTR(ret);

	tf->symbol = kstrdup(symbol, GFP_KERNEL);
	if (!tf->symbol)
		goto error;

	if (is_return)
		tf->fp.exit_handler = fexit_dispatcher;
	else
		tf->fp.entry_handler = fentry_dispatcher;

	tf->tpoint = tpoint;
	tf->mod = mod;
	tf->fp.nr_maxactive = maxactive;

	ret = trace_probe_init(&tf->tp, event, group, false, nargs);
	if (ret < 0)
		goto error;

	dyn_event_init(&tf->devent, &trace_fprobe_ops);
	return tf;
error:
	free_trace_fprobe(tf);
	return ERR_PTR(ret);
}

static struct trace_fprobe *find_trace_fprobe(const char *event,
					      const char *group)
{
	struct dyn_event *pos;
	struct trace_fprobe *tf;

	for_each_trace_fprobe(tf, pos)
		if (strcmp(trace_probe_name(&tf->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tf->tp), group) == 0)
			return tf;
	return NULL;
}

static inline int __enable_trace_fprobe(struct trace_fprobe *tf)
{
	if (trace_fprobe_is_registered(tf))
		enable_fprobe(&tf->fp);

	return 0;
}

static void __disable_trace_fprobe(struct trace_probe *tp)
{
	struct trace_fprobe *tf;

	list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
		if (!trace_fprobe_is_registered(tf))
			continue;
		disable_fprobe(&tf->fp);
	}
}

/*
 * Enable trace_fprobe:
 * if @file is NULL, enable the "perf" handler; otherwise enable the
 * "trace" handler for @file.
 */
static int enable_trace_fprobe(struct trace_event_call *call,
			       struct trace_event_file *file)
{
	struct trace_probe *tp;
	struct trace_fprobe *tf;
	bool enabled;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This also changes "enabled" state */
	if (file) {
		ret = trace_probe_add_file(tp, file);
		if (ret)
			return ret;
	} else
		trace_probe_set_flag(tp, TP_FLAG_PROFILE);

	if (!enabled) {
		list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
			/* TODO: check the fprobe is gone */
			__enable_trace_fprobe(tf);
		}
	}

	return 0;
}

/*
 * Disable trace_fprobe:
 * if @file is NULL, disable the "perf" handler; otherwise disable the
 * "trace" handler for @file.
 */
static int disable_trace_fprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	if (file) {
		if (!trace_probe_get_file_link(tp, file))
			return -ENOENT;
		if (!trace_probe_has_single_file(tp))
			goto out;
		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	if (!trace_probe_is_enabled(tp))
		__disable_trace_fprobe(tp);

 out:
	if (file)
		/*
		 * Synchronization is done in the function below. For perf
		 * events, file == NULL and perf_trace_event_unreg() calls
		 * tracepoint_synchronize_unregister() to synchronize the
		 * event, so we don't need to care about it here.
		 */
		trace_probe_remove_file(tp, file);

	return 0;
}
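
/*
 * Example flow via tracefs (event name "myprobe" is illustrative):
 *
 *   echo 'f:myprobe vfs_read' >> /sys/kernel/tracing/dynamic_events
 *   echo 1 > /sys/kernel/tracing/events/fprobes/myprobe/enable
 *   echo 0 > /sys/kernel/tracing/events/fprobes/myprobe/enable
 *
 * The enable/disable writes reach enable_trace_fprobe() and
 * disable_trace_fprobe() through fprobe_register() below.
 */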

/* Event entry printers */
static enum print_line_t
print_fentry_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct fentry_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct fentry_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	if (trace_probe_print_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}

static enum print_line_t
print_fexit_event(struct trace_iterator *iter, int flags,
		  struct trace_event *event)
{
	struct fexit_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct fexit_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	if (trace_probe_print_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}

static int fentry_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct fentry_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}

static int fexit_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct fexit_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}

static struct trace_event_functions fentry_funcs = {
	.trace		= print_fentry_event
};

static struct trace_event_functions fexit_funcs = {
	.trace		= print_fexit_event
};

static struct trace_event_fields fentry_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = fentry_event_define_fields },
	{}
};

static struct trace_event_fields fexit_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = fexit_event_define_fields },
	{}
};

static int fprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data);

static inline void init_trace_event_call(struct trace_fprobe *tf)
{
	struct trace_event_call *call = trace_probe_event_call(&tf->tp);

	if (trace_fprobe_is_return(tf)) {
		call->event.funcs = &fexit_funcs;
		call->class->fields_array = fexit_fields_array;
	} else {
		call->event.funcs = &fentry_funcs;
		call->class->fields_array = fentry_fields_array;
	}

	call->flags = TRACE_EVENT_FL_FPROBE;
	call->class->reg = fprobe_register;
}

static int register_fprobe_event(struct trace_fprobe *tf)
{
	init_trace_event_call(tf);

	return trace_probe_register_event_call(&tf->tp);
}

static int unregister_fprobe_event(struct trace_fprobe *tf)
{
	return trace_probe_unregister_event_call(&tf->tp);
}

static int __register_tracepoint_fprobe(struct trace_fprobe *tf)
{
	struct tracepoint *tpoint = tf->tpoint;
	unsigned long ip = (unsigned long)tpoint->probestub;
	int ret;

	/*
	 * Here, we do 2 steps to enable fprobe on a tracepoint.
	 * At first, put the __probestub_##TP function on the tracepoint
	 * and then put an fprobe on that stub function.
	 */
	ret = tracepoint_probe_register_prio_may_exist(tpoint,
				tpoint->probestub, NULL, 0);
	if (ret < 0)
		return ret;
	return register_fprobe_ips(&tf->fp, &ip, 1);
}

/* Internal register function - just handle fprobe and flags */
static int __register_trace_fprobe(struct trace_fprobe *tf)
{
	int i, ret;

	/* Should we need new LOCKDOWN flag for fprobe? */
	ret = security_locked_down(LOCKDOWN_KPROBES);
	if (ret)
		return ret;

	if (trace_fprobe_is_registered(tf))
		return -EINVAL;

	for (i = 0; i < tf->tp.nr_args; i++) {
		ret = traceprobe_update_arg(&tf->tp.args[i]);
		if (ret)
			return ret;
	}

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tf->tp))
		tf->fp.flags &= ~FPROBE_FL_DISABLED;
	else
		tf->fp.flags |= FPROBE_FL_DISABLED;

	if (trace_fprobe_is_tracepoint(tf)) {

		/* This tracepoint is not loaded yet */
		if (tf->tpoint == TRACEPOINT_STUB)
			return 0;

		return __register_tracepoint_fprobe(tf);
	}

	/* TODO: handle filter, nofilter or symbol list */
	return register_fprobe(&tf->fp, tf->symbol, NULL);
}

/* Internal unregister function - just handle fprobe and flags */
static void __unregister_trace_fprobe(struct trace_fprobe *tf)
{
	if (trace_fprobe_is_registered(tf)) {
		unregister_fprobe(&tf->fp);
		memset(&tf->fp, 0, sizeof(tf->fp));
		if (trace_fprobe_is_tracepoint(tf)) {
			tracepoint_probe_unregister(tf->tpoint,
					tf->tpoint->probestub, NULL);
			tf->tpoint = NULL;
			tf->mod = NULL;
		}
	}
}

/* TODO: make this trace_*probe common function */
/* Unregister a trace_probe and probe_event */
static int unregister_trace_fprobe(struct trace_fprobe *tf)
{
	/* If other probes are on the event, just unregister fprobe */
	if (trace_probe_has_sibling(&tf->tp))
		goto unreg;

	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&tf->tp))
		return -EBUSY;

	/* If there's a reference to the dynamic event */
	if (trace_event_dyn_busy(trace_probe_event_call(&tf->tp)))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_fprobe_event(tf))
		return -EBUSY;

unreg:
	__unregister_trace_fprobe(tf);
	dyn_event_remove(&tf->devent);
	trace_probe_unlink(&tf->tp);

	return 0;
}

static bool trace_fprobe_has_same_fprobe(struct trace_fprobe *orig,
					 struct trace_fprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	int i;

	list_for_each_entry(orig, &tpe->probes, tp.list) {
		if (strcmp(trace_fprobe_symbol(orig),
			   trace_fprobe_symbol(comp)))
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are same. Let's compare comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}

static int append_trace_fprobe(struct trace_fprobe *tf, struct trace_fprobe *to)
{
	int ret;

	if (trace_fprobe_is_return(tf) != trace_fprobe_is_return(to) ||
	    trace_fprobe_is_tracepoint(tf) != trace_fprobe_is_tracepoint(to)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, DIFF_PROBE_TYPE);
		return -EEXIST;
	}
	ret = trace_probe_compare_arg_type(&tf->tp, &to->tp);
	if (ret) {
		/* Note that argument starts index = 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_fprobe_has_same_fprobe(to, tf)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tf->tp, &to->tp);
	if (ret)
		return ret;

	ret = __register_trace_fprobe(tf);
	if (ret)
		trace_probe_unlink(&tf->tp);
	else
		dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp));

	return ret;
}

/* Register a trace_probe and probe_event */
static int register_trace_fprobe(struct trace_fprobe *tf)
{
	struct trace_fprobe *old_tf;
	int ret;

	mutex_lock(&event_mutex);

	old_tf = find_trace_fprobe(trace_probe_name(&tf->tp),
				   trace_probe_group_name(&tf->tp));
	if (old_tf) {
		ret = append_trace_fprobe(tf, old_tf);
		goto end;
	}

	/* Register new event */
	ret = register_fprobe_event(tf);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		} else
			pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register fprobe */
	ret = __register_trace_fprobe(tf);
	if (ret < 0)
		unregister_fprobe_event(tf);
	else
		dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp));

end:
	mutex_unlock(&event_mutex);
	return ret;
}

struct __find_tracepoint_cb_data {
	const char *tp_name;
	struct tracepoint *tpoint;
	struct module *mod;
};

static void __find_tracepoint_module_cb(struct tracepoint *tp, struct module *mod, void *priv)
{
	struct __find_tracepoint_cb_data *data = priv;

	if (!data->tpoint && !strcmp(data->tp_name, tp->name)) {
		data->tpoint = tp;
		if (!data->mod) {
			data->mod = mod;
			if (!try_module_get(data->mod)) {
				data->tpoint = NULL;
				data->mod = NULL;
			}
		}
	}
}

static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
{
	struct __find_tracepoint_cb_data *data = priv;

	if (!data->tpoint && !strcmp(data->tp_name, tp->name))
		data->tpoint = tp;
}

/*
 * Find a tracepoint in the kernel and loaded modules. If the tracepoint
 * is in a module, this increments the module refcount to prevent the
 * module from being unloaded until the trace_fprobe is registered on the
 * trace_fprobe list. After that registration, the refcount is dropped
 * again because __tracepoint_probe_module_cb() takes over tracking the
 * module lifetime.
 */
static struct tracepoint *find_tracepoint(const char *tp_name,
					  struct module **tp_mod)
{
	struct __find_tracepoint_cb_data data = {
		.tp_name = tp_name,
		.mod = NULL,
	};

	for_each_kernel_tracepoint(__find_tracepoint_cb, &data);

	if (!data.tpoint && IS_ENABLED(CONFIG_MODULES)) {
		for_each_module_tracepoint(__find_tracepoint_module_cb, &data);
		*tp_mod = data.mod;
	}

	return data.tpoint;
}

#ifdef CONFIG_MODULES
static void reenable_trace_fprobe(struct trace_fprobe *tf)
{
	struct trace_probe *tp = &tf->tp;

	list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
		__enable_trace_fprobe(tf);
	}
}

static struct tracepoint *find_tracepoint_in_module(struct module *mod,
						    const char *tp_name)
{
	struct __find_tracepoint_cb_data data = {
		.tp_name = tp_name,
		.mod = mod,
	};

	for_each_tracepoint_in_module(mod, __find_tracepoint_module_cb, &data);
	return data.tpoint;
}

static int __tracepoint_probe_module_cb(struct notifier_block *self,
					unsigned long val, void *data)
{
	struct tp_module *tp_mod = data;
	struct tracepoint *tpoint;
	struct trace_fprobe *tf;
	struct dyn_event *pos;

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	mutex_lock(&event_mutex);
	for_each_trace_fprobe(tf, pos) {
		if (val == MODULE_STATE_COMING && tf->tpoint == TRACEPOINT_STUB) {
			tpoint = find_tracepoint_in_module(tp_mod->mod, tf->symbol);
			if (tpoint) {
				tf->tpoint = tpoint;
				tf->mod = tp_mod->mod;
				if (!WARN_ON_ONCE(__register_tracepoint_fprobe(tf)) &&
				    trace_probe_is_enabled(&tf->tp))
					reenable_trace_fprobe(tf);
			}
		} else if (val == MODULE_STATE_GOING && tp_mod->mod == tf->mod) {
			tracepoint_probe_unregister(tf->tpoint,
					tf->tpoint->probestub, NULL);
			tf->tpoint = NULL;
			tf->mod = NULL;
		}
	}
	mutex_unlock(&event_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block tracepoint_module_nb = {
	.notifier_call = __tracepoint_probe_module_cb,
};
#endif /* CONFIG_MODULES */

static int parse_symbol_and_return(int argc, const char *argv[],
				   char **symbol, bool *is_return,
				   bool is_tracepoint)
{
	char *tmp = strchr(argv[1], '%');
	int i;

	if (tmp) {
		int len = tmp - argv[1];

		if (!is_tracepoint && !strcmp(tmp, "%return")) {
			*is_return = true;
		} else {
			trace_probe_log_err(len, BAD_ADDR_SUFFIX);
			return -EINVAL;
		}
		*symbol = kmemdup_nul(argv[1], len, GFP_KERNEL);
	} else
		*symbol = kstrdup(argv[1], GFP_KERNEL);
	if (!*symbol)
		return -ENOMEM;

	if (*is_return)
		return 0;

	/* If there is $retval, this should be a return fprobe. */
	for (i = 2; i < argc; i++) {
		tmp = strstr(argv[i], "$retval");
		if (tmp && !isalnum(tmp[7]) && tmp[7] != '_') {
			if (is_tracepoint) {
				trace_probe_log_set_index(i);
				trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE);
				return -EINVAL;
			}
			*is_return = true;
			break;
		}
	}
	return 0;
}
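
/*
 * For example, "vfs_read%return" strips the suffix and sets *is_return;
 * a plain "vfs_read" whose fetchargs mention "$retval" is likewise
 * promoted to a return probe, while tracepoint events reject both forms.
 */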

static int __trace_fprobe_create(int argc, const char *argv[])
{
	/*
	 * Argument syntax:
	 *  - Add fentry probe:
	 *      f[:[GRP/][EVENT]] [MOD:]KSYM [FETCHARGS]
	 *  - Add fexit probe:
	 *      f[N][:[GRP/][EVENT]] [MOD:]KSYM%return [FETCHARGS]
	 *  - Add tracepoint probe:
	 *      t[:[GRP/][EVENT]] TRACEPOINT [FETCHARGS]
	 *
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth entry of stack (N:0-)
	 *  $argN	: fetch Nth argument (N:1-)
	 *  $comm	: fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
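	/*
	 * For example (see Documentation/trace/fprobetrace.rst; event and
	 * argument names here are illustrative):
	 *
	 *   echo 'f:myread vfs_read count=$arg3' >> dynamic_events
	 *   echo 'f:myread_end vfs_read%return ret=$retval' >> dynamic_events
	 *   echo 't:mysched sched_switch' >> dynamic_events
	 */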
	struct trace_fprobe *tf = NULL;
	int i, len, new_argc = 0, ret = 0;
	bool is_return = false;
	char *symbol = NULL;
	const char *event = NULL, *group = FPROBE_EVENT_SYSTEM;
	const char **new_argv = NULL;
	int maxactive = 0;
	char buf[MAX_EVENT_NAME_LEN];
	char gbuf[MAX_EVENT_NAME_LEN];
	char sbuf[KSYM_NAME_LEN];
	char abuf[MAX_BTF_ARGS_LEN];
	char *dbuf = NULL;
	bool is_tracepoint = false;
	struct module *tp_mod = NULL;
	struct tracepoint *tpoint = NULL;
	struct traceprobe_parse_context ctx = {
		.flags = TPARG_FL_KERNEL | TPARG_FL_FPROBE,
	};

	if ((argv[0][0] != 'f' && argv[0][0] != 't') || argc < 2)
		return -ECANCELED;

	if (argv[0][0] == 't') {
		is_tracepoint = true;
		group = TRACEPOINT_EVENT_SYSTEM;
	}

	trace_probe_log_init("trace_fprobe", argc, argv);

	event = strchr(&argv[0][1], ':');
	if (event)
		event++;

	if (isdigit(argv[0][1])) {
		if (event)
			len = event - &argv[0][1] - 1;
		else
			len = strlen(&argv[0][1]);
		if (len > MAX_EVENT_NAME_LEN - 1) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		memcpy(buf, &argv[0][1], len);
		buf[len] = '\0';
		ret = kstrtouint(buf, 0, &maxactive);
		if (ret || !maxactive) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		/* fprobe rethook instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > RETHOOK_MAXACTIVE_MAX) {
			trace_probe_log_err(1, MAXACT_TOO_BIG);
			goto parse_error;
		}
	}

	trace_probe_log_set_index(1);

	/* a symbol (or tracepoint) must be specified */
	ret = parse_symbol_and_return(argc, argv, &symbol, &is_return, is_tracepoint);
	if (ret < 0)
		goto parse_error;

	if (!is_return && maxactive) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(1, BAD_MAXACT_TYPE);
		goto parse_error;
	}

	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, gbuf,
						  event - argv[0]);
		if (ret)
			goto parse_error;
	}

	if (!event) {
		/* Make a new event name */
		if (is_tracepoint)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%s%s",
				 isdigit(*symbol) ? "_" : "", symbol);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%s__%s", symbol,
				 is_return ? "exit" : "entry");
		sanitize_event_name(buf);
		event = buf;
	}

	if (is_return)
		ctx.flags |= TPARG_FL_RETURN;
	else
		ctx.flags |= TPARG_FL_FENTRY;

	if (is_tracepoint) {
		ctx.flags |= TPARG_FL_TPOINT;
		tpoint = find_tracepoint(symbol, &tp_mod);
		if (tpoint) {
			ctx.funcname = kallsyms_lookup(
				(unsigned long)tpoint->probestub,
				NULL, NULL, NULL, sbuf);
		} else if (IS_ENABLED(CONFIG_MODULES)) {
			/* This *may* be loaded afterwards */
			tpoint = TRACEPOINT_STUB;
			ctx.funcname = symbol;
		} else {
			trace_probe_log_set_index(1);
			trace_probe_log_err(0, NO_TRACEPOINT);
			goto parse_error;
		}
	} else
		ctx.funcname = symbol;

	argc -= 2; argv += 2;
	new_argv = traceprobe_expand_meta_args(argc, argv, &new_argc,
					       abuf, MAX_BTF_ARGS_LEN, &ctx);
	if (IS_ERR(new_argv)) {
		ret = PTR_ERR(new_argv);
		new_argv = NULL;
		goto out;
	}
	if (new_argv) {
		argc = new_argc;
		argv = new_argv;
	}
	if (argc > MAX_TRACE_ARGS) {
		ret = -E2BIG;
		goto out;
	}

	ret = traceprobe_expand_dentry_args(argc, argv, &dbuf);
	if (ret)
		goto out;

	/* setup a probe */
	tf = alloc_trace_fprobe(group, event, symbol, tpoint, tp_mod,
				maxactive, argc, is_return);
	if (IS_ERR(tf)) {
		ret = PTR_ERR(tf);
		/* This must return -ENOMEM, else there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto out;	/* We know tf is not allocated */
	}

	/* parse arguments */
	for (i = 0; i < argc; i++) {
		trace_probe_log_set_index(i + 2);
		ctx.offset = 0;
		ret = traceprobe_parse_probe_arg(&tf->tp, i, argv[i], &ctx);
		if (ret)
			goto error;	/* This can be -ENOMEM */
	}

	if (is_return && tf->tp.entry_arg) {
		tf->fp.entry_handler = trace_fprobe_entry_handler;
		tf->fp.entry_data_size = traceprobe_get_entry_data_size(&tf->tp);
	}

	ret = traceprobe_set_print_fmt(&tf->tp,
			is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL);
	if (ret < 0)
		goto error;

	ret = register_trace_fprobe(tf);
	if (ret) {
		trace_probe_log_set_index(1);
		if (ret == -EILSEQ)
			trace_probe_log_err(0, BAD_INSN_BNDRY);
		else if (ret == -ENOENT)
			trace_probe_log_err(0, BAD_PROBE_ADDR);
		else if (ret != -ENOMEM && ret != -EEXIST)
			trace_probe_log_err(0, FAIL_REG_PROBE);
		goto error;
	}

out:
	if (tp_mod)
		module_put(tp_mod);
	traceprobe_finish_parse(&ctx);
	trace_probe_log_clear();
	kfree(new_argv);
	kfree(symbol);
	kfree(dbuf);
	return ret;

parse_error:
	ret = -EINVAL;
error:
	free_trace_fprobe(tf);
	goto out;
}

static int trace_fprobe_create(const char *raw_command)
{
	return trace_probe_create(raw_command, __trace_fprobe_create);
}

static int trace_fprobe_release(struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);
	int ret = unregister_trace_fprobe(tf);

	if (!ret)
		free_trace_fprobe(tf);
	return ret;
}

static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);
	int i;

	if (trace_fprobe_is_tracepoint(tf))
		seq_putc(m, 't');
	else
		seq_putc(m, 'f');
	if (trace_fprobe_is_return(tf) && tf->fp.nr_maxactive)
		seq_printf(m, "%d", tf->fp.nr_maxactive);
	seq_printf(m, ":%s/%s", trace_probe_group_name(&tf->tp),
				trace_probe_name(&tf->tp));

	seq_printf(m, " %s%s", trace_fprobe_symbol(tf),
			       trace_fprobe_is_return(tf) ? "%return" : "");

	for (i = 0; i < tf->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tf->tp.args[i].name, tf->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}
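
/*
 * A fexit event created as "f:myread_end vfs_read%return ret=$retval"
 * would be shown here as (illustrative):
 *
 *   f:fprobes/myread_end vfs_read%return ret=$retval
 */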

/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 */
static int fprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_fprobe(event, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_fprobe(event, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_fprobe(event, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_fprobe(event, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

/*
 * Register dynevent at core_initcall. This allows the kernel to set up
 * fprobe events in postcore_initcall without tracefs.
 */
static __init int init_fprobe_trace_early(void)
{
	int ret;

	ret = dyn_event_register(&trace_fprobe_ops);
	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = register_tracepoint_module_notifier(&tracepoint_module_nb);
	if (ret)
		return ret;
#endif

	return 0;
}
core_initcall(init_fprobe_trace_early);
1383