/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/debugfs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/ptrace.h>
#include <linux/perf_event.h>

#include "trace.h"
#include "trace_output.h"

#define MAX_TRACE_ARGS 128
#define MAX_ARGSTR_LEN 63
#define MAX_EVENT_NAME_LEN 64
#define KPROBE_EVENT_SYSTEM "kprobes"

/* Reserved field names */
#define FIELD_STRING_IP "__probe_ip"
#define FIELD_STRING_NARGS "__probe_nargs"
#define FIELD_STRING_RETIP "__probe_ret_ip"
#define FIELD_STRING_FUNC "__probe_func"

const char *reserved_field_names[] = {
	"common_type",
	"common_flags",
	"common_preempt_count",
	"common_pid",
	"common_tgid",
	"common_lock_depth",
	FIELD_STRING_IP,
	FIELD_STRING_NARGS,
	FIELD_STRING_RETIP,
	FIELD_STRING_FUNC,
};

struct fetch_func {
	unsigned long (*func)(struct pt_regs *, void *);
	void *data;
};

static __kprobes unsigned long call_fetch(struct fetch_func *f,
					  struct pt_regs *regs)
{
	return f->func(regs, f->data);
}

/* fetch handlers */
static __kprobes unsigned long fetch_register(struct pt_regs *regs,
					      void *offset)
{
	return regs_get_register(regs, (unsigned int)((unsigned long)offset));
}

static __kprobes unsigned long fetch_stack(struct pt_regs *regs,
					   void *num)
{
	return regs_get_kernel_stack_nth(regs,
					 (unsigned int)((unsigned long)num));
}

static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
{
	unsigned long retval;

	if (probe_kernel_address(addr, retval))
		return 0;
	return retval;
}

static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num)
{
	return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num));
}

static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
					      void *dummy)
{
	return regs_return_value(regs);
}

static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs,
						   void *dummy)
{
	return kernel_stack_pointer(regs);
}
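
/*
 * Illustrative mapping (performed by parse_probe_arg() further below): a
 * fetch-arg token from the command line is compiled into one fetch_func,
 * e.g. on x86:
 *   "%ax"     -> { fetch_register, <offset from regs_query_register_offset("ax")> }
 *   "$stack3" -> { fetch_stack,    (void *)3 }
 *   "$arg2"   -> { fetch_argument, (void *)2 }
 *   "@0xffff" -> { fetch_memory,   (void *)0xffff }
 *   "$retval" -> { fetch_retvalue, NULL }	(return probes only)
 */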

/* Memory fetching by symbol */
struct symbol_cache {
	char *symbol;
	long offset;
	unsigned long addr;
};

static unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
	if (sc->addr)
		sc->addr += sc->offset;
	return sc->addr;
}

static void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;
	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;

	update_symbol_cache(sc);
	return sc;
}

static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data)
{
	struct symbol_cache *sc = data;

	if (sc->addr)
		return fetch_memory(regs, (void *)sc->addr);
	else
		return 0;
}

/* Special indirect memory access interface */
struct indirect_fetch_data {
	struct fetch_func orig;
	long offset;
};

static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data)
{
	struct indirect_fetch_data *ind = data;
	unsigned long addr;

	addr = call_fetch(&ind->orig, regs);
	if (addr) {
		addr += ind->offset;
		return fetch_memory(regs, (void *)addr);
	} else
		return 0;
}

static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data)
{
	if (data->orig.func == fetch_indirect)
		free_indirect_fetch_data(data->orig.data);
	else if (data->orig.func == fetch_symbol)
		free_symbol_cache(data->orig.data);
	kfree(data);
}
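
/*
 * Illustrative example: the fetch-arg "+4(%di)" is parsed into
 *	indirect_fetch_data { .offset = 4, .orig = { fetch_register, <di> } }
 * so fetch_indirect() fetches the register, adds 4 and dereferences the
 * sum. Nested forms such as "+0(+8(%sp))" chain indirect_fetch_data nodes,
 * which is why free_indirect_fetch_data() recurses through ->orig.
 */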

/**
 * Kprobe event core functions
 */

struct probe_arg {
	struct fetch_func fetch;
	const char *name;
};

/* Flags for trace_probe */
#define TP_FLAG_TRACE 1
#define TP_FLAG_PROFILE 2

struct trace_probe {
	struct list_head list;
	struct kretprobe rp;	/* Use rp.kp for kprobe use */
	unsigned long nhit;
	unsigned int flags;	/* For TP_FLAG_* */
	const char *symbol;	/* symbol name */
	struct ftrace_event_call call;
	struct trace_event event;
	unsigned int nr_args;
	struct probe_arg args[];
};

#define SIZEOF_TRACE_PROBE(n)			\
	(offsetof(struct trace_probe, args) +	\
	(sizeof(struct probe_arg) * (n)))

static __kprobes int probe_is_return(struct trace_probe *tp)
{
	return tp->rp.handler != NULL;
}

static __kprobes const char *probe_symbol(struct trace_probe *tp)
{
	return tp->symbol ? tp->symbol : "unknown";
}

static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
{
	int ret = -EINVAL;

	if (ff->func == fetch_argument)
		ret = snprintf(buf, n, "$arg%lu", (unsigned long)ff->data);
	else if (ff->func == fetch_register) {
		const char *name;
		name = regs_query_register_name((unsigned int)((long)ff->data));
		ret = snprintf(buf, n, "%%%s", name);
	} else if (ff->func == fetch_stack)
		ret = snprintf(buf, n, "$stack%lu", (unsigned long)ff->data);
	else if (ff->func == fetch_memory)
		ret = snprintf(buf, n, "@0x%p", ff->data);
	else if (ff->func == fetch_symbol) {
		struct symbol_cache *sc = ff->data;
		if (sc->offset)
			ret = snprintf(buf, n, "@%s%+ld", sc->symbol,
				       sc->offset);
		else
			ret = snprintf(buf, n, "@%s", sc->symbol);
	} else if (ff->func == fetch_retvalue)
		ret = snprintf(buf, n, "$retval");
	else if (ff->func == fetch_stack_address)
		ret = snprintf(buf, n, "$stack");
	else if (ff->func == fetch_indirect) {
		struct indirect_fetch_data *id = ff->data;
		size_t l = 0;
		ret = snprintf(buf, n, "%+ld(", id->offset);
		if (ret >= n)
			goto end;
		l += ret;
		ret = probe_arg_string(buf + l, n - l, &id->orig);
		if (ret < 0)
			goto end;
		l += ret;
		ret = snprintf(buf + l, n - l, ")");
		ret += l;
	}
end:
	if (ret >= n)
		return -ENOSPC;
	return ret;
}
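
/*
 * Illustrative round trip: probe_arg_string() above prints an indirect
 * fetch built from "-8(%bp)" back as "-8(%bp)": the outer frame emits
 * "%+ld(", recurses for the inner fetch_func, then appends ")".
 */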

static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_probe *alloc_trace_probe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, int is_return)
{
	struct trace_probe *tp;

	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOMEM);

	if (symbol) {
		tp->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tp->symbol)
			goto error;
		tp->rp.kp.symbol_name = tp->symbol;
		tp->rp.kp.offset = offs;
	} else
		tp->rp.kp.addr = addr;

	if (is_return)
		tp->rp.handler = kretprobe_dispatcher;
	else
		tp->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event)
		goto error;
	tp->call.name = kstrdup(event, GFP_KERNEL);
	if (!tp->call.name)
		goto error;

	if (!group)
		goto error;
	tp->call.system = kstrdup(group, GFP_KERNEL);
	if (!tp->call.system)
		goto error;

	INIT_LIST_HEAD(&tp->list);
	return tp;
error:
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
	return ERR_PTR(-ENOMEM);
}

static void free_probe_arg(struct probe_arg *arg)
{
	if (arg->fetch.func == fetch_symbol)
		free_symbol_cache(arg->fetch.data);
	else if (arg->fetch.func == fetch_indirect)
		free_indirect_fetch_data(arg->fetch.data);
	kfree(arg->name);
}

static void free_trace_probe(struct trace_probe *tp)
{
	int i;

	for (i = 0; i < tp->nr_args; i++)
		free_probe_arg(&tp->args[i]);

	kfree(tp->call.system);
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
}

static struct trace_probe *find_probe_event(const char *event,
					    const char *group)
{
	struct trace_probe *tp;

	list_for_each_entry(tp, &probe_list, list)
		if (strcmp(tp->call.name, event) == 0 &&
		    strcmp(tp->call.system, group) == 0)
			return tp;
	return NULL;
}

/* Unregister a trace_probe and probe_event: call with probe_lock held */
static void unregister_trace_probe(struct trace_probe *tp)
{
	if (probe_is_return(tp))
		unregister_kretprobe(&tp->rp);
	else
		unregister_kprobe(&tp->rp.kp);
	list_del(&tp->list);
	unregister_probe_event(tp);
}

/* Register a trace_probe and probe_event */
static int register_trace_probe(struct trace_probe *tp)
{
	struct trace_probe *old_tp;
	int ret;

	mutex_lock(&probe_lock);

	/* register as an event */
	old_tp = find_probe_event(tp->call.name, tp->call.system);
	if (old_tp) {
		/* delete old event */
		unregister_trace_probe(old_tp);
		free_trace_probe(old_tp);
	}
	ret = register_probe_event(tp);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
	if (probe_is_return(tp))
		ret = register_kretprobe(&tp->rp);
	else
		ret = register_kprobe(&tp->rp.kp);

	if (ret) {
		pr_warning("Could not insert probe(%d)\n", ret);
		if (ret == -EILSEQ) {
			pr_warning("Probing address(0x%p) is not an "
				   "instruction boundary.\n",
				   tp->rp.kp.addr);
			ret = -EINVAL;
		}
		unregister_probe_event(tp);
	} else
		list_add_tail(&tp->list, &probe_list);
end:
	mutex_unlock(&probe_lock);
	return ret;
}
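
/*
 * Illustrative example for split_symbol_offset() below: "schedule+16" is
 * split in place into the symbol "schedule" with *offset = 16; a bare
 * "schedule" yields *offset = 0.
 */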

/* Split symbol and offset. */
static int split_symbol_offset(char *symbol, unsigned long *offset)
{
	char *tmp;
	int ret;

	if (!offset)
		return -EINVAL;

	tmp = strchr(symbol, '+');
	if (tmp) {
		/* skip sign because strict_strtol doesn't accept '+' */
		ret = strict_strtoul(tmp + 1, 0, offset);
		if (ret)
			return ret;
		*tmp = '\0';
	} else
		*offset = 0;
	return 0;
}

#define PARAM_MAX_ARGS 16
#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))

static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
{
	int ret = 0;
	unsigned long param;

	if (strcmp(arg, "retval") == 0) {
		if (is_return) {
			ff->func = fetch_retvalue;
			ff->data = NULL;
		} else
			ret = -EINVAL;
	} else if (strncmp(arg, "stack", 5) == 0) {
		if (arg[5] == '\0') {
			ff->func = fetch_stack_address;
			ff->data = NULL;
		} else if (isdigit(arg[5])) {
			ret = strict_strtoul(arg + 5, 10, &param);
			if (ret || param > PARAM_MAX_STACK)
				ret = -EINVAL;
			else {
				ff->func = fetch_stack;
				ff->data = (void *)param;
			}
		} else
			ret = -EINVAL;
	} else if (strncmp(arg, "arg", 3) == 0 && isdigit(arg[3])) {
		ret = strict_strtoul(arg + 3, 10, &param);
		if (ret || param > PARAM_MAX_ARGS)
			ret = -EINVAL;
		else {
			ff->func = fetch_argument;
			ff->data = (void *)param;
		}
	} else
		ret = -EINVAL;
	return ret;
}

static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
{
	int ret = 0;
	unsigned long param;
	long offset;
	char *tmp;

	switch (arg[0]) {
	case '$':
		ret = parse_probe_vars(arg + 1, ff, is_return);
		break;
	case '%':	/* named register */
		ret = regs_query_register_offset(arg + 1);
		if (ret >= 0) {
			ff->func = fetch_register;
			ff->data = (void *)(unsigned long)ret;
			ret = 0;
		}
		break;
	case '@':	/* memory or symbol */
		if (isdigit(arg[1])) {
			ret = strict_strtoul(arg + 1, 0, &param);
			if (ret)
				break;
			ff->func = fetch_memory;
			ff->data = (void *)param;
		} else {
			ret = split_symbol_offset(arg + 1, &offset);
			if (ret)
				break;
			ff->data = alloc_symbol_cache(arg + 1, offset);
			if (ff->data)
				ff->func = fetch_symbol;
			else
				ret = -EINVAL;
		}
		break;
	case '+':	/* indirect memory */
	case '-':
		tmp = strchr(arg, '(');
		if (!tmp) {
			ret = -EINVAL;
			break;
		}
		*tmp = '\0';
		ret = strict_strtol(arg + 1, 0, &offset);
		if (ret)
			break;
		if (arg[0] == '-')
			offset = -offset;
		arg = tmp + 1;
		tmp = strrchr(arg, ')');
		if (tmp) {
			struct indirect_fetch_data *id;
			*tmp = '\0';
			id = kzalloc(sizeof(struct indirect_fetch_data),
				     GFP_KERNEL);
			if (!id)
				return -ENOMEM;
			id->offset = offset;
			ret = parse_probe_arg(arg, &id->orig, is_return);
			if (ret)
				kfree(id);
			else {
				ff->func = fetch_indirect;
				ff->data = (void *)id;
			}
		} else
			ret = -EINVAL;
		break;
	default:
		/* TODO: support custom handler */
		ret = -EINVAL;
	}
	return ret;
}

/* Return 1 if name is reserved or already used by another argument */
static int conflict_field_name(const char *name,
			       struct probe_arg *args, int narg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++)
		if (strcmp(reserved_field_names[i], name) == 0)
			return 1;
	for (i = 0; i < narg; i++)
		if (strcmp(args[i].name, name) == 0)
			return 1;
	return 0;
}
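
/*
 * Illustrative usage of the command syntax documented in
 * create_trace_probe() below, written via debugfs (path may vary):
 *
 *   echo 'p:myprobe do_sys_open dfd=$arg0 flags=$arg2' > \
 *		/sys/kernel/debug/tracing/kprobe_events
 *   echo 'r:myretprobe do_sys_open $retval' >> \
 *		/sys/kernel/debug/tracing/kprobe_events
 */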

static int create_trace_probe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $argN	: fetch the Nth function argument. (N:0-)
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch the Nth entry of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Indirect memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 */
	struct trace_probe *tp;
	int i, ret = 0;
	int is_return = 0;
	char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}

	if (argv[0][0] == 'p')
		is_return = 0;
	else if (argv[0][0] == 'r')
		is_return = 1;
	else {
		pr_info("Probe definition must start with 'p' or 'r'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}

	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = strict_strtoul(argv[1], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!group)
		group = KPROBE_EVENT_SYSTEM;
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c@%s%+ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c@0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
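
	/*
	 * Illustrative example: "p do_fork" (no EVENT given) auto-generates
	 * the event name "p@do_fork+0" in the default group "kprobes";
	 * probing a raw address instead yields a name like "p@0x<addr>".
	 */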
	tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tp)) {
		pr_info("Failed to allocate trace_probe (%d).\n",
			(int)PTR_ERR(tp));
		return PTR_ERR(tp);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg)
			*arg++ = '\0';
		else
			arg = argv[i];

		if (conflict_field_name(argv[i], tp->args, i)) {
			pr_info("Argument%d name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		if (!tp->args[i].name) {
			ret = -ENOMEM;
			goto error;
		}

		/* Parse fetch argument */
		if (strlen(arg) > MAX_ARGSTR_LEN) {
			pr_info("Argument%d(%s) is too long.\n", i, arg);
			ret = -ENOSPC;
			goto error;
		}
		ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
		if (ret) {
			pr_info("Parse error at argument%d. (%d)\n", i, ret);
			kfree(tp->args[i].name);
			goto error;
		}

		tp->nr_args++;
	}

	ret = register_trace_probe(tp);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_probe(tp);
	return ret;
}

static void cleanup_all_probes(void)
{
	struct trace_probe *tp;

	mutex_lock(&probe_lock);
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tp = list_entry(probe_list.next, struct trace_probe, list);
		unregister_trace_probe(tp);
		free_trace_probe(tp);
	}
	mutex_unlock(&probe_lock);
}


/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i, ret;
	char buf[MAX_ARGSTR_LEN + 1];

	seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tp->call.system, tp->call.name);

	if (!tp->symbol)
		seq_printf(m, " 0x%p", tp->rp.kp.addr);
	else if (tp->rp.kp.offset)
		seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
	else
		seq_printf(m, " %s", probe_symbol(tp));

	for (i = 0; i < tp->nr_args; i++) {
		ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
		if (ret < 0) {
			pr_warning("Argument%d decoding error(%d).\n", i, ret);
			return ret;
		}
		seq_printf(m, " %s=%s", tp->args[i].name, buf);
	}
	seq_printf(m, "\n");
	return 0;
}
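
/*
 * Illustrative listing produced by probes_seq_show() above when reading
 * kprobe_events:
 *   p:kprobes/myprobe do_sys_open dfd=%ax flags=%cx
 *   r:kprobes/myretprobe do_sys_open $retval
 * i.e. probe type, GROUP/EVENT, probe point, then NAME=FETCHARG pairs.
 */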

static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		cleanup_all_probes();

	return seq_open(file, &probes_seq_op);
}

static int command_trace_probe(const char *buf)
{
	char **argv;
	int argc = 0, ret = 0;

	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = create_trace_probe(argc, argv);

	argv_free(argv);
	return ret;
}

#define WRITE_BUFSIZE 128

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	char *kbuf, *tmp;
	int ret;
	size_t done;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = done = 0;
	while (done < count) {
		size = count - done;
		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;
		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		tmp = strchr(kbuf, '\n');
		if (tmp) {
			*tmp = '\0';
			size = tmp - kbuf + 1;
		} else if (done + size < count) {
			pr_warning("Line is too long: should be shorter "
				   "than %d characters.\n", WRITE_BUFSIZE);
			ret = -EINVAL;
			goto out;
		}
		done += size;
		/* Remove comments */
		tmp = strchr(kbuf, '#');
		if (tmp)
			*tmp = '\0';

		ret = command_trace_probe(kbuf);
		if (ret)
			goto out;
	}
	ret = done;
out:
	kfree(kbuf);
	return ret;
}

static const struct file_operations kprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;

	seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
		   tp->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
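
/*
 * Illustrative "kprobe_profile" line (widths per the %-44s/%15lu format
 * in probes_profile_seq_show() above): event name, hit count, and the
 * kprobe's missed count, e.g.
 *   myprobe                                                   13           0
 */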

/* Kprobe handler */
static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct kprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	tp->nhit++;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
	return 0;
}

/* Kretprobe handler */
static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
					  struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct kretprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);

	return 0;
}

/* Event entry printers */
enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags)
{
	struct kprobe_trace_entry *field;
	struct trace_seq *s = &iter->seq;
	struct trace_event *event;
	struct trace_probe *tp;
	int i;

	field = (struct kprobe_trace_entry *)iter->ent;
	event = ftrace_find_event(field->ent.type);
	tp = container_of(event, struct trace_probe, event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	for (i = 0; i < field->nargs; i++)
		if (!trace_seq_printf(s, " %s=%lx",
				      tp->args[i].name, field->args[i]))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
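
/*
 * Illustrative trace lines: print_kprobe_event() above renders e.g.
 *   myprobe: (do_sys_open+0x0/0xd6) dfd=3 flags=98
 * while print_kretprobe_event() below renders e.g.
 *   myretprobe: (sys_open+0x1b/0x1d <- do_sys_open) $retval=fffffffffffffffe
 */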

enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags)
{
	struct kretprobe_trace_entry *field;
	struct trace_seq *s = &iter->seq;
	struct trace_event *event;
	struct trace_probe *tp;
	int i;

	field = (struct kretprobe_trace_entry *)iter->ent;
	event = ftrace_find_event(field->ent.type);
	tp = container_of(event, struct trace_probe, event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	for (i = 0; i < field->nargs; i++)
		if (!trace_seq_printf(s, " %s=%lx",
				      tp->args[i].name, field->args[i]))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static int probe_event_enable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags |= TP_FLAG_TRACE;
	if (probe_is_return(tp))
		return enable_kretprobe(&tp->rp);
	else
		return enable_kprobe(&tp->rp.kp);
}

static void probe_event_disable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags &= ~TP_FLAG_TRACE;
	if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
		if (probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}
}

static int probe_event_raw_init(struct ftrace_event_call *event_call)
{
	INIT_LIST_HEAD(&event_call->fields);

	return 0;
}

#undef DEFINE_FIELD
#define DEFINE_FIELD(type, item, name, is_signed)			\
	do {								\
		ret = trace_define_field(event_call, #type, name,	\
					 offsetof(typeof(field), item),	\
					 sizeof(field.item), is_signed, \
					 FILTER_OTHER);			\
		if (ret)						\
			return ret;					\
	} while (0)

static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	ret = trace_define_common_fields(event_call);
	if (ret)
		return ret;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++)
		DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
	return 0;
}

static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	ret = trace_define_common_fields(event_call);
	if (ret)
		return ret;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++)
		DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
	return 0;
}
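
/*
 * Illustrative "format" file fragment emitted by the callbacks below for
 * a probe with one argument named "dfd" (offsets/sizes are arch-dependent
 * and elided here):
 *	field: unsigned long __probe_ip;	offset:...;	size:...;
 *	field: int __probe_nargs;	offset:...;	size:...;
 *	field: unsigned long dfd;	offset:...;	size:...;
 *
 *	print fmt: "(%lx) dfd=%lx", REC->__probe_ip, REC->dfd
 */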

static int __probe_event_show_format(struct trace_seq *s,
				     struct trace_probe *tp, const char *fmt,
				     const char *arg)
{
	int i;

	/* Show format */
	if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt))
		return 0;

	for (i = 0; i < tp->nr_args; i++)
		if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name))
			return 0;

	if (!trace_seq_printf(s, "\", %s", arg))
		return 0;

	for (i = 0; i < tp->nr_args; i++)
		if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name))
			return 0;

	return trace_seq_puts(s, "\n");
}

#undef SHOW_FIELD
#define SHOW_FIELD(type, item, name)					\
	do {								\
		ret = trace_seq_printf(s, "\tfield: " #type " %s;\t"	\
				"offset:%u;\tsize:%u;\n", name,		\
				(unsigned int)offsetof(typeof(field), item),\
				(unsigned int)sizeof(type));		\
		if (!ret)						\
			return 0;					\
	} while (0)

static int kprobe_event_show_format(struct ftrace_event_call *call,
				    struct trace_seq *s)
{
	struct kprobe_trace_entry field __attribute__((unused));
	int ret, i;
	struct trace_probe *tp = (struct trace_probe *)call->data;

	SHOW_FIELD(unsigned long, ip, FIELD_STRING_IP);
	SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);

	/* Show fields */
	for (i = 0; i < tp->nr_args; i++)
		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
	trace_seq_puts(s, "\n");

	return __probe_event_show_format(s, tp, "(%lx)",
					 "REC->" FIELD_STRING_IP);
}

static int kretprobe_event_show_format(struct ftrace_event_call *call,
				       struct trace_seq *s)
{
	struct kretprobe_trace_entry field __attribute__((unused));
	int ret, i;
	struct trace_probe *tp = (struct trace_probe *)call->data;

	SHOW_FIELD(unsigned long, func, FIELD_STRING_FUNC);
	SHOW_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP);
	SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);

	/* Show fields */
	for (i = 0; i < tp->nr_args; i++)
		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
	trace_seq_puts(s, "\n");

	return __probe_event_show_format(s, tp, "(%lx <- %lx)",
					 "REC->" FIELD_STRING_FUNC
					 ", REC->" FIELD_STRING_RETIP);
}

#ifdef CONFIG_EVENT_PROFILE

/* Kprobe profile handler */
static __kprobes int kprobe_profile_func(struct kprobe *kp,
					 struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry *entry;
	struct trace_entry *ent;
	int size, __size, i, pc, __cpu;
	unsigned long irq_flags;
	char *trace_buf;
	char *raw_data;
	int rctx;

	pc = preempt_count();
	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return 0;

	/*
	 * Protect the non-NMI buffer.
	 * This also protects the rcu read side.
	 */
	local_irq_save(irq_flags);

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	__cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, __cpu);

	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
	entry = (struct kprobe_trace_entry *)raw_data;
	ent = &entry->ent;

	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = call->id;
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
	perf_tp_event(call->id, entry->ip, 1, entry, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(irq_flags);

	return 0;
}
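
/*
 * Worked example of the size computation above, with illustrative numbers:
 * if __size were 30 bytes, ALIGN(30 + sizeof(u32), sizeof(u64)) = 40 and
 * size = 40 - 4 = 36, so the record plus perf's u32 size header totals a
 * u64 multiple; the pad bytes this creates are the "dead bytes" zeroed in
 * the handlers before the record reaches userspace.
 */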

/* Kretprobe profile handler */
static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
					    struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry *entry;
	struct trace_entry *ent;
	int size, __size, i, pc, __cpu;
	unsigned long irq_flags;
	char *trace_buf;
	char *raw_data;
	int rctx;

	pc = preempt_count();
	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return 0;

	/*
	 * Protect the non-NMI buffer.
	 * This also protects the rcu read side.
	 */
	local_irq_save(irq_flags);

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	__cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, __cpu);

	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
	entry = (struct kretprobe_trace_entry *)raw_data;
	ent = &entry->ent;

	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = call->id;
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(irq_flags);

	return 0;
}

static int probe_profile_enable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags |= TP_FLAG_PROFILE;

	if (probe_is_return(tp))
		return enable_kretprobe(&tp->rp);
	else
		return enable_kprobe(&tp->rp.kp);
}

static void probe_profile_disable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags &= ~TP_FLAG_PROFILE;

	if (!(tp->flags & TP_FLAG_TRACE)) {
		if (probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}
}
#endif	/* CONFIG_EVENT_PROFILE */


static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(kp, regs);
#ifdef CONFIG_EVENT_PROFILE
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_profile_func(kp, regs);
#endif	/* CONFIG_EVENT_PROFILE */
	return 0;	/* We don't tweak the kernel, so just return 0 */
}

static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

	if (tp->flags & TP_FLAG_TRACE)
		kretprobe_trace_func(ri, regs);
#ifdef CONFIG_EVENT_PROFILE
	if (tp->flags & TP_FLAG_PROFILE)
		kretprobe_profile_func(ri, regs);
#endif	/* CONFIG_EVENT_PROFILE */
	return 0;	/* We don't tweak the kernel, so just return 0 */
}

static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
	if (probe_is_return(tp)) {
		tp->event.trace = print_kretprobe_event;
		call->raw_init = probe_event_raw_init;
		call->show_format = kretprobe_event_show_format;
		call->define_fields = kretprobe_event_define_fields;
	} else {
		tp->event.trace = print_kprobe_event;
		call->raw_init = probe_event_raw_init;
		call->show_format = kprobe_event_show_format;
		call->define_fields = kprobe_event_define_fields;
	}
	call->event = &tp->event;
	call->id = register_ftrace_event(&tp->event);
	if (!call->id)
		return -ENODEV;
	call->enabled = 0;
	call->regfunc = probe_event_enable;
	call->unregfunc = probe_event_disable;

#ifdef CONFIG_EVENT_PROFILE
	atomic_set(&call->profile_count, -1);
	call->profile_enable = probe_profile_enable;
	call->profile_disable = probe_profile_disable;
#endif
	call->data = tp;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n", call->name);
		unregister_ftrace_event(&tp->event);
	}
	return ret;
}

static void unregister_probe_event(struct trace_probe *tp)
{
	/* tp->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tp->call);
}

/* Make a debugfs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST

static int kprobe_trace_selftest_target(int a1, int a2, int a3,
					int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init int kprobe_trace_self_tests_init(void)
{
	int ret;
	int (*target)(int, int, int, int, int, int);

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
				  "$arg1 $arg2 $arg3 $arg4 $stack $stack0");
	if (WARN_ON_ONCE(ret))
		pr_warning("error enabling function entry\n");

	ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
				  "$retval");
	if (WARN_ON_ONCE(ret))
		pr_warning("error enabling function return\n");

	ret = target(1, 2, 3, 4, 5, 6);

	cleanup_all_probes();

	pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif