/*
 * thread-stack.c: Synthesize a thread's stack using call / return events
 * Copyright (c) 2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/rbtree.h>
#include <linux/list.h>
#include <errno.h>
#include "thread.h"
#include "event.h"
#include "machine.h"
#include "util.h"
#include "debug.h"
#include "symbol.h"
#include "comm.h"
#include "call-path.h"
#include "thread-stack.h"

#define STACK_GROWTH 2048

/**
 * struct thread_stack_entry - thread stack entry.
 * @ret_addr: return address
 * @timestamp: timestamp (if known)
 * @ref: external reference (e.g. db_id of sample)
 * @branch_count: the branch count when the entry was created
 * @cp: call path
 * @no_call: a 'call' was not seen
 * @trace_end: a 'call' but trace ended
 */
struct thread_stack_entry {
	u64 ret_addr;
	u64 timestamp;
	u64 ref;
	u64 branch_count;
	struct call_path *cp;
	bool no_call;
	bool trace_end;
};

/**
 * struct thread_stack - thread stack constructed from 'call' and 'return'
 *                       branch samples.
 * @stack: array that holds the stack
 * @cnt: number of entries in the stack
 * @sz: current maximum stack size
 * @trace_nr: current trace number
 * @branch_count: running branch count
 * @kernel_start: kernel start address
 * @last_time: last timestamp
 * @crp: call/return processor
 * @comm: current comm
 */
struct thread_stack {
	struct thread_stack_entry *stack;
	size_t cnt;
	size_t sz;
	u64 trace_nr;
	u64 branch_count;
	u64 kernel_start;
	u64 last_time;
	struct call_return_processor *crp;
	struct comm *comm;
};

static int thread_stack__grow(struct thread_stack *ts)
{
	struct thread_stack_entry *new_stack;
	size_t sz, new_sz;

	new_sz = ts->sz + STACK_GROWTH;
	sz = new_sz * sizeof(struct thread_stack_entry);

	new_stack = realloc(ts->stack, sz);
	if (!new_stack)
		return -ENOMEM;

	ts->stack = new_stack;
	ts->sz = new_sz;

	return 0;
}

static struct thread_stack *thread_stack__new(struct thread *thread,
					      struct call_return_processor *crp)
{
	struct thread_stack *ts;

	ts = zalloc(sizeof(struct thread_stack));
	if (!ts)
		return NULL;

	if (thread_stack__grow(ts)) {
		free(ts);
		return NULL;
	}

	if (thread->mg && thread->mg->machine)
		ts->kernel_start = machine__kernel_start(thread->mg->machine);
	else
		ts->kernel_start = 1ULL << 63;
	ts->crp = crp;

	thread->ts = ts;

	return ts;
}

static inline struct thread_stack *thread__stack(struct thread *thread)
{
	return thread ? thread->ts : NULL;
}
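
/*
 * Note (illustrative): thread__stack() tolerates a NULL thread, so callers
 * may fetch the stack before validating the thread, as thread_stack__event()
 * below does:
 *
 *	struct thread_stack *ts = thread__stack(thread);
 *
 *	if (!thread)
 *		return -EINVAL;
 */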

static int thread_stack__push(struct thread_stack *ts, u64 ret_addr,
			      bool trace_end)
{
	int err = 0;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
		if (err) {
			pr_warning("Out of memory: discarding thread stack\n");
			ts->cnt = 0;
		}
	}

	ts->stack[ts->cnt].trace_end = trace_end;
	ts->stack[ts->cnt++].ret_addr = ret_addr;

	return err;
}

static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr)
{
	size_t i;

	/*
	 * In some cases there may be functions which are not seen to return,
	 * for example when setjmp / longjmp has been used, or when the perf
	 * context switch in the kernel does not stop and start tracing in
	 * exactly the same code path. When that happens the return address
	 * will be further down the stack. If the return address is not found
	 * at all, we assume the opposite (i.e. this is a return for a call
	 * that wasn't seen for some reason) and leave the stack alone.
	 */
	for (i = ts->cnt; i; ) {
		if (ts->stack[--i].ret_addr == ret_addr) {
			ts->cnt = i;
			return;
		}
	}
}
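
/*
 * Worked example (illustrative): with return addresses { A, B, C } on the
 * stack (C on top), thread_stack__pop(ts, B) discards both C and B
 * (cnt 3 -> 1), as happens when a longjmp() skips an unreturned frame,
 * whereas thread_stack__pop(ts, X) for an address X not on the stack leaves
 * all three entries in place.
 */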

static void thread_stack__pop_trace_end(struct thread_stack *ts)
{
	size_t i;

	for (i = ts->cnt; i; ) {
		if (ts->stack[--i].trace_end)
			ts->cnt = i;
		else
			return;
	}
}

static bool thread_stack__in_kernel(struct thread_stack *ts)
{
	if (!ts->cnt)
		return false;

	return ts->stack[ts->cnt - 1].cp->in_kernel;
}

static int thread_stack__call_return(struct thread *thread,
				     struct thread_stack *ts, size_t idx,
				     u64 timestamp, u64 ref, bool no_return)
{
	struct call_return_processor *crp = ts->crp;
	struct thread_stack_entry *tse;
	struct call_return cr = {
		.thread = thread,
		.comm = ts->comm,
		.db_id = 0,
	};

	tse = &ts->stack[idx];
	cr.cp = tse->cp;
	cr.call_time = tse->timestamp;
	cr.return_time = timestamp;
	cr.branch_count = ts->branch_count - tse->branch_count;
	cr.call_ref = tse->ref;
	cr.return_ref = ref;
	if (tse->no_call)
		cr.flags |= CALL_RETURN_NO_CALL;
	if (no_return)
		cr.flags |= CALL_RETURN_NO_RETURN;

	return crp->process(&cr, crp->data);
}

static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
{
	struct call_return_processor *crp = ts->crp;
	int err;

	if (!crp) {
		ts->cnt = 0;
		return 0;
	}

	while (ts->cnt) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						ts->last_time, 0, true);
		if (err) {
			pr_err("Error flushing thread stack!\n");
			ts->cnt = 0;
			return err;
		}
	}

	return 0;
}

int thread_stack__flush(struct thread *thread)
{
	struct thread_stack *ts = thread->ts;

	if (ts)
		return __thread_stack__flush(thread, ts);

	return 0;
}

int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
			u64 to_ip, u16 insn_len, u64 trace_nr)
{
	struct thread_stack *ts = thread__stack(thread);

	if (!thread)
		return -EINVAL;

	if (!ts) {
		ts = thread_stack__new(thread, NULL);
		if (!ts) {
			pr_warning("Out of memory: no thread stack\n");
			return -ENOMEM;
		}
		ts->trace_nr = trace_nr;
	}

	/*
	 * When the trace is discontinuous, the trace_nr changes. In that case
	 * the stack might be completely invalid. Better to report nothing than
	 * to report something misleading, so flush the stack.
	 */
	if (trace_nr != ts->trace_nr) {
		if (ts->trace_nr)
			__thread_stack__flush(thread, ts);
		ts->trace_nr = trace_nr;
	}

	/* Stop here if thread_stack__process() is in use */
	if (ts->crp)
		return 0;

	if (flags & PERF_IP_FLAG_CALL) {
		u64 ret_addr;

		if (!to_ip)
			return 0;
		ret_addr = from_ip + insn_len;
		if (ret_addr == to_ip)
			return 0; /* Zero-length calls are excluded */
		return thread_stack__push(ts, ret_addr,
					  flags & PERF_IP_FLAG_TRACE_END);
	} else if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
		/*
		 * If the caller did not change the trace number (which would
		 * have flushed the stack) then try to make sense of the stack.
		 * Possibly, tracing began after returning to the current
		 * address, so try to pop that. Also, a call made when the
		 * trace ended is not expected to return, so pop that too.
		 */
		thread_stack__pop(ts, to_ip);
		thread_stack__pop_trace_end(ts);
	} else if ((flags & PERF_IP_FLAG_RETURN) && from_ip) {
		thread_stack__pop(ts, to_ip);
	}

	return 0;
}
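
/*
 * Usage sketch (illustrative; the decode loop and field names are
 * hypothetical): a trace decoder feeds every branch to
 * thread_stack__event() as it decodes, e.g.:
 *
 *	err = thread_stack__event(thread, branch->flags, branch->from_ip,
 *				  branch->to_ip, branch->insn_len, trace_nr);
 *	if (err)
 *		return err;
 */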

void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr)
{
	struct thread_stack *ts = thread__stack(thread);

	if (!ts)
		return;

	if (trace_nr != ts->trace_nr) {
		if (ts->trace_nr)
			__thread_stack__flush(thread, ts);
		ts->trace_nr = trace_nr;
	}
}

void thread_stack__free(struct thread *thread)
{
	struct thread_stack *ts = thread->ts;

	if (ts) {
		__thread_stack__flush(thread, ts);
		zfree(&ts->stack);
		zfree(&thread->ts);
	}
}

static inline u64 callchain_context(u64 ip, u64 kernel_start)
{
	return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL;
}

void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
			  size_t sz, u64 ip, u64 kernel_start)
{
	struct thread_stack *ts = thread__stack(thread);
	u64 context = callchain_context(ip, kernel_start);
	u64 last_context;
	size_t i, j;

	if (sz < 2) {
		chain->nr = 0;
		return;
	}

	chain->ips[0] = context;
	chain->ips[1] = ip;

	if (!ts) {
		chain->nr = 2;
		return;
	}

	last_context = context;

	for (i = 2, j = 1; i < sz && j <= ts->cnt; i++, j++) {
		ip = ts->stack[ts->cnt - j].ret_addr;
		context = callchain_context(ip, kernel_start);
		if (context != last_context) {
			if (i >= sz - 1)
				break;
			chain->ips[i++] = context;
			last_context = context;
		}
		chain->ips[i] = ip;
	}

	chain->nr = i;
}
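
/*
 * Worked example (illustrative): sampling at a kernel ip with two user
 * return addresses r1 and r2 on the stack (and sz large enough) yields
 *
 *	chain->ips = { PERF_CONTEXT_KERNEL, ip,
 *		       PERF_CONTEXT_USER, r1, r2 };
 *	chain->nr = 5;
 *
 * i.e. a context marker is emitted only where the context changes.
 */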

struct call_return_processor *
call_return_processor__new(int (*process)(struct call_return *cr, void *data),
			   void *data)
{
	struct call_return_processor *crp;

	crp = zalloc(sizeof(struct call_return_processor));
	if (!crp)
		return NULL;
	crp->cpr = call_path_root__new();
	if (!crp->cpr)
		goto out_free;
	crp->process = process;
	crp->data = data;
	return crp;

out_free:
	free(crp);
	return NULL;
}

void call_return_processor__free(struct call_return_processor *crp)
{
	if (crp) {
		call_path_root__free(crp->cpr);
		free(crp);
	}
}
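
/*
 * Usage sketch (illustrative; the callback name is hypothetical): a consumer
 * registers a callback that receives one struct call_return per matched
 * call/return pair:
 *
 *	static int process_call_return(struct call_return *cr, void *data)
 *	{
 *		... consume cr->cp, cr->call_time, cr->return_time ...
 *		return 0;
 *	}
 *
 *	crp = call_return_processor__new(process_call_return, my_data);
 */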

static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
				 u64 timestamp, u64 ref, struct call_path *cp,
				 bool no_call, bool trace_end)
{
	struct thread_stack_entry *tse;
	int err;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
		if (err)
			return err;
	}

	tse = &ts->stack[ts->cnt++];
	tse->ret_addr = ret_addr;
	tse->timestamp = timestamp;
	tse->ref = ref;
	tse->branch_count = ts->branch_count;
	tse->cp = cp;
	tse->no_call = no_call;
	tse->trace_end = trace_end;

	return 0;
}

static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
				u64 ret_addr, u64 timestamp, u64 ref,
				struct symbol *sym)
{
	int err;

	if (!ts->cnt)
		return 1;

	if (ts->cnt == 1) {
		struct thread_stack_entry *tse = &ts->stack[0];

		if (tse->cp->sym == sym)
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
	}

	if (ts->stack[ts->cnt - 1].ret_addr == ret_addr) {
		return thread_stack__call_return(thread, ts, --ts->cnt,
						 timestamp, ref, false);
	} else {
		size_t i = ts->cnt - 1;

		while (i--) {
			if (ts->stack[i].ret_addr != ret_addr)
				continue;
			i += 1;
			while (ts->cnt > i) {
				err = thread_stack__call_return(thread, ts,
								--ts->cnt,
								timestamp, ref,
								true);
				if (err)
					return err;
			}
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
		}
	}

	return 1;
}

static int thread_stack__bottom(struct thread_stack *ts,
				struct perf_sample *sample,
				struct addr_location *from_al,
				struct addr_location *to_al, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;
	struct symbol *sym;
	u64 ip;

	if (sample->ip) {
		ip = sample->ip;
		sym = from_al->sym;
	} else if (sample->addr) {
		ip = sample->addr;
		sym = to_al->sym;
	} else {
		return 0;
	}

	cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
				ts->kernel_start);
	if (!cp)
		return -ENOMEM;

	return thread_stack__push_cp(ts, ip, sample->time, ref, cp,
				     true, false);
}

static int thread_stack__no_call_return(struct thread *thread,
					struct thread_stack *ts,
					struct perf_sample *sample,
					struct addr_location *from_al,
					struct addr_location *to_al, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp, *parent;
	u64 ks = ts->kernel_start;
	int err;

	if (sample->ip >= ks && sample->addr < ks) {
		/* Return to userspace, so pop all kernel addresses */
		while (thread_stack__in_kernel(ts)) {
			err = thread_stack__call_return(thread, ts, --ts->cnt,
							sample->time, ref,
							true);
			if (err)
				return err;
		}

		/* If the stack is empty, push the userspace address */
		if (!ts->cnt) {
			cp = call_path__findnew(cpr, &cpr->call_path,
						to_al->sym, sample->addr,
						ts->kernel_start);
			if (!cp)
				return -ENOMEM;
			return thread_stack__push_cp(ts, 0, sample->time, ref,
						     cp, true, false);
		}
	} else if (thread_stack__in_kernel(ts) && sample->ip < ks) {
		/* Return to userspace, so pop all kernel addresses */
		while (thread_stack__in_kernel(ts)) {
			err = thread_stack__call_return(thread, ts, --ts->cnt,
							sample->time, ref,
							true);
			if (err)
				return err;
		}
	}

	if (ts->cnt)
		parent = ts->stack[ts->cnt - 1].cp;
	else
		parent = &cpr->call_path;

	/* This 'return' had no 'call', so push and pop top of stack */
	cp = call_path__findnew(cpr, parent, from_al->sym, sample->ip,
				ts->kernel_start);
	if (!cp)
		return -ENOMEM;

	err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp,
				    true, false);
	if (err)
		return err;

	return thread_stack__pop_cp(thread, ts, sample->addr, sample->time, ref,
				    to_al->sym);
}

static int thread_stack__trace_begin(struct thread *thread,
				     struct thread_stack *ts, u64 timestamp,
				     u64 ref)
{
	struct thread_stack_entry *tse;
	int err;

	if (!ts->cnt)
		return 0;

	/* Pop trace end */
	tse = &ts->stack[ts->cnt - 1];
	if (tse->trace_end) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						timestamp, ref, false);
		if (err)
			return err;
	}

	return 0;
}

static int thread_stack__trace_end(struct thread_stack *ts,
				   struct perf_sample *sample, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;
	u64 ret_addr;

	/* No point having 'trace end' on the bottom of the stack */
	if (!ts->cnt || (ts->cnt == 1 && ts->stack[0].ref == ref))
		return 0;

	cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
				ts->kernel_start);
	if (!cp)
		return -ENOMEM;

	ret_addr = sample->ip + sample->insn_len;

	return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp,
				     false, true);
}

int thread_stack__process(struct thread *thread, struct comm *comm,
			  struct perf_sample *sample,
			  struct addr_location *from_al,
			  struct addr_location *to_al, u64 ref,
			  struct call_return_processor *crp)
{
	struct thread_stack *ts = thread__stack(thread);
	int err = 0;

	if (ts && !ts->crp) {
		/* Supersede thread_stack__event() */
		thread_stack__free(thread);
		ts = NULL;
	}

	if (!ts) {
		ts = thread_stack__new(thread, crp);
		if (!ts)
			return -ENOMEM;
		ts->comm = comm;
	}

	/* Flush stack on exec */
	if (ts->comm != comm && thread->pid_ == thread->tid) {
		err = __thread_stack__flush(thread, ts);
		if (err)
			return err;
		ts->comm = comm;
	}

	/* If the stack is empty, put the current symbol on the stack */
	if (!ts->cnt) {
		err = thread_stack__bottom(ts, sample, from_al, to_al, ref);
		if (err)
			return err;
	}

	ts->branch_count += 1;
	ts->last_time = sample->time;

	if (sample->flags & PERF_IP_FLAG_CALL) {
		bool trace_end = sample->flags & PERF_IP_FLAG_TRACE_END;
		struct call_path_root *cpr = ts->crp->cpr;
		struct call_path *cp;
		u64 ret_addr;

		if (!sample->ip || !sample->addr)
			return 0;

		ret_addr = sample->ip + sample->insn_len;
		if (ret_addr == sample->addr)
			return 0; /* Zero-length calls are excluded */

		cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
					to_al->sym, sample->addr,
					ts->kernel_start);
		if (!cp)
			return -ENOMEM;
		err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
					    cp, false, trace_end);
	} else if (sample->flags & PERF_IP_FLAG_RETURN) {
		if (!sample->ip || !sample->addr)
			return 0;

		err = thread_stack__pop_cp(thread, ts, sample->addr,
					   sample->time, ref, from_al->sym);
		if (err) {
			if (err < 0)
				return err;
			err = thread_stack__no_call_return(thread, ts, sample,
							   from_al, to_al, ref);
		}
	} else if (sample->flags & PERF_IP_FLAG_TRACE_BEGIN) {
		err = thread_stack__trace_begin(thread, ts, sample->time, ref);
	} else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
		err = thread_stack__trace_end(ts, sample, ref);
	}

	return err;
}

size_t thread_stack__depth(struct thread *thread)
{
	struct thread_stack *ts = thread__stack(thread);

	if (!ts)
		return 0;
	return ts->cnt;
}
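
/*
 * Summary (illustrative): the thread stack operates in one of two modes.
 *
 * 1. Without a call_return_processor: feed branches to
 *    thread_stack__event() and fabricate callchains with
 *    thread_stack__sample().
 * 2. With one: feed samples to thread_stack__process(), which invokes
 *    crp->process() with a struct call_return for every matched
 *    call/return pair.
 *
 * In either mode, thread_stack__free() flushes any outstanding entries
 * before releasing the stack.
 */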