1 #include <linux/kernel.h> 2 #include <linux/types.h> 3 #include <inttypes.h> 4 #include <stdlib.h> 5 #include <unistd.h> 6 #include <stdio.h> 7 #include <string.h> 8 9 #include "parse-events.h" 10 #include "evlist.h" 11 #include "evsel.h" 12 #include "thread_map.h" 13 #include "cpumap.h" 14 #include "machine.h" 15 #include "event.h" 16 #include "thread.h" 17 18 #include "tests.h" 19 20 #include "sane_ctype.h" 21 22 #define BUFSZ 1024 23 #define READLEN 128 24 25 struct state { 26 u64 done[1024]; 27 size_t done_cnt; 28 }; 29 30 static unsigned int hex(char c) 31 { 32 if (c >= '0' && c <= '9') 33 return c - '0'; 34 if (c >= 'a' && c <= 'f') 35 return c - 'a' + 10; 36 return c - 'A' + 10; 37 } 38 39 static size_t read_objdump_chunk(const char **line, unsigned char **buf, 40 size_t *buf_len) 41 { 42 size_t bytes_read = 0; 43 unsigned char *chunk_start = *buf; 44 45 /* Read bytes */ 46 while (*buf_len > 0) { 47 char c1, c2; 48 49 /* Get 2 hex digits */ 50 c1 = *(*line)++; 51 if (!isxdigit(c1)) 52 break; 53 c2 = *(*line)++; 54 if (!isxdigit(c2)) 55 break; 56 57 /* Store byte and advance buf */ 58 **buf = (hex(c1) << 4) | hex(c2); 59 (*buf)++; 60 (*buf_len)--; 61 bytes_read++; 62 63 /* End of chunk? */ 64 if (isspace(**line)) 65 break; 66 } 67 68 /* 69 * objdump will display raw insn as LE if code endian 70 * is LE and bytes_per_chunk > 1. In that case reverse 71 * the chunk we just read. 72 * 73 * see disassemble_bytes() at binutils/objdump.c for details 74 * how objdump chooses display endian) 75 */ 76 if (bytes_read > 1 && !bigendian()) { 77 unsigned char *chunk_end = chunk_start + bytes_read - 1; 78 unsigned char tmp; 79 80 while (chunk_start < chunk_end) { 81 tmp = *chunk_start; 82 *chunk_start = *chunk_end; 83 *chunk_end = tmp; 84 chunk_start++; 85 chunk_end--; 86 } 87 } 88 89 return bytes_read; 90 } 91 92 static size_t read_objdump_line(const char *line, unsigned char *buf, 93 size_t buf_len) 94 { 95 const char *p; 96 size_t ret, bytes_read = 0; 97 98 /* Skip to a colon */ 99 p = strchr(line, ':'); 100 if (!p) 101 return 0; 102 p++; 103 104 /* Skip initial spaces */ 105 while (*p) { 106 if (!isspace(*p)) 107 break; 108 p++; 109 } 110 111 do { 112 ret = read_objdump_chunk(&p, &buf, &buf_len); 113 bytes_read += ret; 114 p++; 115 } while (ret > 0); 116 117 /* return number of successfully read bytes */ 118 return bytes_read; 119 } 120 121 static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr) 122 { 123 char *line = NULL; 124 size_t line_len, off_last = 0; 125 ssize_t ret; 126 int err = 0; 127 u64 addr, last_addr = start_addr; 128 129 while (off_last < *len) { 130 size_t off, read_bytes, written_bytes; 131 unsigned char tmp[BUFSZ]; 132 133 ret = getline(&line, &line_len, f); 134 if (feof(f)) 135 break; 136 if (ret < 0) { 137 pr_debug("getline failed\n"); 138 err = -1; 139 break; 140 } 141 142 /* read objdump data into temporary buffer */ 143 read_bytes = read_objdump_line(line, tmp, sizeof(tmp)); 144 if (!read_bytes) 145 continue; 146 147 if (sscanf(line, "%"PRIx64, &addr) != 1) 148 continue; 149 if (addr < last_addr) { 150 pr_debug("addr going backwards, read beyond section?\n"); 151 break; 152 } 153 last_addr = addr; 154 155 /* copy it from temporary buffer to 'buf' according 156 * to address on current objdump line */ 157 off = addr - start_addr; 158 if (off >= *len) 159 break; 160 written_bytes = MIN(read_bytes, *len - off); 161 memcpy(buf + off, tmp, written_bytes); 162 off_last = off + written_bytes; 163 } 164 165 /* len returns number of bytes that could not be read */ 166 *len -= off_last; 167 168 free(line); 169 170 return err; 171 } 172 173 static int read_via_objdump(const char *filename, u64 addr, void *buf, 174 size_t len) 175 { 176 char cmd[PATH_MAX * 2]; 177 const char *fmt; 178 FILE *f; 179 int ret; 180 181 fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s"; 182 ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len, 183 filename); 184 if (ret <= 0 || (size_t)ret >= sizeof(cmd)) 185 return -1; 186 187 pr_debug("Objdump command is: %s\n", cmd); 188 189 /* Ignore objdump errors */ 190 strcat(cmd, " 2>/dev/null"); 191 192 f = popen(cmd, "r"); 193 if (!f) { 194 pr_debug("popen failed\n"); 195 return -1; 196 } 197 198 ret = read_objdump_output(f, buf, &len, addr); 199 if (len) { 200 pr_debug("objdump read too few bytes: %zd\n", len); 201 if (!ret) 202 ret = len; 203 } 204 205 pclose(f); 206 207 return ret; 208 } 209 210 static void dump_buf(unsigned char *buf, size_t len) 211 { 212 size_t i; 213 214 for (i = 0; i < len; i++) { 215 pr_debug("0x%02x ", buf[i]); 216 if (i % 16 == 15) 217 pr_debug("\n"); 218 } 219 pr_debug("\n"); 220 } 221 222 static int read_object_code(u64 addr, size_t len, u8 cpumode, 223 struct thread *thread, struct state *state) 224 { 225 struct addr_location al; 226 unsigned char buf1[BUFSZ]; 227 unsigned char buf2[BUFSZ]; 228 size_t ret_len; 229 u64 objdump_addr; 230 int ret; 231 232 pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr); 233 234 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, addr, &al); 235 if (!al.map || !al.map->dso) { 236 pr_debug("thread__find_addr_map failed\n"); 237 return -1; 238 } 239 240 pr_debug("File is: %s\n", al.map->dso->long_name); 241 242 if (al.map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && 243 !dso__is_kcore(al.map->dso)) { 244 pr_debug("Unexpected kernel address - skipping\n"); 245 return 0; 246 } 247 248 pr_debug("On file address is: %#"PRIx64"\n", al.addr); 249 250 if (len > BUFSZ) 251 len = BUFSZ; 252 253 /* Do not go off the map */ 254 if (addr + len > al.map->end) 255 len = al.map->end - addr; 256 257 /* Read the object code using perf */ 258 ret_len = dso__data_read_offset(al.map->dso, thread->mg->machine, 259 al.addr, buf1, len); 260 if (ret_len != len) { 261 pr_debug("dso__data_read_offset failed\n"); 262 return -1; 263 } 264 265 /* 266 * Converting addresses for use by objdump requires more information. 267 * map__load() does that. See map__rip_2objdump() for details. 268 */ 269 if (map__load(al.map)) 270 return -1; 271 272 /* objdump struggles with kcore - try each map only once */ 273 if (dso__is_kcore(al.map->dso)) { 274 size_t d; 275 276 for (d = 0; d < state->done_cnt; d++) { 277 if (state->done[d] == al.map->start) { 278 pr_debug("kcore map tested already"); 279 pr_debug(" - skipping\n"); 280 return 0; 281 } 282 } 283 if (state->done_cnt >= ARRAY_SIZE(state->done)) { 284 pr_debug("Too many kcore maps - skipping\n"); 285 return 0; 286 } 287 state->done[state->done_cnt++] = al.map->start; 288 } 289 290 /* Read the object code using objdump */ 291 objdump_addr = map__rip_2objdump(al.map, al.addr); 292 ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len); 293 if (ret > 0) { 294 /* 295 * The kernel maps are inaccurate - assume objdump is right in 296 * that case. 297 */ 298 if (cpumode == PERF_RECORD_MISC_KERNEL || 299 cpumode == PERF_RECORD_MISC_GUEST_KERNEL) { 300 len -= ret; 301 if (len) { 302 pr_debug("Reducing len to %zu\n", len); 303 } else if (dso__is_kcore(al.map->dso)) { 304 /* 305 * objdump cannot handle very large segments 306 * that may be found in kcore. 307 */ 308 pr_debug("objdump failed for kcore"); 309 pr_debug(" - skipping\n"); 310 return 0; 311 } else { 312 return -1; 313 } 314 } 315 } 316 if (ret < 0) { 317 pr_debug("read_via_objdump failed\n"); 318 return -1; 319 } 320 321 /* The results should be identical */ 322 if (memcmp(buf1, buf2, len)) { 323 pr_debug("Bytes read differ from those read by objdump\n"); 324 pr_debug("buf1 (dso):\n"); 325 dump_buf(buf1, len); 326 pr_debug("buf2 (objdump):\n"); 327 dump_buf(buf2, len); 328 return -1; 329 } 330 pr_debug("Bytes read match those read by objdump\n"); 331 332 return 0; 333 } 334 335 static int process_sample_event(struct machine *machine, 336 struct perf_evlist *evlist, 337 union perf_event *event, struct state *state) 338 { 339 struct perf_sample sample; 340 struct thread *thread; 341 int ret; 342 343 if (perf_evlist__parse_sample(evlist, event, &sample)) { 344 pr_debug("perf_evlist__parse_sample failed\n"); 345 return -1; 346 } 347 348 thread = machine__findnew_thread(machine, sample.pid, sample.tid); 349 if (!thread) { 350 pr_debug("machine__findnew_thread failed\n"); 351 return -1; 352 } 353 354 ret = read_object_code(sample.ip, READLEN, sample.cpumode, thread, state); 355 thread__put(thread); 356 return ret; 357 } 358 359 static int process_event(struct machine *machine, struct perf_evlist *evlist, 360 union perf_event *event, struct state *state) 361 { 362 if (event->header.type == PERF_RECORD_SAMPLE) 363 return process_sample_event(machine, evlist, event, state); 364 365 if (event->header.type == PERF_RECORD_THROTTLE || 366 event->header.type == PERF_RECORD_UNTHROTTLE) 367 return 0; 368 369 if (event->header.type < PERF_RECORD_MAX) { 370 int ret; 371 372 ret = machine__process_event(machine, event, NULL); 373 if (ret < 0) 374 pr_debug("machine__process_event failed, event type %u\n", 375 event->header.type); 376 return ret; 377 } 378 379 return 0; 380 } 381 382 static int process_events(struct machine *machine, struct perf_evlist *evlist, 383 struct state *state) 384 { 385 union perf_event *event; 386 int i, ret; 387 388 for (i = 0; i < evlist->nr_mmaps; i++) { 389 while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { 390 ret = process_event(machine, evlist, event, state); 391 perf_evlist__mmap_consume(evlist, i); 392 if (ret < 0) 393 return ret; 394 } 395 } 396 return 0; 397 } 398 399 static int comp(const void *a, const void *b) 400 { 401 return *(int *)a - *(int *)b; 402 } 403 404 static void do_sort_something(void) 405 { 406 int buf[40960], i; 407 408 for (i = 0; i < (int)ARRAY_SIZE(buf); i++) 409 buf[i] = ARRAY_SIZE(buf) - i - 1; 410 411 qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp); 412 413 for (i = 0; i < (int)ARRAY_SIZE(buf); i++) { 414 if (buf[i] != i) { 415 pr_debug("qsort failed\n"); 416 break; 417 } 418 } 419 } 420 421 static void sort_something(void) 422 { 423 int i; 424 425 for (i = 0; i < 10; i++) 426 do_sort_something(); 427 } 428 429 static void syscall_something(void) 430 { 431 int pipefd[2]; 432 int i; 433 434 for (i = 0; i < 1000; i++) { 435 if (pipe(pipefd) < 0) { 436 pr_debug("pipe failed\n"); 437 break; 438 } 439 close(pipefd[1]); 440 close(pipefd[0]); 441 } 442 } 443 444 static void fs_something(void) 445 { 446 const char *test_file_name = "temp-perf-code-reading-test-file--"; 447 FILE *f; 448 int i; 449 450 for (i = 0; i < 1000; i++) { 451 f = fopen(test_file_name, "w+"); 452 if (f) { 453 fclose(f); 454 unlink(test_file_name); 455 } 456 } 457 } 458 459 static void do_something(void) 460 { 461 fs_something(); 462 463 sort_something(); 464 465 syscall_something(); 466 } 467 468 enum { 469 TEST_CODE_READING_OK, 470 TEST_CODE_READING_NO_VMLINUX, 471 TEST_CODE_READING_NO_KCORE, 472 TEST_CODE_READING_NO_ACCESS, 473 TEST_CODE_READING_NO_KERNEL_OBJ, 474 }; 475 476 static int do_test_code_reading(bool try_kcore) 477 { 478 struct machine *machine; 479 struct thread *thread; 480 struct record_opts opts = { 481 .mmap_pages = UINT_MAX, 482 .user_freq = UINT_MAX, 483 .user_interval = ULLONG_MAX, 484 .freq = 500, 485 .target = { 486 .uses_mmap = true, 487 }, 488 }; 489 struct state state = { 490 .done_cnt = 0, 491 }; 492 struct thread_map *threads = NULL; 493 struct cpu_map *cpus = NULL; 494 struct perf_evlist *evlist = NULL; 495 struct perf_evsel *evsel = NULL; 496 int err = -1, ret; 497 pid_t pid; 498 struct map *map; 499 bool have_vmlinux, have_kcore, excl_kernel = false; 500 501 pid = getpid(); 502 503 machine = machine__new_host(); 504 505 ret = machine__create_kernel_maps(machine); 506 if (ret < 0) { 507 pr_debug("machine__create_kernel_maps failed\n"); 508 goto out_err; 509 } 510 511 /* Force the use of kallsyms instead of vmlinux to try kcore */ 512 if (try_kcore) 513 symbol_conf.kallsyms_name = "/proc/kallsyms"; 514 515 /* Load kernel map */ 516 map = machine__kernel_map(machine); 517 ret = map__load(map); 518 if (ret < 0) { 519 pr_debug("map__load failed\n"); 520 goto out_err; 521 } 522 have_vmlinux = dso__is_vmlinux(map->dso); 523 have_kcore = dso__is_kcore(map->dso); 524 525 /* 2nd time through we just try kcore */ 526 if (try_kcore && !have_kcore) 527 return TEST_CODE_READING_NO_KCORE; 528 529 /* No point getting kernel events if there is no kernel object */ 530 if (!have_vmlinux && !have_kcore) 531 excl_kernel = true; 532 533 threads = thread_map__new_by_tid(pid); 534 if (!threads) { 535 pr_debug("thread_map__new_by_tid failed\n"); 536 goto out_err; 537 } 538 539 ret = perf_event__synthesize_thread_map(NULL, threads, 540 perf_event__process, machine, false, 500); 541 if (ret < 0) { 542 pr_debug("perf_event__synthesize_thread_map failed\n"); 543 goto out_err; 544 } 545 546 thread = machine__findnew_thread(machine, pid, pid); 547 if (!thread) { 548 pr_debug("machine__findnew_thread failed\n"); 549 goto out_put; 550 } 551 552 cpus = cpu_map__new(NULL); 553 if (!cpus) { 554 pr_debug("cpu_map__new failed\n"); 555 goto out_put; 556 } 557 558 while (1) { 559 const char *str; 560 561 evlist = perf_evlist__new(); 562 if (!evlist) { 563 pr_debug("perf_evlist__new failed\n"); 564 goto out_put; 565 } 566 567 perf_evlist__set_maps(evlist, cpus, threads); 568 569 if (excl_kernel) 570 str = "cycles:u"; 571 else 572 str = "cycles"; 573 pr_debug("Parsing event '%s'\n", str); 574 ret = parse_events(evlist, str, NULL); 575 if (ret < 0) { 576 pr_debug("parse_events failed\n"); 577 goto out_put; 578 } 579 580 perf_evlist__config(evlist, &opts, NULL); 581 582 evsel = perf_evlist__first(evlist); 583 584 evsel->attr.comm = 1; 585 evsel->attr.disabled = 1; 586 evsel->attr.enable_on_exec = 0; 587 588 ret = perf_evlist__open(evlist); 589 if (ret < 0) { 590 if (!excl_kernel) { 591 excl_kernel = true; 592 /* 593 * Both cpus and threads are now owned by evlist 594 * and will be freed by following perf_evlist__set_maps 595 * call. Getting refference to keep them alive. 596 */ 597 cpu_map__get(cpus); 598 thread_map__get(threads); 599 perf_evlist__set_maps(evlist, NULL, NULL); 600 perf_evlist__delete(evlist); 601 evlist = NULL; 602 continue; 603 } 604 605 if (verbose > 0) { 606 char errbuf[512]; 607 perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf)); 608 pr_debug("perf_evlist__open() failed!\n%s\n", errbuf); 609 } 610 611 goto out_put; 612 } 613 break; 614 } 615 616 ret = perf_evlist__mmap(evlist, UINT_MAX, false); 617 if (ret < 0) { 618 pr_debug("perf_evlist__mmap failed\n"); 619 goto out_put; 620 } 621 622 perf_evlist__enable(evlist); 623 624 do_something(); 625 626 perf_evlist__disable(evlist); 627 628 ret = process_events(machine, evlist, &state); 629 if (ret < 0) 630 goto out_put; 631 632 if (!have_vmlinux && !have_kcore && !try_kcore) 633 err = TEST_CODE_READING_NO_KERNEL_OBJ; 634 else if (!have_vmlinux && !try_kcore) 635 err = TEST_CODE_READING_NO_VMLINUX; 636 else if (excl_kernel) 637 err = TEST_CODE_READING_NO_ACCESS; 638 else 639 err = TEST_CODE_READING_OK; 640 out_put: 641 thread__put(thread); 642 out_err: 643 644 if (evlist) { 645 perf_evlist__delete(evlist); 646 } else { 647 cpu_map__put(cpus); 648 thread_map__put(threads); 649 } 650 machine__delete_threads(machine); 651 machine__delete(machine); 652 653 return err; 654 } 655 656 int test__code_reading(int subtest __maybe_unused) 657 { 658 int ret; 659 660 ret = do_test_code_reading(false); 661 if (!ret) 662 ret = do_test_code_reading(true); 663 664 switch (ret) { 665 case TEST_CODE_READING_OK: 666 return 0; 667 case TEST_CODE_READING_NO_VMLINUX: 668 pr_debug("no vmlinux\n"); 669 return 0; 670 case TEST_CODE_READING_NO_KCORE: 671 pr_debug("no kcore\n"); 672 return 0; 673 case TEST_CODE_READING_NO_ACCESS: 674 pr_debug("no access\n"); 675 return 0; 676 case TEST_CODE_READING_NO_KERNEL_OBJ: 677 pr_debug("no kernel obj\n"); 678 return 0; 679 default: 680 return -1; 681 }; 682 } 683