1 /* 2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]> 3 * 4 * Parts came from builtin-{top,stat,record}.c, see those files for further 5 * copyright notes. 6 * 7 * Released under the GPL v2. (and only v2, not any later version) 8 */ 9 10 #include <byteswap.h> 11 #include <linux/bitops.h> 12 #include <api/fs/tracing_path.h> 13 #include <traceevent/event-parse.h> 14 #include <linux/hw_breakpoint.h> 15 #include <linux/perf_event.h> 16 #include <linux/err.h> 17 #include <sys/resource.h> 18 #include "asm/bug.h" 19 #include "callchain.h" 20 #include "cgroup.h" 21 #include "evsel.h" 22 #include "evlist.h" 23 #include "util.h" 24 #include "cpumap.h" 25 #include "thread_map.h" 26 #include "target.h" 27 #include "perf_regs.h" 28 #include "debug.h" 29 #include "trace-event.h" 30 #include "stat.h" 31 32 static struct { 33 bool sample_id_all; 34 bool exclude_guest; 35 bool mmap2; 36 bool cloexec; 37 bool clockid; 38 bool clockid_wrong; 39 } perf_missing_features; 40 41 static clockid_t clockid; 42 43 static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused) 44 { 45 return 0; 46 } 47 48 static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused) 49 { 50 } 51 52 static struct { 53 size_t size; 54 int (*init)(struct perf_evsel *evsel); 55 void (*fini)(struct perf_evsel *evsel); 56 } perf_evsel__object = { 57 .size = sizeof(struct perf_evsel), 58 .init = perf_evsel__no_extra_init, 59 .fini = perf_evsel__no_extra_fini, 60 }; 61 62 int perf_evsel__object_config(size_t object_size, 63 int (*init)(struct perf_evsel *evsel), 64 void (*fini)(struct perf_evsel *evsel)) 65 { 66 67 if (object_size == 0) 68 goto set_methods; 69 70 if (perf_evsel__object.size > object_size) 71 return -EINVAL; 72 73 perf_evsel__object.size = object_size; 74 75 set_methods: 76 if (init != NULL) 77 perf_evsel__object.init = init; 78 79 if (fini != NULL) 80 perf_evsel__object.fini = fini; 81 82 return 0; 83 } 84 85 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) 86 87 int __perf_evsel__sample_size(u64 sample_type) 88 { 89 u64 mask = sample_type & PERF_SAMPLE_MASK; 90 int size = 0; 91 int i; 92 93 for (i = 0; i < 64; i++) { 94 if (mask & (1ULL << i)) 95 size++; 96 } 97 98 size *= sizeof(u64); 99 100 return size; 101 } 102 103 /** 104 * __perf_evsel__calc_id_pos - calculate id_pos. 105 * @sample_type: sample type 106 * 107 * This function returns the position of the event id (PERF_SAMPLE_ID or 108 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct 109 * sample_event. 110 */ 111 static int __perf_evsel__calc_id_pos(u64 sample_type) 112 { 113 int idx = 0; 114 115 if (sample_type & PERF_SAMPLE_IDENTIFIER) 116 return 0; 117 118 if (!(sample_type & PERF_SAMPLE_ID)) 119 return -1; 120 121 if (sample_type & PERF_SAMPLE_IP) 122 idx += 1; 123 124 if (sample_type & PERF_SAMPLE_TID) 125 idx += 1; 126 127 if (sample_type & PERF_SAMPLE_TIME) 128 idx += 1; 129 130 if (sample_type & PERF_SAMPLE_ADDR) 131 idx += 1; 132 133 return idx; 134 } 135 136 /** 137 * __perf_evsel__calc_is_pos - calculate is_pos. 138 * @sample_type: sample type 139 * 140 * This function returns the position (counting backwards) of the event id 141 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if 142 * sample_id_all is used there is an id sample appended to non-sample events. 
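 *
 * Worked example (added for illustration): with sample_type ==
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID | PERF_SAMPLE_CPU,
 * the id trailer appended to a non-sample event is laid out as
 * { tid/pid, time, id, cpu }, so counting u64 positions backwards from
 * the end of the event the id sits at position 2: idx starts at 1 and
 * PERF_SAMPLE_CPU adds one.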
143 */ 144 static int __perf_evsel__calc_is_pos(u64 sample_type) 145 { 146 int idx = 1; 147 148 if (sample_type & PERF_SAMPLE_IDENTIFIER) 149 return 1; 150 151 if (!(sample_type & PERF_SAMPLE_ID)) 152 return -1; 153 154 if (sample_type & PERF_SAMPLE_CPU) 155 idx += 1; 156 157 if (sample_type & PERF_SAMPLE_STREAM_ID) 158 idx += 1; 159 160 return idx; 161 } 162 163 void perf_evsel__calc_id_pos(struct perf_evsel *evsel) 164 { 165 evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type); 166 evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type); 167 } 168 169 void __perf_evsel__set_sample_bit(struct perf_evsel *evsel, 170 enum perf_event_sample_format bit) 171 { 172 if (!(evsel->attr.sample_type & bit)) { 173 evsel->attr.sample_type |= bit; 174 evsel->sample_size += sizeof(u64); 175 perf_evsel__calc_id_pos(evsel); 176 } 177 } 178 179 void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel, 180 enum perf_event_sample_format bit) 181 { 182 if (evsel->attr.sample_type & bit) { 183 evsel->attr.sample_type &= ~bit; 184 evsel->sample_size -= sizeof(u64); 185 perf_evsel__calc_id_pos(evsel); 186 } 187 } 188 189 void perf_evsel__set_sample_id(struct perf_evsel *evsel, 190 bool can_sample_identifier) 191 { 192 if (can_sample_identifier) { 193 perf_evsel__reset_sample_bit(evsel, ID); 194 perf_evsel__set_sample_bit(evsel, IDENTIFIER); 195 } else { 196 perf_evsel__set_sample_bit(evsel, ID); 197 } 198 evsel->attr.read_format |= PERF_FORMAT_ID; 199 } 200 201 void perf_evsel__init(struct perf_evsel *evsel, 202 struct perf_event_attr *attr, int idx) 203 { 204 evsel->idx = idx; 205 evsel->tracking = !idx; 206 evsel->attr = *attr; 207 evsel->leader = evsel; 208 evsel->unit = ""; 209 evsel->scale = 1.0; 210 evsel->evlist = NULL; 211 evsel->bpf_fd = -1; 212 INIT_LIST_HEAD(&evsel->node); 213 INIT_LIST_HEAD(&evsel->config_terms); 214 perf_evsel__object.init(evsel); 215 evsel->sample_size = __perf_evsel__sample_size(attr->sample_type); 216 perf_evsel__calc_id_pos(evsel); 217 evsel->cmdline_group_boundary = false; 218 } 219 220 struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx) 221 { 222 struct perf_evsel *evsel = zalloc(perf_evsel__object.size); 223 224 if (evsel != NULL) 225 perf_evsel__init(evsel, attr, idx); 226 227 return evsel; 228 } 229 230 /* 231 * Returns pointer with encoded error via <linux/err.h> interface. 
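 *
 * Usage sketch (added for illustration; assumes the perf_evsel__newtp()
 * wrapper from evsel.h and the IS_ERR()/PTR_ERR() helpers from
 * <linux/err.h>):
 *
 *	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 *
 *	if (IS_ERR(evsel))
 *		return PTR_ERR(evsel);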
232 */ 233 struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx) 234 { 235 struct perf_evsel *evsel = zalloc(perf_evsel__object.size); 236 int err = -ENOMEM; 237 238 if (evsel == NULL) { 239 goto out_err; 240 } else { 241 struct perf_event_attr attr = { 242 .type = PERF_TYPE_TRACEPOINT, 243 .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | 244 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD), 245 }; 246 247 if (asprintf(&evsel->name, "%s:%s", sys, name) < 0) 248 goto out_free; 249 250 evsel->tp_format = trace_event__tp_format(sys, name); 251 if (IS_ERR(evsel->tp_format)) { 252 err = PTR_ERR(evsel->tp_format); 253 goto out_free; 254 } 255 256 event_attr_init(&attr); 257 attr.config = evsel->tp_format->id; 258 attr.sample_period = 1; 259 perf_evsel__init(evsel, &attr, idx); 260 } 261 262 return evsel; 263 264 out_free: 265 zfree(&evsel->name); 266 free(evsel); 267 out_err: 268 return ERR_PTR(err); 269 } 270 271 const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = { 272 "cycles", 273 "instructions", 274 "cache-references", 275 "cache-misses", 276 "branches", 277 "branch-misses", 278 "bus-cycles", 279 "stalled-cycles-frontend", 280 "stalled-cycles-backend", 281 "ref-cycles", 282 }; 283 284 static const char *__perf_evsel__hw_name(u64 config) 285 { 286 if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config]) 287 return perf_evsel__hw_names[config]; 288 289 return "unknown-hardware"; 290 } 291 292 static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size) 293 { 294 int colon = 0, r = 0; 295 struct perf_event_attr *attr = &evsel->attr; 296 bool exclude_guest_default = false; 297 298 #define MOD_PRINT(context, mod) do { \ 299 if (!attr->exclude_##context) { \ 300 if (!colon) colon = ++r; \ 301 r += scnprintf(bf + r, size - r, "%c", mod); \ 302 } } while(0) 303 304 if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) { 305 MOD_PRINT(kernel, 'k'); 306 MOD_PRINT(user, 'u'); 307 MOD_PRINT(hv, 'h'); 308 exclude_guest_default = true; 309 } 310 311 if (attr->precise_ip) { 312 if (!colon) 313 colon = ++r; 314 r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp"); 315 exclude_guest_default = true; 316 } 317 318 if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) { 319 MOD_PRINT(host, 'H'); 320 MOD_PRINT(guest, 'G'); 321 } 322 #undef MOD_PRINT 323 if (colon) 324 bf[colon - 1] = ':'; 325 return r; 326 } 327 328 static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size) 329 { 330 int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config)); 331 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r); 332 } 333 334 const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = { 335 "cpu-clock", 336 "task-clock", 337 "page-faults", 338 "context-switches", 339 "cpu-migrations", 340 "minor-faults", 341 "major-faults", 342 "alignment-faults", 343 "emulation-faults", 344 "dummy", 345 }; 346 347 static const char *__perf_evsel__sw_name(u64 config) 348 { 349 if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config]) 350 return perf_evsel__sw_names[config]; 351 return "unknown-software"; 352 } 353 354 static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size) 355 { 356 int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config)); 357 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r); 358 } 359 360 static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type) 361 { 362 int r; 363 364 r = scnprintf(bf, size, 
"mem:0x%" PRIx64 ":", addr); 365 366 if (type & HW_BREAKPOINT_R) 367 r += scnprintf(bf + r, size - r, "r"); 368 369 if (type & HW_BREAKPOINT_W) 370 r += scnprintf(bf + r, size - r, "w"); 371 372 if (type & HW_BREAKPOINT_X) 373 r += scnprintf(bf + r, size - r, "x"); 374 375 return r; 376 } 377 378 static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size) 379 { 380 struct perf_event_attr *attr = &evsel->attr; 381 int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type); 382 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r); 383 } 384 385 const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX] 386 [PERF_EVSEL__MAX_ALIASES] = { 387 { "L1-dcache", "l1-d", "l1d", "L1-data", }, 388 { "L1-icache", "l1-i", "l1i", "L1-instruction", }, 389 { "LLC", "L2", }, 390 { "dTLB", "d-tlb", "Data-TLB", }, 391 { "iTLB", "i-tlb", "Instruction-TLB", }, 392 { "branch", "branches", "bpu", "btb", "bpc", }, 393 { "node", }, 394 }; 395 396 const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX] 397 [PERF_EVSEL__MAX_ALIASES] = { 398 { "load", "loads", "read", }, 399 { "store", "stores", "write", }, 400 { "prefetch", "prefetches", "speculative-read", "speculative-load", }, 401 }; 402 403 const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] 404 [PERF_EVSEL__MAX_ALIASES] = { 405 { "refs", "Reference", "ops", "access", }, 406 { "misses", "miss", }, 407 }; 408 409 #define C(x) PERF_COUNT_HW_CACHE_##x 410 #define CACHE_READ (1 << C(OP_READ)) 411 #define CACHE_WRITE (1 << C(OP_WRITE)) 412 #define CACHE_PREFETCH (1 << C(OP_PREFETCH)) 413 #define COP(x) (1 << x) 414 415 /* 416 * cache operartion stat 417 * L1I : Read and prefetch only 418 * ITLB and BPU : Read-only 419 */ 420 static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = { 421 [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 422 [C(L1I)] = (CACHE_READ | CACHE_PREFETCH), 423 [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 424 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 425 [C(ITLB)] = (CACHE_READ), 426 [C(BPU)] = (CACHE_READ), 427 [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 428 }; 429 430 bool perf_evsel__is_cache_op_valid(u8 type, u8 op) 431 { 432 if (perf_evsel__hw_cache_stat[type] & COP(op)) 433 return true; /* valid */ 434 else 435 return false; /* invalid */ 436 } 437 438 int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, 439 char *bf, size_t size) 440 { 441 if (result) { 442 return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0], 443 perf_evsel__hw_cache_op[op][0], 444 perf_evsel__hw_cache_result[result][0]); 445 } 446 447 return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0], 448 perf_evsel__hw_cache_op[op][1]); 449 } 450 451 static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size) 452 { 453 u8 op, result, type = (config >> 0) & 0xff; 454 const char *err = "unknown-ext-hardware-cache-type"; 455 456 if (type > PERF_COUNT_HW_CACHE_MAX) 457 goto out_err; 458 459 op = (config >> 8) & 0xff; 460 err = "unknown-ext-hardware-cache-op"; 461 if (op > PERF_COUNT_HW_CACHE_OP_MAX) 462 goto out_err; 463 464 result = (config >> 16) & 0xff; 465 err = "unknown-ext-hardware-cache-result"; 466 if (result > PERF_COUNT_HW_CACHE_RESULT_MAX) 467 goto out_err; 468 469 err = "invalid-cache"; 470 if (!perf_evsel__is_cache_op_valid(type, op)) 471 goto out_err; 472 473 return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size); 474 out_err: 475 return scnprintf(bf, size, "%s", err); 476 } 477 478 
static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size) 479 { 480 int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size); 481 return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret); 482 } 483 484 static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size) 485 { 486 int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config); 487 return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret); 488 } 489 490 const char *perf_evsel__name(struct perf_evsel *evsel) 491 { 492 char bf[128]; 493 494 if (evsel->name) 495 return evsel->name; 496 497 switch (evsel->attr.type) { 498 case PERF_TYPE_RAW: 499 perf_evsel__raw_name(evsel, bf, sizeof(bf)); 500 break; 501 502 case PERF_TYPE_HARDWARE: 503 perf_evsel__hw_name(evsel, bf, sizeof(bf)); 504 break; 505 506 case PERF_TYPE_HW_CACHE: 507 perf_evsel__hw_cache_name(evsel, bf, sizeof(bf)); 508 break; 509 510 case PERF_TYPE_SOFTWARE: 511 perf_evsel__sw_name(evsel, bf, sizeof(bf)); 512 break; 513 514 case PERF_TYPE_TRACEPOINT: 515 scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint"); 516 break; 517 518 case PERF_TYPE_BREAKPOINT: 519 perf_evsel__bp_name(evsel, bf, sizeof(bf)); 520 break; 521 522 default: 523 scnprintf(bf, sizeof(bf), "unknown attr type: %d", 524 evsel->attr.type); 525 break; 526 } 527 528 evsel->name = strdup(bf); 529 530 return evsel->name ?: "unknown"; 531 } 532 533 const char *perf_evsel__group_name(struct perf_evsel *evsel) 534 { 535 return evsel->group_name ?: "anon group"; 536 } 537 538 int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size) 539 { 540 int ret; 541 struct perf_evsel *pos; 542 const char *group_name = perf_evsel__group_name(evsel); 543 544 ret = scnprintf(buf, size, "%s", group_name); 545 546 ret += scnprintf(buf + ret, size - ret, " { %s", 547 perf_evsel__name(evsel)); 548 549 for_each_group_member(pos, evsel) 550 ret += scnprintf(buf + ret, size - ret, ", %s", 551 perf_evsel__name(pos)); 552 553 ret += scnprintf(buf + ret, size - ret, " }"); 554 555 return ret; 556 } 557 558 static void 559 perf_evsel__config_callgraph(struct perf_evsel *evsel, 560 struct record_opts *opts, 561 struct callchain_param *param) 562 { 563 bool function = perf_evsel__is_function_event(evsel); 564 struct perf_event_attr *attr = &evsel->attr; 565 566 perf_evsel__set_sample_bit(evsel, CALLCHAIN); 567 568 if (param->record_mode == CALLCHAIN_LBR) { 569 if (!opts->branch_stack) { 570 if (attr->exclude_user) { 571 pr_warning("LBR callstack option is only available " 572 "to get user callchain information. " 573 "Falling back to framepointers.\n"); 574 } else { 575 perf_evsel__set_sample_bit(evsel, BRANCH_STACK); 576 attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER | 577 PERF_SAMPLE_BRANCH_CALL_STACK; 578 } 579 } else 580 pr_warning("Cannot use LBR callstack with branch stack. 
" 581 "Falling back to framepointers.\n"); 582 } 583 584 if (param->record_mode == CALLCHAIN_DWARF) { 585 if (!function) { 586 perf_evsel__set_sample_bit(evsel, REGS_USER); 587 perf_evsel__set_sample_bit(evsel, STACK_USER); 588 attr->sample_regs_user = PERF_REGS_MASK; 589 attr->sample_stack_user = param->dump_size; 590 attr->exclude_callchain_user = 1; 591 } else { 592 pr_info("Cannot use DWARF unwind for function trace event," 593 " falling back to framepointers.\n"); 594 } 595 } 596 597 if (function) { 598 pr_info("Disabling user space callchains for function trace event.\n"); 599 attr->exclude_callchain_user = 1; 600 } 601 } 602 603 static void 604 perf_evsel__reset_callgraph(struct perf_evsel *evsel, 605 struct callchain_param *param) 606 { 607 struct perf_event_attr *attr = &evsel->attr; 608 609 perf_evsel__reset_sample_bit(evsel, CALLCHAIN); 610 if (param->record_mode == CALLCHAIN_LBR) { 611 perf_evsel__reset_sample_bit(evsel, BRANCH_STACK); 612 attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER | 613 PERF_SAMPLE_BRANCH_CALL_STACK); 614 } 615 if (param->record_mode == CALLCHAIN_DWARF) { 616 perf_evsel__reset_sample_bit(evsel, REGS_USER); 617 perf_evsel__reset_sample_bit(evsel, STACK_USER); 618 } 619 } 620 621 static void apply_config_terms(struct perf_evsel *evsel, 622 struct record_opts *opts) 623 { 624 struct perf_evsel_config_term *term; 625 struct list_head *config_terms = &evsel->config_terms; 626 struct perf_event_attr *attr = &evsel->attr; 627 struct callchain_param param; 628 u32 dump_size = 0; 629 char *callgraph_buf = NULL; 630 631 /* callgraph default */ 632 param.record_mode = callchain_param.record_mode; 633 634 list_for_each_entry(term, config_terms, list) { 635 switch (term->type) { 636 case PERF_EVSEL__CONFIG_TERM_PERIOD: 637 attr->sample_period = term->val.period; 638 attr->freq = 0; 639 break; 640 case PERF_EVSEL__CONFIG_TERM_FREQ: 641 attr->sample_freq = term->val.freq; 642 attr->freq = 1; 643 break; 644 case PERF_EVSEL__CONFIG_TERM_TIME: 645 if (term->val.time) 646 perf_evsel__set_sample_bit(evsel, TIME); 647 else 648 perf_evsel__reset_sample_bit(evsel, TIME); 649 break; 650 case PERF_EVSEL__CONFIG_TERM_CALLGRAPH: 651 callgraph_buf = term->val.callgraph; 652 break; 653 case PERF_EVSEL__CONFIG_TERM_STACK_USER: 654 dump_size = term->val.stack_user; 655 break; 656 case PERF_EVSEL__CONFIG_TERM_INHERIT: 657 /* 658 * attr->inherit should has already been set by 659 * perf_evsel__config. If user explicitly set 660 * inherit using config terms, override global 661 * opt->no_inherit setting. 662 */ 663 attr->inherit = term->val.inherit ? 1 : 0; 664 break; 665 default: 666 break; 667 } 668 } 669 670 /* User explicitly set per-event callgraph, clear the old setting and reset. */ 671 if ((callgraph_buf != NULL) || (dump_size > 0)) { 672 673 /* parse callgraph parameters */ 674 if (callgraph_buf != NULL) { 675 if (!strcmp(callgraph_buf, "no")) { 676 param.enabled = false; 677 param.record_mode = CALLCHAIN_NONE; 678 } else { 679 param.enabled = true; 680 if (parse_callchain_record(callgraph_buf, ¶m)) { 681 pr_err("per-event callgraph setting for %s failed. 
" 682 "Apply callgraph global setting for it\n", 683 evsel->name); 684 return; 685 } 686 } 687 } 688 if (dump_size > 0) { 689 dump_size = round_up(dump_size, sizeof(u64)); 690 param.dump_size = dump_size; 691 } 692 693 /* If global callgraph set, clear it */ 694 if (callchain_param.enabled) 695 perf_evsel__reset_callgraph(evsel, &callchain_param); 696 697 /* set perf-event callgraph */ 698 if (param.enabled) 699 perf_evsel__config_callgraph(evsel, opts, ¶m); 700 } 701 } 702 703 /* 704 * The enable_on_exec/disabled value strategy: 705 * 706 * 1) For any type of traced program: 707 * - all independent events and group leaders are disabled 708 * - all group members are enabled 709 * 710 * Group members are ruled by group leaders. They need to 711 * be enabled, because the group scheduling relies on that. 712 * 713 * 2) For traced programs executed by perf: 714 * - all independent events and group leaders have 715 * enable_on_exec set 716 * - we don't specifically enable or disable any event during 717 * the record command 718 * 719 * Independent events and group leaders are initially disabled 720 * and get enabled by exec. Group members are ruled by group 721 * leaders as stated in 1). 722 * 723 * 3) For traced programs attached by perf (pid/tid): 724 * - we specifically enable or disable all events during 725 * the record command 726 * 727 * When attaching events to already running traced we 728 * enable/disable events specifically, as there's no 729 * initial traced exec call. 730 */ 731 void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts) 732 { 733 struct perf_evsel *leader = evsel->leader; 734 struct perf_event_attr *attr = &evsel->attr; 735 int track = evsel->tracking; 736 bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread; 737 738 attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1; 739 attr->inherit = !opts->no_inherit; 740 741 perf_evsel__set_sample_bit(evsel, IP); 742 perf_evsel__set_sample_bit(evsel, TID); 743 744 if (evsel->sample_read) { 745 perf_evsel__set_sample_bit(evsel, READ); 746 747 /* 748 * We need ID even in case of single event, because 749 * PERF_SAMPLE_READ process ID specific data. 750 */ 751 perf_evsel__set_sample_id(evsel, false); 752 753 /* 754 * Apply group format only if we belong to group 755 * with more than one members. 756 */ 757 if (leader->nr_members > 1) { 758 attr->read_format |= PERF_FORMAT_GROUP; 759 attr->inherit = 0; 760 } 761 } 762 763 /* 764 * We default some events to have a default interval. But keep 765 * it a weak assumption overridable by the user. 766 */ 767 if (!attr->sample_period || (opts->user_freq != UINT_MAX || 768 opts->user_interval != ULLONG_MAX)) { 769 if (opts->freq) { 770 perf_evsel__set_sample_bit(evsel, PERIOD); 771 attr->freq = 1; 772 attr->sample_freq = opts->freq; 773 } else { 774 attr->sample_period = opts->default_interval; 775 } 776 } 777 778 /* 779 * Disable sampling for all group members other 780 * than leader in case leader 'leads' the sampling. 
781		 */
782		if ((leader != evsel) && leader->sample_read) {
783			attr->sample_freq   = 0;
784			attr->sample_period = 0;
785		}
786
787		if (opts->no_samples)
788			attr->sample_freq = 0;
789
790		if (opts->inherit_stat)
791			attr->inherit_stat = 1;
792
793		if (opts->sample_address) {
794			perf_evsel__set_sample_bit(evsel, ADDR);
795			attr->mmap_data = track;
796		}
797
798		/*
799		 * We don't allow user space callchains for function trace
800		 * events, due to issues with page faults while tracing the
801		 * page fault handler, and its overall tricky nature.
802		 */
803		if (perf_evsel__is_function_event(evsel))
804			evsel->attr.exclude_callchain_user = 1;
805
806		if (callchain_param.enabled && !evsel->no_aux_samples)
807			perf_evsel__config_callgraph(evsel, opts, &callchain_param);
808
809		if (opts->sample_intr_regs) {
810			attr->sample_regs_intr = opts->sample_intr_regs;
811			perf_evsel__set_sample_bit(evsel, REGS_INTR);
812		}
813
814		if (target__has_cpu(&opts->target))
815			perf_evsel__set_sample_bit(evsel, CPU);
816
817		if (opts->period)
818			perf_evsel__set_sample_bit(evsel, PERIOD);
819
820		/*
821		 * When the user explicitly disabled time, don't force it here.
822		 */
823		if (opts->sample_time &&
824		    (!perf_missing_features.sample_id_all &&
825		    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
826		     opts->sample_time_set)))
827			perf_evsel__set_sample_bit(evsel, TIME);
828
829		if (opts->raw_samples && !evsel->no_aux_samples) {
830			perf_evsel__set_sample_bit(evsel, TIME);
831			perf_evsel__set_sample_bit(evsel, RAW);
832			perf_evsel__set_sample_bit(evsel, CPU);
833		}
834
835		if (opts->sample_address)
836			perf_evsel__set_sample_bit(evsel, DATA_SRC);
837
838		if (opts->no_buffering) {
839			attr->watermark = 0;
840			attr->wakeup_events = 1;
841		}
842		if (opts->branch_stack && !evsel->no_aux_samples) {
843			perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
844			attr->branch_sample_type = opts->branch_stack;
845		}
846
847		if (opts->sample_weight)
848			perf_evsel__set_sample_bit(evsel, WEIGHT);
849
850		attr->task  = track;
851		attr->mmap  = track;
852		attr->mmap2 = track && !perf_missing_features.mmap2;
853		attr->comm  = track;
854
855		if (opts->record_switch_events)
856			attr->context_switch = track;
857
858		if (opts->sample_transaction)
859			perf_evsel__set_sample_bit(evsel, TRANSACTION);
860
861		if (opts->running_time) {
862			evsel->attr.read_format |=
863				PERF_FORMAT_TOTAL_TIME_ENABLED |
864				PERF_FORMAT_TOTAL_TIME_RUNNING;
865		}
866
867		/*
868		 * XXX see the function comment above
869		 *
870		 * Disabling only independent events or group leaders,
871		 * keeping group members enabled.
872		 */
873		if (perf_evsel__is_group_leader(evsel))
874			attr->disabled = 1;
875
876		/*
877		 * Setting enable_on_exec for independent events and
878		 * group leaders for traced programs executed by perf.
879		 */
880		if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
881		    !opts->initial_delay)
882			attr->enable_on_exec = 1;
883
884		if (evsel->immediate) {
885			attr->disabled = 0;
886			attr->enable_on_exec = 0;
887		}
888
889		clockid = opts->clockid;
890		if (opts->use_clockid) {
891			attr->use_clockid = 1;
892			attr->clockid = opts->clockid;
893		}
894
895		if (evsel->precise_max)
896			perf_event_attr__set_max_precise_ip(attr);
897
898		/*
899		 * Apply event-specific term settings; they override
900		 * any global configuration.
901 */ 902 apply_config_terms(evsel, opts); 903 } 904 905 static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) 906 { 907 int cpu, thread; 908 909 if (evsel->system_wide) 910 nthreads = 1; 911 912 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int)); 913 914 if (evsel->fd) { 915 for (cpu = 0; cpu < ncpus; cpu++) { 916 for (thread = 0; thread < nthreads; thread++) { 917 FD(evsel, cpu, thread) = -1; 918 } 919 } 920 } 921 922 return evsel->fd != NULL ? 0 : -ENOMEM; 923 } 924 925 static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads, 926 int ioc, void *arg) 927 { 928 int cpu, thread; 929 930 if (evsel->system_wide) 931 nthreads = 1; 932 933 for (cpu = 0; cpu < ncpus; cpu++) { 934 for (thread = 0; thread < nthreads; thread++) { 935 int fd = FD(evsel, cpu, thread), 936 err = ioctl(fd, ioc, arg); 937 938 if (err) 939 return err; 940 } 941 } 942 943 return 0; 944 } 945 946 int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads, 947 const char *filter) 948 { 949 return perf_evsel__run_ioctl(evsel, ncpus, nthreads, 950 PERF_EVENT_IOC_SET_FILTER, 951 (void *)filter); 952 } 953 954 int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter) 955 { 956 char *new_filter = strdup(filter); 957 958 if (new_filter != NULL) { 959 free(evsel->filter); 960 evsel->filter = new_filter; 961 return 0; 962 } 963 964 return -1; 965 } 966 967 int perf_evsel__append_filter(struct perf_evsel *evsel, 968 const char *op, const char *filter) 969 { 970 char *new_filter; 971 972 if (evsel->filter == NULL) 973 return perf_evsel__set_filter(evsel, filter); 974 975 if (asprintf(&new_filter,"(%s) %s (%s)", evsel->filter, op, filter) > 0) { 976 free(evsel->filter); 977 evsel->filter = new_filter; 978 return 0; 979 } 980 981 return -1; 982 } 983 984 int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads) 985 { 986 return perf_evsel__run_ioctl(evsel, ncpus, nthreads, 987 PERF_EVENT_IOC_ENABLE, 988 0); 989 } 990 991 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) 992 { 993 if (ncpus == 0 || nthreads == 0) 994 return 0; 995 996 if (evsel->system_wide) 997 nthreads = 1; 998 999 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); 1000 if (evsel->sample_id == NULL) 1001 return -ENOMEM; 1002 1003 evsel->id = zalloc(ncpus * nthreads * sizeof(u64)); 1004 if (evsel->id == NULL) { 1005 xyarray__delete(evsel->sample_id); 1006 evsel->sample_id = NULL; 1007 return -ENOMEM; 1008 } 1009 1010 return 0; 1011 } 1012 1013 static void perf_evsel__free_fd(struct perf_evsel *evsel) 1014 { 1015 xyarray__delete(evsel->fd); 1016 evsel->fd = NULL; 1017 } 1018 1019 static void perf_evsel__free_id(struct perf_evsel *evsel) 1020 { 1021 xyarray__delete(evsel->sample_id); 1022 evsel->sample_id = NULL; 1023 zfree(&evsel->id); 1024 } 1025 1026 static void perf_evsel__free_config_terms(struct perf_evsel *evsel) 1027 { 1028 struct perf_evsel_config_term *term, *h; 1029 1030 list_for_each_entry_safe(term, h, &evsel->config_terms, list) { 1031 list_del(&term->list); 1032 free(term); 1033 } 1034 } 1035 1036 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads) 1037 { 1038 int cpu, thread; 1039 1040 if (evsel->system_wide) 1041 nthreads = 1; 1042 1043 for (cpu = 0; cpu < ncpus; cpu++) 1044 for (thread = 0; thread < nthreads; ++thread) { 1045 close(FD(evsel, cpu, thread)); 1046 FD(evsel, cpu, thread) = -1; 1047 } 1048 } 1049 1050 void perf_evsel__exit(struct perf_evsel *evsel) 
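/*
 * Lifecycle sketch (added for illustration; error handling elided and
 * the perf_evsel__new() wrapper from evsel.h is assumed):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.config	= PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *
 *	perf_evsel__open(evsel, NULL, NULL);	(NULL maps fall back to the
 *						 empty maps defined below)
 *	...
 *	perf_evsel__close(evsel, 1, 1);		(1 cpu x 1 thread was opened)
 *	perf_evsel__delete(evsel);		(perf_evsel__exit() plus free())
 */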
1051 { 1052 assert(list_empty(&evsel->node)); 1053 assert(evsel->evlist == NULL); 1054 perf_evsel__free_fd(evsel); 1055 perf_evsel__free_id(evsel); 1056 perf_evsel__free_config_terms(evsel); 1057 close_cgroup(evsel->cgrp); 1058 cpu_map__put(evsel->cpus); 1059 cpu_map__put(evsel->own_cpus); 1060 thread_map__put(evsel->threads); 1061 zfree(&evsel->group_name); 1062 zfree(&evsel->name); 1063 perf_evsel__object.fini(evsel); 1064 } 1065 1066 void perf_evsel__delete(struct perf_evsel *evsel) 1067 { 1068 perf_evsel__exit(evsel); 1069 free(evsel); 1070 } 1071 1072 void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread, 1073 struct perf_counts_values *count) 1074 { 1075 struct perf_counts_values tmp; 1076 1077 if (!evsel->prev_raw_counts) 1078 return; 1079 1080 if (cpu == -1) { 1081 tmp = evsel->prev_raw_counts->aggr; 1082 evsel->prev_raw_counts->aggr = *count; 1083 } else { 1084 tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread); 1085 *perf_counts(evsel->prev_raw_counts, cpu, thread) = *count; 1086 } 1087 1088 count->val = count->val - tmp.val; 1089 count->ena = count->ena - tmp.ena; 1090 count->run = count->run - tmp.run; 1091 } 1092 1093 void perf_counts_values__scale(struct perf_counts_values *count, 1094 bool scale, s8 *pscaled) 1095 { 1096 s8 scaled = 0; 1097 1098 if (scale) { 1099 if (count->run == 0) { 1100 scaled = -1; 1101 count->val = 0; 1102 } else if (count->run < count->ena) { 1103 scaled = 1; 1104 count->val = (u64)((double) count->val * count->ena / count->run + 0.5); 1105 } 1106 } else 1107 count->ena = count->run = 0; 1108 1109 if (pscaled) 1110 *pscaled = scaled; 1111 } 1112 1113 int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, 1114 struct perf_counts_values *count) 1115 { 1116 memset(count, 0, sizeof(*count)); 1117 1118 if (FD(evsel, cpu, thread) < 0) 1119 return -EINVAL; 1120 1121 if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) < 0) 1122 return -errno; 1123 1124 return 0; 1125 } 1126 1127 int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, 1128 int cpu, int thread, bool scale) 1129 { 1130 struct perf_counts_values count; 1131 size_t nv = scale ? 3 : 1; 1132 1133 if (FD(evsel, cpu, thread) < 0) 1134 return -EINVAL; 1135 1136 if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0) 1137 return -ENOMEM; 1138 1139 if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0) 1140 return -errno; 1141 1142 perf_evsel__compute_deltas(evsel, cpu, thread, &count); 1143 perf_counts_values__scale(&count, scale, NULL); 1144 *perf_counts(evsel->counts, cpu, thread) = count; 1145 return 0; 1146 } 1147 1148 static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread) 1149 { 1150 struct perf_evsel *leader = evsel->leader; 1151 int fd; 1152 1153 if (perf_evsel__is_group_leader(evsel)) 1154 return -1; 1155 1156 /* 1157 * Leader must be already processed/open, 1158 * if not it's a bug. 1159 */ 1160 BUG_ON(!leader->fd); 1161 1162 fd = FD(leader, cpu, thread); 1163 BUG_ON(fd == -1); 1164 1165 return fd; 1166 } 1167 1168 struct bit_names { 1169 int bit; 1170 const char *name; 1171 }; 1172 1173 static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits) 1174 { 1175 bool first_bit = true; 1176 int i = 0; 1177 1178 do { 1179 if (value & bits[i].bit) { 1180 buf += scnprintf(buf, size, "%s%s", first_bit ? 
"" : "|", bits[i].name); 1181 first_bit = false; 1182 } 1183 } while (bits[++i].name != NULL); 1184 } 1185 1186 static void __p_sample_type(char *buf, size_t size, u64 value) 1187 { 1188 #define bit_name(n) { PERF_SAMPLE_##n, #n } 1189 struct bit_names bits[] = { 1190 bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR), 1191 bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU), 1192 bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW), 1193 bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER), 1194 bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC), 1195 { .name = NULL, } 1196 }; 1197 #undef bit_name 1198 __p_bits(buf, size, value, bits); 1199 } 1200 1201 static void __p_read_format(char *buf, size_t size, u64 value) 1202 { 1203 #define bit_name(n) { PERF_FORMAT_##n, #n } 1204 struct bit_names bits[] = { 1205 bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING), 1206 bit_name(ID), bit_name(GROUP), 1207 { .name = NULL, } 1208 }; 1209 #undef bit_name 1210 __p_bits(buf, size, value, bits); 1211 } 1212 1213 #define BUF_SIZE 1024 1214 1215 #define p_hex(val) snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val)) 1216 #define p_unsigned(val) snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val)) 1217 #define p_signed(val) snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val)) 1218 #define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val) 1219 #define p_read_format(val) __p_read_format(buf, BUF_SIZE, val) 1220 1221 #define PRINT_ATTRn(_n, _f, _p) \ 1222 do { \ 1223 if (attr->_f) { \ 1224 _p(attr->_f); \ 1225 ret += attr__fprintf(fp, _n, buf, priv);\ 1226 } \ 1227 } while (0) 1228 1229 #define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p) 1230 1231 int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr, 1232 attr__fprintf_f attr__fprintf, void *priv) 1233 { 1234 char buf[BUF_SIZE]; 1235 int ret = 0; 1236 1237 PRINT_ATTRf(type, p_unsigned); 1238 PRINT_ATTRf(size, p_unsigned); 1239 PRINT_ATTRf(config, p_hex); 1240 PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned); 1241 PRINT_ATTRf(sample_type, p_sample_type); 1242 PRINT_ATTRf(read_format, p_read_format); 1243 1244 PRINT_ATTRf(disabled, p_unsigned); 1245 PRINT_ATTRf(inherit, p_unsigned); 1246 PRINT_ATTRf(pinned, p_unsigned); 1247 PRINT_ATTRf(exclusive, p_unsigned); 1248 PRINT_ATTRf(exclude_user, p_unsigned); 1249 PRINT_ATTRf(exclude_kernel, p_unsigned); 1250 PRINT_ATTRf(exclude_hv, p_unsigned); 1251 PRINT_ATTRf(exclude_idle, p_unsigned); 1252 PRINT_ATTRf(mmap, p_unsigned); 1253 PRINT_ATTRf(comm, p_unsigned); 1254 PRINT_ATTRf(freq, p_unsigned); 1255 PRINT_ATTRf(inherit_stat, p_unsigned); 1256 PRINT_ATTRf(enable_on_exec, p_unsigned); 1257 PRINT_ATTRf(task, p_unsigned); 1258 PRINT_ATTRf(watermark, p_unsigned); 1259 PRINT_ATTRf(precise_ip, p_unsigned); 1260 PRINT_ATTRf(mmap_data, p_unsigned); 1261 PRINT_ATTRf(sample_id_all, p_unsigned); 1262 PRINT_ATTRf(exclude_host, p_unsigned); 1263 PRINT_ATTRf(exclude_guest, p_unsigned); 1264 PRINT_ATTRf(exclude_callchain_kernel, p_unsigned); 1265 PRINT_ATTRf(exclude_callchain_user, p_unsigned); 1266 PRINT_ATTRf(mmap2, p_unsigned); 1267 PRINT_ATTRf(comm_exec, p_unsigned); 1268 PRINT_ATTRf(use_clockid, p_unsigned); 1269 PRINT_ATTRf(context_switch, p_unsigned); 1270 1271 PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned); 1272 PRINT_ATTRf(bp_type, p_unsigned); 1273 PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex); 1274 PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex); 1275 PRINT_ATTRf(branch_sample_type, 
p_unsigned); 1276 PRINT_ATTRf(sample_regs_user, p_hex); 1277 PRINT_ATTRf(sample_stack_user, p_unsigned); 1278 PRINT_ATTRf(clockid, p_signed); 1279 PRINT_ATTRf(sample_regs_intr, p_hex); 1280 PRINT_ATTRf(aux_watermark, p_unsigned); 1281 1282 return ret; 1283 } 1284 1285 static int __open_attr__fprintf(FILE *fp, const char *name, const char *val, 1286 void *priv __attribute__((unused))) 1287 { 1288 return fprintf(fp, " %-32s %s\n", name, val); 1289 } 1290 1291 static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, 1292 struct thread_map *threads) 1293 { 1294 int cpu, thread, nthreads; 1295 unsigned long flags = PERF_FLAG_FD_CLOEXEC; 1296 int pid = -1, err; 1297 enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE; 1298 1299 if (evsel->system_wide) 1300 nthreads = 1; 1301 else 1302 nthreads = threads->nr; 1303 1304 if (evsel->fd == NULL && 1305 perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0) 1306 return -ENOMEM; 1307 1308 if (evsel->cgrp) { 1309 flags |= PERF_FLAG_PID_CGROUP; 1310 pid = evsel->cgrp->fd; 1311 } 1312 1313 fallback_missing_features: 1314 if (perf_missing_features.clockid_wrong) 1315 evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */ 1316 if (perf_missing_features.clockid) { 1317 evsel->attr.use_clockid = 0; 1318 evsel->attr.clockid = 0; 1319 } 1320 if (perf_missing_features.cloexec) 1321 flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC; 1322 if (perf_missing_features.mmap2) 1323 evsel->attr.mmap2 = 0; 1324 if (perf_missing_features.exclude_guest) 1325 evsel->attr.exclude_guest = evsel->attr.exclude_host = 0; 1326 retry_sample_id: 1327 if (perf_missing_features.sample_id_all) 1328 evsel->attr.sample_id_all = 0; 1329 1330 if (verbose >= 2) { 1331 fprintf(stderr, "%.60s\n", graph_dotted_line); 1332 fprintf(stderr, "perf_event_attr:\n"); 1333 perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL); 1334 fprintf(stderr, "%.60s\n", graph_dotted_line); 1335 } 1336 1337 for (cpu = 0; cpu < cpus->nr; cpu++) { 1338 1339 for (thread = 0; thread < nthreads; thread++) { 1340 int group_fd; 1341 1342 if (!evsel->cgrp && !evsel->system_wide) 1343 pid = thread_map__pid(threads, thread); 1344 1345 group_fd = get_group_fd(evsel, cpu, thread); 1346 retry_open: 1347 pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n", 1348 pid, cpus->map[cpu], group_fd, flags); 1349 1350 FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr, 1351 pid, 1352 cpus->map[cpu], 1353 group_fd, flags); 1354 if (FD(evsel, cpu, thread) < 0) { 1355 err = -errno; 1356 pr_debug2("sys_perf_event_open failed, error %d\n", 1357 err); 1358 goto try_fallback; 1359 } 1360 1361 if (evsel->bpf_fd >= 0) { 1362 int evt_fd = FD(evsel, cpu, thread); 1363 int bpf_fd = evsel->bpf_fd; 1364 1365 err = ioctl(evt_fd, 1366 PERF_EVENT_IOC_SET_BPF, 1367 bpf_fd); 1368 if (err && errno != EEXIST) { 1369 pr_err("failed to attach bpf fd %d: %s\n", 1370 bpf_fd, strerror(errno)); 1371 err = -EINVAL; 1372 goto out_close; 1373 } 1374 } 1375 1376 set_rlimit = NO_CHANGE; 1377 1378 /* 1379 * If we succeeded but had to kill clockid, fail and 1380 * have perf_evsel__open_strerror() print us a nice 1381 * error. 1382 */ 1383 if (perf_missing_features.clockid || 1384 perf_missing_features.clockid_wrong) { 1385 err = -EINVAL; 1386 goto out_close; 1387 } 1388 } 1389 } 1390 1391 return 0; 1392 1393 try_fallback: 1394 /* 1395 * perf stat needs between 5 and 22 fds per CPU. When we run out 1396 * of them try to increase the limits. 
1397 */ 1398 if (err == -EMFILE && set_rlimit < INCREASED_MAX) { 1399 struct rlimit l; 1400 int old_errno = errno; 1401 1402 if (getrlimit(RLIMIT_NOFILE, &l) == 0) { 1403 if (set_rlimit == NO_CHANGE) 1404 l.rlim_cur = l.rlim_max; 1405 else { 1406 l.rlim_cur = l.rlim_max + 1000; 1407 l.rlim_max = l.rlim_cur; 1408 } 1409 if (setrlimit(RLIMIT_NOFILE, &l) == 0) { 1410 set_rlimit++; 1411 errno = old_errno; 1412 goto retry_open; 1413 } 1414 } 1415 errno = old_errno; 1416 } 1417 1418 if (err != -EINVAL || cpu > 0 || thread > 0) 1419 goto out_close; 1420 1421 /* 1422 * Must probe features in the order they were added to the 1423 * perf_event_attr interface. 1424 */ 1425 if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) { 1426 perf_missing_features.clockid_wrong = true; 1427 goto fallback_missing_features; 1428 } else if (!perf_missing_features.clockid && evsel->attr.use_clockid) { 1429 perf_missing_features.clockid = true; 1430 goto fallback_missing_features; 1431 } else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) { 1432 perf_missing_features.cloexec = true; 1433 goto fallback_missing_features; 1434 } else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) { 1435 perf_missing_features.mmap2 = true; 1436 goto fallback_missing_features; 1437 } else if (!perf_missing_features.exclude_guest && 1438 (evsel->attr.exclude_guest || evsel->attr.exclude_host)) { 1439 perf_missing_features.exclude_guest = true; 1440 goto fallback_missing_features; 1441 } else if (!perf_missing_features.sample_id_all) { 1442 perf_missing_features.sample_id_all = true; 1443 goto retry_sample_id; 1444 } 1445 1446 out_close: 1447 do { 1448 while (--thread >= 0) { 1449 close(FD(evsel, cpu, thread)); 1450 FD(evsel, cpu, thread) = -1; 1451 } 1452 thread = nthreads; 1453 } while (--cpu >= 0); 1454 return err; 1455 } 1456 1457 void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads) 1458 { 1459 if (evsel->fd == NULL) 1460 return; 1461 1462 perf_evsel__close_fd(evsel, ncpus, nthreads); 1463 perf_evsel__free_fd(evsel); 1464 } 1465 1466 static struct { 1467 struct cpu_map map; 1468 int cpus[1]; 1469 } empty_cpu_map = { 1470 .map.nr = 1, 1471 .cpus = { -1, }, 1472 }; 1473 1474 static struct { 1475 struct thread_map map; 1476 int threads[1]; 1477 } empty_thread_map = { 1478 .map.nr = 1, 1479 .threads = { -1, }, 1480 }; 1481 1482 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, 1483 struct thread_map *threads) 1484 { 1485 if (cpus == NULL) { 1486 /* Work around old compiler warnings about strict aliasing */ 1487 cpus = &empty_cpu_map.map; 1488 } 1489 1490 if (threads == NULL) 1491 threads = &empty_thread_map.map; 1492 1493 return __perf_evsel__open(evsel, cpus, threads); 1494 } 1495 1496 int perf_evsel__open_per_cpu(struct perf_evsel *evsel, 1497 struct cpu_map *cpus) 1498 { 1499 return __perf_evsel__open(evsel, cpus, &empty_thread_map.map); 1500 } 1501 1502 int perf_evsel__open_per_thread(struct perf_evsel *evsel, 1503 struct thread_map *threads) 1504 { 1505 return __perf_evsel__open(evsel, &empty_cpu_map.map, threads); 1506 } 1507 1508 static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel, 1509 const union perf_event *event, 1510 struct perf_sample *sample) 1511 { 1512 u64 type = evsel->attr.sample_type; 1513 const u64 *array = event->sample.array; 1514 bool swapped = evsel->needs_swap; 1515 union u64_swap u; 1516 1517 array += ((event->header.size - 1518 sizeof(event->header)) / sizeof(u64)) - 1; 1519 1520 if (type & 
PERF_SAMPLE_IDENTIFIER) { 1521 sample->id = *array; 1522 array--; 1523 } 1524 1525 if (type & PERF_SAMPLE_CPU) { 1526 u.val64 = *array; 1527 if (swapped) { 1528 /* undo swap of u64, then swap on individual u32s */ 1529 u.val64 = bswap_64(u.val64); 1530 u.val32[0] = bswap_32(u.val32[0]); 1531 } 1532 1533 sample->cpu = u.val32[0]; 1534 array--; 1535 } 1536 1537 if (type & PERF_SAMPLE_STREAM_ID) { 1538 sample->stream_id = *array; 1539 array--; 1540 } 1541 1542 if (type & PERF_SAMPLE_ID) { 1543 sample->id = *array; 1544 array--; 1545 } 1546 1547 if (type & PERF_SAMPLE_TIME) { 1548 sample->time = *array; 1549 array--; 1550 } 1551 1552 if (type & PERF_SAMPLE_TID) { 1553 u.val64 = *array; 1554 if (swapped) { 1555 /* undo swap of u64, then swap on individual u32s */ 1556 u.val64 = bswap_64(u.val64); 1557 u.val32[0] = bswap_32(u.val32[0]); 1558 u.val32[1] = bswap_32(u.val32[1]); 1559 } 1560 1561 sample->pid = u.val32[0]; 1562 sample->tid = u.val32[1]; 1563 array--; 1564 } 1565 1566 return 0; 1567 } 1568 1569 static inline bool overflow(const void *endp, u16 max_size, const void *offset, 1570 u64 size) 1571 { 1572 return size > max_size || offset + size > endp; 1573 } 1574 1575 #define OVERFLOW_CHECK(offset, size, max_size) \ 1576 do { \ 1577 if (overflow(endp, (max_size), (offset), (size))) \ 1578 return -EFAULT; \ 1579 } while (0) 1580 1581 #define OVERFLOW_CHECK_u64(offset) \ 1582 OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64)) 1583 1584 int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, 1585 struct perf_sample *data) 1586 { 1587 u64 type = evsel->attr.sample_type; 1588 bool swapped = evsel->needs_swap; 1589 const u64 *array; 1590 u16 max_size = event->header.size; 1591 const void *endp = (void *)event + max_size; 1592 u64 sz; 1593 1594 /* 1595 * used for cross-endian analysis. See git commit 65014ab3 1596 * for why this goofiness is needed. 1597 */ 1598 union u64_swap u; 1599 1600 memset(data, 0, sizeof(*data)); 1601 data->cpu = data->pid = data->tid = -1; 1602 data->stream_id = data->id = data->time = -1ULL; 1603 data->period = evsel->attr.sample_period; 1604 data->weight = 0; 1605 1606 if (event->header.type != PERF_RECORD_SAMPLE) { 1607 if (!evsel->attr.sample_id_all) 1608 return 0; 1609 return perf_evsel__parse_id_sample(evsel, event, data); 1610 } 1611 1612 array = event->sample.array; 1613 1614 /* 1615 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes 1616 * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to 1617 * check the format does not go past the end of the event. 
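	 *
	 * Illustration (added): a PERF_SAMPLE_CALLCHAIN payload is a u64
	 * count followed by that many u64 entries; only the leading u64
	 * is covered by sample_size, so the variable-length tail is
	 * validated with OVERFLOW_CHECK(array, nr * sizeof(u64), max_size)
	 * before it is dereferenced.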
1618 */ 1619 if (evsel->sample_size + sizeof(event->header) > event->header.size) 1620 return -EFAULT; 1621 1622 data->id = -1ULL; 1623 if (type & PERF_SAMPLE_IDENTIFIER) { 1624 data->id = *array; 1625 array++; 1626 } 1627 1628 if (type & PERF_SAMPLE_IP) { 1629 data->ip = *array; 1630 array++; 1631 } 1632 1633 if (type & PERF_SAMPLE_TID) { 1634 u.val64 = *array; 1635 if (swapped) { 1636 /* undo swap of u64, then swap on individual u32s */ 1637 u.val64 = bswap_64(u.val64); 1638 u.val32[0] = bswap_32(u.val32[0]); 1639 u.val32[1] = bswap_32(u.val32[1]); 1640 } 1641 1642 data->pid = u.val32[0]; 1643 data->tid = u.val32[1]; 1644 array++; 1645 } 1646 1647 if (type & PERF_SAMPLE_TIME) { 1648 data->time = *array; 1649 array++; 1650 } 1651 1652 data->addr = 0; 1653 if (type & PERF_SAMPLE_ADDR) { 1654 data->addr = *array; 1655 array++; 1656 } 1657 1658 if (type & PERF_SAMPLE_ID) { 1659 data->id = *array; 1660 array++; 1661 } 1662 1663 if (type & PERF_SAMPLE_STREAM_ID) { 1664 data->stream_id = *array; 1665 array++; 1666 } 1667 1668 if (type & PERF_SAMPLE_CPU) { 1669 1670 u.val64 = *array; 1671 if (swapped) { 1672 /* undo swap of u64, then swap on individual u32s */ 1673 u.val64 = bswap_64(u.val64); 1674 u.val32[0] = bswap_32(u.val32[0]); 1675 } 1676 1677 data->cpu = u.val32[0]; 1678 array++; 1679 } 1680 1681 if (type & PERF_SAMPLE_PERIOD) { 1682 data->period = *array; 1683 array++; 1684 } 1685 1686 if (type & PERF_SAMPLE_READ) { 1687 u64 read_format = evsel->attr.read_format; 1688 1689 OVERFLOW_CHECK_u64(array); 1690 if (read_format & PERF_FORMAT_GROUP) 1691 data->read.group.nr = *array; 1692 else 1693 data->read.one.value = *array; 1694 1695 array++; 1696 1697 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 1698 OVERFLOW_CHECK_u64(array); 1699 data->read.time_enabled = *array; 1700 array++; 1701 } 1702 1703 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 1704 OVERFLOW_CHECK_u64(array); 1705 data->read.time_running = *array; 1706 array++; 1707 } 1708 1709 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ 1710 if (read_format & PERF_FORMAT_GROUP) { 1711 const u64 max_group_nr = UINT64_MAX / 1712 sizeof(struct sample_read_value); 1713 1714 if (data->read.group.nr > max_group_nr) 1715 return -EFAULT; 1716 sz = data->read.group.nr * 1717 sizeof(struct sample_read_value); 1718 OVERFLOW_CHECK(array, sz, max_size); 1719 data->read.group.values = 1720 (struct sample_read_value *)array; 1721 array = (void *)array + sz; 1722 } else { 1723 OVERFLOW_CHECK_u64(array); 1724 data->read.one.id = *array; 1725 array++; 1726 } 1727 } 1728 1729 if (type & PERF_SAMPLE_CALLCHAIN) { 1730 const u64 max_callchain_nr = UINT64_MAX / sizeof(u64); 1731 1732 OVERFLOW_CHECK_u64(array); 1733 data->callchain = (struct ip_callchain *)array++; 1734 if (data->callchain->nr > max_callchain_nr) 1735 return -EFAULT; 1736 sz = data->callchain->nr * sizeof(u64); 1737 OVERFLOW_CHECK(array, sz, max_size); 1738 array = (void *)array + sz; 1739 } 1740 1741 if (type & PERF_SAMPLE_RAW) { 1742 OVERFLOW_CHECK_u64(array); 1743 u.val64 = *array; 1744 if (WARN_ONCE(swapped, 1745 "Endianness of raw data not corrected!\n")) { 1746 /* undo swap of u64, then swap on individual u32s */ 1747 u.val64 = bswap_64(u.val64); 1748 u.val32[0] = bswap_32(u.val32[0]); 1749 u.val32[1] = bswap_32(u.val32[1]); 1750 } 1751 data->raw_size = u.val32[0]; 1752 array = (void *)array + sizeof(u32); 1753 1754 OVERFLOW_CHECK(array, data->raw_size, max_size); 1755 data->raw_data = (void *)array; 1756 array = (void *)array + data->raw_size; 1757 } 1758 1759 if (type & 
PERF_SAMPLE_BRANCH_STACK) { 1760 const u64 max_branch_nr = UINT64_MAX / 1761 sizeof(struct branch_entry); 1762 1763 OVERFLOW_CHECK_u64(array); 1764 data->branch_stack = (struct branch_stack *)array++; 1765 1766 if (data->branch_stack->nr > max_branch_nr) 1767 return -EFAULT; 1768 sz = data->branch_stack->nr * sizeof(struct branch_entry); 1769 OVERFLOW_CHECK(array, sz, max_size); 1770 array = (void *)array + sz; 1771 } 1772 1773 if (type & PERF_SAMPLE_REGS_USER) { 1774 OVERFLOW_CHECK_u64(array); 1775 data->user_regs.abi = *array; 1776 array++; 1777 1778 if (data->user_regs.abi) { 1779 u64 mask = evsel->attr.sample_regs_user; 1780 1781 sz = hweight_long(mask) * sizeof(u64); 1782 OVERFLOW_CHECK(array, sz, max_size); 1783 data->user_regs.mask = mask; 1784 data->user_regs.regs = (u64 *)array; 1785 array = (void *)array + sz; 1786 } 1787 } 1788 1789 if (type & PERF_SAMPLE_STACK_USER) { 1790 OVERFLOW_CHECK_u64(array); 1791 sz = *array++; 1792 1793 data->user_stack.offset = ((char *)(array - 1) 1794 - (char *) event); 1795 1796 if (!sz) { 1797 data->user_stack.size = 0; 1798 } else { 1799 OVERFLOW_CHECK(array, sz, max_size); 1800 data->user_stack.data = (char *)array; 1801 array = (void *)array + sz; 1802 OVERFLOW_CHECK_u64(array); 1803 data->user_stack.size = *array++; 1804 if (WARN_ONCE(data->user_stack.size > sz, 1805 "user stack dump failure\n")) 1806 return -EFAULT; 1807 } 1808 } 1809 1810 data->weight = 0; 1811 if (type & PERF_SAMPLE_WEIGHT) { 1812 OVERFLOW_CHECK_u64(array); 1813 data->weight = *array; 1814 array++; 1815 } 1816 1817 data->data_src = PERF_MEM_DATA_SRC_NONE; 1818 if (type & PERF_SAMPLE_DATA_SRC) { 1819 OVERFLOW_CHECK_u64(array); 1820 data->data_src = *array; 1821 array++; 1822 } 1823 1824 data->transaction = 0; 1825 if (type & PERF_SAMPLE_TRANSACTION) { 1826 OVERFLOW_CHECK_u64(array); 1827 data->transaction = *array; 1828 array++; 1829 } 1830 1831 data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE; 1832 if (type & PERF_SAMPLE_REGS_INTR) { 1833 OVERFLOW_CHECK_u64(array); 1834 data->intr_regs.abi = *array; 1835 array++; 1836 1837 if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) { 1838 u64 mask = evsel->attr.sample_regs_intr; 1839 1840 sz = hweight_long(mask) * sizeof(u64); 1841 OVERFLOW_CHECK(array, sz, max_size); 1842 data->intr_regs.mask = mask; 1843 data->intr_regs.regs = (u64 *)array; 1844 array = (void *)array + sz; 1845 } 1846 } 1847 1848 return 0; 1849 } 1850 1851 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, 1852 u64 read_format) 1853 { 1854 size_t sz, result = sizeof(struct sample_event); 1855 1856 if (type & PERF_SAMPLE_IDENTIFIER) 1857 result += sizeof(u64); 1858 1859 if (type & PERF_SAMPLE_IP) 1860 result += sizeof(u64); 1861 1862 if (type & PERF_SAMPLE_TID) 1863 result += sizeof(u64); 1864 1865 if (type & PERF_SAMPLE_TIME) 1866 result += sizeof(u64); 1867 1868 if (type & PERF_SAMPLE_ADDR) 1869 result += sizeof(u64); 1870 1871 if (type & PERF_SAMPLE_ID) 1872 result += sizeof(u64); 1873 1874 if (type & PERF_SAMPLE_STREAM_ID) 1875 result += sizeof(u64); 1876 1877 if (type & PERF_SAMPLE_CPU) 1878 result += sizeof(u64); 1879 1880 if (type & PERF_SAMPLE_PERIOD) 1881 result += sizeof(u64); 1882 1883 if (type & PERF_SAMPLE_READ) { 1884 result += sizeof(u64); 1885 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1886 result += sizeof(u64); 1887 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1888 result += sizeof(u64); 1889 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ 1890 if (read_format & PERF_FORMAT_GROUP) { 1891 sz = 
sample->read.group.nr * 1892 sizeof(struct sample_read_value); 1893 result += sz; 1894 } else { 1895 result += sizeof(u64); 1896 } 1897 } 1898 1899 if (type & PERF_SAMPLE_CALLCHAIN) { 1900 sz = (sample->callchain->nr + 1) * sizeof(u64); 1901 result += sz; 1902 } 1903 1904 if (type & PERF_SAMPLE_RAW) { 1905 result += sizeof(u32); 1906 result += sample->raw_size; 1907 } 1908 1909 if (type & PERF_SAMPLE_BRANCH_STACK) { 1910 sz = sample->branch_stack->nr * sizeof(struct branch_entry); 1911 sz += sizeof(u64); 1912 result += sz; 1913 } 1914 1915 if (type & PERF_SAMPLE_REGS_USER) { 1916 if (sample->user_regs.abi) { 1917 result += sizeof(u64); 1918 sz = hweight_long(sample->user_regs.mask) * sizeof(u64); 1919 result += sz; 1920 } else { 1921 result += sizeof(u64); 1922 } 1923 } 1924 1925 if (type & PERF_SAMPLE_STACK_USER) { 1926 sz = sample->user_stack.size; 1927 result += sizeof(u64); 1928 if (sz) { 1929 result += sz; 1930 result += sizeof(u64); 1931 } 1932 } 1933 1934 if (type & PERF_SAMPLE_WEIGHT) 1935 result += sizeof(u64); 1936 1937 if (type & PERF_SAMPLE_DATA_SRC) 1938 result += sizeof(u64); 1939 1940 if (type & PERF_SAMPLE_TRANSACTION) 1941 result += sizeof(u64); 1942 1943 if (type & PERF_SAMPLE_REGS_INTR) { 1944 if (sample->intr_regs.abi) { 1945 result += sizeof(u64); 1946 sz = hweight_long(sample->intr_regs.mask) * sizeof(u64); 1947 result += sz; 1948 } else { 1949 result += sizeof(u64); 1950 } 1951 } 1952 1953 return result; 1954 } 1955 1956 int perf_event__synthesize_sample(union perf_event *event, u64 type, 1957 u64 read_format, 1958 const struct perf_sample *sample, 1959 bool swapped) 1960 { 1961 u64 *array; 1962 size_t sz; 1963 /* 1964 * used for cross-endian analysis. See git commit 65014ab3 1965 * for why this goofiness is needed. 1966 */ 1967 union u64_swap u; 1968 1969 array = event->sample.array; 1970 1971 if (type & PERF_SAMPLE_IDENTIFIER) { 1972 *array = sample->id; 1973 array++; 1974 } 1975 1976 if (type & PERF_SAMPLE_IP) { 1977 *array = sample->ip; 1978 array++; 1979 } 1980 1981 if (type & PERF_SAMPLE_TID) { 1982 u.val32[0] = sample->pid; 1983 u.val32[1] = sample->tid; 1984 if (swapped) { 1985 /* 1986 * Inverse of what is done in perf_evsel__parse_sample 1987 */ 1988 u.val32[0] = bswap_32(u.val32[0]); 1989 u.val32[1] = bswap_32(u.val32[1]); 1990 u.val64 = bswap_64(u.val64); 1991 } 1992 1993 *array = u.val64; 1994 array++; 1995 } 1996 1997 if (type & PERF_SAMPLE_TIME) { 1998 *array = sample->time; 1999 array++; 2000 } 2001 2002 if (type & PERF_SAMPLE_ADDR) { 2003 *array = sample->addr; 2004 array++; 2005 } 2006 2007 if (type & PERF_SAMPLE_ID) { 2008 *array = sample->id; 2009 array++; 2010 } 2011 2012 if (type & PERF_SAMPLE_STREAM_ID) { 2013 *array = sample->stream_id; 2014 array++; 2015 } 2016 2017 if (type & PERF_SAMPLE_CPU) { 2018 u.val32[0] = sample->cpu; 2019 if (swapped) { 2020 /* 2021 * Inverse of what is done in perf_evsel__parse_sample 2022 */ 2023 u.val32[0] = bswap_32(u.val32[0]); 2024 u.val64 = bswap_64(u.val64); 2025 } 2026 *array = u.val64; 2027 array++; 2028 } 2029 2030 if (type & PERF_SAMPLE_PERIOD) { 2031 *array = sample->period; 2032 array++; 2033 } 2034 2035 if (type & PERF_SAMPLE_READ) { 2036 if (read_format & PERF_FORMAT_GROUP) 2037 *array = sample->read.group.nr; 2038 else 2039 *array = sample->read.one.value; 2040 array++; 2041 2042 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 2043 *array = sample->read.time_enabled; 2044 array++; 2045 } 2046 2047 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 2048 *array = sample->read.time_running; 2049 
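		/*
		 * Layout note (added for illustration): synthesis emits
		 * fields in exactly the order perf_evsel__parse_sample()
		 * consumes them. For example, sample_type ==
		 * (PERF_SAMPLE_TID | PERF_SAMPLE_READ) with read_format ==
		 * (PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID) yields
		 * four u64s:
		 *
		 *	array[0]  pid and tid packed as two u32s
		 *	array[1]  read.one.value
		 *	array[2]  read.time_enabled
		 *	array[3]  read.one.id
		 */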
array++; 2050 } 2051 2052 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ 2053 if (read_format & PERF_FORMAT_GROUP) { 2054 sz = sample->read.group.nr * 2055 sizeof(struct sample_read_value); 2056 memcpy(array, sample->read.group.values, sz); 2057 array = (void *)array + sz; 2058 } else { 2059 *array = sample->read.one.id; 2060 array++; 2061 } 2062 } 2063 2064 if (type & PERF_SAMPLE_CALLCHAIN) { 2065 sz = (sample->callchain->nr + 1) * sizeof(u64); 2066 memcpy(array, sample->callchain, sz); 2067 array = (void *)array + sz; 2068 } 2069 2070 if (type & PERF_SAMPLE_RAW) { 2071 u.val32[0] = sample->raw_size; 2072 if (WARN_ONCE(swapped, 2073 "Endianness of raw data not corrected!\n")) { 2074 /* 2075 * Inverse of what is done in perf_evsel__parse_sample 2076 */ 2077 u.val32[0] = bswap_32(u.val32[0]); 2078 u.val32[1] = bswap_32(u.val32[1]); 2079 u.val64 = bswap_64(u.val64); 2080 } 2081 *array = u.val64; 2082 array = (void *)array + sizeof(u32); 2083 2084 memcpy(array, sample->raw_data, sample->raw_size); 2085 array = (void *)array + sample->raw_size; 2086 } 2087 2088 if (type & PERF_SAMPLE_BRANCH_STACK) { 2089 sz = sample->branch_stack->nr * sizeof(struct branch_entry); 2090 sz += sizeof(u64); 2091 memcpy(array, sample->branch_stack, sz); 2092 array = (void *)array + sz; 2093 } 2094 2095 if (type & PERF_SAMPLE_REGS_USER) { 2096 if (sample->user_regs.abi) { 2097 *array++ = sample->user_regs.abi; 2098 sz = hweight_long(sample->user_regs.mask) * sizeof(u64); 2099 memcpy(array, sample->user_regs.regs, sz); 2100 array = (void *)array + sz; 2101 } else { 2102 *array++ = 0; 2103 } 2104 } 2105 2106 if (type & PERF_SAMPLE_STACK_USER) { 2107 sz = sample->user_stack.size; 2108 *array++ = sz; 2109 if (sz) { 2110 memcpy(array, sample->user_stack.data, sz); 2111 array = (void *)array + sz; 2112 *array++ = sz; 2113 } 2114 } 2115 2116 if (type & PERF_SAMPLE_WEIGHT) { 2117 *array = sample->weight; 2118 array++; 2119 } 2120 2121 if (type & PERF_SAMPLE_DATA_SRC) { 2122 *array = sample->data_src; 2123 array++; 2124 } 2125 2126 if (type & PERF_SAMPLE_TRANSACTION) { 2127 *array = sample->transaction; 2128 array++; 2129 } 2130 2131 if (type & PERF_SAMPLE_REGS_INTR) { 2132 if (sample->intr_regs.abi) { 2133 *array++ = sample->intr_regs.abi; 2134 sz = hweight_long(sample->intr_regs.mask) * sizeof(u64); 2135 memcpy(array, sample->intr_regs.regs, sz); 2136 array = (void *)array + sz; 2137 } else { 2138 *array++ = 0; 2139 } 2140 } 2141 2142 return 0; 2143 } 2144 2145 struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name) 2146 { 2147 return pevent_find_field(evsel->tp_format, name); 2148 } 2149 2150 void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample, 2151 const char *name) 2152 { 2153 struct format_field *field = perf_evsel__field(evsel, name); 2154 int offset; 2155 2156 if (!field) 2157 return NULL; 2158 2159 offset = field->offset; 2160 2161 if (field->flags & FIELD_IS_DYNAMIC) { 2162 offset = *(int *)(sample->raw_data + field->offset); 2163 offset &= 0xffff; 2164 } 2165 2166 return sample->raw_data + offset; 2167 } 2168 2169 u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, 2170 const char *name) 2171 { 2172 struct format_field *field = perf_evsel__field(evsel, name); 2173 void *ptr; 2174 u64 value; 2175 2176 if (!field) 2177 return 0; 2178 2179 ptr = sample->raw_data + field->offset; 2180 2181 switch (field->size) { 2182 case 1: 2183 return *(u8 *)ptr; 2184 case 2: 2185 value = *(u16 *)ptr; 2186 break; 2187 case 4: 2188 value = *(u32 
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		memcpy(&value, ptr, sizeof(u64));
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}
}

static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (!*first) {
		ret += fprintf(fp, ",");
	} else {
		ret += fprintf(fp, ":");
		*first = false;
	}

	va_start(args, fmt);
	ret += vfprintf(fp, fmt, args);
	va_end(args);
	return ret;
}

static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv)
{
	return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val);
}

int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp)
{
	bool first = true;
	int printed = 0;

	if (details->event_group) {
		struct perf_evsel *pos;

		if (!perf_evsel__is_group_leader(evsel))
			return 0;

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "%s{", evsel->group_name ?: "");

		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
		for_each_group_member(pos, evsel)
			printed += fprintf(fp, ",%s", perf_evsel__name(pos));

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "}");
		goto out;
	}

	printed += fprintf(fp, "%s", perf_evsel__name(evsel));

	if (details->verbose) {
		printed += perf_event_attr__fprintf(fp, &evsel->attr,
						    __print_attr__fprintf, &first);
	} else if (details->freq) {
		const char *term = "sample_freq";

		if (!evsel->attr.freq)
			term = "sample_period";

		printed += comma_fprintf(fp, &first, " %s=%" PRIu64,
					 term, (u64)evsel->attr.sample_freq);
	}
out:
	fputc('\n', fp);
	return ++printed;
}

bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to the hrtimer-based
		 * cpu-clock-tick sw counter, which is always available
		 * even when there is no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with
		 * commit b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	}

	return false;
}
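/*
 * Illustrative sketch of the retry pattern tools like 'perf record' use
 * with perf_evsel__fallback(): when opening the event fails, try the
 * software fallback once before reporting the error. The function name
 * and the pr_debug() message are hypothetical.
 */
static int __maybe_unused
example_open_with_fallback(struct perf_evsel *evsel, struct cpu_map *cpus,
			   struct thread_map *threads)
{
	char msg[512];
	int err;

try_again:
	err = perf_evsel__open(evsel, cpus, threads);
	if (err < 0 && perf_evsel__fallback(evsel, errno, msg, sizeof(msg))) {
		pr_debug("%s\n", msg);
		goto try_again;
	}

	return err;
}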
"system-wide " : ""); 2323 case ENOENT: 2324 return scnprintf(msg, size, "The %s event is not supported.", 2325 perf_evsel__name(evsel)); 2326 case EMFILE: 2327 return scnprintf(msg, size, "%s", 2328 "Too many events are opened.\n" 2329 "Probably the maximum number of open file descriptors has been reached.\n" 2330 "Hint: Try again after reducing the number of events.\n" 2331 "Hint: Try increasing the limit with 'ulimit -n <limit>'"); 2332 case ENODEV: 2333 if (target->cpu_list) 2334 return scnprintf(msg, size, "%s", 2335 "No such device - did you specify an out-of-range profile CPU?\n"); 2336 break; 2337 case EOPNOTSUPP: 2338 if (evsel->attr.precise_ip) 2339 return scnprintf(msg, size, "%s", 2340 "\'precise\' request may not be supported. Try removing 'p' modifier."); 2341 #if defined(__i386__) || defined(__x86_64__) 2342 if (evsel->attr.type == PERF_TYPE_HARDWARE) 2343 return scnprintf(msg, size, "%s", 2344 "No hardware sampling interrupt available.\n" 2345 "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it."); 2346 #endif 2347 break; 2348 case EBUSY: 2349 if (find_process("oprofiled")) 2350 return scnprintf(msg, size, 2351 "The PMU counters are busy/taken by another profiler.\n" 2352 "We found oprofile daemon running, please stop it and try again."); 2353 break; 2354 case EINVAL: 2355 if (perf_missing_features.clockid) 2356 return scnprintf(msg, size, "clockid feature not supported."); 2357 if (perf_missing_features.clockid_wrong) 2358 return scnprintf(msg, size, "wrong clockid (%d).", clockid); 2359 break; 2360 default: 2361 break; 2362 } 2363 2364 return scnprintf(msg, size, 2365 "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n" 2366 "/bin/dmesg may provide additional information.\n" 2367 "No CONFIG_PERF_EVENTS=y kernel support configured?\n", 2368 err, strerror_r(err, sbuf, sizeof(sbuf)), 2369 perf_evsel__name(evsel)); 2370 } 2371