xref: /linux-6.15/tools/perf/util/annotate.c (revision dbdebdc5)
1 /*
2  * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
3  *
4  * Parts came from builtin-annotate.c, see those files for further
5  * copyright notes.
6  *
7  * Released under the GPL v2. (and only v2, not any later version)
8  */
9 
10 #include "util.h"
11 #include "ui/ui.h"
12 #include "sort.h"
13 #include "build-id.h"
14 #include "color.h"
15 #include "cache.h"
16 #include "symbol.h"
17 #include "debug.h"
18 #include "annotate.h"
19 #include "evsel.h"
20 #include "block-range.h"
21 #include "arch/common.h"
22 #include <regex.h>
23 #include <pthread.h>
24 #include <linux/bitops.h>
25 #include <sys/utsname.h>
26 
/* Command-line overrides: "-M <style>" and "--objdump <path>". */
const char 	*disassembler_style;
const char	*objdump_path;
/* Matches "/filename:123" lines in objdump -l output; compiled once by
 * the constructor symbol__init_regexpr() at the bottom of this file. */
static regex_t	 file_lineno;

/* Forward declarations for helpers used before their definitions. */
static struct ins_ops *ins__find(struct arch *arch, const char *name);
static void ins__sort(struct arch *arch);
static int disasm_line__parse(char *line, const char **namep, char **rawp);
34 
/*
 * Per-architecture disassembly description: the instruction table that
 * maps mnemonics to ins_ops, plus objdump syntax quirks for this arch.
 */
struct arch {
	const char	*name;
	struct ins	*instructions;		/* searched by __ins__find() */
	size_t		nr_instructions;
	size_t		nr_instructions_allocated; /* 0 => static table, not heap-grown yet */
	/* optional fallback: dynamically associate ops for unknown mnemonics */
	struct ins_ops  *(*associate_instruction_ops)(struct arch *arch, const char *name);
	bool		sorted_instructions;	/* table lazily sorted on first lookup */
	bool		initialized;
	void		*priv;			/* arch-private state set up by init() */
	int		(*init)(struct arch *arch);
	struct		{
		char comment_char;		/* e.g. '#' for x86 objdump */
		char skip_functions_char;	/* call targets containing this char are skipped */
	} objdump;
};
50 
/* Instruction-kind handlers; definitions appear later in this file. */
static struct ins_ops call_ops;
static struct ins_ops dec_ops;
static struct ins_ops jump_ops;
static struct ins_ops mov_ops;
static struct ins_ops nop_ops;
static struct ins_ops lock_ops;
static struct ins_ops ret_ops;
58 
59 static int arch__grow_instructions(struct arch *arch)
60 {
61 	struct ins *new_instructions;
62 	size_t new_nr_allocated;
63 
64 	if (arch->nr_instructions_allocated == 0 && arch->instructions)
65 		goto grow_from_non_allocated_table;
66 
67 	new_nr_allocated = arch->nr_instructions_allocated + 128;
68 	new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins));
69 	if (new_instructions == NULL)
70 		return -1;
71 
72 out_update_instructions:
73 	arch->instructions = new_instructions;
74 	arch->nr_instructions_allocated = new_nr_allocated;
75 	return 0;
76 
77 grow_from_non_allocated_table:
78 	new_nr_allocated = arch->nr_instructions + 128;
79 	new_instructions = calloc(new_nr_allocated, sizeof(struct ins));
80 	if (new_instructions == NULL)
81 		return -1;
82 
83 	memcpy(new_instructions, arch->instructions, arch->nr_instructions);
84 	goto out_update_instructions;
85 }
86 
/*
 * Append a (name, ops) entry to arch's instruction table, growing it when
 * full, then re-sort so __ins__find()'s bsearch() keeps working.
 * Returns 0 on success, -1 on allocation failure.
 */
static int arch__associate_ins_ops(struct arch* arch, const char *name, struct ins_ops *ops)
{
	struct ins *ins;

	if (arch->nr_instructions == arch->nr_instructions_allocated &&
	    arch__grow_instructions(arch))
		return -1;

	ins = &arch->instructions[arch->nr_instructions];
	ins->name = strdup(name);	/* table owns its copy of the name */
	if (!ins->name)
		return -1;

	ins->ops  = ops;
	arch->nr_instructions++;

	/* O(n log n) per insertion; acceptable for the few dynamic entries */
	ins__sort(arch);
	return 0;
}
106 
107 #include "arch/arm/annotate/instructions.c"
108 #include "arch/x86/annotate/instructions.c"
109 #include "arch/powerpc/annotate/instructions.c"
110 
/*
 * Known architectures. arch__find() sorts this array by name once and then
 * uses bsearch(), so entries only need a unique name, not a fixed order.
 */
static struct arch architectures[] = {
	{
		.name = "arm",
		.init = arm__annotate_init,
	},
	{
		.name = "x86",
		/* static table from arch/x86/annotate/instructions.c */
		.instructions = x86__instructions,
		.nr_instructions = ARRAY_SIZE(x86__instructions),
		.objdump =  {
			.comment_char = '#',
		},
	},
	{
		.name = "powerpc",
		.init = powerpc__annotate_init,
	},
};
129 
130 static void ins__delete(struct ins_operands *ops)
131 {
132 	if (ops == NULL)
133 		return;
134 	zfree(&ops->source.raw);
135 	zfree(&ops->source.name);
136 	zfree(&ops->target.raw);
137 	zfree(&ops->target.name);
138 }
139 
/* Default formatter: "<mnemonic> <raw operands>", mnemonic padded to 6. */
static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
			      struct ins_operands *ops)
{
	return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw);
}
145 
146 int ins__scnprintf(struct ins *ins, char *bf, size_t size,
147 		  struct ins_operands *ops)
148 {
149 	if (ins->ops->scnprintf)
150 		return ins->ops->scnprintf(ins, bf, size, ops);
151 
152 	return ins__raw_scnprintf(ins, bf, size, ops);
153 }
154 
/*
 * Parse a call's operands. Direct calls ("addr <symbol>") set target.addr
 * and target.name; "*addr" indirect calls set only target.addr; calls with
 * no "<symbol>" (e.g. kcore output) are resolved through the map.
 * Returns 0 on success, -1 on parse/alloc failure.
 */
static int call__parse(struct arch *arch, struct ins_operands *ops, struct map *map)
{
	char *endptr, *tok, *name;

	ops->target.addr = strtoull(ops->raw, &endptr, 16);

	name = strchr(endptr, '<');
	if (name == NULL)
		goto indirect_call;

	name++;

	/* Some arches flag compiler-generated functions; skip those calls. */
	if (arch->objdump.skip_functions_char &&
	    strchr(name, arch->objdump.skip_functions_char))
		return -1;

	tok = strchr(name, '>');
	if (tok == NULL)
		return -1;

	/* Temporarily NUL-terminate the symbol name to strdup() it. */
	*tok = '\0';
	ops->target.name = strdup(name);
	*tok = '>';

	return ops->target.name == NULL ? -1 : 0;

indirect_call:
	tok = strchr(endptr, '*');
	if (tok == NULL) {
		/* No "*" marker either: resolve the target address via the
		 * map (kcore has no symbols in the objdump output). */
		struct symbol *sym = map__find_symbol(map, map->map_ip(map, ops->target.addr));
		if (sym != NULL)
			ops->target.name = strdup(sym->name);
		else
			ops->target.addr = 0;	/* unresolvable: print raw */
		return 0;
	}

	ops->target.addr = strtoull(tok + 1, NULL, 16);
	return 0;
}
195 
/* Print a call as its symbol name when known, raw when the target is
 * unresolved (addr == 0), or "*<addr>" for indirect calls. */
static int call__scnprintf(struct ins *ins, char *bf, size_t size,
			   struct ins_operands *ops)
{
	if (ops->target.name)
		return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name);

	if (ops->target.addr == 0)
		return ins__raw_scnprintf(ins, bf, size, ops);

	return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr);
}
207 
/* Handler for call instructions. */
static struct ins_ops call_ops = {
	.parse	   = call__parse,
	.scnprintf = call__scnprintf,
};
212 
/* True if the instruction was classified as a call by ins__find(). */
bool ins__is_call(const struct ins *ins)
{
	return ins->ops == &call_ops;
}
217 
218 static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map *map __maybe_unused)
219 {
220 	const char *s = strchr(ops->raw, '+');
221 
222 	ops->target.addr = strtoull(ops->raw, NULL, 16);
223 
224 	if (s++ != NULL)
225 		ops->target.offset = strtoull(s, NULL, 16);
226 	else
227 		ops->target.offset = UINT64_MAX;
228 
229 	return 0;
230 }
231 
/* Print a jump as its symbol-relative target offset. */
static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
			   struct ins_operands *ops)
{
	return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset);
}
237 
/* Handler for jump/branch instructions. */
static struct ins_ops jump_ops = {
	.parse	   = jump__parse,
	.scnprintf = jump__scnprintf,
};
242 
/* True if the instruction was classified as a jump by ins__find(). */
bool ins__is_jump(const struct ins *ins)
{
	return ins->ops == &jump_ops;
}
247 
248 static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
249 {
250 	char *endptr, *name, *t;
251 
252 	if (strstr(raw, "(%rip)") == NULL)
253 		return 0;
254 
255 	*addrp = strtoull(comment, &endptr, 16);
256 	name = strchr(endptr, '<');
257 	if (name == NULL)
258 		return -1;
259 
260 	name++;
261 
262 	t = strchr(name, '>');
263 	if (t == NULL)
264 		return 0;
265 
266 	*t = '\0';
267 	*namep = strdup(name);
268 	*t = '>';
269 
270 	return 0;
271 }
272 
/*
 * Parse a "lock"-prefixed line: split off the wrapped instruction and run
 * its own parser on the remaining operands. Deliberately always returns 0:
 * on any failure ops->locked.ops is left NULL and the line falls back to
 * raw printing in lock__scnprintf().
 */
static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map *map)
{
	ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
	if (ops->locked.ops == NULL)
		return 0;

	if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0)
		goto out_free_ops;

	ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name);

	if (ops->locked.ins.ops == NULL)
		goto out_free_ops;

	if (ops->locked.ins.ops->parse &&
	    ops->locked.ins.ops->parse(arch, ops->locked.ops, map) < 0)
		goto out_free_ops;

	return 0;

out_free_ops:
	zfree(&ops->locked.ops);
	return 0;
}
297 
/* Print "lock " followed by the wrapped instruction's own formatting;
 * fall back to the raw line when parsing failed. */
static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
			   struct ins_operands *ops)
{
	int printed;

	if (ops->locked.ins.ops == NULL)
		return ins__raw_scnprintf(ins, bf, size, ops);

	printed = scnprintf(bf, size, "%-6.6s ", ins->name);
	return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
					size - printed, ops->locked.ops);
}
310 
/* Free a lock-prefixed line's nested operands, delegating to the wrapped
 * instruction's own free handler when it has one. */
static void lock__delete(struct ins_operands *ops)
{
	struct ins *ins = &ops->locked.ins;

	if (ins->ops && ins->ops->free)
		ins->ops->free(ops->locked.ops);
	else
		ins__delete(ops->locked.ops);

	zfree(&ops->locked.ops);
	zfree(&ops->target.raw);
	zfree(&ops->target.name);
}
324 
/* Handler for "lock"-prefixed instructions (x86). */
static struct ins_ops lock_ops = {
	.free	   = lock__delete,
	.parse	   = lock__parse,
	.scnprintf = lock__scnprintf,
};
330 
/*
 * Parse "mov src,dst[ <comment_char> addr <sym>]" into source/target raw
 * strings and resolve RIP-relative operands from the trailing comment.
 * Returns 0 on success, -1 on parse or allocation failure.
 */
static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map *map __maybe_unused)
{
	char *s = strchr(ops->raw, ','), *target, *comment, prev;

	if (s == NULL)
		return -1;

	/* Temporarily split at the ',' to copy the source operand. */
	*s = '\0';
	ops->source.raw = strdup(ops->raw);
	*s = ',';

	if (ops->source.raw == NULL)
		return -1;

	target = ++s;
	comment = strchr(s, arch->objdump.comment_char);

	/* Point s at the last character of the target operand. */
	if (comment != NULL)
		s = comment - 1;
	else
		s = strchr(s, '\0') - 1;

	/* Trim trailing whitespace off the target operand.
	 * NOTE(review): isspace() on a plain char can misbehave for
	 * negative (non-ASCII) bytes -- assumed ASCII objdump output. */
	while (s > target && isspace(s[0]))
		--s;
	s++;
	prev = *s;
	*s = '\0';

	ops->target.raw = strdup(target);
	*s = prev;

	if (ops->target.raw == NULL)
		goto out_free_source;

	if (comment == NULL)
		return 0;

	/* Skip whitespace after the comment character. */
	while (comment[0] != '\0' && isspace(comment[0]))
		++comment;

	/* Fill in addr/name for whichever operand is RIP-relative. */
	comment__symbol(ops->source.raw, comment, &ops->source.addr, &ops->source.name);
	comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);

	return 0;

out_free_source:
	zfree(&ops->source.raw);
	return -1;
}
380 
/* Print "mov src,dst", preferring resolved symbol names over raw text. */
static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
			   struct ins_operands *ops)
{
	return scnprintf(bf, size, "%-6.6s %s,%s", ins->name,
			 ops->source.name ?: ops->source.raw,
			 ops->target.name ?: ops->target.raw);
}
388 
/* Handler for two-operand move-style instructions. */
static struct ins_ops mov_ops = {
	.parse	   = mov__parse,
	.scnprintf = mov__scnprintf,
};
393 
/*
 * Parse a single-operand (inc/dec-style) instruction: copy the operand up
 * to the first whitespace, then resolve an optional "# addr <sym>" comment.
 * Returns 0 on success, -1 on allocation failure.
 */
static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map *map __maybe_unused)
{
	char *target, *comment, *s, prev;

	target = s = ops->raw;

	/* Operand ends at the first whitespace character. */
	while (s[0] != '\0' && !isspace(s[0]))
		++s;
	prev = *s;
	*s = '\0';

	ops->target.raw = strdup(target);
	*s = prev;

	if (ops->target.raw == NULL)
		return -1;

	comment = strchr(s, '#');
	if (comment == NULL)
		return 0;

	while (comment[0] != '\0' && isspace(comment[0]))
		++comment;

	/* Resolve RIP-relative target from the comment, if any. */
	comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);

	return 0;
}
422 
/* Print a single-operand instruction, preferring the resolved name. */
static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
			   struct ins_operands *ops)
{
	return scnprintf(bf, size, "%-6.6s %s", ins->name,
			 ops->target.name ?: ops->target.raw);
}
429 
/* Handler for single-operand (inc/dec-style) instructions. */
static struct ins_ops dec_ops = {
	.parse	   = dec__parse,
	.scnprintf = dec__scnprintf,
};
434 
/* NOPs print as a bare "nop" mnemonic with operands suppressed. */
static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
			  struct ins_operands *ops __maybe_unused)
{
	return scnprintf(bf, size, "%-6.6s", "nop");
}
440 
/* Handler for nop variants; also used by delete_last_nop(). */
static struct ins_ops nop_ops = {
	.scnprintf = nop__scnprintf,
};
444 
/* Handler for return instructions; prints raw, identity used by ins__is_ret(). */
static struct ins_ops ret_ops = {
	.scnprintf = ins__raw_scnprintf,
};
448 
/* True if the instruction was classified as a return by ins__find(). */
bool ins__is_ret(const struct ins *ins)
{
	return ins->ops == &ret_ops;
}
453 
454 static int ins__key_cmp(const void *name, const void *insp)
455 {
456 	const struct ins *ins = insp;
457 
458 	return strcmp(name, ins->name);
459 }
460 
461 static int ins__cmp(const void *a, const void *b)
462 {
463 	const struct ins *ia = a;
464 	const struct ins *ib = b;
465 
466 	return strcmp(ia->name, ib->name);
467 }
468 
/* Sort arch's instruction table by name so __ins__find() can bsearch(). */
static void ins__sort(struct arch *arch)
{
	const int nmemb = arch->nr_instructions;

	qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp);
}
475 
/*
 * Binary-search arch's instruction table for 'name', lazily sorting the
 * table on the first lookup. Returns the matching ops or NULL.
 */
static struct ins_ops *__ins__find(struct arch *arch, const char *name)
{
	struct ins *ins;
	const int nmemb = arch->nr_instructions;

	if (!arch->sorted_instructions) {
		ins__sort(arch);
		arch->sorted_instructions = true;
	}

	ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
	return ins ? ins->ops : NULL;
}
489 
/* Look up ops for a mnemonic; on a miss, let the arch dynamically
 * classify and cache it via associate_instruction_ops, if provided. */
static struct ins_ops *ins__find(struct arch *arch, const char *name)
{
	struct ins_ops *ops = __ins__find(arch, name);

	if (!ops && arch->associate_instruction_ops)
		ops = arch->associate_instruction_ops(arch, name);

	return ops;
}
499 
/* bsearch() comparator: key is an architecture name string. */
static int arch__key_cmp(const void *name, const void *archp)
{
	const struct arch *arch = archp;

	return strcmp(name, arch->name);
}
506 
/* qsort() comparator ordering struct arch entries by name. */
static int arch__cmp(const void *a, const void *b)
{
	const struct arch *aa = a;
	const struct arch *ab = b;

	return strcmp(aa->name, ab->name);
}
514 
/* Sort the architectures[] table by name for arch__find()'s bsearch(). */
static void arch__sort(void)
{
	const int nmemb = ARRAY_SIZE(architectures);

	qsort(architectures, nmemb, sizeof(struct arch), arch__cmp);
}
521 
/*
 * Find the struct arch for 'name', sorting the table once on first use.
 * Returns NULL for unknown architectures.
 * NOTE(review): the one-shot sort is not thread-safe -- assumed to run
 * single-threaded; confirm against callers.
 */
static struct arch *arch__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(architectures);
	static bool sorted;

	if (!sorted) {
		arch__sort();
		sorted = true;
	}

	return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
}
534 
/*
 * Allocate sym's per-event sample histograms: one sym_hist (header plus
 * one u64 counter per byte of the symbol) per event, in a single
 * allocation. Returns 0 on success, -1 on overflow or allocation failure.
 */
int symbol__alloc_hist(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);
	const size_t size = symbol__size(sym);
	size_t sizeof_sym_hist;

	/* Check for overflow when calculating sizeof_sym_hist */
	if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(u64))
		return -1;

	sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64));

	/* Check for overflow in zalloc argument */
	/* NOTE(review): divides by symbol_conf.nr_events -- assumes it is
	 * non-zero by the time annotation starts; confirm against callers. */
	if (sizeof_sym_hist > (SIZE_MAX - sizeof(*notes->src))
				/ symbol_conf.nr_events)
		return -1;

	notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
	if (notes->src == NULL)
		return -1;
	notes->src->sizeof_sym_hist = sizeof_sym_hist;
	notes->src->nr_histograms   = symbol_conf.nr_events;
	INIT_LIST_HEAD(&notes->src->source);
	return 0;
}
560 
/* The cycles histogram is lazily allocated: one cyc_hist per byte of the
 * symbol, zero-initialized. Returns 0 on success, -1 on failure. */
static int symbol__alloc_hist_cycles(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);
	const size_t size = symbol__size(sym);

	notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist));
	if (notes->src->cycles_hist == NULL)
		return -1;
	return 0;
}
572 
/* Zero all of sym's sample histograms (and the cycles histogram if it was
 * allocated), under the annotation lock. */
void symbol__annotate_zero_histograms(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);

	pthread_mutex_lock(&notes->lock);
	if (notes->src != NULL) {
		memset(notes->src->histograms, 0,
		       notes->src->nr_histograms * notes->src->sizeof_sym_hist);
		if (notes->src->cycles_hist)
			memset(notes->src->cycles_hist, 0,
				symbol__size(sym) * sizeof(struct cyc_hist));
	}
	pthread_mutex_unlock(&notes->lock);
}
587 
/*
 * Fold one basic-block cycles observation into the cycles histogram entry
 * at 'offset' (symbol-relative end of the block); 'start' is the
 * symbol-relative block start, valid only when have_start is set.
 * Always returns 0.
 */
static int __symbol__account_cycles(struct annotation *notes,
				    u64 start,
				    unsigned offset, unsigned cycles,
				    unsigned have_start)
{
	struct cyc_hist *ch;

	ch = notes->src->cycles_hist;
	/*
	 * For now we can only account one basic block per
	 * final jump. But multiple could be overlapping.
	 * Always account the longest one. So when
	 * a shorter one has been already seen throw it away.
	 *
	 * We separately always account the full cycles.
	 */
	ch[offset].num_aggr++;
	ch[offset].cycles_aggr += cycles;

	if (!have_start && ch[offset].have_start)
		return 0;
	if (ch[offset].num) {
		if (have_start && (!ch[offset].have_start ||
				   ch[offset].start > start)) {
			/* Longer block seen: restart accounting, count resets. */
			ch[offset].have_start = 0;
			ch[offset].cycles = 0;
			ch[offset].num = 0;
			if (ch[offset].reset < 0xffff)
				ch[offset].reset++;
		} else if (have_start &&
			   ch[offset].start < start)
			/* Shorter than what we already track: ignore. */
			return 0;
	}
	ch[offset].have_start = have_start;
	ch[offset].start = start;
	ch[offset].cycles += cycles;
	ch[offset].num++;
	return 0;
}
627 
/*
 * Bump the per-address hit counter for event 'evidx' at 'addr' (which must
 * fall inside [sym->start, sym->end)). Returns 0 or -ERANGE.
 */
static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
				      struct annotation *notes, int evidx, u64 addr)
{
	unsigned offset;
	struct sym_hist *h;

	pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));

	if (addr < sym->start || addr >= sym->end) {
		pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
		       __func__, __LINE__, sym->name, sym->start, addr, sym->end);
		return -ERANGE;
	}

	offset = addr - sym->start;
	h = annotation__histogram(notes, evidx);
	h->sum++;
	h->addr[offset]++;

	pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
		  ", evidx=%d] => %" PRIu64 "\n", sym->start, sym->name,
		  addr, addr - sym->start, evidx, h->addr[offset]);
	return 0;
}
652 
/* Return sym's annotation, lazily allocating the sample histograms and,
 * when 'cycles' is set, the cycles histogram. NULL on allocation failure. */
static struct annotation *symbol__get_annotation(struct symbol *sym, bool cycles)
{
	struct annotation *notes = symbol__annotation(sym);

	if (notes->src == NULL) {
		if (symbol__alloc_hist(sym) < 0)
			return NULL;
	}
	if (!notes->src->cycles_hist && cycles) {
		if (symbol__alloc_hist_cycles(sym) < 0)
			return NULL;
	}
	return notes;
}
667 
/* Record one sample for (sym, evidx, addr); a NULL sym is a silent no-op.
 * Returns 0, -ENOMEM, or -ERANGE. */
static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
				    int evidx, u64 addr)
{
	struct annotation *notes;

	if (sym == NULL)
		return 0;
	notes = symbol__get_annotation(sym, false);
	if (notes == NULL)
		return -ENOMEM;
	return __symbol__inc_addr_samples(sym, map, notes, evidx, addr);
}
680 
/*
 * Account a cycles measurement for the block ending at 'addr', optionally
 * starting at 'start' (0 = unknown). Both addresses must lie inside sym;
 * a start at or past addr is discarded. Returns 0, -ENOMEM or -ERANGE.
 */
static int symbol__account_cycles(u64 addr, u64 start,
				  struct symbol *sym, unsigned cycles)
{
	struct annotation *notes;
	unsigned offset;

	if (sym == NULL)
		return 0;
	notes = symbol__get_annotation(sym, true);
	if (notes == NULL)
		return -ENOMEM;
	if (addr < sym->start || addr >= sym->end)
		return -ERANGE;

	if (start) {
		if (start < sym->start || start >= sym->end)
			return -ERANGE;
		if (start >= addr)
			start = 0;	/* inverted block: treat start as unknown */
	}
	offset = addr - sym->start;
	return __symbol__account_cycles(notes,
					start ? start - sym->start : 0,
					offset, cycles,
					!!start);
}
707 
/*
 * Account a branch's cycles against the symbol at its target; 'start' is
 * the (optional) branch source. Zero-cycle entries are ignored.
 */
int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
				    struct addr_map_symbol *start,
				    unsigned cycles)
{
	u64 saddr = 0;
	int err;

	if (!cycles)
		return 0;

	/*
	 * Only set start when IPC can be computed. We can only
	 * compute it when the basic block is completely in a single
	 * function.
	 * Special case the case when the jump is elsewhere, but
	 * it starts on the function start.
	 */
	if (start &&
		(start->sym == ams->sym ||
		 (ams->sym &&
		   start->addr == ams->sym->start + ams->map->start)))
		saddr = start->al_addr;
	if (saddr == 0)
		pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
			ams->addr,
			start ? start->addr : 0,
			ams->sym ? ams->sym->start + ams->map->start : 0,
			saddr);
	err = symbol__account_cycles(ams->al_addr, saddr, ams->sym, cycles);
	if (err)
		pr_debug2("account_cycles failed %d\n", err);
	return err;
}
741 
/* Record one sample for the symbol addressed by 'ams'. */
int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx)
{
	return symbol__inc_addr_samples(ams->sym, ams->map, evidx, ams->al_addr);
}
746 
/* Record one sample at 'ip' against the hist entry's symbol. */
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}
751 
/* Classify dl's mnemonic and run the kind-specific operand parser; on
 * parse failure the ops are reset so the line is printed raw. */
static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map *map)
{
	dl->ins.ops = ins__find(arch, dl->ins.name);

	if (!dl->ins.ops)
		return;

	if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, map) < 0)
		dl->ins.ops = NULL;
}
762 
/*
 * Split a disassembled line into the mnemonic (heap-allocated in *namep)
 * and its raw operands (*rawp pointing into 'line', past the mnemonic).
 * Returns 0 on success, -1 on an empty line or allocation failure.
 */
static int disasm_line__parse(char *line, const char **namep, char **rawp)
{
	char *name = line, tmp;

	while (isspace(name[0]))
		++name;

	if (name[0] == '\0')
		return -1;

	*rawp = name + 1;

	while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
		++*rawp;

	/* Temporarily NUL-terminate the mnemonic so it can be strdup()ed. */
	tmp = (*rawp)[0];
	(*rawp)[0] = '\0';
	*namep = strdup(name);

	if (*namep == NULL)
		goto out_restore;

	(*rawp)[0] = tmp;

	/* Skip whitespace between the mnemonic and the operands. */
	if ((*rawp)[0] != '\0') {
		(*rawp)++;
		while (isspace((*rawp)[0]))
			++(*rawp);
	}

	return 0;

out_restore:
	/*
	 * BUG FIX: the old error path did free((void *)namep), passing the
	 * *address of the caller's pointer* to free() -- not a heap block --
	 * corrupting the heap. strdup() failed, so there is nothing to
	 * free; just undo the temporary termination and report the error.
	 */
	(*rawp)[0] = tmp;
	*namep = NULL;
	return -1;
}
800 
/*
 * Allocate a disasm_line (with 'privsize' trailing private bytes), copy
 * 'line' into it and, for real disassembly lines (offset != -1), parse the
 * mnemonic/operands and attach instruction ops.
 * Returns NULL on allocation or parse failure.
 */
static struct disasm_line *disasm_line__new(s64 offset, char *line,
					    size_t privsize, int line_nr,
					    struct arch *arch,
					    struct map *map)
{
	struct disasm_line *dl = zalloc(sizeof(*dl) + privsize);

	if (dl != NULL) {
		dl->offset = offset;
		dl->line = strdup(line);
		dl->line_nr = line_nr;
		if (dl->line == NULL)
			goto out_delete;

		/* offset == -1 marks non-disassembly (source/header) lines */
		if (offset != -1) {
			if (disasm_line__parse(dl->line, &dl->ins.name, &dl->ops.raw) < 0)
				goto out_free_line;

			disasm_line__init_ins(dl, arch, map);
		}
	}

	return dl;

out_free_line:
	zfree(&dl->line);
out_delete:
	free(dl);
	return NULL;
}
831 
/* Free a disasm_line and everything it owns, delegating operand cleanup
 * to the instruction kind's free handler when it has one. */
void disasm_line__free(struct disasm_line *dl)
{
	zfree(&dl->line);
	if (dl->ins.ops && dl->ins.ops->free)
		dl->ins.ops->free(&dl->ops);
	else
		ins__delete(&dl->ops);
	/* ins.name is const char * but heap-allocated by disasm_line__parse() */
	free((void *)dl->ins.name);
	dl->ins.name = NULL;
	free(dl);
}
843 
/* Format a line into bf; 'raw' forces the unparsed form, as does a line
 * whose instruction could not be classified. */
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
{
	if (raw || !dl->ins.ops)
		return scnprintf(bf, size, "%-6.6s %s", dl->ins.name, dl->ops.raw);

	return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
}
851 
/* Append a parsed line to the annotation's source list. */
static void disasm__add(struct list_head *head, struct disasm_line *line)
{
	list_add_tail(&line->node, head);
}
856 
/* Return the next line after 'pos' that is actual disassembly
 * (offset >= 0), skipping source/header lines; NULL if none remain. */
struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos)
{
	list_for_each_entry_continue(pos, head, node)
		if (pos->offset >= 0)
			return pos;

	return NULL;
}
865 
/*
 * Sum event 'evidx' hits over [offset, end) and return the percentage of
 * the symbol's total; *nr_samples gets the raw hit count and, when source
 * lines are available, *path is filled with the first source path seen.
 */
double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
			    s64 end, const char **path, u64 *nr_samples)
{
	struct source_line *src_line = notes->src->lines;
	double percent = 0.0;
	*nr_samples = 0;

	if (src_line) {
		/* source_line has a flexible samples[] tail, hence the
		 * hand-computed stride for indexing */
		size_t sizeof_src_line = sizeof(*src_line) +
				sizeof(src_line->samples) * (src_line->nr_pcnt - 1);

		while (offset < end) {
			src_line = (void *)notes->src->lines +
					(sizeof_src_line * offset);

			if (*path == NULL)
				*path = src_line->path;

			percent += src_line->samples[evidx].percent;
			*nr_samples += src_line->samples[evidx].nr;
			offset++;
		}
	} else {
		/* no source info: aggregate straight from the histogram */
		struct sym_hist *h = annotation__histogram(notes, evidx);
		unsigned int hits = 0;

		while (offset < end)
			hits += h->addr[offset++];

		if (h->sum) {
			*nr_samples = hits;
			percent = 100.0 * hits / h->sum;
		}
	}

	return percent;
}
903 
904 static const char *annotate__address_color(struct block_range *br)
905 {
906 	double cov = block_range__coverage(br);
907 
908 	if (cov >= 0) {
909 		/* mark red for >75% coverage */
910 		if (cov > 0.75)
911 			return PERF_COLOR_RED;
912 
913 		/* mark dull for <1% coverage */
914 		if (cov < 0.01)
915 			return PERF_COLOR_NORMAL;
916 	}
917 
918 	return PERF_COLOR_MAGENTA;
919 }
920 
921 static const char *annotate__asm_color(struct block_range *br)
922 {
923 	double cov = block_range__coverage(br);
924 
925 	if (cov >= 0) {
926 		/* mark dull for <1% coverage */
927 		if (cov < 0.01)
928 			return PERF_COLOR_NORMAL;
929 	}
930 
931 	return PERF_COLOR_BLUE;
932 }
933 
/*
 * Append branch-flow statistics as a "#" comment after a disassembly line:
 * the join percentage when 'addr' is a branch target, and the leave/
 * prediction percentages when it is a branch source.
 */
static void annotate__branch_printf(struct block_range *br, u64 addr)
{
	bool emit_comment = true;

	if (!br)
		return;

#if 1
	if (br->is_target && br->start == addr) {
		struct block_range *branch = br;
		double p;

		/*
		 * Find matching branch to our target.
		 */
		while (!branch->is_branch)
			branch = block_range__next(branch);

		p = 100 *(double)br->entry / branch->coverage;

		if (p > 0.1) {
			/* emit the "#" separator only once per line */
			if (emit_comment) {
				emit_comment = false;
				printf("\t#");
			}

			/*
			 * The percentage of coverage joined at this target in relation
			 * to the next branch.
			 */
			printf(" +%.2f%%", p);
		}
	}
#endif
	if (br->is_branch && br->end == addr) {
		double p = 100*(double)br->taken / br->coverage;

		if (p > 0.1) {
			if (emit_comment) {
				emit_comment = false;
				printf("\t#");
			}

			/*
			 * The percentage of coverage leaving at this branch, and
			 * its prediction ratio.
			 */
			printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred  / br->taken);
		}
	}
}
985 
986 
987 static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start,
988 		      struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
989 		      int max_lines, struct disasm_line *queue)
990 {
991 	static const char *prev_line;
992 	static const char *prev_color;
993 
994 	if (dl->offset != -1) {
995 		const char *path = NULL;
996 		u64 nr_samples;
997 		double percent, max_percent = 0.0;
998 		double *ppercents = &percent;
999 		u64 *psamples = &nr_samples;
1000 		int i, nr_percent = 1;
1001 		const char *color;
1002 		struct annotation *notes = symbol__annotation(sym);
1003 		s64 offset = dl->offset;
1004 		const u64 addr = start + offset;
1005 		struct disasm_line *next;
1006 		struct block_range *br;
1007 
1008 		next = disasm__get_next_ip_line(&notes->src->source, dl);
1009 
1010 		if (perf_evsel__is_group_event(evsel)) {
1011 			nr_percent = evsel->nr_members;
1012 			ppercents = calloc(nr_percent, sizeof(double));
1013 			psamples = calloc(nr_percent, sizeof(u64));
1014 			if (ppercents == NULL || psamples == NULL) {
1015 				return -1;
1016 			}
1017 		}
1018 
1019 		for (i = 0; i < nr_percent; i++) {
1020 			percent = disasm__calc_percent(notes,
1021 					notes->src->lines ? i : evsel->idx + i,
1022 					offset,
1023 					next ? next->offset : (s64) len,
1024 					&path, &nr_samples);
1025 
1026 			ppercents[i] = percent;
1027 			psamples[i] = nr_samples;
1028 			if (percent > max_percent)
1029 				max_percent = percent;
1030 		}
1031 
1032 		if (max_percent < min_pcnt)
1033 			return -1;
1034 
1035 		if (max_lines && printed >= max_lines)
1036 			return 1;
1037 
1038 		if (queue != NULL) {
1039 			list_for_each_entry_from(queue, &notes->src->source, node) {
1040 				if (queue == dl)
1041 					break;
1042 				disasm_line__print(queue, sym, start, evsel, len,
1043 						    0, 0, 1, NULL);
1044 			}
1045 		}
1046 
1047 		color = get_percent_color(max_percent);
1048 
1049 		/*
1050 		 * Also color the filename and line if needed, with
1051 		 * the same color than the percentage. Don't print it
1052 		 * twice for close colored addr with the same filename:line
1053 		 */
1054 		if (path) {
1055 			if (!prev_line || strcmp(prev_line, path)
1056 				       || color != prev_color) {
1057 				color_fprintf(stdout, color, " %s", path);
1058 				prev_line = path;
1059 				prev_color = color;
1060 			}
1061 		}
1062 
1063 		for (i = 0; i < nr_percent; i++) {
1064 			percent = ppercents[i];
1065 			nr_samples = psamples[i];
1066 			color = get_percent_color(percent);
1067 
1068 			if (symbol_conf.show_total_period)
1069 				color_fprintf(stdout, color, " %7" PRIu64,
1070 					      nr_samples);
1071 			else
1072 				color_fprintf(stdout, color, " %7.2f", percent);
1073 		}
1074 
1075 		printf(" :	");
1076 
1077 		br = block_range__find(addr);
1078 		color_fprintf(stdout, annotate__address_color(br), "  %" PRIx64 ":", addr);
1079 		color_fprintf(stdout, annotate__asm_color(br), "%s", dl->line);
1080 		annotate__branch_printf(br, addr);
1081 		printf("\n");
1082 
1083 		if (ppercents != &percent)
1084 			free(ppercents);
1085 
1086 		if (psamples != &nr_samples)
1087 			free(psamples);
1088 
1089 	} else if (max_lines && printed >= max_lines)
1090 		return 1;
1091 	else {
1092 		int width = 8;
1093 
1094 		if (queue)
1095 			return -1;
1096 
1097 		if (perf_evsel__is_group_event(evsel))
1098 			width *= evsel->nr_members;
1099 
1100 		if (!*dl->line)
1101 			printf(" %*s:\n", width, " ");
1102 		else
1103 			printf(" %*s:	%s\n", width, " ", dl->line);
1104 	}
1105 
1106 	return 0;
1107 }
1108 
1109 /*
1110  * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw)
1111  * which looks like following
1112  *
1113  *  0000000000415500 <_init>:
1114  *    415500:       sub    $0x8,%rsp
1115  *    415504:       mov    0x2f5ad5(%rip),%rax        # 70afe0 <_DYNAMIC+0x2f8>
1116  *    41550b:       test   %rax,%rax
1117  *    41550e:       je     415515 <_init+0x15>
1118  *    415510:       callq  416e70 <__gmon_start__@plt>
1119  *    415515:       add    $0x8,%rsp
1120  *    415519:       retq
1121  *
1122  * it will be parsed and saved into struct disasm_line as
1123  *  <offset>       <name>  <ops.raw>
1124  *
1125  * The offset will be a relative offset from the start of the symbol and -1
1126  * means that it's not a disassembly line so should be treated differently.
1127  * The ops.raw part will be parsed further according to type of the instruction.
1128  */
1129 static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
1130 				      struct arch *arch,
1131 				      FILE *file, size_t privsize,
1132 				      int *line_nr)
1133 {
1134 	struct annotation *notes = symbol__annotation(sym);
1135 	struct disasm_line *dl;
1136 	char *line = NULL, *parsed_line, *tmp, *tmp2, *c;
1137 	size_t line_len;
1138 	s64 line_ip, offset = -1;
1139 	regmatch_t match[2];
1140 
1141 	if (getline(&line, &line_len, file) < 0)
1142 		return -1;
1143 
1144 	if (!line)
1145 		return -1;
1146 
1147 	while (line_len != 0 && isspace(line[line_len - 1]))
1148 		line[--line_len] = '\0';
1149 
1150 	c = strchr(line, '\n');
1151 	if (c)
1152 		*c = 0;
1153 
1154 	line_ip = -1;
1155 	parsed_line = line;
1156 
1157 	/* /filename:linenr ? Save line number and ignore. */
1158 	if (regexec(&file_lineno, line, 2, match, 0) == 0) {
1159 		*line_nr = atoi(line + match[1].rm_so);
1160 		return 0;
1161 	}
1162 
1163 	/*
1164 	 * Strip leading spaces:
1165 	 */
1166 	tmp = line;
1167 	while (*tmp) {
1168 		if (*tmp != ' ')
1169 			break;
1170 		tmp++;
1171 	}
1172 
1173 	if (*tmp) {
1174 		/*
1175 		 * Parse hexa addresses followed by ':'
1176 		 */
1177 		line_ip = strtoull(tmp, &tmp2, 16);
1178 		if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
1179 			line_ip = -1;
1180 	}
1181 
1182 	if (line_ip != -1) {
1183 		u64 start = map__rip_2objdump(map, sym->start),
1184 		    end = map__rip_2objdump(map, sym->end);
1185 
1186 		offset = line_ip - start;
1187 		if ((u64)line_ip < start || (u64)line_ip >= end)
1188 			offset = -1;
1189 		else
1190 			parsed_line = tmp2 + 1;
1191 	}
1192 
1193 	dl = disasm_line__new(offset, parsed_line, privsize, *line_nr, arch, map);
1194 	free(line);
1195 	(*line_nr)++;
1196 
1197 	if (dl == NULL)
1198 		return -1;
1199 
1200 	if (dl->ops.target.offset == UINT64_MAX)
1201 		dl->ops.target.offset = dl->ops.target.addr -
1202 					map__rip_2objdump(map, sym->start);
1203 
1204 	/* kcore has no symbols, so add the call target name */
1205 	if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.name) {
1206 		struct addr_map_symbol target = {
1207 			.map = map,
1208 			.addr = dl->ops.target.addr,
1209 		};
1210 
1211 		if (!map_groups__find_ams(&target) &&
1212 		    target.sym->start == target.al_addr)
1213 			dl->ops.target.name = strdup(target.sym->name);
1214 	}
1215 
1216 	disasm__add(&notes->src->source, dl);
1217 
1218 	return 0;
1219 }
1220 
/*
 * Compile, at program startup, the pattern used by
 * symbol__parse_objdump_line() to recognize objdump "-l" source location
 * lines of the form "/path/file.c:123"; capture group 1 is the line number.
 *
 * NOTE(review): the regcomp() return value is ignored — the pattern is a
 * string constant so compilation should never fail, but verify whether a
 * failure here should be reported rather than leaving file_lineno unusable.
 */
static __attribute__((constructor)) void symbol__init_regexpr(void)
{
	regcomp(&file_lineno, "^/[^:]+:([0-9]+)", REG_EXTENDED);
}
1225 
1226 static void delete_last_nop(struct symbol *sym)
1227 {
1228 	struct annotation *notes = symbol__annotation(sym);
1229 	struct list_head *list = &notes->src->source;
1230 	struct disasm_line *dl;
1231 
1232 	while (!list_empty(list)) {
1233 		dl = list_entry(list->prev, struct disasm_line, node);
1234 
1235 		if (dl->ins.ops) {
1236 			if (dl->ins.ops != &nop_ops)
1237 				return;
1238 		} else {
1239 			if (!strstr(dl->line, " nop ") &&
1240 			    !strstr(dl->line, " nopl ") &&
1241 			    !strstr(dl->line, " nopw "))
1242 				return;
1243 		}
1244 
1245 		list_del(&dl->node);
1246 		disasm_line__free(dl);
1247 	}
1248 }
1249 
1250 int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *map,
1251 			      int errnum, char *buf, size_t buflen)
1252 {
1253 	struct dso *dso = map->dso;
1254 
1255 	BUG_ON(buflen == 0);
1256 
1257 	if (errnum >= 0) {
1258 		str_error_r(errnum, buf, buflen);
1259 		return 0;
1260 	}
1261 
1262 	switch (errnum) {
1263 	case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX: {
1264 		char bf[SBUILD_ID_SIZE + 15] = " with build id ";
1265 		char *build_id_msg = NULL;
1266 
1267 		if (dso->has_build_id) {
1268 			build_id__sprintf(dso->build_id,
1269 					  sizeof(dso->build_id), bf + 15);
1270 			build_id_msg = bf;
1271 		}
1272 		scnprintf(buf, buflen,
1273 			  "No vmlinux file%s\nwas found in the path.\n\n"
1274 			  "Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n"
1275 			  "Please use:\n\n"
1276 			  "  perf buildid-cache -vu vmlinux\n\n"
1277 			  "or:\n\n"
1278 			  "  --vmlinux vmlinux\n", build_id_msg ?: "");
1279 	}
1280 		break;
1281 	default:
1282 		scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
1283 		break;
1284 	}
1285 
1286 	return 0;
1287 }
1288 
1289 static int dso__disassemble_filename(struct dso *dso, char *filename, size_t filename_size)
1290 {
1291 	char linkname[PATH_MAX];
1292 	char *build_id_filename;
1293 
1294 	if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
1295 	    !dso__is_kcore(dso))
1296 		return SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX;
1297 
1298 	build_id_filename = dso__build_id_filename(dso, NULL, 0);
1299 	if (build_id_filename) {
1300 		__symbol__join_symfs(filename, filename_size, build_id_filename);
1301 		free(build_id_filename);
1302 	} else {
1303 		if (dso->has_build_id)
1304 			return ENOMEM;
1305 		goto fallback;
1306 	}
1307 
1308 	if (dso__is_kcore(dso) ||
1309 	    readlink(filename, linkname, sizeof(linkname)) < 0 ||
1310 	    strstr(linkname, DSO__NAME_KALLSYMS) ||
1311 	    access(filename, R_OK)) {
1312 fallback:
1313 		/*
1314 		 * If we don't have build-ids or the build-id file isn't in the
1315 		 * cache, or is just a kallsyms file, well, lets hope that this
1316 		 * DSO is the same as when 'perf record' ran.
1317 		 */
1318 		__symbol__join_symfs(filename, filename_size, dso->long_name);
1319 	}
1320 
1321 	return 0;
1322 }
1323 
/*
 * Normalize an architecture name; a NULL argument means we are annotating
 * locally, so use the running machine's architecture from uname(2).
 * Returns NULL if uname() fails.
 */
static const char *annotate__norm_arch(const char *arch_name)
{
	struct utsname uts;
	const char *name = arch_name;

	if (name == NULL) {
		if (uname(&uts) < 0)
			return NULL;
		name = uts.machine;
	}

	return normalize_arch((char *)name);
}
1335 
/*
 * Disassemble @sym by running objdump over the DSO backing @map and parsing
 * its output into the symbol's annotation line list.
 *
 * Steps: resolve the file to disassemble, normalize/look up the arch,
 * special-case kcore (extract the byte range to a temp file) and compressed
 * kernel modules (decompress to a temp file), then fork a shell running
 * objdump with its stdout piped back to us and feed each line through
 * symbol__parse_objdump_line().
 *
 * Returns 0 on success, a positive errno / SYMBOL_ANNOTATE_ERRNO__* value
 * from the filename resolution step, -ENOTSUP for an unknown arch, or -1
 * on any later failure.
 */
int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_name, size_t privsize)
{
	struct dso *dso = map->dso;
	char command[PATH_MAX * 2];
	struct arch *arch = NULL;
	FILE *file;
	char symfs_filename[PATH_MAX];
	struct kcore_extract kce;
	bool delete_extract = false;
	int stdout_fd[2];
	int lineno = 0;
	int nline;
	pid_t pid;
	int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename));

	if (err)
		return err;

	arch_name = annotate__norm_arch(arch_name);
	if (!arch_name)
		return -1;

	arch = arch__find(arch_name);
	if (arch == NULL)
		return -ENOTSUP;

	/* Lazily set up per-arch private state (e.g. instruction tables). */
	if (arch->init) {
		err = arch->init(arch);
		if (err) {
			pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
			return err;
		}
	}

	pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
		 symfs_filename, sym->name, map->unmap_ip(map, sym->start),
		 map->unmap_ip(map, sym->end));

	pr_debug("annotating [%p] %30s : [%p] %30s\n",
		 dso, dso->long_name, sym, sym->name);

	if (dso__is_kcore(dso)) {
		/*
		 * kcore: extract just this symbol's bytes into a temporary
		 * file that objdump can read, deleted again on the way out.
		 */
		kce.kcore_filename = symfs_filename;
		kce.addr = map__rip_2objdump(map, sym->start);
		kce.offs = sym->start;
		kce.len = sym->end - sym->start;
		if (!kcore_extract__create(&kce)) {
			delete_extract = true;
			strlcpy(symfs_filename, kce.extract_filename,
				sizeof(symfs_filename));
		}
	} else if (dso__needs_decompress(dso)) {
		/* Compressed kernel module: decompress to a temp file first. */
		char tmp[PATH_MAX];
		struct kmod_path m;
		int fd;
		bool ret;

		if (kmod_path__parse_ext(&m, symfs_filename))
			goto out;

		snprintf(tmp, PATH_MAX, "/tmp/perf-kmod-XXXXXX");

		fd = mkstemp(tmp);
		if (fd < 0) {
			free(m.ext);
			goto out;
		}

		/* NB: decompress_to_file() returns true on *failure* here. */
		ret = decompress_to_file(m.ext, symfs_filename, fd);

		if (ret)
			pr_err("Cannot decompress %s %s\n", m.ext, symfs_filename);

		free(m.ext);
		close(fd);

		if (!ret)
			goto out;

		strcpy(symfs_filename, tmp);
	}

	/*
	 * Build the objdump pipeline: restrict to the symbol's address
	 * range, strip the symbol's own header line via grep, and expand
	 * tabs so column positions are stable for the parser.
	 */
	snprintf(command, sizeof(command),
		 "%s %s%s --start-address=0x%016" PRIx64
		 " --stop-address=0x%016" PRIx64
		 " -l -d %s %s -C %s 2>/dev/null|grep -v %s|expand",
		 objdump_path ? objdump_path : "objdump",
		 disassembler_style ? "-M " : "",
		 disassembler_style ? disassembler_style : "",
		 map__rip_2objdump(map, sym->start),
		 map__rip_2objdump(map, sym->end),
		 symbol_conf.annotate_asm_raw ? "" : "--no-show-raw",
		 symbol_conf.annotate_src ? "-S" : "",
		 symfs_filename, symfs_filename);

	pr_debug("Executing: %s\n", command);

	err = -1;
	if (pipe(stdout_fd) < 0) {
		pr_err("Failure creating the pipe to run %s\n", command);
		goto out_remove_tmp;
	}

	pid = fork();
	if (pid < 0) {
		pr_err("Failure forking to run %s\n", command);
		goto out_close_stdout;
	}

	if (pid == 0) {
		/* Child: wire the pipe's write end to stdout and exec. */
		close(stdout_fd[0]);
		dup2(stdout_fd[1], 1);
		close(stdout_fd[1]);
		execl("/bin/sh", "sh", "-c", command, NULL);
		perror(command);
		exit(-1);
	}

	/*
	 * NOTE(review): the child is never waited on, so it is reaped only
	 * implicitly — confirm whether a waitpid() is wanted to avoid
	 * zombies when annotating many symbols.
	 */
	close(stdout_fd[1]);

	file = fdopen(stdout_fd[0], "r");
	if (!file) {
		pr_err("Failure creating FILE stream for %s\n", command);
		/*
		 * If we were using debug info should retry with
		 * original binary.
		 */
		goto out_remove_tmp;
	}

	nline = 0;
	while (!feof(file)) {
		if (symbol__parse_objdump_line(sym, map, arch, file, privsize,
			    &lineno) < 0)
			break;
		nline++;
	}

	if (nline == 0)
		pr_err("No output from %s\n", command);

	/*
	 * kallsyms does not have symbol sizes so there may a nop at the end.
	 * Remove it.
	 */
	if (dso__is_kcore(dso))
		delete_last_nop(sym);

	fclose(file);
	err = 0;
out_remove_tmp:
	close(stdout_fd[0]);

	if (dso__needs_decompress(dso))
		unlink(symfs_filename);

	if (delete_extract)
		kcore_extract__delete(&kce);
out:
	return err;

out_close_stdout:
	close(stdout_fd[1]);
	goto out_remove_tmp;
}
1501 
1502 static void insert_source_line(struct rb_root *root, struct source_line *src_line)
1503 {
1504 	struct source_line *iter;
1505 	struct rb_node **p = &root->rb_node;
1506 	struct rb_node *parent = NULL;
1507 	int i, ret;
1508 
1509 	while (*p != NULL) {
1510 		parent = *p;
1511 		iter = rb_entry(parent, struct source_line, node);
1512 
1513 		ret = strcmp(iter->path, src_line->path);
1514 		if (ret == 0) {
1515 			for (i = 0; i < src_line->nr_pcnt; i++)
1516 				iter->samples[i].percent_sum += src_line->samples[i].percent;
1517 			return;
1518 		}
1519 
1520 		if (ret < 0)
1521 			p = &(*p)->rb_left;
1522 		else
1523 			p = &(*p)->rb_right;
1524 	}
1525 
1526 	for (i = 0; i < src_line->nr_pcnt; i++)
1527 		src_line->samples[i].percent_sum = src_line->samples[i].percent;
1528 
1529 	rb_link_node(&src_line->node, parent, p);
1530 	rb_insert_color(&src_line->node, root);
1531 }
1532 
1533 static int cmp_source_line(struct source_line *a, struct source_line *b)
1534 {
1535 	int i;
1536 
1537 	for (i = 0; i < a->nr_pcnt; i++) {
1538 		if (a->samples[i].percent_sum == b->samples[i].percent_sum)
1539 			continue;
1540 		return a->samples[i].percent_sum > b->samples[i].percent_sum;
1541 	}
1542 
1543 	return 0;
1544 }
1545 
1546 static void __resort_source_line(struct rb_root *root, struct source_line *src_line)
1547 {
1548 	struct source_line *iter;
1549 	struct rb_node **p = &root->rb_node;
1550 	struct rb_node *parent = NULL;
1551 
1552 	while (*p != NULL) {
1553 		parent = *p;
1554 		iter = rb_entry(parent, struct source_line, node);
1555 
1556 		if (cmp_source_line(src_line, iter))
1557 			p = &(*p)->rb_left;
1558 		else
1559 			p = &(*p)->rb_right;
1560 	}
1561 
1562 	rb_link_node(&src_line->node, parent, p);
1563 	rb_insert_color(&src_line->node, root);
1564 }
1565 
1566 static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
1567 {
1568 	struct source_line *src_line;
1569 	struct rb_node *node;
1570 
1571 	node = rb_first(src_root);
1572 	while (node) {
1573 		struct rb_node *next;
1574 
1575 		src_line = rb_entry(node, struct source_line, node);
1576 		next = rb_next(node);
1577 		rb_erase(node, src_root);
1578 
1579 		__resort_source_line(dest_root, src_line);
1580 		node = next;
1581 	}
1582 }
1583 
1584 static void symbol__free_source_line(struct symbol *sym, int len)
1585 {
1586 	struct annotation *notes = symbol__annotation(sym);
1587 	struct source_line *src_line = notes->src->lines;
1588 	size_t sizeof_src_line;
1589 	int i;
1590 
1591 	sizeof_src_line = sizeof(*src_line) +
1592 			  (sizeof(src_line->samples) * (src_line->nr_pcnt - 1));
1593 
1594 	for (i = 0; i < len; i++) {
1595 		free_srcline(src_line->path);
1596 		src_line = (void *)src_line + sizeof_src_line;
1597 	}
1598 
1599 	zfree(&notes->src->lines);
1600 }
1601 
1602 /* Get the filename:line for the colored entries */
1603 static int symbol__get_source_line(struct symbol *sym, struct map *map,
1604 				   struct perf_evsel *evsel,
1605 				   struct rb_root *root, int len)
1606 {
1607 	u64 start;
1608 	int i, k;
1609 	int evidx = evsel->idx;
1610 	struct source_line *src_line;
1611 	struct annotation *notes = symbol__annotation(sym);
1612 	struct sym_hist *h = annotation__histogram(notes, evidx);
1613 	struct rb_root tmp_root = RB_ROOT;
1614 	int nr_pcnt = 1;
1615 	u64 h_sum = h->sum;
1616 	size_t sizeof_src_line = sizeof(struct source_line);
1617 
1618 	if (perf_evsel__is_group_event(evsel)) {
1619 		for (i = 1; i < evsel->nr_members; i++) {
1620 			h = annotation__histogram(notes, evidx + i);
1621 			h_sum += h->sum;
1622 		}
1623 		nr_pcnt = evsel->nr_members;
1624 		sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->samples);
1625 	}
1626 
1627 	if (!h_sum)
1628 		return 0;
1629 
1630 	src_line = notes->src->lines = calloc(len, sizeof_src_line);
1631 	if (!notes->src->lines)
1632 		return -1;
1633 
1634 	start = map__rip_2objdump(map, sym->start);
1635 
1636 	for (i = 0; i < len; i++) {
1637 		u64 offset;
1638 		double percent_max = 0.0;
1639 
1640 		src_line->nr_pcnt = nr_pcnt;
1641 
1642 		for (k = 0; k < nr_pcnt; k++) {
1643 			h = annotation__histogram(notes, evidx + k);
1644 			src_line->samples[k].percent = 100.0 * h->addr[i] / h->sum;
1645 
1646 			if (src_line->samples[k].percent > percent_max)
1647 				percent_max = src_line->samples[k].percent;
1648 		}
1649 
1650 		if (percent_max <= 0.5)
1651 			goto next;
1652 
1653 		offset = start + i;
1654 		src_line->path = get_srcline(map->dso, offset, NULL, false);
1655 		insert_source_line(&tmp_root, src_line);
1656 
1657 	next:
1658 		src_line = (void *)src_line + sizeof_src_line;
1659 	}
1660 
1661 	resort_source_line(root, &tmp_root);
1662 	return 0;
1663 }
1664 
1665 static void print_summary(struct rb_root *root, const char *filename)
1666 {
1667 	struct source_line *src_line;
1668 	struct rb_node *node;
1669 
1670 	printf("\nSorted summary for file %s\n", filename);
1671 	printf("----------------------------------------------\n\n");
1672 
1673 	if (RB_EMPTY_ROOT(root)) {
1674 		printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
1675 		return;
1676 	}
1677 
1678 	node = rb_first(root);
1679 	while (node) {
1680 		double percent, percent_max = 0.0;
1681 		const char *color;
1682 		char *path;
1683 		int i;
1684 
1685 		src_line = rb_entry(node, struct source_line, node);
1686 		for (i = 0; i < src_line->nr_pcnt; i++) {
1687 			percent = src_line->samples[i].percent_sum;
1688 			color = get_percent_color(percent);
1689 			color_fprintf(stdout, color, " %7.2f", percent);
1690 
1691 			if (percent > percent_max)
1692 				percent_max = percent;
1693 		}
1694 
1695 		path = src_line->path;
1696 		color = get_percent_color(percent_max);
1697 		color_fprintf(stdout, color, " %s\n", path);
1698 
1699 		node = rb_next(node);
1700 	}
1701 }
1702 
1703 static void symbol__annotate_hits(struct symbol *sym, struct perf_evsel *evsel)
1704 {
1705 	struct annotation *notes = symbol__annotation(sym);
1706 	struct sym_hist *h = annotation__histogram(notes, evsel->idx);
1707 	u64 len = symbol__size(sym), offset;
1708 
1709 	for (offset = 0; offset < len; ++offset)
1710 		if (h->addr[offset] != 0)
1711 			printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
1712 			       sym->start + offset, h->addr[offset]);
1713 	printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
1714 }
1715 
/*
 * Print the annotated disassembly of @sym to stdout.
 *
 * @full_paths: print the DSO's full path instead of its basename.
 * @min_pcnt:   lines below this percentage are filtered out.
 * @max_lines:  stop printing hot lines after this many (0 = unlimited).
 * @context:    number of surrounding lines to show around each hot line;
 *              0 disables context buffering.
 *
 * Returns the number of lines suppressed by @max_lines (>= 0), or
 * -ENOMEM.  The filtering decision per line is made by
 * disasm_line__print(): 0 = printed, 1 = filtered by max_lines,
 * -1 = filtered by min_pcnt / non-IP line.
 */
int symbol__annotate_printf(struct symbol *sym, struct map *map,
			    struct perf_evsel *evsel, bool full_paths,
			    int min_pcnt, int max_lines, int context)
{
	struct dso *dso = map->dso;
	char *filename;
	const char *d_filename;
	const char *evsel_name = perf_evsel__name(evsel);
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evsel->idx);
	struct disasm_line *pos, *queue = NULL;
	u64 start = map__rip_2objdump(map, sym->start);
	int printed = 2, queue_len = 0;
	int more = 0;
	u64 len;
	int width = 8;
	int graph_dotted_len;

	filename = strdup(dso->long_name);
	if (!filename)
		return -ENOMEM;

	if (full_paths)
		d_filename = filename;
	else
		d_filename = basename(filename);

	len = symbol__size(sym);

	/* One 8-char percent column per event in the group. */
	if (perf_evsel__is_group_event(evsel))
		width *= evsel->nr_members;

	graph_dotted_len = printf(" %-*.*s|	Source code & Disassembly of %s for %s (%" PRIu64 " samples)\n",
	       width, width, "Percent", d_filename, evsel_name, h->sum);

	/* Underline the header with a dotted line of the same width. */
	printf("%-*.*s----\n",
	       graph_dotted_len, graph_dotted_len, graph_dotted_line);

	if (verbose)
		symbol__annotate_hits(sym, evsel);

	list_for_each_entry(pos, &notes->src->source, node) {
		/*
		 * With context enabled, "queue" marks the oldest buffered
		 * line; up to "context" lines are held back and only
		 * printed once a hot line is reached.
		 */
		if (context && queue == NULL) {
			queue = pos;
			queue_len = 0;
		}

		switch (disasm_line__print(pos, sym, start, evsel, len,
					    min_pcnt, printed, max_lines,
					    queue)) {
		case 0:
			/* Line (plus any queued context) was printed. */
			++printed;
			if (context) {
				printed += queue_len;
				queue = NULL;
				queue_len = 0;
			}
			break;
		case 1:
			/* filtered by max_lines */
			++more;
			break;
		case -1:
		default:
			/*
			 * Filtered by min_pcnt or non IP lines when
			 * context != 0
			 */
			if (!context)
				break;
			/* Slide the context window: drop the oldest line. */
			if (queue_len == context)
				queue = list_entry(queue->node.next, typeof(*queue), node);
			else
				++queue_len;
			break;
		}
	}

	free(filename);

	return more;
}
1798 
1799 void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
1800 {
1801 	struct annotation *notes = symbol__annotation(sym);
1802 	struct sym_hist *h = annotation__histogram(notes, evidx);
1803 
1804 	memset(h, 0, notes->src->sizeof_sym_hist);
1805 }
1806 
1807 void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
1808 {
1809 	struct annotation *notes = symbol__annotation(sym);
1810 	struct sym_hist *h = annotation__histogram(notes, evidx);
1811 	int len = symbol__size(sym), offset;
1812 
1813 	h->sum = 0;
1814 	for (offset = 0; offset < len; ++offset) {
1815 		h->addr[offset] = h->addr[offset] * 7 / 8;
1816 		h->sum += h->addr[offset];
1817 	}
1818 }
1819 
1820 void disasm__purge(struct list_head *head)
1821 {
1822 	struct disasm_line *pos, *n;
1823 
1824 	list_for_each_entry_safe(pos, n, head, node) {
1825 		list_del(&pos->node);
1826 		disasm_line__free(pos);
1827 	}
1828 }
1829 
1830 static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
1831 {
1832 	size_t printed;
1833 
1834 	if (dl->offset == -1)
1835 		return fprintf(fp, "%s\n", dl->line);
1836 
1837 	printed = fprintf(fp, "%#" PRIx64 " %s", dl->offset, dl->ins.name);
1838 
1839 	if (dl->ops.raw[0] != '\0') {
1840 		printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
1841 				   dl->ops.raw);
1842 	}
1843 
1844 	return printed + fprintf(fp, "\n");
1845 }
1846 
1847 size_t disasm__fprintf(struct list_head *head, FILE *fp)
1848 {
1849 	struct disasm_line *pos;
1850 	size_t printed = 0;
1851 
1852 	list_for_each_entry(pos, head, node)
1853 		printed += disasm_line__fprintf(pos, fp);
1854 
1855 	return printed;
1856 }
1857 
1858 int symbol__tty_annotate(struct symbol *sym, struct map *map,
1859 			 struct perf_evsel *evsel, bool print_lines,
1860 			 bool full_paths, int min_pcnt, int max_lines)
1861 {
1862 	struct dso *dso = map->dso;
1863 	struct rb_root source_line = RB_ROOT;
1864 	u64 len;
1865 
1866 	if (symbol__disassemble(sym, map, perf_evsel__env_arch(evsel), 0) < 0)
1867 		return -1;
1868 
1869 	len = symbol__size(sym);
1870 
1871 	if (print_lines) {
1872 		srcline_full_filename = full_paths;
1873 		symbol__get_source_line(sym, map, evsel, &source_line, len);
1874 		print_summary(&source_line, dso->long_name);
1875 	}
1876 
1877 	symbol__annotate_printf(sym, map, evsel, full_paths,
1878 				min_pcnt, max_lines, 0);
1879 	if (print_lines)
1880 		symbol__free_source_line(sym, len);
1881 
1882 	disasm__purge(&symbol__annotation(sym)->src->source);
1883 
1884 	return 0;
1885 }
1886 
1887 bool ui__has_annotation(void)
1888 {
1889 	return use_browser == 1 && perf_hpp_list.sym;
1890 }
1891