/*
 * Copyright (c) 2014-2018, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "pt_query_decoder.h"
#include "pt_sync.h"
#include "pt_decoder_function.h"
#include "pt_packet.h"
#include "pt_packet_decoder.h"
#include "pt_config.h"
#include "pt_opcodes.h"
#include "pt_compiler.h"

#include "intel-pt.h"

#include <string.h>
#include <stddef.h>
#include <stdlib.h>
#include <limits.h>

/* Find a FUP in a PSB+ header.
 *
 * The packet @decoder must be synchronized onto the trace stream at the
 * beginning or somewhere inside a PSB+ header.
 *
 * It uses @packet to hold trace packets during its search. If the search is
 * successful, @packet will contain the first (and hopefully only) FUP packet in
 * this PSB+. Otherwise, @packet may contain anything.
 *
 * Returns one if a FUP packet is found (@packet will contain it).
 * Returns zero if no FUP packet is found (@packet is undefined).
 * Returns a negative error code otherwise.
 */
static int pt_qry_find_header_fup(struct pt_packet *packet,
				  struct pt_packet_decoder *decoder)
{
	if (!packet || !decoder)
		return -pte_internal;

	for (;;) {
		int errcode;

		errcode = pt_pkt_next(decoder, packet, sizeof(*packet));
		if (errcode < 0)
			return errcode;

		switch (packet->type) {
		default:
			/* Ignore the packet. */
			break;

		case ppt_psbend:
			/* There's no FUP in here. */
			return 0;

		case ppt_fup:
			/* Found it. */
			return 1;
		}
	}
}

int pt_qry_decoder_init(struct pt_query_decoder *decoder,
			const struct pt_config *config)
{
	int errcode;

	if (!decoder)
		return -pte_invalid;

	memset(decoder, 0, sizeof(*decoder));

	errcode = pt_config_from_user(&decoder->config, config);
	if (errcode < 0)
		return errcode;

	pt_last_ip_init(&decoder->ip);
	pt_tnt_cache_init(&decoder->tnt);
	pt_time_init(&decoder->time);
	pt_time_init(&decoder->last_time);
	pt_tcal_init(&decoder->tcal);
	pt_evq_init(&decoder->evq);

	return 0;
}

struct pt_query_decoder *pt_qry_alloc_decoder(const struct pt_config *config)
{
	struct pt_query_decoder *decoder;
	int errcode;

	decoder = malloc(sizeof(*decoder));
	if (!decoder)
		return NULL;

	errcode = pt_qry_decoder_init(decoder, config);
	if (errcode < 0) {
		free(decoder);
		return NULL;
	}

	return decoder;
}

void pt_qry_decoder_fini(struct pt_query_decoder *decoder)
{
	(void) decoder;

	/* Nothing to do. */
}

void pt_qry_free_decoder(struct pt_query_decoder *decoder)
{
	pt_qry_decoder_fini(decoder);
	free(decoder);
}
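
/* Illustrative usage sketch of the allocation functions above: allocating a
 * query decoder, synchronizing it onto the trace, and freeing it again.
 * @buffer and @size stand for a caller-provided raw trace buffer; error
 * handling is elided. This is caller-side code, not part of this library.
 *
 *	struct pt_query_decoder *qry;
 *	struct pt_config config;
 *	uint64_t ip;
 *	int status;
 *
 *	memset(&config, 0, sizeof(config));
 *	config.size = sizeof(config);
 *	config.begin = buffer;
 *	config.end = buffer + size;
 *
 *	qry = pt_qry_alloc_decoder(&config);
 *	if (!qry)
 *		return -pte_nomem;
 *
 *	status = pt_qry_sync_forward(qry, &ip);
 *	if (status >= 0) {
 *		... query loop, see pt_qry_cond_branch() et al. below ...
 *	}
 *
 *	pt_qry_free_decoder(qry);
 */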

static void pt_qry_reset(struct pt_query_decoder *decoder)
{
	if (!decoder)
		return;

	decoder->enabled = 0;
	decoder->consume_packet = 0;
	decoder->event = NULL;

	pt_last_ip_init(&decoder->ip);
	pt_tnt_cache_init(&decoder->tnt);
	pt_time_init(&decoder->time);
	pt_time_init(&decoder->last_time);
	pt_tcal_init(&decoder->tcal);
	pt_evq_init(&decoder->evq);
}

static int pt_qry_will_event(const struct pt_query_decoder *decoder)
{
	const struct pt_decoder_function *dfun;

	if (!decoder)
		return -pte_internal;

	dfun = decoder->next;
	if (!dfun)
		return 0;

	if (dfun->flags & pdff_event)
		return 1;

	if (dfun->flags & pdff_psbend)
		return pt_evq_pending(&decoder->evq, evb_psbend);

	if (dfun->flags & pdff_tip)
		return pt_evq_pending(&decoder->evq, evb_tip);

	if (dfun->flags & pdff_fup)
		return pt_evq_pending(&decoder->evq, evb_fup);

	return 0;
}

static int pt_qry_will_eos(const struct pt_query_decoder *decoder)
{
	const struct pt_decoder_function *dfun;
	int errcode;

	if (!decoder)
		return -pte_internal;

	dfun = decoder->next;
	if (dfun)
		return 0;

	/* The decoding function may be NULL for two reasons:
	 *
	 * - we ran out of trace
	 * - we ran into a fetch error such as -pte_bad_opc
	 *
	 * Let's fetch again.
	 */
	errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config);
	return errcode == -pte_eos;
}

static int pt_qry_status_flags(const struct pt_query_decoder *decoder)
{
	int flags = 0;

	if (!decoder)
		return -pte_internal;

	/* Some packets force out TNT and any deferred TIPs in order to
	 * establish the correct context for the subsequent packet.
	 *
	 * Users are expected to first navigate to the correct code region
	 * by using up the cached TNT bits before interpreting any subsequent
	 * packets.
	 *
	 * We do need to read ahead in order to signal upcoming events. We may
	 * have already decoded those packets while our user has not navigated
	 * to the correct code region, yet.
	 *
	 * In order to have our user use up the cached TNT bits first, we do
	 * not indicate the next event until the TNT cache is empty.
	 */
	if (pt_tnt_cache_is_empty(&decoder->tnt)) {
		if (pt_qry_will_event(decoder))
			flags |= pts_event_pending;

		if (pt_qry_will_eos(decoder))
			flags |= pts_eos;
	}

	return flags;
}
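
/* Note that the flags computed above are or'ed into the non-negative return
 * values of the query functions below. A caller is hence expected to check
 * them roughly as in this sketch, with @status as returned by, for example,
 * pt_qry_cond_branch():
 *
 *	if (status < 0)
 *		return status;
 *
 *	if (status & pts_event_pending)
 *		... query the pending event via pt_qry_event() ...
 *
 *	if (status & pts_eos)
 *		... we reached the end of the trace ...
 */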

static int pt_qry_provoke_fetch_error(const struct pt_query_decoder *decoder)
{
	const struct pt_decoder_function *dfun;
	int errcode;

	if (!decoder)
		return -pte_internal;

	/* Repeat the decoder fetch to reproduce the error. */
	errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config);
	if (errcode < 0)
		return errcode;

	/* We must get some error or something's wrong. */
	return -pte_internal;
}

static int pt_qry_read_ahead(struct pt_query_decoder *decoder)
{
	if (!decoder)
		return -pte_internal;

	for (;;) {
		const struct pt_decoder_function *dfun;
		int errcode;

		errcode = pt_df_fetch(&decoder->next, decoder->pos,
				      &decoder->config);
		if (errcode)
			return errcode;

		dfun = decoder->next;
		if (!dfun)
			return -pte_internal;

		if (!dfun->decode)
			return -pte_internal;

		/* We're done once we reach
		 *
		 * - a branching related packet. */
		if (dfun->flags & (pdff_tip | pdff_tnt))
			return 0;

		/* - an event related packet. */
		if (pt_qry_will_event(decoder))
			return 0;

		/* Decode status update packets. */
		errcode = dfun->decode(decoder);
		if (errcode) {
			/* Ignore truncated status packets at the end.
			 *
			 * Move beyond the packet and clear @decoder->next to
			 * indicate that we were not able to fetch the next
			 * packet.
			 */
			if (errcode == -pte_eos) {
				decoder->pos = decoder->config.end;
				decoder->next = NULL;
			}

			return errcode;
		}
	}
}

static int pt_qry_start(struct pt_query_decoder *decoder, const uint8_t *pos,
			uint64_t *addr)
{
	const struct pt_decoder_function *dfun;
	int status, errcode;

	if (!decoder || !pos)
		return -pte_invalid;

	pt_qry_reset(decoder);

	decoder->sync = pos;
	decoder->pos = pos;

	errcode = pt_df_fetch(&decoder->next, pos, &decoder->config);
	if (errcode)
		return errcode;

	dfun = decoder->next;

	/* We do need to start at a PSB in order to initialize the state. */
	if (dfun != &pt_decode_psb)
		return -pte_nosync;

	/* Decode the PSB+ header to initialize the state. */
	errcode = dfun->decode(decoder);
	if (errcode < 0)
		return errcode;

	/* Fill in the start address.
	 * We do this before reading ahead since the latter may read an
	 * adjacent PSB+ that might change the decoder's IP, causing us
	 * to skip code.
	 */
	if (addr) {
		status = pt_last_ip_query(addr, &decoder->ip);

		/* Make sure we don't clobber it later on. */
		if (!status)
			addr = NULL;
	}

	/* Read ahead until the first query-relevant packet. */
	errcode = pt_qry_read_ahead(decoder);
	if (errcode < 0)
		return errcode;

	/* We return the current decoder status. */
	status = pt_qry_status_flags(decoder);
	if (status < 0)
		return status;

	errcode = pt_last_ip_query(addr, &decoder->ip);
	if (errcode < 0) {
		/* Indicate the missing IP in the status. */
		if (addr)
			status |= pts_ip_suppressed;
	}

	return status;
}

static int pt_qry_apply_tsc(struct pt_time *time, struct pt_time_cal *tcal,
			    const struct pt_packet_tsc *packet,
			    const struct pt_config *config)
{
	int errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * calibration which will result in imprecise cycle-accurate timing.
	 *
	 * We currently do not track them.
	 */
	errcode = pt_tcal_update_tsc(tcal, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * timing and are tracked as packet losses in struct pt_time.
	 */
	errcode = pt_time_update_tsc(time, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	return 0;
}

static int pt_qry_apply_header_tsc(struct pt_time *time,
				   struct pt_time_cal *tcal,
				   const struct pt_packet_tsc *packet,
				   const struct pt_config *config)
{
	int errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * calibration which will result in imprecise cycle-accurate timing.
	 *
	 * We currently do not track them.
	 */
	errcode = pt_tcal_header_tsc(tcal, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * timing and are tracked as packet losses in struct pt_time.
	 */
	errcode = pt_time_update_tsc(time, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	return 0;
}

static int pt_qry_apply_cbr(struct pt_time *time, struct pt_time_cal *tcal,
			    const struct pt_packet_cbr *packet,
			    const struct pt_config *config)
{
	int errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * calibration which will result in imprecise cycle-accurate timing.
	 *
	 * We currently do not track them.
	 */
	errcode = pt_tcal_update_cbr(tcal, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * timing and are tracked as packet losses in struct pt_time.
	 */
	errcode = pt_time_update_cbr(time, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	return 0;
}

static int pt_qry_apply_header_cbr(struct pt_time *time,
				   struct pt_time_cal *tcal,
				   const struct pt_packet_cbr *packet,
				   const struct pt_config *config)
{
	int errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * calibration which will result in imprecise cycle-accurate timing.
	 *
	 * We currently do not track them.
	 */
	errcode = pt_tcal_header_cbr(tcal, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * timing and are tracked as packet losses in struct pt_time.
	 */
	errcode = pt_time_update_cbr(time, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	return 0;
}

static int pt_qry_apply_tma(struct pt_time *time, struct pt_time_cal *tcal,
			    const struct pt_packet_tma *packet,
			    const struct pt_config *config)
{
	int errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * calibration which will result in imprecise cycle-accurate timing.
	 *
	 * We currently do not track them.
	 */
	errcode = pt_tcal_update_tma(tcal, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * timing and are tracked as packet losses in struct pt_time.
	 */
	errcode = pt_time_update_tma(time, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	return 0;
}

static int pt_qry_apply_mtc(struct pt_time *time, struct pt_time_cal *tcal,
			    const struct pt_packet_mtc *packet,
			    const struct pt_config *config)
{
	int errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * calibration which will result in imprecise cycle-accurate timing.
	 *
	 * We currently do not track them.
	 */
	errcode = pt_tcal_update_mtc(tcal, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * timing and are tracked as packet losses in struct pt_time.
	 */
	errcode = pt_time_update_mtc(time, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	return 0;
}

static int pt_qry_apply_cyc(struct pt_time *time, struct pt_time_cal *tcal,
			    const struct pt_packet_cyc *packet,
			    const struct pt_config *config)
{
	uint64_t fcr;
	int errcode;

	/* We ignore configuration errors. They will result in imprecise
	 * calibration which will result in imprecise cycle-accurate timing.
	 *
	 * We currently do not track them.
	 */
	errcode = pt_tcal_update_cyc(tcal, packet, config);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	/* We need the FastCounter to Cycles ratio below. Fall back to
	 * an invalid ratio of 0 if calibration has not kicked in, yet.
	 *
	 * This will be tracked as packet loss in struct pt_time.
	 */
	errcode = pt_tcal_fcr(&fcr, tcal);
	if (errcode < 0) {
		if (errcode == -pte_no_time)
			fcr = 0ull;
		else
			return errcode;
	}

	/* We ignore configuration errors. They will result in imprecise
	 * timing and are tracked as packet losses in struct pt_time.
	 */
	errcode = pt_time_update_cyc(time, packet, config, fcr);
	if (errcode < 0 && (errcode != -pte_bad_config))
		return errcode;

	return 0;
}

int pt_qry_sync_forward(struct pt_query_decoder *decoder, uint64_t *ip)
{
	const uint8_t *pos, *sync;
	int errcode;

	if (!decoder)
		return -pte_invalid;

	sync = decoder->sync;
	pos = decoder->pos;
	if (!pos)
		pos = decoder->config.begin;

	if (pos == sync)
		pos += ptps_psb;

	errcode = pt_sync_forward(&sync, pos, &decoder->config);
	if (errcode < 0)
		return errcode;

	return pt_qry_start(decoder, sync, ip);
}

int pt_qry_sync_backward(struct pt_query_decoder *decoder, uint64_t *ip)
{
	const uint8_t *start, *sync;
	int errcode;

	if (!decoder)
		return -pte_invalid;

	start = decoder->pos;
	if (!start)
		start = decoder->config.end;

	sync = start;
	for (;;) {
		errcode = pt_sync_backward(&sync, sync, &decoder->config);
		if (errcode < 0)
			return errcode;

		errcode = pt_qry_start(decoder, sync, ip);
		if (errcode < 0) {
			/* Ignore incomplete trace segments at the end. We
			 * need a full PSB+ to start decoding.
			 */
			if (errcode == -pte_eos)
				continue;

			return errcode;
		}

		/* An empty trace segment in the middle of the trace might
		 * bring us back to where we started.
		 *
		 * We're done once we reach a new position.
		 */
		if (decoder->pos != start)
			break;
	}

	return 0;
}

int pt_qry_sync_set(struct pt_query_decoder *decoder, uint64_t *ip,
		    uint64_t offset)
{
	const uint8_t *sync, *pos;
	int errcode;

	if (!decoder)
		return -pte_invalid;

	pos = decoder->config.begin + offset;

	errcode = pt_sync_set(&sync, pos, &decoder->config);
	if (errcode < 0)
		return errcode;

	return pt_qry_start(decoder, sync, ip);
}

int pt_qry_get_offset(const struct pt_query_decoder *decoder, uint64_t *offset)
{
	const uint8_t *begin, *pos;

	if (!decoder || !offset)
		return -pte_invalid;

	begin = decoder->config.begin;
	pos = decoder->pos;

	if (!pos)
		return -pte_nosync;

	*offset = pos - begin;
	return 0;
}

int pt_qry_get_sync_offset(const struct pt_query_decoder *decoder,
			   uint64_t *offset)
{
	const uint8_t *begin, *sync;

	if (!decoder || !offset)
		return -pte_invalid;

	begin = decoder->config.begin;
	sync = decoder->sync;

	if (!sync)
		return -pte_nosync;

	*offset = sync - begin;
	return 0;
}

const struct pt_config *
pt_qry_get_config(const struct pt_query_decoder *decoder)
{
	if (!decoder)
		return NULL;

	return &decoder->config;
}

static int pt_qry_cache_tnt(struct pt_query_decoder *decoder)
{
	int errcode;

	if (!decoder)
		return -pte_internal;

	for (;;) {
		const struct pt_decoder_function *dfun;

		dfun = decoder->next;
		if (!dfun)
			return pt_qry_provoke_fetch_error(decoder);

		if (!dfun->decode)
			return -pte_internal;

		/* There's an event ahead of us. */
		if (pt_qry_will_event(decoder))
			return -pte_bad_query;

		/* Diagnose a TIP that has not been part of an event. */
		if (dfun->flags & pdff_tip)
			return -pte_bad_query;

		/* Clear the decoder's current event so we know when we
		 * accidentally skipped an event.
		 */
		decoder->event = NULL;

		/* Apply the decoder function. */
		errcode = dfun->decode(decoder);
		if (errcode)
			return errcode;

		/* If we skipped an event, we're in trouble. */
		if (decoder->event)
			return -pte_event_ignored;

		/* We're done when we decoded a TNT packet. */
		if (dfun->flags & pdff_tnt)
			break;

		/* Read ahead until the next query-relevant packet. */
		errcode = pt_qry_read_ahead(decoder);
		if (errcode)
			return errcode;
	}

	/* Preserve the time at the TNT packet. */
	decoder->last_time = decoder->time;

	/* Read ahead until the next query-relevant packet. */
	errcode = pt_qry_read_ahead(decoder);
	if ((errcode < 0) && (errcode != -pte_eos))
		return errcode;

	return 0;
}

int pt_qry_cond_branch(struct pt_query_decoder *decoder, int *taken)
{
	int errcode, query;

	if (!decoder || !taken)
		return -pte_invalid;

	/* We cache the latest tnt packet in the decoder. Let's re-fill the
	 * cache in case it is empty.
	 */
	if (pt_tnt_cache_is_empty(&decoder->tnt)) {
		errcode = pt_qry_cache_tnt(decoder);
		if (errcode < 0)
			return errcode;
	}

	query = pt_tnt_cache_query(&decoder->tnt);
	if (query < 0)
		return query;

	*taken = query;

	return pt_qry_status_flags(decoder);
}

int pt_qry_indirect_branch(struct pt_query_decoder *decoder, uint64_t *addr)
{
	int errcode, flags;

	if (!decoder || !addr)
		return -pte_invalid;

	flags = 0;
	for (;;) {
		const struct pt_decoder_function *dfun;

		dfun = decoder->next;
		if (!dfun)
			return pt_qry_provoke_fetch_error(decoder);

		if (!dfun->decode)
			return -pte_internal;

		/* There's an event ahead of us. */
		if (pt_qry_will_event(decoder))
			return -pte_bad_query;

		/* Clear the decoder's current event so we know when we
		 * accidentally skipped an event.
		 */
		decoder->event = NULL;

		/* We may see a single TNT packet if the current tnt is empty.
		 *
		 * If we see a TNT while the current tnt is not empty, it means
		 * that our user got out of sync. Let's report no data and hope
		 * that our user is able to re-sync.
		 */
		if ((dfun->flags & pdff_tnt) &&
		    !pt_tnt_cache_is_empty(&decoder->tnt))
			return -pte_bad_query;

		/* Apply the decoder function. */
		errcode = dfun->decode(decoder);
		if (errcode)
			return errcode;

		/* If we skipped an event, we're in trouble. */
		if (decoder->event)
			return -pte_event_ignored;

		/* We're done when we found a TIP packet that isn't part of an
		 * event.
		 */
		if (dfun->flags & pdff_tip) {
			uint64_t ip;

			/* We already decoded it, so the branch destination
			 * is stored in the decoder's last ip.
			 */
			errcode = pt_last_ip_query(&ip, &decoder->ip);
			if (errcode < 0)
				flags |= pts_ip_suppressed;
			else
				*addr = ip;

			break;
		}

		/* Read ahead until the next query-relevant packet. */
		errcode = pt_qry_read_ahead(decoder);
		if (errcode)
			return errcode;
	}

	/* Preserve the time at the TIP packet. */
	decoder->last_time = decoder->time;

	/* Read ahead until the next query-relevant packet. */
	errcode = pt_qry_read_ahead(decoder);
	if ((errcode < 0) && (errcode != -pte_eos))
		return errcode;

	flags |= pt_qry_status_flags(decoder);

	return flags;
}
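
/* Illustrative sketch of how a caller may dispatch between the two branch
 * queries above: a conditional branch query fails with -pte_bad_query when
 * the next query-relevant packet is not a TNT, in which case the caller
 * tries an indirect branch query instead. next_branch() is a hypothetical
 * caller-side helper, not part of this library.
 *
 *	static int next_branch(struct pt_query_decoder *qry, int *taken,
 *			       uint64_t *addr)
 *	{
 *		int status;
 *
 *		status = pt_qry_cond_branch(qry, taken);
 *		if (status != -pte_bad_query)
 *			return status;
 *
 *		return pt_qry_indirect_branch(qry, addr);
 *	}
 */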

int pt_qry_event(struct pt_query_decoder *decoder, struct pt_event *event,
		 size_t size)
{
	int errcode, flags;

	if (!decoder || !event)
		return -pte_invalid;

	if (size < offsetof(struct pt_event, variant))
		return -pte_invalid;

	/* We do not allow querying for events while there are still TNT
	 * bits to consume.
	 */
	if (!pt_tnt_cache_is_empty(&decoder->tnt))
		return -pte_bad_query;

	/* Do not provide more than we actually have. */
	if (sizeof(*event) < size)
		size = sizeof(*event);

	flags = 0;
	for (;;) {
		const struct pt_decoder_function *dfun;

		dfun = decoder->next;
		if (!dfun)
			return pt_qry_provoke_fetch_error(decoder);

		if (!dfun->decode)
			return -pte_internal;

		/* We must not see a TIP or TNT packet unless it belongs
		 * to an event.
		 *
		 * If we see one, it means that our user got out of sync.
		 * Let's report no data and hope that our user is able
		 * to re-sync.
		 */
		if ((dfun->flags & (pdff_tip | pdff_tnt)) &&
		    !pt_qry_will_event(decoder))
			return -pte_bad_query;

		/* Clear the decoder's current event so we know when decoding
		 * produces a new event.
		 */
		decoder->event = NULL;

		/* Apply any other decoder function. */
		errcode = dfun->decode(decoder);
		if (errcode)
			return errcode;

		/* Check if there has been an event.
		 *
		 * Some packets may result in events in some but not in all
		 * configurations.
		 */
		if (decoder->event) {
			(void) memcpy(event, decoder->event, size);
			break;
		}

		/* Read ahead until the next query-relevant packet. */
		errcode = pt_qry_read_ahead(decoder);
		if (errcode)
			return errcode;
	}

	/* Preserve the time at the event. */
	decoder->last_time = decoder->time;

	/* Read ahead until the next query-relevant packet. */
	errcode = pt_qry_read_ahead(decoder);
	if ((errcode < 0) && (errcode != -pte_eos))
		return errcode;

	flags |= pt_qry_status_flags(decoder);

	return flags;
}
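
/* Illustrative sketch of draining pending events: as long as the returned
 * status has pts_event_pending set, another event can be queried before
 * resuming branch queries. @qry and the initial @status are assumed to come
 * from a synchronized decoder; this loop is caller-side code, not part of
 * this library.
 *
 *	while (status & pts_event_pending) {
 *		struct pt_event ev;
 *
 *		status = pt_qry_event(qry, &ev, sizeof(ev));
 *		if (status < 0)
 *			break;
 *
 *		... apply @ev to the caller's state ...
 *	}
 */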

int pt_qry_time(struct pt_query_decoder *decoder, uint64_t *time,
		uint32_t *lost_mtc, uint32_t *lost_cyc)
{
	if (!decoder || !time)
		return -pte_invalid;

	return pt_time_query_tsc(time, lost_mtc, lost_cyc, &decoder->last_time);
}

int pt_qry_core_bus_ratio(struct pt_query_decoder *decoder, uint32_t *cbr)
{
	if (!decoder || !cbr)
		return -pte_invalid;

	return pt_time_query_cbr(cbr, &decoder->last_time);
}

static int pt_qry_event_time(struct pt_event *event,
			     const struct pt_query_decoder *decoder)
{
	int errcode;

	if (!event || !decoder)
		return -pte_internal;

	errcode = pt_time_query_tsc(&event->tsc, &event->lost_mtc,
				    &event->lost_cyc, &decoder->time);
	if (errcode < 0) {
		if (errcode != -pte_no_time)
			return errcode;
	} else
		event->has_tsc = 1;

	return 0;
}

int pt_qry_decode_unknown(struct pt_query_decoder *decoder)
{
	struct pt_packet packet;
	int size;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_unknown(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	decoder->pos += size;
	return 0;
}

int pt_qry_decode_pad(struct pt_query_decoder *decoder)
{
	if (!decoder)
		return -pte_internal;

	decoder->pos += ptps_pad;

	return 0;
}

static int pt_qry_read_psb_header(struct pt_query_decoder *decoder)
{
	if (!decoder)
		return -pte_internal;

	pt_last_ip_init(&decoder->ip);

	for (;;) {
		const struct pt_decoder_function *dfun;
		int errcode;

		errcode = pt_df_fetch(&decoder->next, decoder->pos,
				      &decoder->config);
		if (errcode)
			return errcode;

		dfun = decoder->next;
		if (!dfun)
			return -pte_internal;

		/* We're done once we reach a psbend packet. */
		if (dfun->flags & pdff_psbend)
			return 0;

		if (!dfun->header)
			return -pte_bad_context;

		errcode = dfun->header(decoder);
		if (errcode)
			return errcode;
	}
}

int pt_qry_decode_psb(struct pt_query_decoder *decoder)
{
	const uint8_t *pos;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	pos = decoder->pos;

	size = pt_pkt_read_psb(pos, &decoder->config);
	if (size < 0)
		return size;

	decoder->pos += size;

	errcode = pt_qry_read_psb_header(decoder);
	if (errcode < 0) {
		/* Move back to the PSB so we have a chance to recover and
		 * continue decoding.
		 */
		decoder->pos = pos;

		/* Clear any PSB+ events that have already been queued. */
		(void) pt_evq_clear(&decoder->evq, evb_psbend);

		/* Reset the decoder's decode function. */
		decoder->next = &pt_decode_psb;

		return errcode;
	}

	/* The next packet following the PSB header will be of type PSBEND.
	 *
	 * Decoding this packet will publish the PSB events that have been
	 * accumulated while reading the PSB header.
	 */
	return 0;
}

static int pt_qry_event_ip(uint64_t *ip, struct pt_event *event,
			   const struct pt_query_decoder *decoder)
{
	int errcode;

	if (!decoder)
		return -pte_internal;

	errcode = pt_last_ip_query(ip, &decoder->ip);
	if (errcode < 0) {
		switch (pt_errcode(errcode)) {
		case pte_noip:
		case pte_ip_suppressed:
			event->ip_suppressed = 1;
			break;

		default:
			return errcode;
		}
	}

	return 0;
}

/* Decode a generic IP packet.
 *
 * Returns the number of bytes read, on success.
 * Returns -pte_eos if the ip does not fit into the buffer.
 * Returns -pte_bad_packet if the ip compression is not known.
 */
static int pt_qry_decode_ip(struct pt_query_decoder *decoder)
{
	struct pt_packet_ip packet;
	int errcode, size;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_ip(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	errcode = pt_last_ip_update_ip(&decoder->ip, &packet, &decoder->config);
	if (errcode < 0)
		return errcode;

	/* We do not update the decoder's position, yet. */

	return size;
}

static int pt_qry_consume_tip(struct pt_query_decoder *decoder, int size)
{
	if (!decoder)
		return -pte_internal;

	decoder->pos += size;
	return 0;
}

static int pt_qry_event_tip(struct pt_event *ev,
			    struct pt_query_decoder *decoder)
{
	if (!ev || !decoder)
		return -pte_internal;

	switch (ev->type) {
	case ptev_async_branch:
		decoder->consume_packet = 1;

		return pt_qry_event_ip(&ev->variant.async_branch.to, ev,
				       decoder);

	case ptev_async_paging:
		return pt_qry_event_ip(&ev->variant.async_paging.ip, ev,
				       decoder);

	case ptev_async_vmcs:
		return pt_qry_event_ip(&ev->variant.async_vmcs.ip, ev,
				       decoder);

	case ptev_exec_mode:
		return pt_qry_event_ip(&ev->variant.exec_mode.ip, ev,
				       decoder);

	default:
		break;
	}

	return -pte_bad_context;
}

int pt_qry_decode_tip(struct pt_query_decoder *decoder)
{
	struct pt_event *ev;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_qry_decode_ip(decoder);
	if (size < 0)
		return size;

	/* Process any pending events binding to TIP. */
	ev = pt_evq_dequeue(&decoder->evq, evb_tip);
	if (ev) {
		errcode = pt_qry_event_tip(ev, decoder);
		if (errcode < 0)
			return errcode;

		/* Publish the event. */
		decoder->event = ev;

		/* Process further pending events. */
		if (pt_evq_pending(&decoder->evq, evb_tip))
			return 0;

		/* No further events.
		 *
		 * If none of the events consumed the packet, we're done.
		 */
		if (!decoder->consume_packet)
			return 0;

		/* We're done with this packet. Clear the flag we set
		 * previously and consume it.
		 */
		decoder->consume_packet = 0;
	}

	return pt_qry_consume_tip(decoder, size);
}

int pt_qry_decode_tnt_8(struct pt_query_decoder *decoder)
{
	struct pt_packet_tnt packet;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_tnt_8(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	errcode = pt_tnt_cache_update_tnt(&decoder->tnt, &packet,
					  &decoder->config);
	if (errcode < 0)
		return errcode;

	decoder->pos += size;
	return 0;
}

int pt_qry_decode_tnt_64(struct pt_query_decoder *decoder)
{
	struct pt_packet_tnt packet;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_tnt_64(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	errcode = pt_tnt_cache_update_tnt(&decoder->tnt, &packet,
					  &decoder->config);
	if (errcode < 0)
		return errcode;

	decoder->pos += size;
	return 0;
}

static int pt_qry_consume_tip_pge(struct pt_query_decoder *decoder, int size)
{
	if (!decoder)
		return -pte_internal;

	decoder->pos += size;
	return 0;
}

static int pt_qry_event_tip_pge(struct pt_event *ev,
				const struct pt_query_decoder *decoder)
{
	if (!ev)
		return -pte_internal;

	switch (ev->type) {
	case ptev_exec_mode:
		return pt_qry_event_ip(&ev->variant.exec_mode.ip, ev, decoder);

	default:
		break;
	}

	return -pte_bad_context;
}

int pt_qry_decode_tip_pge(struct pt_query_decoder *decoder)
{
	struct pt_event *ev;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_qry_decode_ip(decoder);
	if (size < 0)
		return size;

	/* We send the enable event first. This is more convenient for our
	 * users and does not require them to either store or blindly apply
	 * other events that might be pending.
	 *
	 * We use the consume packet decoder flag to indicate this.
	 */
	if (!decoder->consume_packet) {
		/* This packet signals a standalone enabled event. */
		ev = pt_evq_standalone(&decoder->evq);
		if (!ev)
			return -pte_internal;

		ev->type = ptev_enabled;

		/* We can't afford having a suppressed IP here. */
		errcode = pt_last_ip_query(&ev->variant.enabled.ip,
					   &decoder->ip);
		if (errcode < 0)
			return -pte_bad_packet;

		errcode = pt_qry_event_time(ev, decoder);
		if (errcode < 0)
			return errcode;

		/* Discard any cached TNT bits.
		 *
		 * They should have been consumed at the corresponding disable
		 * event. If they have not, for whatever reason, discard them
		 * now so our user does not get out of sync.
		 */
		pt_tnt_cache_init(&decoder->tnt);

		/* Process pending events next. */
		decoder->consume_packet = 1;
		decoder->enabled = 1;
	} else {
		/* Process any pending events binding to TIP. */
		ev = pt_evq_dequeue(&decoder->evq, evb_tip);
		if (ev) {
			errcode = pt_qry_event_tip_pge(ev, decoder);
			if (errcode < 0)
				return errcode;
		}
	}

	/* We must have an event. Either the initial enable event or one of
	 * the queued events.
	 */
	if (!ev)
		return -pte_internal;

	/* Publish the event. */
	decoder->event = ev;

	/* Process further pending events. */
	if (pt_evq_pending(&decoder->evq, evb_tip))
		return 0;

	/* We must consume the packet. */
	if (!decoder->consume_packet)
		return -pte_internal;

	decoder->consume_packet = 0;

	return pt_qry_consume_tip_pge(decoder, size);
}

static int pt_qry_consume_tip_pgd(struct pt_query_decoder *decoder, int size)
{
	if (!decoder)
		return -pte_internal;

	decoder->enabled = 0;
	decoder->pos += size;
	return 0;
}

static int pt_qry_event_tip_pgd(struct pt_event *ev,
				const struct pt_query_decoder *decoder)
{
	if (!ev)
		return -pte_internal;

	switch (ev->type) {
	case ptev_async_branch: {
		uint64_t at;

		/* Turn the async branch into an async disable. */
		at = ev->variant.async_branch.from;

		ev->type = ptev_async_disabled;
		ev->variant.async_disabled.at = at;

		return pt_qry_event_ip(&ev->variant.async_disabled.ip, ev,
				       decoder);
	}

	case ptev_async_paging:
	case ptev_async_vmcs:
	case ptev_exec_mode:
		/* These events are ordered after the async disable event. It
		 * is not quite clear what IP to give them.
		 *
		 * If we give them the async disable's source IP, we'd make an
		 * error if the IP is updated when applying the async disable
		 * event.
		 *
		 * If we give them the async disable's destination IP, we'd make
		 * an error if the IP is not updated when applying the async
		 * disable event. That's what our decoders do since tracing is
		 * likely to resume from there.
		 *
		 * In all cases, tracing will be disabled when those events are
		 * applied, so we may as well suppress the IP.
		 */
		ev->ip_suppressed = 1;

		return 0;

	default:
		break;
	}

	return -pte_bad_context;
}

int pt_qry_decode_tip_pgd(struct pt_query_decoder *decoder)
{
	struct pt_event *ev;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_qry_decode_ip(decoder);
	if (size < 0)
		return size;

	/* Process any pending events binding to TIP. */
	ev = pt_evq_dequeue(&decoder->evq, evb_tip);
	if (ev) {
		errcode = pt_qry_event_tip_pgd(ev, decoder);
		if (errcode < 0)
			return errcode;
	} else {
		/* This packet signals a standalone disabled event. */
		ev = pt_evq_standalone(&decoder->evq);
		if (!ev)
			return -pte_internal;
		ev->type = ptev_disabled;

		errcode = pt_qry_event_ip(&ev->variant.disabled.ip, ev,
					  decoder);
		if (errcode < 0)
			return errcode;

		errcode = pt_qry_event_time(ev, decoder);
		if (errcode < 0)
			return errcode;
	}

	/* We must have an event. Either the initial disabled event or one of
	 * the queued events.
	 */
	if (!ev)
		return -pte_internal;

	/* Publish the event. */
	decoder->event = ev;

	/* Process further pending events. */
	if (pt_evq_pending(&decoder->evq, evb_tip))
		return 0;

	return pt_qry_consume_tip_pgd(decoder, size);
}

static int pt_qry_consume_fup(struct pt_query_decoder *decoder, int size)
{
	if (!decoder)
		return -pte_internal;

	decoder->pos += size;
	return 0;
}

static int scan_for_erratum_bdm70(struct pt_packet_decoder *decoder)
{
	for (;;) {
		struct pt_packet packet;
		int errcode;

		errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
		if (errcode < 0) {
			/* Running out of packets is not an error. */
			if (errcode == -pte_eos)
				errcode = 0;

			return errcode;
		}

		switch (packet.type) {
		default:
			/* All other packets cancel our search.
			 *
			 * We do not enumerate those packets since we also
			 * want to include new packets.
			 */
			return 0;

		case ppt_tip_pge:
			/* We found it - the erratum applies. */
			return 1;

		case ppt_pad:
		case ppt_tsc:
		case ppt_cbr:
		case ppt_psbend:
		case ppt_pip:
		case ppt_mode:
		case ppt_vmcs:
		case ppt_tma:
		case ppt_mtc:
		case ppt_cyc:
		case ppt_mnt:
			/* Intentionally skip a few packets. */
			continue;
		}
	}
}

static int check_erratum_bdm70(const uint8_t *pos,
			       const struct pt_config *config)
{
	struct pt_packet_decoder decoder;
	int errcode;

	if (!pos || !config)
		return -pte_internal;

	errcode = pt_pkt_decoder_init(&decoder, config);
	if (errcode < 0)
		return errcode;

	errcode = pt_pkt_sync_set(&decoder, (uint64_t) (pos - config->begin));
	if (errcode >= 0)
		errcode = scan_for_erratum_bdm70(&decoder);

	pt_pkt_decoder_fini(&decoder);
	return errcode;
}

int pt_qry_header_fup(struct pt_query_decoder *decoder)
{
	struct pt_packet_ip packet;
	int errcode, size;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_ip(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	if (decoder->config.errata.bdm70 && !decoder->enabled) {
		errcode = check_erratum_bdm70(decoder->pos + size,
					      &decoder->config);
		if (errcode < 0)
			return errcode;

		if (errcode)
			return pt_qry_consume_fup(decoder, size);
	}

	errcode = pt_last_ip_update_ip(&decoder->ip, &packet, &decoder->config);
	if (errcode < 0)
		return errcode;

	/* Tracing is enabled if we have an IP in the header. */
	if (packet.ipc != pt_ipc_suppressed)
		decoder->enabled = 1;

	return pt_qry_consume_fup(decoder, size);
}

static int pt_qry_event_fup(struct pt_event *ev,
			    struct pt_query_decoder *decoder)
{
	if (!ev || !decoder)
		return -pte_internal;

	switch (ev->type) {
	case ptev_overflow:
		decoder->consume_packet = 1;

		/* We can't afford having a suppressed IP here. */
		return pt_last_ip_query(&ev->variant.overflow.ip,
					&decoder->ip);

	case ptev_tsx:
		if (!(ev->variant.tsx.aborted))
			decoder->consume_packet = 1;

		return pt_qry_event_ip(&ev->variant.tsx.ip, ev, decoder);

	case ptev_exstop:
		decoder->consume_packet = 1;

		return pt_qry_event_ip(&ev->variant.exstop.ip, ev, decoder);

	case ptev_mwait:
		decoder->consume_packet = 1;

		return pt_qry_event_ip(&ev->variant.mwait.ip, ev, decoder);

	case ptev_ptwrite:
		decoder->consume_packet = 1;

		return pt_qry_event_ip(&ev->variant.ptwrite.ip, ev, decoder);

	default:
		break;
	}

	return -pte_internal;
}

int pt_qry_decode_fup(struct pt_query_decoder *decoder)
{
	struct pt_event *ev;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_qry_decode_ip(decoder);
	if (size < 0)
		return size;

	/* Process any pending events binding to FUP. */
	ev = pt_evq_dequeue(&decoder->evq, evb_fup);
	if (ev) {
		errcode = pt_qry_event_fup(ev, decoder);
		if (errcode < 0)
			return errcode;

		/* Publish the event. */
		decoder->event = ev;

		/* Process further pending events. */
		if (pt_evq_pending(&decoder->evq, evb_fup))
			return 0;

		/* No further events.
		 *
		 * If none of the events consumed the packet, we're done.
		 */
		if (!decoder->consume_packet)
			return 0;

		/* We're done with this packet. Clear the flag we set
		 * previously and consume it.
		 */
		decoder->consume_packet = 0;
	} else {
		/* FUP indicates an async branch event; it binds to TIP.
		 *
		 * We do need an IP in this case.
		 */
		uint64_t ip;

		errcode = pt_last_ip_query(&ip, &decoder->ip);
		if (errcode < 0)
			return errcode;

		ev = pt_evq_enqueue(&decoder->evq, evb_tip);
		if (!ev)
			return -pte_nomem;

		ev->type = ptev_async_branch;
		ev->variant.async_branch.from = ip;

		errcode = pt_qry_event_time(ev, decoder);
		if (errcode < 0)
			return errcode;
	}

	return pt_qry_consume_fup(decoder, size);
}

int pt_qry_decode_pip(struct pt_query_decoder *decoder)
{
	struct pt_packet_pip packet;
	struct pt_event *event;
	int size, errcode;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_pip(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	/* Paging events are either standalone or bind to the same TIP packet
	 * as an in-flight async branch event.
	 */
	event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_branch);
	if (!event) {
		event = pt_evq_standalone(&decoder->evq);
		if (!event)
			return -pte_internal;
		event->type = ptev_paging;
		event->variant.paging.cr3 = packet.cr3;
		event->variant.paging.non_root = packet.nr;

		decoder->event = event;
	} else {
		event = pt_evq_enqueue(&decoder->evq, evb_tip);
		if (!event)
			return -pte_nomem;

		event->type = ptev_async_paging;
		event->variant.async_paging.cr3 = packet.cr3;
		event->variant.async_paging.non_root = packet.nr;
	}

	errcode = pt_qry_event_time(event, decoder);
	if (errcode < 0)
		return errcode;

	decoder->pos += size;
	return 0;
}

int pt_qry_header_pip(struct pt_query_decoder *decoder)
{
	struct pt_packet_pip packet;
	struct pt_event *event;
	int size;

	if (!decoder)
		return -pte_internal;

	size = pt_pkt_read_pip(&packet, decoder->pos, &decoder->config);
	if (size < 0)
		return size;

	/* Paging events are reported at the end of the PSB. */
	event = pt_evq_enqueue(&decoder->evq, evb_psbend);
	if (!event)
		return -pte_nomem;

	event->type = ptev_async_paging;
	event->variant.async_paging.cr3 = packet.cr3;
	event->variant.async_paging.non_root = packet.nr;

	decoder->pos += size;
	return 0;
}

static int pt_qry_event_psbend(struct pt_event *ev,
			       struct pt_query_decoder *decoder)
{
	int errcode;

	if (!ev || !decoder)
		return -pte_internal;

	/* PSB+ events are status updates. */
	ev->status_update = 1;

	errcode = pt_qry_event_time(ev, decoder);
	if (errcode < 0)
		return errcode;

	switch (ev->type) {
	case ptev_async_paging:
		return pt_qry_event_ip(&ev->variant.async_paging.ip, ev,
				       decoder);

	case ptev_exec_mode:
		return pt_qry_event_ip(&ev->variant.exec_mode.ip, ev, decoder);

	case ptev_tsx:
		return pt_qry_event_ip(&ev->variant.tsx.ip, ev, decoder);

	case ptev_async_vmcs:
		return pt_qry_event_ip(&ev->variant.async_vmcs.ip, ev,
				       decoder);

	case ptev_cbr:
		return 0;

	case ptev_mnt:
		/* Maintenance packets may appear anywhere. Do not mark them as
		 * status updates even if they appear in PSB+.
		 */
		ev->status_update = 0;
		return 0;

	default:
		break;
	}

	return -pte_internal;
}

static int pt_qry_process_pending_psb_events(struct pt_query_decoder *decoder)
{
	struct pt_event *ev;
	int errcode;

	if (!decoder)
		return -pte_internal;

	ev = pt_evq_dequeue(&decoder->evq, evb_psbend);
	if (!ev)
		return 0;

	errcode = pt_qry_event_psbend(ev, decoder);
	if (errcode < 0)
		return errcode;

	/* Publish the event. */
	decoder->event = ev;

	/* Signal a pending event. */
	return 1;
}

/* Create a standalone overflow event with tracing disabled.
 *
 * Creates and publishes the event and disables tracing in @decoder.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */
static int pt_qry_event_ovf_disabled(struct pt_query_decoder *decoder)
{
	struct pt_event *ev;

	if (!decoder)
		return -pte_internal;

	ev = pt_evq_standalone(&decoder->evq);
	if (!ev)
		return -pte_internal;

	ev->type = ptev_overflow;

	/* We suppress the IP to indicate that tracing has been disabled before
	 * the overflow resolved. There can be several events before tracing is
	 * enabled again.
	 */
	ev->ip_suppressed = 1;

	decoder->enabled = 0;
	decoder->event = ev;

	return pt_qry_event_time(ev, decoder);
}

/* Queues an overflow event with tracing enabled.
 *
 * Creates and enqueues the event and enables tracing in @decoder.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */
static int pt_qry_event_ovf_enabled(struct pt_query_decoder *decoder)
{
	struct pt_event *ev;

	if (!decoder)
		return -pte_internal;

	ev = pt_evq_enqueue(&decoder->evq, evb_fup);
	if (!ev)
		return -pte_internal;

	ev->type = ptev_overflow;

	decoder->enabled = 1;

	return pt_qry_event_time(ev, decoder);
}

/* Recover from SKD010.
 *
 * Creates and publishes an overflow event at @packet's IP payload.
 *
 * Further updates @decoder as follows:
 *
 * - set time tracking to @time and @tcal
 * - set the position to @offset
 * - set ip to @packet's IP payload
 * - set tracing to be enabled
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int skd010_recover(struct pt_query_decoder *decoder,
			  const struct pt_packet_ip *packet,
			  const struct pt_time_cal *tcal,
			  const struct pt_time *time, uint64_t offset)
{
	struct pt_last_ip ip;
	struct pt_event *ev;
	int errcode;

	if (!decoder || !packet || !tcal || !time)
		return -pte_internal;

	/* We use the decoder's IP. It should be newly initialized. */
	ip = decoder->ip;

	/* Extract the IP payload from the packet. */
	errcode = pt_last_ip_update_ip(&ip, packet, &decoder->config);
	if (errcode < 0)
		return errcode;

	/* Synthesize the overflow event. */
	ev = pt_evq_standalone(&decoder->evq);
	if (!ev)
		return -pte_internal;

	ev->type = ptev_overflow;

	/* We do need a full IP. */
	errcode = pt_last_ip_query(&ev->variant.overflow.ip, &ip);
	if (errcode < 0)
		return -pte_bad_context;

	/* We continue decoding at the given offset. */
	decoder->pos = decoder->config.begin + offset;

	/* Tracing is enabled. */
	decoder->enabled = 1;
	decoder->ip = ip;

	decoder->time = *time;
	decoder->tcal = *tcal;

	/* Publish the event. */
	decoder->event = ev;

	return pt_qry_event_time(ev, decoder);
}

/* Recover from SKD010 with tracing disabled.
 *
 * Creates and publishes a standalone overflow event.
 *
 * Further updates @decoder as follows:
 *
 * - set time tracking to @time and @tcal
 * - set the position to @offset
 * - set tracing to be disabled
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int skd010_recover_disabled(struct pt_query_decoder *decoder,
				   const struct pt_time_cal *tcal,
				   const struct pt_time *time, uint64_t offset)
{
	if (!decoder || !tcal || !time)
		return -pte_internal;

	decoder->time = *time;
	decoder->tcal = *tcal;

	/* We continue decoding at the given offset. */
	decoder->pos = decoder->config.begin + offset;

	return pt_qry_event_ovf_disabled(decoder);
}
1959
1960 /* Scan ahead for a packet at which to resume after an overflow.
1961 *
1962 * This function is called after an OVF without a corresponding FUP. This
1963 * normally means that the overflow resolved while tracing was disabled.
1964 *
1965 * With erratum SKD010 it might also mean that the FUP (or TIP.PGE) was dropped.
1966 * The overflow thus resolved while tracing was enabled (or tracing was enabled
1967 * after the overflow resolved). Search for an indication whether tracing is
1968 * enabled or disabled by scanning upcoming packets.
1969 *
1970 * If we can confirm that tracing is disabled, the erratum does not apply and we
1971 * can continue normally.
1972 *
1973 * If we can confirm that tracing is enabled, the erratum applies and we try to
1974 * recover by synchronizing at a later packet and a different IP. If we can't
1975 * recover, pretend the erratum didn't apply so we run into the error later.
1976 * Since this assumes that tracing is disabled, no harm should be done, i.e. no
1977 * bad trace should be generated.
1978 *
1979 * Returns zero if the overflow is handled.
1980 * Returns a positive value if the overflow is not yet handled.
1981 * Returns a negative error code otherwise.
1982 */
skd010_scan_for_ovf_resume(struct pt_packet_decoder * pkt,struct pt_query_decoder * decoder)1983 static int skd010_scan_for_ovf_resume(struct pt_packet_decoder *pkt,
1984 struct pt_query_decoder *decoder)
1985 {
1986 struct pt_time_cal tcal;
1987 struct pt_time time;
1988 struct {
1989 struct pt_time_cal tcal;
1990 struct pt_time time;
1991 uint64_t offset;
1992 } mode_tsx;
1993 int errcode;
1994
1995 if (!decoder)
1996 return -pte_internal;
1997
1998 /* Keep track of time as we skip packets. */
1999 time = decoder->time;
2000 tcal = decoder->tcal;
2001
2002 /* Keep track of a potential recovery point at MODE.TSX. */
2003 memset(&mode_tsx, 0, sizeof(mode_tsx));
2004
2005 for (;;) {
2006 struct pt_packet packet;
2007 uint64_t offset;
2008
2009 errcode = pt_pkt_get_offset(pkt, &offset);
2010 if (errcode < 0)
2011 return errcode;
2012
2013 errcode = pt_pkt_next(pkt, &packet, sizeof(packet));
2014 if (errcode < 0) {
2015 /* Let's assume the trace is correct if we run out
2016 * of packets.
2017 */
2018 if (errcode == -pte_eos)
2019 errcode = 1;
2020
2021 return errcode;
2022 }
2023
2024 switch (packet.type) {
2025 case ppt_tip_pge:
2026 /* Everything is fine. There is nothing to do. */
2027 return 1;
2028
2029 case ppt_tip_pgd:
2030 /* This is a clear indication that the erratum
2031 * applies.
2032 *
2033 * We synchronize after the disable.
2034 */
2035 return skd010_recover_disabled(decoder, &tcal, &time,
2036 offset + packet.size);
2037
2038 case ppt_tnt_8:
2039 case ppt_tnt_64:
2040 /* This is a clear indication that the erratum
2041 			 * applies.
2042 *
2043 * Yet, we can't recover from it as we wouldn't know how
2044 * many TNT bits will have been used when we eventually
2045 * find an IP packet at which to resume tracing.
2046 */
2047 return 1;
2048
2049 case ppt_pip:
2050 case ppt_vmcs:
2051 /* We could track those changes and synthesize extra
2052 * events after the overflow event when recovering from
2053 * the erratum. This requires infrastructure that we
2054 * don't currently have, though, so we're not going to
2055 * do it.
2056 *
2057 * Instead, we ignore those changes. We already don't
2058 * know how many other changes were lost in the
2059 * overflow.
2060 */
2061 break;
2062
2063 case ppt_mode:
2064 switch (packet.payload.mode.leaf) {
2065 case pt_mol_exec:
2066 /* A MODE.EXEC packet binds to TIP, i.e.
2067 *
2068 * TIP.PGE: everything is fine
2069 * TIP: the erratum applies
2070 *
2071 * In the TIP.PGE case, we may just follow the
2072 * normal code flow.
2073 *
2074 * In the TIP case, we'd be able to re-sync at
2075 * the TIP IP but have to skip packets up to and
2076 * including the TIP.
2077 *
2078 * We'd need to synthesize the MODE.EXEC event
2079 * after the overflow event when recovering at
2080 * the TIP. We lack the infrastructure for this
2081 * - it's getting too complicated.
2082 *
2083 * Instead, we ignore the execution mode change;
2084 * we already don't know how many more such
2085 * changes were lost in the overflow.
2086 */
2087 break;
2088
2089 case pt_mol_tsx:
2090 /* A MODE.TSX packet may be standalone or bind
2091 * to FUP.
2092 *
2093 * If this is the second MODE.TSX, we're sure
2094 * that tracing is disabled and everything is
2095 * fine.
2096 */
2097 if (mode_tsx.offset)
2098 return 1;
2099
2100 /* If we find the FUP this packet binds to, we
2101 * may recover at the FUP IP and restart
2102 * processing packets from here. Remember the
2103 * current state.
2104 */
2105 mode_tsx.offset = offset;
2106 mode_tsx.time = time;
2107 mode_tsx.tcal = tcal;
2108
2109 break;
2110 }
2111
2112 break;
2113
2114 case ppt_fup:
2115 /* This is a pretty good indication that tracing
2116 * is indeed enabled and the erratum applies.
2117 */
2118
2119 /* If we got a MODE.TSX packet before, we synchronize at
2120 * the FUP IP but continue decoding packets starting
2121 * from the MODE.TSX.
2122 */
2123 if (mode_tsx.offset)
2124 return skd010_recover(decoder,
2125 &packet.payload.ip,
2126 &mode_tsx.tcal,
2127 &mode_tsx.time,
2128 mode_tsx.offset);
2129
2130 /* Without a preceding MODE.TSX, this FUP is the start
2131 * of an async branch or disable. We synchronize at the
2132 * FUP IP and continue decoding packets from here.
2133 */
2134 return skd010_recover(decoder, &packet.payload.ip,
2135 &tcal, &time, offset);
2136
2137 case ppt_tip:
2138 			/* We synchronize at the TIP IP and continue decoding
2139 * packets after the TIP packet.
2140 */
2141 return skd010_recover(decoder, &packet.payload.ip,
2142 &tcal, &time,
2143 offset + packet.size);
2144
2145 case ppt_psb:
2146 /* We reached a synchronization point. Tracing is
2147 * enabled if and only if the PSB+ contains a FUP.
2148 */
2149 errcode = pt_qry_find_header_fup(&packet, pkt);
2150 if (errcode < 0) {
2151 /* If we ran out of packets, we can't tell.
2152 * Let's assume the trace is correct.
2153 */
2154 if (errcode == -pte_eos)
2155 errcode = 1;
2156
2157 return errcode;
2158 }
2159
2160 /* If there is no FUP, tracing is disabled and
2161 * everything is fine.
2162 */
2163 if (!errcode)
2164 return 1;
2165
2166 /* We should have a FUP. */
2167 if (packet.type != ppt_fup)
2168 return -pte_internal;
2169
2170 /* Otherwise, we may synchronize at the FUP IP and
2171 * continue decoding packets at the PSB.
2172 */
2173 return skd010_recover(decoder, &packet.payload.ip,
2174 &tcal, &time, offset);
2175
2176 case ppt_psbend:
2177 /* We shouldn't see this. */
2178 return -pte_bad_context;
2179
2180 case ppt_ovf:
2181 case ppt_stop:
2182 /* It doesn't matter if it had been enabled or disabled
2183 * before. We may resume normally.
2184 */
2185 return 1;
2186
2187 case ppt_unknown:
2188 case ppt_invalid:
2189 /* We can't skip this packet. */
2190 return 1;
2191
2192 case ppt_pad:
2193 case ppt_mnt:
2194 case ppt_pwre:
2195 case ppt_pwrx:
2196 /* Ignore this packet. */
2197 break;
2198
2199 case ppt_exstop:
2200 /* We may skip a stand-alone EXSTOP. */
2201 if (!packet.payload.exstop.ip)
2202 break;
2203
2204 fallthrough;
2205 case ppt_mwait:
2206 /* To skip this packet, we'd need to take care of the
2207 * FUP it binds to. This is getting complicated.
2208 */
2209 return 1;
2210
2211 case ppt_ptw:
2212 /* We may skip a stand-alone PTW. */
2213 if (!packet.payload.ptw.ip)
2214 break;
2215
2216 /* To skip this packet, we'd need to take care of the
2217 * FUP it binds to. This is getting complicated.
2218 */
2219 return 1;
2220
2221 case ppt_tsc:
2222 /* Keep track of time. */
2223 errcode = pt_qry_apply_tsc(&time, &tcal,
2224 &packet.payload.tsc,
2225 &decoder->config);
2226 if (errcode < 0)
2227 return errcode;
2228
2229 break;
2230
2231 case ppt_cbr:
2232 /* Keep track of time. */
2233 errcode = pt_qry_apply_cbr(&time, &tcal,
2234 &packet.payload.cbr,
2235 &decoder->config);
2236 if (errcode < 0)
2237 return errcode;
2238
2239 break;
2240
2241 case ppt_tma:
2242 /* Keep track of time. */
2243 errcode = pt_qry_apply_tma(&time, &tcal,
2244 &packet.payload.tma,
2245 &decoder->config);
2246 if (errcode < 0)
2247 return errcode;
2248
2249 break;
2250
2251 case ppt_mtc:
2252 /* Keep track of time. */
2253 errcode = pt_qry_apply_mtc(&time, &tcal,
2254 &packet.payload.mtc,
2255 &decoder->config);
2256 if (errcode < 0)
2257 return errcode;
2258
2259 break;
2260
2261 case ppt_cyc:
2262 /* Keep track of time. */
2263 errcode = pt_qry_apply_cyc(&time, &tcal,
2264 &packet.payload.cyc,
2265 &decoder->config);
2266 if (errcode < 0)
2267 return errcode;
2268
2269 break;
2270 }
2271 }
2272 }
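
/* To illustrate the scan above, consider these hypothetical packet
 * sequences following an OVF without FUP (illustration only, not taken
 * from a real trace):
 *
 *   OVF, TSC, TIP.PGE(ip)    tracing was indeed disabled; the erratum
 *                            does not apply and we resume normally
 *
 *   OVF, CBR, TIP.PGD(ip)    the erratum applies; tracing gets disabled
 *                            again, so we recover after the TIP.PGD with
 *                            tracing disabled
 *
 *   OVF, MTC, FUP(ip)        the erratum applies; we recover at the FUP
 *                            IP with tracing enabled
 *
 *   OVF, TNT, ...            the erratum applies, but we can't tell how
 *                            many TNT bits will already be consumed, so
 *                            we do not recover
 */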
2273
2274 static int pt_qry_handle_skd010(struct pt_query_decoder *decoder)
2275 {
2276 struct pt_packet_decoder pkt;
2277 uint64_t offset;
2278 int errcode;
2279
2280 if (!decoder)
2281 return -pte_internal;
2282
2283 errcode = pt_qry_get_offset(decoder, &offset);
2284 if (errcode < 0)
2285 return errcode;
2286
2287 errcode = pt_pkt_decoder_init(&pkt, &decoder->config);
2288 if (errcode < 0)
2289 return errcode;
2290
2291 errcode = pt_pkt_sync_set(&pkt, offset);
2292 if (errcode >= 0)
2293 errcode = skd010_scan_for_ovf_resume(&pkt, decoder);
2294
2295 pt_pkt_decoder_fini(&pkt);
2296 return errcode;
2297 }
2298
2299 /* Scan ahead for an indication whether tracing is enabled or disabled.
2300 *
2301 * Returns zero if tracing is clearly disabled.
2302 * Returns a positive integer if tracing is enabled or if we can't tell.
2303 * Returns a negative error code otherwise.
2304 */
2305 static int apl12_tracing_is_disabled(struct pt_packet_decoder *decoder)
2306 {
2307 if (!decoder)
2308 return -pte_internal;
2309
2310 for (;;) {
2311 struct pt_packet packet;
2312 int status;
2313
2314 status = pt_pkt_next(decoder, &packet, sizeof(packet));
2315 if (status < 0) {
2316 /* Running out of packets is not an error. */
2317 if (status == -pte_eos)
2318 status = 1;
2319
2320 return status;
2321 }
2322
2323 switch (packet.type) {
2324 default:
2325 /* Skip other packets. */
2326 break;
2327
2328 case ppt_stop:
2329 /* Tracing is disabled before a stop. */
2330 return 0;
2331
2332 case ppt_tip_pge:
2333 /* Tracing gets enabled - it must have been disabled. */
2334 return 0;
2335
2336 case ppt_tnt_8:
2337 case ppt_tnt_64:
2338 case ppt_tip:
2339 case ppt_tip_pgd:
2340 /* Those packets are only generated when tracing is
2341 * enabled. We're done.
2342 */
2343 return 1;
2344
2345 case ppt_psb:
2346 /* We reached a synchronization point. Tracing is
2347 * enabled if and only if the PSB+ contains a FUP.
2348 */
2349 status = pt_qry_find_header_fup(&packet, decoder);
2350
2351 /* If we ran out of packets, we can't tell. */
2352 if (status == -pte_eos)
2353 status = 1;
2354
2355 return status;
2356
2357 case ppt_psbend:
2358 /* We shouldn't see this. */
2359 return -pte_bad_context;
2360
2361 case ppt_ovf:
2362 /* It doesn't matter - we run into the next overflow. */
2363 return 1;
2364
2365 case ppt_unknown:
2366 case ppt_invalid:
2367 /* We can't skip this packet. */
2368 return 1;
2369 }
2370 }
2371 }
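
/* For example (hypothetical sequences seen after the suspect FUP):
 *
 *   ..., MTC, TIP.PGE(ip)    tracing gets enabled, so it must have been
 *                            disabled - the extra FUP was spurious
 *
 *   ..., CYC, TNT            TNT is only generated while tracing is
 *                            enabled - the FUP was genuine
 */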
2372
2373 /* Apply workaround for erratum APL12.
2374 *
2375 * We resume from @offset (relative to @decoder->pos) with tracing disabled. On
2376 * our way to the resume location we process packets to update our state.
2377 *
2378 * Any event will be dropped.
2379 *
2380 * Returns zero on success, a negative pt_error_code otherwise.
2381 */
2382 static int apl12_resume_disabled(struct pt_query_decoder *decoder,
2383 struct pt_packet_decoder *pkt,
2384 unsigned int offset)
2385 {
2386 uint64_t begin, end;
2387 int errcode;
2388
2389 if (!decoder)
2390 return -pte_internal;
2391
2392 errcode = pt_qry_get_offset(decoder, &begin);
2393 if (errcode < 0)
2394 return errcode;
2395
2396 errcode = pt_pkt_sync_set(pkt, begin);
2397 if (errcode < 0)
2398 return errcode;
2399
2400 end = begin + offset;
2401 for (;;) {
2402 struct pt_packet packet;
2403 uint64_t next;
2404
2405 errcode = pt_pkt_next(pkt, &packet, sizeof(packet));
2406 if (errcode < 0) {
2407 /* Running out of packets is not an error. */
2408 if (errcode == -pte_eos)
2409 errcode = 0;
2410
2411 return errcode;
2412 }
2413
2414 /* The offset is the start of the next packet. */
2415 errcode = pt_pkt_get_offset(pkt, &next);
2416 if (errcode < 0)
2417 return errcode;
2418
2419 /* We're done when we reach @offset.
2420 *
2421 * The current @packet will be the FUP after which we started
2422 * our search. We skip it.
2423 *
2424 * Check that we're not accidentally proceeding past @offset.
2425 */
2426 if (end <= next) {
2427 if (end < next)
2428 return -pte_internal;
2429
2430 break;
2431 }
2432
2433 switch (packet.type) {
2434 default:
2435 /* Skip other packets. */
2436 break;
2437
2438 case ppt_mode:
2439 case ppt_pip:
2440 case ppt_vmcs:
2441 /* We should not encounter those.
2442 *
2443 			 * We should not encounter many packets at all, but these
2444 			 * are state-relevant, so let's check for them explicitly.
2445 */
2446 return -pte_internal;
2447
2448 case ppt_tsc:
2449 /* Keep track of time. */
2450 errcode = pt_qry_apply_tsc(&decoder->time,
2451 &decoder->tcal,
2452 &packet.payload.tsc,
2453 &decoder->config);
2454 if (errcode < 0)
2455 return errcode;
2456
2457 break;
2458
2459 case ppt_cbr:
2460 /* Keep track of time. */
2461 errcode = pt_qry_apply_cbr(&decoder->time,
2462 &decoder->tcal,
2463 &packet.payload.cbr,
2464 &decoder->config);
2465 if (errcode < 0)
2466 return errcode;
2467
2468 break;
2469
2470 case ppt_tma:
2471 /* Keep track of time. */
2472 errcode = pt_qry_apply_tma(&decoder->time,
2473 &decoder->tcal,
2474 &packet.payload.tma,
2475 &decoder->config);
2476 if (errcode < 0)
2477 return errcode;
2478
2479 break;
2480
2481 case ppt_mtc:
2482 /* Keep track of time. */
2483 errcode = pt_qry_apply_mtc(&decoder->time,
2484 &decoder->tcal,
2485 &packet.payload.mtc,
2486 &decoder->config);
2487 if (errcode < 0)
2488 return errcode;
2489
2490 break;
2491
2492 case ppt_cyc:
2493 /* Keep track of time. */
2494 errcode = pt_qry_apply_cyc(&decoder->time,
2495 &decoder->tcal,
2496 &packet.payload.cyc,
2497 &decoder->config);
2498 if (errcode < 0)
2499 return errcode;
2500
2501 break;
2502 }
2503 }
2504
2505 decoder->pos += offset;
2506
2507 return pt_qry_event_ovf_disabled(decoder);
2508 }
2509
2510 /* Handle erratum APL12.
2511 *
2512 * This function is called when a FUP is found after an OVF. The @offset
2513 * argument gives the relative offset from @decoder->pos to after the FUP.
2514 *
2515  * A FUP after OVF normally indicates that the overflow resolved while tracing
2516  * was enabled.  Due to erratum APL12, however, the overflow may have resolved
2517  * while tracing was disabled yet still have generated a FUP.
2518 *
2519 * We scan ahead for an indication whether tracing is actually disabled. If we
2520 * find one, the erratum applies and we proceed from after the FUP packet.
2521 *
2522 * This will drop any CBR or MTC events. We will update @decoder's timing state
2523 * on CBR but drop the event.
2524 *
2525 * Returns zero if the erratum was handled.
2526 * Returns a positive integer if the erratum was not handled.
2527 * Returns a negative pt_error_code otherwise.
2528 */
2529 static int pt_qry_handle_apl12(struct pt_query_decoder *decoder,
2530 unsigned int offset)
2531 {
2532 struct pt_packet_decoder pkt;
2533 uint64_t here;
2534 int status;
2535
2536 if (!decoder)
2537 return -pte_internal;
2538
2539 status = pt_qry_get_offset(decoder, &here);
2540 if (status < 0)
2541 return status;
2542
2543 status = pt_pkt_decoder_init(&pkt, &decoder->config);
2544 if (status < 0)
2545 return status;
2546
2547 status = pt_pkt_sync_set(&pkt, here + offset);
2548 if (status >= 0) {
2549 status = apl12_tracing_is_disabled(&pkt);
2550 if (!status)
2551 status = apl12_resume_disabled(decoder, &pkt, offset);
2552 }
2553
2554 pt_pkt_decoder_fini(&pkt);
2555 return status;
2556 }
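
/* A sketch of the APL12 case handled above, assuming the hypothetical
 * packet layout:
 *
 *   OVF, CYC, FUP, MTC, TIP.PGE
 *
 * After consuming the OVF, @decoder->pos points to the CYC and @offset
 * points past the FUP.  The scan starts at the MTC and finds the TIP.PGE,
 * so tracing must have been disabled and the FUP is spurious.
 * apl12_resume_disabled() then replays the CYC to keep timing intact,
 * skips the FUP, and continues from the MTC with tracing disabled.
 */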
2557
2558 /* Apply workaround for erratum APL11.
2559 *
2560 * We search for a TIP.PGD and, if we found one, resume from after that packet
2561 * with tracing disabled. On our way to the resume location we process packets
2562 * to update our state.
2563 *
2564 * If we don't find a TIP.PGD but instead some other packet that indicates that
2565 * tracing is disabled, indicate that the erratum does not apply.
2566 *
2567 * Any event will be dropped.
2568 *
2569 * Returns zero if the erratum was handled.
2570 * Returns a positive integer if the erratum was not handled.
2571 * Returns a negative pt_error_code otherwise.
2572 */
2573 static int apl11_apply(struct pt_query_decoder *decoder,
2574 struct pt_packet_decoder *pkt)
2575 {
2576 struct pt_time_cal tcal;
2577 struct pt_time time;
2578
2579 if (!decoder)
2580 return -pte_internal;
2581
2582 time = decoder->time;
2583 tcal = decoder->tcal;
2584 for (;;) {
2585 struct pt_packet packet;
2586 int errcode;
2587
2588 errcode = pt_pkt_next(pkt, &packet, sizeof(packet));
2589 if (errcode < 0)
2590 return errcode;
2591
2592 switch (packet.type) {
2593 case ppt_tip_pgd: {
2594 uint64_t offset;
2595
2596 /* We found a TIP.PGD. The erratum applies.
2597 *
2598 * Resume from here with tracing disabled.
2599 */
2600 errcode = pt_pkt_get_offset(pkt, &offset);
2601 if (errcode < 0)
2602 return errcode;
2603
2604 decoder->time = time;
2605 decoder->tcal = tcal;
2606 decoder->pos = decoder->config.begin + offset;
2607
2608 return pt_qry_event_ovf_disabled(decoder);
2609 }
2610
2611 case ppt_invalid:
2612 return -pte_bad_opc;
2613
2614 case ppt_fup:
2615 case ppt_psb:
2616 case ppt_tip_pge:
2617 case ppt_stop:
2618 case ppt_ovf:
2619 case ppt_mode:
2620 case ppt_pip:
2621 case ppt_vmcs:
2622 case ppt_exstop:
2623 case ppt_mwait:
2624 case ppt_pwre:
2625 case ppt_pwrx:
2626 case ppt_ptw:
2627 /* The erratum does not apply. */
2628 return 1;
2629
2630 case ppt_unknown:
2631 case ppt_pad:
2632 case ppt_mnt:
2633 /* Skip those packets. */
2634 break;
2635
2636 case ppt_psbend:
2637 case ppt_tip:
2638 case ppt_tnt_8:
2639 case ppt_tnt_64:
2640 return -pte_bad_context;
2641
2642
2643 case ppt_tsc:
2644 /* Keep track of time. */
2645 errcode = pt_qry_apply_tsc(&time, &tcal,
2646 &packet.payload.tsc,
2647 &decoder->config);
2648 if (errcode < 0)
2649 return errcode;
2650
2651 break;
2652
2653 case ppt_cbr:
2654 /* Keep track of time. */
2655 errcode = pt_qry_apply_cbr(&time, &tcal,
2656 &packet.payload.cbr,
2657 &decoder->config);
2658 if (errcode < 0)
2659 return errcode;
2660
2661 break;
2662
2663 case ppt_tma:
2664 /* Keep track of time. */
2665 errcode = pt_qry_apply_tma(&time, &tcal,
2666 &packet.payload.tma,
2667 &decoder->config);
2668 if (errcode < 0)
2669 return errcode;
2670
2671 break;
2672
2673 case ppt_mtc:
2674 /* Keep track of time. */
2675 errcode = pt_qry_apply_mtc(&time, &tcal,
2676 &packet.payload.mtc,
2677 &decoder->config);
2678 if (errcode < 0)
2679 return errcode;
2680
2681 break;
2682
2683 case ppt_cyc:
2684 /* Keep track of time. */
2685 errcode = pt_qry_apply_cyc(&time, &tcal,
2686 &packet.payload.cyc,
2687 &decoder->config);
2688 if (errcode < 0)
2689 return errcode;
2690
2691 break;
2692 }
2693 }
2694 }
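
/* For illustration, a hypothetical APL11 packet sequence:
 *
 *   OVF, MTC, TIP.PGD(ip)
 *
 * Searching for the FUP the OVF should bind to, we diagnose the TIP.PGD
 * as -pte_bad_context.  The workaround above replays the MTC to keep
 * timing intact and resumes from after the TIP.PGD with tracing disabled.
 */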
2695
2696 /* Handle erratum APL11.
2697 *
2698 * This function is called when we diagnose a bad packet while searching for a
2699 * FUP after an OVF.
2700 *
2701 * Due to erratum APL11 we may get an extra TIP.PGD after the OVF. Find that
2702 * TIP.PGD and resume from there with tracing disabled.
2703 *
2704 * This will drop any CBR or MTC events. We will update @decoder's timing state
2705 * on CBR but drop the event.
2706 *
2707 * Returns zero if the erratum was handled.
2708 * Returns a positive integer if the erratum was not handled.
2709 * Returns a negative pt_error_code otherwise.
2710 */
2711 static int pt_qry_handle_apl11(struct pt_query_decoder *decoder)
2712 {
2713 struct pt_packet_decoder pkt;
2714 uint64_t offset;
2715 int status;
2716
2717 if (!decoder)
2718 return -pte_internal;
2719
2720 status = pt_qry_get_offset(decoder, &offset);
2721 if (status < 0)
2722 return status;
2723
2724 status = pt_pkt_decoder_init(&pkt, &decoder->config);
2725 if (status < 0)
2726 return status;
2727
2728 status = pt_pkt_sync_set(&pkt, offset);
2729 if (status >= 0)
2730 status = apl11_apply(decoder, &pkt);
2731
2732 pt_pkt_decoder_fini(&pkt);
2733 return status;
2734 }
2735
2736 static int pt_pkt_find_ovf_fup(struct pt_packet_decoder *decoder)
2737 {
2738 for (;;) {
2739 struct pt_packet packet;
2740 int errcode;
2741
2742 errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
2743 if (errcode < 0)
2744 return errcode;
2745
2746 switch (packet.type) {
2747 case ppt_fup:
2748 return 1;
2749
2750 case ppt_invalid:
2751 return -pte_bad_opc;
2752
2753 case ppt_unknown:
2754 case ppt_pad:
2755 case ppt_mnt:
2756 case ppt_cbr:
2757 case ppt_tsc:
2758 case ppt_tma:
2759 case ppt_mtc:
2760 case ppt_cyc:
2761 continue;
2762
2763 case ppt_psb:
2764 case ppt_tip_pge:
2765 case ppt_mode:
2766 case ppt_pip:
2767 case ppt_vmcs:
2768 case ppt_stop:
2769 case ppt_ovf:
2770 case ppt_exstop:
2771 case ppt_mwait:
2772 case ppt_pwre:
2773 case ppt_pwrx:
2774 case ppt_ptw:
2775 return 0;
2776
2777 case ppt_psbend:
2778 case ppt_tip:
2779 case ppt_tip_pgd:
2780 case ppt_tnt_8:
2781 case ppt_tnt_64:
2782 return -pte_bad_context;
2783 }
2784 }
2785 }
2786
2787 /* Find a FUP to which the current OVF may bind.
2788 *
2789 * Scans the trace for a FUP or for a packet that indicates that tracing is
2790 * disabled.
2791 *
2792  * Returns the relative offset of the packet following the found FUP on success.
2793 * Returns zero if no FUP is found and tracing is assumed to be disabled.
2794 * Returns a negative pt_error_code otherwise.
2795 */
2796 static int pt_qry_find_ovf_fup(const struct pt_query_decoder *decoder)
2797 {
2798 struct pt_packet_decoder pkt;
2799 uint64_t begin, end, offset;
2800 int status;
2801
2802 if (!decoder)
2803 return -pte_internal;
2804
2805 status = pt_qry_get_offset(decoder, &begin);
2806 if (status < 0)
2807 return status;
2808
2809 status = pt_pkt_decoder_init(&pkt, &decoder->config);
2810 if (status < 0)
2811 return status;
2812
2813 status = pt_pkt_sync_set(&pkt, begin);
2814 if (status >= 0) {
2815 status = pt_pkt_find_ovf_fup(&pkt);
2816 if (status > 0) {
2817 status = pt_pkt_get_offset(&pkt, &end);
2818 if (status < 0)
2819 return status;
2820
2821 if (end <= begin)
2822 return -pte_overflow;
2823
2824 offset = end - begin;
2825 if (INT_MAX < offset)
2826 return -pte_overflow;
2827
2828 status = (int) offset;
2829 }
2830 }
2831
2832 pt_pkt_decoder_fini(&pkt);
2833 return status;
2834 }
2835
2836 int pt_qry_decode_ovf(struct pt_query_decoder *decoder)
2837 {
2838 struct pt_time time;
2839 int status, offset;
2840
2841 if (!decoder)
2842 return -pte_internal;
2843
2844 status = pt_qry_process_pending_psb_events(decoder);
2845 if (status < 0)
2846 return status;
2847
2848 /* If we have any pending psbend events, we're done for now. */
2849 if (status)
2850 return 0;
2851
2852 /* Reset the decoder state but preserve timing. */
2853 time = decoder->time;
2854 pt_qry_reset(decoder);
2855 decoder->time = time;
2856
2857 /* We must consume the OVF before we search for the binding packet. */
2858 decoder->pos += ptps_ovf;
2859
2860 /* Overflow binds to either FUP or TIP.PGE.
2861 *
2862 * If the overflow can be resolved while PacketEn=1 it binds to FUP. We
2863 	 * can see timing packets between OVF and FUP but that's it.
2864 *
2865 * Otherwise, PacketEn will be zero when the overflow resolves and OVF
2866 * binds to TIP.PGE. There can be packets between OVF and TIP.PGE that
2867 * do not depend on PacketEn.
2868 *
2869 * We don't need to decode everything until TIP.PGE, however. As soon
2870 	 * as we see a non-timing non-FUP packet, we know that tracing was
2871 	 * disabled before the overflow resolved.
2872 */
2873 offset = pt_qry_find_ovf_fup(decoder);
2874 if (offset <= 0) {
2875 /* Check for erratum SKD010.
2876 *
2877 * The FUP may have been dropped. If we can figure out that
2878 * tracing is enabled and hence the FUP is missing, we resume
2879 * at a later packet and a different IP.
2880 */
2881 if (decoder->config.errata.skd010) {
2882 status = pt_qry_handle_skd010(decoder);
2883 if (status <= 0)
2884 return status;
2885 }
2886
2887 /* Check for erratum APL11.
2888 *
2889 * We may have gotten an extra TIP.PGD, which should be
2890 * diagnosed by our search for a subsequent FUP.
2891 */
2892 if (decoder->config.errata.apl11 &&
2893 (offset == -pte_bad_context)) {
2894 status = pt_qry_handle_apl11(decoder);
2895 if (status <= 0)
2896 return status;
2897 }
2898
2899 /* Report the original error from searching for the FUP packet
2900 * if we were not able to fix the trace.
2901 *
2902 * We treat an overflow at the end of the trace as standalone.
2903 */
2904 if (offset < 0 && offset != -pte_eos)
2905 return offset;
2906
2907 return pt_qry_event_ovf_disabled(decoder);
2908 } else {
2909 /* Check for erratum APL12.
2910 *
2911 * We may get an extra FUP even though the overflow resolved
2912 * with tracing disabled.
2913 */
2914 if (decoder->config.errata.apl12) {
2915 status = pt_qry_handle_apl12(decoder,
2916 (unsigned int) offset);
2917 if (status <= 0)
2918 return status;
2919 }
2920
2921 return pt_qry_event_ovf_enabled(decoder);
2922 }
2923 }
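
/* For illustration, the two binding cases handled above (hypothetical
 * packet sequences):
 *
 *   OVF, MTC, CYC, FUP(ip)       the overflow resolved while tracing was
 *                                enabled; only timing packets intervene
 *                                and the OVF binds to the FUP
 *
 *   OVF, TSC, PIP, TIP.PGE(ip)   the overflow resolved while tracing was
 *                                disabled; the PIP already tells us, so
 *                                we need not scan all the way to the
 *                                TIP.PGE
 */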
2924
2925 static int pt_qry_decode_mode_exec(struct pt_query_decoder *decoder,
2926 const struct pt_packet_mode_exec *packet)
2927 {
2928 struct pt_event *event;
2929
2930 if (!decoder || !packet)
2931 return -pte_internal;
2932
2933 /* MODE.EXEC binds to TIP. */
2934 event = pt_evq_enqueue(&decoder->evq, evb_tip);
2935 if (!event)
2936 return -pte_nomem;
2937
2938 event->type = ptev_exec_mode;
2939 event->variant.exec_mode.mode = pt_get_exec_mode(packet);
2940
2941 return pt_qry_event_time(event, decoder);
2942 }
2943
2944 static int pt_qry_decode_mode_tsx(struct pt_query_decoder *decoder,
2945 const struct pt_packet_mode_tsx *packet)
2946 {
2947 struct pt_event *event;
2948
2949 if (!decoder || !packet)
2950 return -pte_internal;
2951
2952 /* MODE.TSX is standalone if tracing is disabled. */
2953 if (!decoder->enabled) {
2954 event = pt_evq_standalone(&decoder->evq);
2955 if (!event)
2956 return -pte_internal;
2957
2958 /* We don't have an IP in this case. */
2959 event->variant.tsx.ip = 0;
2960 event->ip_suppressed = 1;
2961
2962 /* Publish the event. */
2963 decoder->event = event;
2964 } else {
2965 /* MODE.TSX binds to FUP. */
2966 event = pt_evq_enqueue(&decoder->evq, evb_fup);
2967 if (!event)
2968 return -pte_nomem;
2969 }
2970
2971 event->type = ptev_tsx;
2972 event->variant.tsx.speculative = packet->intx;
2973 event->variant.tsx.aborted = packet->abrt;
2974
2975 return pt_qry_event_time(event, decoder);
2976 }
2977
2978 int pt_qry_decode_mode(struct pt_query_decoder *decoder)
2979 {
2980 struct pt_packet_mode packet;
2981 int size, errcode;
2982
2983 if (!decoder)
2984 return -pte_internal;
2985
2986 size = pt_pkt_read_mode(&packet, decoder->pos, &decoder->config);
2987 if (size < 0)
2988 return size;
2989
2990 errcode = 0;
2991 switch (packet.leaf) {
2992 case pt_mol_exec:
2993 errcode = pt_qry_decode_mode_exec(decoder, &packet.bits.exec);
2994 break;
2995
2996 case pt_mol_tsx:
2997 errcode = pt_qry_decode_mode_tsx(decoder, &packet.bits.tsx);
2998 break;
2999 }
3000
3001 if (errcode < 0)
3002 return errcode;
3003
3004 decoder->pos += size;
3005 return 0;
3006 }
3007
3008 int pt_qry_header_mode(struct pt_query_decoder *decoder)
3009 {
3010 struct pt_packet_mode packet;
3011 struct pt_event *event;
3012 int size;
3013
3014 if (!decoder)
3015 return -pte_internal;
3016
3017 size = pt_pkt_read_mode(&packet, decoder->pos, &decoder->config);
3018 if (size < 0)
3019 return size;
3020
3021 /* Inside the header, events are reported at the end. */
3022 event = pt_evq_enqueue(&decoder->evq, evb_psbend);
3023 if (!event)
3024 return -pte_nomem;
3025
3026 switch (packet.leaf) {
3027 case pt_mol_exec:
3028 event->type = ptev_exec_mode;
3029 event->variant.exec_mode.mode =
3030 pt_get_exec_mode(&packet.bits.exec);
3031 break;
3032
3033 case pt_mol_tsx:
3034 event->type = ptev_tsx;
3035 event->variant.tsx.speculative = packet.bits.tsx.intx;
3036 event->variant.tsx.aborted = packet.bits.tsx.abrt;
3037 break;
3038 }
3039
3040 decoder->pos += size;
3041 return 0;
3042 }
3043
3044 int pt_qry_decode_psbend(struct pt_query_decoder *decoder)
3045 {
3046 int status;
3047
3048 if (!decoder)
3049 return -pte_internal;
3050
3051 status = pt_qry_process_pending_psb_events(decoder);
3052 if (status < 0)
3053 return status;
3054
3055 /* If we had any psb events, we're done for now. */
3056 if (status)
3057 return 0;
3058
3059 /* Skip the psbend extended opcode that we fetched before if no more
3060 * psbend events are pending.
3061 */
3062 decoder->pos += ptps_psbend;
3063 return 0;
3064 }
3065
3066 int pt_qry_decode_tsc(struct pt_query_decoder *decoder)
3067 {
3068 struct pt_packet_tsc packet;
3069 int size, errcode;
3070
3071 if (!decoder)
3072 return -pte_internal;
3073
3074 size = pt_pkt_read_tsc(&packet, decoder->pos, &decoder->config);
3075 if (size < 0)
3076 return size;
3077
3078 errcode = pt_qry_apply_tsc(&decoder->time, &decoder->tcal,
3079 &packet, &decoder->config);
3080 if (errcode < 0)
3081 return errcode;
3082
3083 decoder->pos += size;
3084 return 0;
3085 }
3086
3087 int pt_qry_header_tsc(struct pt_query_decoder *decoder)
3088 {
3089 struct pt_packet_tsc packet;
3090 int size, errcode;
3091
3092 if (!decoder)
3093 return -pte_internal;
3094
3095 size = pt_pkt_read_tsc(&packet, decoder->pos, &decoder->config);
3096 if (size < 0)
3097 return size;
3098
3099 errcode = pt_qry_apply_header_tsc(&decoder->time, &decoder->tcal,
3100 &packet, &decoder->config);
3101 if (errcode < 0)
3102 return errcode;
3103
3104 decoder->pos += size;
3105 return 0;
3106 }
3107
3108 int pt_qry_decode_cbr(struct pt_query_decoder *decoder)
3109 {
3110 struct pt_packet_cbr packet;
3111 struct pt_event *event;
3112 int size, errcode;
3113
3114 if (!decoder)
3115 return -pte_internal;
3116
3117 size = pt_pkt_read_cbr(&packet, decoder->pos, &decoder->config);
3118 if (size < 0)
3119 return size;
3120
3121 errcode = pt_qry_apply_cbr(&decoder->time, &decoder->tcal,
3122 &packet, &decoder->config);
3123 if (errcode < 0)
3124 return errcode;
3125
3126 event = pt_evq_standalone(&decoder->evq);
3127 if (!event)
3128 return -pte_internal;
3129
3130 event->type = ptev_cbr;
3131 event->variant.cbr.ratio = packet.ratio;
3132
3133 decoder->event = event;
3134
3135 errcode = pt_qry_event_time(event, decoder);
3136 if (errcode < 0)
3137 return errcode;
3138
3139 decoder->pos += size;
3140 return 0;
3141 }
3142
3143 int pt_qry_header_cbr(struct pt_query_decoder *decoder)
3144 {
3145 struct pt_packet_cbr packet;
3146 struct pt_event *event;
3147 int size, errcode;
3148
3149 if (!decoder)
3150 return -pte_internal;
3151
3152 size = pt_pkt_read_cbr(&packet, decoder->pos, &decoder->config);
3153 if (size < 0)
3154 return size;
3155
3156 errcode = pt_qry_apply_header_cbr(&decoder->time, &decoder->tcal,
3157 &packet, &decoder->config);
3158 if (errcode < 0)
3159 return errcode;
3160
3161 event = pt_evq_enqueue(&decoder->evq, evb_psbend);
3162 if (!event)
3163 return -pte_nomem;
3164
3165 event->type = ptev_cbr;
3166 event->variant.cbr.ratio = packet.ratio;
3167
3168 decoder->pos += size;
3169 return 0;
3170 }
3171
3172 int pt_qry_decode_tma(struct pt_query_decoder *decoder)
3173 {
3174 struct pt_packet_tma packet;
3175 int size, errcode;
3176
3177 if (!decoder)
3178 return -pte_internal;
3179
3180 size = pt_pkt_read_tma(&packet, decoder->pos, &decoder->config);
3181 if (size < 0)
3182 return size;
3183
3184 errcode = pt_qry_apply_tma(&decoder->time, &decoder->tcal,
3185 &packet, &decoder->config);
3186 if (errcode < 0)
3187 return errcode;
3188
3189 decoder->pos += size;
3190 return 0;
3191 }
3192
3193 int pt_qry_decode_mtc(struct pt_query_decoder *decoder)
3194 {
3195 struct pt_packet_mtc packet;
3196 int size, errcode;
3197
3198 if (!decoder)
3199 return -pte_internal;
3200
3201 size = pt_pkt_read_mtc(&packet, decoder->pos, &decoder->config);
3202 if (size < 0)
3203 return size;
3204
3205 errcode = pt_qry_apply_mtc(&decoder->time, &decoder->tcal,
3206 &packet, &decoder->config);
3207 if (errcode < 0)
3208 return errcode;
3209
3210 decoder->pos += size;
3211 return 0;
3212 }
3213
3214 static int check_erratum_skd007(struct pt_query_decoder *decoder,
3215 const struct pt_packet_cyc *packet, int size)
3216 {
3217 const uint8_t *pos;
3218 uint16_t payload;
3219
3220 if (!decoder || !packet || size < 0)
3221 return -pte_internal;
3222
3223 /* It must be a 2-byte CYC. */
3224 if (size != 2)
3225 return 0;
3226
3227 payload = (uint16_t) packet->value;
3228
3229 /* The 2nd byte of the CYC payload must look like an ext opcode. */
3230 if ((payload & ~0x1f) != 0x20)
3231 return 0;
3232
3233 /* Skip this CYC packet. */
3234 pos = decoder->pos + size;
3235 if (decoder->config.end <= pos)
3236 return 0;
3237
3238 /* See if we got a second CYC that looks like an OVF ext opcode. */
3239 if (*pos != pt_ext_ovf)
3240 return 0;
3241
3242 /* We shouldn't get back-to-back CYCs unless they are sent when the
3243 * counter wraps around. In this case, we'd expect a full payload.
3244 *
3245 * Since we got two non-full CYC packets, we assume the erratum hit.
3246 */
3247
3248 return 1;
3249 }
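
/* A worked example for the check above, using hypothetical trace bytes:
 *
 *   0x07 0x02 0xf3
 *
 * Read as a CYC, 0x07 0x02 is a 2-byte packet with payload 0x20, i.e.
 * (payload & ~0x1f) == 0x20, and its second byte 0x02 equals the
 * extended opcode prefix.  The following 0xf3 is the OVF extended
 * opcode, so the bytes 0x02 0xf3 likely are an OVF packet overlapping
 * the CYC.  The caller then skips only the first CYC byte and decodes
 * the OVF.
 */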
3250
3251 int pt_qry_decode_cyc(struct pt_query_decoder *decoder)
3252 {
3253 struct pt_packet_cyc packet;
3254 struct pt_config *config;
3255 int size, errcode;
3256
3257 if (!decoder)
3258 return -pte_internal;
3259
3260 config = &decoder->config;
3261
3262 size = pt_pkt_read_cyc(&packet, decoder->pos, config);
3263 if (size < 0)
3264 return size;
3265
3266 if (config->errata.skd007) {
3267 errcode = check_erratum_skd007(decoder, &packet, size);
3268 if (errcode < 0)
3269 return errcode;
3270
3271 /* If the erratum hits, we ignore the partial CYC and instead
3272 * process the OVF following/overlapping it.
3273 */
3274 if (errcode) {
3275 /* We skip the first byte of the CYC, which brings us
3276 * to the beginning of the OVF packet.
3277 */
3278 decoder->pos += 1;
3279 return 0;
3280 }
3281 }
3282
3283 errcode = pt_qry_apply_cyc(&decoder->time, &decoder->tcal,
3284 &packet, config);
3285 if (errcode < 0)
3286 return errcode;
3287
3288 decoder->pos += size;
3289 return 0;
3290 }
3291
3292 int pt_qry_decode_stop(struct pt_query_decoder *decoder)
3293 {
3294 struct pt_event *event;
3295 int errcode;
3296
3297 if (!decoder)
3298 return -pte_internal;
3299
3300 /* Stop events are reported immediately. */
3301 event = pt_evq_standalone(&decoder->evq);
3302 if (!event)
3303 return -pte_internal;
3304
3305 event->type = ptev_stop;
3306
3307 decoder->event = event;
3308
3309 errcode = pt_qry_event_time(event, decoder);
3310 if (errcode < 0)
3311 return errcode;
3312
3313 decoder->pos += ptps_stop;
3314 return 0;
3315 }
3316
3317 int pt_qry_header_vmcs(struct pt_query_decoder *decoder)
3318 {
3319 struct pt_packet_vmcs packet;
3320 struct pt_event *event;
3321 int size;
3322
3323 if (!decoder)
3324 return -pte_internal;
3325
3326 size = pt_pkt_read_vmcs(&packet, decoder->pos, &decoder->config);
3327 if (size < 0)
3328 return size;
3329
3330 event = pt_evq_enqueue(&decoder->evq, evb_psbend);
3331 if (!event)
3332 return -pte_nomem;
3333
3334 event->type = ptev_async_vmcs;
3335 event->variant.async_vmcs.base = packet.base;
3336
3337 decoder->pos += size;
3338 return 0;
3339 }
3340
3341 int pt_qry_decode_vmcs(struct pt_query_decoder *decoder)
3342 {
3343 struct pt_packet_vmcs packet;
3344 struct pt_event *event;
3345 int size, errcode;
3346
3347 if (!decoder)
3348 return -pte_internal;
3349
3350 size = pt_pkt_read_vmcs(&packet, decoder->pos, &decoder->config);
3351 if (size < 0)
3352 return size;
3353
3354 /* VMCS events bind to the same IP as an in-flight async paging event.
3355 *
3356 * In that case, the VMCS event should be applied first. We reorder
3357 * events here to simplify the life of higher layers.
3358 */
3359 event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_paging);
3360 if (event) {
3361 struct pt_event *paging;
3362
3363 paging = pt_evq_enqueue(&decoder->evq, evb_tip);
3364 if (!paging)
3365 return -pte_nomem;
3366
3367 *paging = *event;
3368
3369 event->type = ptev_async_vmcs;
3370 event->variant.async_vmcs.base = packet.base;
3371
3372 decoder->pos += size;
3373 return 0;
3374 }
3375
3376 /* VMCS events bind to the same TIP packet as an in-flight async
3377 * branch event.
3378 */
3379 event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_branch);
3380 if (event) {
3381 event = pt_evq_enqueue(&decoder->evq, evb_tip);
3382 if (!event)
3383 return -pte_nomem;
3384
3385 event->type = ptev_async_vmcs;
3386 event->variant.async_vmcs.base = packet.base;
3387
3388 decoder->pos += size;
3389 return 0;
3390 }
3391
3392 /* VMCS events that do not bind to an in-flight async event are
3393 * stand-alone.
3394 */
3395 event = pt_evq_standalone(&decoder->evq);
3396 if (!event)
3397 return -pte_internal;
3398
3399 event->type = ptev_vmcs;
3400 event->variant.vmcs.base = packet.base;
3401
3402 decoder->event = event;
3403
3404 errcode = pt_qry_event_time(event, decoder);
3405 if (errcode < 0)
3406 return errcode;
3407
3408 decoder->pos += size;
3409 return 0;
3410 }
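
/* For illustration: given the hypothetical sequence PIP, VMCS, TIP, the
 * PIP queues a TIP-bound async paging event.  The code above then turns
 * that queued slot into the async VMCS event and re-enqueues the paging
 * event behind it, so consumers see the VMCS update before the paging
 * update at the same IP.
 */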
3411
3412 int pt_qry_decode_mnt(struct pt_query_decoder *decoder)
3413 {
3414 struct pt_packet_mnt packet;
3415 struct pt_event *event;
3416 int size, errcode;
3417
3418 if (!decoder)
3419 return -pte_internal;
3420
3421 size = pt_pkt_read_mnt(&packet, decoder->pos, &decoder->config);
3422 if (size < 0)
3423 return size;
3424
3425 event = pt_evq_standalone(&decoder->evq);
3426 if (!event)
3427 return -pte_internal;
3428
3429 event->type = ptev_mnt;
3430 event->variant.mnt.payload = packet.payload;
3431
3432 decoder->event = event;
3433
3434 errcode = pt_qry_event_time(event, decoder);
3435 if (errcode < 0)
3436 return errcode;
3437
3438 decoder->pos += size;
3439
3440 return 0;
3441 }
3442
3443 int pt_qry_header_mnt(struct pt_query_decoder *decoder)
3444 {
3445 struct pt_packet_mnt packet;
3446 struct pt_event *event;
3447 int size;
3448
3449 if (!decoder)
3450 return -pte_internal;
3451
3452 size = pt_pkt_read_mnt(&packet, decoder->pos, &decoder->config);
3453 if (size < 0)
3454 return size;
3455
3456 event = pt_evq_enqueue(&decoder->evq, evb_psbend);
3457 if (!event)
3458 return -pte_nomem;
3459
3460 event->type = ptev_mnt;
3461 event->variant.mnt.payload = packet.payload;
3462
3463 decoder->pos += size;
3464
3465 return 0;
3466 }
3467
3468 int pt_qry_decode_exstop(struct pt_query_decoder *decoder)
3469 {
3470 struct pt_packet_exstop packet;
3471 struct pt_event *event;
3472 int size;
3473
3474 if (!decoder)
3475 return -pte_internal;
3476
3477 size = pt_pkt_read_exstop(&packet, decoder->pos, &decoder->config);
3478 if (size < 0)
3479 return size;
3480
3481 if (packet.ip) {
3482 event = pt_evq_enqueue(&decoder->evq, evb_fup);
3483 if (!event)
3484 return -pte_internal;
3485
3486 event->type = ptev_exstop;
3487 } else {
3488 event = pt_evq_standalone(&decoder->evq);
3489 if (!event)
3490 return -pte_internal;
3491
3492 event->type = ptev_exstop;
3493
3494 event->ip_suppressed = 1;
3495 event->variant.exstop.ip = 0ull;
3496
3497 decoder->event = event;
3498 }
3499
3500 decoder->pos += size;
3501 return 0;
3502 }
3503
3504 int pt_qry_decode_mwait(struct pt_query_decoder *decoder)
3505 {
3506 struct pt_packet_mwait packet;
3507 struct pt_event *event;
3508 int size;
3509
3510 if (!decoder)
3511 return -pte_internal;
3512
3513 size = pt_pkt_read_mwait(&packet, decoder->pos, &decoder->config);
3514 if (size < 0)
3515 return size;
3516
3517 event = pt_evq_enqueue(&decoder->evq, evb_fup);
3518 if (!event)
3519 return -pte_internal;
3520
3521 event->type = ptev_mwait;
3522 event->variant.mwait.hints = packet.hints;
3523 event->variant.mwait.ext = packet.ext;
3524
3525 decoder->pos += size;
3526 return 0;
3527 }
3528
3529 int pt_qry_decode_pwre(struct pt_query_decoder *decoder)
3530 {
3531 struct pt_packet_pwre packet;
3532 struct pt_event *event;
3533 int size;
3534
3535 if (!decoder)
3536 return -pte_internal;
3537
3538 size = pt_pkt_read_pwre(&packet, decoder->pos, &decoder->config);
3539 if (size < 0)
3540 return size;
3541
3542 event = pt_evq_standalone(&decoder->evq);
3543 if (!event)
3544 return -pte_internal;
3545
3546 event->type = ptev_pwre;
3547 event->variant.pwre.state = packet.state;
3548 event->variant.pwre.sub_state = packet.sub_state;
3549
3550 if (packet.hw)
3551 event->variant.pwre.hw = 1;
3552
3553 decoder->event = event;
3554
3555 decoder->pos += size;
3556 return 0;
3557 }
3558
3559 int pt_qry_decode_pwrx(struct pt_query_decoder *decoder)
3560 {
3561 struct pt_packet_pwrx packet;
3562 struct pt_event *event;
3563 int size;
3564
3565 if (!decoder)
3566 return -pte_internal;
3567
3568 size = pt_pkt_read_pwrx(&packet, decoder->pos, &decoder->config);
3569 if (size < 0)
3570 return size;
3571
3572 event = pt_evq_standalone(&decoder->evq);
3573 if (!event)
3574 return -pte_internal;
3575
3576 event->type = ptev_pwrx;
3577 event->variant.pwrx.last = packet.last;
3578 event->variant.pwrx.deepest = packet.deepest;
3579
3580 if (packet.interrupt)
3581 event->variant.pwrx.interrupt = 1;
3582 if (packet.store)
3583 event->variant.pwrx.store = 1;
3584 if (packet.autonomous)
3585 event->variant.pwrx.autonomous = 1;
3586
3587 decoder->event = event;
3588
3589 decoder->pos += size;
3590 return 0;
3591 }
3592
3593 int pt_qry_decode_ptw(struct pt_query_decoder *decoder)
3594 {
3595 struct pt_packet_ptw packet;
3596 struct pt_event *event;
3597 int size, pls;
3598
3599 if (!decoder)
3600 return -pte_internal;
3601
3602 size = pt_pkt_read_ptw(&packet, decoder->pos, &decoder->config);
3603 if (size < 0)
3604 return size;
3605
3606 pls = pt_ptw_size(packet.plc);
3607 if (pls < 0)
3608 return pls;
3609
3610 if (packet.ip) {
3611 event = pt_evq_enqueue(&decoder->evq, evb_fup);
3612 if (!event)
3613 return -pte_internal;
3614 } else {
3615 event = pt_evq_standalone(&decoder->evq);
3616 if (!event)
3617 return -pte_internal;
3618
3619 event->ip_suppressed = 1;
3620
3621 decoder->event = event;
3622 }
3623
3624 event->type = ptev_ptwrite;
3625 event->variant.ptwrite.size = (uint8_t) pls;
3626 event->variant.ptwrite.payload = packet.payload;
3627
3628 decoder->pos += size;
3629 return 0;
3630 }
3631