/*
 * Copyright (c) 2013-2018, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "pt_insn_decoder.h"
#include "pt_insn.h"
#include "pt_config.h"
#include "pt_asid.h"
#include "pt_compiler.h"

#include "intel-pt.h"

#include <string.h>
#include <stdlib.h>


static int pt_insn_check_ip_event(struct pt_insn_decoder *,
				  const struct pt_insn *,
				  const struct pt_insn_ext *);

static void pt_insn_reset(struct pt_insn_decoder *decoder)
{
	if (!decoder)
		return;

	decoder->mode = ptem_unknown;
	decoder->ip = 0ull;
	decoder->status = 0;
	decoder->enabled = 0;
	decoder->process_event = 0;
	decoder->speculative = 0;
	decoder->process_insn = 0;
	decoder->bound_paging = 0;
	decoder->bound_vmcs = 0;
	decoder->bound_ptwrite = 0;

	pt_retstack_init(&decoder->retstack);
	pt_asid_init(&decoder->asid);
}

static int pt_insn_status(const struct pt_insn_decoder *decoder, int flags)
{
	int status;

	if (!decoder)
		return -pte_internal;

	status = decoder->status;

	/* Indicate whether tracing is disabled or enabled.
	 *
	 * This duplicates the indication in struct pt_insn and covers the case
	 * where we indicate the status after synchronizing.
	 */
	if (!decoder->enabled)
		flags |= pts_ip_suppressed;

	/* Forward end-of-trace indications.
	 *
	 * Postpone it as long as we're still processing events, though.
	 */
	if ((status & pts_eos) && !decoder->process_event)
		flags |= pts_eos;

	return flags;
}

/* Initialize the query decoder flags based on our flags. */
static int pt_insn_init_qry_flags(struct pt_conf_flags *qflags,
				  const struct pt_conf_flags *flags)
{
	if (!qflags || !flags)
		return -pte_internal;

	memset(qflags, 0, sizeof(*qflags));

	return 0;
}

int pt_insn_decoder_init(struct pt_insn_decoder *decoder,
			 const struct pt_config *uconfig)
{
	struct pt_config config;
	int errcode;

	if (!decoder)
		return -pte_internal;

	errcode = pt_config_from_user(&config, uconfig);
	if (errcode < 0)
		return errcode;

	/* The user supplied decoder flags. */
	decoder->flags = config.flags;

	/* Set the flags we need for the query decoder we use. */
	errcode = pt_insn_init_qry_flags(&config.flags, &decoder->flags);
	if (errcode < 0)
		return errcode;

	errcode = pt_qry_decoder_init(&decoder->query, &config);
	if (errcode < 0)
		return errcode;

	pt_image_init(&decoder->default_image, NULL);
	decoder->image = &decoder->default_image;

	errcode = pt_msec_cache_init(&decoder->scache);
	if (errcode < 0)
		return errcode;

	pt_insn_reset(decoder);

	return 0;
}

void pt_insn_decoder_fini(struct pt_insn_decoder *decoder)
{
	if (!decoder)
		return;

	pt_msec_cache_fini(&decoder->scache);
	pt_image_fini(&decoder->default_image);
	pt_qry_decoder_fini(&decoder->query);
}

struct pt_insn_decoder *pt_insn_alloc_decoder(const struct pt_config *config)
{
	struct pt_insn_decoder *decoder;
	int errcode;

	decoder = malloc(sizeof(*decoder));
	if (!decoder)
		return NULL;

	errcode = pt_insn_decoder_init(decoder, config);
	if (errcode < 0) {
		free(decoder);
		return NULL;
	}

	return decoder;
}

void pt_insn_free_decoder(struct pt_insn_decoder *decoder)
{
	if (!decoder)
		return;

	pt_insn_decoder_fini(decoder);
	free(decoder);
}
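
/* Example: a minimal decode loop using the allocation, synchronization, and
 * decode functions in this file.  This sketch is illustrative only and not
 * part of the library; it assumes a struct pt_config that the caller has
 * already initialized (e.g. via pt_config_init() and by filling in the trace
 * buffer), and it abbreviates error handling and resynchronization:
 *
 *	struct pt_insn_decoder *decoder;
 *	struct pt_insn insn;
 *	int status;
 *
 *	decoder = pt_insn_alloc_decoder(&config);
 *	if (!decoder)
 *		return;
 *
 *	status = pt_insn_sync_forward(decoder);
 *	while (status >= 0) {
 *		while (status & pts_event_pending) {
 *			struct pt_event event;
 *
 *			status = pt_insn_event(decoder, &event,
 *					       sizeof(event));
 *			if (status < 0)
 *				break;
 *		}
 *		if (status < 0)
 *			break;
 *
 *		status = pt_insn_next(decoder, &insn, sizeof(insn));
 *	}
 *
 *	pt_insn_free_decoder(decoder);
 */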

/* Maybe synthesize a tick event.
 *
 * If we're not already processing events, check the current time against the
 * last event's time.  If it changed, synthesize a tick event with the new
 * time.
 *
 * Returns zero if no tick event has been created.
 * Returns a positive integer if a tick event has been created.
 * Returns a negative error code otherwise.
 */
static int pt_insn_tick(struct pt_insn_decoder *decoder, uint64_t ip)
{
	struct pt_event *ev;
	uint64_t tsc;
	uint32_t lost_mtc, lost_cyc;
	int errcode;

	if (!decoder)
		return -pte_internal;

	/* We're not generating tick events if tracing is disabled. */
	if (!decoder->enabled)
		return -pte_internal;

	/* Events already provide a timestamp so there is no need to synthesize
	 * an artificial tick event.  There's no room, either, since this would
	 * overwrite the in-progress event.
	 *
	 * In rare cases where we need to proceed to an event location using
	 * trace this may cause us to miss a timing update if the event is not
	 * forwarded to the user.
	 *
	 * The only case I can come up with at the moment is a MODE.EXEC
	 * binding to the TIP IP of a far branch.
	 */
	if (decoder->process_event)
		return 0;

	errcode = pt_qry_time(&decoder->query, &tsc, &lost_mtc, &lost_cyc);
	if (errcode < 0) {
		/* If we don't have wall-clock time, we use relative time. */
		if (errcode != -pte_no_time)
			return errcode;
	}

	ev = &decoder->event;

	/* We're done if time has not changed since the last event. */
	if (tsc == ev->tsc)
		return 0;

	/* Time has changed so we create a new tick event. */
	memset(ev, 0, sizeof(*ev));
	ev->type = ptev_tick;
	ev->variant.tick.ip = ip;

	/* Indicate if we have wall-clock time or only relative time. */
	if (errcode != -pte_no_time)
		ev->has_tsc = 1;
	ev->tsc = tsc;
	ev->lost_mtc = lost_mtc;
	ev->lost_cyc = lost_cyc;

	/* We now have an event to process. */
	decoder->process_event = 1;

	return 1;
}

/* Query an indirect branch.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int pt_insn_indirect_branch(struct pt_insn_decoder *decoder,
				   uint64_t *ip)
{
	uint64_t evip;
	int status, errcode;

	if (!decoder)
		return -pte_internal;

	evip = decoder->ip;

	status = pt_qry_indirect_branch(&decoder->query, ip);
	if (status < 0)
		return status;

	if (decoder->flags.variant.insn.enable_tick_events) {
		errcode = pt_insn_tick(decoder, evip);
		if (errcode < 0)
			return errcode;
	}

	return status;
}

/* Query a conditional branch.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int pt_insn_cond_branch(struct pt_insn_decoder *decoder, int *taken)
{
	int status, errcode;

	if (!decoder)
		return -pte_internal;

	status = pt_qry_cond_branch(&decoder->query, taken);
	if (status < 0)
		return status;

	if (decoder->flags.variant.insn.enable_tick_events) {
		errcode = pt_insn_tick(decoder, decoder->ip);
		if (errcode < 0)
			return errcode;
	}

	return status;
}

static int pt_insn_start(struct pt_insn_decoder *decoder, int status)
{
	if (!decoder)
		return -pte_internal;

	if (status < 0)
		return status;

	decoder->status = status;

	if (!(status & pts_ip_suppressed))
		decoder->enabled = 1;

	/* Process any initial events.
	 *
	 * Some events are processed after proceeding to the next IP in order
	 * to indicate things like tracing disable or trace stop in the
	 * preceding instruction.  Those events will be processed without such
	 * an indication before decoding the current instruction.
	 *
	 * We do this already here so we can indicate user-events that precede
	 * the first instruction.
	 */
	return pt_insn_check_ip_event(decoder, NULL, NULL);
}

int pt_insn_sync_forward(struct pt_insn_decoder *decoder)
{
	int status;

	if (!decoder)
		return -pte_invalid;

	pt_insn_reset(decoder);

	status = pt_qry_sync_forward(&decoder->query, &decoder->ip);

	return pt_insn_start(decoder, status);
}

int pt_insn_sync_backward(struct pt_insn_decoder *decoder)
{
	int status;

	if (!decoder)
		return -pte_invalid;

	pt_insn_reset(decoder);

	status = pt_qry_sync_backward(&decoder->query, &decoder->ip);

	return pt_insn_start(decoder, status);
}

int pt_insn_sync_set(struct pt_insn_decoder *decoder, uint64_t offset)
{
	int status;

	if (!decoder)
		return -pte_invalid;

	pt_insn_reset(decoder);

	status = pt_qry_sync_set(&decoder->query, &decoder->ip, offset);

	return pt_insn_start(decoder, status);
}

int pt_insn_get_offset(const struct pt_insn_decoder *decoder, uint64_t *offset)
{
	if (!decoder)
		return -pte_invalid;

	return pt_qry_get_offset(&decoder->query, offset);
}

int pt_insn_get_sync_offset(const struct pt_insn_decoder *decoder,
			    uint64_t *offset)
{
	if (!decoder)
		return -pte_invalid;

	return pt_qry_get_sync_offset(&decoder->query, offset);
}

struct pt_image *pt_insn_get_image(struct pt_insn_decoder *decoder)
{
	if (!decoder)
		return NULL;

	return decoder->image;
}

int pt_insn_set_image(struct pt_insn_decoder *decoder,
		      struct pt_image *image)
{
	if (!decoder)
		return -pte_invalid;

	if (!image)
		image = &decoder->default_image;

	decoder->image = image;
	return 0;
}
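
/* Example: supplying the memory image to decode from.  This is an
 * illustrative sketch, not part of the library; the file name and load
 * address are made up and error handling is omitted.  Sections may be added
 * to the decoder's default image, or to a user-allocated image that is then
 * installed with pt_insn_set_image():
 *
 *	struct pt_image *image;
 *
 *	image = pt_insn_get_image(decoder);
 *	(void) pt_image_add_file(image, "a.out", 0x0ull, 0x1000ull,
 *				 NULL, 0x400000ull);
 */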

const struct pt_config *
pt_insn_get_config(const struct pt_insn_decoder *decoder)
{
	if (!decoder)
		return NULL;

	return pt_qry_get_config(&decoder->query);
}

int pt_insn_time(struct pt_insn_decoder *decoder, uint64_t *time,
		 uint32_t *lost_mtc, uint32_t *lost_cyc)
{
	if (!decoder || !time)
		return -pte_invalid;

	return pt_qry_time(&decoder->query, time, lost_mtc, lost_cyc);
}

int pt_insn_core_bus_ratio(struct pt_insn_decoder *decoder, uint32_t *cbr)
{
	if (!decoder || !cbr)
		return -pte_invalid;

	return pt_qry_core_bus_ratio(&decoder->query, cbr);
}

int pt_insn_asid(const struct pt_insn_decoder *decoder, struct pt_asid *asid,
		 size_t size)
{
	if (!decoder || !asid)
		return -pte_invalid;

	return pt_asid_to_user(asid, &decoder->asid, size);
}

static inline int event_pending(struct pt_insn_decoder *decoder)
{
	int status;

	if (!decoder)
		return -pte_invalid;

	if (decoder->process_event)
		return 1;

	status = decoder->status;
	if (!(status & pts_event_pending))
		return 0;

	status = pt_qry_event(&decoder->query, &decoder->event,
			      sizeof(decoder->event));
	if (status < 0)
		return status;

	decoder->process_event = 1;
	decoder->status = status;
	return 1;
}

static int check_erratum_skd022(struct pt_insn_decoder *decoder)
{
	struct pt_insn_ext iext;
	struct pt_insn insn;
	int errcode;

	if (!decoder)
		return -pte_internal;

	insn.mode = decoder->mode;
	insn.ip = decoder->ip;

	errcode = pt_insn_decode(&insn, &iext, decoder->image, &decoder->asid);
	if (errcode < 0)
		return 0;

	switch (iext.iclass) {
	default:
		return 0;

	case PTI_INST_VMLAUNCH:
	case PTI_INST_VMRESUME:
		return 1;
	}
}

static inline int handle_erratum_skd022(struct pt_insn_decoder *decoder)
{
	struct pt_event *ev;
	uint64_t ip;
	int errcode;

	if (!decoder)
		return -pte_internal;

	errcode = check_erratum_skd022(decoder);
	if (errcode <= 0)
		return errcode;

	/* We turn the async disable into a sync disable.  It will be processed
	 * after decoding the instruction.
	 */
	ev = &decoder->event;

	ip = ev->variant.async_disabled.ip;

	ev->type = ptev_disabled;
	ev->variant.disabled.ip = ip;

	return 1;
}

static int pt_insn_proceed(struct pt_insn_decoder *decoder,
			   const struct pt_insn *insn,
			   const struct pt_insn_ext *iext)
{
	if (!decoder || !insn || !iext)
		return -pte_internal;

	/* Branch displacements apply to the next instruction. */
	decoder->ip += insn->size;

	/* We handle non-branches, non-taken conditional branches, and
	 * compressed returns directly in the switch and do some pre-work for
	 * calls.
	 *
	 * All kinds of branches are handled below the switch.
	 */
	switch (insn->iclass) {
	case ptic_ptwrite:
	case ptic_other:
		return 0;

	case ptic_cond_jump: {
		int status, taken;

		status = pt_insn_cond_branch(decoder, &taken);
		if (status < 0)
			return status;

		decoder->status = status;
		if (!taken)
			return 0;

		break;
	}

	case ptic_call:
		/* Log the call for return compression.
		 *
		 * Unless this is a call to the next instruction as is used
		 * for position independent code.
		 */
		if (iext->variant.branch.displacement ||
		    !iext->variant.branch.is_direct)
			pt_retstack_push(&decoder->retstack, decoder->ip);

		break;

	case ptic_return: {
		int taken, status;

		/* Check for a compressed return. */
		status = pt_insn_cond_branch(decoder, &taken);
		if (status >= 0) {
			decoder->status = status;

			/* A compressed return is indicated by a taken
			 * conditional branch.
			 */
			if (!taken)
				return -pte_bad_retcomp;

			return pt_retstack_pop(&decoder->retstack,
					       &decoder->ip);
		}

		break;
	}

	case ptic_jump:
	case ptic_far_call:
	case ptic_far_return:
	case ptic_far_jump:
		break;

	case ptic_error:
		return -pte_bad_insn;
	}

	/* Process a direct or indirect branch.
	 *
	 * This combines calls, uncompressed returns, taken conditional jumps,
	 * and all flavors of far transfers.
	 */
	if (iext->variant.branch.is_direct)
		decoder->ip += iext->variant.branch.displacement;
	else {
		int status;

		status = pt_insn_indirect_branch(decoder, &decoder->ip);
		if (status < 0)
			return status;

		decoder->status = status;

		/* We do need an IP to proceed. */
		if (status & pts_ip_suppressed)
			return -pte_noip;
	}

	return 0;
}

static int pt_insn_at_skl014(const struct pt_event *ev,
			     const struct pt_insn *insn,
			     const struct pt_insn_ext *iext,
			     const struct pt_config *config)
{
	uint64_t ip;
	int status;

	if (!ev || !insn || !iext || !config)
		return -pte_internal;

	if (!ev->ip_suppressed)
		return 0;

	switch (insn->iclass) {
	case ptic_call:
	case ptic_jump:
		/* The erratum only applies to unconditional direct branches. */
		if (!iext->variant.branch.is_direct)
			break;

		/* Check the filter against the branch target. */
		ip = insn->ip;
		ip += insn->size;
		ip += iext->variant.branch.displacement;

		status = pt_filter_addr_check(&config->addr_filter, ip);
		if (status <= 0) {
			if (status < 0)
				return status;

			return 1;
		}
		break;

	default:
		break;
	}

	return 0;
}

static int pt_insn_at_disabled_event(const struct pt_event *ev,
				     const struct pt_insn *insn,
				     const struct pt_insn_ext *iext,
				     const struct pt_config *config)
{
	if (!ev || !insn || !iext || !config)
		return -pte_internal;

	if (ev->ip_suppressed) {
		if (pt_insn_is_far_branch(insn, iext) ||
		    pt_insn_changes_cpl(insn, iext) ||
		    pt_insn_changes_cr3(insn, iext))
			return 1;

		/* If we don't have a filter configuration we assume that no
		 * address filters were used and the erratum does not apply.
		 *
		 * We might otherwise disable tracing too early.
		 */
		if (config->addr_filter.config.addr_cfg &&
		    config->errata.skl014 &&
		    pt_insn_at_skl014(ev, insn, iext, config))
			return 1;
	} else {
		switch (insn->iclass) {
		case ptic_ptwrite:
		case ptic_other:
			break;

		case ptic_call:
		case ptic_jump:
			/* If we got an IP with the disabled event, we may
			 * ignore direct branches that go to a different IP.
			 */
			if (iext->variant.branch.is_direct) {
				uint64_t ip;

				ip = insn->ip;
				ip += insn->size;
				ip += iext->variant.branch.displacement;

				if (ip != ev->variant.disabled.ip)
					break;
			}

			fallthrough;
		case ptic_return:
		case ptic_far_call:
		case ptic_far_return:
		case ptic_far_jump:
		case ptic_cond_jump:
			return 1;

		case ptic_error:
			return -pte_bad_insn;
		}
	}

	return 0;
}

/* Postpone proceeding past @insn/@iext and indicate a pending event.
 *
 * There may be further events pending on @insn/@iext.  Postpone proceeding
 * past @insn/@iext until we processed all events that bind to it.
 *
 * Returns a non-negative pt_status_flag bit-vector indicating a pending event
 * on success, a negative pt_error_code otherwise.
 */
static int pt_insn_postpone(struct pt_insn_decoder *decoder,
			    const struct pt_insn *insn,
			    const struct pt_insn_ext *iext)
{
	if (!decoder || !insn || !iext)
		return -pte_internal;

	if (!decoder->process_insn) {
		decoder->process_insn = 1;
		decoder->insn = *insn;
		decoder->iext = *iext;
	}

	return pt_insn_status(decoder, pts_event_pending);
}

/* Remove any postponed instruction from @decoder.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */
static int pt_insn_clear_postponed(struct pt_insn_decoder *decoder)
{
	if (!decoder)
		return -pte_internal;

	decoder->process_insn = 0;
	decoder->bound_paging = 0;
	decoder->bound_vmcs = 0;
	decoder->bound_ptwrite = 0;

	return 0;
}

/* Proceed past a postponed instruction.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */
static int pt_insn_proceed_postponed(struct pt_insn_decoder *decoder)
{
	int status;

	if (!decoder)
		return -pte_internal;

	if (!decoder->process_insn)
		return -pte_internal;

	/* There's nothing to do if tracing got disabled. */
	if (!decoder->enabled)
		return pt_insn_clear_postponed(decoder);

	status = pt_insn_proceed(decoder, &decoder->insn, &decoder->iext);
	if (status < 0)
		return status;

	return pt_insn_clear_postponed(decoder);
}

/* Check for events that bind to instruction.
 *
 * Check whether an event is pending that binds to @insn/@iext, and, if that is
 * the case, proceed past @insn/@iext and indicate the event by setting
 * pts_event_pending.
 *
 * If that is not the case, we return zero.  This is what pt_insn_status()
 * would return since:
 *
 *   - we suppress pts_eos as long as we're processing events
 *   - we do not set pts_ip_suppressed since tracing must be enabled
 *
 * Returns a non-negative pt_status_flag bit-vector on success, a negative
 * error code otherwise.
 */
static int pt_insn_check_insn_event(struct pt_insn_decoder *decoder,
				    const struct pt_insn *insn,
				    const struct pt_insn_ext *iext)
{
	struct pt_event *ev;
	int status;

	if (!decoder)
		return -pte_internal;

	status = event_pending(decoder);
	if (status <= 0)
		return status;

	ev = &decoder->event;
	switch (ev->type) {
	case ptev_enabled:
	case ptev_overflow:
	case ptev_async_paging:
	case ptev_async_vmcs:
	case ptev_async_disabled:
	case ptev_async_branch:
	case ptev_exec_mode:
	case ptev_tsx:
	case ptev_stop:
	case ptev_exstop:
	case ptev_mwait:
	case ptev_pwre:
	case ptev_pwrx:
	case ptev_tick:
	case ptev_cbr:
	case ptev_mnt:
		/* We're only interested in events that bind to instructions. */
		return 0;

	case ptev_disabled:
		status = pt_insn_at_disabled_event(ev, insn, iext,
						   &decoder->query.config);
		if (status <= 0)
			return status;

		/* We're at a synchronous disable event location.
		 *
		 * Let's determine the IP at which we expect tracing to resume.
		 */
		status = pt_insn_next_ip(&decoder->ip, insn, iext);
		if (status < 0) {
			/* We don't know the IP on error. */
			decoder->ip = 0ull;

			/* For indirect calls, assume that we return to the
			 * next instruction.
			 *
			 * We only check the instruction class, not the
			 * is_direct property, since direct calls would have
			 * been handled by pt_insn_next_ip() or would have
			 * provoked a different error.
			 */
			if (status != -pte_bad_query)
				return status;

			switch (insn->iclass) {
			case ptic_call:
			case ptic_far_call:
				decoder->ip = insn->ip + insn->size;
				break;

			default:
				break;
			}
		}

		break;

	case ptev_paging:
		/* We bind at most one paging event to an instruction. */
		if (decoder->bound_paging)
			return 0;

		if (!pt_insn_binds_to_pip(insn, iext))
			return 0;

		/* We bound a paging event.  Make sure we do not bind further
		 * paging events to this instruction.
		 */
		decoder->bound_paging = 1;

		return pt_insn_postpone(decoder, insn, iext);

	case ptev_vmcs:
		/* We bind at most one vmcs event to an instruction. */
		if (decoder->bound_vmcs)
			return 0;

		if (!pt_insn_binds_to_vmcs(insn, iext))
			return 0;

		/* We bound a vmcs event.  Make sure we do not bind further
		 * vmcs events to this instruction.
		 */
		decoder->bound_vmcs = 1;

		return pt_insn_postpone(decoder, insn, iext);

	case ptev_ptwrite:
		/* We bind at most one ptwrite event to an instruction. */
		if (decoder->bound_ptwrite)
			return 0;

		if (ev->ip_suppressed) {
			if (!pt_insn_is_ptwrite(insn, iext))
				return 0;

			/* Fill in the event IP.  Our users will need it to
			 * make sense of the PTWRITE payload.
			 */
			ev->variant.ptwrite.ip = decoder->ip;
			ev->ip_suppressed = 0;
		} else {
			/* The ptwrite event contains the IP of the ptwrite
			 * instruction (CLIP) unlike most events that contain
			 * the IP of the first instruction that did not
			 * complete (NLIP).
			 *
			 * It's easier to handle this case here, as well.
			 */
			if (decoder->ip != ev->variant.ptwrite.ip)
				return 0;
		}

		/* We bound a ptwrite event.  Make sure we do not bind further
		 * ptwrite events to this instruction.
		 */
		decoder->bound_ptwrite = 1;

		return pt_insn_postpone(decoder, insn, iext);
	}

	return pt_insn_status(decoder, pts_event_pending);
}

enum {
	/* The maximum number of steps to take when determining whether the
	 * event location can be reached.
	 */
	bdm64_max_steps = 0x100
};

/* Try to work around erratum BDM64.
 *
 * If we got a transaction abort immediately following a branch that produced
 * trace, the trace for that branch might have been corrupted.
 *
 * Returns a positive integer if the erratum was handled.
 * Returns zero if the erratum does not seem to apply.
 * Returns a negative error code otherwise.
 */
static int handle_erratum_bdm64(struct pt_insn_decoder *decoder,
				const struct pt_event *ev,
				const struct pt_insn *insn,
				const struct pt_insn_ext *iext)
{
	int status;

	if (!decoder || !ev || !insn || !iext)
		return -pte_internal;

	/* This only affects aborts. */
	if (!ev->variant.tsx.aborted)
		return 0;

	/* This only affects branches. */
	if (!pt_insn_is_branch(insn, iext))
		return 0;

	/* Let's check if we can reach the event location from here.
	 *
	 * If we can, let's assume the erratum did not hit.  We might still be
	 * wrong but we're not able to tell.
	 */
	status = pt_insn_range_is_contiguous(decoder->ip, ev->variant.tsx.ip,
					     decoder->mode, decoder->image,
					     &decoder->asid, bdm64_max_steps);
	if (status > 0)
		return 0;

	/* We can't reach the event location.  This could either mean that we
	 * stopped too early (and status is zero) or that the erratum hit.
	 *
	 * We assume the latter and pretend that the previous branch brought us
	 * to the event location, instead.
	 */
	decoder->ip = ev->variant.tsx.ip;

	return 1;
}

/* Check whether a peek TSX event should be postponed.
 *
 * This involves handling erratum BDM64.
 *
 * Returns a positive integer if the event is to be postponed.
 * Returns zero if the event should be processed.
 * Returns a negative error code otherwise.
 */
static inline int pt_insn_postpone_tsx(struct pt_insn_decoder *decoder,
				       const struct pt_insn *insn,
				       const struct pt_insn_ext *iext,
				       const struct pt_event *ev)
{
	int status;

	if (!decoder || !ev)
		return -pte_internal;

	if (ev->ip_suppressed)
		return 0;

	if (insn && iext && decoder->query.config.errata.bdm64) {
		status = handle_erratum_bdm64(decoder, ev, insn, iext);
		if (status < 0)
			return status;
	}

	if (decoder->ip != ev->variant.tsx.ip)
		return 1;

	return 0;
}

/* Check for events that bind to an IP.
 *
 * Check whether an event is pending that binds to @decoder->ip, and, if that
 * is the case, indicate the event by setting pts_event_pending.
 *
 * Returns a non-negative pt_status_flag bit-vector on success, a negative
 * error code otherwise.
 */
static int pt_insn_check_ip_event(struct pt_insn_decoder *decoder,
				  const struct pt_insn *insn,
				  const struct pt_insn_ext *iext)
{
	struct pt_event *ev;
	int status;

	if (!decoder)
		return -pte_internal;

	status = event_pending(decoder);
	if (status <= 0) {
		if (status < 0)
			return status;

		return pt_insn_status(decoder, 0);
	}

	ev = &decoder->event;
	switch (ev->type) {
	case ptev_disabled:
		break;

	case ptev_enabled:
		return pt_insn_status(decoder, pts_event_pending);

	case ptev_async_disabled:
		if (ev->variant.async_disabled.at != decoder->ip)
			break;

		if (decoder->query.config.errata.skd022) {
			int errcode;

			errcode = handle_erratum_skd022(decoder);
			if (errcode != 0) {
				if (errcode < 0)
					return errcode;

				/* If the erratum applies, we postpone the
				 * modified event to the next call to
				 * pt_insn_next().
				 */
				break;
			}
		}

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_tsx:
		status = pt_insn_postpone_tsx(decoder, insn, iext, ev);
		if (status != 0) {
			if (status < 0)
				return status;

			break;
		}

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_async_branch:
		if (ev->variant.async_branch.from != decoder->ip)
			break;

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_overflow:
		return pt_insn_status(decoder, pts_event_pending);

	case ptev_exec_mode:
		if (!ev->ip_suppressed &&
		    ev->variant.exec_mode.ip != decoder->ip)
			break;

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_paging:
		if (decoder->enabled)
			break;

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_async_paging:
		if (!ev->ip_suppressed &&
		    ev->variant.async_paging.ip != decoder->ip)
			break;

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_vmcs:
		if (decoder->enabled)
			break;

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_async_vmcs:
		if (!ev->ip_suppressed &&
		    ev->variant.async_vmcs.ip != decoder->ip)
			break;

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_stop:
		return pt_insn_status(decoder, pts_event_pending);

	case ptev_exstop:
		if (!ev->ip_suppressed && decoder->enabled &&
		    decoder->ip != ev->variant.exstop.ip)
			break;

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_mwait:
		if (!ev->ip_suppressed && decoder->enabled &&
		    decoder->ip != ev->variant.mwait.ip)
			break;

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_pwre:
	case ptev_pwrx:
		return pt_insn_status(decoder, pts_event_pending);

	case ptev_ptwrite:
		/* Any event binding to the current PTWRITE instruction is
		 * handled in pt_insn_check_insn_event().
		 *
		 * Any subsequent ptwrite event binds to a different instruction
		 * and must wait until the next iteration - as long as tracing
		 * is enabled.
		 *
		 * When tracing is disabled, we forward all ptwrite events
		 * immediately to the user.
		 */
		if (decoder->enabled)
			break;

		return pt_insn_status(decoder, pts_event_pending);

	case ptev_tick:
	case ptev_cbr:
	case ptev_mnt:
		return pt_insn_status(decoder, pts_event_pending);
	}

	return pt_insn_status(decoder, 0);
}

static inline int insn_to_user(struct pt_insn *uinsn, size_t size,
			       const struct pt_insn *insn)
{
	if (!uinsn || !insn)
		return -pte_internal;

	if (uinsn == insn)
		return 0;

	/* Zero out any unknown bytes.  Cast to a byte pointer so the offset
	 * is not scaled by sizeof(*insn).
	 */
	if (sizeof(*insn) < size) {
		memset((uint8_t *) uinsn + sizeof(*insn), 0,
		       size - sizeof(*insn));

		size = sizeof(*insn);
	}

	memcpy(uinsn, insn, size);

	return 0;
}

static int pt_insn_decode_cached(struct pt_insn_decoder *decoder,
				 const struct pt_mapped_section *msec,
				 struct pt_insn *insn, struct pt_insn_ext *iext)
{
	int status;

	if (!decoder || !insn || !iext)
		return -pte_internal;

	/* Try reading the memory containing @insn from the cached section.  If
	 * that fails, if we don't have a cached section, or if decode fails
	 * later on, fall back to decoding @insn from @decoder->image.
	 *
	 * The latter will also handle truncated instructions that cross
	 * section boundaries.
	 */
	if (!msec)
		return pt_insn_decode(insn, iext, decoder->image,
				      &decoder->asid);

	status = pt_msec_read(msec, insn->raw, sizeof(insn->raw), insn->ip);
	if (status < 0) {
		if (status != -pte_nomap)
			return status;

		return pt_insn_decode(insn, iext, decoder->image,
				      &decoder->asid);
	}

	/* We initialize @insn->size to the maximal possible size.  It will be
	 * set to the actual size during instruction decode.
	 */
	insn->size = (uint8_t) status;

	status = pt_ild_decode(insn, iext);
	if (status < 0) {
		if (status != -pte_bad_insn)
			return status;

		return pt_insn_decode(insn, iext, decoder->image,
				      &decoder->asid);
	}

	return status;
}

static int pt_insn_msec_lookup(struct pt_insn_decoder *decoder,
			       const struct pt_mapped_section **pmsec)
{
	struct pt_msec_cache *scache;
	struct pt_image *image;
	uint64_t ip;
	int isid;

	if (!decoder || !pmsec)
		return -pte_internal;

	scache = &decoder->scache;
	image = decoder->image;
	ip = decoder->ip;

	isid = pt_msec_cache_read(scache, pmsec, image, ip);
	if (isid < 0) {
		if (isid != -pte_nomap)
			return isid;

		return pt_msec_cache_fill(scache, pmsec, image,
					  &decoder->asid, ip);
	}

	return isid;
}

int pt_insn_next(struct pt_insn_decoder *decoder, struct pt_insn *uinsn,
		 size_t size)
{
	const struct pt_mapped_section *msec;
	struct pt_insn_ext iext;
	struct pt_insn insn, *pinsn;
	int status, isid;

	if (!uinsn || !decoder)
		return -pte_invalid;

	/* Tracing must be enabled.
	 *
	 * If it isn't we should be processing events until we either run out
	 * of trace or process a tracing enabled event.
	 */
	if (!decoder->enabled) {
		if (decoder->status & pts_eos)
			return -pte_eos;

		return -pte_no_enable;
	}

	pinsn = size == sizeof(insn) ? uinsn : &insn;

	/* Zero-initialize the instruction in case of error returns. */
	memset(pinsn, 0, sizeof(*pinsn));

	/* Fill in a few things from the current decode state.
	 *
	 * This reflects the state of the last pt_insn_next(), pt_insn_event()
	 * or pt_insn_start() call.
	 */
	if (decoder->speculative)
		pinsn->speculative = 1;
	pinsn->ip = decoder->ip;
	pinsn->mode = decoder->mode;

	isid = pt_insn_msec_lookup(decoder, &msec);
	if (isid < 0) {
		if (isid != -pte_nomap)
			return isid;

		msec = NULL;
	}

	/* We set an incorrect isid if @msec is NULL.  This will be corrected
	 * when we read the memory from the image later on.
	 */
	pinsn->isid = isid;

	status = pt_insn_decode_cached(decoder, msec, pinsn, &iext);
	if (status < 0) {
		/* Provide the incomplete instruction - the IP and mode fields
		 * are valid and may help diagnose the error.
		 */
		(void) insn_to_user(uinsn, size, pinsn);
		return status;
	}

	/* Provide the decoded instruction to the user.  It won't change during
	 * event processing.
	 */
	status = insn_to_user(uinsn, size, pinsn);
	if (status < 0)
		return status;

	/* Check for events that bind to the current instruction.
	 *
	 * If an event is indicated, we're done.
	 */
	status = pt_insn_check_insn_event(decoder, pinsn, &iext);
	if (status != 0) {
		if (status < 0)
			return status;

		if (status & pts_event_pending)
			return status;
	}

	/* Determine the next instruction's IP. */
	status = pt_insn_proceed(decoder, pinsn, &iext);
	if (status < 0)
		return status;

	/* Indicate events that bind to the new IP.
	 *
	 * Although we only look at the IP for binding events, we pass the
	 * decoded instruction in order to handle errata.
	 */
	return pt_insn_check_ip_event(decoder, pinsn, &iext);
}

static int pt_insn_process_enabled(struct pt_insn_decoder *decoder)
{
	struct pt_event *ev;

	if (!decoder)
		return -pte_internal;

	ev = &decoder->event;

	/* This event can't be a status update. */
	if (ev->status_update)
		return -pte_bad_context;

	/* We must have an IP in order to start decoding. */
	if (ev->ip_suppressed)
		return -pte_noip;

	/* We must currently be disabled. */
	if (decoder->enabled)
		return -pte_bad_context;

	decoder->ip = ev->variant.enabled.ip;
	decoder->enabled = 1;

	return 0;
}

static int pt_insn_process_disabled(struct pt_insn_decoder *decoder)
{
	struct pt_event *ev;

	if (!decoder)
		return -pte_internal;

	ev = &decoder->event;

	/* This event can't be a status update. */
	if (ev->status_update)
		return -pte_bad_context;

	/* We must currently be enabled. */
	if (!decoder->enabled)
		return -pte_bad_context;

	/* We preserve @decoder->ip.  This is where we expect tracing to resume
	 * and we'll indicate that on the subsequent enabled event if tracing
	 * actually does resume from there.
	 */
	decoder->enabled = 0;

	return 0;
}

static int pt_insn_process_async_branch(struct pt_insn_decoder *decoder)
{
	struct pt_event *ev;

	if (!decoder)
		return -pte_internal;

	ev = &decoder->event;

	/* This event can't be a status update. */
	if (ev->status_update)
		return -pte_bad_context;

	/* Tracing must be enabled in order to make sense of the event. */
	if (!decoder->enabled)
		return -pte_bad_context;

	decoder->ip = ev->variant.async_branch.to;

	return 0;
}

static int pt_insn_process_paging(struct pt_insn_decoder *decoder)
{
	uint64_t cr3;
	int errcode;

	if (!decoder)
		return -pte_internal;

	cr3 = decoder->event.variant.paging.cr3;
	if (decoder->asid.cr3 != cr3) {
		errcode = pt_msec_cache_invalidate(&decoder->scache);
		if (errcode < 0)
			return errcode;

		decoder->asid.cr3 = cr3;
	}

	return 0;
}

static int pt_insn_process_overflow(struct pt_insn_decoder *decoder)
{
	struct pt_event *ev;

	if (!decoder)
		return -pte_internal;

	ev = &decoder->event;

	/* This event can't be a status update. */
	if (ev->status_update)
		return -pte_bad_context;

	/* If the IP is suppressed, the overflow resolved while tracing was
	 * disabled.  Otherwise it resolved while tracing was enabled.
	 */
	if (ev->ip_suppressed) {
		/* Tracing is disabled.
		 *
		 * It doesn't make sense to preserve the previous IP.  This
		 * will just be misleading.  Even if tracing had been disabled
		 * before, as well, we might have missed the re-enable in the
		 * overflow.
		 */
		decoder->enabled = 0;
		decoder->ip = 0ull;
	} else {
		/* Tracing is enabled and we're at the IP at which the overflow
		 * resolved.
		 */
		decoder->ip = ev->variant.overflow.ip;
		decoder->enabled = 1;
	}

	/* We don't know the TSX state.  Let's assume we execute normally.
	 *
	 * We also don't know the execution mode.  Let's keep what we have
	 * in case we don't get an update before we have to decode the next
	 * instruction.
	 */
	decoder->speculative = 0;

	return 0;
}

static int pt_insn_process_exec_mode(struct pt_insn_decoder *decoder)
{
	enum pt_exec_mode mode;
	struct pt_event *ev;

	if (!decoder)
		return -pte_internal;

	ev = &decoder->event;
	mode = ev->variant.exec_mode.mode;

	/* Use status update events to diagnose inconsistencies. */
	if (ev->status_update && decoder->enabled &&
	    decoder->mode != ptem_unknown && decoder->mode != mode)
		return -pte_bad_status_update;

	decoder->mode = mode;

	return 0;
}

static int pt_insn_process_tsx(struct pt_insn_decoder *decoder)
{
	if (!decoder)
		return -pte_internal;

	decoder->speculative = decoder->event.variant.tsx.speculative;

	return 0;
}

static int pt_insn_process_stop(struct pt_insn_decoder *decoder)
{
	struct pt_event *ev;

	if (!decoder)
		return -pte_internal;

	ev = &decoder->event;

	/* This event can't be a status update. */
	if (ev->status_update)
		return -pte_bad_context;

	/* Tracing is always disabled before it is stopped. */
	if (decoder->enabled)
		return -pte_bad_context;

	return 0;
}

static int pt_insn_process_vmcs(struct pt_insn_decoder *decoder)
{
	uint64_t vmcs;
	int errcode;

	if (!decoder)
		return -pte_internal;

	vmcs = decoder->event.variant.vmcs.base;
	if (decoder->asid.vmcs != vmcs) {
		errcode = pt_msec_cache_invalidate(&decoder->scache);
		if (errcode < 0)
			return errcode;

		decoder->asid.vmcs = vmcs;
	}

	return 0;
}

int pt_insn_event(struct pt_insn_decoder *decoder, struct pt_event *uevent,
		  size_t size)
{
	struct pt_event *ev;
	int status;

	if (!decoder || !uevent)
		return -pte_invalid;

	/* We must currently process an event. */
	if (!decoder->process_event)
		return -pte_bad_query;

	ev = &decoder->event;
	switch (ev->type) {
	default:
		/* This is not a user event.
		 *
		 * We either indicated it wrongly or the user called
		 * pt_insn_event() without a pts_event_pending indication.
		 */
		return -pte_bad_query;

	case ptev_enabled:
		/* Indicate that tracing resumes from the IP at which tracing
		 * had been disabled before (with some special treatment for
		 * calls).
		 */
		if (decoder->ip == ev->variant.enabled.ip)
			ev->variant.enabled.resumed = 1;

		status = pt_insn_process_enabled(decoder);
		if (status < 0)
			return status;

		break;

	case ptev_async_disabled:
		if (!ev->ip_suppressed &&
		    decoder->ip != ev->variant.async_disabled.at)
			return -pte_bad_query;

		fallthrough;
	case ptev_disabled:
		status = pt_insn_process_disabled(decoder);
		if (status < 0)
			return status;

		break;

	case ptev_async_branch:
		if (decoder->ip != ev->variant.async_branch.from)
			return -pte_bad_query;

		status = pt_insn_process_async_branch(decoder);
		if (status < 0)
			return status;

		break;

	case ptev_async_paging:
		if (!ev->ip_suppressed &&
		    decoder->ip != ev->variant.async_paging.ip)
			return -pte_bad_query;

		fallthrough;
	case ptev_paging:
		status = pt_insn_process_paging(decoder);
		if (status < 0)
			return status;

		break;

	case ptev_async_vmcs:
		if (!ev->ip_suppressed &&
		    decoder->ip != ev->variant.async_vmcs.ip)
			return -pte_bad_query;

		fallthrough;
	case ptev_vmcs:
		status = pt_insn_process_vmcs(decoder);
		if (status < 0)
			return status;

		break;

	case ptev_overflow:
		status = pt_insn_process_overflow(decoder);
		if (status < 0)
			return status;

		break;

	case ptev_exec_mode:
		status = pt_insn_process_exec_mode(decoder);
		if (status < 0)
			return status;

		break;

	case ptev_tsx:
		status = pt_insn_process_tsx(decoder);
		if (status < 0)
			return status;

		break;

	case ptev_stop:
		status = pt_insn_process_stop(decoder);
		if (status < 0)
			return status;

		break;

	case ptev_exstop:
		if (!ev->ip_suppressed && decoder->enabled &&
		    decoder->ip != ev->variant.exstop.ip)
			return -pte_bad_query;

		break;

	case ptev_mwait:
		if (!ev->ip_suppressed && decoder->enabled &&
		    decoder->ip != ev->variant.mwait.ip)
			return -pte_bad_query;

		break;

	case ptev_pwre:
	case ptev_pwrx:
	case ptev_ptwrite:
	case ptev_tick:
	case ptev_cbr:
	case ptev_mnt:
		break;
	}

	/* Copy the event to the user.  Make sure we're not writing beyond the
	 * memory provided by the user.
	 *
	 * We might truncate details of an event but only for those events the
	 * user can't know about, anyway.
	 */
	if (sizeof(*ev) < size)
		size = sizeof(*ev);

	memcpy(uevent, ev, size);

	/* This completes processing of the current event. */
	decoder->process_event = 0;

	/* If we just handled an instruction event, check for further events
	 * that bind to this instruction.
	 *
	 * If we don't have further events, proceed beyond the instruction so
	 * we can check for IP events, as well.
	 */
	if (decoder->process_insn) {
		status = pt_insn_check_insn_event(decoder, &decoder->insn,
						  &decoder->iext);
		if (status != 0) {
			if (status < 0)
				return status;

			if (status & pts_event_pending)
				return status;
		}

		/* Proceed to the next instruction. */
		status = pt_insn_proceed_postponed(decoder);
		if (status < 0)
			return status;
	}

	/* Indicate further events that bind to the same IP. */
	return pt_insn_check_ip_event(decoder, NULL, NULL);
}