/*
 * Copyright (c) 2013-2018, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "pt_ild.h"
#include "pti-imm-defs.h"
#include "pti-imm.h"
#include "pti-modrm-defs.h"
#include "pti-modrm.h"
#include "pti-disp-defs.h"
#include "pti-disp.h"

#include <string.h>

/* SET UP 3 TABLES */

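/* Number of displacement bytes implied by the ModRM byte alone, indexed by
 * [effective address mode][modrm.mod][modrm.rm].  Filled in by
 * init_has_disp_regular_table() below.
 */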
static uint8_t has_disp_regular[4][4][8];

static void init_has_disp_regular_table(void)
{
	uint8_t mod, rm;

	memset(has_disp_regular, 0, sizeof(has_disp_regular));

	/*fill eamode16 */
	has_disp_regular[ptem_16bit][0][6] = 2;
	for (rm = 0; rm < 8; rm++)
		for (mod = 1; mod <= 2; mod++)
			has_disp_regular[ptem_16bit][mod][rm] = mod;

	/*fill eamode32/64 */
	has_disp_regular[ptem_32bit][0][5] = 4;
	has_disp_regular[ptem_64bit][0][5] = 4;
	for (rm = 0; rm < 8; rm++) {
		has_disp_regular[ptem_32bit][1][rm] = 1;
		has_disp_regular[ptem_32bit][2][rm] = 4;

		has_disp_regular[ptem_64bit][1][rm] = 1;
		has_disp_regular[ptem_64bit][2][rm] = 4;
	}
}

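/* Effective address mode, indexed by [address-size prefix (0x67) present]
 * [execution mode].  The prefix toggles between 16-bit and 32-bit
 * addressing; in 64-bit mode it selects 32-bit addressing.
 */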
static uint8_t eamode_table[2][4];

static void init_eamode_table(void)
{
	eamode_table[0][ptem_unknown] = ptem_unknown;
	eamode_table[0][ptem_16bit] = ptem_16bit;
	eamode_table[0][ptem_32bit] = ptem_32bit;
	eamode_table[0][ptem_64bit] = ptem_64bit;

	eamode_table[1][ptem_unknown] = ptem_unknown;
	eamode_table[1][ptem_16bit] = ptem_32bit;
	eamode_table[1][ptem_32bit] = ptem_16bit;
	eamode_table[1][ptem_64bit] = ptem_32bit;
}

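/* Whether a SIB byte follows the ModRM byte, indexed like has_disp_regular.
 * A SIB byte is only used with 32/64-bit addressing, mod != 3, rm == 4.
 */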
static uint8_t has_sib_table[4][4][8];

static void init_has_sib_table(void)
{
	uint8_t mod;

	memset(has_sib_table, 0, sizeof(has_sib_table));

	/*for eamode32/64 there is sib byte for mod!=3 and rm==4 */
	for (mod = 0; mod <= 2; mod++) {
		has_sib_table[ptem_32bit][mod][4] = 1;
		has_sib_table[ptem_64bit][mod][4] = 1;
	}
}

/* SOME ACCESSORS */

static inline uint8_t get_byte(const struct pt_ild *ild, uint8_t i)
{
	return ild->itext[i];
}

static inline uint8_t const *get_byte_ptr(const struct pt_ild *ild, uint8_t i)
{
	return ild->itext + i;
}

static inline int mode_64b(const struct pt_ild *ild)
{
	return ild->mode == ptem_64bit;
}

static inline int mode_32b(const struct pt_ild *ild)
{
	return ild->mode == ptem_32bit;
}

static inline int bits_match(uint8_t x, uint8_t mask, uint8_t target)
{
	return (x & mask) == target;
}

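/* Effective operand size (eosz) helpers: the operand-size prefix (0x66)
 * toggles between 16-bit and 32-bit operands; in 64-bit mode, REX.W selects
 * 64-bit operands and takes precedence over the prefix.
 */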
static inline enum pt_exec_mode
pti_get_nominal_eosz_non64(const struct pt_ild *ild)
{
	if (mode_32b(ild)) {
		if (ild->u.s.osz)
			return ptem_16bit;
		return ptem_32bit;
	}
	if (ild->u.s.osz)
		return ptem_32bit;
	return ptem_16bit;
}

static inline enum pt_exec_mode
pti_get_nominal_eosz(const struct pt_ild *ild)
{
	if (mode_64b(ild)) {
		if (ild->u.s.rex_w)
			return ptem_64bit;
		if (ild->u.s.osz)
			return ptem_16bit;
		return ptem_32bit;
	}
	return pti_get_nominal_eosz_non64(ild);
}

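/* Like pti_get_nominal_eosz(), but for instructions whose operand size
 * defaults to 64 bits in 64-bit mode (e.g. push).
 */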
static inline enum pt_exec_mode
pti_get_nominal_eosz_df64(const struct pt_ild *ild)
{
	if (mode_64b(ild)) {
		if (ild->u.s.rex_w)
			return ptem_64bit;
		if (ild->u.s.osz)
			return ptem_16bit;
		/* only this next line of code is different relative
		   to pti_get_nominal_eosz(), above */
		return ptem_64bit;
	}
	return pti_get_nominal_eosz_non64(ild);
}

static inline enum pt_exec_mode
pti_get_nominal_easz_non64(const struct pt_ild *ild)
{
	if (mode_32b(ild)) {
		if (ild->u.s.asz)
			return ptem_16bit;
		return ptem_32bit;
	}
	if (ild->u.s.asz)
		return ptem_32bit;
	return ptem_16bit;
}

static inline enum pt_exec_mode
pti_get_nominal_easz(const struct pt_ild *ild)
{
	if (mode_64b(ild)) {
		if (ild->u.s.asz)
			return ptem_32bit;
		return ptem_64bit;
	}
	return pti_get_nominal_easz_non64(ild);
}

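/* resolve_z/resolve_v map an effective operand size to an immediate or
 * displacement width in bytes: z-sized fields are 2/4/4 bytes and v-sized
 * fields are 2/4/8 bytes for 16/32/64-bit operands, respectively.
 */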
static inline int resolve_z(uint8_t *pbytes, enum pt_exec_mode eosz)
{
	static const uint8_t bytes[] = { 2, 4, 4 };
	unsigned int idx;

	if (!pbytes)
		return -pte_internal;

	idx = (unsigned int) eosz - 1;
	if (sizeof(bytes) <= idx)
		return -pte_bad_insn;

	*pbytes = bytes[idx];
	return 0;
}

static inline int resolve_v(uint8_t *pbytes, enum pt_exec_mode eosz)
{
	static const uint8_t bytes[] = { 2, 4, 8 };
	unsigned int idx;

	if (!pbytes)
		return -pte_internal;

	idx = (unsigned int) eosz - 1;
	if (sizeof(bytes) <= idx)
		return -pte_bad_insn;

	*pbytes = bytes[idx];
	return 0;
}

/* DECODERS */

static int set_imm_bytes(struct pt_ild *ild)
{
	/* set ild->imm1_bytes and ild->imm2_bytes for maps 0/1 */
	static uint8_t const *const map_map[] = {
		/* map 0 */ imm_bytes_map_0x0,
		/* map 1 */ imm_bytes_map_0x0F
	};
	uint8_t map, imm_code;

	if (!ild)
		return -pte_internal;

	map = ild->map;

	if ((sizeof(map_map) / sizeof(*map_map)) <= map)
		return 0;

	imm_code = map_map[map][ild->nominal_opcode];
	switch (imm_code) {
	case PTI_IMM_NONE:
	case PTI_0_IMM_WIDTH_CONST_l2:
	default:
		return 0;

	case PTI_UIMM8_IMM_WIDTH_CONST_l2:
		ild->imm1_bytes = 1;
		return 0;

	case PTI_SIMM8_IMM_WIDTH_CONST_l2:
		ild->imm1_bytes = 1;
		return 0;

	case PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2:
		/* SIMMz(eosz) */
		return resolve_z(&ild->imm1_bytes, pti_get_nominal_eosz(ild));

	case PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2:
		/* UIMMv(eosz) */
		return resolve_v(&ild->imm1_bytes, pti_get_nominal_eosz(ild));

	case PTI_UIMM16_IMM_WIDTH_CONST_l2:
		ild->imm1_bytes = 2;
		return 0;

	case PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_DF64_EOSZ_l2:
		/* push defaults to eosz64 in 64b mode, then uses SIMMz */
		return resolve_z(&ild->imm1_bytes,
				 pti_get_nominal_eosz_df64(ild));

	case PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf7_l1:
		if (ild->map == PTI_MAP_0 && pti_get_modrm_reg(ild) < 2) {
			return resolve_z(&ild->imm1_bytes,
					 pti_get_nominal_eosz(ild));
		}
		return 0;

	case PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xc7_l1:
		if (ild->map == PTI_MAP_0 && pti_get_modrm_reg(ild) == 0) {
			return resolve_z(&ild->imm1_bytes,
					 pti_get_nominal_eosz(ild));
		}
		return 0;

	case PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf6_l1:
		if (ild->map == PTI_MAP_0 && pti_get_modrm_reg(ild) < 2)
			ild->imm1_bytes = 1;

		return 0;

	case PTI_IMM_hasimm_map0x0_op0xc8_l1:
		if (ild->map == PTI_MAP_0) {
			/*enter -> imm1=2, imm2=1 */
			ild->imm1_bytes = 2;
			ild->imm2_bytes = 1;
		}
		return 0;

	case PTI_IMM_hasimm_map0x0F_op0x78_l1:
		/* AMD SSE4a (insertq/extrq use osz/f2) vs vmread
		 * (no prefixes)
		 */
		if (ild->map == PTI_MAP_1) {
			if (ild->u.s.osz || ild->u.s.last_f2f3 == 2) {
				ild->imm1_bytes = 1;
				ild->imm2_bytes = 1;
			}
		}
		return 0;
	}
}

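/* Decode the immediate bytes at offset @length and return the total
 * instruction length in bytes or a negative error code.  For AMD 3DNow!,
 * the byte that follows the operands is the actual opcode.
 */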
static int imm_dec(struct pt_ild *ild, uint8_t length)
{
	int errcode;

	if (!ild)
		return -pte_internal;

	if (ild->map == PTI_MAP_AMD3DNOW) {
		if (ild->max_bytes <= length)
			return -pte_bad_insn;

		ild->nominal_opcode = get_byte(ild, length);
		return length + 1;
	}

	errcode = set_imm_bytes(ild);
	if (errcode < 0)
		return errcode;

	length += ild->imm1_bytes;
	length += ild->imm2_bytes;
	if (ild->max_bytes < length)
		return -pte_bad_insn;

	return length;
}

static int compute_disp_dec(struct pt_ild *ild)
{
	/* set ild->disp_bytes for maps 0 and 1. */
	static uint8_t const *const map_map[] = {
		/* map 0 */ disp_bytes_map_0x0,
		/* map 1 */ disp_bytes_map_0x0F
	};
	uint8_t map, disp_kind;

	if (!ild)
		return -pte_internal;

	if (0 < ild->disp_bytes)
		return 0;

	map = ild->map;

	if ((sizeof(map_map) / sizeof(*map_map)) <= map)
		return 0;

	disp_kind = map_map[map][ild->nominal_opcode];
	switch (disp_kind) {
	case PTI_DISP_NONE:
		ild->disp_bytes = 0;
		return 0;

	case PTI_PRESERVE_DEFAULT:
		/* nothing to do */
		return 0;

	case PTI_BRDISP8:
		ild->disp_bytes = 1;
		return 0;

	case PTI_DISP_BUCKET_0_l1:
		/* BRDISPz(eosz) for 16/32 modes, and BRDISP32 for 64b mode */
		if (mode_64b(ild)) {
			ild->disp_bytes = 4;
			return 0;
		}

		return resolve_z(&ild->disp_bytes,
				 pti_get_nominal_eosz(ild));

	case PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2:
		/* MEMDISPv(easz) */
		return resolve_v(&ild->disp_bytes, pti_get_nominal_easz(ild));

	case PTI_BRDISPz_BRDISP_WIDTH_OSZ_NONTERM_EOSZ_l2:
		/* BRDISPz(eosz) for 16/32/64 modes */
		return resolve_z(&ild->disp_bytes, pti_get_nominal_eosz(ild));

	case PTI_RESOLVE_BYREG_DISP_map0x0_op0xc7_l1:
		/* reg=0 -> preserve, reg=7 -> BRDISPz(eosz) */
		if (ild->map == PTI_MAP_0 && pti_get_modrm_reg(ild) == 7) {
			return resolve_z(&ild->disp_bytes,
					 pti_get_nominal_eosz(ild));
		}
		return 0;

	default:
		return -pte_bad_insn;
	}
}

static int disp_dec(struct pt_ild *ild, uint8_t length)
{
	uint8_t disp_bytes;
	int errcode;

	if (!ild)
		return -pte_internal;

	errcode = compute_disp_dec(ild);
	if (errcode < 0)
		return errcode;

	disp_bytes = ild->disp_bytes;
	if (disp_bytes == 0)
		return imm_dec(ild, length);

	if (length + disp_bytes > ild->max_bytes)
		return -pte_bad_insn;

	/* Record only position; must be able to re-read itext bytes for
	   actual value. (SMC/CMC issue). */
	ild->disp_pos = length;

	return imm_dec(ild, length + disp_bytes);
}

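/* Decode the SIB byte.  With mod == 0, a base encoding of 5 means there is
 * no base register and a 32-bit displacement follows instead.
 */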
static int sib_dec(struct pt_ild *ild, uint8_t length)
{
	uint8_t sib;

	if (!ild)
		return -pte_internal;

	if (ild->max_bytes <= length)
		return -pte_bad_insn;

	sib = get_byte(ild, length);
	if ((sib & 0x07) == 0x05 && pti_get_modrm_mod(ild) == 0)
		ild->disp_bytes = 4;

	return disp_dec(ild, length + 1);
}

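/* Decode the ModRM byte, if the opcode has one, and derive the displacement
 * size and the presence of a SIB byte from the tables set up above.
 */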
static int modrm_dec(struct pt_ild *ild, uint8_t length)
{
	static uint8_t const *const has_modrm_2d[2] = {
		has_modrm_map_0x0,
		has_modrm_map_0x0F
	};
	int has_modrm = PTI_MODRM_FALSE;
	pti_map_enum_t map;

	if (!ild)
		return -pte_internal;

	map = pti_get_map(ild);
	if (map >= PTI_MAP_2)
		has_modrm = PTI_MODRM_TRUE;
	else
		has_modrm = has_modrm_2d[map][ild->nominal_opcode];

	if (has_modrm == PTI_MODRM_FALSE || has_modrm == PTI_MODRM_UNDEF)
		return disp_dec(ild, length);

	/* really >= here because we have not eaten the byte yet */
	if (length >= ild->max_bytes)
		return -pte_bad_insn;

	ild->modrm_byte = get_byte(ild, length);

	if (has_modrm != PTI_MODRM_IGNORE_MOD) {
		/* set disp_bytes and sib using simple tables */

		uint8_t eamode = eamode_table[ild->u.s.asz][ild->mode];
		uint8_t mod = (uint8_t) pti_get_modrm_mod(ild);
		uint8_t rm = (uint8_t) pti_get_modrm_rm(ild);
		uint8_t has_sib;

		ild->disp_bytes = has_disp_regular[eamode][mod][rm];

		has_sib = has_sib_table[eamode][mod][rm];
		if (has_sib)
			return sib_dec(ild, length + 1);
	}

	return disp_dec(ild, length + 1);
}

static inline int get_next_as_opcode(struct pt_ild *ild, uint8_t length)
{
	if (!ild)
		return -pte_internal;

	if (ild->max_bytes <= length)
		return -pte_bad_insn;

	ild->nominal_opcode = get_byte(ild, length);

	return modrm_dec(ild, length + 1);
}

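/* Decode the opcode and determine the opcode map: single-byte opcodes are
 * map 0; an 0x0F escape selects map 1, with 0x0F 0x38 and 0x0F 0x3A
 * selecting maps 2 and 3.  0x0F 0x0F is AMD 3DNow!, where the real opcode
 * trails the instruction and is decoded like an immediate.
 */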
static int opcode_dec(struct pt_ild *ild, uint8_t length)
{
	uint8_t b, m;

	if (!ild)
		return -pte_internal;

	/*no need to check max_bytes - it was checked in previous scanners */
	b = get_byte(ild, length);
	if (b != 0x0F) {	/* 1B opcodes, map 0 */
		ild->map = PTI_MAP_0;
		ild->nominal_opcode = b;

		return modrm_dec(ild, length + 1);
	}

	length++;	/* eat the 0x0F */

	if (ild->max_bytes <= length)
		return -pte_bad_insn;

	/* 0x0F opcodes MAPS 1,2,3 */
	m = get_byte(ild, length);
	if (m == 0x38) {
		ild->map = PTI_MAP_2;

		return get_next_as_opcode(ild, length + 1);
	} else if (m == 0x3A) {
		ild->map = PTI_MAP_3;
		ild->imm1_bytes = 1;

		return get_next_as_opcode(ild, length + 1);
	} else if (bits_match(m, 0xf8, 0x38)) {
		ild->map = PTI_MAP_INVALID;

		return get_next_as_opcode(ild, length + 1);
	} else if (m == 0x0F) {	/* 3dNow */
		ild->map = PTI_MAP_AMD3DNOW;
		ild->imm1_bytes = 1;
		/* real opcode is in immediate later on, but we need an
		 * opcode now. */
		ild->nominal_opcode = 0x0F;

		return modrm_dec(ild, length + 1);
	} else {	/* map 1 (simple two byte opcodes) */
		ild->nominal_opcode = m;
		ild->map = PTI_MAP_1;

		return modrm_dec(ild, length + 1);
	}
}

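/* Prefix scanning is table-driven: each byte value maps to a decode
 * function.  Bytes that are not prefixes map to prefix_done, which applies
 * any pending REX bits and hands over to opcode decode.
 */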
typedef int (*prefix_decoder)(struct pt_ild *ild, uint8_t length, uint8_t rex);
static prefix_decoder prefix_table[256];

static inline int prefix_decode(struct pt_ild *ild, uint8_t length, uint8_t rex)
{
	uint8_t byte;

	if (!ild)
		return -pte_internal;

	if (ild->max_bytes <= length)
		return -pte_bad_insn;

	byte = get_byte(ild, length);

	return prefix_table[byte](ild, length, rex);
}

static inline int prefix_next(struct pt_ild *ild, uint8_t length, uint8_t rex)
{
	return prefix_decode(ild, length + 1, rex);
}

static int prefix_osz(struct pt_ild *ild, uint8_t length, uint8_t rex)
{
	(void) rex;

	if (!ild)
		return -pte_internal;

	ild->u.s.osz = 1;

	return prefix_next(ild, length, 0);
}

static int prefix_asz(struct pt_ild *ild, uint8_t length, uint8_t rex)
{
	(void) rex;

	if (!ild)
		return -pte_internal;

	ild->u.s.asz = 1;

	return prefix_next(ild, length, 0);
}

static int prefix_lock(struct pt_ild *ild, uint8_t length, uint8_t rex)
{
	(void) rex;

	if (!ild)
		return -pte_internal;

	ild->u.s.lock = 1;

	return prefix_next(ild, length, 0);
}

static int prefix_f2(struct pt_ild *ild, uint8_t length, uint8_t rex)
{
	(void) rex;

	if (!ild)
		return -pte_internal;

	ild->u.s.f2 = 1;
	ild->u.s.last_f2f3 = 2;

	return prefix_next(ild, length, 0);
}

static int prefix_f3(struct pt_ild *ild, uint8_t length, uint8_t rex)
{
	(void) rex;

	if (!ild)
		return -pte_internal;

	ild->u.s.f3 = 1;
	ild->u.s.last_f2f3 = 3;

	return prefix_next(ild, length, 0);
}

static int prefix_ignore(struct pt_ild *ild, uint8_t length, uint8_t rex)
{
	(void) rex;

	return prefix_next(ild, length, 0);
}

static int prefix_done(struct pt_ild *ild, uint8_t length, uint8_t rex)
{
	if (!ild)
		return -pte_internal;

	if (rex & 0x04)
		ild->u.s.rex_r = 1;
	if (rex & 0x08)
		ild->u.s.rex_w = 1;

	return opcode_dec(ild, length);
}

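/* REX is only a prefix in 64-bit mode; in 16/32-bit mode, 0x40-0x4f encode
 * inc/dec opcodes.  The REX byte is forwarded so that it only takes effect
 * if it immediately precedes the opcode; any later prefix clears it.
 */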
static int prefix_rex(struct pt_ild *ild, uint8_t length, uint8_t rex)
{
	(void) rex;

	if (!ild)
		return -pte_internal;

	if (mode_64b(ild))
		return prefix_next(ild, length, get_byte(ild, length));
	else
		return opcode_dec(ild, length);
}

static inline int prefix_vex_done(struct pt_ild *ild, uint8_t length)
{
	if (!ild)
		return -pte_internal;

	ild->nominal_opcode = get_byte(ild, length);

	return modrm_dec(ild, length + 1);
}

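/* Two-byte VEX: 0xc5, one payload byte, then the opcode.  The opcode map is
 * implicitly 0F (map 1).
 */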
static int prefix_vex_c5(struct pt_ild *ild, uint8_t length, uint8_t rex)
{
	uint8_t max_bytes;
	uint8_t p1;

	(void) rex;

	if (!ild)
		return -pte_internal;

	max_bytes = ild->max_bytes;

	/* Read the next byte to validate that this is indeed VEX. */
	if (max_bytes <= (length + 1))
		return -pte_bad_insn;

	p1 = get_byte(ild, length + 1);

	/* If p1[7:6] is not 11b in non-64-bit mode, this is LDS, not VEX. */
	if (!mode_64b(ild) && !bits_match(p1, 0xc0, 0xc0))
		return opcode_dec(ild, length);

	/* We need at least 3 bytes
	 * - 2 for the VEX prefix and payload and
	 * - 1 for the opcode.
	 */
	if (max_bytes < (length + 3))
		return -pte_bad_insn;

	ild->u.s.vex = 1;
	if (p1 & 0x80)
		ild->u.s.rex_r = 1;

	ild->map = PTI_MAP_1;

	/* Eat the VEX. */
	length += 2;
	return prefix_vex_done(ild, length);
}

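/* Three-byte VEX: 0xc4, two payload bytes, then the opcode.  The low five
 * bits of the first payload byte select the opcode map.
 */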
static int prefix_vex_c4(struct pt_ild *ild, uint8_t length, uint8_t rex)
{
	uint8_t max_bytes;
	uint8_t p1, p2, map;

	(void) rex;

	if (!ild)
		return -pte_internal;

	max_bytes = ild->max_bytes;

	/* Read the next byte to validate that this is indeed VEX. */
	if (max_bytes <= (length + 1))
		return -pte_bad_insn;

	p1 = get_byte(ild, length + 1);

	/* If p1[7:6] is not 11b in non-64-bit mode, this is LES, not VEX. */
	if (!mode_64b(ild) && !bits_match(p1, 0xc0, 0xc0))
		return opcode_dec(ild, length);

	/* We need at least 4 bytes
	 * - 3 for the VEX prefix and payload and
	 * - 1 for the opcode.
	 */
	if (max_bytes < (length + 4))
		return -pte_bad_insn;

	p2 = get_byte(ild, length + 2);

	ild->u.s.vex = 1;
	if (p1 & 0x80)
		ild->u.s.rex_r = 1;
	if (p2 & 0x80)
		ild->u.s.rex_w = 1;

	map = p1 & 0x1f;
	if (PTI_MAP_INVALID <= map)
		return -pte_bad_insn;

	ild->map = map;
	if (map == PTI_MAP_3)
		ild->imm1_bytes = 1;

	/* Eat the VEX. */
	length += 3;
	return prefix_vex_done(ild, length);
}

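/* EVEX: 0x62, three payload bytes, then the opcode.  The low bits of the
 * first payload byte select the opcode map.
 */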
static int prefix_evex(struct pt_ild *ild, uint8_t length, uint8_t rex)
{
	uint8_t max_bytes;
	uint8_t p1, p2, map;

	(void) rex;

	if (!ild)
		return -pte_internal;

	max_bytes = ild->max_bytes;

	/* Read the next byte to validate that this is indeed EVEX. */
	if (max_bytes <= (length + 1))
		return -pte_bad_insn;

	p1 = get_byte(ild, length + 1);

	/* If p1[7:6] is not 11b in non-64-bit mode, this is BOUND, not EVEX. */
	if (!mode_64b(ild) && !bits_match(p1, 0xc0, 0xc0))
		return opcode_dec(ild, length);

	/* We need at least 5 bytes
	 * - 4 for the EVEX prefix and payload and
	 * - 1 for the opcode.
	 */
	if (max_bytes < (length + 5))
		return -pte_bad_insn;

	p2 = get_byte(ild, length + 2);

	ild->u.s.vex = 1;
	if (p1 & 0x80)
		ild->u.s.rex_r = 1;
	if (p2 & 0x80)
		ild->u.s.rex_w = 1;

	map = p1 & 0x03;
	ild->map = map;

	if (map == PTI_MAP_3)
		ild->imm1_bytes = 1;

	/* Eat the EVEX. */
	length += 4;
	return prefix_vex_done(ild, length);
}

static void init_prefix_table(void)
{
	unsigned int byte;

	for (byte = 0; byte <= 0xff; ++byte)
		prefix_table[byte] = prefix_done;

	prefix_table[0x66] = prefix_osz;
	prefix_table[0x67] = prefix_asz;

	/* Segment prefixes. */
	prefix_table[0x2e] = prefix_ignore;
	prefix_table[0x3e] = prefix_ignore;
	prefix_table[0x26] = prefix_ignore;
	prefix_table[0x36] = prefix_ignore;
	prefix_table[0x64] = prefix_ignore;
	prefix_table[0x65] = prefix_ignore;

	prefix_table[0xf0] = prefix_lock;
	prefix_table[0xf2] = prefix_f2;
	prefix_table[0xf3] = prefix_f3;

	for (byte = 0x40; byte <= 0x4f; ++byte)
		prefix_table[byte] = prefix_rex;

	prefix_table[0xc4] = prefix_vex_c4;
	prefix_table[0xc5] = prefix_vex_c5;
	prefix_table[0x62] = prefix_evex;
}

static int decode(struct pt_ild *ild)
{
	return prefix_decode(ild, 0, 0);
}

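/* Extract the sign-extended displacement of a direct branch from the
 * instruction bytes recorded at disp_pos.
 */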
static int set_branch_target(struct pt_insn_ext *iext, const struct pt_ild *ild)
{
	if (!iext || !ild)
		return -pte_internal;

	iext->variant.branch.is_direct = 1;

	if (ild->disp_bytes == 1) {
		const int8_t *b = (const int8_t *)
			get_byte_ptr(ild, ild->disp_pos);

		iext->variant.branch.displacement = *b;
	} else if (ild->disp_bytes == 2) {
		const int16_t *w = (const int16_t *)
			get_byte_ptr(ild, ild->disp_pos);

		iext->variant.branch.displacement = *w;
	} else if (ild->disp_bytes == 4) {
		const int32_t *d = (const int32_t *)
			get_byte_ptr(ild, ild->disp_pos);

		iext->variant.branch.displacement = *d;
	} else
		return -pte_bad_insn;

	return 0;
}

/* MAIN ENTRY POINTS */

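/* Call pt_ild_init() once to populate the decode tables before using
 * pt_ild_decode().  A minimal usage sketch, based on how this file reads
 * and writes struct pt_insn ("code"/"code_size" stand for caller-provided
 * instruction bytes; see the public headers for the full API):
 *
 *	struct pt_insn insn;
 *	struct pt_insn_ext iext;
 *
 *	memset(&insn, 0, sizeof(insn));
 *	insn.mode = ptem_64bit;
 *	memcpy(insn.raw, code, code_size);
 *	insn.size = (uint8_t) code_size;
 *
 *	pt_ild_init();
 *	if (pt_ild_decode(&insn, &iext) == 0) {
 *		... insn.size now holds the instruction length,
 *		... insn.iclass and iext.iclass the classification.
 *	}
 */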
void pt_ild_init(void)
{	/* initialization */
	init_has_disp_regular_table();
	init_has_sib_table();
	init_eamode_table();
	init_prefix_table();
}

static int pt_instruction_length_decode(struct pt_ild *ild)
{
	if (!ild)
		return -pte_internal;

	ild->u.i = 0;
	ild->imm1_bytes = 0;
	ild->imm2_bytes = 0;
	ild->disp_bytes = 0;
	ild->modrm_byte = 0;
	ild->map = PTI_MAP_INVALID;

	if (!ild->mode)
		return -pte_bad_insn;

	return decode(ild);
}

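/* Classify the decoded instruction.  Only instructions that matter for
 * reconstructing control flow (branches, calls, returns, interrupts, VMX
 * transitions, ptwrite) get a non-default class; everything else stays
 * ptic_other.
 */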
static int pt_instruction_decode(struct pt_insn *insn, struct pt_insn_ext *iext,
				 const struct pt_ild *ild)
{
	uint8_t opcode, map;

	if (!iext || !ild)
		return -pte_internal;

	iext->iclass = PTI_INST_INVALID;
	memset(&iext->variant, 0, sizeof(iext->variant));

	insn->iclass = ptic_other;

	opcode = ild->nominal_opcode;
	map = ild->map;

	if (map > PTI_MAP_1)
		return 0;	/* uninteresting */
	if (ild->u.s.vex)
		return 0;	/* uninteresting */

	/* PTI_INST_JCC, 70...7F, 0F (0x80...0x8F) */
	if (opcode >= 0x70 && opcode <= 0x7F) {
		if (map == PTI_MAP_0) {
			insn->iclass = ptic_cond_jump;
			iext->iclass = PTI_INST_JCC;

			return set_branch_target(iext, ild);
		}
		return 0;
	}
	if (opcode >= 0x80 && opcode <= 0x8F) {
		if (map == PTI_MAP_1) {
			insn->iclass = ptic_cond_jump;
			iext->iclass = PTI_INST_JCC;

			return set_branch_target(iext, ild);
		}
		return 0;
	}

	switch (ild->nominal_opcode) {
	case 0x9A:
		if (map == PTI_MAP_0) {
			insn->iclass = ptic_far_call;
			iext->iclass = PTI_INST_CALL_9A;
		}
		return 0;

	case 0xFF:
		if (map == PTI_MAP_0) {
			uint8_t reg = pti_get_modrm_reg(ild);

			if (reg == 2) {
				insn->iclass = ptic_call;
				iext->iclass = PTI_INST_CALL_FFr2;
			} else if (reg == 3) {
				insn->iclass = ptic_far_call;
				iext->iclass = PTI_INST_CALL_FFr3;
			} else if (reg == 4) {
				insn->iclass = ptic_jump;
				iext->iclass = PTI_INST_JMP_FFr4;
			} else if (reg == 5) {
				insn->iclass = ptic_far_jump;
				iext->iclass = PTI_INST_JMP_FFr5;
			}
		}
		return 0;

	case 0xE8:
		if (map == PTI_MAP_0) {
			insn->iclass = ptic_call;
			iext->iclass = PTI_INST_CALL_E8;

			return set_branch_target(iext, ild);
		}
		return 0;

	case 0xCD:
		if (map == PTI_MAP_0) {
			insn->iclass = ptic_far_call;
			iext->iclass = PTI_INST_INT;
		}

		return 0;

	case 0xCC:
		if (map == PTI_MAP_0) {
			insn->iclass = ptic_far_call;
			iext->iclass = PTI_INST_INT3;
		}

		return 0;

	case 0xCE:
		if (map == PTI_MAP_0) {
			insn->iclass = ptic_far_call;
			iext->iclass = PTI_INST_INTO;
		}

		return 0;

	case 0xF1:
		if (map == PTI_MAP_0) {
			insn->iclass = ptic_far_call;
			iext->iclass = PTI_INST_INT1;
		}

		return 0;

	case 0xCF:
		if (map == PTI_MAP_0) {
			insn->iclass = ptic_far_return;
			iext->iclass = PTI_INST_IRET;
		}
		return 0;

	case 0xE9:
		if (map == PTI_MAP_0) {
			insn->iclass = ptic_jump;
			iext->iclass = PTI_INST_JMP_E9;

			return set_branch_target(iext, ild);
		}
		return 0;

	case 0xEA:
		if (map == PTI_MAP_0) {
			/* Far jumps are treated as indirect jumps. */
			insn->iclass = ptic_far_jump;
			iext->iclass = PTI_INST_JMP_EA;
		}
		return 0;

	case 0xEB:
		if (map == PTI_MAP_0) {
			insn->iclass = ptic_jump;
			iext->iclass = PTI_INST_JMP_EB;

			return set_branch_target(iext, ild);
		}
		return 0;

	case 0xE3:
		if (map == PTI_MAP_0) {
			insn->iclass = ptic_cond_jump;
			iext->iclass = PTI_INST_JrCXZ;

			return set_branch_target(iext, ild);
		}
		return 0;

	case 0xE0:
		if (map == PTI_MAP_0) {
			insn->iclass = ptic_cond_jump;
			iext->iclass = PTI_INST_LOOPNE;

			return set_branch_target(iext, ild);
		}
		return 0;

	case 0xE1:
		if (map == PTI_MAP_0) {
			insn->iclass = ptic_cond_jump;
			iext->iclass = PTI_INST_LOOPE;

			return set_branch_target(iext, ild);
		}
		return 0;

	case 0xE2:
		if (map == PTI_MAP_0) {
			insn->iclass = ptic_cond_jump;
			iext->iclass = PTI_INST_LOOP;

			return set_branch_target(iext, ild);
		}
		return 0;

	case 0x22:
		if (map == PTI_MAP_1)
			if (pti_get_modrm_reg(ild) == 3)
				if (!ild->u.s.rex_r)
					iext->iclass = PTI_INST_MOV_CR3;

		return 0;

	case 0xC3:
		if (map == PTI_MAP_0) {
			insn->iclass = ptic_return;
			iext->iclass = PTI_INST_RET_C3;
		}
		return 0;

	case 0xC2:
		if (map == PTI_MAP_0) {
			insn->iclass = ptic_return;
			iext->iclass = PTI_INST_RET_C2;
		}
		return 0;

	case 0xCB:
		if (map == PTI_MAP_0) {
			insn->iclass = ptic_far_return;
			iext->iclass = PTI_INST_RET_CB;
		}
		return 0;

	case 0xCA:
		if (map == PTI_MAP_0) {
			insn->iclass = ptic_far_return;
			iext->iclass = PTI_INST_RET_CA;
		}
		return 0;

	case 0x05:
		if (map == PTI_MAP_1) {
			insn->iclass = ptic_far_call;
			iext->iclass = PTI_INST_SYSCALL;
		}
		return 0;

	case 0x34:
		if (map == PTI_MAP_1) {
			insn->iclass = ptic_far_call;
			iext->iclass = PTI_INST_SYSENTER;
		}
		return 0;

	case 0x35:
		if (map == PTI_MAP_1) {
			insn->iclass = ptic_far_return;
			iext->iclass = PTI_INST_SYSEXIT;
		}
		return 0;

	case 0x07:
		if (map == PTI_MAP_1) {
			insn->iclass = ptic_far_return;
			iext->iclass = PTI_INST_SYSRET;
		}
		return 0;

	case 0x01:
		if (map == PTI_MAP_1) {
			switch (ild->modrm_byte) {
			case 0xc1:
				insn->iclass = ptic_far_call;
				iext->iclass = PTI_INST_VMCALL;
				break;

			case 0xc2:
				insn->iclass = ptic_far_return;
				iext->iclass = PTI_INST_VMLAUNCH;
				break;

			case 0xc3:
				insn->iclass = ptic_far_return;
				iext->iclass = PTI_INST_VMRESUME;
				break;

			default:
				break;
			}
		}
		return 0;

	case 0xc7:
		if (map == PTI_MAP_1 &&
		    pti_get_modrm_mod(ild) != 3 &&
		    pti_get_modrm_reg(ild) == 6)
			iext->iclass = PTI_INST_VMPTRLD;

		return 0;

	case 0xae:
		if (map == PTI_MAP_1 && ild->u.s.f3 && !ild->u.s.osz &&
		    pti_get_modrm_reg(ild) == 4) {
			insn->iclass = ptic_ptwrite;
			iext->iclass = PTI_INST_PTWRITE;
		}
		return 0;

	default:
		return 0;
	}
}

int pt_ild_decode(struct pt_insn *insn, struct pt_insn_ext *iext)
{
	struct pt_ild ild;
	int size;

	if (!insn || !iext)
		return -pte_internal;

	ild.mode = insn->mode;
	ild.itext = insn->raw;
	ild.max_bytes = insn->size;

	size = pt_instruction_length_decode(&ild);
	if (size < 0)
		return size;

	insn->size = (uint8_t) size;

	return pt_instruction_decode(insn, iext, &ild);
}