/*
 * Copyright (c) 2016-2018, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "pt_image_section_cache.h"
#include "pt_section.h"

#include "intel-pt.h"

#include <stdlib.h>
#include <string.h>

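/* Duplicate @str into a newly malloc'ed buffer.
 *
 * Returns the duplicate on success, NULL if @str is NULL or if the
 * allocation fails.  The caller is responsible for freeing it.
 */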
static char *dupstr(const char *str)
{
	char *dup;
	size_t len;

	if (!str)
		return NULL;

	len = strlen(str);
	dup = malloc(len + 1);
	if (!dup)
		return NULL;

	return strcpy(dup, str);
}

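/* Initialize @iscache with an optional @name.
 *
 * The cache limit defaults to UINT64_MAX, i.e. it is effectively unlimited
 * until pt_iscache_set_limit() is called.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */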
int pt_iscache_init(struct pt_image_section_cache *iscache, const char *name)
{
	if (!iscache)
		return -pte_internal;

	memset(iscache, 0, sizeof(*iscache));
	iscache->limit = UINT64_MAX;
	if (name) {
		iscache->name = dupstr(name);
		if (!iscache->name)
			return -pte_nomem;
	}

#if defined(FEATURE_THREADS)
	{
		int errcode;

		errcode = mtx_init(&iscache->lock, mtx_plain);
		if (errcode != thrd_success)
			return -pte_bad_lock;
	}
#endif /* defined(FEATURE_THREADS) */

	return 0;
}

void pt_iscache_fini(struct pt_image_section_cache *iscache)
{
	if (!iscache)
		return;

	(void) pt_iscache_clear(iscache);
	free(iscache->name);

#if defined(FEATURE_THREADS)

	mtx_destroy(&iscache->lock);

#endif /* defined(FEATURE_THREADS) */
}

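/* Lock @iscache.
 *
 * With FEATURE_THREADS this takes @iscache->lock; without it, locking is a
 * no-op.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */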
static inline int pt_iscache_lock(struct pt_image_section_cache *iscache)
{
	if (!iscache)
		return -pte_internal;

#if defined(FEATURE_THREADS)
	{
		int errcode;

		errcode = mtx_lock(&iscache->lock);
		if (errcode != thrd_success)
			return -pte_bad_lock;
	}
#endif /* defined(FEATURE_THREADS) */

	return 0;
}

static inline int pt_iscache_unlock(struct pt_image_section_cache *iscache)
{
	if (!iscache)
		return -pte_internal;

#if defined(FEATURE_THREADS)
	{
		int errcode;

		errcode = mtx_unlock(&iscache->lock);
		if (errcode != thrd_success)
			return -pte_bad_lock;
	}
#endif /* defined(FEATURE_THREADS) */

	return 0;
}

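/* Translate a cache array index into an image section identifier (isid).
 *
 * Isids are positive; index 0 maps to isid 1.  This keeps zero free for
 * lookup functions to report 'no matching section'.
 */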
static inline int isid_from_index(uint16_t index)
{
	return index + 1;
}

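/* Grow the entry array of @iscache by a fixed increment of eight entries.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */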
static int pt_iscache_expand(struct pt_image_section_cache *iscache)
{
	struct pt_iscache_entry *entries;
	uint16_t capacity, target;

	if (!iscache)
		return -pte_internal;

	capacity = iscache->capacity;
	target = capacity + 8;

	/* Check for overflows. */
	if (target < capacity)
		return -pte_nomem;

	entries = realloc(iscache->entries, target * sizeof(*entries));
	if (!entries)
		return -pte_nomem;

	iscache->capacity = target;
	iscache->entries = entries;
	return 0;
}

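/* Find the entry that matches @filename, @offset, @size, and @laddr exactly.
 *
 * The caller must lock @iscache.
 *
 * Returns a positive isid if a matching entry is found, zero if there is no
 * match, a negative pt_error_code otherwise.
 */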
static int pt_iscache_find_locked(struct pt_image_section_cache *iscache,
				  const char *filename, uint64_t offset,
				  uint64_t size, uint64_t laddr)
{
	uint16_t idx, end;

	if (!iscache || !filename)
		return -pte_internal;

	end = iscache->size;
	for (idx = 0; idx < end; ++idx) {
		const struct pt_iscache_entry *entry;
		const struct pt_section *section;
		const char *sec_filename;
		uint64_t sec_offset, sec_size;

		entry = &iscache->entries[idx];

		/* We do not zero-initialize the array - a NULL check is
		 * pointless.
		 */
		section = entry->section;
		sec_filename = pt_section_filename(section);
		sec_offset = pt_section_offset(section);
		sec_size = pt_section_size(section);

		if (entry->laddr != laddr)
			continue;

		if (sec_offset != offset)
			continue;

		if (sec_size != size)
			continue;

		/* We should not have a section without a filename. */
		if (!sec_filename)
			return -pte_internal;

		if (strcmp(sec_filename, filename) != 0)
			continue;

		return isid_from_index(idx);
	}

	return 0;
}

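/* Unmap and free all lru entries in the list starting at @lru.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */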
static int pt_iscache_lru_free(struct pt_iscache_lru_entry *lru)
{
	while (lru) {
		struct pt_iscache_lru_entry *trash;
		int errcode;

		trash = lru;
		lru = lru->next;

		errcode = pt_section_unmap(trash->section);
		if (errcode < 0)
			return errcode;

		free(trash);
	}

	return 0;
}

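/* Prune @iscache->lru so the cached sections fit @iscache->limit again.
 *
 * Cuts the list after the last entry that still fits and hands the removed
 * tail to the caller via @tail so it can be unmapped outside the lock.
 *
 * The caller must lock @iscache.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */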
static int pt_iscache_lru_prune(struct pt_image_section_cache *iscache,
				struct pt_iscache_lru_entry **tail)
{
	struct pt_iscache_lru_entry *lru, **pnext;
	uint64_t limit, used;

	if (!iscache || !tail)
		return -pte_internal;

	limit = iscache->limit;
	used = 0ull;

	pnext = &iscache->lru;
	for (lru = *pnext; lru; pnext = &lru->next, lru = *pnext) {

		used += lru->size;
		if (used <= limit)
			continue;

		/* The cache got too big; prune it starting from @lru. */
		iscache->used = used - lru->size;
		*pnext = NULL;
		*tail = lru;

		return 0;
	}

	/* We shouldn't prune the cache unnecessarily. */
	return -pte_internal;
}

/* Add @section to the front of @iscache->lru.
 *
 * Returns a positive integer if we need to prune the cache.
 * Returns zero if we don't need to prune the cache.
 * Returns a negative pt_error_code otherwise.
 */
static int pt_isache_lru_new(struct pt_image_section_cache *iscache,
			     struct pt_section *section)
{
	struct pt_iscache_lru_entry *lru;
	uint64_t memsize, used, total, limit;
	int errcode;

	if (!iscache)
		return -pte_internal;

	errcode = pt_section_memsize(section, &memsize);
	if (errcode < 0)
		return errcode;

	/* Don't try to add the section if it is too big. We'd prune it again
	 * together with all other sections in our cache.
	 */
	limit = iscache->limit;
	if (limit < memsize)
		return 0;

	errcode = pt_section_map_share(section);
	if (errcode < 0)
		return errcode;

	lru = malloc(sizeof(*lru));
	if (!lru) {
		(void) pt_section_unmap(section);
		return -pte_nomem;
	}

	lru->section = section;
	lru->size = memsize;

	lru->next = iscache->lru;
	iscache->lru = lru;

	used = iscache->used;
	total = used + memsize;
	if (total < used || total < memsize)
		return -pte_overflow;

	iscache->used = total;

	return (limit < total) ? 1 : 0;
}

/* Add or move @section to the front of @iscache->lru.
 *
 * Returns a positive integer if we need to prune the cache.
 * Returns zero if we don't need to prune the cache.
 * Returns a negative pt_error_code otherwise.
 */
static int pt_iscache_lru_add(struct pt_image_section_cache *iscache,
			      struct pt_section *section)
{
	struct pt_iscache_lru_entry *lru, **pnext;

	if (!iscache)
		return -pte_internal;

	pnext = &iscache->lru;
	for (lru = *pnext; lru; pnext = &lru->next, lru = *pnext) {

		if (lru->section != section)
			continue;

		/* We found it in the cache. Move it to the front. */
		*pnext = lru->next;
		lru->next = iscache->lru;
		iscache->lru = lru;

		return 0;
	}

	/* We didn't find it in the cache. Add it. */
	return pt_isache_lru_new(iscache, section);
}


/* Remove @section from @iscache->lru.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */
static int pt_iscache_lru_remove(struct pt_image_section_cache *iscache,
				 const struct pt_section *section)
{
	struct pt_iscache_lru_entry *lru, **pnext;

	if (!iscache)
		return -pte_internal;

	pnext = &iscache->lru;
	for (lru = *pnext; lru; pnext = &lru->next, lru = *pnext) {

		if (lru->section != section)
			continue;

		/* We found it in the cache. Remove it. */
		*pnext = lru->next;
		lru->next = NULL;
		break;
	}

	return pt_iscache_lru_free(lru);
}


/* Add or move @section to the front of @iscache->lru and update its size.
 *
 * Returns a positive integer if we need to prune the cache.
 * Returns zero if we don't need to prune the cache.
 * Returns a negative pt_error_code otherwise.
 */
static int pt_iscache_lru_resize(struct pt_image_section_cache *iscache,
				 struct pt_section *section, uint64_t memsize)
{
	struct pt_iscache_lru_entry *lru;
	uint64_t oldsize, used;
	int status;

	if (!iscache)
		return -pte_internal;

	status = pt_iscache_lru_add(iscache, section);
	if (status < 0)
		return status;

	lru = iscache->lru;
	if (!lru) {
		if (status)
			return -pte_internal;
		return 0;
	}

	/* If @section is cached, it must be first.
	 *
	 * We may choose not to cache it, though, e.g. if it is too big.
	 */
	if (lru->section != section) {
		if (iscache->limit < memsize)
			return 0;

		return -pte_internal;
	}

	oldsize = lru->size;
	lru->size = memsize;

	/* If we need to prune anyway, we're done. */
	if (status)
		return status;

	used = iscache->used;
	used -= oldsize;
	used += memsize;

	iscache->used = used;

	return (iscache->limit < used) ? 1 : 0;
}

/* Clear @iscache->lru.
 *
 * Unlike other iscache_lru functions, the caller does not lock @iscache.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */
static int pt_iscache_lru_clear(struct pt_image_section_cache *iscache)
{
	struct pt_iscache_lru_entry *lru;
	int errcode;

	errcode = pt_iscache_lock(iscache);
	if (errcode < 0)
		return errcode;

	lru = iscache->lru;
	iscache->lru = NULL;
	iscache->used = 0ull;

	errcode = pt_iscache_unlock(iscache);
	if (errcode < 0)
		return errcode;

	return pt_iscache_lru_free(lru);
}

/* Search @iscache for a partial or exact match of the section identified by
 * @filename, @offset, and @size, loaded at @laddr, and return the
 * corresponding index, or @iscache->size if no match is found.
 *
 * The caller must lock @iscache.
 *
 * Returns a non-negative index on success, a negative pt_error_code otherwise.
 */
static int
pt_iscache_find_section_locked(const struct pt_image_section_cache *iscache,
			       const char *filename, uint64_t offset,
			       uint64_t size, uint64_t laddr)
{
	const struct pt_section *section;
	uint16_t idx, end;
	int match;

	if (!iscache || !filename)
		return -pte_internal;

	section = NULL;
	match = end = iscache->size;
	for (idx = 0; idx < end; ++idx) {
		const struct pt_iscache_entry *entry;
		const struct pt_section *sec;

		entry = &iscache->entries[idx];

		/* We do not zero-initialize the array - a NULL check is
		 * pointless.
		 */
		sec = entry->section;

		/* Avoid redundant match checks. */
		if (sec != section) {
			const char *sec_filename;

			/* We don't have duplicates. Skip the check. */
			if (section)
				continue;

			if (offset != pt_section_offset(sec))
				continue;

			if (size != pt_section_size(sec))
				continue;

			sec_filename = pt_section_filename(sec);
			if (!sec_filename)
				return -pte_internal;

			if (strcmp(filename, sec_filename) != 0)
				continue;

			/* Use the cached section instead. */
			section = sec;
			match = idx;
		}

		/* If we didn't continue, @section == @sec and we have a match.
		 *
		 * If we also find a matching load address, we're done.
		 */
		if (laddr == entry->laddr)
			return idx;
	}

	return match;
}

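/* Add @section, loaded at @laddr, to @iscache.
 *
 * If a matching section is already cached, the existing entry is shared
 * instead of adding a duplicate.
 *
 * Returns a positive isid on success, a negative pt_error_code otherwise.
 */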
int pt_iscache_add(struct pt_image_section_cache *iscache,
		   struct pt_section *section, uint64_t laddr)
{
	const char *filename;
	uint64_t offset, size;
	uint16_t idx;
	int errcode;

	if (!iscache || !section)
		return -pte_internal;

	/* We must have a filename for @section. */
	filename = pt_section_filename(section);
	if (!filename)
		return -pte_internal;

	offset = pt_section_offset(section);
	size = pt_section_size(section);

	/* Adding a section is slightly complicated by a potential deadlock
	 * scenario:
	 *
	 *   - in order to add a section, we need to attach to it, which
	 *     requires taking the section's attach lock.
	 *
	 *   - if we are already attached to it, we may receive on-map
	 *     notifications, which will be sent while holding the attach lock
	 *     and require taking the iscache lock.
	 *
	 * Hence we can't attach to a section while holding the iscache lock.
	 *
	 *
	 * We therefore attach to @section first and then lock @iscache.
	 *
	 * This opens a small window where an existing @section may be removed
	 * from @iscache and replaced by a new matching section. We would want
	 * to share that new section rather than adding a duplicate @section.
	 *
	 * After locking @iscache, we therefore check for existing matching
	 * sections and, if one is found, update @section. This involves
	 * detaching from @section and attaching to the existing section.
	 *
	 * And for this, we will have to temporarily unlock @iscache again.
	 */
	errcode = pt_section_get(section);
	if (errcode < 0)
		return errcode;

	errcode = pt_section_attach(section, iscache);
	if (errcode < 0)
		goto out_put;

	errcode = pt_iscache_lock(iscache);
	if (errcode < 0)
		goto out_detach;

	/* We may need to repeat this step.
	 *
	 * Typically we don't and this takes only a single iteration. One
	 * scenario where we do repeat this is when adding a section with an
	 * out-of-bounds size.
	 *
	 * We will not find a matching section in pt_iscache_add_file() so we
	 * create a new section. This will have its size reduced to match the
	 * actual file size.
	 *
	 * For this reduced size, we may now find an existing section, and we
	 * will take another trip in the below loop.
	 */
	for (;;) {
		const struct pt_iscache_entry *entry;
		struct pt_section *sec;
		int match;

		/* Find an existing section matching @section that we'd share
		 * rather than adding @section.
		 */
		match = pt_iscache_find_section_locked(iscache, filename,
						       offset, size, laddr);
		if (match < 0) {
			errcode = match;
			goto out_unlock_detach;
		}

		/* We're done if we have not found a matching section. */
		if (iscache->size <= match)
			break;

		entry = &iscache->entries[match];

		/* We're also done if we found the same section again.
		 *
		 * We further check for a perfect match. In that case, we don't
		 * need to insert anything, at all.
		 */
		sec = entry->section;
		if (sec == section) {
			if (entry->laddr == laddr) {
				errcode = pt_iscache_unlock(iscache);
				if (errcode < 0)
					goto out_detach;

				errcode = pt_section_detach(section, iscache);
				if (errcode < 0)
					goto out_lru;

				errcode = pt_section_put(section);
				if (errcode < 0)
					return errcode;

				return isid_from_index((uint16_t) match);
			}

			break;
		}

		/* We update @section to share the existing @sec.
		 *
		 * This requires detaching from @section, which, in turn,
		 * requires temporarily unlocking @iscache.
		 *
		 * We further need to remove @section from @iscache->lru.
		 */
		errcode = pt_section_get(sec);
		if (errcode < 0)
			goto out_unlock_detach;

		errcode = pt_iscache_unlock(iscache);
		if (errcode < 0) {
			(void) pt_section_put(sec);
			goto out_detach;
		}

		errcode = pt_section_detach(section, iscache);
		if (errcode < 0) {
			(void) pt_section_put(sec);
			goto out_lru;
		}

		errcode = pt_section_attach(sec, iscache);
		if (errcode < 0) {
			(void) pt_section_put(sec);
			goto out_lru;
		}

		errcode = pt_iscache_lock(iscache);
		if (errcode < 0) {
			(void) pt_section_put(section);
			/* Complete the swap for cleanup. */
			section = sec;
			goto out_detach;
		}

		/* We may have received on-map notifications for @section and we
		 * may have added @section to @iscache->lru.
		 *
		 * Since we're still holding a reference to it, no harm has been
		 * done. But we need to remove it before we drop our reference.
		 */
		errcode = pt_iscache_lru_remove(iscache, section);
		if (errcode < 0) {
			(void) pt_section_put(section);
			/* Complete the swap for cleanup. */
			section = sec;
			goto out_unlock_detach;
		}

		/* Drop the reference to @section. */
		errcode = pt_section_put(section);
		if (errcode < 0) {
			/* Complete the swap for cleanup. */
			section = sec;
			goto out_unlock_detach;
		}

		/* Swap sections.
		 *
		 * We will try again in the next iteration.
		 */
		section = sec;
	}

	/* Expand the cache, if necessary. */
	if (iscache->capacity <= iscache->size) {
		/* We must never exceed the capacity. */
		if (iscache->capacity < iscache->size) {
			errcode = -pte_internal;
			goto out_unlock_detach;
		}

		errcode = pt_iscache_expand(iscache);
		if (errcode < 0)
			goto out_unlock_detach;

		/* Make sure it is big enough, now. */
		if (iscache->capacity <= iscache->size) {
			errcode = -pte_internal;
			goto out_unlock_detach;
		}
	}

	/* Insert a new entry for @section at @laddr.
	 *
	 * This hands both attach and reference over to @iscache. We will
	 * detach and drop the reference again when the entry is removed.
	 */
	idx = iscache->size++;

	iscache->entries[idx].section = section;
	iscache->entries[idx].laddr = laddr;

	errcode = pt_iscache_unlock(iscache);
	if (errcode < 0)
		return errcode;

	return isid_from_index(idx);

out_unlock_detach:
	(void) pt_iscache_unlock(iscache);

out_detach:
	(void) pt_section_detach(section, iscache);

out_lru:
	(void) pt_iscache_lru_clear(iscache);

out_put:
	(void) pt_section_put(section);

	return errcode;
}

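/* Find a cached section of @filename at @offset with @size bytes, loaded at
 * @laddr.
 *
 * Returns a positive isid if an exact match is found, zero if there is no
 * match, a negative pt_error_code otherwise.
 */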
int pt_iscache_find(struct pt_image_section_cache *iscache,
		    const char *filename, uint64_t offset, uint64_t size,
		    uint64_t laddr)
{
	int errcode, isid;

	errcode = pt_iscache_lock(iscache);
	if (errcode < 0)
		return errcode;

	isid = pt_iscache_find_locked(iscache, filename, offset, size, laddr);

	errcode = pt_iscache_unlock(iscache);
	if (errcode < 0)
		return errcode;

	return isid;
}

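/* Look up the section identified by @isid and provide it in @section and its
 * load address in @laddr.
 *
 * On success, the caller gets a new reference to the section and is
 * responsible for putting it again.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */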
int pt_iscache_lookup(struct pt_image_section_cache *iscache,
		      struct pt_section **section, uint64_t *laddr, int isid)
{
	uint16_t index;
	int errcode, status;

	if (!iscache || !section || !laddr)
		return -pte_internal;

	if (isid <= 0)
		return -pte_bad_image;

	isid -= 1;
	if (isid > UINT16_MAX)
		return -pte_internal;

	index = (uint16_t) isid;

	errcode = pt_iscache_lock(iscache);
	if (errcode < 0)
		return errcode;

	if (iscache->size <= index)
		status = -pte_bad_image;
	else {
		const struct pt_iscache_entry *entry;

		entry = &iscache->entries[index];
		*section = entry->section;
		*laddr = entry->laddr;

		status = pt_section_get(*section);
	}

	errcode = pt_iscache_unlock(iscache);
	if (errcode < 0)
		return errcode;

	return status;
}

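/* Remove all sections from @iscache and clear its lru list.
 *
 * Detaches from and puts every cached section.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */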
int pt_iscache_clear(struct pt_image_section_cache *iscache)
{
	struct pt_iscache_lru_entry *lru;
	struct pt_iscache_entry *entries;
	uint16_t idx, end;
	int errcode;

	if (!iscache)
		return -pte_internal;

	errcode = pt_iscache_lock(iscache);
	if (errcode < 0)
		return errcode;

	entries = iscache->entries;
	end = iscache->size;
	lru = iscache->lru;

	iscache->entries = NULL;
	iscache->capacity = 0;
	iscache->size = 0;
	iscache->lru = NULL;
	iscache->used = 0ull;

	errcode = pt_iscache_unlock(iscache);
	if (errcode < 0)
		return errcode;

	errcode = pt_iscache_lru_free(lru);
	if (errcode < 0)
		return errcode;

	for (idx = 0; idx < end; ++idx) {
		struct pt_section *section;

		section = entries[idx].section;

		/* We do not zero-initialize the array - a NULL check is
		 * pointless.
		 */
		errcode = pt_section_detach(section, iscache);
		if (errcode < 0)
			return errcode;

		errcode = pt_section_put(section);
		if (errcode < 0)
			return errcode;
	}

	free(entries);
	return 0;
}

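/* Allocate and initialize a new section cache with an optional @name.
 *
 * A minimal (hypothetical) usage sketch; the file name and addresses below
 * are purely illustrative:
 *
 *	struct pt_image_section_cache *iscache;
 *	int isid;
 *
 *	iscache = pt_iscache_alloc("example");
 *	isid = pt_iscache_add_file(iscache, "a.out", 0x0ull, 0x1000ull,
 *				   0x400000ull);
 *	...
 *	pt_iscache_free(iscache);
 *
 * Returns a pointer to the new cache on success, NULL if the allocation
 * fails.
 */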
struct pt_image_section_cache *pt_iscache_alloc(const char *name)
{
	struct pt_image_section_cache *iscache;

	iscache = malloc(sizeof(*iscache));
	if (iscache)
		pt_iscache_init(iscache, name);

	return iscache;
}

void pt_iscache_free(struct pt_image_section_cache *iscache)
{
	if (!iscache)
		return;

	pt_iscache_fini(iscache);
	free(iscache);
}

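/* Set the limit for cached section memory to @limit bytes and prune the lru
 * list if it now exceeds that limit.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */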
int pt_iscache_set_limit(struct pt_image_section_cache *iscache, uint64_t limit)
{
	struct pt_iscache_lru_entry *tail;
	int errcode, status;

	if (!iscache)
		return -pte_invalid;

	status = 0;
	tail = NULL;

	errcode = pt_iscache_lock(iscache);
	if (errcode < 0)
		return errcode;

	iscache->limit = limit;
	if (limit < iscache->used)
		status = pt_iscache_lru_prune(iscache, &tail);

	errcode = pt_iscache_unlock(iscache);

	if (errcode < 0 || status < 0)
		return (status < 0) ? status : errcode;

	return pt_iscache_lru_free(tail);
}

const char *pt_iscache_name(const struct pt_image_section_cache *iscache)
{
	if (!iscache)
		return NULL;

	return iscache->name;
}

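/* Add the part of @filename starting at @offset with @size bytes, loaded at
 * @vaddr, to @iscache.
 *
 * If a matching section is already cached, the existing entry is shared
 * instead of creating a new section.
 *
 * Returns a positive isid on success, a negative pt_error_code otherwise.
 */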
int pt_iscache_add_file(struct pt_image_section_cache *iscache,
			const char *filename, uint64_t offset, uint64_t size,
			uint64_t vaddr)
{
	struct pt_section *section;
	int errcode, match, isid;

	if (!iscache || !filename)
		return -pte_invalid;

	errcode = pt_iscache_lock(iscache);
	if (errcode < 0)
		return errcode;

	match = pt_iscache_find_section_locked(iscache, filename, offset,
					       size, vaddr);
	if (match < 0) {
		(void) pt_iscache_unlock(iscache);
		return match;
	}

	/* If we found a perfect match, we will share the existing entry.
	 *
	 * If we found a section, we need to grab a reference before we unlock.
	 *
	 * If we didn't find a matching section, we create a new section, which
	 * implicitly gives us a reference to it.
	 */
	if (match < iscache->size) {
		const struct pt_iscache_entry *entry;

		entry = &iscache->entries[match];
		if (entry->laddr == vaddr) {
			errcode = pt_iscache_unlock(iscache);
			if (errcode < 0)
				return errcode;

			return isid_from_index((uint16_t) match);
		}

		section = entry->section;

		errcode = pt_section_get(section);
		if (errcode < 0) {
			(void) pt_iscache_unlock(iscache);
			return errcode;
		}

		errcode = pt_iscache_unlock(iscache);
		if (errcode < 0) {
			(void) pt_section_put(section);
			return errcode;
		}
	} else {
		errcode = pt_iscache_unlock(iscache);
		if (errcode < 0)
			return errcode;

		section = pt_mk_section(filename, offset, size);
		if (!section)
			return -pte_invalid;
	}

	/* We unlocked @iscache and hold a reference to @section. */
	isid = pt_iscache_add(iscache, section, vaddr);

	/* We grab a reference when we add the section. Drop the one we
	 * obtained before.
	 */
	errcode = pt_section_put(section);
	if (errcode < 0)
		return errcode;

	return isid;
}


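/* Read up to @size bytes from the section identified by @isid into @buffer,
 * starting at @vaddr.
 *
 * Reads are truncated to at most UINT16_MAX bytes; the caller is expected to
 * issue further reads for the remainder.
 *
 * Returns the number of bytes read on success, a negative pt_error_code
 * otherwise.
 */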
int pt_iscache_read(struct pt_image_section_cache *iscache, uint8_t *buffer,
		    uint64_t size, int isid, uint64_t vaddr)
{
	struct pt_section *section;
	uint64_t laddr;
	int errcode, status;

	if (!iscache || !buffer || !size)
		return -pte_invalid;

	errcode = pt_iscache_lookup(iscache, &section, &laddr, isid);
	if (errcode < 0)
		return errcode;

	if (vaddr < laddr) {
		(void) pt_section_put(section);
		return -pte_nomap;
	}

	vaddr -= laddr;

	errcode = pt_section_map(section);
	if (errcode < 0) {
		(void) pt_section_put(section);
		return errcode;
	}

	/* We truncate the read if it gets too big. The user is expected to
	 * issue further reads for the remaining part.
	 */
	if (UINT16_MAX < size)
		size = UINT16_MAX;

	status = pt_section_read(section, buffer, (uint16_t) size, vaddr);

	errcode = pt_section_unmap(section);
	if (errcode < 0) {
		(void) pt_section_put(section);
		return errcode;
	}

	errcode = pt_section_put(section);
	if (errcode < 0)
		return errcode;

	return status;
}

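/* Handle an on-map notification for @section.
 *
 * Adds @section to the front of @iscache->lru and prunes the cache if it
 * exceeds its limit.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */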
int pt_iscache_notify_map(struct pt_image_section_cache *iscache,
			  struct pt_section *section)
{
	struct pt_iscache_lru_entry *tail;
	int errcode, status;

	tail = NULL;

	errcode = pt_iscache_lock(iscache);
	if (errcode < 0)
		return errcode;

	status = pt_iscache_lru_add(iscache, section);
	if (status > 0)
		status = pt_iscache_lru_prune(iscache, &tail);

	errcode = pt_iscache_unlock(iscache);

	if (errcode < 0 || status < 0)
		return (status < 0) ? status : errcode;

	return pt_iscache_lru_free(tail);
}

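/* Handle a resize notification for @section, whose mapped memory now takes
 * @memsize bytes.
 *
 * Updates the lru entry for @section and prunes the cache if it exceeds its
 * limit.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */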
int pt_iscache_notify_resize(struct pt_image_section_cache *iscache,
			     struct pt_section *section, uint64_t memsize)
{
	struct pt_iscache_lru_entry *tail;
	int errcode, status;

	tail = NULL;

	errcode = pt_iscache_lock(iscache);
	if (errcode < 0)
		return errcode;

	status = pt_iscache_lru_resize(iscache, section, memsize);
	if (status > 0)
		status = pt_iscache_lru_prune(iscache, &tail);

	errcode = pt_iscache_unlock(iscache);

	if (errcode < 0 || status < 0)
		return (status < 0) ? status : errcode;

	return pt_iscache_lru_free(tail);
}