"""
lldb macros for inspecting the XNU zone allocator (zalloc):
zone page metadata, zone heap objects, whatis providers, and fast
enumeration of a zone's allocated / free / cached elements.
"""

import struct

from core import (
    caching,
    gettype,
    lldbwrap,
    xnu_format,
)
from .kmem import KMem, MemoryRange
from .btlog import BTLog, BTLibrary
from .whatis import *

# FIXME: should not import this from xnu / utils
from xnu import (
    GetSourceInformationForAddress,
    print_hex_data,
)


class ZoneBitsMemoryObject(MemoryObject):
    """ Memory Object for pointers in the Zone Bitmaps range """

    MO_KIND = "zone bitmap"

    @property
    def object_range(self):
        # The whole bitmap range is reported: individual bitmap blocks
        # are not resolved (see describe() below for why).
        return self.kmem.bits_range

    def describe(self, verbose=False):
        #
        # Printing something more useful would require crawling
        # all zone chunks with non inline bitmaps until we find
        # the one.
        #
        # This is very expensive and really unlikely to ever
        # be needed for debugging.
        #
        # Moreover, bitmap pointers do not leak outside
        # of the bowels of zalloc, dangling pointers to
        # this region is very unexpected.
        #
        print("Zone Bitmap Info")
        print(" N/A")
        print()


class ZonePageMetadata(MemoryObject):
    """ Memory Object for Zone Page Metadata """

    MO_KIND = "zone metadata"

    def __init__(self, kmem, address):
        """
        @param kmem (KMem)
            The kmem this metadata belongs to

        @param address (int)
            An address inside kmem's metadata range

        @raises IndexError
            If @c address is not inside the metadata range
        """
        super().__init__(kmem, address)

        if not kmem.meta_range.contains(address):
            raise IndexError("{:#x} is not inside the meta range {}".format(
                address, kmem.meta_range))

        #
        # Resolve the ZPM we fall into
        #
        size = kmem.zpm_type.GetByteSize()
        idx = (address - kmem.meta_range.start) // size
        sbv = kmem.target.xCreateValueFromAddress(None,
            kmem.meta_range.start + idx * size, kmem.zpm_type)
        chunk_len = sbv.xGetIntegerByName('zm_chunk_len')

        self.mo_sbv = sbv
        self.kmem = kmem

        #
        # Compute the canonical ZPM
        #
        # 0xe = ZM_SECONDARY_PAGE
        # 0xf = ZM_SECONDARY_PCPU_PAGE
        #
        # TODO use a nice package to index enums by name,
        # can't use GetEnumName() because it uses kern.*
        #
        if chunk_len in (0xe, 0xf):
            # secondary page: rewind to the head metadata of the chunk
            pg_idx = sbv.xGetIntegerByName('zm_page_index')
            idx -= pg_idx
            sbv = sbv.xGetSiblingValueAtIndex(-pg_idx)
            chunk_len = sbv.xGetIntegerByName('zm_chunk_len')

        self.sbv = sbv
        self._idx = idx
        self._chunk_len = chunk_len

    @classmethod
    def _create_with_zone_address(cls, kmem, address):
        """ Make a ZonePageMetadata for an address in the zone map """

        zone_range = kmem.zone_range
        if not zone_range.contains(address):
            raise IndexError("{:#x} is not inside the zone map {}".format(
                address, zone_range))

        index = (address - zone_range.start) >> kmem.page_shift
        meta_addr = kmem.meta_range.start + index * kmem.zpm_type.GetByteSize()

        return ZonePageMetadata(kmem, meta_addr)

    @classmethod
    def _create_with_pva(cls, kmem, pva):
        """ Make a ZonePageMetadata from a packed zone_pva_t value """

        # sign-extend the 32-bit packed page index, then scale to an address
        address = ((pva | 0xffffffff00000000) << kmem.page_shift) & 0xffffffffffffffff
        return ZonePageMetadata._create_with_zone_address(kmem, address)

    @property
    def object_range(self):
        """ The memory range covered by this (chunk of) metadata """

        addr = self.sbv.GetLoadAddress()
        clen = self._chunk_len
        if clen == 1 and self.zone.percpu:
            clen = self.kmem.ncpus
        # BUG FIX: the size must use the per-cpu adjusted length `clen`,
        # the original used self._chunk_len which made the adjustment dead
        size = clen * self.kmem.zpm_type.GetByteSize()

        return MemoryRange(addr, addr + size)

    @property
    def zone(self):
        """ The Zone owning this chunk """

        sbv = self.sbv
        return Zone(sbv.xGetIntegerByName('zm_index'))

    @property
    def pgz_slot(self):
        """ The PGZ slot index for this page, or None if not a PGZ page """

        addr = self.page_addr
        kmem = self.kmem
        if kmem.pgz_range.contains(addr):
            # PGZ slots are made of a page + guard page pair
            return (addr - kmem.pgz_range.start) >> (kmem.page_shift + 1)
        return None

    def _pgz_alloc_frames(self, index):
        """ Generator of return addresses for the given pgz backtrace slot """

        kmem = self.kmem
        target = kmem.target
        bt = kmem.pgz_bt.xGetSiblingValueAtIndex(index)
        return (
            kmem.stext + pc
            for pc in target.xIterAsInt32(
                bt.xGetLoadAddressByName('pgz_bt'),
                bt.xGetIntegerByName('pgz_depth')
            )
        )

    @property
    def pgz_alloc_bt_frames(self):
        """ The PGZ allocation backtrace frames (even slots) """
        return self._pgz_alloc_frames(2 * self.pgz_slot)

    @property
    def pgz_free_bt_frames(self):
        """ The PGZ free backtrace frames (odd slots) """
        return self._pgz_alloc_frames(2 * self.pgz_slot + 1)

    def describe(self, verbose=False):
        kmem = self.kmem
        sbv = self.sbv
        zone = self.zone

        chunk_len = self._chunk_len
        if zone.percpu:
            chunk_len = kmem.ncpus

        zone.describe()

        print("Zone Metadata Info")
        print(" chunk length : {}".format(chunk_len))
        print(" metadata : {:#x}".format(sbv.GetLoadAddress()))
        print(" page : {:#x}".format(self.page_addr))

        if sbv.xGetIntegerByName('zm_inline_bitmap'):
            if verbose:
                bitmap = [
                    "{:#010x}".format(sbv.xGetSiblingValueAtIndex(i).xGetIntegerByName('zm_bitmap'))
                    for i in range(self._chunk_len)
                ]
                print(" bitmap : inline [ {} ]".format(" ".join(bitmap)))
            else:
                print(" bitmap : inline")
        else:
            # out-of-line bitmap: zm_bitmap is a packed reference where
            # bits 29-31 encode log2(word count) and the low 28 bits
            # are an 8-byte granule offset in the bits range
            bref = sbv.xGetIntegerByName('zm_bitmap')
            blen = 1 << ((bref >> 29) & 0x7)
            bsize = blen << 3
            baddr = kmem.bits_range.start + 8 * (bref & 0x0fffffff)
            bitmap = (
                "{:#018x}".format(word)
                for word in kmem.target.xIterAsUInt64(baddr, blen)
            )

            if bref == 0:
                print(" bitmap : None")
            elif not verbose:
                print(" bitmap : {:#x} ({} bytes)".format(baddr, bsize))
            elif blen <= 2:
                print(" bitmap : {:#x} ({} bytes) [ {} ]".format(
                    baddr, bsize, ' '.join(bitmap)))
            else:
                print(" bitmap : {:#x} ({} bytes) [".format(baddr, bsize))
                for i in range(blen // 4):
                    print(" {} {} {} {}".format(
                        next(bitmap), next(bitmap),
                        next(bitmap), next(bitmap)))
                print(" ]")

        print()

        mo_sbv = self.mo_sbv
        if sbv != mo_sbv:
            # the looked-up address fell on a secondary page of the chunk
            pg_idx = self.mo_sbv.xGetIntegerByName('zm_page_index')

            print("Secondary Metadata Info")
            print(" index : {}/{}".format(pg_idx + 1, chunk_len))
            print(" metadata : {:#x}".format(mo_sbv.GetLoadAddress()))
            print(" page : {:#x}".format(
                self.page_addr + (pg_idx << kmem.page_shift)))
            print()

        if verbose:
            print("-" * 80)
            print()
            print(str(self.mo_sbv))
            print()

    @property
    def next_pva(self):
        """ the next zone_pva_t queued after this Zone Page Metadata """

        return self.sbv.xGetIntegerByPath('.zm_page_next.packed_address')

    @property
    def page_addr(self):
        """ The page address corresponding to this Zone Page Metadata """

        kmem = self.kmem
        return kmem.zone_range.start + (self._idx << kmem.page_shift)

    def iter_all(self, zone):
        """ All element addresses covered by this chunk """

        base = self.page_addr
        esize = zone.elem_outer_size
        offs = zone.elem_inner_offs
        run = self.sbv.xGetIntegerByName('zm_chunk_len')

        return range(base + offs, base + (run << self.kmem.page_shift), esize)

    def is_allocated(self, zone, addr):
        """ Whether an address has the allocated bit set """

        if not self._chunk_len:
            return False

        sbv = self.sbv
        base = self.page_addr + zone.elem_inner_offs
        esize = zone.elem_inner_size
        idx = (addr - base) // esize

        # a clear bit means "allocated", a set bit means "free"
        if sbv.xGetIntegerByName('zm_inline_bitmap'):
            w, b = divmod(idx, 32)
            mask = sbv.xGetSiblingValueAtIndex(w).xGetIntegerByName('zm_bitmap')
            return (mask & (1 << b)) == 0
        else:
            w, b = divmod(idx, 64)
            bref = sbv.xGetIntegerByName('zm_bitmap')
            kmem = self.kmem
            baddr = kmem.bits_range.start + 8 * (bref & 0x0fffffff) + 8 * w
            return not (kmem.target.xReadUInt64(baddr) & (1 << b))

    def iter_allocated(self, zone):
        """ All allocated addresses in this chunk """

        kmem = self.kmem
        sbv = self.sbv
        base = self.page_addr

        # cache memory, can make enumeration twice as fast for smaller objects
        sbv.target.xReadBytes(base, self._chunk_len << kmem.page_shift)

        esize = zone.elem_outer_size
        base += zone.elem_inner_offs

        if sbv.xGetIntegerByName('zm_inline_bitmap'):
            for i in range(zone.chunk_elems):
                w, b = divmod(i, 32)
                if b == 0:
                    # fetch the next 32-bit bitmap word lazily
                    mask = sbv.xGetSiblingValueAtIndex(w).xGetIntegerByName('zm_bitmap')
                if not mask & (1 << b):
                    yield base + i * esize
        else:
            bref = sbv.xGetIntegerByName('zm_bitmap')
            baddr = kmem.bits_range.start + 8 * (bref & 0x0fffffff)
            data = kmem.target.xIterAsUInt64(baddr, 1 << ((bref >> 29) & 0x7))

            for i in range(zone.chunk_elems):
                b = i & 63
                if b == 0:
                    word = next(data)
                if not word & (1 << b):
                    yield base + i * esize


class ZoneHeapMemoryObject(MemoryObject):
    """ Memory Object for zone allocated objects """

    MO_KIND = "zone heap"

    def __init__(self, kmem, address):
        """
        @param kmem (KMem)
            The kmem this object belongs to

        @param address (int)
            An address inside kmem's zone range

        @raises IndexError
            If @c address is not inside the zone range
        """
        super().__init__(kmem, address)

        if not kmem.zone_range.contains(address):
            raise IndexError("{:#x} is not inside the zone range {}".format(
                address, kmem.zone_range))

        meta = ZonePageMetadata._create_with_zone_address(kmem, address)
        zone = meta.zone
        esize = zone.elem_outer_size

        if kmem.pgz_range.contains(address):
            # PGZ page: recompute the element position from the
            # original (pre-sampling) address recorded in the metadata
            real_addr = meta.sbv.xGetIntegerByName('zm_pgz_orig_addr')
            page_mask = kmem.page_mask
            elem_addr = (real_addr & page_mask) + (address & ~page_mask)
            elem_idx = ((elem_addr & page_mask) - zone.elem_inner_offs) // esize
            self.real_addr = real_addr
            self.real_meta = ZonePageMetadata._create_with_zone_address(kmem, real_addr)
            self.pgz = True
        else:
            base = meta.page_addr + zone.elem_inner_offs
            # addresses below the chunk's first element are invalid (-1)
            elem_idx = (address - base) // esize if address >= base else -1
            elem_addr = base + elem_idx * esize if address >= base else None
            self.real_addr = elem_addr
            self.real_meta = meta
            self.pgz = False

        self.kmem = kmem
        self.meta = meta
        self.zone = zone
        self.elem_idx = elem_idx
        self.elem_addr = elem_addr

    @property
    def object_range(self):
        """ The range of the element, or of the chunk header area """

        if self.elem_idx >= 0:
            elem_addr = self.elem_addr
            elem_size = self.zone.elem_outer_size
            return MemoryRange(elem_addr, elem_addr + elem_size)

        base = self.meta.page_addr
        size = self.zone.elem_inner_offs
        return MemoryRange(base, base + size)

    @property
    def status(self):
        """ One of "invalid", "free", "free (cached)",
            "free (recirc)" or "allocated" """

        zone = self.zone
        real_addr = self.real_addr

        if self.elem_idx < 0:
            return "invalid"

        elif not self.real_meta.is_allocated(zone, real_addr):
            return "free"

        elif real_addr in zone.cached():
            return "free (cached)"

        elif real_addr in zone.recirc():
            return "free (recirc)"

        else:
            return "allocated"

    def hexdump(self):
        """ Dump the element memory, redzones and neighboring bytes """

        print("Hexdump:")

        target = self.kmem.target
        zone = self.zone
        eaddr = self.elem_addr
        eend = eaddr + zone.elem_inner_size
        # for PGZ objects, read from the sampled page, print canonical addrs
        delta = self.real_addr - eaddr

        rz = zone.elem_redzone
        start = (eaddr & -16) - min(rz, 16) - 16
        end = (eend + 16 + 15) & -16
        marks = { self.address: '>' }

        if rz > 16:
            # large redzone: elide all but the last 16 bytes of it
            print(" " + "=" * 88)
            print(" {}".format("." * 18))

            try:
                data = target.xReadBytes(start + delta, eaddr - start)
                print_hex_data(data, start, "", marks)
            except Exception:
                print(" *** unable to read redzone memory ***")
        else:
            try:
                data = target.xReadBytes(start + delta, eaddr - rz - start)
                print_hex_data(data, start, "", marks)
            except Exception:
                pass

            print(" " + "=" * 88)

            if rz:
                try:
                    data = target.xReadBytes(eaddr - rz + delta, rz)
                    print_hex_data(data, eaddr - rz, "", marks)
                except Exception:
                    print(" *** unable to read redzone memory ***")

        if rz:
            print(" {}".format("-" * 88))

        try:
            data = target.xReadBytes(eaddr + delta, eend - eaddr)
            print_hex_data(data, eaddr, "", marks)
        except Exception:
            print(" *** unable to read element memory ***")

        print(" " + "=" * 88)

        try:
            data = target.xReadBytes(eend + delta, end - eend)
            print_hex_data(data, eend, "", marks)
        except Exception:
            pass

        print()

    def describe(self, verbose=False):
        meta = self.meta
        zone = self.zone
        status = self.status
        btlog = zone.btlog

        meta.describe()

        print("Zone Heap Object Info")
        print(" element index : {}".format(self.elem_idx))
        print(" chunk offset : {}".format(self.address - meta.page_addr))
        print(" status : {}".format(status))

        if self.pgz:
            # BUG FIX: the PGZ backtrace section must only run for PGZ
            # objects: pgz_slot is None otherwise and the frame
            # properties would raise (2 * None)
            print(" pgz orig address : {:#x}".format(self.real_addr))
            print()

            print("PGZ Allocation backtrace:")
            for pc in meta.pgz_alloc_bt_frames:
                print(" " + GetSourceInformationForAddress(pc))

            if status == 'free':
                print()

                print("PGZ Free backtrace:")
                for pc in meta.pgz_free_bt_frames:
                    print(" " + GetSourceInformationForAddress(pc))
        elif btlog and (btlog.is_log() or status == 'allocated'):
            record = next(btlog.iter_records(
                wantElement=self.elem_addr, reverse=True), None)
            if record:
                btlib = BTLibrary.get_shared()
                print(" last zlog backtrace",
                    *btlib.get_stack(record.ref).symbolicated_frames(prefix=" "), sep="\n")

        print()

        if self.elem_idx >= 0 and verbose:
            self.hexdump()


@whatis_provider
class ZoneWhatisProvider(WhatisProvider):
    """
    Whatis Provider for the zone ranges
    - metadata (bits and ZPM)
    - PGZ
    - regular heap objects
    """

    def claims(self, address):
        kmem = self.kmem

        return any(
            r.contains(address)
            for r in (kmem.meta_range, kmem.bits_range, kmem.zone_range)
        )

    def lookup(self, address):
        kmem = self.kmem

        if kmem.meta_range.contains(address):
            return ZonePageMetadata(self.kmem, address)

        if kmem.bits_range.contains(address):
            return ZoneBitsMemoryObject(self.kmem, address)

        return ZoneHeapMemoryObject(self.kmem, address)


class ZPercpuValue(object):
    """
    Provides an enumerator for a zpercpu value
    """

    def __init__(self, sbvalue):
        """
        @param sbvalue (SBValue)
            The value to enumerate
        """
        self.sbv = sbvalue

    def __iter__(self):
        sbv = self.sbv
        kmem = KMem.get_shared()
        # restore the canonical tagged address of the per-cpu slot 0
        addr = sbv.GetValueAsAddress() | 0xc0c0000000000000
        name = sbv.GetName()
        ty = sbv.GetType().GetPointeeType()

        return (
            sbv.xCreateValueFromAddress(name, addr + (cpu << kmem.page_shift), ty)
            for cpu in kmem.zcpus
        )


class Zone(object):
    """
    the Zone class wraps XNU Zones and provides fast enumeration
    of allocated, cached, ... elements.
    """

    def __init__(self, index_name_or_addr):
        """
        @param index_name_or_addr (int or str):
            - int: a zone index within [0, num_zones)
            - int: a zone address within [zone_array, zone_array + num_zones)
            - str: a zone name
        """

        kmem = KMem.get_shared()
        zarr = kmem.zone_array

        if isinstance(index_name_or_addr, str):
            mangled_name = index_name_or_addr.replace(' ', '.')
            zid = self._find_zone_id_by_mangled_name(mangled_name)
        elif index_name_or_addr < kmem.num_zones:
            # BUG FIX: was `<=`, but valid indices are [0, num_zones)
            zid = index_name_or_addr
        else:
            # treat the value as an address inside zone_array
            zid = index_name_or_addr - zarr.GetLoadAddress()
            zid = zid // zarr.GetType().GetArrayElementType().GetByteSize()

        self.kmem = kmem
        self.zid = zid
        self.sbv = zarr.chkGetChildAtIndex(zid)

    @staticmethod
    @caching.cache_dynamically
    def get_zone_name(zid, target=None):
        """
        Returns a zone name by index.

        @param zid (int)
            A zone ID

        @returns (str or None)
            Returns a string holding the zone name
            if the zone exists, or None
        """

        kmem = KMem.get_shared()
        if zid >= kmem.num_zones:
            return None

        zone = kmem.zone_array.chkGetChildAtIndex(zid)
        zsec = kmem.zsec_array.chkGetChildAtIndex(zid)

        if zone.xGetIntegerByName('z_self') == 0:
            return None

        heap_id = zsec.xGetIntegerByName('z_kheap_id')

        return KMem._HEAP_NAMES[heap_id] + zone.xGetCStringByName('z_name')

    @staticmethod
    @caching.cache_dynamically
    def _find_zone_id_by_mangled_name(name, target=None):
        """
        Lookup a zone ID by name

        @param name (str)
            The name of the zone to lookup

        @returns (int)
            The zone ID for this name

        @raises KeyError
            If no zone has this name
        """

        kmem = KMem.get_shared()
        for zid in range(kmem.num_zones):
            k = Zone.get_zone_name(zid)
            if k is not None and name == k.replace(' ', '.'):
                return zid

        raise KeyError("No zone called '{}' found".format(name))

    @property
    def initialized(self):
        """ Whether the zone has been initialized (z_self is set) """

        return self.sbv.xGetIntegerByName('z_self') != 0

    @property
    def address(self):
        """ The zone address """

        return self.sbv.GetLoadAddress()

    @property
    def name(self):
        """ The zone name """

        return self.get_zone_name(self.zid)

    @property
    def mangled_name(self):
        """ The zone mangled name """

        return self.name.replace(' ', '.')

    @caching.dyn_cached_property
    def elem_redzone(self, target=None):
        """ The KASan classic redzone size of elements (0 otherwise) """

        if self.kmem.kasan_classic:
            return self.sbv.xGetIntegerByName('z_kasan_redzone')
        return 0

    @caching.dyn_cached_property
    def elem_inner_size(self, target=None):
        """ The inner size of elements """

        return self.sbv.xGetIntegerByName('z_elem_size')

    @caching.dyn_cached_property
    def elem_outer_size(self, target=None):
        """ The outer size of elements (inner size plus redzone) """

        if not self.kmem.kasan_classic:
            return self.elem_inner_size
        return self.elem_inner_size + self.elem_redzone

    @caching.dyn_cached_property
    def elem_inner_offs(self, target=None):
        """ The chunk initial offset """

        return self.sbv.xGetIntegerByName('z_elem_offs')

    @caching.dyn_cached_property
    def chunk_pages(self, target=None):
        """ The number of pages per chunk """

        return self.sbv.xGetIntegerByName('z_chunk_pages')

    @caching.dyn_cached_property
    def chunk_elems(self, target=None):
        """ The number of elements per chunk """

        return self.sbv.xGetIntegerByName('z_chunk_elems')

    @property
    def percpu(self):
        """ Whether this is a per-cpu zone """

        return self.sbv.xGetIntegerByName('z_percpu')

    @property
    def btlog(self):
        """ Returns the zone's BTLog or None """

        try:
            btlog = self.sbv.xGetPointeeByName('z_btlog')
            return BTLog(btlog)
        except Exception:
            return None

    def describe(self):
        """ Print a human readable description of the zone """

        kmem = self.kmem
        zone = self.sbv
        zsec = kmem.zsec_array.chkGetChildAtIndex(self.zid)

        submap_arr = kmem.target.chkFindFirstGlobalVariable('zone_submaps_names')
        submap_idx = zsec.xGetIntegerByName('z_submap_idx')
        submap_name = submap_arr.xGetCStringAtIndex(submap_idx)
        submap_end = zsec.xGetIntegerByName('z_submap_from_end')

        try:
            btlog = zone.xGetIntegerByName('z_btlog')
        except Exception:
            # likely a release kernel
            btlog = None

        fmt = (
            "Zone Info\n"
            " name : {0.name} ({&z:#x})\n"
            " submap : {1} (from {2})\n"
            " element size : {0.elem_inner_size}\n"
            " element offs : {0.elem_inner_offs}\n"
        )
        if kmem.kasan_classic:
            fmt += " element redzone : {0.elem_redzone}\n"
        fmt += " chunk elems / pages : {$z.z_chunk_elems} / {$z.z_chunk_pages}\n"
        if btlog:
            fmt += " btlog : {$z.z_btlog:#x}\n"

        print(xnu_format(fmt, self, submap_name,
            "right" if submap_end else "left", z = zone))

    def iter_page_queue(self, name):
        """ Yields the ZonePageMetadata queued on a given page queue """

        kmem = self.kmem
        zone = self.sbv

        pva = zone.xGetIntegerByPath('.{}.packed_address'.format(name))

        while pva:
            meta = ZonePageMetadata._create_with_pva(kmem, pva)
            pva = meta.next_pva
            yield meta

    def _depotElements(self, depot, into):
        """ Accumulate all element addresses held in a magazine depot """

        last = depot.xGetPointeeByName('zd_tail').GetValueAsAddress()
        mag = depot.xGetPointeeByName('zd_head')

        kmem = self.kmem
        n = kmem.mag_size
        target = kmem.target

        while mag and mag.GetLoadAddress() != last:
            into.update(kmem.iter_addresses(target.xIterAsULong(
                mag.xGetLoadAddressByName('zm_elems'),
                n
            )))
            mag = mag.xGetPointeeByName('zm_next')

        return into

    def cached(self, into = None):
        """ all addresses in per-cpu caches or per-cpu depots """

        pcpu = self.sbv.GetChildMemberWithName('z_pcpu_cache')
        into = into if into is not None else set()

        if pcpu.GetValueAsAddress():
            target = pcpu.target
            kmem = self.kmem

            for cache in ZPercpuValue(pcpu):
                into.update(kmem.iter_addresses(target.xIterAsULong(
                    cache.xGetIntegerByName('zc_alloc_elems'),
                    cache.xGetIntegerByName('zc_alloc_cur')
                )))

                into.update(kmem.iter_addresses(target.xIterAsULong(
                    cache.xGetIntegerByName('zc_free_elems'),
                    cache.xGetIntegerByName('zc_free_cur')
                )))

                self._depotElements(
                    cache.chkGetChildMemberWithName('zc_depot'),
                    into = into
                )

        return into

    def recirc(self, into = None):
        """ all addresses in the recirculation layer """

        return self._depotElements(
            self.sbv.chkGetChildMemberWithName('z_recirc'),
            into = into if into is not None else set()
        )

    def iter_all(self, ty = None):
        """
        Returns a generator for all addresses/values that can be made

        @param ty (SBType or None)
            An optional type to use to form SBValues

        @returns
            - (generator<int>) if ty is None
            - (generator<SBValue>) if ty is set
        """

        addresses = (
            addr
            for name in (
                'z_pageq_full',
                'z_pageq_partial',
                'z_pageq_empty',
            )
            for meta in self.iter_page_queue(name)
            for addr in meta.iter_all(self)
        )

        if ty is None:
            return addresses

        fn = self.kmem.target.xCreateValueFromAddress
        return (fn('e', addr, ty) for addr in addresses)

    def iter_free(self, ty = None):
        """
        Returns a generator for all free addresses/values

        @param ty (SBType or None)
            An optional type to use to form SBValues

        @returns
            - (generator<int>) if ty is None
            - (generator<SBValue>) if ty is set
        """

        cached = set()
        self.cached(into = cached)
        self.recirc(into = cached)

        # free means: sitting in a cache/depot, or with a clear
        # allocated bit in its chunk bitmap
        addresses = (
            addr
            for name in (
                'z_pageq_full',
                'z_pageq_partial',
            )
            for meta in self.iter_page_queue(name)
            for addr in meta.iter_all(self)
            if addr in cached or not meta.is_allocated(self, addr)
        )

        if ty is None:
            return addresses

        fn = self.kmem.target.xCreateValueFromAddress
        return (fn('e', addr, ty) for addr in addresses)

    def iter_allocated(self, ty = None):
        """
        Returns a generator for all allocated addresses/values

        @param ty (SBType or None)
            An optional type to use to form SBValues

        @returns
            - (generator<int>) if ty is None
            - (generator<SBValue>) if ty is set
        """

        cached = set()
        self.cached(into = cached)
        self.recirc(into = cached)

        addresses = (
            addr
            for name in (
                'z_pageq_full',
                'z_pageq_partial',
            )
            for meta in self.iter_page_queue(name)
            for addr in meta.iter_allocated(self)
            if addr not in cached
        )

        if ty is None:
            return addresses

        fn = self.kmem.target.xCreateValueFromAddress
        return (fn('e', addr, ty) for addr in addresses)

    def __iter__(self):
        return self.iter_allocated()
# Public API of this module: everything else is implementation detail
# of the whatis providers and enumeration helpers.
__all__ = [
    "ZPercpuValue",
    "Zone",
]