//===-- MachTask.cpp --------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//----------------------------------------------------------------------
//
//  MachTask.cpp
//  debugserver
//
//  Created by Greg Clayton on 12/5/08.
//
//===----------------------------------------------------------------------===//

#include "MachTask.h"

// C Includes

#include <mach-o/dyld_images.h>
#include <mach/mach_vm.h>
#import <sys/sysctl.h>

#if defined (__APPLE__)
#include <pthread.h>
#include <sched.h>
#endif

// C++ Includes
#include <iomanip>
#include <sstream>

// Other libraries and framework includes
// Project includes
#include "CFUtils.h"
#include "DNB.h"
#include "DNBError.h"
#include "DNBLog.h"
#include "MachProcess.h"
#include "DNBDataRef.h"
#include "stack_logging.h"

#ifdef WITH_SPRINGBOARD

#include <CoreFoundation/CoreFoundation.h>
#include <SpringBoardServices/SpringBoardServer.h>
#include <SpringBoardServices/SBSWatchdogAssertion.h>

#endif

#ifdef WITH_BKS
extern "C"
{
    #import <Foundation/Foundation.h>
    #import <BackBoardServices/BackBoardServices.h>
    #import <BackBoardServices/BKSWatchdogAssertion.h>
}
#endif

#include <AvailabilityMacros.h>

#ifdef LLDB_ENERGY
#include <mach/mach_time.h>
#include <pmenergy.h>
#include <pmsample.h>
#endif


//----------------------------------------------------------------------
// MachTask constructor
//----------------------------------------------------------------------
MachTask::MachTask(MachProcess *process) :
    m_process (process),
    m_task (TASK_NULL),
    m_vm_memory (),
    m_exception_thread (0),
    m_exception_port (MACH_PORT_NULL)
{
    memset(&m_exc_port_info, 0, sizeof(m_exc_port_info));
}

//----------------------------------------------------------------------
// Destructor
//----------------------------------------------------------------------
MachTask::~MachTask()
{
    Clear();
}


//----------------------------------------------------------------------
// MachTask::Suspend
//----------------------------------------------------------------------
kern_return_t
MachTask::Suspend()
{
    DNBError err;
    task_t task = TaskPort();
    err = ::task_suspend (task);
    if (DNBLogCheckLogBit(LOG_TASK) || err.Fail())
        err.LogThreaded("::task_suspend ( target_task = 0x%4.4x )", task);
    return err.Error();
}

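// Note on Suspend()/Resume() pairing: ::task_suspend() increments the task's
// suspend count each time it is called, so every successful Suspend() is
// normally balanced by a later Resume(). A minimal sketch (assuming a valid,
// attached MachTask named "task" whose inferior is currently running):
//
//     task.Suspend();     // stops all threads in the inferior
//     // ... inspect memory or registers while the task is stopped ...
//     task.Resume();      // lets the inferior run again
//
// Resume() below checks the suspend count first and only calls ::task_resume()
// when the count is non-zero, since otherwise the task is already running.
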
//----------------------------------------------------------------------
// MachTask::Resume
//----------------------------------------------------------------------
kern_return_t
MachTask::Resume()
{
    struct task_basic_info task_info;
    task_t task = TaskPort();
    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;

    DNBError err;
    err = BasicInfo(task, &task_info);

    if (err.Success())
    {
        // task_resume isn't counted like task_suspend calls are, so if the
        // task is not suspended, don't try and resume it since it is already
        // running
        if (task_info.suspend_count > 0)
        {
            err = ::task_resume (task);
            if (DNBLogCheckLogBit(LOG_TASK) || err.Fail())
                err.LogThreaded("::task_resume ( target_task = 0x%4.4x )", task);
        }
    }
    return err.Error();
}

//----------------------------------------------------------------------
// MachTask::ExceptionPort
//----------------------------------------------------------------------
mach_port_t
MachTask::ExceptionPort() const
{
    return m_exception_port;
}

//----------------------------------------------------------------------
// MachTask::ExceptionPortIsValid
//----------------------------------------------------------------------
bool
MachTask::ExceptionPortIsValid() const
{
    return MACH_PORT_VALID(m_exception_port);
}


//----------------------------------------------------------------------
// MachTask::Clear
//----------------------------------------------------------------------
void
MachTask::Clear()
{
    // Do any cleanup needed for this task
    m_task = TASK_NULL;
    m_exception_thread = 0;
    m_exception_port = MACH_PORT_NULL;
}


//----------------------------------------------------------------------
// MachTask::SaveExceptionPortInfo
//----------------------------------------------------------------------
kern_return_t
MachTask::SaveExceptionPortInfo()
{
    return m_exc_port_info.Save(TaskPort());
}

//----------------------------------------------------------------------
// MachTask::RestoreExceptionPortInfo
//----------------------------------------------------------------------
kern_return_t
MachTask::RestoreExceptionPortInfo()
{
    return m_exc_port_info.Restore(TaskPort());
}


//----------------------------------------------------------------------
// MachTask::ReadMemory
//----------------------------------------------------------------------
nub_size_t
MachTask::ReadMemory (nub_addr_t addr, nub_size_t size, void *buf)
{
    nub_size_t n = 0;
    task_t task = TaskPort();
    if (task != TASK_NULL)
    {
        n = m_vm_memory.Read(task, addr, buf, size);

        DNBLogThreadedIf(LOG_MEMORY, "MachTask::ReadMemory ( addr = 0x%8.8llx, size = %llu, buf = %p) => %llu bytes read", (uint64_t)addr, (uint64_t)size, buf, (uint64_t)n);
        if (DNBLogCheckLogBit(LOG_MEMORY_DATA_LONG) || (DNBLogCheckLogBit(LOG_MEMORY_DATA_SHORT) && size <= 8))
        {
            DNBDataRef data((uint8_t*)buf, n, false);
            data.Dump(0, static_cast<DNBDataRef::offset_t>(n), addr, DNBDataRef::TypeUInt8, 16);
        }
    }
    return n;
}


//----------------------------------------------------------------------
// MachTask::WriteMemory
//----------------------------------------------------------------------
nub_size_t
MachTask::WriteMemory (nub_addr_t addr, nub_size_t size, const void *buf)
{
    nub_size_t n = 0;
    task_t task = TaskPort();
    if (task != TASK_NULL)
    {
        n = m_vm_memory.Write(task, addr, buf, size);
        DNBLogThreadedIf(LOG_MEMORY, "MachTask::WriteMemory ( addr = 0x%8.8llx, size = %llu, buf = %p) => %llu bytes written", (uint64_t)addr, (uint64_t)size, buf, (uint64_t)n);
        if (DNBLogCheckLogBit(LOG_MEMORY_DATA_LONG) || (DNBLogCheckLogBit(LOG_MEMORY_DATA_SHORT) && size <= 8))
        {
            DNBDataRef data((uint8_t*)buf, n, false);
            data.Dump(0, static_cast<DNBDataRef::offset_t>(n), addr, DNBDataRef::TypeUInt8, 16);
        }
    }
    return n;
}

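// Illustrative sketch (not part of this file's API contract): how a caller
// might use the memory accessors above. The address value is hypothetical.
//
//     MachTask &task = ...;                  // assuming a valid, attached task
//     uint32_t value = 0;
//     if (task.ReadMemory (0x100000000ULL, sizeof(value), &value) == sizeof(value))
//     {
//         value |= 1;
//         task.WriteMemory (0x100000000ULL, sizeof(value), &value);
//     }
//
// Both calls return the number of bytes actually transferred, so callers
// should compare the result against the requested size rather than treating
// it as a boolean.
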
//----------------------------------------------------------------------
// MachTask::MemoryRegionInfo
//----------------------------------------------------------------------
int
MachTask::GetMemoryRegionInfo (nub_addr_t addr, DNBRegionInfo *region_info)
{
    task_t task = TaskPort();
    if (task == TASK_NULL)
        return -1;

    int ret = m_vm_memory.GetMemoryRegionInfo(task, addr, region_info);
    DNBLogThreadedIf(LOG_MEMORY, "MachTask::MemoryRegionInfo ( addr = 0x%8.8llx ) => %i (start = 0x%8.8llx, size = 0x%8.8llx, permissions = %u)",
                     (uint64_t)addr,
                     ret,
                     (uint64_t)region_info->addr,
                     (uint64_t)region_info->size,
                     region_info->permissions);
    return ret;
}

#define TIME_VALUE_TO_TIMEVAL(a, r) do {        \
    (r)->tv_sec = (a)->seconds;                 \
    (r)->tv_usec = (a)->microseconds;           \
} while (0)

// We should consider moving this into each MachThread.
static void get_threads_profile_data(DNBProfileDataScanType scanType, task_t task, nub_process_t pid, std::vector<uint64_t> &threads_id, std::vector<std::string> &threads_name, std::vector<uint64_t> &threads_used_usec)
{
    kern_return_t kr;
    thread_act_array_t threads;
    mach_msg_type_number_t tcnt;

    kr = task_threads(task, &threads, &tcnt);
    if (kr != KERN_SUCCESS)
        return;

    for (mach_msg_type_number_t i = 0; i < tcnt; i++)
    {
        thread_identifier_info_data_t identifier_info;
        mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
        kr = ::thread_info(threads[i], THREAD_IDENTIFIER_INFO, (thread_info_t)&identifier_info, &count);
        if (kr != KERN_SUCCESS) continue;

        thread_basic_info_data_t basic_info;
        count = THREAD_BASIC_INFO_COUNT;
        kr = ::thread_info(threads[i], THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count);
        if (kr != KERN_SUCCESS) continue;

        if ((basic_info.flags & TH_FLAGS_IDLE) == 0)
        {
            nub_thread_t tid = MachThread::GetGloballyUniqueThreadIDForMachPortID (threads[i]);
            threads_id.push_back(tid);

            if ((scanType & eProfileThreadName) && (identifier_info.thread_handle != 0))
            {
                struct proc_threadinfo proc_threadinfo;
                int len = ::proc_pidinfo(pid, PROC_PIDTHREADINFO, identifier_info.thread_handle, &proc_threadinfo, PROC_PIDTHREADINFO_SIZE);
                if (len && proc_threadinfo.pth_name[0])
                {
                    threads_name.push_back(proc_threadinfo.pth_name);
                }
                else
                {
                    threads_name.push_back("");
                }
            }
            else
            {
                threads_name.push_back("");
            }
            struct timeval tv;
            struct timeval thread_tv;
            TIME_VALUE_TO_TIMEVAL(&basic_info.user_time, &thread_tv);
            TIME_VALUE_TO_TIMEVAL(&basic_info.system_time, &tv);
            timeradd(&thread_tv, &tv, &thread_tv);
            uint64_t used_usec = thread_tv.tv_sec * 1000000ULL + thread_tv.tv_usec;
            threads_used_usec.push_back(used_usec);
        }

        mach_port_deallocate(mach_task_self(), threads[i]);
    }
    mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)threads, tcnt * sizeof(*threads));
}

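// Note on the helper above: threads_id, threads_name and threads_used_usec
// are filled in lock step, so index i in each vector describes the same
// (non-idle) thread. The per-thread CPU figure is simply
//
//     used_usec = (user_time + system_time) expressed in microseconds
//
// computed via TIME_VALUE_TO_TIMEVAL and timeradd. For example, a user_time
// of 1.250000 s plus a system_time of 0.500000 s yields used_usec == 1750000.
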
#define RAW_HEXBASE     std::setfill('0') << std::hex << std::right
#define DECIMAL         std::dec << std::setfill(' ')
std::string
MachTask::GetProfileData (DNBProfileDataScanType scanType)
{
    std::string result;

    static int32_t numCPU = -1;
    struct host_cpu_load_info host_info;
    if (scanType & eProfileHostCPU)
    {
        int32_t mib[] = {CTL_HW, HW_AVAILCPU};
        size_t len = sizeof(numCPU);
        if (numCPU == -1)
        {
            if (sysctl(mib, sizeof(mib) / sizeof(int32_t), &numCPU, &len, NULL, 0) != 0)
                return result;
        }

        mach_port_t localHost = mach_host_self();
        mach_msg_type_number_t count = HOST_CPU_LOAD_INFO_COUNT;
        kern_return_t kr = host_statistics(localHost, HOST_CPU_LOAD_INFO, (host_info_t)&host_info, &count);
        if (kr != KERN_SUCCESS)
            return result;
    }

    task_t task = TaskPort();
    if (task == TASK_NULL)
        return result;

    pid_t pid = m_process->ProcessID();

    struct task_basic_info task_info;
    DNBError err;
    err = BasicInfo(task, &task_info);

    if (!err.Success())
        return result;

    uint64_t elapsed_usec = 0;
    uint64_t task_used_usec = 0;
    if (scanType & eProfileCPU)
    {
        // Get current used time.
        struct timeval current_used_time;
        struct timeval tv;
        TIME_VALUE_TO_TIMEVAL(&task_info.user_time, &current_used_time);
        TIME_VALUE_TO_TIMEVAL(&task_info.system_time, &tv);
        timeradd(&current_used_time, &tv, &current_used_time);
        task_used_usec = current_used_time.tv_sec * 1000000ULL + current_used_time.tv_usec;

        struct timeval current_elapsed_time;
        int res = gettimeofday(&current_elapsed_time, NULL);
        if (res == 0)
        {
            elapsed_usec = current_elapsed_time.tv_sec * 1000000ULL + current_elapsed_time.tv_usec;
        }
    }

    std::vector<uint64_t> threads_id;
    std::vector<std::string> threads_name;
    std::vector<uint64_t> threads_used_usec;

    if (scanType & eProfileThreadsCPU)
    {
        get_threads_profile_data(scanType, task, pid, threads_id, threads_name, threads_used_usec);
    }

#if defined (HOST_VM_INFO64_COUNT)
    vm_statistics64_data_t vminfo;
#else
    struct vm_statistics vminfo;
#endif
    uint64_t physical_memory;
    mach_vm_size_t rprvt = 0;
    mach_vm_size_t rsize = 0;
    mach_vm_size_t vprvt = 0;
    mach_vm_size_t vsize = 0;
    mach_vm_size_t dirty_size = 0;
    mach_vm_size_t purgeable = 0;
    mach_vm_size_t anonymous = 0;
    if (m_vm_memory.GetMemoryProfile(scanType, task, task_info, m_process->GetCPUType(), pid, vminfo, physical_memory, rprvt, rsize, vprvt, vsize, dirty_size, purgeable, anonymous))
    {
        std::ostringstream profile_data_stream;

        if (scanType & eProfileHostCPU)
        {
            profile_data_stream << "num_cpu:" << numCPU << ';';
            profile_data_stream << "host_user_ticks:" << host_info.cpu_ticks[CPU_STATE_USER] << ';';
            profile_data_stream << "host_sys_ticks:" << host_info.cpu_ticks[CPU_STATE_SYSTEM] << ';';
            profile_data_stream << "host_idle_ticks:" << host_info.cpu_ticks[CPU_STATE_IDLE] << ';';
        }

        if (scanType & eProfileCPU)
        {
            profile_data_stream << "elapsed_usec:" << elapsed_usec << ';';
            profile_data_stream << "task_used_usec:" << task_used_usec << ';';
        }

        if (scanType & eProfileThreadsCPU)
        {
            const size_t num_threads = threads_id.size();
            for (size_t i=0; i<num_threads; i++)
            {
                profile_data_stream << "thread_used_id:" << std::hex << threads_id[i] << std::dec << ';';
                profile_data_stream << "thread_used_usec:" << threads_used_usec[i] << ';';

                if (scanType & eProfileThreadName)
                {
                    profile_data_stream << "thread_used_name:";
                    const size_t len = threads_name[i].size();
                    if (len)
                    {
                        const char *thread_name = threads_name[i].c_str();
                        // Make sure that thread name doesn't interfere with our delimiter.
                        profile_data_stream << RAW_HEXBASE << std::setw(2);
                        const uint8_t *ubuf8 = (const uint8_t *)(thread_name);
                        for (size_t j=0; j<len; j++)
                        {
                            profile_data_stream << (uint32_t)(ubuf8[j]);
                        }
                        // Reset back to DECIMAL.
                        profile_data_stream << DECIMAL;
                    }
                    profile_data_stream << ';';
                }
            }
        }

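        // Why the thread name is hex-encoded above: the profile string uses
        // ':' and ';' as field delimiters, so emitting raw name bytes could
        // corrupt the framing. Each byte is written as hex instead; e.g. a
        // thread named "main" (hypothetical) would appear as
        //
        //     thread_used_name:6d61696e;
        //
        // and the consumer is expected to hex-decode the value back to text.
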
        if (scanType & eProfileHostMemory)
            profile_data_stream << "total:" << physical_memory << ';';

        if (scanType & eProfileMemory)
        {
#if defined (HOST_VM_INFO64_COUNT) && defined (_VM_PAGE_SIZE_H_)
            static vm_size_t pagesize = vm_kernel_page_size;
#else
            static vm_size_t pagesize;
            static bool calculated = false;
            if (!calculated)
            {
                calculated = true;
                pagesize = PageSize();
            }
#endif

            /* Unused values. Optimized out for transfer performance.
            profile_data_stream << "wired:" << vminfo.wire_count * pagesize << ';';
            profile_data_stream << "active:" << vminfo.active_count * pagesize << ';';
            profile_data_stream << "inactive:" << vminfo.inactive_count * pagesize << ';';
            */
#if defined (HOST_VM_INFO64_COUNT)
            // This mimics Activity Monitor.
            uint64_t total_used_count = (physical_memory / pagesize) - (vminfo.free_count - vminfo.speculative_count) - vminfo.external_page_count - vminfo.purgeable_count;
#else
            uint64_t total_used_count = vminfo.wire_count + vminfo.inactive_count + vminfo.active_count;
#endif
            profile_data_stream << "used:" << total_used_count * pagesize << ';';
            /* Unused values. Optimized out for transfer performance.
            profile_data_stream << "free:" << vminfo.free_count * pagesize << ';';
            */

            profile_data_stream << "rprvt:" << rprvt << ';';
            /* Unused values. Optimized out for transfer performance.
            profile_data_stream << "rsize:" << rsize << ';';
            profile_data_stream << "vprvt:" << vprvt << ';';
            profile_data_stream << "vsize:" << vsize << ';';
            */

            if (scanType & eProfileMemoryDirtyPage)
                profile_data_stream << "dirty:" << dirty_size << ';';

            if (scanType & eProfileMemoryAnonymous)
            {
                profile_data_stream << "purgeable:" << purgeable << ';';
                profile_data_stream << "anonymous:" << anonymous << ';';
            }
        }

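        // The finished profile string is a flat sequence of key:value pairs
        // separated by ';' and terminated by "--end--;". An illustrative
        // result (with made-up numbers) for a CPU + memory scan might look
        // like:
        //
        //     elapsed_usec:1468377805000000;task_used_usec:250000;used:4823449600;rprvt:18874368;--end--;
        //
        // Consumers are expected to split on ';' and then on the first ':'
        // of each pair.
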
        // proc_pid_rusage pm_sample_task_and_pid pm_energy_impact needs to be tested for weakness in Cab
#ifdef LLDB_ENERGY
        if ((scanType & eProfileEnergy) && (pm_sample_task_and_pid != NULL))
        {
            struct rusage_info_v2 info;
            int rc = proc_pid_rusage(pid, RUSAGE_INFO_V2, (rusage_info_t *)&info);
            if (rc == 0)
            {
                uint64_t now = mach_absolute_time();
                pm_task_energy_data_t pm_energy;
                memset(&pm_energy, 0, sizeof(pm_energy));
                /*
                 * Disable most features of pm_sample_pid. It will gather
                 * network/GPU/WindowServer information; fill in the rest.
                 */
                pm_sample_task_and_pid(task, pid, &pm_energy, now, PM_SAMPLE_ALL & ~PM_SAMPLE_NAME & ~PM_SAMPLE_INTERVAL & ~PM_SAMPLE_CPU & ~PM_SAMPLE_DISK);
                pm_energy.sti.total_user = info.ri_user_time;
                pm_energy.sti.total_system = info.ri_system_time;
                pm_energy.sti.task_interrupt_wakeups = info.ri_interrupt_wkups;
                pm_energy.sti.task_platform_idle_wakeups = info.ri_pkg_idle_wkups;
                pm_energy.diskio_bytesread = info.ri_diskio_bytesread;
                pm_energy.diskio_byteswritten = info.ri_diskio_byteswritten;
                pm_energy.pageins = info.ri_pageins;

                uint64_t total_energy = (uint64_t)(pm_energy_impact(&pm_energy) * NSEC_PER_SEC);
                //uint64_t process_age = now - info.ri_proc_start_abstime;
                //uint64_t avg_energy = 100.0 * (double)total_energy / (double)process_age;

                profile_data_stream << "energy:" << total_energy << ';';
            }
        }
#endif

        profile_data_stream << "--end--;";

        result = profile_data_stream.str();
    }

    return result;
}


//----------------------------------------------------------------------
// MachTask::TaskPortForProcessID
//----------------------------------------------------------------------
task_t
MachTask::TaskPortForProcessID (DNBError &err, bool force)
{
    if (((m_task == TASK_NULL) || force) && m_process != NULL)
        m_task = MachTask::TaskPortForProcessID(m_process->ProcessID(), err);
    return m_task;
}

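// Note: the static overload below wraps ::task_for_pid(), which on modern
// macOS and iOS generally succeeds only when debugserver runs with the
// required privileges or code-signing entitlements (or as root); otherwise
// the call fails and the retry loop simply exhausts its attempts. This is a
// general observation about the API rather than behavior enforced here.
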
//----------------------------------------------------------------------
// MachTask::TaskPortForProcessID
//----------------------------------------------------------------------
task_t
MachTask::TaskPortForProcessID (pid_t pid, DNBError &err, uint32_t num_retries, uint32_t usec_interval)
{
    if (pid != INVALID_NUB_PROCESS)
    {
        DNBError err;
        mach_port_t task_self = mach_task_self ();
        task_t task = TASK_NULL;
        for (uint32_t i=0; i<num_retries; i++)
        {
            err = ::task_for_pid ( task_self, pid, &task);

            if (DNBLogCheckLogBit(LOG_TASK) || err.Fail())
            {
                char str[1024];
                ::snprintf (str,
                            sizeof(str),
                            "::task_for_pid ( target_tport = 0x%4.4x, pid = %d, &task ) => err = 0x%8.8x (%s)",
                            task_self,
                            pid,
                            err.Error(),
                            err.AsString() ? err.AsString() : "success");
                if (err.Fail())
                    err.SetErrorString(str);
                err.LogThreaded(str);
            }

            if (err.Success())
                return task;

            // Sleep a bit and try again
            ::usleep (usec_interval);
        }
    }
    return TASK_NULL;
}


//----------------------------------------------------------------------
// MachTask::BasicInfo
//----------------------------------------------------------------------
kern_return_t
MachTask::BasicInfo(struct task_basic_info *info)
{
    return BasicInfo (TaskPort(), info);
}

//----------------------------------------------------------------------
// MachTask::BasicInfo
//----------------------------------------------------------------------
kern_return_t
MachTask::BasicInfo(task_t task, struct task_basic_info *info)
{
    if (info == NULL)
        return KERN_INVALID_ARGUMENT;

    DNBError err;
    mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
    err = ::task_info (task, TASK_BASIC_INFO, (task_info_t)info, &count);
    const bool log_process = DNBLogCheckLogBit(LOG_TASK);
    if (log_process || err.Fail())
        err.LogThreaded("::task_info ( target_task = 0x%4.4x, flavor = TASK_BASIC_INFO, task_info_out => %p, task_info_outCnt => %u )", task, info, count);
    if (DNBLogCheckLogBit(LOG_TASK) && DNBLogCheckLogBit(LOG_VERBOSE) && err.Success())
    {
        float user = (float)info->user_time.seconds + (float)info->user_time.microseconds / 1000000.0f;
        float system = (float)info->system_time.seconds + (float)info->system_time.microseconds / 1000000.0f;
        DNBLogThreaded ("task_basic_info = { suspend_count = %i, virtual_size = 0x%8.8llx, resident_size = 0x%8.8llx, user_time = %f, system_time = %f }",
                        info->suspend_count,
                        (uint64_t)info->virtual_size,
                        (uint64_t)info->resident_size,
                        user,
                        system);
    }
    return err.Error();
}

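// Illustrative use of BasicInfo() (mirroring what Resume() above does): the
// returned task_basic_info carries, among other fields, the task's current
// suspend count.
//
//     struct task_basic_info info;
//     if (MachTask::BasicInfo (task, &info) == KERN_SUCCESS && info.suspend_count > 0)
//     {
//         // the task is currently suspended
//     }
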
//----------------------------------------------------------------------
// MachTask::IsValid
//
// Returns true if a task is a valid task port for a current process.
//----------------------------------------------------------------------
bool
MachTask::IsValid () const
{
    return MachTask::IsValid(TaskPort());
}

//----------------------------------------------------------------------
// MachTask::IsValid
//
// Returns true if a task is a valid task port for a current process.
//----------------------------------------------------------------------
bool
MachTask::IsValid (task_t task)
{
    if (task != TASK_NULL)
    {
        struct task_basic_info task_info;
        return BasicInfo(task, &task_info) == KERN_SUCCESS;
    }
    return false;
}


bool
MachTask::StartExceptionThread(DNBError &err)
{
    DNBLogThreadedIf(LOG_EXCEPTIONS, "MachTask::%s ( )", __FUNCTION__);

    task_t task = TaskPortForProcessID(err);
    if (MachTask::IsValid(task))
    {
        // Got the mach port for the current process
        mach_port_t task_self = mach_task_self ();

        // Allocate an exception port that we will use to track our child process
        err = ::mach_port_allocate (task_self, MACH_PORT_RIGHT_RECEIVE, &m_exception_port);
        if (err.Fail())
            return false;

        // Add the ability to send messages on the new exception port
        err = ::mach_port_insert_right (task_self, m_exception_port, m_exception_port, MACH_MSG_TYPE_MAKE_SEND);
        if (err.Fail())
            return false;

        // Save the original state of the exception ports for our child process
        SaveExceptionPortInfo();

        // If we weren't able to save the info for our exception ports, we must stop...
        if (m_exc_port_info.mask == 0)
        {
            err.SetErrorString("failed to get exception port info");
            return false;
        }

        // Set the ability to get all exceptions on this port
        err = ::task_set_exception_ports (task, m_exc_port_info.mask, m_exception_port, EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES, THREAD_STATE_NONE);
        if (DNBLogCheckLogBit(LOG_EXCEPTIONS) || err.Fail())
        {
            err.LogThreaded("::task_set_exception_ports ( task = 0x%4.4x, exception_mask = 0x%8.8x, new_port = 0x%4.4x, behavior = 0x%8.8x, new_flavor = 0x%8.8x )",
                            task,
                            m_exc_port_info.mask,
                            m_exception_port,
                            (EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES),
                            THREAD_STATE_NONE);
        }

        if (err.Fail())
            return false;

        // Create the exception thread
        err = ::pthread_create (&m_exception_thread, NULL, MachTask::ExceptionThread, this);
        return err.Success();
    }
    else
    {
        DNBLogError("MachTask::%s (): task invalid, exception thread start failed.", __FUNCTION__);
    }
    return false;
}

kern_return_t
MachTask::ShutDownExcecptionThread()
{
    DNBError err;

    err = RestoreExceptionPortInfo();

    // NULL out our exception port and let our exception thread exit
    mach_port_t exception_port = m_exception_port;
    m_exception_port = 0;

    err.SetError(::pthread_cancel(m_exception_thread), DNBError::POSIX);
    if (DNBLogCheckLogBit(LOG_TASK) || err.Fail())
        err.LogThreaded("::pthread_cancel ( thread = %p )", m_exception_thread);

    err.SetError(::pthread_join(m_exception_thread, NULL), DNBError::POSIX);
    if (DNBLogCheckLogBit(LOG_TASK) || err.Fail())
        err.LogThreaded("::pthread_join ( thread = %p, value_ptr = NULL)", m_exception_thread);

    // Deallocate our exception port that we used to track our child process
    mach_port_t task_self = mach_task_self ();
    err = ::mach_port_deallocate (task_self, exception_port);
    if (DNBLogCheckLogBit(LOG_TASK) || err.Fail())
        err.LogThreaded("::mach_port_deallocate ( task = 0x%4.4x, name = 0x%4.4x )", task_self, exception_port);

    return err.Error();
}

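// Overview of the exception machinery set up above (a summary of the code,
// not additional behavior): StartExceptionThread() allocates a receive right,
// adds a send right to it, saves the task's original exception ports, points
// every exception in m_exc_port_info.mask at the new port, and then spawns
// ExceptionThread() below to service that port. ShutDownExcecptionThread()
// undoes this in reverse: restore the original ports, cancel and join the
// thread, and deallocate the port.
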
void *
MachTask::ExceptionThread (void *arg)
{
    if (arg == NULL)
        return NULL;

    MachTask *mach_task = (MachTask*) arg;
    MachProcess *mach_proc = mach_task->Process();

    DNBLogThreadedIf(LOG_EXCEPTIONS, "MachTask::%s ( arg = %p ) starting thread...", __FUNCTION__, arg);

#if defined (__APPLE__)
    pthread_setname_np ("exception monitoring thread");
#if defined (__arm__) || defined (__arm64__) || defined (__aarch64__)
    struct sched_param thread_param;
    int thread_sched_policy;
    if (pthread_getschedparam(pthread_self(), &thread_sched_policy, &thread_param) == 0)
    {
        thread_param.sched_priority = 47;
        pthread_setschedparam(pthread_self(), thread_sched_policy, &thread_param);
    }
#endif
#endif

    // We keep a count of the number of consecutive exceptions received so
    // we know to grab all exceptions without a timeout. We do this to get a
    // bunch of related exceptions on our exception port so we can process
    // them together. When we have multiple threads, we can get an exception
    // per thread and they will come in consecutively. The main loop in this
    // thread can stop periodically if needed to service things related to
    // this process.
    // Initially we don't have the MACH_RCV_TIMEOUT flag set in the options,
    // so we will wait forever for an exception on our exception port. After
    // we get one exception, we then will use the MACH_RCV_TIMEOUT option
    // with a zero timeout to grab all other current exceptions for our
    // process. After we have received the last pending exception, we will
    // get a timeout which enables us to then notify our main thread that we
    // have an exception bundle available. We then wait for the main thread
    // to tell this exception thread to start trying to get exception
    // messages again and we start again with a mach_msg read with infinite
    // timeout.
    uint32_t num_exceptions_received = 0;
    DNBError err;
    task_t task = mach_task->TaskPort();
    mach_msg_timeout_t periodic_timeout = 0;

#if defined (WITH_SPRINGBOARD) && !defined (WITH_BKS)
    mach_msg_timeout_t watchdog_elapsed = 0;
    mach_msg_timeout_t watchdog_timeout = 60 * 1000;
    pid_t pid = mach_proc->ProcessID();
    CFReleaser<SBSWatchdogAssertionRef> watchdog;

    if (mach_proc->ProcessUsingSpringBoard())
    {
        // Request a renewal for every 60 seconds if we attached using SpringBoard
        watchdog.reset(::SBSWatchdogAssertionCreateForPID(NULL, pid, 60));
        DNBLogThreadedIf(LOG_TASK, "::SBSWatchdogAssertionCreateForPID (NULL, %4.4x, 60 ) => %p", pid, watchdog.get());

        if (watchdog.get())
        {
            ::SBSWatchdogAssertionRenew (watchdog.get());

            CFTimeInterval watchdogRenewalInterval = ::SBSWatchdogAssertionGetRenewalInterval (watchdog.get());
            DNBLogThreadedIf(LOG_TASK, "::SBSWatchdogAssertionGetRenewalInterval ( %p ) => %g seconds", watchdog.get(), watchdogRenewalInterval);
            if (watchdogRenewalInterval > 0.0)
            {
                watchdog_timeout = (mach_msg_timeout_t)watchdogRenewalInterval * 1000;
                if (watchdog_timeout > 3000)
                    watchdog_timeout -= 1000;   // Give us a second to renew our timeout
                else if (watchdog_timeout > 1000)
                    watchdog_timeout -= 250;    // Give us a quarter of a second to renew our timeout
            }
        }
        if (periodic_timeout == 0 || periodic_timeout > watchdog_timeout)
            periodic_timeout = watchdog_timeout;
    }
#endif  // #if defined (WITH_SPRINGBOARD) && !defined (WITH_BKS)

#ifdef WITH_BKS
    CFReleaser<BKSWatchdogAssertionRef> watchdog;
    if (mach_proc->ProcessUsingBackBoard())
    {
        pid_t pid = mach_proc->ProcessID();
        CFAllocatorRef alloc = kCFAllocatorDefault;
        watchdog.reset(::BKSWatchdogAssertionCreateForPID(alloc, pid));
    }
#endif  // #ifdef WITH_BKS

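    // The loop below chooses between three receive modes, matching the
    // comment above: (1) after at least one exception has been received,
    // drain any remaining pending exceptions with a zero timeout; (2) if a
    // periodic timeout is configured (e.g. to renew a watchdog assertion),
    // wait with that timeout; (3) otherwise block indefinitely until an
    // exception message arrives.
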
    while (mach_task->ExceptionPortIsValid())
    {
        ::pthread_testcancel ();

        MachException::Message exception_message;

        if (num_exceptions_received > 0)
        {
            // No timeout, just receive as many exceptions as we can since we
            // already have one and we want to get all currently available
            // exceptions for this task
            err = exception_message.Receive(mach_task->ExceptionPort(), MACH_RCV_MSG | MACH_RCV_INTERRUPT | MACH_RCV_TIMEOUT, 0);
        }
        else if (periodic_timeout > 0)
        {
            // We need to stop periodically in this loop, so try and get a
            // mach message with a valid timeout (ms)
            err = exception_message.Receive(mach_task->ExceptionPort(), MACH_RCV_MSG | MACH_RCV_INTERRUPT | MACH_RCV_TIMEOUT, periodic_timeout);
        }
        else
        {
            // We don't need to parse all current exceptions or stop periodically,
            // just wait for an exception forever.
            err = exception_message.Receive(mach_task->ExceptionPort(), MACH_RCV_MSG | MACH_RCV_INTERRUPT, 0);
        }

        if (err.Error() == MACH_RCV_INTERRUPTED)
        {
            // If we have no task port we should exit this thread
            if (!mach_task->ExceptionPortIsValid())
            {
                DNBLogThreadedIf(LOG_EXCEPTIONS, "thread cancelled...");
                break;
            }

            // Make sure our task is still valid
            if (MachTask::IsValid(task))
            {
                // Task is still ok
                DNBLogThreadedIf(LOG_EXCEPTIONS, "interrupted, but task still valid, continuing...");
                continue;
            }
            else
            {
                DNBLogThreadedIf(LOG_EXCEPTIONS, "task has exited...");
                mach_proc->SetState(eStateExited);
                // Our task has died, exit the thread.
                break;
            }
        }
        else if (err.Error() == MACH_RCV_TIMED_OUT)
        {
            if (num_exceptions_received > 0)
            {
                // We were receiving all current exceptions with a timeout of
                // zero; it is time to go back to our normal looping mode
                num_exceptions_received = 0;

                // Notify our main thread we have a complete exception message
                // bundle available and get the possibly updated task port back
                // from the process in case we exec'ed and our task port changed
                task = mach_proc->ExceptionMessageBundleComplete();

                // in case we use a timeout value when getting exceptions...
                // Make sure our task is still valid
                if (MachTask::IsValid(task))
                {
                    // Task is still ok
                    DNBLogThreadedIf(LOG_EXCEPTIONS, "got a timeout, continuing...");
                    continue;
                }
                else
                {
                    DNBLogThreadedIf(LOG_EXCEPTIONS, "task has exited...");
                    mach_proc->SetState(eStateExited);
                    // Our task has died, exit the thread.
                    break;
                }
            }

#if defined (WITH_SPRINGBOARD) && !defined (WITH_BKS)
            if (watchdog.get())
            {
                watchdog_elapsed += periodic_timeout;
                if (watchdog_elapsed >= watchdog_timeout)
                {
                    DNBLogThreadedIf(LOG_TASK, "SBSWatchdogAssertionRenew ( %p )", watchdog.get());
                    ::SBSWatchdogAssertionRenew (watchdog.get());
                    watchdog_elapsed = 0;
                }
            }
#endif
        }
        else if (err.Error() != KERN_SUCCESS)
        {
            DNBLogThreadedIf(LOG_EXCEPTIONS, "got some other error, do something about it??? nah, continuing for now...");
            // TODO: notify of error?
        }
        else
        {
            if (exception_message.CatchExceptionRaise(task))
            {
                ++num_exceptions_received;
                mach_proc->ExceptionMessageReceived(exception_message);
            }
        }
    }

#if defined (WITH_SPRINGBOARD) && !defined (WITH_BKS)
    if (watchdog.get())
    {
        // TODO: change SBSWatchdogAssertionRelease to SBSWatchdogAssertionCancel when we
        // all are up and running on systems that support it. The SBS framework has a #define
        // that will forward SBSWatchdogAssertionRelease to SBSWatchdogAssertionCancel for now
        // so it should still build either way.
        DNBLogThreadedIf(LOG_TASK, "::SBSWatchdogAssertionRelease(%p)", watchdog.get());
        ::SBSWatchdogAssertionRelease (watchdog.get());
    }
#endif  // #if defined (WITH_SPRINGBOARD) && !defined (WITH_BKS)

    DNBLogThreadedIf(LOG_EXCEPTIONS, "MachTask::%s (%p): thread exiting...", __FUNCTION__, arg);
    return NULL;
}


// So the TASK_DYLD_INFO used to just return the address of the all image infos
// as a single member called "all_image_info". Then someone decided it would be
// a good idea to rename this first member to "all_image_info_addr" and add a
// size member called "all_image_info_size". This of course can not be detected
// using code or #defines. So to hack around this problem, we define our own
// version of the TASK_DYLD_INFO structure so we can guarantee what is inside it.

struct hack_task_dyld_info {
    mach_vm_address_t all_image_info_addr;
    mach_vm_size_t all_image_info_size;
};

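// For context (not used directly in this file): the address returned by
// GetDYLDAllImageInfosAddress() below is where dyld keeps its
// "struct dyld_all_image_infos" (declared in <mach-o/dyld_images.h>, included
// above). A debugger typically reads that structure out of the inferior with
// ReadMemory() to find the loaded image list (infoArray / infoArrayCount) and
// the notification function dyld calls when images are added or removed. This
// describes how the address is consumed elsewhere in debugserver, not
// behavior implemented here.
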
nub_addr_t
MachTask::GetDYLDAllImageInfosAddress (DNBError& err)
{
    struct hack_task_dyld_info dyld_info;
    mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT;
    // Make sure that COUNT isn't bigger than our hacked up struct hack_task_dyld_info.
    // If it is, then make COUNT smaller to match.
    if (count > (sizeof(struct hack_task_dyld_info) / sizeof(natural_t)))
        count = (sizeof(struct hack_task_dyld_info) / sizeof(natural_t));

    task_t task = TaskPortForProcessID (err);
    if (err.Success())
    {
        err = ::task_info (task, TASK_DYLD_INFO, (task_info_t)&dyld_info, &count);
        if (err.Success())
        {
            // We now have the address of the all image infos structure
            return dyld_info.all_image_info_addr;
        }
    }
    return INVALID_NUB_ADDRESS;
}


//----------------------------------------------------------------------
// MachTask::AllocateMemory
//----------------------------------------------------------------------
nub_addr_t
MachTask::AllocateMemory (size_t size, uint32_t permissions)
{
    mach_vm_address_t addr;
    task_t task = TaskPort();
    if (task == TASK_NULL)
        return INVALID_NUB_ADDRESS;

    DNBError err;
    err = ::mach_vm_allocate (task, &addr, size, TRUE);
    if (err.Error() == KERN_SUCCESS)
    {
        // Set the protections:
        vm_prot_t mach_prot = VM_PROT_NONE;
        if (permissions & eMemoryPermissionsReadable)
            mach_prot |= VM_PROT_READ;
        if (permissions & eMemoryPermissionsWritable)
            mach_prot |= VM_PROT_WRITE;
        if (permissions & eMemoryPermissionsExecutable)
            mach_prot |= VM_PROT_EXECUTE;

        err = ::mach_vm_protect (task, addr, size, 0, mach_prot);
        if (err.Error() == KERN_SUCCESS)
        {
            m_allocations.insert (std::make_pair(addr, size));
            return addr;
        }
        ::mach_vm_deallocate (task, addr, size);
    }
    return INVALID_NUB_ADDRESS;
}

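// Illustrative sketch (not part of this file): allocating scratch memory in
// the inferior, writing to it, and releasing it again. The byte values are
// hypothetical.
//
//     MachTask &task = ...;                  // assuming a valid, attached task
//     nub_addr_t addr = task.AllocateMemory (4096, eMemoryPermissionsReadable | eMemoryPermissionsWritable);
//     if (addr != INVALID_NUB_ADDRESS)
//     {
//         const uint8_t bytes[4] = { 0x90, 0x90, 0x90, 0x90 };
//         task.WriteMemory (addr, sizeof(bytes), bytes);
//         task.DeallocateMemory (addr);
//     }
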
//----------------------------------------------------------------------
// MachTask::DeallocateMemory
//----------------------------------------------------------------------
nub_bool_t
MachTask::DeallocateMemory (nub_addr_t addr)
{
    task_t task = TaskPort();
    if (task == TASK_NULL)
        return false;

    // We have to stash away sizes for the allocations...
    allocation_collection::iterator pos, end = m_allocations.end();
    for (pos = m_allocations.begin(); pos != end; pos++)
    {
        if ((*pos).first == addr)
        {
            // Grab the size before erasing the entry, since the iterator is
            // no longer valid once it has been erased.
            const size_t size = (*pos).second;
            m_allocations.erase(pos);
#define ALWAYS_ZOMBIE_ALLOCATIONS 0
            if (ALWAYS_ZOMBIE_ALLOCATIONS || getenv ("DEBUGSERVER_ZOMBIE_ALLOCATIONS"))
            {
                ::mach_vm_protect (task, addr, size, 0, VM_PROT_NONE);
                return true;
            }
            else
                return ::mach_vm_deallocate (task, addr, size) == KERN_SUCCESS;
        }
    }
    return false;
}

static void foundStackLog(mach_stack_logging_record_t record, void *context) {
    *((bool*)context) = true;
}

bool
MachTask::HasMallocLoggingEnabled ()
{
    bool found = false;

    __mach_stack_logging_enumerate_records(m_task, 0x0, foundStackLog, &found);
    return found;
}

struct history_enumerator_impl_data
{
    MachMallocEvent *buffer;
    uint32_t        *position;
    uint32_t         count;
};

static void history_enumerator_impl(mach_stack_logging_record_t record, void* enum_obj)
{
    history_enumerator_impl_data *data = (history_enumerator_impl_data*)enum_obj;

    if (*data->position >= data->count)
        return;

    data->buffer[*data->position].m_base_address = record.address;
    data->buffer[*data->position].m_size = record.argument;
    data->buffer[*data->position].m_event_id = record.stack_identifier;
    data->buffer[*data->position].m_event_type = record.type_flags == stack_logging_type_alloc   ? eMachMallocEventTypeAlloc :
                                                 record.type_flags == stack_logging_type_dealloc ? eMachMallocEventTypeDealloc :
                                                                                                    eMachMallocEventTypeOther;
    *data->position += 1;
}

bool
MachTask::EnumerateMallocRecords (MachMallocEvent *event_buffer,
                                  uint32_t buffer_size,
                                  uint32_t *count)
{
    return EnumerateMallocRecords(0,
                                  event_buffer,
                                  buffer_size,
                                  count);
}

bool
MachTask::EnumerateMallocRecords (mach_vm_address_t address,
                                  MachMallocEvent *event_buffer,
                                  uint32_t buffer_size,
                                  uint32_t *count)
{
    if (!event_buffer || !count)
        return false;

    if (buffer_size == 0)
        return false;

    *count = 0;
    history_enumerator_impl_data data = { event_buffer, count, buffer_size };
    __mach_stack_logging_enumerate_records(m_task, address, history_enumerator_impl, &data);
    return (*count > 0);
}

bool
MachTask::EnumerateMallocFrames (MachMallocEventId event_id,
                                 mach_vm_address_t *function_addresses_buffer,
                                 uint32_t buffer_size,
                                 uint32_t *count)
{
    if (!function_addresses_buffer || !count)
        return false;

    if (buffer_size == 0)
        return false;

    __mach_stack_logging_frames_for_uniqued_stack(m_task, event_id, &function_addresses_buffer[0], buffer_size, count);
    *count -= 1;
    if (function_addresses_buffer[*count-1] < PageSize())
        *count -= 1;
    return (*count > 0);
}

nub_size_t
MachTask::PageSize ()
{
    return m_vm_memory.PageSize (m_task);
}
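
// Illustrative sketch (not part of this file): walking malloc history for an
// address when the inferior has malloc stack logging enabled. The buffer
// sizes and the "task"/"some_addr" names are hypothetical; the event member
// names mirror those filled in by history_enumerator_impl() above.
//
//     if (task.HasMallocLoggingEnabled())
//     {
//         MachMallocEvent events[32];
//         uint32_t num_events = 0;
//         if (task.EnumerateMallocRecords (some_addr, events, 32, &num_events))
//         {
//             for (uint32_t i = 0; i < num_events; ++i)
//             {
//                 mach_vm_address_t frames[128];
//                 uint32_t num_frames = 0;
//                 task.EnumerateMallocFrames (events[i].m_event_id, frames, 128, &num_frames);
//             }
//         }
//     }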