//===-- MachThreadList.cpp --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/19/07.
//
//===----------------------------------------------------------------------===//

#include "MachThreadList.h"

#include "DNBLog.h"
#include "DNBThreadResumeActions.h"
#include "MachProcess.h"

#include <inttypes.h>
#include <sys/sysctl.h>

#include <memory>

MachThreadList::MachThreadList()
    : m_threads(), m_threads_mutex(PTHREAD_MUTEX_RECURSIVE),
      m_is_64_bit(false) {}

MachThreadList::~MachThreadList() {}

nub_state_t MachThreadList::GetState(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetState();
  return eStateInvalid;
}

const char *MachThreadList::GetName(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetName();
  return NULL;
}

ThreadInfo::QoS MachThreadList::GetRequestedQoS(nub_thread_t tid,
                                                nub_addr_t tsd,
                                                uint64_t dti_qos_class_index) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetRequestedQoS(tsd, dti_qos_class_index);
  return ThreadInfo::QoS();
}

nub_addr_t MachThreadList::GetPThreadT(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetPThreadT();
  return INVALID_NUB_ADDRESS;
}

nub_addr_t MachThreadList::GetDispatchQueueT(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetDispatchQueueT();
  return INVALID_NUB_ADDRESS;
}

nub_addr_t MachThreadList::GetTSDAddressForThread(
    nub_thread_t tid, uint64_t plo_pthread_tsd_base_address_offset,
    uint64_t plo_pthread_tsd_base_offset, uint64_t plo_pthread_tsd_entry_size) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetTSDAddressForThread(
        plo_pthread_tsd_base_address_offset, plo_pthread_tsd_base_offset,
        plo_pthread_tsd_entry_size);
  return INVALID_NUB_ADDRESS;
}

nub_thread_t MachThreadList::SetCurrentThread(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp) {
    m_current_thread = thread_sp;
    return tid;
  }
  return INVALID_NUB_THREAD;
}

bool MachThreadList::GetThreadStoppedReason(
    nub_thread_t tid, struct DNBThreadStopInfo *stop_info) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetStopException().GetStopInfo(stop_info);
  return false;
}

bool MachThreadList::GetIdentifierInfo(
    nub_thread_t tid, thread_identifier_info_data_t *ident_info) {
  thread_t mach_port_number = GetMachPortNumberByThreadID(tid);

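  // Query the kernel directly; THREAD_IDENTIFIER_INFO yields, among other
  // fields, the system-wide unique thread ID and the dispatch queue TSD
  // address for this thread.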
  mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
  return ::thread_info(mach_port_number, THREAD_IDENTIFIER_INFO,
                       (thread_info_t)ident_info, &count) == KERN_SUCCESS;
}

void MachThreadList::DumpThreadStoppedReason(nub_thread_t tid) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    thread_sp->GetStopException().DumpStopReason();
}

const char *MachThreadList::GetThreadInfo(nub_thread_t tid) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetBasicInfoAsString();
  return NULL;
}

MachThreadSP MachThreadList::GetThreadByID(nub_thread_t tid) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  MachThreadSP thread_sp;
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->ThreadID() == tid) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

MachThreadSP
MachThreadList::GetThreadByMachPortNumber(thread_t mach_port_number) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  MachThreadSP thread_sp;
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->MachPortNumber() == mach_port_number) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

nub_thread_t
MachThreadList::GetThreadIDByMachPortNumber(thread_t mach_port_number) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->MachPortNumber() == mach_port_number) {
      return m_threads[idx]->ThreadID();
    }
  }
  return INVALID_NUB_THREAD;
}

thread_t MachThreadList::GetMachPortNumberByThreadID(
    nub_thread_t globally_unique_id) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->ThreadID() == globally_unique_id) {
      return m_threads[idx]->MachPortNumber();
    }
  }
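  // Not found: return 0, which is MACH_PORT_NULL and never names a valid
  // thread.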
  return 0;
}

bool MachThreadList::GetRegisterValue(nub_thread_t tid, uint32_t set,
                                      uint32_t reg,
                                      DNBRegisterValue *reg_value) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetRegisterValue(set, reg, reg_value);

  return false;
}

bool MachThreadList::SetRegisterValue(nub_thread_t tid, uint32_t set,
                                      uint32_t reg,
                                      const DNBRegisterValue *reg_value) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->SetRegisterValue(set, reg, reg_value);

  return false;
}

nub_size_t MachThreadList::GetRegisterContext(nub_thread_t tid, void *buf,
                                              size_t buf_len) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetRegisterContext(buf, buf_len);
  return 0;
}

nub_size_t MachThreadList::SetRegisterContext(nub_thread_t tid, const void *buf,
                                              size_t buf_len) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->SetRegisterContext(buf, buf_len);
  return 0;
}

uint32_t MachThreadList::SaveRegisterState(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->SaveRegisterState();
  return 0;
}

bool MachThreadList::RestoreRegisterState(nub_thread_t tid, uint32_t save_id) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->RestoreRegisterState(save_id);
  return false;
}

nub_size_t MachThreadList::NumThreads() const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  return m_threads.size();
}

nub_thread_t MachThreadList::ThreadIDAtIndex(nub_size_t idx) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  if (idx < m_threads.size())
    return m_threads[idx]->ThreadID();
  return INVALID_NUB_THREAD;
}

nub_thread_t MachThreadList::CurrentThreadID() {
  MachThreadSP thread_sp;
  CurrentThread(thread_sp);
  if (thread_sp.get())
    return thread_sp->ThreadID();
  return INVALID_NUB_THREAD;
}

bool MachThreadList::NotifyException(MachException::Data &exc) {
  MachThreadSP thread_sp(GetThreadByMachPortNumber(exc.thread_port));
  if (thread_sp) {
    thread_sp->NotifyException(exc);
    return true;
  }
  return false;
}

void MachThreadList::Clear() {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  m_threads.clear();
}

uint32_t
MachThreadList::UpdateThreadList(MachProcess *process, bool update,
                                 MachThreadList::collection *new_threads) {
  // locker will keep a mutex locked until it goes out of scope
  DNBLogThreadedIf(LOG_THREAD, "MachThreadList::UpdateThreadList (pid = %4.4x, "
                               "update = %u) process stop count = %u",
                   process->ProcessID(), update, process->StopCount());
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);

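  // On the first stop, ask the kernel whether the inferior is a 64-bit
  // process so the reported architecture and the MachThread objects created
  // below use the correct word size.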
  if (process->StopCount() == 0) {
    int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, process->ProcessID()};
    struct kinfo_proc processInfo;
    size_t bufsize = sizeof(processInfo);
    if (sysctl(mib, (unsigned)(sizeof(mib) / sizeof(int)), &processInfo,
               &bufsize, NULL, 0) == 0 &&
        bufsize > 0) {
      if (processInfo.kp_proc.p_flag & P_LP64)
        m_is_64_bit = true;
    }
#if defined(__i386__) || defined(__x86_64__)
    if (m_is_64_bit)
      DNBArchProtocol::SetArchitecture(CPU_TYPE_X86_64);
    else
      DNBArchProtocol::SetArchitecture(CPU_TYPE_I386);
#elif defined(__arm__) || defined(__arm64__) || defined(__aarch64__)
    if (m_is_64_bit)
      DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM64);
    else
      DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM);
#endif
  }

  if (m_threads.empty() || update) {
    thread_array_t thread_list = NULL;
    mach_msg_type_number_t thread_list_count = 0;
    task_t task = process->Task().TaskPort();
    DNBError err(::task_threads(task, &thread_list, &thread_list_count),
                 DNBError::MachKernel);

    if (DNBLogCheckLogBit(LOG_THREAD) || err.Fail())
      err.LogThreaded("::task_threads ( task = 0x%4.4x, thread_list => %p, "
                      "thread_list_count => %u )",
                      task, thread_list, thread_list_count);

    if (err.Status() == KERN_SUCCESS && thread_list_count > 0) {
      MachThreadList::collection currThreads;
      size_t idx;
      // Iterate through the threads the kernel reports and see which ones
      // we already have in our list (keep them) and which ones we don't
      // (add them). Threads that are no longer around are dropped when we
      // swap in the new collection below.
      for (idx = 0; idx < thread_list_count; ++idx) {
        const thread_t mach_port_num = thread_list[idx];

        uint64_t unique_thread_id =
            MachThread::GetGloballyUniqueThreadIDForMachPortID(mach_port_num);
        MachThreadSP thread_sp(GetThreadByID(unique_thread_id));
        if (thread_sp) {
          // Keep the existing thread class
          currThreads.push_back(thread_sp);
        } else {
          // We don't have this thread yet, so let's add it.
          thread_sp = std::make_shared<MachThread>(
              process, m_is_64_bit, unique_thread_id, mach_port_num);

          // Only add the new thread if it is user ready, i.e. far enough
          // along in its creation that it can be displayed and shown to
          // users.
          if (thread_sp->IsUserReady()) {
            if (new_threads)
              new_threads->push_back(thread_sp);

            currThreads.push_back(thread_sp);
          }
        }
      }

      m_threads.swap(currThreads);
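      // Drop the cached current thread; CurrentThread() will lazily pick a
      // new one based on which thread has a valid stop exception.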
      m_current_thread.reset();

      // Free the vm memory given to us by ::task_threads()
      vm_size_t thread_list_size =
          (vm_size_t)(thread_list_count * sizeof(thread_t));
      ::vm_deallocate(::mach_task_self(), (vm_address_t)thread_list,
                      thread_list_size);
    }
  }
  return static_cast<uint32_t>(m_threads.size());
}

void MachThreadList::CurrentThread(MachThreadSP &thread_sp) {
  // locker will keep a mutex locked until it goes out of scope
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  if (m_current_thread.get() == NULL) {
    // Figure out which thread is going to be our current thread.
    // This is currently done by finding the first thread in the list
    // that has a valid exception.
    const size_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx) {
      if (m_threads[idx]->GetStopException().IsValid()) {
        m_current_thread = m_threads[idx];
        break;
      }
    }
  }
  thread_sp = m_current_thread;
}

void MachThreadList::Dump() const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->Dump(idx);
  }
}

void MachThreadList::ProcessWillResume(
    MachProcess *process, const DNBThreadResumeActions &thread_actions) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);

  // Update our thread list, because sometimes libdispatch or the kernel
  // will spawn threads while a task is suspended.
  MachThreadList::collection new_threads;

  // First figure out if we were planning on running only one thread, and if so
  // force that thread to resume.
  bool run_one_thread;
  nub_thread_t solo_thread = INVALID_NUB_THREAD;
  if (thread_actions.GetSize() > 0 &&
      thread_actions.NumActionsWithState(eStateStepping) +
              thread_actions.NumActionsWithState(eStateRunning) ==
          1) {
    run_one_thread = true;
    const DNBThreadResumeAction *action_ptr = thread_actions.GetFirst();
    size_t num_actions = thread_actions.GetSize();
    for (size_t i = 0; i < num_actions; i++, action_ptr++) {
      if (action_ptr->state == eStateStepping ||
          action_ptr->state == eStateRunning) {
        solo_thread = action_ptr->tid;
        break;
      }
    }
  } else
    run_one_thread = false;

  UpdateThreadList(process, true, &new_threads);

  DNBThreadResumeAction resume_new_threads = {-1U, eStateRunning, 0,
                                              INVALID_NUB_ADDRESS};
  // If we are planning to run only one thread, any new threads should be
  // suspended.
  if (run_one_thread)
    resume_new_threads.state = eStateSuspended;

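  // Dispatch resume actions: threads we just discovered get the
  // resume_new_threads action computed above, every other thread gets the
  // action the client requested for it.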
  const size_t num_new_threads = new_threads.size();
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    MachThread *thread = m_threads[idx].get();
    bool handled = false;
    for (uint32_t new_idx = 0; new_idx < num_new_threads; ++new_idx) {
      if (thread == new_threads[new_idx].get()) {
        thread->ThreadWillResume(&resume_new_threads);
        handled = true;
        break;
      }
    }

    if (!handled) {
      const DNBThreadResumeAction *thread_action =
          thread_actions.GetActionForThread(thread->ThreadID(), true);
      // There must always be a thread action for every thread.
      assert(thread_action);
      bool others_stopped = false;
      if (solo_thread == thread->ThreadID())
        others_stopped = true;
      thread->ThreadWillResume(thread_action, others_stopped);
    }
  }

  if (new_threads.size()) {
    for (uint32_t idx = 0; idx < num_new_threads; ++idx) {
      DNBLogThreadedIf(
          LOG_THREAD, "MachThreadList::ProcessWillResume (pid = %4.4x) "
                      "stop-id=%u, resuming newly discovered thread: "
                      "0x%8.8" PRIx64 ", thread-is-user-ready=%i",
          process->ProcessID(), process->StopCount(),
          new_threads[idx]->ThreadID(), new_threads[idx]->IsUserReady());
    }
  }
}

uint32_t MachThreadList::ProcessDidStop(MachProcess *process) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  // Update our thread list
  const uint32_t num_threads = UpdateThreadList(process, true);
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->ThreadDidStop();
  }
  return num_threads;
}

//----------------------------------------------------------------------
// Check each thread in our thread list to see if we should notify our
// client of the current halt in execution.
//
// Breakpoints can have callback functions associated with them that can
// return true to stop, or false to continue executing the inferior.
//
// RETURNS
//    true if we should stop and notify our clients
//    false if we should resume our child process and skip notification
//----------------------------------------------------------------------
bool MachThreadList::ShouldStop(bool &step_more) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  bool should_stop = false;
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; !should_stop && idx < num_threads; ++idx) {
    should_stop = m_threads[idx]->ShouldStop(step_more);
  }
  return should_stop;
}

void MachThreadList::NotifyBreakpointChanged(const DNBBreakpoint *bp) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->NotifyBreakpointChanged(bp);
  }
}

uint32_t
MachThreadList::EnableHardwareBreakpoint(const DNBBreakpoint *bp) const {
  if (bp != NULL) {
    const size_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
      m_threads[idx]->EnableHardwareBreakpoint(bp);
  }
  return INVALID_NUB_HW_INDEX;
}

bool MachThreadList::DisableHardwareBreakpoint(const DNBBreakpoint *bp) const {
  if (bp != NULL) {
    const size_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
      m_threads[idx]->DisableHardwareBreakpoint(bp);
  }
  return false;
}

// Call chain: DNBWatchpointSet() -> MachProcess::CreateWatchpoint() ->
// MachProcess::EnableWatchpoint() ->
// MachThreadList::EnableHardwareWatchpoint().
uint32_t
MachThreadList::EnableHardwareWatchpoint(const DNBBreakpoint *wp) const {
  uint32_t hw_index = INVALID_NUB_HW_INDEX;
  if (wp != NULL) {
    PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();
    // On Mac OS X we have to prime the control registers for new threads. We
    // do this using the control register data for the first thread, for lack
    // of a better way of choosing.
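    // Only the first thread also sets the state at the task level so that
    // threads created later start out with the same watchpoint state.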
    bool also_set_on_task = true;
    for (uint32_t idx = 0; idx < num_threads; ++idx) {
      if ((hw_index = m_threads[idx]->EnableHardwareWatchpoint(
               wp, also_set_on_task)) == INVALID_NUB_HW_INDEX) {
        // Enabling the watchpoint on the thread at index idx failed; roll
        // back the transaction for threads [0, idx).
        for (uint32_t i = 0; i < idx; ++i)
          m_threads[i]->RollbackTransForHWP();
        return INVALID_NUB_HW_INDEX;
      }
      also_set_on_task = false;
    }
    // Notify each thread to commit the pending transaction.
    for (uint32_t idx = 0; idx < num_threads; ++idx)
      m_threads[idx]->FinishTransForHWP();
  }
  return hw_index;
}

bool MachThreadList::DisableHardwareWatchpoint(const DNBBreakpoint *wp) const {
  if (wp != NULL) {
    PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();

    // On Mac OS X we have to prime the control registers for new threads. We
    // do this using the control register data for the first thread, for lack
    // of a better way of choosing.
    bool also_set_on_task = true;
    for (uint32_t idx = 0; idx < num_threads; ++idx) {
      if (!m_threads[idx]->DisableHardwareWatchpoint(wp, also_set_on_task)) {
        // Disabling the watchpoint on the thread at index idx failed; roll
        // back the transaction for threads [0, idx).
        for (uint32_t i = 0; i < idx; ++i)
          m_threads[i]->RollbackTransForHWP();
        return false;
      }
      also_set_on_task = false;
    }
    // Notify each thread to commit the pending transaction.
    for (uint32_t idx = 0; idx < num_threads; ++idx)
      m_threads[idx]->FinishTransForHWP();

    return true;
  }
  return false;
}

uint32_t MachThreadList::NumSupportedHardwareWatchpoints() const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  // Use an arbitrary thread to retrieve the number of supported hardware
  // watchpoints.
  if (num_threads)
    return m_threads[0]->NumSupportedHardwareWatchpoints();
  return 0;
}

uint32_t MachThreadList::GetThreadIndexForThreadStoppedWithSignal(
    const int signo) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetStopException().SoftSignal() == signo)
      return idx;
  }
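  // No thread is currently stopped with the requested signal.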
  return UINT32_MAX;
}