//===-- MachThreadList.cpp --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/19/07.
//
//===----------------------------------------------------------------------===//

#include "MachThreadList.h"

#include <inttypes.h>
#include <sys/sysctl.h>

#include "DNBLog.h"
#include "DNBThreadResumeActions.h"
#include "MachProcess.h"

MachThreadList::MachThreadList()
    : m_threads(), m_threads_mutex(PTHREAD_MUTEX_RECURSIVE),
      m_is_64_bit(false) {}

MachThreadList::~MachThreadList() {}

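// Most of the per-thread accessors below follow the same pattern: look the
// thread up by its globally unique thread ID and forward the call to the
// MachThread object, returning a benign default value (eStateInvalid, NULL,
// INVALID_NUB_ADDRESS, etc.) when the thread is no longer in our list.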
nub_state_t MachThreadList::GetState(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetState();
  return eStateInvalid;
}

const char *MachThreadList::GetName(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetName();
  return NULL;
}

ThreadInfo::QoS MachThreadList::GetRequestedQoS(nub_thread_t tid,
                                                nub_addr_t tsd,
                                                uint64_t dti_qos_class_index) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetRequestedQoS(tsd, dti_qos_class_index);
  return ThreadInfo::QoS();
}

nub_addr_t MachThreadList::GetPThreadT(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetPThreadT();
  return INVALID_NUB_ADDRESS;
}

nub_addr_t MachThreadList::GetDispatchQueueT(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetDispatchQueueT();
  return INVALID_NUB_ADDRESS;
}

nub_addr_t MachThreadList::GetTSDAddressForThread(
    nub_thread_t tid, uint64_t plo_pthread_tsd_base_address_offset,
    uint64_t plo_pthread_tsd_base_offset, uint64_t plo_pthread_tsd_entry_size) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetTSDAddressForThread(
        plo_pthread_tsd_base_address_offset, plo_pthread_tsd_base_offset,
        plo_pthread_tsd_entry_size);
  return INVALID_NUB_ADDRESS;
}

nub_thread_t MachThreadList::SetCurrentThread(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp) {
    m_current_thread = thread_sp;
    return tid;
  }
  return INVALID_NUB_THREAD;
}

bool MachThreadList::GetThreadStoppedReason(
    nub_thread_t tid, struct DNBThreadStopInfo *stop_info) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetStopException().GetStopInfo(stop_info);
  return false;
}

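// Fill in "ident_info" by asking the kernel directly using ::thread_info()
// with the THREAD_IDENTIFIER_INFO flavor. If "tid" is unknown,
// GetMachPortNumberByThreadID() returns 0 and the thread_info() call is
// expected to fail, so we return false.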
bool MachThreadList::GetIdentifierInfo(
    nub_thread_t tid, thread_identifier_info_data_t *ident_info) {
  thread_t mach_port_number = GetMachPortNumberByThreadID(tid);

  mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
  return ::thread_info(mach_port_number, THREAD_IDENTIFIER_INFO,
                       (thread_info_t)ident_info, &count) == KERN_SUCCESS;
}

void MachThreadList::DumpThreadStoppedReason(nub_thread_t tid) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    thread_sp->GetStopException().DumpStopReason();
}

const char *MachThreadList::GetThreadInfo(nub_thread_t tid) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetBasicInfoAsString();
  return NULL;
}

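// Thread lookups. Threads are identified either by their globally unique
// thread ID or by the mach port number for the thread within this task.
// The helpers below do a linear search of m_threads under the recursive
// mutex and translate between the two, returning an empty shared pointer,
// INVALID_NUB_THREAD, or 0 when no match is found.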
MachThreadSP MachThreadList::GetThreadByID(nub_thread_t tid) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  MachThreadSP thread_sp;
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->ThreadID() == tid) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

MachThreadSP
MachThreadList::GetThreadByMachPortNumber(thread_t mach_port_number) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  MachThreadSP thread_sp;
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->MachPortNumber() == mach_port_number) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

nub_thread_t
MachThreadList::GetThreadIDByMachPortNumber(thread_t mach_port_number) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->MachPortNumber() == mach_port_number) {
      return m_threads[idx]->ThreadID();
    }
  }
  return INVALID_NUB_THREAD;
}

thread_t MachThreadList::GetMachPortNumberByThreadID(
    nub_thread_t globally_unique_id) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->ThreadID() == globally_unique_id) {
      return m_threads[idx]->MachPortNumber();
    }
  }
  return 0;
}

bool MachThreadList::GetRegisterValue(nub_thread_t tid, uint32_t set,
                                      uint32_t reg,
                                      DNBRegisterValue *reg_value) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetRegisterValue(set, reg, reg_value);

  return false;
}

bool MachThreadList::SetRegisterValue(nub_thread_t tid, uint32_t set,
                                      uint32_t reg,
                                      const DNBRegisterValue *reg_value) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->SetRegisterValue(set, reg, reg_value);

  return false;
}

nub_size_t MachThreadList::GetRegisterContext(nub_thread_t tid, void *buf,
                                              size_t buf_len) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetRegisterContext(buf, buf_len);
  return 0;
}

nub_size_t MachThreadList::SetRegisterContext(nub_thread_t tid, const void *buf,
                                              size_t buf_len) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->SetRegisterContext(buf, buf_len);
  return 0;
}

uint32_t MachThreadList::SaveRegisterState(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->SaveRegisterState();
  return 0;
}

bool MachThreadList::RestoreRegisterState(nub_thread_t tid, uint32_t save_id) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->RestoreRegisterState(save_id);
  return false;
}

nub_size_t MachThreadList::NumThreads() const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  return m_threads.size();
}

nub_thread_t MachThreadList::ThreadIDAtIndex(nub_size_t idx) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  if (idx < m_threads.size())
    return m_threads[idx]->ThreadID();
  return INVALID_NUB_THREAD;
}

nub_thread_t MachThreadList::CurrentThreadID() {
  MachThreadSP thread_sp;
  CurrentThread(thread_sp);
  if (thread_sp.get())
    return thread_sp->ThreadID();
  return INVALID_NUB_THREAD;
}

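// Route a mach exception to the thread it was raised on. The exception data
// carries the thread's port, so we match on the mach port number rather than
// the globally unique thread ID. Returns false if the thread is not in our
// list.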
bool MachThreadList::NotifyException(MachException::Data &exc) {
  MachThreadSP thread_sp(GetThreadByMachPortNumber(exc.thread_port));
  if (thread_sp) {
    thread_sp->NotifyException(exc);
    return true;
  }
  return false;
}

void MachThreadList::Clear() {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  m_threads.clear();
}

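// Update m_threads to match the kernel's view of the threads in "process".
//
// On the first stop we also determine whether the inferior is 64 bit by
// checking the P_LP64 flag from sysctl(KERN_PROC) and tell DNBArchProtocol
// which architecture variant to use. Then, if our list is empty or "update"
// is true, we fetch the task's current thread ports with ::task_threads(),
// keep the MachThread objects whose globally unique IDs are still present,
// create new MachThread objects for newly discovered ports (reporting the
// user-ready ones through "new_threads"), drop threads that have gone away,
// reset the cached current thread, and release the port array with
// ::vm_deallocate(). Returns the number of threads in the updated list.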
uint32_t
MachThreadList::UpdateThreadList(MachProcess *process, bool update,
                                 MachThreadList::collection *new_threads) {
  // locker will keep a mutex locked until it goes out of scope
  DNBLogThreadedIf(LOG_THREAD, "MachThreadList::UpdateThreadList (pid = %4.4x, "
                               "update = %u) process stop count = %u",
                   process->ProcessID(), update, process->StopCount());
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);

  if (process->StopCount() == 0) {
    int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, process->ProcessID()};
    struct kinfo_proc processInfo;
    size_t bufsize = sizeof(processInfo);
    if (sysctl(mib, (unsigned)(sizeof(mib) / sizeof(int)), &processInfo,
               &bufsize, NULL, 0) == 0 &&
        bufsize > 0) {
      if (processInfo.kp_proc.p_flag & P_LP64)
        m_is_64_bit = true;
    }
#if defined(__i386__) || defined(__x86_64__)
    if (m_is_64_bit)
      DNBArchProtocol::SetArchitecture(CPU_TYPE_X86_64);
    else
      DNBArchProtocol::SetArchitecture(CPU_TYPE_I386);
#elif defined(__arm__) || defined(__arm64__) || defined(__aarch64__)
    if (m_is_64_bit)
      DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM64);
    else
      DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM);
#endif
  }

  if (m_threads.empty() || update) {
    thread_array_t thread_list = NULL;
    mach_msg_type_number_t thread_list_count = 0;
    task_t task = process->Task().TaskPort();
    DNBError err(::task_threads(task, &thread_list, &thread_list_count),
                 DNBError::MachKernel);

    if (DNBLogCheckLogBit(LOG_THREAD) || err.Fail())
      err.LogThreaded("::task_threads ( task = 0x%4.4x, thread_list => %p, "
                      "thread_list_count => %u )",
                      task, thread_list, thread_list_count);

    if (err.Status() == KERN_SUCCESS && thread_list_count > 0) {
      MachThreadList::collection currThreads;
      size_t idx;
      // Iterate through the current thread list and see which threads
      // we already have in our list (keep them), which ones we don't
      // (add them), and which ones are not around anymore (remove them).
      for (idx = 0; idx < thread_list_count; ++idx) {
        const thread_t mach_port_num = thread_list[idx];

        uint64_t unique_thread_id =
            MachThread::GetGloballyUniqueThreadIDForMachPortID(mach_port_num);
        MachThreadSP thread_sp(GetThreadByID(unique_thread_id));
        if (thread_sp) {
          // Keep the existing thread object.
          currThreads.push_back(thread_sp);
        } else {
          // We don't have this thread yet, so add it.
          thread_sp.reset(new MachThread(process, m_is_64_bit, unique_thread_id,
                                         mach_port_num));

          // Only add this thread to our list once it is ready to be
          // displayed and shown to users; threads that are not yet user
          // ready will get picked up by a later update.
          if (thread_sp->IsUserReady()) {
            if (new_threads)
              new_threads->push_back(thread_sp);

            currThreads.push_back(thread_sp);
          }
        }
      }

      m_threads.swap(currThreads);
      m_current_thread.reset();

      // Free the vm memory given to us by ::task_threads()
      vm_size_t thread_list_size =
          (vm_size_t)(thread_list_count * sizeof(thread_t));
      ::vm_deallocate(::mach_task_self(), (vm_address_t)thread_list,
                      thread_list_size);
    }
  }
  return static_cast<uint32_t>(m_threads.size());
}

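// Return the cached current thread, selecting one if needed. The selection
// is cached in m_current_thread; the cache is reset whenever
// UpdateThreadList() rebuilds the thread list and can be overridden with
// SetCurrentThread().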
void MachThreadList::CurrentThread(MachThreadSP &thread_sp) {
  // locker will keep a mutex locked until it goes out of scope
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  if (m_current_thread.get() == NULL) {
    // Figure out which thread is going to be our current thread.
    // This is currently done by finding the first thread in the list
    // that has a valid exception.
    const size_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx) {
      if (m_threads[idx]->GetStopException().IsValid()) {
        m_current_thread = m_threads[idx];
        break;
      }
    }
  }
  thread_sp = m_current_thread;
}

void MachThreadList::Dump() const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->Dump(idx);
  }
}

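// Called before the task is resumed. We refresh the thread list first since
// libdispatch or the kernel can spawn threads while the task is suspended.
// Newly discovered threads get a default action: run, unless exactly one
// thread was asked to step or run, in which case they are kept suspended so
// that the solo thread runs alone. Every pre-existing thread is handed the
// action the caller supplied for it via ThreadWillResume(), and the solo
// thread is additionally told that the other threads are stopped.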
void MachThreadList::ProcessWillResume(
    MachProcess *process, const DNBThreadResumeActions &thread_actions) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);

  // Update our thread list, because sometimes libdispatch or the kernel
  // will spawn threads while a task is suspended.
  MachThreadList::collection new_threads;

  // First figure out if we were planning on running only one thread, and if so
  // force that thread to resume.
  bool run_one_thread;
  nub_thread_t solo_thread = INVALID_NUB_THREAD;
  if (thread_actions.GetSize() > 0 &&
      thread_actions.NumActionsWithState(eStateStepping) +
              thread_actions.NumActionsWithState(eStateRunning) ==
          1) {
    run_one_thread = true;
    const DNBThreadResumeAction *action_ptr = thread_actions.GetFirst();
    size_t num_actions = thread_actions.GetSize();
    for (size_t i = 0; i < num_actions; i++, action_ptr++) {
      if (action_ptr->state == eStateStepping ||
          action_ptr->state == eStateRunning) {
        solo_thread = action_ptr->tid;
        break;
      }
    }
  } else
    run_one_thread = false;

  UpdateThreadList(process, true, &new_threads);

  DNBThreadResumeAction resume_new_threads = {-1U, eStateRunning, 0,
                                              INVALID_NUB_ADDRESS};
  // If we are planning to run only one thread, any new threads should be
  // suspended.
  if (run_one_thread)
    resume_new_threads.state = eStateSuspended;

  const size_t num_new_threads = new_threads.size();
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    MachThread *thread = m_threads[idx].get();
    bool handled = false;
    for (uint32_t new_idx = 0; new_idx < num_new_threads; ++new_idx) {
      if (thread == new_threads[new_idx].get()) {
        thread->ThreadWillResume(&resume_new_threads);
        handled = true;
        break;
      }
    }

    if (!handled) {
      const DNBThreadResumeAction *thread_action =
          thread_actions.GetActionForThread(thread->ThreadID(), true);
      // There must always be a thread action for every thread.
      assert(thread_action);
      bool others_stopped = false;
      if (solo_thread == thread->ThreadID())
        others_stopped = true;
      thread->ThreadWillResume(thread_action, others_stopped);
    }
  }

  if (!new_threads.empty()) {
    for (uint32_t idx = 0; idx < num_new_threads; ++idx) {
      DNBLogThreadedIf(
          LOG_THREAD, "MachThreadList::ProcessWillResume (pid = %4.4x) "
                      "stop-id=%u, resuming newly discovered thread: "
                      "0x%8.8" PRIx64 ", thread-is-user-ready=%i",
          process->ProcessID(), process->StopCount(),
          new_threads[idx]->ThreadID(), new_threads[idx]->IsUserReady());
    }
  }
}

uint32_t MachThreadList::ProcessDidStop(MachProcess *process) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  // Update our thread list
  const uint32_t num_threads = UpdateThreadList(process, true);
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->ThreadDidStop();
  }
  return num_threads;
}

//----------------------------------------------------------------------
// Check each thread in our thread list to see if we should notify our
// client of the current halt in execution.
//
// Breakpoints can have callback functions associated with them that
// can return true to stop, or false to continue executing the inferior.
//
// RETURNS
//    true if we should stop and notify our clients
//    false if we should resume our child process and skip notification
//----------------------------------------------------------------------
bool MachThreadList::ShouldStop(bool &step_more) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  bool should_stop = false;
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; !should_stop && idx < num_threads; ++idx) {
    should_stop = m_threads[idx]->ShouldStop(step_more);
  }
  return should_stop;
}

void MachThreadList::NotifyBreakpointChanged(const DNBBreakpoint *bp) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->NotifyBreakpointChanged(bp);
  }
}

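// Apply or remove a hardware breakpoint on every thread in the list. Note
// that neither call propagates the per-thread result:
// EnableHardwareBreakpoint() always returns INVALID_NUB_HW_INDEX and
// DisableHardwareBreakpoint() always returns false.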
uint32_t
MachThreadList::EnableHardwareBreakpoint(const DNBBreakpoint *bp) const {
  if (bp != NULL) {
    const size_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
      m_threads[idx]->EnableHardwareBreakpoint(bp);
  }
  return INVALID_NUB_HW_INDEX;
}

bool MachThreadList::DisableHardwareBreakpoint(const DNBBreakpoint *bp) const {
  if (bp != NULL) {
    const size_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
      m_threads[idx]->DisableHardwareBreakpoint(bp);
  }
  return false;
}

// DNBWatchpointSet() -> MachProcess::CreateWatchpoint() ->
// MachProcess::EnableWatchpoint()
// -> MachThreadList::EnableHardwareWatchpoint().
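// Hardware watchpoints are applied as a two-phase transaction across all
// threads: try to enable the watchpoint on each thread (priming the task
// with the first thread's control register data), roll back the threads
// that were already primed if any thread fails, and commit on every thread
// only once all of them have succeeded. A rough usage sketch, with
// hypothetical variable names:
//
//   uint32_t hw_index = thread_list.EnableHardwareWatchpoint(&wp);
//   if (hw_index == INVALID_NUB_HW_INDEX) {
//     // The watchpoint could not be enabled on some thread and the
//     // partially applied changes were rolled back.
//   }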
uint32_t
MachThreadList::EnableHardwareWatchpoint(const DNBBreakpoint *wp) const {
  uint32_t hw_index = INVALID_NUB_HW_INDEX;
  if (wp != NULL) {
    PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();
    // On Mac OS X we have to prime the control registers for new threads.
    // We do this using the control register data for the first thread, for
    // lack of a better way of choosing.
    bool also_set_on_task = true;
    for (uint32_t idx = 0; idx < num_threads; ++idx) {
      if ((hw_index = m_threads[idx]->EnableHardwareWatchpoint(
               wp, also_set_on_task)) == INVALID_NUB_HW_INDEX) {
        // We know that idx failed for some reason. Let's roll back the
        // transaction for [0, idx).
        for (uint32_t i = 0; i < idx; ++i)
          m_threads[i]->RollbackTransForHWP();
        return INVALID_NUB_HW_INDEX;
      }
      also_set_on_task = false;
    }
    // Notify each thread to commit the pending transaction.
    for (uint32_t idx = 0; idx < num_threads; ++idx)
      m_threads[idx]->FinishTransForHWP();
  }
  return hw_index;
}

bool MachThreadList::DisableHardwareWatchpoint(const DNBBreakpoint *wp) const {
  if (wp != NULL) {
    PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
    const size_t num_threads = m_threads.size();

    // On Mac OS X we have to prime the control registers for new threads.
    // We do this using the control register data for the first thread, for
    // lack of a better way of choosing.
    bool also_set_on_task = true;
    for (uint32_t idx = 0; idx < num_threads; ++idx) {
      if (!m_threads[idx]->DisableHardwareWatchpoint(wp, also_set_on_task)) {
        // We know that idx failed for some reason. Let's roll back the
        // transaction for [0, idx).
        for (uint32_t i = 0; i < idx; ++i)
          m_threads[i]->RollbackTransForHWP();
        return false;
      }
      also_set_on_task = false;
    }
    // Notify each thread to commit the pending transaction.
    for (uint32_t idx = 0; idx < num_threads; ++idx)
      m_threads[idx]->FinishTransForHWP();

    return true;
  }
  return false;
}

uint32_t MachThreadList::NumSupportedHardwareWatchpoints() const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  // Use an arbitrary thread to retrieve the number of supported hardware
  // watchpoints.
  if (num_threads)
    return m_threads[0]->NumSupportedHardwareWatchpoints();
  return 0;
}

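// Return the index of the first thread whose stop exception corresponds to
// the given soft signal, or UINT32_MAX if no thread stopped with "signo".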
uint32_t MachThreadList::GetThreadIndexForThreadStoppedWithSignal(
    const int signo) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetStopException().SoftSignal() == signo)
      return idx;
  }
  return UINT32_MAX;
}