//===-- MachThreadList.cpp --------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/19/07.
//
//===----------------------------------------------------------------------===//

#include "MachThreadList.h"

#include <inttypes.h>
#include <sys/sysctl.h>

#include "DNBLog.h"
#include "DNBThreadResumeActions.h"
#include "MachProcess.h"

#include "llvm/ADT/STLExtras.h"

MachThreadList::MachThreadList() :
    m_threads(),
    m_threads_mutex(PTHREAD_MUTEX_RECURSIVE),
    m_is_64_bit(false)
{
}

MachThreadList::~MachThreadList()
{
}

nub_state_t
MachThreadList::GetState(nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetState();
    return eStateInvalid;
}

const char *
MachThreadList::GetName (nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetName();
    return NULL;
}

ThreadInfo::QoS
MachThreadList::GetRequestedQoS (nub_thread_t tid, nub_addr_t tsd, uint64_t dti_qos_class_index)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetRequestedQoS(tsd, dti_qos_class_index);
    return ThreadInfo::QoS();
}

nub_addr_t
MachThreadList::GetPThreadT (nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetPThreadT();
    return INVALID_NUB_ADDRESS;
}

nub_addr_t
MachThreadList::GetDispatchQueueT (nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetDispatchQueueT();
    return INVALID_NUB_ADDRESS;
}

nub_addr_t
MachThreadList::GetTSDAddressForThread (nub_thread_t tid, uint64_t plo_pthread_tsd_base_address_offset, uint64_t plo_pthread_tsd_base_offset, uint64_t plo_pthread_tsd_entry_size)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetTSDAddressForThread(plo_pthread_tsd_base_address_offset, plo_pthread_tsd_base_offset, plo_pthread_tsd_entry_size);
    return INVALID_NUB_ADDRESS;
}

nub_thread_t
MachThreadList::SetCurrentThread(nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
    {
        m_current_thread = thread_sp;
        return tid;
    }
    return INVALID_NUB_THREAD;
}


bool
MachThreadList::GetThreadStoppedReason(nub_thread_t tid, struct DNBThreadStopInfo *stop_info) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetStopException().GetStopInfo(stop_info);
    return false;
}

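// Fill in the thread_identifier_info (system-wide unique thread id, pthread
// handle, and dispatch queue address) for a thread by asking the kernel
// directly via ::thread_info() with the THREAD_IDENTIFIER_INFO flavor.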
bool
MachThreadList::GetIdentifierInfo (nub_thread_t tid, thread_identifier_info_data_t *ident_info)
{
    thread_t mach_port_number = GetMachPortNumberByThreadID (tid);

    mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
    return ::thread_info (mach_port_number, THREAD_IDENTIFIER_INFO, (thread_info_t)ident_info, &count) == KERN_SUCCESS;
}

void
MachThreadList::DumpThreadStoppedReason (nub_thread_t tid) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        thread_sp->GetStopException().DumpStopReason();
}

const char *
MachThreadList::GetThreadInfo (nub_thread_t tid) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetBasicInfoAsString();
    return NULL;
}

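// Find a thread by its globally unique thread ID with a linear scan of the
// thread list (performed while holding the thread list mutex).  Returns an
// empty shared pointer if no matching thread exists.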
MachThreadSP
MachThreadList::GetThreadByID (nub_thread_t tid) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    MachThreadSP thread_sp;
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->ThreadID() == tid)
        {
            thread_sp = m_threads[idx];
            break;
        }
    }
    return thread_sp;
}

MachThreadSP
MachThreadList::GetThreadByMachPortNumber (thread_t mach_port_number) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    MachThreadSP thread_sp;
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->MachPortNumber() == mach_port_number)
        {
            thread_sp = m_threads[idx];
            break;
        }
    }
    return thread_sp;
}

nub_thread_t
MachThreadList::GetThreadIDByMachPortNumber (thread_t mach_port_number) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    MachThreadSP thread_sp;
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->MachPortNumber() == mach_port_number)
        {
            return m_threads[idx]->ThreadID();
        }
    }
    return INVALID_NUB_THREAD;
}

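// Map a globally unique thread ID back to the mach port number the kernel
// uses for that thread.  Returns 0 (not a valid thread port) when the thread
// is not in our list.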
thread_t
MachThreadList::GetMachPortNumberByThreadID (nub_thread_t globally_unique_id) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    MachThreadSP thread_sp;
    const size_t num_threads = m_threads.size();
    for (size_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->ThreadID() == globally_unique_id)
        {
            return m_threads[idx]->MachPortNumber();
        }
    }
    return 0;
}

bool
MachThreadList::GetRegisterValue (nub_thread_t tid, uint32_t reg_set_idx, uint32_t reg_idx, DNBRegisterValue *reg_value ) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetRegisterValue(reg_set_idx, reg_idx, reg_value);

    return false;
}

bool
MachThreadList::SetRegisterValue (nub_thread_t tid, uint32_t reg_set_idx, uint32_t reg_idx, const DNBRegisterValue *reg_value ) const
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->SetRegisterValue(reg_set_idx, reg_idx, reg_value);

    return false;
}

nub_size_t
MachThreadList::GetRegisterContext (nub_thread_t tid, void *buf, size_t buf_len)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->GetRegisterContext (buf, buf_len);
    return 0;
}

nub_size_t
MachThreadList::SetRegisterContext (nub_thread_t tid, const void *buf, size_t buf_len)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->SetRegisterContext (buf, buf_len);
    return 0;
}

uint32_t
MachThreadList::SaveRegisterState (nub_thread_t tid)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->SaveRegisterState ();
    return 0;
}

bool
MachThreadList::RestoreRegisterState (nub_thread_t tid, uint32_t save_id)
{
    MachThreadSP thread_sp (GetThreadByID (tid));
    if (thread_sp)
        return thread_sp->RestoreRegisterState (save_id);
    return false;
}


nub_size_t
MachThreadList::NumThreads () const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    return m_threads.size();
}

nub_thread_t
MachThreadList::ThreadIDAtIndex (nub_size_t idx) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    if (idx < m_threads.size())
        return m_threads[idx]->ThreadID();
    return INVALID_NUB_THREAD;
}

nub_thread_t
MachThreadList::CurrentThreadID ( )
{
    MachThreadSP thread_sp;
    CurrentThread(thread_sp);
    if (thread_sp.get())
        return thread_sp->ThreadID();
    return INVALID_NUB_THREAD;
}

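// Route an exception to the thread that raised it, looked up by the
// exception's mach thread port.  Returns false if no matching thread is in
// our list.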
bool
MachThreadList::NotifyException(MachException::Data& exc)
{
    MachThreadSP thread_sp (GetThreadByMachPortNumber (exc.thread_port));
    if (thread_sp)
    {
        thread_sp->NotifyException(exc);
        return true;
    }
    return false;
}

void
MachThreadList::Clear()
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    m_threads.clear();
}

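// Refresh our list of threads from the kernel.
//
// On the first stop we also determine whether the inferior is a 64-bit
// process (via sysctl) and set the DNBArchProtocol architecture accordingly.
// When the list is updated, existing MachThread objects are kept for threads
// that are still alive, new MachThread objects are created for newly seen
// mach ports (and, once user ready, reported through new_threads if given),
// and threads that have exited simply drop out of the list.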
uint32_t
MachThreadList::UpdateThreadList(MachProcess *process, bool update, MachThreadList::collection *new_threads)
{
    // locker will keep a mutex locked until it goes out of scope
    DNBLogThreadedIf (LOG_THREAD, "MachThreadList::UpdateThreadList (pid = %4.4x, update = %u) process stop count = %u", process->ProcessID(), update, process->StopCount());
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);

    if (process->StopCount() == 0)
    {
        int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, process->ProcessID() };
        struct kinfo_proc processInfo;
        size_t bufsize = sizeof(processInfo);
        if (sysctl(mib, llvm::array_lengthof(mib), &processInfo, &bufsize, NULL, 0) == 0 && bufsize > 0)
        {
            if (processInfo.kp_proc.p_flag & P_LP64)
                m_is_64_bit = true;
        }
#if defined (__i386__) || defined (__x86_64__)
        if (m_is_64_bit)
            DNBArchProtocol::SetArchitecture(CPU_TYPE_X86_64);
        else
            DNBArchProtocol::SetArchitecture(CPU_TYPE_I386);
#elif defined (__arm__) || defined (__arm64__)
        if (m_is_64_bit)
            DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM64);
        else
            DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM);
#endif
    }

    if (m_threads.empty() || update)
    {
        thread_array_t thread_list = NULL;
        mach_msg_type_number_t thread_list_count = 0;
        task_t task = process->Task().TaskPort();
        DNBError err(::task_threads (task, &thread_list, &thread_list_count), DNBError::MachKernel);

        if (DNBLogCheckLogBit(LOG_THREAD) || err.Fail())
            err.LogThreaded("::task_threads ( task = 0x%4.4x, thread_list => %p, thread_list_count => %u )", task, thread_list, thread_list_count);

        if (err.Error() == KERN_SUCCESS && thread_list_count > 0)
        {
            MachThreadList::collection currThreads;
            size_t idx;
            // Iterate through the current thread list and see which threads
            // we already have in our list (keep them), which ones we don't
            // (add them), and which ones are not around anymore (remove them).
            for (idx = 0; idx < thread_list_count; ++idx)
            {
                const thread_t mach_port_num = thread_list[idx];

                uint64_t unique_thread_id = MachThread::GetGloballyUniqueThreadIDForMachPortID (mach_port_num);
                MachThreadSP thread_sp (GetThreadByID (unique_thread_id));
                if (thread_sp)
                {
                    // Keep the existing thread class
                    currThreads.push_back(thread_sp);
                }
                else
                {
                    // We don't have this thread, let's add it.
                    thread_sp.reset(new MachThread(process, m_is_64_bit, unique_thread_id, mach_port_num));

                    // Make sure the thread is ready to be displayed and shown
                    // to users before we add it to our list...
                    if (thread_sp->IsUserReady())
                    {
                        if (new_threads)
                            new_threads->push_back(thread_sp);

                        currThreads.push_back(thread_sp);
                    }
                }
            }

            m_threads.swap(currThreads);
            m_current_thread.reset();

            // Free the vm memory given to us by ::task_threads()
            vm_size_t thread_list_size = (vm_size_t) (thread_list_count * sizeof (thread_t));
            ::vm_deallocate (::mach_task_self(),
                             (vm_address_t)thread_list,
                             thread_list_size);
        }
    }
    return m_threads.size();
}


void
MachThreadList::CurrentThread (MachThreadSP& thread_sp)
{
    // locker will keep a mutex locked until it goes out of scope
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    if (m_current_thread.get() == NULL)
    {
        // Figure out which thread is going to be our current thread.
        // This is currently done by finding the first thread in the list
        // that has a valid exception.
        const uint32_t num_threads = m_threads.size();
        for (uint32_t idx = 0; idx < num_threads; ++idx)
        {
            if (m_threads[idx]->GetStopException().IsValid())
            {
                m_current_thread = m_threads[idx];
                break;
            }
        }
    }
    thread_sp = m_current_thread;
}

void
MachThreadList::Dump() const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const uint32_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        m_threads[idx]->Dump(idx);
    }
}

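// Called just before the process resumes.
//
// The thread list is refreshed first, since libdispatch or the kernel can
// spawn threads while the task is suspended.  Each pre-existing thread gets
// the resume action the client asked for; newly discovered threads are
// resumed by default, or left suspended when the client asked to run or step
// exactly one thread.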
void
MachThreadList::ProcessWillResume(MachProcess *process, const DNBThreadResumeActions &thread_actions)
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);

    // Update our thread list, because sometimes libdispatch or the kernel
    // will spawn threads while a task is suspended.
    MachThreadList::collection new_threads;

    // First figure out if we were planning on running only one thread, and if so force that thread to resume.
    bool run_one_thread;
    nub_thread_t solo_thread = INVALID_NUB_THREAD;
    if (thread_actions.GetSize() > 0
        && thread_actions.NumActionsWithState(eStateStepping) + thread_actions.NumActionsWithState (eStateRunning) == 1)
    {
        run_one_thread = true;
        const DNBThreadResumeAction *action_ptr = thread_actions.GetFirst();
        size_t num_actions = thread_actions.GetSize();
        for (size_t i = 0; i < num_actions; i++, action_ptr++)
        {
            if (action_ptr->state == eStateStepping || action_ptr->state == eStateRunning)
            {
                solo_thread = action_ptr->tid;
                break;
            }
        }
    }
    else
        run_one_thread = false;

    UpdateThreadList(process, true, &new_threads);

    DNBThreadResumeAction resume_new_threads = { -1U, eStateRunning, 0, INVALID_NUB_ADDRESS };
    // If we are planning to run only one thread, any new threads should be suspended.
    if (run_one_thread)
        resume_new_threads.state = eStateSuspended;

    const uint32_t num_new_threads = new_threads.size();
    const uint32_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        MachThread *thread = m_threads[idx].get();
        bool handled = false;
        for (uint32_t new_idx = 0; new_idx < num_new_threads; ++new_idx)
        {
            if (thread == new_threads[new_idx].get())
            {
                thread->ThreadWillResume(&resume_new_threads);
                handled = true;
                break;
            }
        }

        if (!handled)
        {
            const DNBThreadResumeAction *thread_action = thread_actions.GetActionForThread (thread->ThreadID(), true);
            // There must always be a thread action for every thread.
            assert (thread_action);
            bool others_stopped = false;
            if (solo_thread == thread->ThreadID())
                others_stopped = true;
            thread->ThreadWillResume (thread_action, others_stopped);
        }
    }

    if (new_threads.size())
    {
        for (uint32_t idx = 0; idx < num_new_threads; ++idx)
        {
            DNBLogThreadedIf (LOG_THREAD, "MachThreadList::ProcessWillResume (pid = %4.4x) stop-id=%u, resuming newly discovered thread: 0x%8.8" PRIx64 ", thread-is-user-ready=%i",
                              process->ProcessID(),
                              process->StopCount(),
                              new_threads[idx]->ThreadID(),
                              new_threads[idx]->IsUserReady());
        }
    }
}

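// Called after the process stops: refresh the thread list and give every
// thread a chance to update its state for this stop.  Returns the number of
// threads currently in the list.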
uint32_t
MachThreadList::ProcessDidStop(MachProcess *process)
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    // Update our thread list
    const uint32_t num_threads = UpdateThreadList(process, true);
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        m_threads[idx]->ThreadDidStop();
    }
    return num_threads;
}

//----------------------------------------------------------------------
// Check each thread in our thread list to see if we should notify our
// client of the current halt in execution.
//
// Breakpoints can have callback functions associated with them that
// can return true to stop, or false to continue executing the inferior.
//
// RETURNS
//    true if we should stop and notify our clients
//    false if we should resume our child process and skip notification
//----------------------------------------------------------------------
bool
MachThreadList::ShouldStop(bool &step_more)
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    bool should_stop = false;
    const uint32_t num_threads = m_threads.size();
    for (uint32_t idx = 0; !should_stop && idx < num_threads; ++idx)
    {
        should_stop = m_threads[idx]->ShouldStop(step_more);
    }
    return should_stop;
}


void
MachThreadList::NotifyBreakpointChanged (const DNBBreakpoint *bp)
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const uint32_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        m_threads[idx]->NotifyBreakpointChanged(bp);
    }
}


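// Ask every thread in the list to enable (or, below, disable) the given
// hardware breakpoint.  Note that the per-thread hardware index is not
// collected here; this routine currently always returns INVALID_NUB_HW_INDEX.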
uint32_t
MachThreadList::EnableHardwareBreakpoint (const DNBBreakpoint* bp) const
{
    if (bp != NULL)
    {
        PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
        const uint32_t num_threads = m_threads.size();
        for (uint32_t idx = 0; idx < num_threads; ++idx)
            m_threads[idx]->EnableHardwareBreakpoint(bp);
    }
    return INVALID_NUB_HW_INDEX;
}

bool
MachThreadList::DisableHardwareBreakpoint (const DNBBreakpoint* bp) const
{
    if (bp != NULL)
    {
        PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
        const uint32_t num_threads = m_threads.size();
        for (uint32_t idx = 0; idx < num_threads; ++idx)
            m_threads[idx]->DisableHardwareBreakpoint(bp);
    }
    return false;
}

// DNBWatchpointSet() -> MachProcess::CreateWatchpoint() -> MachProcess::EnableWatchpoint()
// -> MachThreadList::EnableHardwareWatchpoint().
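//
// The watchpoint is set on every thread in the list.  If any thread fails to
// set it, the changes made on the threads that did succeed are rolled back so
// all threads stay consistent, and INVALID_NUB_HW_INDEX is returned.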
uint32_t
MachThreadList::EnableHardwareWatchpoint (const DNBBreakpoint* wp) const
{
    uint32_t hw_index = INVALID_NUB_HW_INDEX;
    if (wp != NULL)
    {
        PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
        const uint32_t num_threads = m_threads.size();
        // On Mac OS X we have to prime the control registers for new threads.  We do this
        // using the control register data for the first thread, for lack of a better way of choosing.
        bool also_set_on_task = true;
        for (uint32_t idx = 0; idx < num_threads; ++idx)
        {
            if ((hw_index = m_threads[idx]->EnableHardwareWatchpoint(wp, also_set_on_task)) == INVALID_NUB_HW_INDEX)
            {
                // We know that idx failed for some reason.  Let's rollback the transaction for [0, idx).
                for (uint32_t i = 0; i < idx; ++i)
                    m_threads[i]->RollbackTransForHWP();
                return INVALID_NUB_HW_INDEX;
            }
            also_set_on_task = false;
        }
        // Notify each thread to commit the pending transaction.
        for (uint32_t idx = 0; idx < num_threads; ++idx)
            m_threads[idx]->FinishTransForHWP();

    }
    return hw_index;
}

bool
MachThreadList::DisableHardwareWatchpoint (const DNBBreakpoint* wp) const
{
    if (wp != NULL)
    {
        PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
        const uint32_t num_threads = m_threads.size();

        // On Mac OS X we have to prime the control registers for new threads.  We do this
        // using the control register data for the first thread, for lack of a better way of choosing.
        bool also_set_on_task = true;
        for (uint32_t idx = 0; idx < num_threads; ++idx)
        {
            if (!m_threads[idx]->DisableHardwareWatchpoint(wp, also_set_on_task))
            {
                // We know that idx failed for some reason.  Let's rollback the transaction for [0, idx).
                for (uint32_t i = 0; i < idx; ++i)
                    m_threads[i]->RollbackTransForHWP();
                return false;
            }
            also_set_on_task = false;
        }
        // Notify each thread to commit the pending transaction.
        for (uint32_t idx = 0; idx < num_threads; ++idx)
            m_threads[idx]->FinishTransForHWP();

        return true;
    }
    return false;
}

uint32_t
MachThreadList::NumSupportedHardwareWatchpoints () const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const uint32_t num_threads = m_threads.size();
    // Use an arbitrary thread to retrieve the number of supported hardware watchpoints.
    if (num_threads)
        return m_threads[0]->NumSupportedHardwareWatchpoints();
    return 0;
}

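// Return the index of the first thread whose stop reason is the given soft
// signal, or UINT32_MAX if no thread stopped with that signal.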
uint32_t
MachThreadList::GetThreadIndexForThreadStoppedWithSignal (const int signo) const
{
    PTHREAD_MUTEX_LOCKER (locker, m_threads_mutex);
    const uint32_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->GetStopException().SoftSignal () == signo)
            return idx;
    }
    return UINT32_MAX;
}