//===-- ThreadList.cpp ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include <stdlib.h>

#include <algorithm>

#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/ThreadList.h"
#include "lldb/Target/ThreadPlan.h"
#include "lldb/Utility/LLDBAssert.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/State.h"

using namespace lldb;
using namespace lldb_private;

ThreadList::ThreadList(Process *process)
    : ThreadCollection(), m_process(process), m_stop_id(0),
      m_selected_tid(LLDB_INVALID_THREAD_ID) {}

ThreadList::ThreadList(const ThreadList &rhs)
    : ThreadCollection(), m_process(rhs.m_process), m_stop_id(rhs.m_stop_id),
      m_selected_tid() {
  // Use the assignment operator since it uses the mutex
  *this = rhs;
}

const ThreadList &ThreadList::operator=(const ThreadList &rhs) {
  if (this != &rhs) {
    // Lock both mutexes to make sure neither side changes anyone on us while
    // the assignment occurs
    std::lock_guard<std::recursive_mutex> guard(GetMutex());
    std::lock_guard<std::recursive_mutex> rhs_guard(rhs.GetMutex());

    m_process = rhs.m_process;
    m_stop_id = rhs.m_stop_id;
    m_threads = rhs.m_threads;
    m_selected_tid = rhs.m_selected_tid;
  }
  return *this;
}

ThreadList::~ThreadList() {
  // Clear the thread list. Clear will take the mutex lock which will ensure
  // that if anyone is using the list they won't get it removed while using it.
  Clear();
}

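// The "expression execution thread" is tracked as a stack of tids: the
// expression currently being run pushes its thread, nested expressions push
// on top, and each pop must match the tid on top of the stack (asserted in
// PopExpressionExecutionThread below). Callers normally keep push and pop
// balanced via the RAII helper ThreadList::ExpressionExecutionThreadPusher;
// see the usage sketch above its constructor at the bottom of this file.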
lldb::ThreadSP ThreadList::GetExpressionExecutionThread() {
  if (m_expression_tid_stack.empty())
    return GetSelectedThread();
  ThreadSP expr_thread_sp = FindThreadByID(m_expression_tid_stack.back());
  if (expr_thread_sp)
    return expr_thread_sp;
  else
    return GetSelectedThread();
}

void ThreadList::PushExpressionExecutionThread(lldb::tid_t tid) {
  m_expression_tid_stack.push_back(tid);
}

void ThreadList::PopExpressionExecutionThread(lldb::tid_t tid) {
  assert(m_expression_tid_stack.back() == tid);
  m_expression_tid_stack.pop_back();
}

uint32_t ThreadList::GetStopID() const { return m_stop_id; }

void ThreadList::SetStopID(uint32_t stop_id) { m_stop_id = stop_id; }

uint32_t ThreadList::GetSize(bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process->UpdateThreadListIfNeeded();
  return m_threads.size();
}

ThreadSP ThreadList::GetThreadAtIndex(uint32_t idx, bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process->UpdateThreadListIfNeeded();

  ThreadSP thread_sp;
  if (idx < m_threads.size())
    thread_sp = m_threads[idx];
  return thread_sp;
}

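// The lookup helpers below key off three different identifiers; see Thread.h
// for the authoritative definitions. Roughly: GetID() is the thread's
// lldb::tid_t, GetProtocolID() is the ID the underlying debug protocol or OS
// plugin uses for the thread, and GetIndexID() is the small, stable index
// LLDB assigns and shows to the user (e.g. "thread #1").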
ThreadSP ThreadList::FindThreadByID(lldb::tid_t tid, bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process->UpdateThreadListIfNeeded();

  ThreadSP thread_sp;
  uint32_t idx = 0;
  const uint32_t num_threads = m_threads.size();
  for (idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetID() == tid) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

ThreadSP ThreadList::FindThreadByProtocolID(lldb::tid_t tid, bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process->UpdateThreadListIfNeeded();

  ThreadSP thread_sp;
  uint32_t idx = 0;
  const uint32_t num_threads = m_threads.size();
  for (idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetProtocolID() == tid) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

ThreadSP ThreadList::RemoveThreadByID(lldb::tid_t tid, bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process->UpdateThreadListIfNeeded();

  ThreadSP thread_sp;
  uint32_t idx = 0;
  const uint32_t num_threads = m_threads.size();
  for (idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetID() == tid) {
      thread_sp = m_threads[idx];
      m_threads.erase(m_threads.begin() + idx);
      break;
    }
  }
  return thread_sp;
}

ThreadSP ThreadList::RemoveThreadByProtocolID(lldb::tid_t tid,
                                              bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process->UpdateThreadListIfNeeded();

  ThreadSP thread_sp;
  uint32_t idx = 0;
  const uint32_t num_threads = m_threads.size();
  for (idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetProtocolID() == tid) {
      thread_sp = m_threads[idx];
      m_threads.erase(m_threads.begin() + idx);
      break;
    }
  }
  return thread_sp;
}

ThreadSP ThreadList::GetThreadSPForThreadPtr(Thread *thread_ptr) {
  ThreadSP thread_sp;
  if (thread_ptr) {
    std::lock_guard<std::recursive_mutex> guard(GetMutex());

    uint32_t idx = 0;
    const uint32_t num_threads = m_threads.size();
    for (idx = 0; idx < num_threads; ++idx) {
      if (m_threads[idx].get() == thread_ptr) {
        thread_sp = m_threads[idx];
        break;
      }
    }
  }
  return thread_sp;
}

ThreadSP ThreadList::GetBackingThread(const ThreadSP &real_thread) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  ThreadSP thread_sp;
  const uint32_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetBackingThread() == real_thread) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

ThreadSP ThreadList::FindThreadByIndexID(uint32_t index_id, bool can_update) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  if (can_update)
    m_process->UpdateThreadListIfNeeded();

  ThreadSP thread_sp;
  const uint32_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetIndexID() == index_id) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

bool ThreadList::ShouldStop(Event *event_ptr) {
  // Running events should never stop, obviously...

  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));

  // The ShouldStop method of the threads can do a whole lot of work, figuring
  // out whether the thread plan conditions are met. So we don't want to keep
  // the ThreadList locked the whole time we are doing this.
  // FIXME: It is possible that running code could cause new threads
  // to be created. If that happens, we will miss asking them whether they
  // should stop. This is not a big deal since we haven't had a chance to hang
  // any interesting operations on those threads yet.

  collection threads_copy;
  {
    // Scope for locker
    std::lock_guard<std::recursive_mutex> guard(GetMutex());

    m_process->UpdateThreadListIfNeeded();
    for (lldb::ThreadSP thread_sp : m_threads) {
      // This is an optimization... If we didn't let a thread run in between
      // the previous stop and this one, we shouldn't have to consult it for
      // ShouldStop. So just leave it off the list we are going to inspect. On
      // Linux, if a thread-specific conditional breakpoint was hit, it won't
      // necessarily be the thread that hit the breakpoint itself that
      // evaluates the conditional expression, so the thread that hit the
      // breakpoint could still be asked to stop, even though it hasn't been
      // allowed to run since the previous stop.
      if (thread_sp->GetTemporaryResumeState() != eStateSuspended ||
          thread_sp->IsStillAtLastBreakpointHit())
        threads_copy.push_back(thread_sp);
    }

    // It is possible the threads we were allowing to run all exited and then
    // maybe the user interrupted or something; in that case fall back on
    // looking at all threads:

    if (threads_copy.size() == 0)
      threads_copy = m_threads;
  }

  collection::iterator pos, end = threads_copy.end();

  if (log) {
    log->PutCString("");
    log->Printf("ThreadList::%s: %" PRIu64 " threads, %" PRIu64
                " unsuspended threads",
                __FUNCTION__, (uint64_t)m_threads.size(),
                (uint64_t)threads_copy.size());
  }

  bool did_anybody_stop_for_a_reason = false;

  // If the event is an Interrupt event, then we're going to stop no matter
  // what. Otherwise, presume we won't stop.
  bool should_stop = false;
  if (Process::ProcessEventData::GetInterruptedFromEvent(event_ptr)) {
    if (log)
      log->Printf(
          "ThreadList::%s handling interrupt event, should stop set to true",
          __FUNCTION__);

    should_stop = true;
  }

  // Now we run through all the threads and get their stop infos. We want to
  // make sure to do this first before we start running the ShouldStop, because
  // one thread's ShouldStop could destroy information (like deleting a thread
  // specific breakpoint another thread had stopped at) which could lead us to
  // compute the StopInfo incorrectly. We don't need to use it here, we just
  // want to make sure it gets computed.

  for (pos = threads_copy.begin(); pos != end; ++pos) {
    ThreadSP thread_sp(*pos);
    thread_sp->GetStopInfo();
  }

  for (pos = threads_copy.begin(); pos != end; ++pos) {
    ThreadSP thread_sp(*pos);

    // We should never get a stop for which no thread had a stop reason, but
    // sometimes we do see this - for instance when we first connect to a
    // remote stub. In that case we should stop, since we can't figure out the
    // right thing to do and stopping gives the user control over what to do in
    // this instance.
    //
    // Note, this causes a problem when you have a thread specific breakpoint,
    // and a bunch of threads hit the breakpoint, but not the thread which we
    // are waiting for. All the threads that are not "supposed" to hit the
    // breakpoint are marked as having no stop reason, which is right, they
    // should not show a stop reason. But that triggers this code and causes
    // us to stop seemingly for no reason.
    //
    // Since the only way we ever saw this error was on first attach, I'm only
    // going to check whether anybody actually stopped for a reason on the
    // first stop; after that we just assume somebody did.
    //
    // If this becomes a problem, we'll have to have another StopReason like
    // "StopInfoHidden" which will look invalid everywhere but at this check.

    if (thread_sp->GetProcess()->GetStopID() > 1)
      did_anybody_stop_for_a_reason = true;
    else
      did_anybody_stop_for_a_reason |= thread_sp->ThreadStoppedForAReason();

    const bool thread_should_stop = thread_sp->ShouldStop(event_ptr);
    if (thread_should_stop)
      should_stop |= true;
  }

  if (!should_stop && !did_anybody_stop_for_a_reason) {
    should_stop = true;
    if (log)
      log->Printf("ThreadList::%s we stopped but no threads had a stop reason, "
                  "overriding should_stop and stopping.",
                  __FUNCTION__);
  }

  if (log)
    log->Printf("ThreadList::%s overall should_stop = %i", __FUNCTION__,
                should_stop);

  if (should_stop) {
    for (pos = threads_copy.begin(); pos != end; ++pos) {
      ThreadSP thread_sp(*pos);
      thread_sp->WillStop();
    }
  }

  return should_stop;
}

Vote ThreadList::ShouldReportStop(Event *event_ptr) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  Vote result = eVoteNoOpinion;
  m_process->UpdateThreadListIfNeeded();
  collection::iterator pos, end = m_threads.end();

  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));

  if (log)
    log->Printf("ThreadList::%s %" PRIu64 " threads", __FUNCTION__,
                (uint64_t)m_threads.size());

  // Run through the threads and ask whether we should report this event. For
  // stopping, a YES vote wins over everything. A NO vote wins over NO
  // opinion.
  for (pos = m_threads.begin(); pos != end; ++pos) {
    ThreadSP thread_sp(*pos);
    const Vote vote = thread_sp->ShouldReportStop(event_ptr);
    switch (vote) {
    case eVoteNoOpinion:
      continue;

    case eVoteYes:
      result = eVoteYes;
      break;

    case eVoteNo:
      if (result == eVoteNoOpinion) {
        result = eVoteNo;
      } else {
        LLDB_LOG(log,
                 "Thread {0:x} voted {1}, but lost out because result was {2}",
                 thread_sp->GetID(), vote, result);
      }
      break;
    }
  }
  LLDB_LOG(log, "Returning {0}", result);
  return result;
}

void ThreadList::SetShouldReportStop(Vote vote) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  m_process->UpdateThreadListIfNeeded();
  collection::iterator pos, end = m_threads.end();
  for (pos = m_threads.begin(); pos != end; ++pos) {
    ThreadSP thread_sp(*pos);
    thread_sp->SetShouldReportStop(vote);
  }
}

Vote ThreadList::ShouldReportRun(Event *event_ptr) {

  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  Vote result = eVoteNoOpinion;
  m_process->UpdateThreadListIfNeeded();
  collection::iterator pos, end = m_threads.end();

  // Run through the threads and ask whether we should report this event. The
  // rule is NO vote wins over everything, a YES vote wins over no opinion.

  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));

  for (pos = m_threads.begin(); pos != end; ++pos) {
    if ((*pos)->GetResumeState() != eStateSuspended) {
      switch ((*pos)->ShouldReportRun(event_ptr)) {
      case eVoteNoOpinion:
        continue;
      case eVoteYes:
        if (result == eVoteNoOpinion)
          result = eVoteYes;
        break;
      case eVoteNo:
        if (log)
          log->Printf("ThreadList::ShouldReportRun() thread %d (0x%4.4" PRIx64
                      ") says don't report.",
                      (*pos)->GetIndexID(), (*pos)->GetID());
        result = eVoteNo;
        break;
      }
    }
  }
  return result;
}

void ThreadList::Clear() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  m_stop_id = 0;
  m_threads.clear();
  m_selected_tid = LLDB_INVALID_THREAD_ID;
}

void ThreadList::Destroy() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  const uint32_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->DestroyThread();
  }
}

void ThreadList::RefreshStateAfterStop() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  m_process->UpdateThreadListIfNeeded();

  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
  if (log && log->GetVerbose())
    log->Printf("Turning off notification of new threads while single stepping "
                "a thread.");

  collection::iterator pos, end = m_threads.end();
  for (pos = m_threads.begin(); pos != end; ++pos)
    (*pos)->RefreshStateAfterStop();
}

void ThreadList::DiscardThreadPlans() {
  // You don't need to update the thread list here, because only threads that
  // you currently know about have any thread plans.
  std::lock_guard<std::recursive_mutex> guard(GetMutex());

  collection::iterator pos, end = m_threads.end();
  for (pos = m_threads.begin(); pos != end; ++pos)
    (*pos)->DiscardThreadPlans(true);
}

bool ThreadList::WillResume() {
  // Run through the threads and perform their momentary actions. But we only
  // do this for threads that are running, user suspended threads stay where
  // they are.

  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  m_process->UpdateThreadListIfNeeded();

  collection::iterator pos, end = m_threads.end();

  // See if any thread wants to run stopping others. If it does, then we won't
  // setup the other threads for resume, since they aren't going to get a
  // chance to run. This is necessary because the SetupForResume might add
  // "StopOthers" plans which would then get to be part of the who-gets-to-run
  // negotiation, but they're coming in after the fact, and the threads that
  // are already set up should take priority.

  bool wants_solo_run = false;

  for (pos = m_threads.begin(); pos != end; ++pos) {
    lldbassert((*pos)->GetCurrentPlan() &&
               "thread should not have null thread plan");
    if ((*pos)->GetResumeState() != eStateSuspended &&
        (*pos)->GetCurrentPlan()->StopOthers()) {
      if ((*pos)->IsOperatingSystemPluginThread() &&
          !(*pos)->GetBackingThread())
        continue;
      wants_solo_run = true;
      break;
    }
  }

  if (wants_solo_run) {
    Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
    if (log && log->GetVerbose())
      log->Printf("Turning on notification of new threads while single "
                  "stepping a thread.");
    m_process->StartNoticingNewThreads();
  } else {
    Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
    if (log && log->GetVerbose())
      log->Printf("Turning off notification of new threads while single "
                  "stepping a thread.");
    m_process->StopNoticingNewThreads();
  }

  // Give all the threads that are likely to run a last chance to set up their
  // state before we negotiate who is actually going to get a chance to run...
  // Don't set to resume suspended threads, and if any thread wanted to stop
  // others, only call setup on the threads that request StopOthers...

  for (pos = m_threads.begin(); pos != end; ++pos) {
    if ((*pos)->GetResumeState() != eStateSuspended &&
        (!wants_solo_run || (*pos)->GetCurrentPlan()->StopOthers())) {
      if ((*pos)->IsOperatingSystemPluginThread() &&
          !(*pos)->GetBackingThread())
        continue;
      (*pos)->SetupForResume();
    }
  }

  // Now go through the threads and see if any thread wants to run just itself.
  // If so then pick one and run it.

  ThreadList run_me_only_list(m_process);

  run_me_only_list.SetStopID(m_process->GetStopID());

  bool run_only_current_thread = false;

  for (pos = m_threads.begin(); pos != end; ++pos) {
    ThreadSP thread_sp(*pos);
    if (thread_sp->GetResumeState() != eStateSuspended &&
        thread_sp->GetCurrentPlan()->StopOthers()) {
      if ((*pos)->IsOperatingSystemPluginThread() &&
          !(*pos)->GetBackingThread())
        continue;

      // You can't say "stop others" and also want yourself to be suspended.
      assert(thread_sp->GetCurrentPlan()->RunState() != eStateSuspended);

      if (thread_sp == GetSelectedThread()) {
        // If the currently selected thread wants to run on its own, always let
        // it.
        run_only_current_thread = true;
        run_me_only_list.Clear();
        run_me_only_list.AddThread(thread_sp);
        break;
      }

      run_me_only_list.AddThread(thread_sp);
    }
  }

  bool need_to_resume = true;

  if (run_me_only_list.GetSize(false) == 0) {
    // Everybody runs as they wish:
    for (pos = m_threads.begin(); pos != end; ++pos) {
      ThreadSP thread_sp(*pos);
      StateType run_state;
      if (thread_sp->GetResumeState() != eStateSuspended)
        run_state = thread_sp->GetCurrentPlan()->RunState();
      else
        run_state = eStateSuspended;
      if (!thread_sp->ShouldResume(run_state))
        need_to_resume = false;
    }
  } else {
    ThreadSP thread_to_run;

    if (run_only_current_thread) {
      thread_to_run = GetSelectedThread();
    } else if (run_me_only_list.GetSize(false) == 1) {
      thread_to_run = run_me_only_list.GetThreadAtIndex(0);
    } else {
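      // More than one thread asked to run alone, so pick one at random. The
      // expression below scales rand() from [0, RAND_MAX] to an index in
      // [0, GetSize(false) - 1].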
      int random_thread =
          (int)((run_me_only_list.GetSize(false) * (double)rand()) /
                (RAND_MAX + 1.0));
      thread_to_run = run_me_only_list.GetThreadAtIndex(random_thread);
    }

    for (pos = m_threads.begin(); pos != end; ++pos) {
      ThreadSP thread_sp(*pos);
      if (thread_sp == thread_to_run) {
        if (!thread_sp->ShouldResume(thread_sp->GetCurrentPlan()->RunState()))
          need_to_resume = false;
      } else
        thread_sp->ShouldResume(eStateSuspended);
    }
  }

  return need_to_resume;
}

void ThreadList::DidResume() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  collection::iterator pos, end = m_threads.end();
  for (pos = m_threads.begin(); pos != end; ++pos) {
    // Don't clear out threads that aren't going to get a chance to run, rather
    // leave their state for the next time around.
    ThreadSP thread_sp(*pos);
    if (thread_sp->GetResumeState() != eStateSuspended)
      thread_sp->DidResume();
  }
}

void ThreadList::DidStop() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  collection::iterator pos, end = m_threads.end();
  for (pos = m_threads.begin(); pos != end; ++pos) {
    // Notify threads that the process just stopped. Note, this currently
    // assumes that all threads in the list stop when the process stops. In
    // the future we will want to support a debugging model where some threads
    // continue to run while others are stopped. We either need to handle that
    // somehow here or create a special thread list containing only threads
    // which will stop in the code that calls this method (currently
    // Process::SetPrivateState).
    ThreadSP thread_sp(*pos);
    if (StateIsRunningState(thread_sp->GetState()))
      thread_sp->DidStop();
  }
}

ThreadSP ThreadList::GetSelectedThread() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  ThreadSP thread_sp = FindThreadByID(m_selected_tid);
  if (!thread_sp.get()) {
    if (m_threads.size() == 0)
      return thread_sp;
    m_selected_tid = m_threads[0]->GetID();
    thread_sp = m_threads[0];
  }
  return thread_sp;
}

bool ThreadList::SetSelectedThreadByID(lldb::tid_t tid, bool notify) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  ThreadSP selected_thread_sp(FindThreadByID(tid));
  if (selected_thread_sp) {
    m_selected_tid = tid;
    selected_thread_sp->SetDefaultFileAndLineToSelectedFrame();
  } else
    m_selected_tid = LLDB_INVALID_THREAD_ID;

  if (notify)
    NotifySelectedThreadChanged(m_selected_tid);

  return m_selected_tid != LLDB_INVALID_THREAD_ID;
}

bool ThreadList::SetSelectedThreadByIndexID(uint32_t index_id, bool notify) {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  ThreadSP selected_thread_sp(FindThreadByIndexID(index_id));
  if (selected_thread_sp.get()) {
    m_selected_tid = selected_thread_sp->GetID();
    selected_thread_sp->SetDefaultFileAndLineToSelectedFrame();
  } else
    m_selected_tid = LLDB_INVALID_THREAD_ID;

  if (notify)
    NotifySelectedThreadChanged(m_selected_tid);

  return m_selected_tid != LLDB_INVALID_THREAD_ID;
}

void ThreadList::NotifySelectedThreadChanged(lldb::tid_t tid) {
  ThreadSP selected_thread_sp(FindThreadByID(tid));
  if (selected_thread_sp->EventTypeHasListeners(
          Thread::eBroadcastBitThreadSelected))
    selected_thread_sp->BroadcastEvent(
        Thread::eBroadcastBitThreadSelected,
        new Thread::ThreadEventData(selected_thread_sp));
}

void ThreadList::Update(ThreadList &rhs) {
  if (this != &rhs) {
    // Lock both mutexes to make sure neither side changes anyone on us while
    // the assignment occurs
    std::lock_guard<std::recursive_mutex> guard(GetMutex());

    m_process = rhs.m_process;
    m_stop_id = rhs.m_stop_id;
    m_threads.swap(rhs.m_threads);
    m_selected_tid = rhs.m_selected_tid;

    // Now we look for threads that we are done with and make sure to clear
    // them up as much as possible so anyone with a shared pointer will still
    // have a reference, but the thread won't be of much use. Using
    // std::weak_ptr for all backward references (such as a thread to a
    // process) will eventually solve this issue for us, but for now, we need
    // to work around the issue.
    collection::iterator rhs_pos, rhs_end = rhs.m_threads.end();
    for (rhs_pos = rhs.m_threads.begin(); rhs_pos != rhs_end; ++rhs_pos) {
      const lldb::tid_t tid = (*rhs_pos)->GetID();
      bool thread_is_alive = false;
      const uint32_t num_threads = m_threads.size();
      for (uint32_t idx = 0; idx < num_threads; ++idx) {
        ThreadSP backing_thread = m_threads[idx]->GetBackingThread();
        if (m_threads[idx]->GetID() == tid ||
            (backing_thread && backing_thread->GetID() == tid)) {
          thread_is_alive = true;
          break;
        }
      }
      if (!thread_is_alive)
        (*rhs_pos)->DestroyThread();
    }
  }
}

void ThreadList::Flush() {
  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  collection::iterator pos, end = m_threads.end();
  for (pos = m_threads.begin(); pos != end; ++pos)
    (*pos)->Flush();
}

std::recursive_mutex &ThreadList::GetMutex() const {
  return m_process->m_thread_mutex;
}

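// RAII helper: pushing the thread's tid in this constructor is balanced by
// the destructor (see ThreadList.h), which pops the same tid again. A minimal
// usage sketch, assuming an existing ThreadSP for the thread that should run
// the expression:
//
//   {
//     ThreadList::ExpressionExecutionThreadPusher pusher(thread_sp);
//     // ... evaluate the expression; GetExpressionExecutionThread() now
//     // returns thread_sp ...
//   }   // pusher's destructor pops the tid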
ThreadList::ExpressionExecutionThreadPusher::ExpressionExecutionThreadPusher(
    lldb::ThreadSP thread_sp)
    : m_thread_list(nullptr), m_tid(LLDB_INVALID_THREAD_ID) {
  if (thread_sp) {
    m_tid = thread_sp->GetID();
    m_thread_list = &thread_sp->GetProcess()->GetThreadList();
    m_thread_list->PushExpressionExecutionThread(m_tid);
  }
}