//===-- ThreadList.cpp ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include <stdlib.h>

#include <algorithm>

#include "lldb/Target/ThreadList.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/ThreadPlan.h"
#include "lldb/Target/Process.h"

using namespace lldb;
using namespace lldb_private;

ThreadList::ThreadList (Process *process) :
    m_process (process),
    m_stop_id (0),
    m_threads (),
    m_threads_mutex (Mutex::eMutexTypeRecursive),
    m_selected_tid (LLDB_INVALID_THREAD_ID)
{
}

ThreadList::ThreadList (const ThreadList &rhs) :
    m_process (),
    m_stop_id (),
    m_threads (),
    m_threads_mutex (Mutex::eMutexTypeRecursive),
    m_selected_tid ()
{
    // Use the assignment operator since it uses the mutex
    *this = rhs;
}

const ThreadList&
ThreadList::operator = (const ThreadList& rhs)
{
    if (this != &rhs)
    {
        // Lock both mutexes to make sure neither side changes anyone on us
        // while the assignment occurs
        Mutex::Locker locker_lhs(m_threads_mutex);
        Mutex::Locker locker_rhs(rhs.m_threads_mutex);
        m_process = rhs.m_process;
        m_stop_id = rhs.m_stop_id;
        m_threads = rhs.m_threads;
        m_selected_tid = rhs.m_selected_tid;
    }
    return *this;
}

ThreadList::~ThreadList()
{
}

uint32_t
ThreadList::GetStopID () const
{
    return m_stop_id;
}

void
ThreadList::SetStopID (uint32_t stop_id)
{
    m_stop_id = stop_id;
}

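// Most of the accessors below take m_threads_mutex before touching m_threads.
// The mutex is recursive, so a method that already holds it (for example
// GetSelectedThread) can call another locking method (FindThreadByID) without
// deadlocking.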
void
ThreadList::AddThread (ThreadSP &thread_sp)
{
    Mutex::Locker locker(m_threads_mutex);
    m_threads.push_back(thread_sp);
}

uint32_t
ThreadList::GetSize (bool can_update)
{
    Mutex::Locker locker(m_threads_mutex);
    if (can_update)
        m_process->UpdateThreadListIfNeeded();
    return m_threads.size();
}

ThreadSP
ThreadList::GetThreadAtIndex (uint32_t idx, bool can_update)
{
    Mutex::Locker locker(m_threads_mutex);
    if (can_update)
        m_process->UpdateThreadListIfNeeded();

    ThreadSP thread_sp;
    if (idx < m_threads.size())
        thread_sp = m_threads[idx];
    return thread_sp;
}

ThreadSP
ThreadList::FindThreadByID (lldb::tid_t tid, bool can_update)
{
    Mutex::Locker locker(m_threads_mutex);

    if (can_update)
        m_process->UpdateThreadListIfNeeded();

    ThreadSP thread_sp;
    const uint32_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->GetID() == tid)
        {
            thread_sp = m_threads[idx];
            break;
        }
    }
    return thread_sp;
}

ThreadSP
ThreadList::GetThreadSPForThreadPtr (Thread *thread_ptr)
{
    ThreadSP thread_sp;
    if (thread_ptr)
    {
        Mutex::Locker locker(m_threads_mutex);

        const uint32_t num_threads = m_threads.size();
        for (uint32_t idx = 0; idx < num_threads; ++idx)
        {
            if (m_threads[idx].get() == thread_ptr)
            {
                thread_sp = m_threads[idx];
                break;
            }
        }
    }
    return thread_sp;
}

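// Like FindThreadByID above, but matches threads by their index ID
// (Thread::GetIndexID) instead of their lldb::tid_t.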
ThreadSP
ThreadList::FindThreadByIndexID (uint32_t index_id, bool can_update)
{
    Mutex::Locker locker(m_threads_mutex);

    if (can_update)
        m_process->UpdateThreadListIfNeeded();

    ThreadSP thread_sp;
    const uint32_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->GetIndexID() == index_id)
        {
            thread_sp = m_threads[idx];
            break;
        }
    }
    return thread_sp;
}

bool
ThreadList::ShouldStop (Event *event_ptr)
{
    Mutex::Locker locker(m_threads_mutex);

    // Running events should never stop, obviously...

    bool should_stop = false;
    m_process->UpdateThreadListIfNeeded();

    collection::iterator pos, end = m_threads.end();

    // Run through the threads and ask whether we should stop. Don't ask
    // suspended threads, however; it makes more sense for them to preserve their
    // state across the times the process runs but they don't get a chance to.
    for (pos = m_threads.begin(); pos != end; ++pos)
    {
        ThreadSP thread_sp(*pos);
        if ((thread_sp->GetResumeState () != eStateSuspended) && (thread_sp->ThreadStoppedForAReason()))
        {
            should_stop |= thread_sp->ShouldStop(event_ptr);
        }
    }

    if (should_stop)
    {
        for (pos = m_threads.begin(); pos != end; ++pos)
        {
            ThreadSP thread_sp(*pos);
            thread_sp->WillStop ();
        }
    }

    return should_stop;
}

Vote
ThreadList::ShouldReportStop (Event *event_ptr)
{
    Vote result = eVoteNoOpinion;
    m_process->UpdateThreadListIfNeeded();
    collection::iterator pos, end = m_threads.end();

    // Run through the threads and ask whether we should report this event.
    // For stopping, a YES vote wins over everything. A NO vote wins over no opinion.
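    // For example: votes of { NoOpinion, No, Yes } combine to Yes, votes of
    // { NoOpinion, No } combine to No, and all NoOpinion leaves the result at
    // NoOpinion.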
    for (pos = m_threads.begin(); pos != end; ++pos)
    {
        ThreadSP thread_sp(*pos);
        if (thread_sp->ThreadStoppedForAReason() && (thread_sp->GetResumeState () != eStateSuspended))
        {
            switch (thread_sp->ShouldReportStop (event_ptr))
            {
                case eVoteNoOpinion:
                    continue;
                case eVoteYes:
                    result = eVoteYes;
                    break;
                case eVoteNo:
                    if (result == eVoteNoOpinion)
                        result = eVoteNo;
                    break;
            }
        }
    }
    return result;
}

Vote
ThreadList::ShouldReportRun (Event *event_ptr)
{
    Vote result = eVoteNoOpinion;
    m_process->UpdateThreadListIfNeeded();
    collection::iterator pos, end = m_threads.end();

    // Run through the threads and ask whether we should report this event.
    // The rule is that a NO vote wins over everything, and a YES vote wins over
    // no opinion.

    for (pos = m_threads.begin(); pos != end; ++pos)
    {
        ThreadSP thread_sp(*pos);
        if (thread_sp->GetResumeState () != eStateSuspended)
        {
            switch (thread_sp->ShouldReportRun (event_ptr))
            {
                case eVoteNoOpinion:
                    continue;
                case eVoteYes:
                    if (result == eVoteNoOpinion)
                        result = eVoteYes;
                    break;
                case eVoteNo:
                    result = eVoteNo;
                    break;
            }
        }
    }
    return result;
}

void
ThreadList::Clear()
{
    m_stop_id = 0;
    m_threads.clear();
    m_selected_tid = LLDB_INVALID_THREAD_ID;
}

void
ThreadList::RefreshStateAfterStop ()
{
    Mutex::Locker locker(m_threads_mutex);

    m_process->UpdateThreadListIfNeeded();

    collection::iterator pos, end = m_threads.end();
    for (pos = m_threads.begin(); pos != end; ++pos)
        (*pos)->RefreshStateAfterStop ();
}

void
ThreadList::DiscardThreadPlans ()
{
    // You don't need to update the thread list here, because only threads
    // that you currently know about have any thread plans.
    Mutex::Locker locker(m_threads_mutex);

    collection::iterator pos, end = m_threads.end();
    for (pos = m_threads.begin(); pos != end; ++pos)
        (*pos)->DiscardThreadPlans (true);
}

bool
ThreadList::WillResume ()
{
    // Run through the threads and perform their momentary actions.
    // But we only do this for threads that are running; user-suspended
    // threads stay where they are.
    bool success = true;

    Mutex::Locker locker(m_threads_mutex);
    m_process->UpdateThreadListIfNeeded();

    collection::iterator pos, end = m_threads.end();

    // See if any thread wants to run while stopping the others. If so, we won't
    // set up the other threads for resume, since they aren't going to get a chance
    // to run. This is necessary because SetupForResume might add "StopOthers"
    // plans which would then become part of the who-gets-to-run negotiation, but
    // they're coming in after the fact, and the threads that are already set up
    // should take priority.

    bool wants_solo_run = false;

    for (pos = m_threads.begin(); pos != end; ++pos)
    {
        if ((*pos)->GetResumeState() != eStateSuspended &&
            (*pos)->GetCurrentPlan()->StopOthers())
        {
            wants_solo_run = true;
            break;
        }
    }

    // Give all the threads that are likely to run a last chance to set up their
    // state before we negotiate who actually gets to run...
    // Don't do this for suspended threads, and if any thread wanted to stop the
    // others, only call SetupForResume on the threads that request StopOthers...

    for (pos = m_threads.begin(); pos != end; ++pos)
    {
        if ((*pos)->GetResumeState() != eStateSuspended
            && (!wants_solo_run || (*pos)->GetCurrentPlan()->StopOthers()))
        {
            (*pos)->SetupForResume ();
        }
    }

    // Now go through the threads and see if any thread wants to run just itself.
    // If so, pick one and run it.
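    // The priority worked out below is: a thread whose current plan is immediate
    // runs by itself; otherwise, if the selected thread wants to stop the others,
    // it runs alone; otherwise one of the "stop others" threads is chosen (at
    // random when there is more than one); and if no thread asked to stop the
    // others, every thread is resumed with its own plan's run state.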

    ThreadList run_me_only_list (m_process);

    run_me_only_list.SetStopID(m_process->GetStopID());

    ThreadSP immediate_thread_sp;
    bool run_only_current_thread = false;

    for (pos = m_threads.begin(); pos != end; ++pos)
    {
        ThreadSP thread_sp(*pos);
        if (thread_sp->GetCurrentPlan()->IsImmediate())
        {
            // We first do all the immediate plans, so if we find one, set
            // immediate_thread_sp and break out, and we'll pick it up first thing
            // when we're negotiating which threads get to run.
            immediate_thread_sp = thread_sp;
            break;
        }
        else if (thread_sp->GetResumeState() != eStateSuspended &&
                 thread_sp->GetCurrentPlan()->StopOthers())
        {
            // You can't say "stop others" and also want yourself to be suspended.
            assert (thread_sp->GetCurrentPlan()->RunState() != eStateSuspended);

            if (thread_sp == GetSelectedThread())
            {
                run_only_current_thread = true;
                run_me_only_list.Clear();
                run_me_only_list.AddThread (thread_sp);
                break;
            }

            run_me_only_list.AddThread (thread_sp);
        }
    }

    if (immediate_thread_sp)
    {
        for (pos = m_threads.begin(); pos != end; ++pos)
        {
            ThreadSP thread_sp(*pos);
            if (thread_sp.get() == immediate_thread_sp.get())
                thread_sp->WillResume(thread_sp->GetCurrentPlan()->RunState());
            else
                thread_sp->WillResume (eStateSuspended);
        }
    }
    else if (run_me_only_list.GetSize (false) == 0)
    {
        // Everybody runs as they wish:
        for (pos = m_threads.begin(); pos != end; ++pos)
        {
            ThreadSP thread_sp(*pos);
            thread_sp->WillResume(thread_sp->GetCurrentPlan()->RunState());
        }
    }
    else
    {
        ThreadSP thread_to_run;

        if (run_only_current_thread)
        {
            thread_to_run = GetSelectedThread();
        }
        else if (run_me_only_list.GetSize (false) == 1)
        {
            thread_to_run = run_me_only_list.GetThreadAtIndex (0);
        }
        else
        {
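            // More than one thread asked to run while stopping the others, so
            // pick one of them from run_me_only_list at random.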
            int random_thread = (int)
                    ((run_me_only_list.GetSize (false) * (double) rand ()) / (RAND_MAX + 1.0));
            thread_to_run = run_me_only_list.GetThreadAtIndex (random_thread);
        }

        for (pos = m_threads.begin(); pos != end; ++pos)
        {
            ThreadSP thread_sp(*pos);
            if (thread_sp == thread_to_run)
                thread_sp->WillResume(thread_sp->GetCurrentPlan()->RunState());
            else
                thread_sp->WillResume (eStateSuspended);
        }
    }

    return success;
}

void
ThreadList::DidResume ()
{
    collection::iterator pos, end = m_threads.end();
    for (pos = m_threads.begin(); pos != end; ++pos)
    {
        // Don't clear out threads that aren't going to get a chance to run;
        // rather, leave their state for the next time around.
        ThreadSP thread_sp(*pos);
        if (thread_sp->GetResumeState() != eStateSuspended)
            thread_sp->DidResume ();
    }
}

ThreadSP
ThreadList::GetSelectedThread ()
{
    Mutex::Locker locker(m_threads_mutex);
    return FindThreadByID(m_selected_tid);
}

bool
ThreadList::SetSelectedThreadByID (lldb::tid_t tid)
{
    Mutex::Locker locker(m_threads_mutex);
    if (FindThreadByID(tid).get())
        m_selected_tid = tid;
    else
        m_selected_tid = LLDB_INVALID_THREAD_ID;

    return m_selected_tid != LLDB_INVALID_THREAD_ID;
}

bool
ThreadList::SetSelectedThreadByIndexID (uint32_t index_id)
{
    Mutex::Locker locker(m_threads_mutex);
    ThreadSP thread_sp (FindThreadByIndexID(index_id));
    if (thread_sp.get())
        m_selected_tid = thread_sp->GetID();
    else
        m_selected_tid = LLDB_INVALID_THREAD_ID;

    return m_selected_tid != LLDB_INVALID_THREAD_ID;
}