//===-- ThreadList.cpp ------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include <stdlib.h>

#include <algorithm>

#include "lldb/Target/ThreadList.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/ThreadPlan.h"
#include "lldb/Target/Process.h"

using namespace lldb;
using namespace lldb_private;

ThreadList::ThreadList (Process *process) :
    m_process (process),
    m_stop_id (0),
    m_threads(),
    m_threads_mutex (Mutex::eMutexTypeRecursive),
    m_current_tid (LLDB_INVALID_THREAD_ID)
{
}

ThreadList::ThreadList (const ThreadList &rhs) :
    m_process (),
    m_stop_id (),
    m_threads (),
    m_threads_mutex (Mutex::eMutexTypeRecursive),
    m_current_tid ()
{
    // Use the assignment operator since it uses the mutex.
    *this = rhs;
}

const ThreadList&
ThreadList::operator = (const ThreadList& rhs)
{
    if (this != &rhs)
    {
        // Lock both mutexes to make sure neither side changes anything on us
        // while the assignment occurs.
        Mutex::Locker locker_this(m_threads_mutex);
        Mutex::Locker locker_rhs(rhs.m_threads_mutex);
        m_process = rhs.m_process;
        m_stop_id = rhs.m_stop_id;
        m_threads = rhs.m_threads;
        m_current_tid = rhs.m_current_tid;
    }
    return *this;
}

ThreadList::~ThreadList()
{
}

uint32_t
ThreadList::GetStopID () const
{
    return m_stop_id;
}

void
ThreadList::SetStopID (uint32_t stop_id)
{
    m_stop_id = stop_id;
}

void
ThreadList::AddThread (ThreadSP &thread_sp)
{
    Mutex::Locker locker(m_threads_mutex);
    m_threads.push_back(thread_sp);
}

uint32_t
ThreadList::GetSize (bool can_update)
{
    Mutex::Locker locker(m_threads_mutex);
    if (can_update)
        m_process->UpdateThreadListIfNeeded();
    return m_threads.size();
}

ThreadSP
ThreadList::GetThreadAtIndex (uint32_t idx, bool can_update)
{
    Mutex::Locker locker(m_threads_mutex);
    if (can_update)
        m_process->UpdateThreadListIfNeeded();

    ThreadSP thread_sp;
    if (idx < m_threads.size())
        thread_sp = m_threads[idx];
    return thread_sp;
}

ThreadSP
ThreadList::FindThreadByID (lldb::tid_t tid, bool can_update)
{
    Mutex::Locker locker(m_threads_mutex);

    if (can_update)
        m_process->UpdateThreadListIfNeeded();

    ThreadSP thread_sp;
    uint32_t idx = 0;
    const uint32_t num_threads = m_threads.size();
    for (idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->GetID() == tid)
        {
            thread_sp = m_threads[idx];
            break;
        }
    }
    return thread_sp;
}

ThreadSP
ThreadList::GetThreadSPForThreadPtr (Thread *thread_ptr)
{
    ThreadSP thread_sp;
    if (thread_ptr)
    {
        Mutex::Locker locker(m_threads_mutex);

        uint32_t idx = 0;
        const uint32_t num_threads = m_threads.size();
        for (idx = 0; idx < num_threads; ++idx)
        {
            if (m_threads[idx].get() == thread_ptr)
            {
                thread_sp = m_threads[idx];
                break;
            }
        }
    }
    return thread_sp;
}

ThreadSP
ThreadList::FindThreadByIndexID (uint32_t index_id, bool can_update)
{
    Mutex::Locker locker(m_threads_mutex);

    if (can_update)
        m_process->UpdateThreadListIfNeeded();

    ThreadSP thread_sp;
    const uint32_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx)
    {
        if (m_threads[idx]->GetIndexID() == index_id)
        {
            thread_sp = m_threads[idx];
            break;
        }
    }
    return thread_sp;
}

bool
ThreadList::ShouldStop (Event *event_ptr)
{
    Mutex::Locker locker(m_threads_mutex);

    // Running events should never stop, obviously...

    bool should_stop = false;
    m_process->UpdateThreadListIfNeeded();

    collection::iterator pos, end = m_threads.end();

    // Run through the threads and ask whether we should stop. Don't ask
    // suspended threads, however; it makes more sense for them to preserve
    // their state across the times the process runs when they don't get a
    // chance to run.
    for (pos = m_threads.begin(); pos != end; ++pos)
    {
        ThreadSP thread_sp(*pos);
        if ((thread_sp->GetResumeState () != eStateSuspended) && (thread_sp->ThreadStoppedForAReason()))
        {
            should_stop |= thread_sp->ShouldStop(event_ptr);
        }
    }

    if (should_stop)
    {
        for (pos = m_threads.begin(); pos != end; ++pos)
        {
            ThreadSP thread_sp(*pos);
            thread_sp->WillStop ();
        }
    }

    return should_stop;
}

Vote
ThreadList::ShouldReportStop (Event *event_ptr)
{
    Vote result = eVoteNoOpinion;
    m_process->UpdateThreadListIfNeeded();
    collection::iterator pos, end = m_threads.end();

    // Run through the threads and ask whether we should report this event.
    // For stopping, a YES vote wins over everything.  A NO vote wins over no opinion.
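    // For example, if one thread answers eVoteYes and a later thread answers
    // eVoteNo, the result stays eVoteYes, because a NO vote is only recorded
    // below while the result is still eVoteNoOpinion.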
    for (pos = m_threads.begin(); pos != end; ++pos)
    {
        ThreadSP thread_sp(*pos);
        if (thread_sp->ThreadStoppedForAReason() && (thread_sp->GetResumeState () != eStateSuspended))
        {
            switch (thread_sp->ShouldReportStop (event_ptr))
            {
                case eVoteNoOpinion:
                    continue;
                case eVoteYes:
                    result = eVoteYes;
                    break;
                case eVoteNo:
                    if (result == eVoteNoOpinion)
                        result = eVoteNo;
                    break;
            }
        }
    }
    return result;
}

Vote
ThreadList::ShouldReportRun (Event *event_ptr)
{
    Vote result = eVoteNoOpinion;
    m_process->UpdateThreadListIfNeeded();
    collection::iterator pos, end = m_threads.end();

    // Run through the threads and ask whether we should report this event.
    // The rule is a NO vote wins over everything, and a YES vote wins over no opinion.

    for (pos = m_threads.begin(); pos != end; ++pos)
    {
        ThreadSP thread_sp(*pos);
        if (thread_sp->GetResumeState () != eStateSuspended)
        {
            switch (thread_sp->ShouldReportRun (event_ptr))
            {
                case eVoteNoOpinion:
                    continue;
                case eVoteYes:
                    if (result == eVoteNoOpinion)
                        result = eVoteYes;
                    break;
                case eVoteNo:
                    result = eVoteNo;
                    break;
            }
        }
    }
    return result;
}

void
ThreadList::Clear()
{
    m_stop_id = 0;
    m_threads.clear();
    m_current_tid = LLDB_INVALID_THREAD_ID;
}

void
ThreadList::RefreshStateAfterStop ()
{
    Mutex::Locker locker(m_threads_mutex);

    m_process->UpdateThreadListIfNeeded();

    collection::iterator pos, end = m_threads.end();
    for (pos = m_threads.begin(); pos != end; ++pos)
        (*pos)->RefreshStateAfterStop ();
}

void
ThreadList::DiscardThreadPlans ()
{
    // You don't need to update the thread list here, because only threads
    // that you currently know about have any thread plans.
    Mutex::Locker locker(m_threads_mutex);

    collection::iterator pos, end = m_threads.end();
    for (pos = m_threads.begin(); pos != end; ++pos)
        (*pos)->DiscardThreadPlans (true);
}

bool
ThreadList::WillResume ()
{
    // Run through the threads and perform their momentary actions.  But we
    // only do this for threads that are running; user-suspended threads stay
    // where they are.
    bool success = true;

    Mutex::Locker locker(m_threads_mutex);
    m_process->UpdateThreadListIfNeeded();

    collection::iterator pos, end = m_threads.end();

    // Give all the threads a last chance to set up their state before we
    // negotiate who is actually going to get a chance to run...
    for (pos = m_threads.begin(); pos != end; ++pos)
        (*pos)->SetupForResume ();

    // Now go through the threads and see if any thread wants to run just
    // itself.  If so, then pick one and run it.
    ThreadList run_me_only_list (m_process);

    run_me_only_list.SetStopID(m_process->GetStopID());

    ThreadSP immediate_thread_sp;
    bool run_only_current_thread = false;

    for (pos = m_threads.begin(); pos != end; ++pos)
    {
        ThreadSP thread_sp(*pos);
        if (thread_sp->GetCurrentPlan()->IsImmediate())
        {
            // We first do all the immediate plans, so if we find one, set
            // immediate_thread_sp and break out, and we'll pick it up first thing
            // when we're negotiating which threads get to run.
            immediate_thread_sp = thread_sp;
            break;
        }
        else if (thread_sp->GetResumeState() != eStateSuspended &&
                 thread_sp->GetCurrentPlan()->StopOthers())
        {
            // You can't say "stop others" and also want yourself to be suspended.
            assert (thread_sp->GetCurrentPlan()->RunState() != eStateSuspended);

            if (thread_sp == GetCurrentThread())
            {
                run_only_current_thread = true;
                run_me_only_list.Clear();
                run_me_only_list.AddThread (thread_sp);
                break;
            }

            run_me_only_list.AddThread (thread_sp);
        }
    }

    if (immediate_thread_sp)
    {
        for (pos = m_threads.begin(); pos != end; ++pos)
        {
            ThreadSP thread_sp(*pos);
            if (thread_sp.get() == immediate_thread_sp.get())
                thread_sp->WillResume(thread_sp->GetCurrentPlan()->RunState());
            else
                thread_sp->WillResume (eStateSuspended);
        }
    }
    else if (run_me_only_list.GetSize (false) == 0)
    {
        // Everybody runs as they wish:
        for (pos = m_threads.begin(); pos != end; ++pos)
        {
            ThreadSP thread_sp(*pos);
            thread_sp->WillResume(thread_sp->GetCurrentPlan()->RunState());
        }
    }
    else
    {
        ThreadSP thread_to_run;

        if (run_only_current_thread)
        {
            thread_to_run = GetCurrentThread();
        }
        else if (run_me_only_list.GetSize (false) == 1)
        {
            thread_to_run = run_me_only_list.GetThreadAtIndex (0);
        }
        else
        {
            int random_thread = (int)
                    ((run_me_only_list.GetSize (false) * (double) rand ()) / (RAND_MAX + 1.0));
            thread_to_run = run_me_only_list.GetThreadAtIndex (random_thread);
        }

        for (pos = m_threads.begin(); pos != end; ++pos)
        {
            ThreadSP thread_sp(*pos);
            if (thread_sp == thread_to_run)
                thread_sp->WillResume(thread_sp->GetCurrentPlan()->RunState());
            else
                thread_sp->WillResume (eStateSuspended);
        }
    }

    return success;
}

void
ThreadList::DidResume ()
{
    collection::iterator pos, end = m_threads.end();
    for (pos = m_threads.begin(); pos != end; ++pos)
    {
        // Don't clear out threads that aren't going to get a chance to run, rather
        // leave their state for the next time around.
        ThreadSP thread_sp(*pos);
        if (thread_sp->GetResumeState() != eStateSuspended)
            thread_sp->DidResume ();
    }
}

ThreadSP
ThreadList::GetCurrentThread ()
{
    Mutex::Locker locker(m_threads_mutex);
    return FindThreadByID(m_current_tid);
}

bool
ThreadList::SetCurrentThreadByID (lldb::tid_t tid)
{
    Mutex::Locker locker(m_threads_mutex);
    if (FindThreadByID(tid).get())
        m_current_tid = tid;
    else
        m_current_tid = LLDB_INVALID_THREAD_ID;

    return m_current_tid != LLDB_INVALID_THREAD_ID;
}

bool
ThreadList::SetCurrentThreadByIndexID (uint32_t index_id)
{
    Mutex::Locker locker(m_threads_mutex);
    ThreadSP thread_sp (FindThreadByIndexID(index_id));
    if (thread_sp.get())
        m_current_tid = thread_sp->GetID();
    else
        m_current_tid = LLDB_INVALID_THREAD_ID;

    return m_current_tid != LLDB_INVALID_THREAD_ID;
}