//===-- NativeProcessProtocol.cpp -------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "lldb/Host/common/NativeProcessProtocol.h"
#include "lldb/Core/State.h"
#include "lldb/Host/Host.h"
#include "lldb/Host/common/NativeRegisterContext.h"
#include "lldb/Host/common/NativeThreadProtocol.h"
#include "lldb/Host/common/SoftwareBreakpoint.h"
#include "lldb/Target/Process.h"
#include "lldb/Utility/LLDBAssert.h"
#include "lldb/Utility/Log.h"
#include "lldb/lldb-enumerations.h"

using namespace lldb;
using namespace lldb_private;

// -----------------------------------------------------------------------------
// NativeProcessProtocol Members
// -----------------------------------------------------------------------------

NativeProcessProtocol::NativeProcessProtocol(lldb::pid_t pid, int terminal_fd,
                                             NativeDelegate &delegate)
    : m_pid(pid), m_terminal_fd(terminal_fd) {
  bool registered = RegisterNativeDelegate(delegate);
  assert(registered);
  (void)registered;
}

lldb_private::Status NativeProcessProtocol::Interrupt() {
  Status error;
#if !defined(SIGSTOP)
  error.SetErrorString("local host does not support signaling");
  return error;
#else
  return Signal(SIGSTOP);
#endif
}

Status NativeProcessProtocol::IgnoreSignals(llvm::ArrayRef<int> signals) {
  m_signals_to_ignore.clear();
  m_signals_to_ignore.insert(signals.begin(), signals.end());
  return Status();
}

lldb_private::Status
NativeProcessProtocol::GetMemoryRegionInfo(lldb::addr_t load_addr,
                                           MemoryRegionInfo &range_info) {
  // Default: not implemented.
  return Status("not implemented");
}

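// GetMemoryRegionInfo() above is only the base-class default; platform
// plugins override it to answer the query for real. As a rough, hypothetical
// sketch of the shape such an override takes (NativeProcessFoo is not a real
// class, and the data source is platform specific):
//
//   Status NativeProcessFoo::GetMemoryRegionInfo(lldb::addr_t load_addr,
//                                                MemoryRegionInfo &range_info) {
//     // Look up the region containing load_addr in the OS's memory map,
//     // fill in range_info's range and permissions, ...
//     return Status();
//   }
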
llvm::Optional<WaitStatus> NativeProcessProtocol::GetExitStatus() {
  if (m_state == lldb::eStateExited)
    return m_exit_status;

  return llvm::None;
}

bool NativeProcessProtocol::SetExitStatus(WaitStatus status,
                                          bool bNotifyStateChange) {
  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  LLDB_LOG(log, "status = {0}, notify = {1}", status, bNotifyStateChange);

  // Exit status already set.
  if (m_state == lldb::eStateExited) {
    if (m_exit_status)
      LLDB_LOG(log, "exit status already set to {0}", *m_exit_status);
    else
      LLDB_LOG(log, "state is exited, but status not set");
    return false;
  }

  m_state = lldb::eStateExited;
  m_exit_status = status;

  if (bNotifyStateChange)
    SynchronouslyNotifyProcessStateChanged(lldb::eStateExited);

  return true;
}

NativeThreadProtocol *NativeProcessProtocol::GetThreadAtIndex(uint32_t idx) {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  if (idx < m_threads.size())
    return m_threads[idx].get();
  return nullptr;
}

NativeThreadProtocol *
NativeProcessProtocol::GetThreadByIDUnlocked(lldb::tid_t tid) {
  for (const auto &thread : m_threads) {
    if (thread->GetID() == tid)
      return thread.get();
  }
  return nullptr;
}

NativeThreadProtocol *NativeProcessProtocol::GetThreadByID(lldb::tid_t tid) {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  return GetThreadByIDUnlocked(tid);
}

bool NativeProcessProtocol::IsAlive() const {
  return m_state != eStateDetached && m_state != eStateExited &&
         m_state != eStateInvalid && m_state != eStateUnloaded;
}

const NativeWatchpointList::WatchpointMap &
NativeProcessProtocol::GetWatchpointMap() const {
  return m_watchpoint_list.GetWatchpointMap();
}

llvm::Optional<std::pair<uint32_t, uint32_t>>
NativeProcessProtocol::GetHardwareDebugSupportInfo() const {
  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));

  // Get any thread.
  NativeThreadProtocol *thread(
      const_cast<NativeProcessProtocol *>(this)->GetThreadAtIndex(0));
  if (!thread) {
    LLDB_LOG(log, "failed to find a thread to grab a NativeRegisterContext!");
    return llvm::None;
  }

  NativeRegisterContext &reg_ctx = thread->GetRegisterContext();
  return std::make_pair(reg_ctx.NumSupportedHardwareBreakpoints(),
                        reg_ctx.NumSupportedHardwareWatchpoints());
}

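// A sketch of how a caller might consume the pair returned above (first =
// number of hardware breakpoints, second = number of hardware watchpoints).
// The real consumers live outside this file, so treat this as illustrative
// only, given some NativeProcessProtocol &process:
//
//   if (auto info = process.GetHardwareDebugSupportInfo()) {
//     uint32_t num_hw_breakpoints = info->first;
//     uint32_t num_hw_watchpoints = info->second;
//     // ... report the capabilities to the client ...
//   }
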
Status NativeProcessProtocol::SetWatchpoint(lldb::addr_t addr, size_t size,
                                            uint32_t watch_flags,
                                            bool hardware) {
  // This default implementation assumes setting the watchpoint for the process
  // will require setting the watchpoint for each of the threads. Furthermore,
  // it will track watchpoints set for the process and will add them to each
  // thread that is attached to via the (FIXME implement) OnThreadAttached()
  // method.

  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));

  // Update the thread list.
  UpdateThreads();

  // Keep track of the threads we successfully set the watchpoint for. If one
  // of the thread watchpoint setting operations fails, back off and remove the
  // watchpoint for all the threads that were successfully set so we get back
  // to a consistent state.
  std::vector<NativeThreadProtocol *> watchpoint_established_threads;

  // Tell each thread to set a watchpoint. In the event that hardware
  // watchpoints are requested but the SetWatchpoint fails, try to set a
  // software watchpoint as a fallback. It's conceivable that if there are
  // more threads than hardware watchpoints available, some of the threads will
  // fail to set hardware watchpoints while software ones may be available.
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  for (const auto &thread : m_threads) {
    assert(thread && "thread list should not have a NULL thread!");

    Status thread_error =
        thread->SetWatchpoint(addr, size, watch_flags, hardware);
    if (thread_error.Fail() && hardware) {
      // Try software watchpoints since we failed on hardware watchpoint
      // setting and we may have just run out of hardware watchpoints.
      thread_error = thread->SetWatchpoint(addr, size, watch_flags, false);
      if (thread_error.Success())
        LLDB_LOG(log,
                 "hardware watchpoint requested but software watchpoint set");
    }

    if (thread_error.Success()) {
      // Remember that we set this watchpoint successfully in case we need to
      // clear it later.
      watchpoint_established_threads.push_back(thread.get());
    } else {
      // Unset the watchpoint for each thread we successfully set so that we
      // get back to a consistent state of "not set" for the watchpoint.
      for (auto unwatch_thread_sp : watchpoint_established_threads) {
        Status remove_error = unwatch_thread_sp->RemoveWatchpoint(addr);
        if (remove_error.Fail())
          LLDB_LOG(log, "RemoveWatchpoint failed for pid={0}, tid={1}: {2}",
                   GetID(), unwatch_thread_sp->GetID(), remove_error);
      }

      return thread_error;
    }
  }
  return m_watchpoint_list.Add(addr, size, watch_flags, hardware);
}

Status NativeProcessProtocol::RemoveWatchpoint(lldb::addr_t addr) {
  // Update the thread list.
  UpdateThreads();

  Status overall_error;

  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  for (const auto &thread : m_threads) {
    assert(thread && "thread list should not have a NULL thread!");

    const Status thread_error = thread->RemoveWatchpoint(addr);
    if (thread_error.Fail()) {
      // Keep track of the first thread error if any threads fail. We want to
      // try to remove the watchpoint from every thread, though, even if one
      // or more have errors.
      if (!overall_error.Fail())
        overall_error = thread_error;
    }
  }
  const Status error = m_watchpoint_list.Remove(addr);
  return overall_error.Fail() ? overall_error : error;
}

const HardwareBreakpointMap &
NativeProcessProtocol::GetHardwareBreakpointMap() const {
  return m_hw_breakpoints_map;
}

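// Caller-side shape of the default watchpoint implementation above, as an
// illustrative sketch only (the literal size/flags values are made up; the
// real callers are the debug-server components that own this process object):
//
//   Status error = process.SetWatchpoint(addr, /*size=*/4, watch_flags,
//                                        /*hardware=*/true);
//   if (error.Fail()) {
//     // No thread could take the watchpoint; the rollback above guarantees
//     // nothing was left half-set.
//   }
//   ...
//   error = process.RemoveWatchpoint(addr);
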
Status NativeProcessProtocol::SetHardwareBreakpoint(lldb::addr_t addr,
                                                    size_t size) {
  // This default implementation assumes setting a hardware breakpoint for this
  // process will require setting the same hardware breakpoint for each of its
  // existing threads. New threads will do the same once created.
  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));

  // Update the thread list.
  UpdateThreads();

  // Exit here if the target does not have the required hardware breakpoint
  // capability.
  auto hw_debug_cap = GetHardwareDebugSupportInfo();

  if (hw_debug_cap == llvm::None || hw_debug_cap->first == 0 ||
      hw_debug_cap->first <= m_hw_breakpoints_map.size())
    return Status(
        "Target does not have required number of hardware breakpoints");

  // The vector below stores all thread pointers for which we have successfully
  // set this hardware breakpoint. If any of the current process threads fails
  // to set this hardware breakpoint, roll back and remove the breakpoint for
  // all the threads that had already set it successfully.
  std::vector<NativeThreadProtocol *> breakpoint_established_threads;

  // Request to set a hardware breakpoint for each of the current process
  // threads.
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  for (const auto &thread : m_threads) {
    assert(thread && "thread list should not have a NULL thread!");

    Status thread_error = thread->SetHardwareBreakpoint(addr, size);
    if (thread_error.Success()) {
      // Remember that we set this breakpoint successfully in case we need to
      // clear it later.
      breakpoint_established_threads.push_back(thread.get());
    } else {
      // Unset the breakpoint for each thread we successfully set so that we
      // get back to a consistent state of "not set" for this hardware
      // breakpoint.
      for (auto rollback_thread_sp : breakpoint_established_threads) {
        Status remove_error =
            rollback_thread_sp->RemoveHardwareBreakpoint(addr);
        if (remove_error.Fail())
          LLDB_LOG(log,
                   "RemoveHardwareBreakpoint failed for pid={0}, tid={1}: {2}",
                   GetID(), rollback_thread_sp->GetID(), remove_error);
      }

      return thread_error;
    }
  }

  // Register the new hardware breakpoint in the hardware breakpoint map of
  // the current process.
  m_hw_breakpoints_map[addr] = {addr, size};

  return Status();
}

Status NativeProcessProtocol::RemoveHardwareBreakpoint(lldb::addr_t addr) {
  // Update the thread list.
  UpdateThreads();

  Status error;

  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  for (const auto &thread : m_threads) {
    assert(thread && "thread list should not have a NULL thread!");
    error = thread->RemoveHardwareBreakpoint(addr);
  }

  // Also remove it from the hardware breakpoint map of the current process.
  m_hw_breakpoints_map.erase(addr);

  return error;
}

bool NativeProcessProtocol::RegisterNativeDelegate(
    NativeDelegate &native_delegate) {
  std::lock_guard<std::recursive_mutex> guard(m_delegates_mutex);
  if (std::find(m_delegates.begin(), m_delegates.end(), &native_delegate) !=
      m_delegates.end())
    return false;

  m_delegates.push_back(&native_delegate);
  native_delegate.InitializeDelegate(this);
  return true;
}

bool NativeProcessProtocol::UnregisterNativeDelegate(
    NativeDelegate &native_delegate) {
  std::lock_guard<std::recursive_mutex> guard(m_delegates_mutex);

  const auto initial_size = m_delegates.size();
  m_delegates.erase(
      std::remove(m_delegates.begin(), m_delegates.end(), &native_delegate),
      m_delegates.end());

  // We removed the delegate if the count of delegates shrank after removing
  // all copies of the given native_delegate from the vector.
  return m_delegates.size() < initial_size;
}

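// State changes and exec notifications only ever reach the outside world
// through the NativeDelegate interface used below. A minimal delegate, with
// signatures inferred from the call sites in this file (MyDelegate is a
// hypothetical name; the in-tree delegate is typically provided by the debug
// server), might look like:
//
//   class MyDelegate : public NativeProcessProtocol::NativeDelegate {
//   public:
//     void InitializeDelegate(NativeProcessProtocol *process) override {}
//     void ProcessStateChanged(NativeProcessProtocol *process,
//                              lldb::StateType state) override {
//       // React to stops, exits, etc. here.
//     }
//     void DidExec(NativeProcessProtocol *process) override {}
//   };
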
void NativeProcessProtocol::SynchronouslyNotifyProcessStateChanged(
    lldb::StateType state) {
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));

  std::lock_guard<std::recursive_mutex> guard(m_delegates_mutex);
  for (auto native_delegate : m_delegates)
    native_delegate->ProcessStateChanged(this, state);

  if (log) {
    if (!m_delegates.empty()) {
      log->Printf("NativeProcessProtocol::%s: sent state notification [%s] "
                  "from process %" PRIu64,
                  __FUNCTION__, lldb_private::StateAsCString(state), GetID());
    } else {
      log->Printf("NativeProcessProtocol::%s: would send state notification "
                  "[%s] from process %" PRIu64 ", but no delegates",
                  __FUNCTION__, lldb_private::StateAsCString(state), GetID());
    }
  }
}

void NativeProcessProtocol::NotifyDidExec() {
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  if (log)
    log->Printf("NativeProcessProtocol::%s - preparing to call delegates",
                __FUNCTION__);

  {
    std::lock_guard<std::recursive_mutex> guard(m_delegates_mutex);
    for (auto native_delegate : m_delegates)
      native_delegate->DidExec(this);
  }
}

Status NativeProcessProtocol::SetSoftwareBreakpoint(lldb::addr_t addr,
                                                    uint32_t size_hint) {
  Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_BREAKPOINTS));
  if (log)
    log->Printf("NativeProcessProtocol::%s addr = 0x%" PRIx64, __FUNCTION__,
                addr);

  return m_breakpoint_list.AddRef(
      addr, size_hint, false,
      [this](lldb::addr_t addr, size_t size_hint, bool /* hardware */,
             NativeBreakpointSP &breakpoint_sp) -> Status {
        return SoftwareBreakpoint::CreateSoftwareBreakpoint(
            *this, addr, size_hint, breakpoint_sp);
      });
}

Status NativeProcessProtocol::RemoveBreakpoint(lldb::addr_t addr,
                                               bool hardware) {
  if (hardware)
    return RemoveHardwareBreakpoint(addr);
  else
    return m_breakpoint_list.DecRef(addr);
}

Status NativeProcessProtocol::EnableBreakpoint(lldb::addr_t addr) {
  return m_breakpoint_list.EnableBreakpoint(addr);
}

Status NativeProcessProtocol::DisableBreakpoint(lldb::addr_t addr) {
  return m_breakpoint_list.DisableBreakpoint(addr);
}

lldb::StateType NativeProcessProtocol::GetState() const {
  std::lock_guard<std::recursive_mutex> guard(m_state_mutex);
  return m_state;
}

void NativeProcessProtocol::SetState(lldb::StateType state,
                                     bool notify_delegates) {
  std::lock_guard<std::recursive_mutex> guard(m_state_mutex);

  if (state == m_state)
    return;

  m_state = state;

  if (StateIsStoppedState(state, false)) {
    ++m_stop_id;

    // Give the process a chance to do any stop-id bump processing, such as
    // clearing cached data that is invalidated each time the process runs.
    // Note: if/when we support some threads running, we'll end up needing to
    // manage this per thread and per process.
    DoStopIDBumped(m_stop_id);
  }

  // Optionally notify delegates of the state change.
  if (notify_delegates)
    SynchronouslyNotifyProcessStateChanged(state);
}

uint32_t NativeProcessProtocol::GetStopID() const {
  std::lock_guard<std::recursive_mutex> guard(m_state_mutex);
  return m_stop_id;
}

void NativeProcessProtocol::DoStopIDBumped(uint32_t /* newBumpId */) {
  // Default implementation does nothing.
}

NativeProcessProtocol::Factory::~Factory() = default;
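
// Note on the state machinery above: subclasses report process events by
// calling SetState(), which bumps the stop id for stopped states (letting
// DoStopIDBumped() invalidate per-stop caches) and then, when asked to notify,
// fans the new state out to every registered delegate via
// SynchronouslyNotifyProcessStateChanged(). A subclass that caches data per
// stop could override the no-op DoStopIDBumped() above, e.g. (hypothetical
// subclass and member name):
//
//   void NativeProcessFoo::DoStopIDBumped(uint32_t /*newBumpId*/) {
//     m_cached_memory_regions.clear();
//   }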