//===-- NativeProcessProtocol.cpp -------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "lldb/Host/common/NativeProcessProtocol.h"

#include "lldb/Core/ArchSpec.h"
#include "lldb/Core/ModuleSpec.h"
#include "lldb/Core/State.h"
#include "lldb/Host/Host.h"
#include "lldb/Host/common/NativeRegisterContext.h"
#include "lldb/Host/common/NativeThreadProtocol.h"
#include "lldb/Host/common/SoftwareBreakpoint.h"
#include "lldb/Symbol/ObjectFile.h"
#include "lldb/Target/Process.h"
#include "lldb/Utility/LLDBAssert.h"
#include "lldb/Utility/Log.h"
#include "lldb/lldb-enumerations.h"

using namespace lldb;
using namespace lldb_private;

// -----------------------------------------------------------------------------
// NativeProcessProtocol Members
// -----------------------------------------------------------------------------

NativeProcessProtocol::NativeProcessProtocol(lldb::pid_t pid)
    : m_pid(pid), m_threads(), m_current_thread_id(LLDB_INVALID_THREAD_ID),
      m_threads_mutex(), m_state(lldb::eStateInvalid), m_state_mutex(),
      m_delegates_mutex(), m_delegates(), m_breakpoint_list(),
      m_watchpoint_list(), m_terminal_fd(-1), m_stop_id(0) {}

lldb_private::Status NativeProcessProtocol::Interrupt() {
  Status error;
#if !defined(SIGSTOP)
  error.SetErrorString("local host does not support signaling");
  return error;
#else
  return Signal(SIGSTOP);
#endif
}

Status NativeProcessProtocol::IgnoreSignals(llvm::ArrayRef<int> signals) {
  m_signals_to_ignore.clear();
  m_signals_to_ignore.insert(signals.begin(), signals.end());
  return Status();
}

lldb_private::Status
NativeProcessProtocol::GetMemoryRegionInfo(lldb::addr_t load_addr,
                                           MemoryRegionInfo &range_info) {
  // Default: not implemented.
  return Status("not implemented");
}

llvm::Optional<WaitStatus> NativeProcessProtocol::GetExitStatus() {
  if (m_state == lldb::eStateExited)
    return m_exit_status;

  return llvm::None;
}

bool NativeProcessProtocol::SetExitStatus(WaitStatus status,
                                          bool bNotifyStateChange) {
  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  LLDB_LOG(log, "status = {0}, notify = {1}", status, bNotifyStateChange);

  // Exit status already set
  if (m_state == lldb::eStateExited) {
    if (m_exit_status)
      LLDB_LOG(log, "exit status already set to {0}", *m_exit_status);
    else
      LLDB_LOG(log, "state is exited, but status not set");
    return false;
  }

  m_state = lldb::eStateExited;
  m_exit_status = status;

  if (bNotifyStateChange)
    SynchronouslyNotifyProcessStateChanged(lldb::eStateExited);

  return true;
}

NativeThreadProtocolSP NativeProcessProtocol::GetThreadAtIndex(uint32_t idx) {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  if (idx < m_threads.size())
    return m_threads[idx];
  return NativeThreadProtocolSP();
}

NativeThreadProtocolSP
NativeProcessProtocol::GetThreadByIDUnlocked(lldb::tid_t tid) {
  for (auto thread_sp : m_threads) {
    if (thread_sp->GetID() == tid)
      return thread_sp;
  }
  return NativeThreadProtocolSP();
}

NativeThreadProtocolSP NativeProcessProtocol::GetThreadByID(lldb::tid_t tid) {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  return GetThreadByIDUnlocked(tid);
}

bool NativeProcessProtocol::IsAlive() const {
  return m_state != eStateDetached && m_state != eStateExited &&
         m_state != eStateInvalid && m_state != eStateUnloaded;
}

bool NativeProcessProtocol::GetByteOrder(lldb::ByteOrder &byte_order) const {
  ArchSpec process_arch;
  if (!GetArchitecture(process_arch))
    return false;
  byte_order = process_arch.GetByteOrder();
  return true;
}

const NativeWatchpointList::WatchpointMap &
NativeProcessProtocol::GetWatchpointMap() const {
  return m_watchpoint_list.GetWatchpointMap();
}

llvm::Optional<std::pair<uint32_t, uint32_t>>
NativeProcessProtocol::GetHardwareDebugSupportInfo() const {
  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));

  // get any thread
  NativeThreadProtocolSP thread_sp(
      const_cast<NativeProcessProtocol *>(this)->GetThreadAtIndex(0));
  if (!thread_sp) {
    if (log)
      log->Warning("NativeProcessProtocol::%s (): failed to find a thread to "
                   "grab a NativeRegisterContext!",
                   __FUNCTION__);
    return llvm::None;
  }

  NativeRegisterContextSP reg_ctx_sp(thread_sp->GetRegisterContext());
  if (!reg_ctx_sp) {
    if (log)
      log->Warning("NativeProcessProtocol::%s (): failed to get a "
                   "RegisterContextNativeProcess from the first thread!",
                   __FUNCTION__);
    return llvm::None;
  }

  return std::make_pair(reg_ctx_sp->NumSupportedHardwareBreakpoints(),
                        reg_ctx_sp->NumSupportedHardwareWatchpoints());
}

Status NativeProcessProtocol::SetWatchpoint(lldb::addr_t addr, size_t size,
                                            uint32_t watch_flags,
                                            bool hardware) {
  // This default implementation assumes setting the watchpoint for
  // the process will require setting the watchpoint for each of the
  // threads. Furthermore, it will track watchpoints set for the
  // process and will add them to each thread that is attached to
  // via the (FIXME implement) OnThreadAttached () method.

  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));

  // Update the thread list
  UpdateThreads();

  // Keep track of the threads we successfully set the watchpoint
  // for. If one of the thread watchpoint setting operations fails,
  // back off and remove the watchpoint for all the threads that
  // were successfully set so we get back to a consistent state.
  std::vector<NativeThreadProtocolSP> watchpoint_established_threads;

  // Tell each thread to set a watchpoint. In the event that
  // hardware watchpoints are requested but the SetWatchpoint fails,
  // try to set a software watchpoint as a fallback. It's
  // conceivable that if there are more threads than hardware
  // watchpoints available, some of the threads will fail to set
  // hardware watchpoints while software ones may be available.
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  for (auto thread_sp : m_threads) {
    assert(thread_sp && "thread list should not have a NULL thread!");
    if (!thread_sp)
      continue;

    Status thread_error =
        thread_sp->SetWatchpoint(addr, size, watch_flags, hardware);
    if (thread_error.Fail() && hardware) {
      // Try software watchpoints since we failed on hardware watchpoint
      // setting and we may have just run out of hardware watchpoints.
      thread_error = thread_sp->SetWatchpoint(addr, size, watch_flags, false);
      if (thread_error.Success()) {
        if (log)
          log->Warning(
              "hardware watchpoint requested but software watchpoint set");
      }
    }

    if (thread_error.Success()) {
      // Remember that we set this watchpoint successfully in
      // case we need to clear it later.
      watchpoint_established_threads.push_back(thread_sp);
    } else {
      // Unset the watchpoint for each thread we successfully
      // set so that we get back to a consistent state of "not
      // set" for the watchpoint.
      for (auto unwatch_thread_sp : watchpoint_established_threads) {
        Status remove_error = unwatch_thread_sp->RemoveWatchpoint(addr);
        if (remove_error.Fail() && log) {
          log->Warning("NativeProcessProtocol::%s (): RemoveWatchpoint failed "
                       "for pid=%" PRIu64 ", tid=%" PRIu64 ": %s",
                       __FUNCTION__, GetID(), unwatch_thread_sp->GetID(),
                       remove_error.AsCString());
        }
      }

      return thread_error;
    }
  }
  return m_watchpoint_list.Add(addr, size, watch_flags, hardware);
}

Status NativeProcessProtocol::RemoveWatchpoint(lldb::addr_t addr) {
  // Update the thread list
  UpdateThreads();

  Status overall_error;

  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  for (auto thread_sp : m_threads) {
    assert(thread_sp && "thread list should not have a NULL thread!");
    if (!thread_sp)
      continue;

    const Status thread_error = thread_sp->RemoveWatchpoint(addr);
    if (thread_error.Fail()) {
      // Keep track of the first thread error if any threads
      // fail. We want to try to remove the watchpoint from
      // every thread, though, even if one or more have errors.
      if (!overall_error.Fail())
        overall_error = thread_error;
    }
  }
  const Status error = m_watchpoint_list.Remove(addr);
  return overall_error.Fail() ? overall_error : error;
}

const HardwareBreakpointMap &
NativeProcessProtocol::GetHardwareBreakpointMap() const {
  return m_hw_breakpoints_map;
}

Status NativeProcessProtocol::SetHardwareBreakpoint(lldb::addr_t addr,
                                                    size_t size) {
  // This default implementation assumes setting a hardware breakpoint for
  // this process will require setting the same hardware breakpoint for each
  // of its existing threads. New threads will do the same once created.
  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));

  // Update the thread list
  UpdateThreads();

  // Exit here if the target does not have the required hardware breakpoint
  // capability.
  auto hw_debug_cap = GetHardwareDebugSupportInfo();

  if (hw_debug_cap == llvm::None || hw_debug_cap->first == 0 ||
      hw_debug_cap->first <= m_hw_breakpoints_map.size())
    return Status(
        "Target does not have required number of hardware breakpoints");

  // The vector below stores pointers to all threads for which we have
  // successfully set this hardware breakpoint. If any of the current process
  // threads fails to set this hardware breakpoint, roll back and remove the
  // breakpoint from all the threads that had already set it successfully.
  std::vector<NativeThreadProtocolSP> breakpoint_established_threads;

  // Request to set a hardware breakpoint for each of the current process's
  // threads.
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  for (auto thread_sp : m_threads) {
    assert(thread_sp && "thread list should not have a NULL thread!");
    if (!thread_sp)
      continue;

    Status thread_error = thread_sp->SetHardwareBreakpoint(addr, size);
    if (thread_error.Success()) {
      // Remember that we set this breakpoint successfully in
      // case we need to clear it later.
      breakpoint_established_threads.push_back(thread_sp);
    } else {
      // Unset the breakpoint for each thread we successfully
      // set so that we get back to a consistent state of "not
      // set" for this hardware breakpoint.
      for (auto rollback_thread_sp : breakpoint_established_threads) {
        Status remove_error =
            rollback_thread_sp->RemoveHardwareBreakpoint(addr);
        if (remove_error.Fail() && log) {
          log->Warning("NativeProcessProtocol::%s (): RemoveHardwareBreakpoint"
                       " failed for pid=%" PRIu64 ", tid=%" PRIu64 ": %s",
                       __FUNCTION__, GetID(), rollback_thread_sp->GetID(),
                       remove_error.AsCString());
        }
      }

      return thread_error;
    }
  }

  // Register the new hardware breakpoint in the hardware breakpoints map of
  // the current process.
  m_hw_breakpoints_map[addr] = {addr, size};

  return Status();
}

Status NativeProcessProtocol::RemoveHardwareBreakpoint(lldb::addr_t addr) {
  // Update the thread list
  UpdateThreads();

  Status error;

  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  for (auto thread_sp : m_threads) {
    assert(thread_sp && "thread list should not have a NULL thread!");
    if (!thread_sp)
      continue;

    error = thread_sp->RemoveHardwareBreakpoint(addr);
  }

  // Also remove it from the hardware breakpoint map of the current process.
  m_hw_breakpoints_map.erase(addr);

  return error;
}

bool NativeProcessProtocol::RegisterNativeDelegate(
    NativeDelegate &native_delegate) {
  std::lock_guard<std::recursive_mutex> guard(m_delegates_mutex);
  if (std::find(m_delegates.begin(), m_delegates.end(), &native_delegate) !=
      m_delegates.end())
    return false;

  m_delegates.push_back(&native_delegate);
  native_delegate.InitializeDelegate(this);
  return true;
}

bool NativeProcessProtocol::UnregisterNativeDelegate(
    NativeDelegate &native_delegate) {
  std::lock_guard<std::recursive_mutex> guard(m_delegates_mutex);

  const auto initial_size = m_delegates.size();
  m_delegates.erase(
      remove(m_delegates.begin(), m_delegates.end(), &native_delegate),
      m_delegates.end());

  // We removed the delegate if the count of delegates shrank after
  // removing all copies of the given native_delegate from the vector.
  return m_delegates.size() < initial_size;
}

void NativeProcessProtocol::SynchronouslyNotifyProcessStateChanged(
    lldb::StateType state) {
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));

  std::lock_guard<std::recursive_mutex> guard(m_delegates_mutex);
  for (auto native_delegate : m_delegates)
    native_delegate->ProcessStateChanged(this, state);

  if (log) {
    if (!m_delegates.empty()) {
      log->Printf("NativeProcessProtocol::%s: sent state notification [%s] "
                  "from process %" PRIu64,
                  __FUNCTION__, lldb_private::StateAsCString(state), GetID());
    } else {
      log->Printf("NativeProcessProtocol::%s: would send state notification "
                  "[%s] from process %" PRIu64 ", but no delegates",
                  __FUNCTION__, lldb_private::StateAsCString(state), GetID());
    }
  }
}

void NativeProcessProtocol::NotifyDidExec() {
  Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  if (log)
    log->Printf("NativeProcessProtocol::%s - preparing to call delegates",
                __FUNCTION__);

  {
    std::lock_guard<std::recursive_mutex> guard(m_delegates_mutex);
    for (auto native_delegate : m_delegates)
      native_delegate->DidExec(this);
  }
}

Status NativeProcessProtocol::SetSoftwareBreakpoint(lldb::addr_t addr,
                                                    uint32_t size_hint) {
  Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_BREAKPOINTS));
  if (log)
    log->Printf("NativeProcessProtocol::%s addr = 0x%" PRIx64, __FUNCTION__,
                addr);

  return m_breakpoint_list.AddRef(
      addr, size_hint, false,
      [this](lldb::addr_t addr, size_t size_hint, bool /* hardware */,
             NativeBreakpointSP &breakpoint_sp) -> Status {
        return SoftwareBreakpoint::CreateSoftwareBreakpoint(
            *this, addr, size_hint, breakpoint_sp);
      });
}

Status NativeProcessProtocol::RemoveBreakpoint(lldb::addr_t addr,
                                               bool hardware) {
  if (hardware)
    return RemoveHardwareBreakpoint(addr);
  else
    return m_breakpoint_list.DecRef(addr);
}

Status NativeProcessProtocol::EnableBreakpoint(lldb::addr_t addr) {
  return m_breakpoint_list.EnableBreakpoint(addr);
}

Status NativeProcessProtocol::DisableBreakpoint(lldb::addr_t addr) {
  return m_breakpoint_list.DisableBreakpoint(addr);
}

lldb::StateType NativeProcessProtocol::GetState() const {
  std::lock_guard<std::recursive_mutex> guard(m_state_mutex);
  return m_state;
}

void NativeProcessProtocol::SetState(lldb::StateType state,
                                     bool notify_delegates) {
  std::lock_guard<std::recursive_mutex> guard(m_state_mutex);

  if (state == m_state)
    return;

  m_state = state;

  if (StateIsStoppedState(state, false)) {
    ++m_stop_id;

    // Give process a chance to do any stop id bump processing, such as
    // clearing cached data that is invalidated each time the process runs.
    // Note if/when we support some threads running, we'll end up needing
    // to manage this per thread and per process.
    DoStopIDBumped(m_stop_id);
  }

  // Optionally notify delegates of the state change.
  if (notify_delegates)
    SynchronouslyNotifyProcessStateChanged(state);
}

uint32_t NativeProcessProtocol::GetStopID() const {
  std::lock_guard<std::recursive_mutex> guard(m_state_mutex);
  return m_stop_id;
}

void NativeProcessProtocol::DoStopIDBumped(uint32_t /* newBumpId */) {
  // Default implementation does nothing.
}

Status NativeProcessProtocol::ResolveProcessArchitecture(lldb::pid_t pid,
                                                         ArchSpec &arch) {
  // Grab process info for the running process.
  ProcessInstanceInfo process_info;
  if (!Host::GetProcessInfo(pid, process_info))
    return Status("failed to get process info");

  // Resolve the executable module.
  ModuleSpecList module_specs;
  if (!ObjectFile::GetModuleSpecifications(process_info.GetExecutableFile(), 0,
                                           0, module_specs))
    return Status("failed to get module specifications");
  lldbassert(module_specs.GetSize() == 1);

  arch = module_specs.GetModuleSpecRefAtIndex(0).GetArchitecture();
  if (arch.IsValid())
    return Status();
  else
    return Status(
        "failed to retrieve a valid architecture from the exe module");
}

#if !defined(__linux__) && !defined(__NetBSD__)
// These need to be implemented to support lldb-gdb-server on a given platform.
// Stubs are provided to make the rest of the code link on non-supported
// platforms.

Status NativeProcessProtocol::Launch(ProcessLaunchInfo &launch_info,
                                     NativeDelegate &native_delegate,
                                     MainLoop &mainloop,
                                     NativeProcessProtocolSP &process_sp) {
  llvm_unreachable("Platform has no NativeProcessProtocol support");
}

Status NativeProcessProtocol::Attach(lldb::pid_t pid,
                                     NativeDelegate &native_delegate,
                                     MainLoop &mainloop,
                                     NativeProcessProtocolSP &process_sp) {
  llvm_unreachable("Platform has no NativeProcessProtocol support");
}

#endif