1 //===-- NativeProcessProtocol.cpp -------------------------------*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
10 #include "lldb/Host/common/NativeProcessProtocol.h"
11 
12 #include "lldb/Core/ArchSpec.h"
13 #include "lldb/Core/ModuleSpec.h"
14 #include "lldb/Core/State.h"
15 #include "lldb/Host/Host.h"
16 #include "lldb/Host/common/NativeRegisterContext.h"
17 #include "lldb/Host/common/NativeThreadProtocol.h"
18 #include "lldb/Host/common/SoftwareBreakpoint.h"
19 #include "lldb/Symbol/ObjectFile.h"
20 #include "lldb/Target/Process.h"
21 #include "lldb/Utility/LLDBAssert.h"
22 #include "lldb/Utility/Log.h"
23 #include "lldb/lldb-enumerations.h"
24 
25 using namespace lldb;
26 using namespace lldb_private;
27 
28 // -----------------------------------------------------------------------------
29 // NativeProcessProtocol Members
30 // -----------------------------------------------------------------------------
31 
// Initialize bookkeeping to an "empty, not yet launched/attached" state:
// invalid process state, no exit information, empty thread/delegate lists,
// terminal fd unset (-1), and a stop id of 0.
NativeProcessProtocol::NativeProcessProtocol(lldb::pid_t pid)
    : m_pid(pid), m_threads(), m_current_thread_id(LLDB_INVALID_THREAD_ID),
      m_threads_mutex(), m_state(lldb::eStateInvalid), m_state_mutex(),
      m_exit_type(eExitTypeInvalid), m_exit_status(0), m_exit_description(),
      m_delegates_mutex(), m_delegates(), m_breakpoint_list(),
      m_watchpoint_list(), m_terminal_fd(-1), m_stop_id(0) {}
38 
39 lldb_private::Status NativeProcessProtocol::Interrupt() {
40   Status error;
41 #if !defined(SIGSTOP)
42   error.SetErrorString("local host does not support signaling");
43   return error;
44 #else
45   return Signal(SIGSTOP);
46 #endif
47 }
48 
49 Status NativeProcessProtocol::IgnoreSignals(llvm::ArrayRef<int> signals) {
50   m_signals_to_ignore.clear();
51   m_signals_to_ignore.insert(signals.begin(), signals.end());
52   return Status();
53 }
54 
// Default implementation: memory-region queries are optional, so the base
// class reports "not implemented"; platform subclasses override this to
// fill range_info for load_addr.
lldb_private::Status
NativeProcessProtocol::GetMemoryRegionInfo(lldb::addr_t load_addr,
                                           MemoryRegionInfo &range_info) {
  // Default: not implemented.
  return Status("not implemented");
}
61 
62 bool NativeProcessProtocol::GetExitStatus(ExitType *exit_type, int *status,
63                                           std::string &exit_description) {
64   if (m_state == lldb::eStateExited) {
65     *exit_type = m_exit_type;
66     *status = m_exit_status;
67     exit_description = m_exit_description;
68     return true;
69   }
70 
71   *status = 0;
72   return false;
73 }
74 
// Record the inferior's exit information and transition to eStateExited.
// Returns false (changing nothing) if an exit status was already recorded.
// When bNotifyStateChange is true, registered delegates are notified
// synchronously of the eStateExited transition.
bool NativeProcessProtocol::SetExitStatus(ExitType exit_type, int status,
                                          const char *exit_description,
                                          bool bNotifyStateChange) {
  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  if (log)
    log->Printf("NativeProcessProtocol::%s(%d, %d, %s, %s) called",
                __FUNCTION__, exit_type, status,
                exit_description ? exit_description : "nullptr",
                bNotifyStateChange ? "true" : "false");

  // Exit status already set
  if (m_state == lldb::eStateExited) {
    if (log)
      log->Printf("NativeProcessProtocol::%s exit status already set to %d, "
                  "ignoring new set to %d",
                  __FUNCTION__, m_exit_status, status);
    return false;
  }

  // NOTE(review): m_state is written here without holding m_state_mutex,
  // unlike SetState()/GetState() -- confirm callers guarantee exclusion.
  m_state = lldb::eStateExited;

  m_exit_type = exit_type;
  m_exit_status = status;
  // A null or empty description is normalized to "no description".
  if (exit_description && exit_description[0])
    m_exit_description = exit_description;
  else
    m_exit_description.clear();

  if (bNotifyStateChange)
    SynchronouslyNotifyProcessStateChanged(lldb::eStateExited);

  return true;
}
108 
109 NativeThreadProtocolSP NativeProcessProtocol::GetThreadAtIndex(uint32_t idx) {
110   std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
111   if (idx < m_threads.size())
112     return m_threads[idx];
113   return NativeThreadProtocolSP();
114 }
115 
116 NativeThreadProtocolSP
117 NativeProcessProtocol::GetThreadByIDUnlocked(lldb::tid_t tid) {
118   for (auto thread_sp : m_threads) {
119     if (thread_sp->GetID() == tid)
120       return thread_sp;
121   }
122   return NativeThreadProtocolSP();
123 }
124 
// Thread lookup by id: takes m_threads_mutex, then defers to the unlocked
// helper so internal callers that already hold the lock can share the code.
NativeThreadProtocolSP NativeProcessProtocol::GetThreadByID(lldb::tid_t tid) {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  return GetThreadByIDUnlocked(tid);
}
129 
130 bool NativeProcessProtocol::IsAlive() const {
131   return m_state != eStateDetached && m_state != eStateExited &&
132          m_state != eStateInvalid && m_state != eStateUnloaded;
133 }
134 
135 bool NativeProcessProtocol::GetByteOrder(lldb::ByteOrder &byte_order) const {
136   ArchSpec process_arch;
137   if (!GetArchitecture(process_arch))
138     return false;
139   byte_order = process_arch.GetByteOrder();
140   return true;
141 }
142 
// Accessor for the process-level watchpoint bookkeeping (address -> entry),
// as maintained by SetWatchpoint()/RemoveWatchpoint().
const NativeWatchpointList::WatchpointMap &
NativeProcessProtocol::GetWatchpointMap() const {
  return m_watchpoint_list.GetWatchpointMap();
}
147 
148 llvm::Optional<std::pair<uint32_t, uint32_t>>
149 NativeProcessProtocol::GetHardwareDebugSupportInfo() const {
150   Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
151 
152   // get any thread
153   NativeThreadProtocolSP thread_sp(
154       const_cast<NativeProcessProtocol *>(this)->GetThreadAtIndex(0));
155   if (!thread_sp) {
156     if (log)
157       log->Warning("NativeProcessProtocol::%s (): failed to find a thread to "
158                    "grab a NativeRegisterContext!",
159                    __FUNCTION__);
160     return llvm::None;
161   }
162 
163   NativeRegisterContextSP reg_ctx_sp(thread_sp->GetRegisterContext());
164   if (!reg_ctx_sp) {
165     if (log)
166       log->Warning("NativeProcessProtocol::%s (): failed to get a "
167                    "RegisterContextNativeProcess from the first thread!",
168                    __FUNCTION__);
169     return llvm::None;
170   }
171 
172   return std::make_pair(reg_ctx_sp->NumSupportedHardwareBreakpoints(),
173                         reg_ctx_sp->NumSupportedHardwareWatchpoints());
174 }
175 
// Set a watchpoint covering [addr, addr+size) with watch_flags on every
// thread of the process. If any thread fails, the watchpoint is rolled back
// from the threads that had already accepted it and the failure is returned.
// On success the watchpoint is recorded in m_watchpoint_list so it can be
// re-applied to threads discovered later.
Status NativeProcessProtocol::SetWatchpoint(lldb::addr_t addr, size_t size,
                                            uint32_t watch_flags,
                                            bool hardware) {
  // This default implementation assumes setting the watchpoint for
  // the process will require setting the watchpoint for each of the
  // threads.  Furthermore, it will track watchpoints set for the
  // process and will add them to each thread that is attached to
  // via the (FIXME implement) OnThreadAttached () method.

  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));

  // Update the thread list
  UpdateThreads();

  // Keep track of the threads we successfully set the watchpoint
  // for.  If one of the thread watchpoint setting operations fails,
  // back off and remove the watchpoint for all the threads that
  // were successfully set so we get back to a consistent state.
  std::vector<NativeThreadProtocolSP> watchpoint_established_threads;

  // Tell each thread to set a watchpoint.  In the event that
  // hardware watchpoints are requested but the SetWatchpoint fails,
  // try to set a software watchpoint as a fallback.  It's
  // conceivable that if there are more threads than hardware
  // watchpoints available, some of the threads will fail to set
  // hardware watchpoints while software ones may be available.
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  for (auto thread_sp : m_threads) {
    assert(thread_sp && "thread list should not have a NULL thread!");
    if (!thread_sp)
      continue;

    Status thread_error =
        thread_sp->SetWatchpoint(addr, size, watch_flags, hardware);
    if (thread_error.Fail() && hardware) {
      // Try software watchpoints since we failed on hardware watchpoint setting
      // and we may have just run out of hardware watchpoints.
      thread_error = thread_sp->SetWatchpoint(addr, size, watch_flags, false);
      if (thread_error.Success()) {
        if (log)
          log->Warning(
              "hardware watchpoint requested but software watchpoint set");
      }
    }

    if (thread_error.Success()) {
      // Remember that we set this watchpoint successfully in
      // case we need to clear it later.
      watchpoint_established_threads.push_back(thread_sp);
    } else {
      // Unset the watchpoint for each thread we successfully
      // set so that we get back to a consistent state of "not
      // set" for the watchpoint.
      for (auto unwatch_thread_sp : watchpoint_established_threads) {
        Status remove_error = unwatch_thread_sp->RemoveWatchpoint(addr);
        if (remove_error.Fail() && log) {
          log->Warning("NativeProcessProtocol::%s (): RemoveWatchpoint failed "
                       "for pid=%" PRIu64 ", tid=%" PRIu64 ": %s",
                       __FUNCTION__, GetID(), unwatch_thread_sp->GetID(),
                       remove_error.AsCString());
        }
      }

      return thread_error;
    }
  }
  // All threads accepted the watchpoint: record it at the process level.
  return m_watchpoint_list.Add(addr, size, watch_flags, hardware);
}
244 
245 Status NativeProcessProtocol::RemoveWatchpoint(lldb::addr_t addr) {
246   // Update the thread list
247   UpdateThreads();
248 
249   Status overall_error;
250 
251   std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
252   for (auto thread_sp : m_threads) {
253     assert(thread_sp && "thread list should not have a NULL thread!");
254     if (!thread_sp)
255       continue;
256 
257     const Status thread_error = thread_sp->RemoveWatchpoint(addr);
258     if (thread_error.Fail()) {
259       // Keep track of the first thread error if any threads
260       // fail. We want to try to remove the watchpoint from
261       // every thread, though, even if one or more have errors.
262       if (!overall_error.Fail())
263         overall_error = thread_error;
264     }
265   }
266   const Status error = m_watchpoint_list.Remove(addr);
267   return overall_error.Fail() ? overall_error : error;
268 }
269 
// Accessor for the process-level hardware breakpoint bookkeeping
// (address -> {addr, size}), maintained by Set/RemoveHardwareBreakpoint().
const HardwareBreakpointMap &
NativeProcessProtocol::GetHardwareBreakpointMap() const {
  return m_hw_breakpoints_map;
}
274 
// Set a hardware breakpoint at [addr, addr+size) on every current thread.
// Fails up front if the target reports no free hardware breakpoint slots.
// If any thread fails, the breakpoint is rolled back from the threads that
// had already accepted it and that thread's error is returned. On success
// the breakpoint is recorded in m_hw_breakpoints_map so new threads can
// apply it too.
Status NativeProcessProtocol::SetHardwareBreakpoint(lldb::addr_t addr,
                                                    size_t size) {
  // This default implementation assumes setting a hardware breakpoint for
  // this process will require setting same hardware breakpoint for each
  // of its existing threads. New thread will do the same once created.
  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));

  // Update the thread list
  UpdateThreads();

  // Exit here if target does not have required hardware breakpoint capability.
  auto hw_debug_cap = GetHardwareDebugSupportInfo();

  // hw_debug_cap->first is the number of supported hardware breakpoints.
  if (hw_debug_cap == llvm::None || hw_debug_cap->first == 0 ||
      hw_debug_cap->first <= m_hw_breakpoints_map.size())
    return Status("Target does not have required no of hardware breakpoints");

  // Vector below stores all thread pointer for which we have we successfully
  // set this hardware breakpoint. If any of the current process threads fails
  // to set this hardware breakpoint then roll back and remove this breakpoint
  // for all the threads that had already set it successfully.
  std::vector<NativeThreadProtocolSP> breakpoint_established_threads;

  // Request to set a hardware breakpoint for each of current process threads.
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  for (auto thread_sp : m_threads) {
    assert(thread_sp && "thread list should not have a NULL thread!");
    if (!thread_sp)
      continue;

    Status thread_error = thread_sp->SetHardwareBreakpoint(addr, size);
    if (thread_error.Success()) {
      // Remember that we set this breakpoint successfully in
      // case we need to clear it later.
      breakpoint_established_threads.push_back(thread_sp);
    } else {
      // Unset the breakpoint for each thread we successfully
      // set so that we get back to a consistent state of "not
      // set" for this hardware breakpoint.
      for (auto rollback_thread_sp : breakpoint_established_threads) {
        Status remove_error =
            rollback_thread_sp->RemoveHardwareBreakpoint(addr);
        if (remove_error.Fail() && log) {
          log->Warning("NativeProcessProtocol::%s (): RemoveHardwareBreakpoint"
                       " failed for pid=%" PRIu64 ", tid=%" PRIu64 ": %s",
                       __FUNCTION__, GetID(), rollback_thread_sp->GetID(),
                       remove_error.AsCString());
        }
      }

      return thread_error;
    }
  }

  // Register new hardware breakpoint into hardware breakpoints map of current
  // process.
  m_hw_breakpoints_map[addr] = {addr, size};

  return Status();
}
335 
336 Status NativeProcessProtocol::RemoveHardwareBreakpoint(lldb::addr_t addr) {
337   // Update the thread list
338   UpdateThreads();
339 
340   Status error;
341 
342   std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
343   for (auto thread_sp : m_threads) {
344     assert(thread_sp && "thread list should not have a NULL thread!");
345     if (!thread_sp)
346       continue;
347 
348     error = thread_sp->RemoveHardwareBreakpoint(addr);
349   }
350 
351   // Also remove from hardware breakpoint map of current process.
352   m_hw_breakpoints_map.erase(addr);
353 
354   return error;
355 }
356 
357 bool NativeProcessProtocol::RegisterNativeDelegate(
358     NativeDelegate &native_delegate) {
359   std::lock_guard<std::recursive_mutex> guard(m_delegates_mutex);
360   if (std::find(m_delegates.begin(), m_delegates.end(), &native_delegate) !=
361       m_delegates.end())
362     return false;
363 
364   m_delegates.push_back(&native_delegate);
365   native_delegate.InitializeDelegate(this);
366   return true;
367 }
368 
369 bool NativeProcessProtocol::UnregisterNativeDelegate(
370     NativeDelegate &native_delegate) {
371   std::lock_guard<std::recursive_mutex> guard(m_delegates_mutex);
372 
373   const auto initial_size = m_delegates.size();
374   m_delegates.erase(
375       remove(m_delegates.begin(), m_delegates.end(), &native_delegate),
376       m_delegates.end());
377 
378   // We removed the delegate if the count of delegates shrank after
379   // removing all copies of the given native_delegate from the vector.
380   return m_delegates.size() < initial_size;
381 }
382 
383 void NativeProcessProtocol::SynchronouslyNotifyProcessStateChanged(
384     lldb::StateType state) {
385   Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
386 
387   std::lock_guard<std::recursive_mutex> guard(m_delegates_mutex);
388   for (auto native_delegate : m_delegates)
389     native_delegate->ProcessStateChanged(this, state);
390 
391   if (log) {
392     if (!m_delegates.empty()) {
393       log->Printf("NativeProcessProtocol::%s: sent state notification [%s] "
394                   "from process %" PRIu64,
395                   __FUNCTION__, lldb_private::StateAsCString(state), GetID());
396     } else {
397       log->Printf("NativeProcessProtocol::%s: would send state notification "
398                   "[%s] from process %" PRIu64 ", but no delegates",
399                   __FUNCTION__, lldb_private::StateAsCString(state), GetID());
400     }
401   }
402 }
403 
404 void NativeProcessProtocol::NotifyDidExec() {
405   Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
406   if (log)
407     log->Printf("NativeProcessProtocol::%s - preparing to call delegates",
408                 __FUNCTION__);
409 
410   {
411     std::lock_guard<std::recursive_mutex> guard(m_delegates_mutex);
412     for (auto native_delegate : m_delegates)
413       native_delegate->DidExec(this);
414   }
415 }
416 
// Add (or add a reference to) a software breakpoint at addr. The actual
// creation is delegated to SoftwareBreakpoint::CreateSoftwareBreakpoint via
// the factory callback, invoked by the list only when no breakpoint exists
// at addr yet; size_hint lets the platform choose a trap-opcode size.
Status NativeProcessProtocol::SetSoftwareBreakpoint(lldb::addr_t addr,
                                                    uint32_t size_hint) {
  Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_BREAKPOINTS));
  if (log)
    log->Printf("NativeProcessProtocol::%s addr = 0x%" PRIx64, __FUNCTION__,
                addr);

  // third argument `false` = not a hardware breakpoint.
  return m_breakpoint_list.AddRef(
      addr, size_hint, false,
      [this](lldb::addr_t addr, size_t size_hint, bool /* hardware */,
             NativeBreakpointSP &breakpoint_sp) -> Status {
        return SoftwareBreakpoint::CreateSoftwareBreakpoint(
            *this, addr, size_hint, breakpoint_sp);
      });
}
432 
433 Status NativeProcessProtocol::RemoveBreakpoint(lldb::addr_t addr,
434                                                bool hardware) {
435   if (hardware)
436     return RemoveHardwareBreakpoint(addr);
437   else
438     return m_breakpoint_list.DecRef(addr);
439 }
440 
// Re-enable a previously disabled software breakpoint at addr.
Status NativeProcessProtocol::EnableBreakpoint(lldb::addr_t addr) {
  return m_breakpoint_list.EnableBreakpoint(addr);
}
444 
// Temporarily disable the software breakpoint at addr without removing it.
Status NativeProcessProtocol::DisableBreakpoint(lldb::addr_t addr) {
  return m_breakpoint_list.DisableBreakpoint(addr);
}
448 
// Thread-safe read of the current process state.
lldb::StateType NativeProcessProtocol::GetState() const {
  std::lock_guard<std::recursive_mutex> guard(m_state_mutex);
  return m_state;
}
453 
// Transition to a new process state. No-op if the state is unchanged.
// Entering a stopped state bumps the stop id (and lets subclasses react via
// DoStopIDBumped); delegates are notified synchronously when requested.
void NativeProcessProtocol::SetState(lldb::StateType state,
                                     bool notify_delegates) {
  std::lock_guard<std::recursive_mutex> guard(m_state_mutex);

  if (state == m_state)
    return;

  m_state = state;

  // `false` = do not treat eStateSuspended as a stopped state here.
  if (StateIsStoppedState(state, false)) {
    ++m_stop_id;

    // Give process a chance to do any stop id bump processing, such as
    // clearing cached data that is invalidated each time the process runs.
    // Note if/when we support some threads running, we'll end up needing
    // to manage this per thread and per process.
    DoStopIDBumped(m_stop_id);
  }

  // Optionally notify delegates of the state change.
  if (notify_delegates)
    SynchronouslyNotifyProcessStateChanged(state);
}
477 
// Thread-safe read of the stop id (incremented by SetState on each stop).
uint32_t NativeProcessProtocol::GetStopID() const {
  std::lock_guard<std::recursive_mutex> guard(m_state_mutex);
  return m_stop_id;
}
482 
// Hook called by SetState whenever the stop id is incremented; subclasses
// override to invalidate per-stop cached data.
void NativeProcessProtocol::DoStopIDBumped(uint32_t /* newBumpId */) {
  // Default implementation does nothing.
}
486 
487 Status NativeProcessProtocol::ResolveProcessArchitecture(lldb::pid_t pid,
488                                                          ArchSpec &arch) {
489   // Grab process info for the running process.
490   ProcessInstanceInfo process_info;
491   if (!Host::GetProcessInfo(pid, process_info))
492     return Status("failed to get process info");
493 
494   // Resolve the executable module.
495   ModuleSpecList module_specs;
496   if (!ObjectFile::GetModuleSpecifications(process_info.GetExecutableFile(), 0,
497                                            0, module_specs))
498     return Status("failed to get module specifications");
499   lldbassert(module_specs.GetSize() == 1);
500 
501   arch = module_specs.GetModuleSpecRefAtIndex(0).GetArchitecture();
502   if (arch.IsValid())
503     return Status();
504   else
505     return Status(
506         "failed to retrieve a valid architecture from the exe module");
507 }
508 
#if !defined(__linux__) && !defined(__NetBSD__)
// Launch and Attach need real implementations to support lldb-gdb-server on
// a given platform. These stubs exist only so the rest of the code links on
// platforms without NativeProcessProtocol support; reaching them at runtime
// is a bug.

// Stub: launching a debuggee is unsupported on this platform.
Status NativeProcessProtocol::Launch(ProcessLaunchInfo &launch_info,
                                     NativeDelegate &native_delegate,
                                     MainLoop &mainloop,
                                     NativeProcessProtocolSP &process_sp) {
  llvm_unreachable("Platform has no NativeProcessProtocol support");
}

// Stub: attaching to a process is unsupported on this platform.
Status NativeProcessProtocol::Attach(lldb::pid_t pid,
                                     NativeDelegate &native_delegate,
                                     MainLoop &mainloop,
                                     NativeProcessProtocolSP &process_sp) {
  llvm_unreachable("Platform has no NativeProcessProtocol support");
}

#endif
529