1 //===-- NativeProcessProtocol.cpp -------------------------------*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
10 #include "lldb/Host/common/NativeProcessProtocol.h"
11 #include "lldb/Core/ModuleSpec.h"
12 #include "lldb/Core/State.h"
13 #include "lldb/Host/Host.h"
14 #include "lldb/Host/common/NativeRegisterContext.h"
15 #include "lldb/Host/common/NativeThreadProtocol.h"
16 #include "lldb/Host/common/SoftwareBreakpoint.h"
17 #include "lldb/Symbol/ObjectFile.h"
18 #include "lldb/Target/Process.h"
19 #include "lldb/Utility/LLDBAssert.h"
20 #include "lldb/Utility/Log.h"
21 #include "lldb/lldb-enumerations.h"
22 
23 using namespace lldb;
24 using namespace lldb_private;
25 
26 // -----------------------------------------------------------------------------
27 // NativeProcessProtocol Members
28 // -----------------------------------------------------------------------------
29 
// Construct the native process representation for `pid`, adopting
// `terminal_fd` and registering `delegate` to receive state-change
// notifications.
NativeProcessProtocol::NativeProcessProtocol(lldb::pid_t pid, int terminal_fd,
                                             NativeDelegate &delegate)
    : m_pid(pid), m_terminal_fd(terminal_fd) {
  bool registered = RegisterNativeDelegate(delegate);
  // Registration only fails for a duplicate delegate, which cannot happen on
  // a freshly constructed object; assert in debug builds, and cast to void to
  // silence the unused-variable warning in release builds.
  assert(registered);
  (void)registered;
}
37 
38 lldb_private::Status NativeProcessProtocol::Interrupt() {
39   Status error;
40 #if !defined(SIGSTOP)
41   error.SetErrorString("local host does not support signaling");
42   return error;
43 #else
44   return Signal(SIGSTOP);
45 #endif
46 }
47 
48 Status NativeProcessProtocol::IgnoreSignals(llvm::ArrayRef<int> signals) {
49   m_signals_to_ignore.clear();
50   m_signals_to_ignore.insert(signals.begin(), signals.end());
51   return Status();
52 }
53 
// Default implementation: memory-region queries are optional; platform
// subclasses that can answer them override this method.
lldb_private::Status
NativeProcessProtocol::GetMemoryRegionInfo(lldb::addr_t load_addr,
                                           MemoryRegionInfo &range_info) {
  // Default: not implemented.
  return Status("not implemented");
}
60 
61 llvm::Optional<WaitStatus> NativeProcessProtocol::GetExitStatus() {
62   if (m_state == lldb::eStateExited)
63     return m_exit_status;
64 
65   return llvm::None;
66 }
67 
// Record the process exit status and flip the state to eStateExited.
// Returns true if the status was recorded, false if the process had already
// exited (the first recorded status wins). When bNotifyStateChange is set,
// registered delegates are informed synchronously.
bool NativeProcessProtocol::SetExitStatus(WaitStatus status,
                                          bool bNotifyStateChange) {
  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
  LLDB_LOG(log, "status = {0}, notify = {1}", status, bNotifyStateChange);

  // Exit status already set: ignore the new value and report failure.
  if (m_state == lldb::eStateExited) {
    if (m_exit_status)
      LLDB_LOG(log, "exit status already set to {0}", *m_exit_status);
    else
      LLDB_LOG(log, "state is exited, but status not set");
    return false;
  }

  m_state = lldb::eStateExited;
  m_exit_status = status;

  if (bNotifyStateChange)
    SynchronouslyNotifyProcessStateChanged(lldb::eStateExited);

  return true;
}
90 
91 NativeThreadProtocol *NativeProcessProtocol::GetThreadAtIndex(uint32_t idx) {
92   std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
93   if (idx < m_threads.size())
94     return m_threads[idx].get();
95   return nullptr;
96 }
97 
98 NativeThreadProtocol *
99 NativeProcessProtocol::GetThreadByIDUnlocked(lldb::tid_t tid) {
100   for (const auto &thread : m_threads) {
101     if (thread->GetID() == tid)
102       return thread.get();
103   }
104   return nullptr;
105 }
106 
// Locking wrapper around GetThreadByIDUnlocked(): look up a thread by id
// with the thread-list mutex held.
NativeThreadProtocol *NativeProcessProtocol::GetThreadByID(lldb::tid_t tid) {
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  return GetThreadByIDUnlocked(tid);
}
111 
112 bool NativeProcessProtocol::IsAlive() const {
113   return m_state != eStateDetached && m_state != eStateExited &&
114          m_state != eStateInvalid && m_state != eStateUnloaded;
115 }
116 
// Expose the process-level watchpoint bookkeeping (address -> watchpoint
// description) maintained by SetWatchpoint/RemoveWatchpoint.
const NativeWatchpointList::WatchpointMap &
NativeProcessProtocol::GetWatchpointMap() const {
  return m_watchpoint_list.GetWatchpointMap();
}
121 
122 llvm::Optional<std::pair<uint32_t, uint32_t>>
123 NativeProcessProtocol::GetHardwareDebugSupportInfo() const {
124   Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
125 
126   // get any thread
127   NativeThreadProtocol *thread(
128       const_cast<NativeProcessProtocol *>(this)->GetThreadAtIndex(0));
129   if (!thread) {
130     LLDB_LOG(log, "failed to find a thread to grab a NativeRegisterContext!");
131     return llvm::None;
132   }
133 
134   NativeRegisterContext &reg_ctx = thread->GetRegisterContext();
135   return std::make_pair(reg_ctx.NumSupportedHardwareBreakpoints(),
136                         reg_ctx.NumSupportedHardwareWatchpoints());
137 }
138 
// Set a watchpoint on every thread of the process, then record it in the
// process-level watchpoint list. If any thread fails, the watchpoint is
// rolled back on all threads that had already accepted it and the failing
// status is returned, leaving the process in a consistent "not set" state.
Status NativeProcessProtocol::SetWatchpoint(lldb::addr_t addr, size_t size,
                                            uint32_t watch_flags,
                                            bool hardware) {
  // This default implementation assumes setting the watchpoint for
  // the process will require setting the watchpoint for each of the
  // threads.  Furthermore, it will track watchpoints set for the
  // process and will add them to each thread that is attached to
  // via the (FIXME implement) OnThreadAttached () method.

  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));

  // Update the thread list
  UpdateThreads();

  // Keep track of the threads we successfully set the watchpoint
  // for.  If one of the thread watchpoint setting operations fails,
  // back off and remove the watchpoint for all the threads that
  // were successfully set so we get back to a consistent state.
  std::vector<NativeThreadProtocol *> watchpoint_established_threads;

  // Tell each thread to set a watchpoint.  In the event that
  // hardware watchpoints are requested but the SetWatchpoint fails,
  // try to set a software watchpoint as a fallback.  It's
  // conceivable that if there are more threads than hardware
  // watchpoints available, some of the threads will fail to set
  // hardware watchpoints while software ones may be available.
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  for (const auto &thread : m_threads) {
    assert(thread && "thread list should not have a NULL thread!");

    Status thread_error =
        thread->SetWatchpoint(addr, size, watch_flags, hardware);
    if (thread_error.Fail() && hardware) {
      // Try software watchpoints since we failed on hardware watchpoint setting
      // and we may have just run out of hardware watchpoints.
      thread_error = thread->SetWatchpoint(addr, size, watch_flags, false);
      if (thread_error.Success())
        LLDB_LOG(log,
                 "hardware watchpoint requested but software watchpoint set");
    }

    if (thread_error.Success()) {
      // Remember that we set this watchpoint successfully in
      // case we need to clear it later.
      watchpoint_established_threads.push_back(thread.get());
    } else {
      // Unset the watchpoint for each thread we successfully
      // set so that we get back to a consistent state of "not
      // set" for the watchpoint.
      for (auto unwatch_thread_sp : watchpoint_established_threads) {
        Status remove_error = unwatch_thread_sp->RemoveWatchpoint(addr);
        if (remove_error.Fail())
          LLDB_LOG(log, "RemoveWatchpoint failed for pid={0}, tid={1}: {2}",
                   GetID(), unwatch_thread_sp->GetID(), remove_error);
      }

      return thread_error;
    }
  }
  // All threads accepted the watchpoint; record it at the process level.
  return m_watchpoint_list.Add(addr, size, watch_flags, hardware);
}
200 
201 Status NativeProcessProtocol::RemoveWatchpoint(lldb::addr_t addr) {
202   // Update the thread list
203   UpdateThreads();
204 
205   Status overall_error;
206 
207   std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
208   for (const auto &thread : m_threads) {
209     assert(thread && "thread list should not have a NULL thread!");
210 
211     const Status thread_error = thread->RemoveWatchpoint(addr);
212     if (thread_error.Fail()) {
213       // Keep track of the first thread error if any threads
214       // fail. We want to try to remove the watchpoint from
215       // every thread, though, even if one or more have errors.
216       if (!overall_error.Fail())
217         overall_error = thread_error;
218     }
219   }
220   const Status error = m_watchpoint_list.Remove(addr);
221   return overall_error.Fail() ? overall_error : error;
222 }
223 
// Expose the process-level hardware breakpoint bookkeeping (address ->
// breakpoint description) maintained by Set/RemoveHardwareBreakpoint.
const HardwareBreakpointMap &
NativeProcessProtocol::GetHardwareBreakpointMap() const {
  return m_hw_breakpoints_map;
}
228 
// Set a hardware breakpoint on every thread of the process, then record it
// in m_hw_breakpoints_map. Fails up front if the target lacks a free
// hardware breakpoint slot. If any thread fails, the breakpoint is rolled
// back on all threads that had already accepted it and the failing status is
// returned, leaving the process in a consistent "not set" state.
Status NativeProcessProtocol::SetHardwareBreakpoint(lldb::addr_t addr,
                                                    size_t size) {
  // This default implementation assumes setting a hardware breakpoint for
  // this process will require setting same hardware breakpoint for each
  // of its existing threads. New thread will do the same once created.
  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));

  // Update the thread list
  UpdateThreads();

  // Exit here if target does not have required hardware breakpoint capability.
  auto hw_debug_cap = GetHardwareDebugSupportInfo();

  // first == number of supported hardware breakpoints; reject when the
  // capability is unknown, zero, or already exhausted by existing entries.
  if (hw_debug_cap == llvm::None || hw_debug_cap->first == 0 ||
      hw_debug_cap->first <= m_hw_breakpoints_map.size())
    return Status("Target does not have required no of hardware breakpoints");

  // Vector below stores all thread pointer for which we have we successfully
  // set this hardware breakpoint. If any of the current process threads fails
  // to set this hardware breakpoint then roll back and remove this breakpoint
  // for all the threads that had already set it successfully.
  std::vector<NativeThreadProtocol *> breakpoint_established_threads;

  // Request to set a hardware breakpoint for each of current process threads.
  std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
  for (const auto &thread : m_threads) {
    assert(thread && "thread list should not have a NULL thread!");

    Status thread_error = thread->SetHardwareBreakpoint(addr, size);
    if (thread_error.Success()) {
      // Remember that we set this breakpoint successfully in
      // case we need to clear it later.
      breakpoint_established_threads.push_back(thread.get());
    } else {
      // Unset the breakpoint for each thread we successfully
      // set so that we get back to a consistent state of "not
      // set" for this hardware breakpoint.
      for (auto rollback_thread_sp : breakpoint_established_threads) {
        Status remove_error =
            rollback_thread_sp->RemoveHardwareBreakpoint(addr);
        if (remove_error.Fail())
          LLDB_LOG(log,
                   "RemoveHardwareBreakpoint failed for pid={0}, tid={1}: {2}",
                   GetID(), rollback_thread_sp->GetID(), remove_error);
      }

      return thread_error;
    }
  }

  // Register new hardware breakpoint into hardware breakpoints map of current
  // process.
  m_hw_breakpoints_map[addr] = {addr, size};

  return Status();
}
285 
286 Status NativeProcessProtocol::RemoveHardwareBreakpoint(lldb::addr_t addr) {
287   // Update the thread list
288   UpdateThreads();
289 
290   Status error;
291 
292   std::lock_guard<std::recursive_mutex> guard(m_threads_mutex);
293   for (const auto &thread : m_threads) {
294     assert(thread && "thread list should not have a NULL thread!");
295     error = thread->RemoveHardwareBreakpoint(addr);
296   }
297 
298   // Also remove from hardware breakpoint map of current process.
299   m_hw_breakpoints_map.erase(addr);
300 
301   return error;
302 }
303 
304 bool NativeProcessProtocol::RegisterNativeDelegate(
305     NativeDelegate &native_delegate) {
306   std::lock_guard<std::recursive_mutex> guard(m_delegates_mutex);
307   if (std::find(m_delegates.begin(), m_delegates.end(), &native_delegate) !=
308       m_delegates.end())
309     return false;
310 
311   m_delegates.push_back(&native_delegate);
312   native_delegate.InitializeDelegate(this);
313   return true;
314 }
315 
316 bool NativeProcessProtocol::UnregisterNativeDelegate(
317     NativeDelegate &native_delegate) {
318   std::lock_guard<std::recursive_mutex> guard(m_delegates_mutex);
319 
320   const auto initial_size = m_delegates.size();
321   m_delegates.erase(
322       remove(m_delegates.begin(), m_delegates.end(), &native_delegate),
323       m_delegates.end());
324 
325   // We removed the delegate if the count of delegates shrank after
326   // removing all copies of the given native_delegate from the vector.
327   return m_delegates.size() < initial_size;
328 }
329 
330 void NativeProcessProtocol::SynchronouslyNotifyProcessStateChanged(
331     lldb::StateType state) {
332   Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
333 
334   std::lock_guard<std::recursive_mutex> guard(m_delegates_mutex);
335   for (auto native_delegate : m_delegates)
336     native_delegate->ProcessStateChanged(this, state);
337 
338   if (log) {
339     if (!m_delegates.empty()) {
340       log->Printf("NativeProcessProtocol::%s: sent state notification [%s] "
341                   "from process %" PRIu64,
342                   __FUNCTION__, lldb_private::StateAsCString(state), GetID());
343     } else {
344       log->Printf("NativeProcessProtocol::%s: would send state notification "
345                   "[%s] from process %" PRIu64 ", but no delegates",
346                   __FUNCTION__, lldb_private::StateAsCString(state), GetID());
347     }
348   }
349 }
350 
351 void NativeProcessProtocol::NotifyDidExec() {
352   Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_PROCESS));
353   if (log)
354     log->Printf("NativeProcessProtocol::%s - preparing to call delegates",
355                 __FUNCTION__);
356 
357   {
358     std::lock_guard<std::recursive_mutex> guard(m_delegates_mutex);
359     for (auto native_delegate : m_delegates)
360       native_delegate->DidExec(this);
361   }
362 }
363 
364 Status NativeProcessProtocol::SetSoftwareBreakpoint(lldb::addr_t addr,
365                                                     uint32_t size_hint) {
366   Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_BREAKPOINTS));
367   if (log)
368     log->Printf("NativeProcessProtocol::%s addr = 0x%" PRIx64, __FUNCTION__,
369                 addr);
370 
371   return m_breakpoint_list.AddRef(
372       addr, size_hint, false,
373       [this](lldb::addr_t addr, size_t size_hint, bool /* hardware */,
374              NativeBreakpointSP &breakpoint_sp) -> Status {
375         return SoftwareBreakpoint::CreateSoftwareBreakpoint(
376             *this, addr, size_hint, breakpoint_sp);
377       });
378 }
379 
380 Status NativeProcessProtocol::RemoveBreakpoint(lldb::addr_t addr,
381                                                bool hardware) {
382   if (hardware)
383     return RemoveHardwareBreakpoint(addr);
384   else
385     return m_breakpoint_list.DecRef(addr);
386 }
387 
// Forward to the software breakpoint list: re-enable the breakpoint at
// `addr` (e.g. after stepping over it).
Status NativeProcessProtocol::EnableBreakpoint(lldb::addr_t addr) {
  return m_breakpoint_list.EnableBreakpoint(addr);
}
391 
// Forward to the software breakpoint list: temporarily disable the
// breakpoint at `addr` without releasing its reference.
Status NativeProcessProtocol::DisableBreakpoint(lldb::addr_t addr) {
  return m_breakpoint_list.DisableBreakpoint(addr);
}
395 
// Return the current process state under the state mutex.
lldb::StateType NativeProcessProtocol::GetState() const {
  std::lock_guard<std::recursive_mutex> guard(m_state_mutex);
  return m_state;
}
400 
401 void NativeProcessProtocol::SetState(lldb::StateType state,
402                                      bool notify_delegates) {
403   std::lock_guard<std::recursive_mutex> guard(m_state_mutex);
404 
405   if (state == m_state)
406     return;
407 
408   m_state = state;
409 
410   if (StateIsStoppedState(state, false)) {
411     ++m_stop_id;
412 
413     // Give process a chance to do any stop id bump processing, such as
414     // clearing cached data that is invalidated each time the process runs.
415     // Note if/when we support some threads running, we'll end up needing
416     // to manage this per thread and per process.
417     DoStopIDBumped(m_stop_id);
418   }
419 
420   // Optionally notify delegates of the state change.
421   if (notify_delegates)
422     SynchronouslyNotifyProcessStateChanged(state);
423 }
424 
// Return the current stop id (incremented by SetState() on each transition
// into a stopped state) under the state mutex.
uint32_t NativeProcessProtocol::GetStopID() const {
  std::lock_guard<std::recursive_mutex> guard(m_state_mutex);
  return m_stop_id;
}
429 
// Hook called by SetState() whenever the stop id is bumped; subclasses
// override to flush caches invalidated by the process running.
void NativeProcessProtocol::DoStopIDBumped(uint32_t /* newBumpId */) {
  // Default implementation does nothing.
}
433 
434 Status NativeProcessProtocol::ResolveProcessArchitecture(lldb::pid_t pid,
435                                                          ArchSpec &arch) {
436   // Grab process info for the running process.
437   ProcessInstanceInfo process_info;
438   if (!Host::GetProcessInfo(pid, process_info))
439     return Status("failed to get process info");
440 
441   // Resolve the executable module.
442   ModuleSpecList module_specs;
443   if (!ObjectFile::GetModuleSpecifications(process_info.GetExecutableFile(), 0,
444                                            0, module_specs))
445     return Status("failed to get module specifications");
446   lldbassert(module_specs.GetSize() == 1);
447 
448   arch = module_specs.GetModuleSpecRefAtIndex(0).GetArchitecture();
449   if (arch.IsValid())
450     return Status();
451   else
452     return Status(
453         "failed to retrieve a valid architecture from the exe module");
454 }
455 
// Out-of-line defaulted destructor for the nested Factory type.
NativeProcessProtocol::Factory::~Factory() = default;
457