1 //===-- StackFrameList.cpp ------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "lldb/Target/StackFrameList.h"
10 #include "lldb/Breakpoint/Breakpoint.h"
11 #include "lldb/Breakpoint/BreakpointLocation.h"
12 #include "lldb/Core/SourceManager.h"
13 #include "lldb/Core/StreamFile.h"
14 #include "lldb/Symbol/Block.h"
15 #include "lldb/Symbol/Function.h"
16 #include "lldb/Symbol/Symbol.h"
17 #include "lldb/Target/Process.h"
18 #include "lldb/Target/RegisterContext.h"
19 #include "lldb/Target/StackFrame.h"
20 #include "lldb/Target/StopInfo.h"
21 #include "lldb/Target/Target.h"
22 #include "lldb/Target/Thread.h"
23 #include "lldb/Target/Unwind.h"
24 #include "lldb/Utility/Log.h"
25 #include "llvm/ADT/SmallPtrSet.h"
26 
27 #include <memory>
28 
29 //#define DEBUG_STACK_FRAMES 1
30 
31 using namespace lldb;
32 using namespace lldb_private;
33 
34 // StackFrameList constructor
35 StackFrameList::StackFrameList(Thread &thread,
36                                const lldb::StackFrameListSP &prev_frames_sp,
37                                bool show_inline_frames)
38     : m_thread(thread), m_prev_frames_sp(prev_frames_sp), m_mutex(), m_frames(),
39       m_selected_frame_idx(0), m_concrete_frames_fetched(0),
40       m_current_inlined_depth(UINT32_MAX),
41       m_current_inlined_pc(LLDB_INVALID_ADDRESS),
42       m_show_inlined_frames(show_inline_frames) {
43   if (prev_frames_sp) {
44     m_current_inlined_depth = prev_frames_sp->m_current_inlined_depth;
45     m_current_inlined_pc = prev_frames_sp->m_current_inlined_pc;
46   }
47 }
48 
49 StackFrameList::~StackFrameList() {
  // Call Clear() since it takes the lock and clears the stack frame list, in
  // case another thread is currently using this stack frame list.
52   Clear();
53 }
54 
55 void StackFrameList::CalculateCurrentInlinedDepth() {
56   uint32_t cur_inlined_depth = GetCurrentInlinedDepth();
57   if (cur_inlined_depth == UINT32_MAX) {
58     ResetCurrentInlinedDepth();
59   }
60 }
61 
62 uint32_t StackFrameList::GetCurrentInlinedDepth() {
63   if (m_show_inlined_frames && m_current_inlined_pc != LLDB_INVALID_ADDRESS) {
64     lldb::addr_t cur_pc = m_thread.GetRegisterContext()->GetPC();
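    // The recorded inlined depth only applies while the thread is still
    // stopped at the PC it was computed for; if the PC has moved, invalidate
    // it.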
65     if (cur_pc != m_current_inlined_pc) {
66       m_current_inlined_pc = LLDB_INVALID_ADDRESS;
67       m_current_inlined_depth = UINT32_MAX;
68       Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
69       if (log && log->GetVerbose())
70         LLDB_LOGF(
71             log,
72             "GetCurrentInlinedDepth: invalidating current inlined depth.\n");
73     }
74     return m_current_inlined_depth;
75   } else {
76     return UINT32_MAX;
77   }
78 }
79 
80 void StackFrameList::ResetCurrentInlinedDepth() {
81   if (!m_show_inlined_frames)
82     return;
83 
84   std::lock_guard<std::recursive_mutex> guard(m_mutex);
85 
86   GetFramesUpTo(0);
87   if (m_frames.empty())
88     return;
89   if (!m_frames[0]->IsInlined()) {
90     m_current_inlined_depth = UINT32_MAX;
91     m_current_inlined_pc = LLDB_INVALID_ADDRESS;
92     Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
93     if (log && log->GetVerbose())
94       LLDB_LOGF(
95           log,
96           "ResetCurrentInlinedDepth: Invalidating current inlined depth.\n");
97     return;
98   }
99 
100   // We only need to do something special about inlined blocks when we are
101   // at the beginning of an inlined function:
102   // FIXME: We probably also have to do something special if the PC is at
103   // the END of an inlined function, which coincides with the end of either
104   // its containing function or another inlined function.
105 
106   Block *block_ptr = m_frames[0]->GetFrameBlock();
107   if (!block_ptr)
108     return;
109 
110   Address pc_as_address;
111   lldb::addr_t curr_pc = m_thread.GetRegisterContext()->GetPC();
112   pc_as_address.SetLoadAddress(curr_pc, &(m_thread.GetProcess()->GetTarget()));
113   AddressRange containing_range;
114   if (!block_ptr->GetRangeContainingAddress(pc_as_address, containing_range) ||
115       pc_as_address != containing_range.GetBaseAddress())
116     return;
117 
118   // If we got here because of a breakpoint hit, then set the inlined depth
119   // depending on where the breakpoint was set. If we got here because of a
  // crash, then set the inlined depth to the deepest block.  Otherwise,
121   // we stopped here naturally as the result of a step, so set ourselves in the
122   // containing frame of the whole set of nested inlines, so the user can then
123   // "virtually" step into the frames one by one, or next over the whole mess.
124   // Note: We don't have to handle being somewhere in the middle of the stack
125   // here, since ResetCurrentInlinedDepth doesn't get called if there is a
126   // valid inlined depth set.
127   StopInfoSP stop_info_sp = m_thread.GetStopInfo();
128   if (!stop_info_sp)
129     return;
130   switch (stop_info_sp->GetStopReason()) {
131   case eStopReasonWatchpoint:
132   case eStopReasonException:
133   case eStopReasonExec:
134   case eStopReasonSignal:
135     // In all these cases we want to stop in the deepest frame.
136     m_current_inlined_pc = curr_pc;
137     m_current_inlined_depth = 0;
138     break;
139   case eStopReasonBreakpoint: {
    // FIXME: Figure out what this breakpoint is doing, and set the inline
141     // depth appropriately.  Be careful to take into account breakpoints that
142     // implement step over prologue, since that should do the default
143     // calculation. For now, if the breakpoints corresponding to this hit are
144     // all internal, I set the stop location to the top of the inlined stack,
145     // since that will make things like stepping over prologues work right.
    // But if there are any non-internal breakpoints I go to the bottom of the
147     // stack, since that was the old behavior.
148     uint32_t bp_site_id = stop_info_sp->GetValue();
149     BreakpointSiteSP bp_site_sp(
150         m_thread.GetProcess()->GetBreakpointSiteList().FindByID(bp_site_id));
151     bool all_internal = true;
152     if (bp_site_sp) {
153       uint32_t num_owners = bp_site_sp->GetNumberOfOwners();
154       for (uint32_t i = 0; i < num_owners; i++) {
155         Breakpoint &bp_ref = bp_site_sp->GetOwnerAtIndex(i)->GetBreakpoint();
156         if (!bp_ref.IsInternal()) {
157           all_internal = false;
158         }
159       }
160     }
161     if (!all_internal) {
162       m_current_inlined_pc = curr_pc;
163       m_current_inlined_depth = 0;
164       break;
165     }
166   }
167     LLVM_FALLTHROUGH;
168   default: {
    // Otherwise, we should set ourselves at the container of the inlining, so
    // that the user can descend into the inlined frames. So first we check
    // whether we have more than one inlined block sharing this PC:
172     int num_inlined_functions = 0;
173 
174     for (Block *container_ptr = block_ptr->GetInlinedParent();
175          container_ptr != nullptr;
176          container_ptr = container_ptr->GetInlinedParent()) {
177       if (!container_ptr->GetRangeContainingAddress(pc_as_address,
178                                                     containing_range))
179         break;
180       if (pc_as_address != containing_range.GetBaseAddress())
181         break;
182 
183       num_inlined_functions++;
184     }
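    // block_ptr itself accounts for one inlined frame, and
    // num_inlined_functions counts its inlined parents that also start at
    // this PC, so hiding (num_inlined_functions + 1) frames leaves the user
    // in the containing frame of the whole nested-inline set.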
185     m_current_inlined_pc = curr_pc;
186     m_current_inlined_depth = num_inlined_functions + 1;
187     Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
188     if (log && log->GetVerbose())
189       LLDB_LOGF(log,
190                 "ResetCurrentInlinedDepth: setting inlined "
191                 "depth: %d 0x%" PRIx64 ".\n",
192                 m_current_inlined_depth, curr_pc);
193 
194     break;
195   }
196   }
197 }
198 
199 bool StackFrameList::DecrementCurrentInlinedDepth() {
200   if (m_show_inlined_frames) {
201     uint32_t current_inlined_depth = GetCurrentInlinedDepth();
202     if (current_inlined_depth != UINT32_MAX) {
203       if (current_inlined_depth > 0) {
204         m_current_inlined_depth--;
205         return true;
206       }
207     }
208   }
209   return false;
210 }
211 
212 void StackFrameList::SetCurrentInlinedDepth(uint32_t new_depth) {
213   m_current_inlined_depth = new_depth;
214   if (new_depth == UINT32_MAX)
215     m_current_inlined_pc = LLDB_INVALID_ADDRESS;
216   else
217     m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC();
218 }
219 
220 void StackFrameList::GetOnlyConcreteFramesUpTo(uint32_t end_idx,
221                                                Unwind &unwinder) {
222   assert(m_thread.IsValid() && "Expected valid thread");
223   assert(m_frames.size() <= end_idx && "Expected there to be frames to fill");
224 
225   if (end_idx < m_concrete_frames_fetched)
226     return;
227 
228   uint32_t num_frames = unwinder.GetFramesUpTo(end_idx);
229   if (num_frames <= end_idx + 1) {
230     // Done unwinding.
231     m_concrete_frames_fetched = UINT32_MAX;
232   }
233 
234   // Don't create the frames eagerly. Defer this work to GetFrameAtIndex,
235   // which can lazily query the unwinder to create frames.
236   m_frames.resize(num_frames);
237 }
238 
239 /// A sequence of calls that comprise some portion of a backtrace. Each frame
240 /// is represented as a pair of a callee (Function *) and an address within the
241 /// callee.
242 struct CallDescriptor {
243   Function *func;
244   CallEdge::AddrType address_type = CallEdge::AddrType::Call;
245   addr_t address = LLDB_INVALID_ADDRESS;
246 };
247 using CallSequence = std::vector<CallDescriptor>;
248 
249 /// Find the unique path through the call graph from \p begin (with return PC
250 /// \p return_pc) to \p end. On success this path is stored into \p path, and
251 /// on failure \p path is unchanged.
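///
/// For example, if \p begin made an ordinary call (with return PC
/// \p return_pc) to some function f, and f then tail-called \p end, the
/// unwinder sees no frame for f; on success \p path contains a single entry
/// describing f so that an artificial frame can later be synthesized for it.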
252 static void FindInterveningFrames(Function &begin, Function &end,
253                                   ExecutionContext &exe_ctx, Target &target,
254                                   addr_t return_pc, CallSequence &path,
255                                   ModuleList &images, Log *log) {
256   LLDB_LOG(log, "Finding frames between {0} and {1}, retn-pc={2:x}",
257            begin.GetDisplayName(), end.GetDisplayName(), return_pc);
258 
259   // Find a non-tail calling edge with the correct return PC.
260   if (log)
261     for (const auto &edge : begin.GetCallEdges())
262       LLDB_LOG(log, "FindInterveningFrames: found call with retn-PC = {0:x}",
263                edge->GetReturnPCAddress(begin, target));
264   CallEdge *first_edge = begin.GetCallEdgeForReturnAddress(return_pc, target);
265   if (!first_edge) {
266     LLDB_LOG(log, "No call edge outgoing from {0} with retn-PC == {1:x}",
267              begin.GetDisplayName(), return_pc);
268     return;
269   }
270 
271   // The first callee may not be resolved, or there may be nothing to fill in.
272   Function *first_callee = first_edge->GetCallee(images, exe_ctx);
273   if (!first_callee) {
274     LLDB_LOG(log, "Could not resolve callee");
275     return;
276   }
277   if (first_callee == &end) {
278     LLDB_LOG(log, "Not searching further, first callee is {0} (retn-PC: {1:x})",
279              end.GetDisplayName(), return_pc);
280     return;
281   }
282 
283   // Run DFS on the tail-calling edges out of the first callee to find \p end.
284   // Fully explore the set of functions reachable from the first edge via tail
285   // calls in order to detect ambiguous executions.
286   struct DFS {
287     CallSequence active_path = {};
288     CallSequence solution_path = {};
289     llvm::SmallPtrSet<Function *, 2> visited_nodes = {};
290     bool ambiguous = false;
291     Function *end;
292     ModuleList &images;
293     Target &target;
294     ExecutionContext &context;
295 
296     DFS(Function *end, ModuleList &images, Target &target,
297         ExecutionContext &context)
298         : end(end), images(images), target(target), context(context) {}
299 
300     void search(CallEdge &first_edge, Function &first_callee,
301                 CallSequence &path) {
302       dfs(first_edge, first_callee);
303       if (!ambiguous)
304         path = std::move(solution_path);
305     }
306 
307     void dfs(CallEdge &current_edge, Function &callee) {
308       // Found a path to the target function.
309       if (&callee == end) {
310         if (solution_path.empty())
311           solution_path = active_path;
312         else
313           ambiguous = true;
314         return;
315       }
316 
317       // Terminate the search if tail recursion is found, or more generally if
318       // there's more than one way to reach a target. This errs on the side of
319       // caution: it conservatively stops searching when some solutions are
320       // still possible to save time in the average case.
321       if (!visited_nodes.insert(&callee).second) {
322         ambiguous = true;
323         return;
324       }
325 
326       // Search the calls made from this callee.
327       active_path.push_back(CallDescriptor{&callee});
328       for (const auto &edge : callee.GetTailCallingEdges()) {
329         Function *next_callee = edge->GetCallee(images, context);
330         if (!next_callee)
331           continue;
332 
333         std::tie(active_path.back().address_type, active_path.back().address) =
334             edge->GetCallerAddress(callee, target);
335 
336         dfs(*edge, *next_callee);
337         if (ambiguous)
338           return;
339       }
340       active_path.pop_back();
341     }
342   };
343 
344   DFS(&end, images, target, exe_ctx).search(*first_edge, *first_callee, path);
345 }
346 
347 /// Given that \p next_frame will be appended to the frame list, synthesize
348 /// tail call frames between the current end of the list and \p next_frame.
349 /// If any frames are added, adjust the frame index of \p next_frame.
350 ///
351 ///   --------------
352 ///   |    ...     | <- Completed frames.
353 ///   --------------
354 ///   | prev_frame |
355 ///   --------------
356 ///   |    ...     | <- Artificial frames inserted here.
357 ///   --------------
358 ///   | next_frame |
359 ///   --------------
360 ///   |    ...     | <- Not-yet-visited frames.
361 ///   --------------
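///
/// For example, if the unwinder transitions directly from prev_frame (in some
/// function E) to next_frame (in E's apparent caller C), but E was actually
/// reached because C called F and F tail-called E, then a single artificial
/// frame for F is inserted between them.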
362 void StackFrameList::SynthesizeTailCallFrames(StackFrame &next_frame) {
363   // Cannot synthesize tail call frames when the stack is empty (there is no
364   // "previous" frame).
365   if (m_frames.empty())
366     return;
367 
368   TargetSP target_sp = next_frame.CalculateTarget();
369   if (!target_sp)
370     return;
371 
372   lldb::RegisterContextSP next_reg_ctx_sp = next_frame.GetRegisterContext();
373   if (!next_reg_ctx_sp)
374     return;
375 
376   Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
377 
378   StackFrame &prev_frame = *m_frames.back().get();
379 
380   // Find the functions prev_frame and next_frame are stopped in. The function
381   // objects are needed to search the lazy call graph for intervening frames.
382   Function *prev_func =
383       prev_frame.GetSymbolContext(eSymbolContextFunction).function;
384   if (!prev_func) {
385     LLDB_LOG(log, "SynthesizeTailCallFrames: can't find previous function");
386     return;
387   }
388   Function *next_func =
389       next_frame.GetSymbolContext(eSymbolContextFunction).function;
390   if (!next_func) {
391     LLDB_LOG(log, "SynthesizeTailCallFrames: can't find next function");
392     return;
393   }
394 
395   // Try to find the unique sequence of (tail) calls which led from next_frame
396   // to prev_frame.
397   CallSequence path;
398   addr_t return_pc = next_reg_ctx_sp->GetPC();
399   Target &target = *target_sp.get();
400   ModuleList &images = next_frame.CalculateTarget()->GetImages();
401   ExecutionContext exe_ctx(target_sp, /*get_process=*/true);
402   exe_ctx.SetFramePtr(&next_frame);
403   FindInterveningFrames(*next_func, *prev_func, exe_ctx, target, return_pc,
404                         path, images, log);
405 
406   // Push synthetic tail call frames.
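  // The path is ordered caller-side first, so walk it in reverse to append
  // the artificial frames callee-side (youngest) first, matching the order
  // of m_frames.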
407   for (auto calleeInfo : llvm::reverse(path)) {
408     Function *callee = calleeInfo.func;
409     uint32_t frame_idx = m_frames.size();
410     uint32_t concrete_frame_idx = next_frame.GetConcreteFrameIndex();
411     addr_t cfa = LLDB_INVALID_ADDRESS;
412     bool cfa_is_valid = false;
413     addr_t pc = calleeInfo.address;
414     // If the callee address refers to the call instruction, we do not want to
415     // subtract 1 from this value.
416     const bool behaves_like_zeroth_frame =
417         calleeInfo.address_type == CallEdge::AddrType::Call;
418     SymbolContext sc;
419     callee->CalculateSymbolContext(&sc);
420     auto synth_frame = std::make_shared<StackFrame>(
421         m_thread.shared_from_this(), frame_idx, concrete_frame_idx, cfa,
422         cfa_is_valid, pc, StackFrame::Kind::Artificial,
423         behaves_like_zeroth_frame, &sc);
424     m_frames.push_back(synth_frame);
425     LLDB_LOG(log, "Pushed frame {0} at {1:x}", callee->GetDisplayName(), pc);
426   }
427 
428   // If any frames were created, adjust next_frame's index.
429   if (!path.empty())
430     next_frame.SetFrameIndex(m_frames.size());
431 }
432 
433 void StackFrameList::GetFramesUpTo(uint32_t end_idx) {
434   // Do not fetch frames for an invalid thread.
435   if (!m_thread.IsValid())
436     return;
437 
  // If we've already gotten more frames than were asked for, or we've already
  // finished unwinding, return.
440   if (m_frames.size() > end_idx || GetAllFramesFetched())
441     return;
442 
443   Unwind &unwinder = m_thread.GetUnwinder();
444 
445   if (!m_show_inlined_frames) {
446     GetOnlyConcreteFramesUpTo(end_idx, unwinder);
447     return;
448   }
449 
450 #if defined(DEBUG_STACK_FRAMES)
451   StreamFile s(stdout, false);
452 #endif
453   // If we are hiding some frames from the outside world, we need to add
454   // those onto the total count of frames to fetch.  However, we don't need
455   // to do that if end_idx is 0 since in that case we always get the first
456   // concrete frame and all the inlined frames below it...  And of course, if
457   // end_idx is UINT32_MAX that means get all, so just do that...
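  // For example, if the current inlined depth is 2, visible frame N lives at
  // m_frames[N + 2], so fetching up to visible index 3 means unwinding up to
  // index 5.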
458 
459   uint32_t inlined_depth = 0;
460   if (end_idx > 0 && end_idx != UINT32_MAX) {
461     inlined_depth = GetCurrentInlinedDepth();
462     if (inlined_depth != UINT32_MAX) {
463       if (end_idx > 0)
464         end_idx += inlined_depth;
465     }
466   }
467 
468   StackFrameSP unwind_frame_sp;
469   do {
470     uint32_t idx = m_concrete_frames_fetched++;
471     lldb::addr_t pc = LLDB_INVALID_ADDRESS;
472     lldb::addr_t cfa = LLDB_INVALID_ADDRESS;
473     bool behaves_like_zeroth_frame = (idx == 0);
474     if (idx == 0) {
      // We might have already created frame zero; only create it if we need
      // to.
477       if (m_frames.empty()) {
478         RegisterContextSP reg_ctx_sp(m_thread.GetRegisterContext());
479 
480         if (reg_ctx_sp) {
481           const bool success = unwinder.GetFrameInfoAtIndex(
482               idx, cfa, pc, behaves_like_zeroth_frame);
483           // There shouldn't be any way not to get the frame info for frame
          // 0. But if the unwinder can't make one, let's make one by hand
485           // with the SP as the CFA and see if that gets any further.
486           if (!success) {
487             cfa = reg_ctx_sp->GetSP();
488             pc = reg_ctx_sp->GetPC();
489           }
490 
491           unwind_frame_sp = std::make_shared<StackFrame>(
492               m_thread.shared_from_this(), m_frames.size(), idx, reg_ctx_sp,
493               cfa, pc, behaves_like_zeroth_frame, nullptr);
494           m_frames.push_back(unwind_frame_sp);
495         }
496       } else {
497         unwind_frame_sp = m_frames.front();
498         cfa = unwind_frame_sp->m_id.GetCallFrameAddress();
499       }
500     } else {
501       const bool success =
502           unwinder.GetFrameInfoAtIndex(idx, cfa, pc, behaves_like_zeroth_frame);
503       if (!success) {
504         // We've gotten to the end of the stack.
505         SetAllFramesFetched();
506         break;
507       }
508       const bool cfa_is_valid = true;
509       unwind_frame_sp = std::make_shared<StackFrame>(
510           m_thread.shared_from_this(), m_frames.size(), idx, cfa, cfa_is_valid,
511           pc, StackFrame::Kind::Regular, behaves_like_zeroth_frame, nullptr);
512 
513       // Create synthetic tail call frames between the previous frame and the
514       // newly-found frame. The new frame's index may change after this call,
515       // although its concrete index will stay the same.
516       SynthesizeTailCallFrames(*unwind_frame_sp.get());
517 
518       m_frames.push_back(unwind_frame_sp);
519     }
520 
521     assert(unwind_frame_sp);
522     SymbolContext unwind_sc = unwind_frame_sp->GetSymbolContext(
523         eSymbolContextBlock | eSymbolContextFunction);
524     Block *unwind_block = unwind_sc.block;
525     TargetSP target_sp = m_thread.CalculateTarget();
526     if (unwind_block) {
527       Address curr_frame_address(
528           unwind_frame_sp->GetFrameCodeAddressForSymbolication());
529 
530       SymbolContext next_frame_sc;
531       Address next_frame_address;
532 
533       while (unwind_sc.GetParentOfInlinedScope(
534           curr_frame_address, next_frame_sc, next_frame_address)) {
535         next_frame_sc.line_entry.ApplyFileMappings(target_sp);
536         behaves_like_zeroth_frame = false;
537         StackFrameSP frame_sp(new StackFrame(
538             m_thread.shared_from_this(), m_frames.size(), idx,
539             unwind_frame_sp->GetRegisterContextSP(), cfa, next_frame_address,
540             behaves_like_zeroth_frame, &next_frame_sc));
541 
542         m_frames.push_back(frame_sp);
543         unwind_sc = next_frame_sc;
544         curr_frame_address = next_frame_address;
545       }
546     }
547   } while (m_frames.size() - 1 < end_idx);
548 
549   // Don't try to merge till you've calculated all the frames in this stack.
550   if (GetAllFramesFetched() && m_prev_frames_sp) {
551     StackFrameList *prev_frames = m_prev_frames_sp.get();
552     StackFrameList *curr_frames = this;
553 
554 #if defined(DEBUG_STACK_FRAMES)
555     s.PutCString("\nprev_frames:\n");
556     prev_frames->Dump(&s);
557     s.PutCString("\ncurr_frames:\n");
558     curr_frames->Dump(&s);
559     s.EOL();
560 #endif
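    // Walk both lists from the oldest frame (highest index) toward the
    // youngest, reusing the previous frame object wherever the stack IDs
    // still match so that existing StackFrame pointers remain stable.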
561     size_t curr_frame_num, prev_frame_num;
562 
563     for (curr_frame_num = curr_frames->m_frames.size(),
564         prev_frame_num = prev_frames->m_frames.size();
565          curr_frame_num > 0 && prev_frame_num > 0;
566          --curr_frame_num, --prev_frame_num) {
567       const size_t curr_frame_idx = curr_frame_num - 1;
568       const size_t prev_frame_idx = prev_frame_num - 1;
569       StackFrameSP curr_frame_sp(curr_frames->m_frames[curr_frame_idx]);
570       StackFrameSP prev_frame_sp(prev_frames->m_frames[prev_frame_idx]);
571 
572 #if defined(DEBUG_STACK_FRAMES)
573       s.Printf("\n\nCurr frame #%u ", curr_frame_idx);
574       if (curr_frame_sp)
575         curr_frame_sp->Dump(&s, true, false);
576       else
577         s.PutCString("NULL");
578       s.Printf("\nPrev frame #%u ", prev_frame_idx);
579       if (prev_frame_sp)
580         prev_frame_sp->Dump(&s, true, false);
581       else
582         s.PutCString("NULL");
583 #endif
584 
585       StackFrame *curr_frame = curr_frame_sp.get();
586       StackFrame *prev_frame = prev_frame_sp.get();
587 
588       if (curr_frame == nullptr || prev_frame == nullptr)
589         break;
590 
591       // Check the stack ID to make sure they are equal.
592       if (curr_frame->GetStackID() != prev_frame->GetStackID())
593         break;
594 
595       prev_frame->UpdatePreviousFrameFromCurrentFrame(*curr_frame);
596       // Now copy the fixed up previous frame into the current frames so the
597       // pointer doesn't change.
598       m_frames[curr_frame_idx] = prev_frame_sp;
599 
600 #if defined(DEBUG_STACK_FRAMES)
601       s.Printf("\n    Copying previous frame to current frame");
602 #endif
603     }
    // We are done with the old stack frame list; we can release it now.
605     m_prev_frames_sp.reset();
606   }
607 
608 #if defined(DEBUG_STACK_FRAMES)
609   s.PutCString("\n\nNew frames:\n");
610   Dump(&s);
611   s.EOL();
612 #endif
613 }
614 
615 uint32_t StackFrameList::GetNumFrames(bool can_create) {
616   std::lock_guard<std::recursive_mutex> guard(m_mutex);
617 
618   if (can_create)
619     GetFramesUpTo(UINT32_MAX);
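
  // GetVisibleStackFrameIndex subtracts any frames hidden by the current
  // inlined depth, so this yields the frame count as seen by the user.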
620 
621   return GetVisibleStackFrameIndex(m_frames.size());
622 }
623 
624 void StackFrameList::Dump(Stream *s) {
625   if (s == nullptr)
626     return;
627 
628   std::lock_guard<std::recursive_mutex> guard(m_mutex);
629 
630   const_iterator pos, begin = m_frames.begin(), end = m_frames.end();
631   for (pos = begin; pos != end; ++pos) {
632     StackFrame *frame = (*pos).get();
633     s->Printf("%p: ", static_cast<void *>(frame));
634     if (frame) {
635       frame->GetStackID().Dump(s);
636       frame->DumpUsingSettingsFormat(s);
637     } else
638       s->Printf("frame #%u", (uint32_t)std::distance(begin, pos));
639     s->EOL();
640   }
641   s->EOL();
642 }
643 
644 StackFrameSP StackFrameList::GetFrameAtIndex(uint32_t idx) {
645   StackFrameSP frame_sp;
646   std::lock_guard<std::recursive_mutex> guard(m_mutex);
647   uint32_t original_idx = idx;
648 
649   uint32_t inlined_depth = GetCurrentInlinedDepth();
650   if (inlined_depth != UINT32_MAX)
651     idx += inlined_depth;
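  // (The first inlined_depth entries of m_frames are hidden from callers, so
  // user-visible index N maps to m_frames[N + inlined_depth].)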
652 
653   if (idx < m_frames.size())
654     frame_sp = m_frames[idx];
655 
656   if (frame_sp)
657     return frame_sp;
658 
  // GetFramesUpTo will fill m_frames with as many frames as you asked for, if
  // there are that many.  If there weren't, then you asked for too many
  // frames.
661   GetFramesUpTo(idx);
662   if (idx < m_frames.size()) {
663     if (m_show_inlined_frames) {
664       // When inline frames are enabled we actually create all the frames in
665       // GetFramesUpTo.
666       frame_sp = m_frames[idx];
667     } else {
668       addr_t pc, cfa;
669       bool behaves_like_zeroth_frame = (idx == 0);
670       if (m_thread.GetUnwinder().GetFrameInfoAtIndex(
671               idx, cfa, pc, behaves_like_zeroth_frame)) {
672         const bool cfa_is_valid = true;
673         frame_sp = std::make_shared<StackFrame>(
674             m_thread.shared_from_this(), idx, idx, cfa, cfa_is_valid, pc,
675             StackFrame::Kind::Regular, behaves_like_zeroth_frame, nullptr);
676 
677         Function *function =
678             frame_sp->GetSymbolContext(eSymbolContextFunction).function;
679         if (function) {
          // When we aren't showing inline functions we always use the
          // topmost function block as the scope.
682           frame_sp->SetSymbolContextScope(&function->GetBlock(false));
683         } else {
          // Set the symbol scope from the symbol, regardless of whether it is
          // nullptr or valid.
686           frame_sp->SetSymbolContextScope(
687               frame_sp->GetSymbolContext(eSymbolContextSymbol).symbol);
688         }
689         SetFrameAtIndex(idx, frame_sp);
690       }
691     }
692   } else if (original_idx == 0) {
693     // There should ALWAYS be a frame at index 0.  If something went wrong with
    // the CurrentInlinedDepth such that there weren't as many frames as we
    // thought once that is taken into account, then reset the current inlined
    // depth and return the real zeroth frame.
697     if (m_frames.empty()) {
      // Why do we have a thread with zero frames? That should never
      // happen...
700       assert(!m_thread.IsValid() && "A valid thread has no frames.");
701     } else {
702       ResetCurrentInlinedDepth();
703       frame_sp = m_frames[original_idx];
704     }
705   }
706 
707   return frame_sp;
708 }
709 
710 StackFrameSP
711 StackFrameList::GetFrameWithConcreteFrameIndex(uint32_t unwind_idx) {
  // First try assuming the unwind index is the same as the frame index. The
  // frame index is always greater than or equal to the unwind index, so the
  // unwind index is a good place to start searching upward. If we have
  // inlined frames we might have 5 concrete frames (frame unwind indexes go
  // from 0-4), but we might have 15 frames after we make all the inlined
  // frames. Most of the time the unwind frame index (or the concrete frame
  // index) is the same as the frame index.
718   uint32_t frame_idx = unwind_idx;
719   StackFrameSP frame_sp(GetFrameAtIndex(frame_idx));
720   while (frame_sp) {
721     if (frame_sp->GetFrameIndex() == unwind_idx)
722       break;
723     frame_sp = GetFrameAtIndex(++frame_idx);
724   }
725   return frame_sp;
726 }
727 
728 static bool CompareStackID(const StackFrameSP &stack_sp,
729                            const StackID &stack_id) {
730   return stack_sp->GetStackID() < stack_id;
731 }
732 
733 StackFrameSP StackFrameList::GetFrameWithStackID(const StackID &stack_id) {
734   StackFrameSP frame_sp;
735 
736   if (stack_id.IsValid()) {
737     std::lock_guard<std::recursive_mutex> guard(m_mutex);
738     uint32_t frame_idx = 0;
739     // Do a binary search in case the stack frame is already in our cache
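    // (std::lower_bound requires m_frames to be sorted by StackID, with
    // younger frames comparing less than their callers.)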
740     collection::const_iterator begin = m_frames.begin();
741     collection::const_iterator end = m_frames.end();
742     if (begin != end) {
743       collection::const_iterator pos =
744           std::lower_bound(begin, end, stack_id, CompareStackID);
745       if (pos != end) {
746         if ((*pos)->GetStackID() == stack_id)
747           return *pos;
748       }
749     }
750     do {
751       frame_sp = GetFrameAtIndex(frame_idx);
752       if (frame_sp && frame_sp->GetStackID() == stack_id)
753         break;
754       frame_idx++;
755     } while (frame_sp);
756   }
757   return frame_sp;
758 }
759 
760 bool StackFrameList::SetFrameAtIndex(uint32_t idx, StackFrameSP &frame_sp) {
761   if (idx >= m_frames.size())
762     m_frames.resize(idx + 1);
763   // Make sure allocation succeeded by checking bounds again
764   if (idx < m_frames.size()) {
765     m_frames[idx] = frame_sp;
766     return true;
767   }
768   return false; // resize failed, out of memory?
769 }
770 
771 uint32_t StackFrameList::GetSelectedFrameIndex() const {
772   std::lock_guard<std::recursive_mutex> guard(m_mutex);
773   return m_selected_frame_idx;
774 }
775 
776 uint32_t StackFrameList::SetSelectedFrame(lldb_private::StackFrame *frame) {
777   std::lock_guard<std::recursive_mutex> guard(m_mutex);
778   const_iterator pos;
779   const_iterator begin = m_frames.begin();
780   const_iterator end = m_frames.end();
781   m_selected_frame_idx = 0;
782   for (pos = begin; pos != end; ++pos) {
783     if (pos->get() == frame) {
784       m_selected_frame_idx = std::distance(begin, pos);
785       uint32_t inlined_depth = GetCurrentInlinedDepth();
786       if (inlined_depth != UINT32_MAX)
787         m_selected_frame_idx -= inlined_depth;
788       break;
789     }
790   }
791   SetDefaultFileAndLineToSelectedFrame();
792   return m_selected_frame_idx;
793 }
794 
795 bool StackFrameList::SetSelectedFrameByIndex(uint32_t idx) {
796   std::lock_guard<std::recursive_mutex> guard(m_mutex);
797   StackFrameSP frame_sp(GetFrameAtIndex(idx));
798   if (frame_sp) {
799     SetSelectedFrame(frame_sp.get());
800     return true;
801   } else
802     return false;
803 }
804 
805 void StackFrameList::SetDefaultFileAndLineToSelectedFrame() {
806   if (m_thread.GetID() ==
807       m_thread.GetProcess()->GetThreadList().GetSelectedThread()->GetID()) {
808     StackFrameSP frame_sp(GetFrameAtIndex(GetSelectedFrameIndex()));
809     if (frame_sp) {
810       SymbolContext sc = frame_sp->GetSymbolContext(eSymbolContextLineEntry);
811       if (sc.line_entry.file)
812         m_thread.CalculateTarget()->GetSourceManager().SetDefaultFileAndLine(
813             sc.line_entry.file, sc.line_entry.line);
814     }
815   }
816 }
817 
// The thread has been run, so reset the number of stack frames to zero so we
// can lazily determine how many frames we have.
820 void StackFrameList::Clear() {
821   std::lock_guard<std::recursive_mutex> guard(m_mutex);
822   m_frames.clear();
823   m_concrete_frames_fetched = 0;
824 }
825 
826 void StackFrameList::Merge(std::unique_ptr<StackFrameList> &curr_up,
827                            lldb::StackFrameListSP &prev_sp) {
828   std::unique_lock<std::recursive_mutex> current_lock, previous_lock;
829   if (curr_up)
830     current_lock = std::unique_lock<std::recursive_mutex>(curr_up->m_mutex);
831   if (prev_sp)
832     previous_lock = std::unique_lock<std::recursive_mutex>(prev_sp->m_mutex);
833 
834 #if defined(DEBUG_STACK_FRAMES)
835   StreamFile s(stdout, false);
836   s.PutCString("\n\nStackFrameList::Merge():\nPrev:\n");
837   if (prev_sp)
838     prev_sp->Dump(&s);
839   else
840     s.PutCString("NULL");
841   s.PutCString("\nCurr:\n");
842   if (curr_up)
843     curr_up->Dump(&s);
844   else
845     s.PutCString("NULL");
846   s.EOL();
847 #endif
848 
849   if (!curr_up || curr_up->GetNumFrames(false) == 0) {
850 #if defined(DEBUG_STACK_FRAMES)
851     s.PutCString("No current frames, leave previous frames alone...\n");
852 #endif
853     curr_up.release();
854     return;
855   }
856 
857   if (!prev_sp || prev_sp->GetNumFrames(false) == 0) {
858 #if defined(DEBUG_STACK_FRAMES)
859     s.PutCString("No previous frames, so use current frames...\n");
860 #endif
    // We either don't have any previous frames, or we have more than one
    // current frame, which means we have all the frames and can safely
    // replace our previous frames.
864     prev_sp.reset(curr_up.release());
865     return;
866   }
867 
868   const uint32_t num_curr_frames = curr_up->GetNumFrames(false);
869 
870   if (num_curr_frames > 1) {
871 #if defined(DEBUG_STACK_FRAMES)
872     s.PutCString(
873         "We have more than one current frame, so use current frames...\n");
874 #endif
    // We have more than one current frame, which means we have all the frames
    // and can safely replace our previous frames.
877     prev_sp.reset(curr_up.release());
878 
879 #if defined(DEBUG_STACK_FRAMES)
880     s.PutCString("\nMerged:\n");
881     prev_sp->Dump(&s);
882 #endif
883     return;
884   }
885 
886   StackFrameSP prev_frame_zero_sp(prev_sp->GetFrameAtIndex(0));
887   StackFrameSP curr_frame_zero_sp(curr_up->GetFrameAtIndex(0));
888   StackID curr_stack_id(curr_frame_zero_sp->GetStackID());
889   StackID prev_stack_id(prev_frame_zero_sp->GetStackID());
890 
891 #if defined(DEBUG_STACK_FRAMES)
892   const uint32_t num_prev_frames = prev_sp->GetNumFrames(false);
893   s.Printf("\n%u previous frames with one current frame\n", num_prev_frames);
894 #endif
895 
  // We have only a single current frame; compare it against frame #0 of the
  // previous stack frames.
898   if (curr_stack_id == prev_stack_id) {
899 #if defined(DEBUG_STACK_FRAMES)
900     s.Printf("\nPrevious frame #0 is same as current frame #0, merge the "
901              "cached data\n");
902 #endif
903 
904     curr_frame_zero_sp->UpdateCurrentFrameFromPreviousFrame(
905         *prev_frame_zero_sp);
906     //        prev_frame_zero_sp->UpdatePreviousFrameFromCurrentFrame
907     //        (*curr_frame_zero_sp);
908     //        prev_sp->SetFrameAtIndex (0, prev_frame_zero_sp);
909   } else if (curr_stack_id < prev_stack_id) {
910 #if defined(DEBUG_STACK_FRAMES)
911     s.Printf("\nCurrent frame #0 has a stack ID that is less than the previous "
912              "frame #0, insert current frame zero in front of previous\n");
913 #endif
914     prev_sp->m_frames.insert(prev_sp->m_frames.begin(), curr_frame_zero_sp);
915   }
916 
917   curr_up.release();
918 
919 #if defined(DEBUG_STACK_FRAMES)
920   s.PutCString("\nMerged:\n");
921   prev_sp->Dump(&s);
922 #endif
923 }
924 
925 lldb::StackFrameSP
926 StackFrameList::GetStackFrameSPForStackFramePtr(StackFrame *stack_frame_ptr) {
927   const_iterator pos;
928   const_iterator begin = m_frames.begin();
929   const_iterator end = m_frames.end();
930   lldb::StackFrameSP ret_sp;
931 
932   for (pos = begin; pos != end; ++pos) {
933     if (pos->get() == stack_frame_ptr) {
934       ret_sp = (*pos);
935       break;
936     }
937   }
938   return ret_sp;
939 }
940 
941 size_t StackFrameList::GetStatus(Stream &strm, uint32_t first_frame,
942                                  uint32_t num_frames, bool show_frame_info,
943                                  uint32_t num_frames_with_source,
944                                  bool show_unique,
945                                  const char *selected_frame_marker) {
946   size_t num_frames_displayed = 0;
947 
948   if (num_frames == 0)
949     return 0;
950 
951   StackFrameSP frame_sp;
952   uint32_t frame_idx = 0;
953   uint32_t last_frame;
954 
955   // Don't let the last frame wrap around...
956   if (num_frames == UINT32_MAX)
957     last_frame = UINT32_MAX;
958   else
959     last_frame = first_frame + num_frames;
960 
961   StackFrameSP selected_frame_sp = m_thread.GetSelectedFrame();
962   const char *unselected_marker = nullptr;
963   std::string buffer;
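  // If a selected-frame marker was given, pad unselected frames with an
  // equally wide run of spaces so the frame output stays aligned.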
964   if (selected_frame_marker) {
965     size_t len = strlen(selected_frame_marker);
966     buffer.insert(buffer.begin(), len, ' ');
967     unselected_marker = buffer.c_str();
968   }
969   const char *marker = nullptr;
970 
971   for (frame_idx = first_frame; frame_idx < last_frame; ++frame_idx) {
972     frame_sp = GetFrameAtIndex(frame_idx);
973     if (!frame_sp)
974       break;
975 
976     if (selected_frame_marker != nullptr) {
977       if (frame_sp == selected_frame_sp)
978         marker = selected_frame_marker;
979       else
980         marker = unselected_marker;
981     }
982 
983     if (!frame_sp->GetStatus(strm, show_frame_info,
                             num_frames_with_source > (frame_idx - first_frame),
985                              show_unique, marker))
986       break;
987     ++num_frames_displayed;
988   }
989 
990   strm.IndentLess();
991   return num_frames_displayed;
992 }
993