//===-- StackFrameList.cpp --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/StackFrameList.h"
#include "lldb/Breakpoint/Breakpoint.h"
#include "lldb/Breakpoint/BreakpointLocation.h"
#include "lldb/Core/SourceManager.h"
#include "lldb/Core/StreamFile.h"
#include "lldb/Symbol/Block.h"
#include "lldb/Symbol/Function.h"
#include "lldb/Symbol/Symbol.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/StackFrame.h"
#include "lldb/Target/StopInfo.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/Unwind.h"
#include "lldb/Utility/Log.h"
#include "llvm/ADT/SmallPtrSet.h"

#include <memory>

//#define DEBUG_STACK_FRAMES 1

using namespace lldb;
using namespace lldb_private;

//----------------------------------------------------------------------
// StackFrameList constructor
//----------------------------------------------------------------------
StackFrameList::StackFrameList(Thread &thread,
                               const lldb::StackFrameListSP &prev_frames_sp,
                               bool show_inline_frames)
    : m_thread(thread), m_prev_frames_sp(prev_frames_sp), m_mutex(), m_frames(),
      m_selected_frame_idx(0), m_concrete_frames_fetched(0),
      m_current_inlined_depth(UINT32_MAX),
      m_current_inlined_pc(LLDB_INVALID_ADDRESS),
      m_show_inlined_frames(show_inline_frames) {
  if (prev_frames_sp) {
    m_current_inlined_depth = prev_frames_sp->m_current_inlined_depth;
    m_current_inlined_pc = prev_frames_sp->m_current_inlined_pc;
  }
}

StackFrameList::~StackFrameList() {
  // Call Clear() since it takes the list's lock before clearing the stack
  // frame list, in case another thread is currently using this list.
  Clear();
}

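// If the current inlined depth has been invalidated (for example because the
// PC changed), recompute it from the thread's current stop location.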
void StackFrameList::CalculateCurrentInlinedDepth() {
  uint32_t cur_inlined_depth = GetCurrentInlinedDepth();
  if (cur_inlined_depth == UINT32_MAX) {
    ResetCurrentInlinedDepth();
  }
}

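// Return the current inlined depth, or UINT32_MAX if there is none. If the
// thread's PC no longer matches the PC at which the depth was recorded, the
// cached value is invalidated first.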
uint32_t StackFrameList::GetCurrentInlinedDepth() {
  if (m_show_inlined_frames && m_current_inlined_pc != LLDB_INVALID_ADDRESS) {
    lldb::addr_t cur_pc = m_thread.GetRegisterContext()->GetPC();
    if (cur_pc != m_current_inlined_pc) {
      m_current_inlined_pc = LLDB_INVALID_ADDRESS;
      m_current_inlined_depth = UINT32_MAX;
      Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
      if (log && log->GetVerbose())
        log->Printf(
            "GetCurrentInlinedDepth: invalidating current inlined depth.\n");
    }
    return m_current_inlined_depth;
  } else {
    return UINT32_MAX;
  }
}

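// Recompute the current inlined depth from scratch. This only matters when
// frame 0's PC is at the start of an inlined block; the resulting depth then
// depends on the thread's stop reason.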
void StackFrameList::ResetCurrentInlinedDepth() {
  if (!m_show_inlined_frames)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  GetFramesUpTo(0);
  if (m_frames.empty())
    return;
  if (!m_frames[0]->IsInlined()) {
    m_current_inlined_depth = UINT32_MAX;
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
    Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
    if (log && log->GetVerbose())
      log->Printf(
          "ResetCurrentInlinedDepth: Invalidating current inlined depth.\n");
    return;
  }

  // We only need to do something special about inlined blocks when we are
  // at the beginning of an inlined function:
  // FIXME: We probably also have to do something special if the PC is at
  // the END of an inlined function, which coincides with the end of either
  // its containing function or another inlined function.

  Block *block_ptr = m_frames[0]->GetFrameBlock();
  if (!block_ptr)
    return;

  Address pc_as_address;
  lldb::addr_t curr_pc = m_thread.GetRegisterContext()->GetPC();
  pc_as_address.SetLoadAddress(curr_pc, &(m_thread.GetProcess()->GetTarget()));
  AddressRange containing_range;
  if (!block_ptr->GetRangeContainingAddress(pc_as_address, containing_range) ||
      pc_as_address != containing_range.GetBaseAddress())
    return;

  // If we got here because of a breakpoint hit, then set the inlined depth
  // depending on where the breakpoint was set. If we got here because of a
  // crash, then set the inlined depth to the deepest block. Otherwise, we
  // stopped here naturally as the result of a step, so set ourselves in the
  // containing frame of the whole set of nested inlines, so the user can then
  // "virtually" step into the frames one by one, or next over the whole mess.
  // Note: We don't have to handle being somewhere in the middle of the stack
  // here, since ResetCurrentInlinedDepth doesn't get called if there is a
  // valid inlined depth set.
  StopInfoSP stop_info_sp = m_thread.GetStopInfo();
  if (!stop_info_sp)
    return;
  switch (stop_info_sp->GetStopReason()) {
  case eStopReasonWatchpoint:
  case eStopReasonException:
  case eStopReasonExec:
  case eStopReasonSignal:
    // In all these cases we want to stop in the deepest frame.
    m_current_inlined_pc = curr_pc;
    m_current_inlined_depth = 0;
    break;
  case eStopReasonBreakpoint: {
    // FIXME: Figure out what this breakpoint is doing, and set the inline
    // depth appropriately. Be careful to take into account breakpoints that
    // implement step over prologue, since that should do the default
    // calculation. For now, if the breakpoints corresponding to this hit are
    // all internal, set the stop location to the top of the inlined stack,
    // since that makes things like stepping over prologues work right. But if
    // there are any non-internal breakpoints, go to the bottom of the stack,
    // since that was the old behavior.
    uint32_t bp_site_id = stop_info_sp->GetValue();
    BreakpointSiteSP bp_site_sp(
        m_thread.GetProcess()->GetBreakpointSiteList().FindByID(bp_site_id));
    bool all_internal = true;
    if (bp_site_sp) {
      uint32_t num_owners = bp_site_sp->GetNumberOfOwners();
      for (uint32_t i = 0; i < num_owners; i++) {
        Breakpoint &bp_ref = bp_site_sp->GetOwnerAtIndex(i)->GetBreakpoint();
        if (!bp_ref.IsInternal()) {
          all_internal = false;
        }
      }
    }
    if (!all_internal) {
      m_current_inlined_pc = curr_pc;
      m_current_inlined_depth = 0;
      break;
    }
  }
    LLVM_FALLTHROUGH;
  default: {
    // Otherwise, we should set ourselves at the outermost container of the
    // inlined blocks, so that the user can descend into them. First, check
    // how many inlined blocks share this PC:
    int num_inlined_functions = 0;

    for (Block *container_ptr = block_ptr->GetInlinedParent();
         container_ptr != nullptr;
         container_ptr = container_ptr->GetInlinedParent()) {
      if (!container_ptr->GetRangeContainingAddress(pc_as_address,
                                                    containing_range))
        break;
      if (pc_as_address != containing_range.GetBaseAddress())
        break;

      num_inlined_functions++;
    }
    m_current_inlined_pc = curr_pc;
    m_current_inlined_depth = num_inlined_functions + 1;
    Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
    if (log && log->GetVerbose())
      log->Printf("ResetCurrentInlinedDepth: setting inlined "
                  "depth: %d 0x%" PRIx64 ".\n",
                  m_current_inlined_depth, curr_pc);

    break;
  }
  }
}

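// Decrement the current inlined depth by one, making one more inlined frame
// visible at the top of the stack. Returns true if the depth was valid and
// non-zero.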
bool StackFrameList::DecrementCurrentInlinedDepth() {
  if (m_show_inlined_frames) {
    uint32_t current_inlined_depth = GetCurrentInlinedDepth();
    if (current_inlined_depth != UINT32_MAX) {
      if (current_inlined_depth > 0) {
        m_current_inlined_depth--;
        return true;
      }
    }
  }
  return false;
}

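// Set the current inlined depth explicitly and remember the PC it applies to,
// so the cached depth can be invalidated when the PC changes.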
void StackFrameList::SetCurrentInlinedDepth(uint32_t new_depth) {
  m_current_inlined_depth = new_depth;
  if (new_depth == UINT32_MAX)
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
  else
    m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC();
}

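// Fetch only concrete (unwound) frames up to and including end_idx. This path
// is used when inlined frames are not being shown; the StackFrame objects
// themselves are created lazily in GetFrameAtIndex.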
void StackFrameList::GetOnlyConcreteFramesUpTo(uint32_t end_idx,
                                               Unwind *unwinder) {
  assert(m_thread.IsValid() && "Expected valid thread");
  assert(m_frames.size() <= end_idx && "Expected there to be frames to fill");

  if (end_idx < m_concrete_frames_fetched)
    return;

  if (!unwinder)
    return;

  uint32_t num_frames = unwinder->GetFramesUpTo(end_idx);
  if (num_frames <= end_idx + 1) {
    // Done unwinding.
    m_concrete_frames_fetched = UINT32_MAX;
  }

  // Don't create the frames eagerly. Defer this work to GetFrameAtIndex,
  // which can lazily query the unwinder to create frames.
  m_frames.resize(num_frames);
}

/// Find the unique path through the call graph from \p begin (with return PC
/// \p return_pc) to \p end. On success this path is stored into \p path, and
/// on failure \p path is unchanged.
static void FindInterveningFrames(Function &begin, Function &end,
                                  Target &target, addr_t return_pc,
                                  std::vector<Function *> &path,
                                  ModuleList &images, Log *log) {
  LLDB_LOG(log, "Finding frames between {0} and {1}, retn-pc={2:x}",
           begin.GetDisplayName(), end.GetDisplayName(), return_pc);

  // Find a non-tail calling edge with the correct return PC.
  auto first_level_edges = begin.GetCallEdges();
  if (log)
    for (const CallEdge &edge : first_level_edges)
      LLDB_LOG(log, "FindInterveningFrames: found call with retn-PC = {0:x}",
               edge.GetReturnPCAddress(begin, target));
  auto first_edge_it = std::lower_bound(
      first_level_edges.begin(), first_level_edges.end(), return_pc,
      [&](const CallEdge &edge, addr_t target_pc) {
        return edge.GetReturnPCAddress(begin, target) < target_pc;
      });
  if (first_edge_it == first_level_edges.end() ||
      first_edge_it->GetReturnPCAddress(begin, target) != return_pc) {
    LLDB_LOG(log, "No call edge outgoing from {0} with retn-PC == {1:x}",
             begin.GetDisplayName(), return_pc);
    return;
  }
  CallEdge &first_edge = const_cast<CallEdge &>(*first_edge_it);

  // The first callee may not be resolved, or there may be nothing to fill in.
  Function *first_callee = first_edge.GetCallee(images);
  if (!first_callee) {
    LLDB_LOG(log, "Could not resolve callee");
    return;
  }
  if (first_callee == &end) {
    LLDB_LOG(log, "Not searching further, first callee is {0} (retn-PC: {1:x})",
             end.GetDisplayName(), return_pc);
    return;
  }

  // Run DFS on the tail-calling edges out of the first callee to find \p end.
  // Fully explore the set of functions reachable from the first edge via tail
  // calls in order to detect ambiguous executions.
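  // For example, suppose \p begin calls f1, f1 tail-calls f2, and f2
  // tail-calls \p end: the unwinder only produces frames for \p begin and
  // \p end, and the search below recovers the intervening path [f1, f2],
  // provided it is the only such path (f1 and f2 are illustrative names).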
  struct DFS {
    std::vector<Function *> active_path = {};
    std::vector<Function *> solution_path = {};
    llvm::SmallPtrSet<Function *, 2> visited_nodes = {};
    bool ambiguous = false;
    Function *end;
    ModuleList &images;

    DFS(Function *end, ModuleList &images) : end(end), images(images) {}

    void search(Function *first_callee, std::vector<Function *> &path) {
      dfs(first_callee);
      if (!ambiguous)
        path = std::move(solution_path);
    }

    void dfs(Function *callee) {
      // Found a path to the target function.
      if (callee == end) {
        if (solution_path.empty())
          solution_path = active_path;
        else
          ambiguous = true;
        return;
      }

      // Terminate the search if tail recursion is found, or more generally if
      // there's more than one way to reach a target. This errs on the side of
      // caution: it conservatively stops searching when some solutions are
      // still possible to save time in the average case.
      if (!visited_nodes.insert(callee).second) {
        ambiguous = true;
        return;
      }

      // Search the calls made from this callee.
      active_path.push_back(callee);
      for (CallEdge &edge : callee->GetTailCallingEdges()) {
        Function *next_callee = edge.GetCallee(images);
        if (!next_callee)
          continue;

        dfs(next_callee);
        if (ambiguous)
          return;
      }
      active_path.pop_back();
    }
  };

  DFS(&end, images).search(first_callee, path);
}

/// Given that \p next_frame will be appended to the frame list, synthesize
/// tail call frames between the current end of the list and \p next_frame.
/// If any frames are added, adjust the frame index of \p next_frame.
///
///   --------------
///   |    ...     | <- Completed frames.
///   --------------
///   | prev_frame |
///   --------------
///   |    ...     | <- Artificial frames inserted here.
///   --------------
///   | next_frame |
///   --------------
///   |    ...     | <- Not-yet-visited frames.
///   --------------
void StackFrameList::SynthesizeTailCallFrames(StackFrame &next_frame) {
  TargetSP target_sp = next_frame.CalculateTarget();
  if (!target_sp)
    return;

  lldb::RegisterContextSP next_reg_ctx_sp = next_frame.GetRegisterContext();
  if (!next_reg_ctx_sp)
    return;

  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));

  assert(!m_frames.empty() && "Cannot synthesize frames in an empty stack");
  StackFrame &prev_frame = *m_frames.back().get();

  // Find the functions prev_frame and next_frame are stopped in. The function
  // objects are needed to search the lazy call graph for intervening frames.
  Function *prev_func =
      prev_frame.GetSymbolContext(eSymbolContextFunction).function;
  if (!prev_func) {
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find previous function");
    return;
  }
  Function *next_func =
      next_frame.GetSymbolContext(eSymbolContextFunction).function;
  if (!next_func) {
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find next function");
    return;
  }

  // Try to find the unique sequence of (tail) calls which led from next_frame
  // to prev_frame.
  std::vector<Function *> path;
  addr_t return_pc = next_reg_ctx_sp->GetPC();
  Target &target = *target_sp.get();
  ModuleList &images = next_frame.CalculateTarget()->GetImages();
  FindInterveningFrames(*next_func, *prev_func, target, return_pc, path, images,
                        log);

  // Push synthetic tail call frames.
  for (Function *callee : llvm::reverse(path)) {
    uint32_t frame_idx = m_frames.size();
    uint32_t concrete_frame_idx = next_frame.GetConcreteFrameIndex();
    addr_t cfa = LLDB_INVALID_ADDRESS;
    bool cfa_is_valid = false;
    addr_t pc =
        callee->GetAddressRange().GetBaseAddress().GetLoadAddress(&target);
    SymbolContext sc;
    callee->CalculateSymbolContext(&sc);
    auto synth_frame = std::make_shared<StackFrame>(
        m_thread.shared_from_this(), frame_idx, concrete_frame_idx, cfa,
        cfa_is_valid, pc, StackFrame::Kind::Artificial, &sc);
    m_frames.push_back(synth_frame);
    LLDB_LOG(log, "Pushed frame {0}", callee->GetDisplayName());
  }

  // If any frames were created, adjust next_frame's index.
  if (!path.empty())
    next_frame.SetFrameIndex(m_frames.size());
}

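// Fill m_frames with frames up to and including the frame at index end_idx,
// expanding inlined frames when they are being shown. Once unwinding is
// complete, any still-matching frames from the previous stack frame list are
// merged in.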
void StackFrameList::GetFramesUpTo(uint32_t end_idx) {
  // Do not fetch frames for an invalid thread.
  if (!m_thread.IsValid())
    return;

  // If we've already gotten more frames than asked for, or have already
  // finished unwinding, return.
  if (m_frames.size() > end_idx || GetAllFramesFetched())
    return;

  Unwind *unwinder = m_thread.GetUnwinder();

  if (!m_show_inlined_frames) {
    GetOnlyConcreteFramesUpTo(end_idx, unwinder);
    return;
  }

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
#endif
  // If we are hiding some frames from the outside world, we need to add
  // those onto the total count of frames to fetch.  However, we don't need
  // to do that if end_idx is 0 since in that case we always get the first
  // concrete frame and all the inlined frames below it...  And of course, if
  // end_idx is UINT32_MAX that means get all, so just do that...

  uint32_t inlined_depth = 0;
  if (end_idx > 0 && end_idx != UINT32_MAX) {
    inlined_depth = GetCurrentInlinedDepth();
    if (inlined_depth != UINT32_MAX)
      end_idx += inlined_depth;
  }

  StackFrameSP unwind_frame_sp;
  do {
    uint32_t idx = m_concrete_frames_fetched++;
    lldb::addr_t pc = LLDB_INVALID_ADDRESS;
    lldb::addr_t cfa = LLDB_INVALID_ADDRESS;
    if (idx == 0) {
      // We might have already created frame zero; only create it if we need
      // to.
      if (m_frames.empty()) {
        RegisterContextSP reg_ctx_sp(m_thread.GetRegisterContext());

        if (reg_ctx_sp) {
          const bool success =
              unwinder && unwinder->GetFrameInfoAtIndex(idx, cfa, pc);
          // There shouldn't be any way not to get the frame info for frame
          // 0. But if the unwinder can't make one, let's make one by hand
          // with the SP as the CFA and see if that gets any further.
          if (!success) {
            cfa = reg_ctx_sp->GetSP();
            pc = reg_ctx_sp->GetPC();
          }

          unwind_frame_sp = std::make_shared<StackFrame>(
              m_thread.shared_from_this(), m_frames.size(), idx, reg_ctx_sp,
              cfa, pc, nullptr);
          m_frames.push_back(unwind_frame_sp);
        }
      } else {
        unwind_frame_sp = m_frames.front();
        cfa = unwind_frame_sp->m_id.GetCallFrameAddress();
      }
    } else {
      const bool success =
          unwinder && unwinder->GetFrameInfoAtIndex(idx, cfa, pc);
      if (!success) {
        // We've gotten to the end of the stack.
        SetAllFramesFetched();
        break;
      }
      const bool cfa_is_valid = true;
      unwind_frame_sp = std::make_shared<StackFrame>(
          m_thread.shared_from_this(), m_frames.size(), idx, cfa, cfa_is_valid,
          pc, StackFrame::Kind::Regular, nullptr);

      // Create synthetic tail call frames between the previous frame and the
      // newly-found frame. The new frame's index may change after this call,
      // although its concrete index will stay the same.
      SynthesizeTailCallFrames(*unwind_frame_sp.get());

      m_frames.push_back(unwind_frame_sp);
    }

    assert(unwind_frame_sp);
    SymbolContext unwind_sc = unwind_frame_sp->GetSymbolContext(
        eSymbolContextBlock | eSymbolContextFunction);
    Block *unwind_block = unwind_sc.block;
    if (unwind_block) {
      Address curr_frame_address(unwind_frame_sp->GetFrameCodeAddress());
      TargetSP target_sp = m_thread.CalculateTarget();
      // Be sure to adjust the frame address to match the address that was
      // used to look up the symbol context above. If we are in the first
      // concrete frame, we look up using the current address; otherwise we
      // decrement the address by one to get the correct location.
      if (idx > 0) {
        if (curr_frame_address.GetOffset() == 0) {
          // If curr_frame_address points to the first address in a section,
          // then after the adjustment it will point into another section. In
          // that case, resolve the address again to the correct section plus
          // offset form.
          addr_t load_addr = curr_frame_address.GetOpcodeLoadAddress(
              target_sp.get(), AddressClass::eCode);
          curr_frame_address.SetOpcodeLoadAddress(
              load_addr - 1, target_sp.get(), AddressClass::eCode);
        } else {
          curr_frame_address.Slide(-1);
        }
      }

      SymbolContext next_frame_sc;
      Address next_frame_address;

      while (unwind_sc.GetParentOfInlinedScope(
          curr_frame_address, next_frame_sc, next_frame_address)) {
        next_frame_sc.line_entry.ApplyFileMappings(target_sp);
        StackFrameSP frame_sp(
            new StackFrame(m_thread.shared_from_this(), m_frames.size(), idx,
                           unwind_frame_sp->GetRegisterContextSP(), cfa,
                           next_frame_address, &next_frame_sc));

        m_frames.push_back(frame_sp);
        unwind_sc = next_frame_sc;
        curr_frame_address = next_frame_address;
      }
    }
  } while (m_frames.size() - 1 < end_idx);

  // Don't try to merge until you've calculated all the frames in this stack.
  if (GetAllFramesFetched() && m_prev_frames_sp) {
    StackFrameList *prev_frames = m_prev_frames_sp.get();
    StackFrameList *curr_frames = this;

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nprev_frames:\n");
    prev_frames->Dump(&s);
    s.PutCString("\ncurr_frames:\n");
    curr_frames->Dump(&s);
    s.EOL();
#endif
    size_t curr_frame_num, prev_frame_num;

    for (curr_frame_num = curr_frames->m_frames.size(),
        prev_frame_num = prev_frames->m_frames.size();
         curr_frame_num > 0 && prev_frame_num > 0;
         --curr_frame_num, --prev_frame_num) {
      const size_t curr_frame_idx = curr_frame_num - 1;
      const size_t prev_frame_idx = prev_frame_num - 1;
      StackFrameSP curr_frame_sp(curr_frames->m_frames[curr_frame_idx]);
      StackFrameSP prev_frame_sp(prev_frames->m_frames[prev_frame_idx]);

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n\nCurr frame #%u ", (uint32_t)curr_frame_idx);
      if (curr_frame_sp)
        curr_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
      s.Printf("\nPrev frame #%u ", (uint32_t)prev_frame_idx);
      if (prev_frame_sp)
        prev_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
#endif

      StackFrame *curr_frame = curr_frame_sp.get();
      StackFrame *prev_frame = prev_frame_sp.get();

      if (curr_frame == nullptr || prev_frame == nullptr)
        break;

      // Check the stack IDs to make sure they are equal.
      if (curr_frame->GetStackID() != prev_frame->GetStackID())
        break;

      prev_frame->UpdatePreviousFrameFromCurrentFrame(*curr_frame);
      // Now copy the fixed-up previous frame into the current frames so the
      // StackFrame object's pointer doesn't change.
      m_frames[curr_frame_idx] = prev_frame_sp;

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n    Copying previous frame to current frame");
#endif
    }
    // We are done with the old stack frame list, so we can release it now.
    m_prev_frames_sp.reset();
  }

#if defined(DEBUG_STACK_FRAMES)
  s.PutCString("\n\nNew frames:\n");
  Dump(&s);
  s.EOL();
#endif
}

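// Return the number of frames visible to the user, adjusted for the current
// inlined depth. If can_create is true, unwind the whole stack first.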
uint32_t StackFrameList::GetNumFrames(bool can_create) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  if (can_create)
    GetFramesUpTo(UINT32_MAX);

  return GetVisibleStackFrameIndex(m_frames.size());
}

void StackFrameList::Dump(Stream *s) {
  if (s == nullptr)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  const_iterator pos, begin = m_frames.begin(), end = m_frames.end();
  for (pos = begin; pos != end; ++pos) {
    StackFrame *frame = (*pos).get();
    s->Printf("%p: ", static_cast<void *>(frame));
    if (frame) {
      frame->GetStackID().Dump(s);
      frame->DumpUsingSettingsFormat(s);
    } else
      s->Printf("frame #%u", (uint32_t)std::distance(begin, pos));
    s->EOL();
  }
  s->EOL();
}

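// Return the frame at the given user-visible index, creating frames on demand.
// The index is adjusted by the current inlined depth before the lookup.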
StackFrameSP StackFrameList::GetFrameAtIndex(uint32_t idx) {
  StackFrameSP frame_sp;
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  uint32_t original_idx = idx;

  uint32_t inlined_depth = GetCurrentInlinedDepth();
  if (inlined_depth != UINT32_MAX)
    idx += inlined_depth;

  if (idx < m_frames.size())
    frame_sp = m_frames[idx];

  if (frame_sp)
    return frame_sp;

  // GetFramesUpTo will fill m_frames with as many frames as you asked for, if
  // there are that many.  If there weren't then you asked for too many frames.
  GetFramesUpTo(idx);
  if (idx < m_frames.size()) {
    if (m_show_inlined_frames) {
      // When inline frames are enabled we actually create all the frames in
      // GetFramesUpTo.
      frame_sp = m_frames[idx];
    } else {
      Unwind *unwinder = m_thread.GetUnwinder();
      if (unwinder) {
        addr_t pc, cfa;
        if (unwinder->GetFrameInfoAtIndex(idx, cfa, pc)) {
          const bool cfa_is_valid = true;
          frame_sp = std::make_shared<StackFrame>(
              m_thread.shared_from_this(), idx, idx, cfa, cfa_is_valid, pc,
              StackFrame::Kind::Regular, nullptr);

          Function *function =
              frame_sp->GetSymbolContext(eSymbolContextFunction).function;
          if (function) {
            // When we aren't showing inline functions we always use the
            // topmost function block as the scope.
            frame_sp->SetSymbolContextScope(&function->GetBlock(false));
          } else {
            // Set the symbol scope from the symbol, regardless of whether it
            // is nullptr or valid.
            frame_sp->SetSymbolContextScope(
                frame_sp->GetSymbolContext(eSymbolContextSymbol).symbol);
          }
          SetFrameAtIndex(idx, frame_sp);
        }
      }
    }
  } else if (original_idx == 0) {
    // There should ALWAYS be a frame at index 0.  If something went wrong with
    // the CurrentInlinedDepth such that there weren't as many frames as we
    // thought taking that into account, then reset the current inlined depth
    // and return the real zeroth frame.
    if (m_frames.empty()) {
      // Why do we have a thread with zero frames? That should never
      // happen...
      assert(!m_thread.IsValid() && "A valid thread has no frames.");
    } else {
      ResetCurrentInlinedDepth();
      frame_sp = m_frames[original_idx];
    }
  }

  return frame_sp;
}

StackFrameSP
StackFrameList::GetFrameWithConcreteFrameIndex(uint32_t unwind_idx) {
  // First try assuming the unwind index is the same as the frame index. The
  // frame index is always greater than or equal to the unwind index, so it is
  // a good place to start. If we have inlined frames we might have 5 concrete
  // frames (frame unwind indexes go from 0-4), but we might have 15 frames
  // after we make all the inlined frames. Most of the time the unwind frame
  // index (or the concrete frame index) is the same as the frame index.
  uint32_t frame_idx = unwind_idx;
  StackFrameSP frame_sp(GetFrameAtIndex(frame_idx));
  while (frame_sp) {
    if (frame_sp->GetFrameIndex() == unwind_idx)
      break;
    frame_sp = GetFrameAtIndex(++frame_idx);
  }
  return frame_sp;
}

static bool CompareStackID(const StackFrameSP &stack_sp,
                           const StackID &stack_id) {
  return stack_sp->GetStackID() < stack_id;
}

StackFrameSP StackFrameList::GetFrameWithStackID(const StackID &stack_id) {
  StackFrameSP frame_sp;

  if (stack_id.IsValid()) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    uint32_t frame_idx = 0;
    // Do a binary search in case the stack frame is already in our cache
    collection::const_iterator begin = m_frames.begin();
    collection::const_iterator end = m_frames.end();
    if (begin != end) {
      collection::const_iterator pos =
          std::lower_bound(begin, end, stack_id, CompareStackID);
      if (pos != end) {
        if ((*pos)->GetStackID() == stack_id)
          return *pos;
      }
    }
    do {
      frame_sp = GetFrameAtIndex(frame_idx);
      if (frame_sp && frame_sp->GetStackID() == stack_id)
        break;
      frame_idx++;
    } while (frame_sp);
  }
  return frame_sp;
}

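// Store frame_sp at the given index, growing the list as needed. Returns true
// on success.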
bool StackFrameList::SetFrameAtIndex(uint32_t idx, StackFrameSP &frame_sp) {
  if (idx >= m_frames.size())
    m_frames.resize(idx + 1);
  // Make sure allocation succeeded by checking bounds again
  if (idx < m_frames.size()) {
    m_frames[idx] = frame_sp;
    return true;
  }
  return false; // resize failed, out of memory?
}

uint32_t StackFrameList::GetSelectedFrameIndex() const {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  return m_selected_frame_idx;
}

uint32_t StackFrameList::SetSelectedFrame(lldb_private::StackFrame *frame) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  m_selected_frame_idx = 0;
  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == frame) {
      m_selected_frame_idx = std::distance(begin, pos);
      uint32_t inlined_depth = GetCurrentInlinedDepth();
      if (inlined_depth != UINT32_MAX)
        m_selected_frame_idx -= inlined_depth;
      break;
    }
  }
  SetDefaultFileAndLineToSelectedFrame();
  return m_selected_frame_idx;
}

bool StackFrameList::SetSelectedFrameByIndex(uint32_t idx) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  StackFrameSP frame_sp(GetFrameAtIndex(idx));
  if (frame_sp) {
    SetSelectedFrame(frame_sp.get());
    return true;
  } else
    return false;
}

void StackFrameList::SetDefaultFileAndLineToSelectedFrame() {
  if (m_thread.GetID() ==
      m_thread.GetProcess()->GetThreadList().GetSelectedThread()->GetID()) {
    StackFrameSP frame_sp(GetFrameAtIndex(GetSelectedFrameIndex()));
    if (frame_sp) {
      SymbolContext sc = frame_sp->GetSymbolContext(eSymbolContextLineEntry);
      if (sc.line_entry.file)
        m_thread.CalculateTarget()->GetSourceManager().SetDefaultFileAndLine(
            sc.line_entry.file, sc.line_entry.line);
    }
  }
}

// The thread has been run, so reset the number of stack frames to zero; we
// can then determine how many frames we have lazily.
void StackFrameList::Clear() {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_frames.clear();
  m_concrete_frames_fetched = 0;
}

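// Merge a newly computed frame list (curr_ap) with the previously cached one
// (prev_sp). On return, prev_sp holds the merged result and curr_ap no longer
// owns its list.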
void StackFrameList::Merge(std::unique_ptr<StackFrameList> &curr_ap,
                           lldb::StackFrameListSP &prev_sp) {
  std::unique_lock<std::recursive_mutex> current_lock, previous_lock;
  if (curr_ap)
    current_lock = std::unique_lock<std::recursive_mutex>(curr_ap->m_mutex);
  if (prev_sp)
    previous_lock = std::unique_lock<std::recursive_mutex>(prev_sp->m_mutex);

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
  s.PutCString("\n\nStackFrameList::Merge():\nPrev:\n");
  if (prev_sp)
    prev_sp->Dump(&s);
  else
    s.PutCString("NULL");
  s.PutCString("\nCurr:\n");
  if (curr_ap)
    curr_ap->Dump(&s);
  else
    s.PutCString("NULL");
  s.EOL();
#endif

  if (!curr_ap || curr_ap->GetNumFrames(false) == 0) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("No current frames, leave previous frames alone...\n");
#endif
    curr_ap.release();
    return;
  }

  if (!prev_sp || prev_sp->GetNumFrames(false) == 0) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("No previous frames, so use current frames...\n");
#endif
    // We don't have any previous frames to merge with, so just adopt the
    // current frames as our previous frames.
    prev_sp.reset(curr_ap.release());
    return;
  }

  const uint32_t num_curr_frames = curr_ap->GetNumFrames(false);

  if (num_curr_frames > 1) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString(
        "We have more than one current frame, so use current frames...\n");
#endif
    // Having more than one current frame means we have all the frames, so we
    // can safely replace our previous frames.
    prev_sp.reset(curr_ap.release());

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nMerged:\n");
    prev_sp->Dump(&s);
#endif
    return;
  }

  StackFrameSP prev_frame_zero_sp(prev_sp->GetFrameAtIndex(0));
  StackFrameSP curr_frame_zero_sp(curr_ap->GetFrameAtIndex(0));
  StackID curr_stack_id(curr_frame_zero_sp->GetStackID());
  StackID prev_stack_id(prev_frame_zero_sp->GetStackID());

#if defined(DEBUG_STACK_FRAMES)
  const uint32_t num_prev_frames = prev_sp->GetNumFrames(false);
  s.Printf("\n%u previous frames with one current frame\n", num_prev_frames);
#endif

  // We have only a single current frame, so check how it relates to the
  // previous frame #0 before merging.
  if (curr_stack_id == prev_stack_id) {
#if defined(DEBUG_STACK_FRAMES)
    s.Printf("\nPrevious frame #0 is same as current frame #0, merge the "
             "cached data\n");
#endif

    curr_frame_zero_sp->UpdateCurrentFrameFromPreviousFrame(
        *prev_frame_zero_sp);
    //        prev_frame_zero_sp->UpdatePreviousFrameFromCurrentFrame
    //        (*curr_frame_zero_sp);
    //        prev_sp->SetFrameAtIndex (0, prev_frame_zero_sp);
  } else if (curr_stack_id < prev_stack_id) {
#if defined(DEBUG_STACK_FRAMES)
    s.Printf("\nCurrent frame #0 has a stack ID that is less than the previous "
             "frame #0, insert current frame zero in front of previous\n");
#endif
    prev_sp->m_frames.insert(prev_sp->m_frames.begin(), curr_frame_zero_sp);
  }

  curr_ap.release();

#if defined(DEBUG_STACK_FRAMES)
  s.PutCString("\nMerged:\n");
  prev_sp->Dump(&s);
#endif
}

lldb::StackFrameSP
StackFrameList::GetStackFrameSPForStackFramePtr(StackFrame *stack_frame_ptr) {
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  lldb::StackFrameSP ret_sp;

  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == stack_frame_ptr) {
      ret_sp = (*pos);
      break;
    }
  }
  return ret_sp;
}

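// Print up to num_frames frames, starting at first_frame, to strm, stopping
// early if a frame cannot be fetched. show_frame_info, num_frames_with_source,
// show_unique, and selected_frame_marker control the per-frame output.
// Returns the number of frames displayed.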
size_t StackFrameList::GetStatus(Stream &strm, uint32_t first_frame,
                                 uint32_t num_frames, bool show_frame_info,
                                 uint32_t num_frames_with_source,
                                 bool show_unique,
                                 const char *selected_frame_marker) {
  size_t num_frames_displayed = 0;

  if (num_frames == 0)
    return 0;

  StackFrameSP frame_sp;
  uint32_t frame_idx = 0;
  uint32_t last_frame;

  // Don't let the last frame wrap around...
  if (num_frames == UINT32_MAX)
    last_frame = UINT32_MAX;
  else
    last_frame = first_frame + num_frames;

  StackFrameSP selected_frame_sp = m_thread.GetSelectedFrame();
  const char *unselected_marker = nullptr;
  std::string buffer;
  if (selected_frame_marker) {
    size_t len = strlen(selected_frame_marker);
    buffer.insert(buffer.begin(), len, ' ');
    unselected_marker = buffer.c_str();
  }
  const char *marker = nullptr;

  for (frame_idx = first_frame; frame_idx < last_frame; ++frame_idx) {
    frame_sp = GetFrameAtIndex(frame_idx);
    if (!frame_sp)
      break;

    if (selected_frame_marker != nullptr) {
      if (frame_sp == selected_frame_sp)
        marker = selected_frame_marker;
      else
        marker = unselected_marker;
    }

    if (!frame_sp->GetStatus(strm, show_frame_info,
                             num_frames_with_source > (frame_idx - first_frame),
                             show_unique, marker))
      break;
    ++num_frames_displayed;
  }

  strm.IndentLess();
  return num_frames_displayed;
}