//===-- StackFrameList.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/StackFrameList.h"
#include "lldb/Breakpoint/Breakpoint.h"
#include "lldb/Breakpoint/BreakpointLocation.h"
#include "lldb/Core/SourceManager.h"
#include "lldb/Core/StreamFile.h"
#include "lldb/Symbol/Block.h"
#include "lldb/Symbol/Function.h"
#include "lldb/Symbol/Symbol.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/StackFrame.h"
#include "lldb/Target/StopInfo.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/Unwind.h"
#include "lldb/Utility/LLDBLog.h"
#include "lldb/Utility/Log.h"
#include "llvm/ADT/SmallPtrSet.h"

#include <memory>

//#define DEBUG_STACK_FRAMES 1

using namespace lldb;
using namespace lldb_private;

// StackFrameList constructor
StackFrameList::StackFrameList(Thread &thread,
                               const lldb::StackFrameListSP &prev_frames_sp,
                               bool show_inline_frames)
    : m_thread(thread), m_prev_frames_sp(prev_frames_sp), m_mutex(), m_frames(),
      m_selected_frame_idx(0), m_concrete_frames_fetched(0),
      m_current_inlined_depth(UINT32_MAX),
      m_current_inlined_pc(LLDB_INVALID_ADDRESS),
      m_show_inlined_frames(show_inline_frames) {
  if (prev_frames_sp) {
    m_current_inlined_depth = prev_frames_sp->m_current_inlined_depth;
    m_current_inlined_pc = prev_frames_sp->m_current_inlined_pc;
  }
}

StackFrameList::~StackFrameList() {
  // Call clear since this takes a lock and clears the stack frame list in case
  // another thread is currently using this stack frame list
  Clear();
}

void StackFrameList::CalculateCurrentInlinedDepth() {
  uint32_t cur_inlined_depth = GetCurrentInlinedDepth();
  if (cur_inlined_depth == UINT32_MAX) {
    ResetCurrentInlinedDepth();
  }
}

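// Return the currently-cached inlined depth, invalidating the cache (and
// returning UINT32_MAX) if the thread's PC has moved since the depth was
// recorded.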
uint32_t StackFrameList::GetCurrentInlinedDepth() {
  if (m_show_inlined_frames && m_current_inlined_pc != LLDB_INVALID_ADDRESS) {
    lldb::addr_t cur_pc = m_thread.GetRegisterContext()->GetPC();
    if (cur_pc != m_current_inlined_pc) {
      m_current_inlined_pc = LLDB_INVALID_ADDRESS;
      m_current_inlined_depth = UINT32_MAX;
      Log *log = GetLog(LLDBLog::Step);
      if (log && log->GetVerbose())
        LLDB_LOGF(
            log,
            "GetCurrentInlinedDepth: invalidating current inlined depth.\n");
    }
    return m_current_inlined_depth;
  } else {
    return UINT32_MAX;
  }
}

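// Recompute the inlined depth for the current stop: depending on the stop
// reason, either expose the deepest inlined block directly or start the user
// in the frame that contains the whole nest of inlined calls.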
void StackFrameList::ResetCurrentInlinedDepth() {
  if (!m_show_inlined_frames)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  GetFramesUpTo(0);
  if (m_frames.empty())
    return;
  if (!m_frames[0]->IsInlined()) {
    m_current_inlined_depth = UINT32_MAX;
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
    Log *log = GetLog(LLDBLog::Step);
    if (log && log->GetVerbose())
      LLDB_LOGF(
          log,
          "ResetCurrentInlinedDepth: Invalidating current inlined depth.\n");
    return;
  }

  // We only need to do something special about inlined blocks when we are
  // at the beginning of an inlined function:
  // FIXME: We probably also have to do something special if the PC is at
  // the END of an inlined function, which coincides with the end of either
  // its containing function or another inlined function.

  Block *block_ptr = m_frames[0]->GetFrameBlock();
  if (!block_ptr)
    return;

  Address pc_as_address;
  lldb::addr_t curr_pc = m_thread.GetRegisterContext()->GetPC();
  pc_as_address.SetLoadAddress(curr_pc, &(m_thread.GetProcess()->GetTarget()));
  AddressRange containing_range;
  if (!block_ptr->GetRangeContainingAddress(pc_as_address, containing_range) ||
      pc_as_address != containing_range.GetBaseAddress())
    return;

  // If we got here because of a breakpoint hit, then set the inlined depth
  // depending on where the breakpoint was set. If we got here because of a
  // crash, then set the inlined depth to the deepest block. Otherwise,
  // we stopped here naturally as the result of a step, so set ourselves in the
  // containing frame of the whole set of nested inlines, so the user can then
  // "virtually" step into the frames one by one, or next over the whole mess.
  // Note: We don't have to handle being somewhere in the middle of the stack
  // here, since ResetCurrentInlinedDepth doesn't get called if there is a
  // valid inlined depth set.
  StopInfoSP stop_info_sp = m_thread.GetStopInfo();
  if (!stop_info_sp)
    return;
  switch (stop_info_sp->GetStopReason()) {
  case eStopReasonWatchpoint:
  case eStopReasonException:
  case eStopReasonExec:
  case eStopReasonFork:
  case eStopReasonVFork:
  case eStopReasonVForkDone:
  case eStopReasonSignal:
    // In all these cases we want to stop in the deepest frame.
    m_current_inlined_pc = curr_pc;
    m_current_inlined_depth = 0;
    break;
  case eStopReasonBreakpoint: {
    // FIXME: Figure out what this breakpoint is doing, and set the inline
    // depth appropriately. Be careful to take into account breakpoints that
    // implement step over prologue, since that should do the default
    // calculation. For now, if the breakpoints corresponding to this hit are
    // all internal, I set the stop location to the top of the inlined stack,
    // since that will make things like stepping over prologues work right.
    // But if there are any non-internal breakpoints I go to the bottom of the
    // stack, since that was the old behavior.
    uint32_t bp_site_id = stop_info_sp->GetValue();
    BreakpointSiteSP bp_site_sp(
        m_thread.GetProcess()->GetBreakpointSiteList().FindByID(bp_site_id));
    bool all_internal = true;
    if (bp_site_sp) {
      uint32_t num_owners = bp_site_sp->GetNumberOfOwners();
      for (uint32_t i = 0; i < num_owners; i++) {
        Breakpoint &bp_ref = bp_site_sp->GetOwnerAtIndex(i)->GetBreakpoint();
        if (!bp_ref.IsInternal()) {
          all_internal = false;
        }
      }
    }
    if (!all_internal) {
      m_current_inlined_pc = curr_pc;
      m_current_inlined_depth = 0;
      break;
    }
  }
    LLVM_FALLTHROUGH;
  default: {
    // Otherwise, we should set ourselves at the container of the inlining, so
    // that the user can descend into them. So first we check whether we have
    // more than one inlined block sharing this PC:
    int num_inlined_functions = 0;

    for (Block *container_ptr = block_ptr->GetInlinedParent();
         container_ptr != nullptr;
         container_ptr = container_ptr->GetInlinedParent()) {
      if (!container_ptr->GetRangeContainingAddress(pc_as_address,
                                                    containing_range))
        break;
      if (pc_as_address != containing_range.GetBaseAddress())
        break;

      num_inlined_functions++;
    }
    m_current_inlined_pc = curr_pc;
    m_current_inlined_depth = num_inlined_functions + 1;
    Log *log = GetLog(LLDBLog::Step);
    if (log && log->GetVerbose())
      LLDB_LOGF(log,
                "ResetCurrentInlinedDepth: setting inlined "
                "depth: %d 0x%" PRIx64 ".\n",
                m_current_inlined_depth, curr_pc);

    break;
  }
  }
}

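// Reduce the current inlined depth by one, which makes the next-deeper
// inlined frame visible as frame zero. Returns true if the depth was
// actually decremented.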
bool StackFrameList::DecrementCurrentInlinedDepth() {
  if (m_show_inlined_frames) {
    uint32_t current_inlined_depth = GetCurrentInlinedDepth();
    if (current_inlined_depth != UINT32_MAX) {
      if (current_inlined_depth > 0) {
        m_current_inlined_depth--;
        return true;
      }
    }
  }
  return false;
}

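// Record the given inlined depth and remember the PC it applies to, so the
// cached depth can be invalidated once the thread moves on.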
void StackFrameList::SetCurrentInlinedDepth(uint32_t new_depth) {
  m_current_inlined_depth = new_depth;
  if (new_depth == UINT32_MAX)
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
  else
    m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC();
}

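// Fetch concrete (non-inlined) frame information from the unwinder up to
// end_idx, growing m_frames to match but leaving the actual StackFrame
// objects to be created lazily by GetFrameAtIndex.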
void StackFrameList::GetOnlyConcreteFramesUpTo(uint32_t end_idx,
                                               Unwind &unwinder) {
  assert(m_thread.IsValid() && "Expected valid thread");
  assert(m_frames.size() <= end_idx && "Expected there to be frames to fill");

  if (end_idx < m_concrete_frames_fetched)
    return;

  uint32_t num_frames = unwinder.GetFramesUpTo(end_idx);
  if (num_frames <= end_idx + 1) {
    // Done unwinding.
    m_concrete_frames_fetched = UINT32_MAX;
  }

  // Don't create the frames eagerly. Defer this work to GetFrameAtIndex,
  // which can lazily query the unwinder to create frames.
  m_frames.resize(num_frames);
}

/// A sequence of calls that comprise some portion of a backtrace. Each frame
/// is represented as a pair of a callee (Function *) and an address within the
/// callee.
struct CallDescriptor {
  Function *func;
  CallEdge::AddrType address_type = CallEdge::AddrType::Call;
  addr_t address = LLDB_INVALID_ADDRESS;
};
using CallSequence = std::vector<CallDescriptor>;

/// Find the unique path through the call graph from \p begin (with return PC
/// \p return_pc) to \p end. On success this path is stored into \p path, and
/// on failure \p path is unchanged.
static void FindInterveningFrames(Function &begin, Function &end,
                                  ExecutionContext &exe_ctx, Target &target,
                                  addr_t return_pc, CallSequence &path,
                                  ModuleList &images, Log *log) {
  LLDB_LOG(log, "Finding frames between {0} and {1}, retn-pc={2:x}",
           begin.GetDisplayName(), end.GetDisplayName(), return_pc);

  // Find a non-tail calling edge with the correct return PC.
  if (log)
    for (const auto &edge : begin.GetCallEdges())
      LLDB_LOG(log, "FindInterveningFrames: found call with retn-PC = {0:x}",
               edge->GetReturnPCAddress(begin, target));
  CallEdge *first_edge = begin.GetCallEdgeForReturnAddress(return_pc, target);
  if (!first_edge) {
    LLDB_LOG(log, "No call edge outgoing from {0} with retn-PC == {1:x}",
             begin.GetDisplayName(), return_pc);
    return;
  }

  // The first callee may not be resolved, or there may be nothing to fill in.
  Function *first_callee = first_edge->GetCallee(images, exe_ctx);
  if (!first_callee) {
    LLDB_LOG(log, "Could not resolve callee");
    return;
  }
  if (first_callee == &end) {
    LLDB_LOG(log, "Not searching further, first callee is {0} (retn-PC: {1:x})",
             end.GetDisplayName(), return_pc);
    return;
  }

  // Run DFS on the tail-calling edges out of the first callee to find \p end.
  // Fully explore the set of functions reachable from the first edge via tail
  // calls in order to detect ambiguous executions.
  struct DFS {
    CallSequence active_path = {};
    CallSequence solution_path = {};
    llvm::SmallPtrSet<Function *, 2> visited_nodes = {};
    bool ambiguous = false;
    Function *end;
    ModuleList &images;
    Target &target;
    ExecutionContext &context;

    DFS(Function *end, ModuleList &images, Target &target,
        ExecutionContext &context)
        : end(end), images(images), target(target), context(context) {}

    void search(CallEdge &first_edge, Function &first_callee,
                CallSequence &path) {
      dfs(first_edge, first_callee);
      if (!ambiguous)
        path = std::move(solution_path);
    }

    void dfs(CallEdge &current_edge, Function &callee) {
      // Found a path to the target function.
      if (&callee == end) {
        if (solution_path.empty())
          solution_path = active_path;
        else
          ambiguous = true;
        return;
      }

      // Terminate the search if tail recursion is found, or more generally if
      // there's more than one way to reach a target. This errs on the side of
      // caution: it conservatively stops searching when some solutions are
      // still possible to save time in the average case.
      if (!visited_nodes.insert(&callee).second) {
        ambiguous = true;
        return;
      }

      // Search the calls made from this callee.
      active_path.push_back(CallDescriptor{&callee});
      for (const auto &edge : callee.GetTailCallingEdges()) {
        Function *next_callee = edge->GetCallee(images, context);
        if (!next_callee)
          continue;

        std::tie(active_path.back().address_type, active_path.back().address) =
            edge->GetCallerAddress(callee, target);

        dfs(*edge, *next_callee);
        if (ambiguous)
          return;
      }
      active_path.pop_back();
    }
  };

  DFS(&end, images, target, exe_ctx).search(*first_edge, *first_callee, path);
}

/// Given that \p next_frame will be appended to the frame list, synthesize
/// tail call frames between the current end of the list and \p next_frame.
/// If any frames are added, adjust the frame index of \p next_frame.
///
/// --------------
/// |    ...     | <- Completed frames.
/// --------------
/// | prev_frame |
/// --------------
/// |    ...     | <- Artificial frames inserted here.
/// --------------
/// | next_frame |
/// --------------
/// |    ...     | <- Not-yet-visited frames.
/// --------------
void StackFrameList::SynthesizeTailCallFrames(StackFrame &next_frame) {
  // Cannot synthesize tail call frames when the stack is empty (there is no
  // "previous" frame).
  if (m_frames.empty())
    return;

  TargetSP target_sp = next_frame.CalculateTarget();
  if (!target_sp)
    return;

  lldb::RegisterContextSP next_reg_ctx_sp = next_frame.GetRegisterContext();
  if (!next_reg_ctx_sp)
    return;

  Log *log = GetLog(LLDBLog::Step);

  StackFrame &prev_frame = *m_frames.back().get();

  // Find the functions prev_frame and next_frame are stopped in. The function
  // objects are needed to search the lazy call graph for intervening frames.
  Function *prev_func =
      prev_frame.GetSymbolContext(eSymbolContextFunction).function;
  if (!prev_func) {
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find previous function");
    return;
  }
  Function *next_func =
      next_frame.GetSymbolContext(eSymbolContextFunction).function;
  if (!next_func) {
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find next function");
    return;
  }

  // Try to find the unique sequence of (tail) calls which led from next_frame
  // to prev_frame.
  CallSequence path;
  addr_t return_pc = next_reg_ctx_sp->GetPC();
  Target &target = *target_sp.get();
  ModuleList &images = next_frame.CalculateTarget()->GetImages();
  ExecutionContext exe_ctx(target_sp, /*get_process=*/true);
  exe_ctx.SetFramePtr(&next_frame);
  FindInterveningFrames(*next_func, *prev_func, exe_ctx, target, return_pc,
                        path, images, log);

  // Push synthetic tail call frames.
  for (auto calleeInfo : llvm::reverse(path)) {
    Function *callee = calleeInfo.func;
    uint32_t frame_idx = m_frames.size();
    uint32_t concrete_frame_idx = next_frame.GetConcreteFrameIndex();
    addr_t cfa = LLDB_INVALID_ADDRESS;
    bool cfa_is_valid = false;
    addr_t pc = calleeInfo.address;
    // If the callee address refers to the call instruction, we do not want to
    // subtract 1 from this value.
    const bool behaves_like_zeroth_frame =
        calleeInfo.address_type == CallEdge::AddrType::Call;
    SymbolContext sc;
    callee->CalculateSymbolContext(&sc);
    auto synth_frame = std::make_shared<StackFrame>(
        m_thread.shared_from_this(), frame_idx, concrete_frame_idx, cfa,
        cfa_is_valid, pc, StackFrame::Kind::Artificial,
        behaves_like_zeroth_frame, &sc);
    m_frames.push_back(synth_frame);
    LLDB_LOG(log, "Pushed frame {0} at {1:x}", callee->GetDisplayName(), pc);
  }

  // If any frames were created, adjust next_frame's index.
  if (!path.empty())
    next_frame.SetFrameIndex(m_frames.size());
}

void StackFrameList::GetFramesUpTo(uint32_t end_idx) {
  // Do not fetch frames for an invalid thread.
  if (!m_thread.IsValid())
    return;

  // We've already gotten more frames than asked for, or we've already finished
  // unwinding, return.
  if (m_frames.size() > end_idx || GetAllFramesFetched())
    return;

  Unwind &unwinder = m_thread.GetUnwinder();

  if (!m_show_inlined_frames) {
    GetOnlyConcreteFramesUpTo(end_idx, unwinder);
    return;
  }

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
#endif
  // If we are hiding some frames from the outside world, we need to add
  // those onto the total count of frames to fetch. However, we don't need
  // to do that if end_idx is 0 since in that case we always get the first
  // concrete frame and all the inlined frames below it... And of course, if
  // end_idx is UINT32_MAX that means get all, so just do that...

  uint32_t inlined_depth = 0;
  if (end_idx > 0 && end_idx != UINT32_MAX) {
    inlined_depth = GetCurrentInlinedDepth();
    if (inlined_depth != UINT32_MAX) {
      if (end_idx > 0)
        end_idx += inlined_depth;
    }
  }

  StackFrameSP unwind_frame_sp;
  do {
    uint32_t idx = m_concrete_frames_fetched++;
    lldb::addr_t pc = LLDB_INVALID_ADDRESS;
    lldb::addr_t cfa = LLDB_INVALID_ADDRESS;
    bool behaves_like_zeroth_frame = (idx == 0);
    if (idx == 0) {
      // We might have already created frame zero, only create it if we need
      // to.
      if (m_frames.empty()) {
        RegisterContextSP reg_ctx_sp(m_thread.GetRegisterContext());

        if (reg_ctx_sp) {
          const bool success = unwinder.GetFrameInfoAtIndex(
              idx, cfa, pc, behaves_like_zeroth_frame);
          // There shouldn't be any way not to get the frame info for frame
          // 0. But if the unwinder can't make one, let's make one by hand
          // with the SP as the CFA and see if that gets any further.
          if (!success) {
            cfa = reg_ctx_sp->GetSP();
            pc = reg_ctx_sp->GetPC();
          }

          unwind_frame_sp = std::make_shared<StackFrame>(
              m_thread.shared_from_this(), m_frames.size(), idx, reg_ctx_sp,
              cfa, pc, behaves_like_zeroth_frame, nullptr);
          m_frames.push_back(unwind_frame_sp);
        }
      } else {
        unwind_frame_sp = m_frames.front();
        cfa = unwind_frame_sp->m_id.GetCallFrameAddress();
      }
    } else {
      const bool success =
          unwinder.GetFrameInfoAtIndex(idx, cfa, pc, behaves_like_zeroth_frame);
      if (!success) {
        // We've gotten to the end of the stack.
        SetAllFramesFetched();
        break;
      }
      const bool cfa_is_valid = true;
      unwind_frame_sp = std::make_shared<StackFrame>(
          m_thread.shared_from_this(), m_frames.size(), idx, cfa, cfa_is_valid,
          pc, StackFrame::Kind::Regular, behaves_like_zeroth_frame, nullptr);

      // Create synthetic tail call frames between the previous frame and the
      // newly-found frame. The new frame's index may change after this call,
      // although its concrete index will stay the same.
      SynthesizeTailCallFrames(*unwind_frame_sp.get());

      m_frames.push_back(unwind_frame_sp);
    }

    assert(unwind_frame_sp);
    SymbolContext unwind_sc = unwind_frame_sp->GetSymbolContext(
        eSymbolContextBlock | eSymbolContextFunction);
    Block *unwind_block = unwind_sc.block;
    TargetSP target_sp = m_thread.CalculateTarget();
    if (unwind_block) {
      Address curr_frame_address(
          unwind_frame_sp->GetFrameCodeAddressForSymbolication());

      SymbolContext next_frame_sc;
      Address next_frame_address;

      while (unwind_sc.GetParentOfInlinedScope(
          curr_frame_address, next_frame_sc, next_frame_address)) {
        next_frame_sc.line_entry.ApplyFileMappings(target_sp);
        behaves_like_zeroth_frame = false;
        StackFrameSP frame_sp(new StackFrame(
            m_thread.shared_from_this(), m_frames.size(), idx,
            unwind_frame_sp->GetRegisterContextSP(), cfa, next_frame_address,
            behaves_like_zeroth_frame, &next_frame_sc));

        m_frames.push_back(frame_sp);
        unwind_sc = next_frame_sc;
        curr_frame_address = next_frame_address;
      }
    }
  } while (m_frames.size() - 1 < end_idx);

  // Don't try to merge till you've calculated all the frames in this stack.
  if (GetAllFramesFetched() && m_prev_frames_sp) {
    StackFrameList *prev_frames = m_prev_frames_sp.get();
    StackFrameList *curr_frames = this;

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nprev_frames:\n");
    prev_frames->Dump(&s);
    s.PutCString("\ncurr_frames:\n");
    curr_frames->Dump(&s);
    s.EOL();
#endif
    size_t curr_frame_num, prev_frame_num;

    for (curr_frame_num = curr_frames->m_frames.size(),
        prev_frame_num = prev_frames->m_frames.size();
         curr_frame_num > 0 && prev_frame_num > 0;
         --curr_frame_num, --prev_frame_num) {
      const size_t curr_frame_idx = curr_frame_num - 1;
      const size_t prev_frame_idx = prev_frame_num - 1;
      StackFrameSP curr_frame_sp(curr_frames->m_frames[curr_frame_idx]);
      StackFrameSP prev_frame_sp(prev_frames->m_frames[prev_frame_idx]);

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n\nCurr frame #%u ", curr_frame_idx);
      if (curr_frame_sp)
        curr_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
      s.Printf("\nPrev frame #%u ", prev_frame_idx);
      if (prev_frame_sp)
        prev_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
#endif

      StackFrame *curr_frame = curr_frame_sp.get();
      StackFrame *prev_frame = prev_frame_sp.get();

      if (curr_frame == nullptr || prev_frame == nullptr)
        break;

      // Check the stack ID to make sure they are equal.
      if (curr_frame->GetStackID() != prev_frame->GetStackID())
        break;

      prev_frame->UpdatePreviousFrameFromCurrentFrame(*curr_frame);
      // Now copy the fixed up previous frame into the current frames so the
      // pointer doesn't change.
      m_frames[curr_frame_idx] = prev_frame_sp;

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n Copying previous frame to current frame");
#endif
    }
    // We are done with the old stack frame list, we can release it now.
    m_prev_frames_sp.reset();
  }

#if defined(DEBUG_STACK_FRAMES)
  s.PutCString("\n\nNew frames:\n");
  Dump(&s);
  s.EOL();
#endif
}

uint32_t StackFrameList::GetNumFrames(bool can_create) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  if (can_create)
    GetFramesUpTo(UINT32_MAX);

  return GetVisibleStackFrameIndex(m_frames.size());
}

void StackFrameList::Dump(Stream *s) {
  if (s == nullptr)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  const_iterator pos, begin = m_frames.begin(), end = m_frames.end();
  for (pos = begin; pos != end; ++pos) {
    StackFrame *frame = (*pos).get();
    s->Printf("%p: ", static_cast<void *>(frame));
    if (frame) {
      frame->GetStackID().Dump(s);
      frame->DumpUsingSettingsFormat(s);
    } else
      s->Printf("frame #%u", (uint32_t)std::distance(begin, pos));
    s->EOL();
  }
  s->EOL();
}

StackFrameSP StackFrameList::GetFrameAtIndex(uint32_t idx) {
  StackFrameSP frame_sp;
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  uint32_t original_idx = idx;

  uint32_t inlined_depth = GetCurrentInlinedDepth();
  if (inlined_depth != UINT32_MAX)
    idx += inlined_depth;

  if (idx < m_frames.size())
    frame_sp = m_frames[idx];

  if (frame_sp)
    return frame_sp;

  // GetFramesUpTo will fill m_frames with as many frames as you asked for, if
  // there are that many. If there weren't then you asked for too many frames.
  GetFramesUpTo(idx);
  if (idx < m_frames.size()) {
    if (m_show_inlined_frames) {
      // When inline frames are enabled we actually create all the frames in
      // GetFramesUpTo.
      frame_sp = m_frames[idx];
    } else {
      addr_t pc, cfa;
      bool behaves_like_zeroth_frame = (idx == 0);
      if (m_thread.GetUnwinder().GetFrameInfoAtIndex(
              idx, cfa, pc, behaves_like_zeroth_frame)) {
        const bool cfa_is_valid = true;
        frame_sp = std::make_shared<StackFrame>(
            m_thread.shared_from_this(), idx, idx, cfa, cfa_is_valid, pc,
            StackFrame::Kind::Regular, behaves_like_zeroth_frame, nullptr);

        Function *function =
            frame_sp->GetSymbolContext(eSymbolContextFunction).function;
        if (function) {
          // When we aren't showing inline functions we always use the top
          // most function block as the scope.
          frame_sp->SetSymbolContextScope(&function->GetBlock(false));
        } else {
          // Set the symbol scope from the symbol regardless if it is nullptr
          // or valid.
          frame_sp->SetSymbolContextScope(
              frame_sp->GetSymbolContext(eSymbolContextSymbol).symbol);
        }
        SetFrameAtIndex(idx, frame_sp);
      }
    }
  } else if (original_idx == 0) {
    // There should ALWAYS be a frame at index 0. If something went wrong with
    // the CurrentInlinedDepth such that there weren't as many frames as we
    // thought taking that into account, then reset the current inlined depth
    // and return the real zeroth frame.
    if (m_frames.empty()) {
      // Why do we have a thread with zero frames, that should not ever
      // happen...
      assert(!m_thread.IsValid() && "A valid thread has no frames.");
    } else {
      ResetCurrentInlinedDepth();
      frame_sp = m_frames[original_idx];
    }
  }

  return frame_sp;
}

StackFrameSP
StackFrameList::GetFrameWithConcreteFrameIndex(uint32_t unwind_idx) {
  // First try assuming the unwind index is the same as the frame index. The
  // unwind index is always greater than or equal to the frame index, so it is
  // a good place to start. If we have inlined frames we might have 5 concrete
  // frames (frame unwind indexes go from 0-4), but we might have 15 frames
  // after we make all the inlined frames. Most of the time the unwind frame
  // index (or the concrete frame index) is the same as the frame index.
  uint32_t frame_idx = unwind_idx;
  StackFrameSP frame_sp(GetFrameAtIndex(frame_idx));
  while (frame_sp) {
    if (frame_sp->GetFrameIndex() == unwind_idx)
      break;
    frame_sp = GetFrameAtIndex(++frame_idx);
  }
  return frame_sp;
}

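// Comparison function used with std::lower_bound below to binary-search the
// cached frames by StackID.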
static bool CompareStackID(const StackFrameSP &stack_sp,
                           const StackID &stack_id) {
  return stack_sp->GetStackID() < stack_id;
}

StackFrameSP StackFrameList::GetFrameWithStackID(const StackID &stack_id) {
  StackFrameSP frame_sp;

  if (stack_id.IsValid()) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    uint32_t frame_idx = 0;
    // Do a binary search in case the stack frame is already in our cache
    collection::const_iterator begin = m_frames.begin();
    collection::const_iterator end = m_frames.end();
    if (begin != end) {
      collection::const_iterator pos =
          std::lower_bound(begin, end, stack_id, CompareStackID);
      if (pos != end) {
        if ((*pos)->GetStackID() == stack_id)
          return *pos;
      }
    }
    do {
      frame_sp = GetFrameAtIndex(frame_idx);
      if (frame_sp && frame_sp->GetStackID() == stack_id)
        break;
      frame_idx++;
    } while (frame_sp);
  }
  return frame_sp;
}

bool StackFrameList::SetFrameAtIndex(uint32_t idx, StackFrameSP &frame_sp) {
  if (idx >= m_frames.size())
    m_frames.resize(idx + 1);
  // Make sure allocation succeeded by checking bounds again
  if (idx < m_frames.size()) {
    m_frames[idx] = frame_sp;
    return true;
  }
  return false; // resize failed, out of memory?
}

uint32_t StackFrameList::GetSelectedFrameIndex() const {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  return m_selected_frame_idx;
}

uint32_t StackFrameList::SetSelectedFrame(lldb_private::StackFrame *frame) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  m_selected_frame_idx = 0;
  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == frame) {
      m_selected_frame_idx = std::distance(begin, pos);
      uint32_t inlined_depth = GetCurrentInlinedDepth();
      if (inlined_depth != UINT32_MAX)
        m_selected_frame_idx -= inlined_depth;
      break;
    }
  }
  SetDefaultFileAndLineToSelectedFrame();
  return m_selected_frame_idx;
}

bool StackFrameList::SetSelectedFrameByIndex(uint32_t idx) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  StackFrameSP frame_sp(GetFrameAtIndex(idx));
  if (frame_sp) {
    SetSelectedFrame(frame_sp.get());
    return true;
  } else
    return false;
}

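// If this thread is the currently selected thread, push the selected frame's
// file and line to the source manager as the default source location.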
void StackFrameList::SetDefaultFileAndLineToSelectedFrame() {
  if (m_thread.GetID() ==
      m_thread.GetProcess()->GetThreadList().GetSelectedThread()->GetID()) {
    StackFrameSP frame_sp(GetFrameAtIndex(GetSelectedFrameIndex()));
    if (frame_sp) {
      SymbolContext sc = frame_sp->GetSymbolContext(eSymbolContextLineEntry);
      if (sc.line_entry.file)
        m_thread.CalculateTarget()->GetSourceManager().SetDefaultFileAndLine(
            sc.line_entry.file, sc.line_entry.line);
    }
  }
}

// The thread has been run, reset the number of stack frames to zero so we can
// determine how many frames we have lazily.
void StackFrameList::Clear() {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_frames.clear();
  m_concrete_frames_fetched = 0;
}

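// Return the shared pointer that owns the given raw StackFrame pointer, or an
// empty shared pointer if the frame is not present in this list.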
lldb::StackFrameSP
StackFrameList::GetStackFrameSPForStackFramePtr(StackFrame *stack_frame_ptr) {
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  lldb::StackFrameSP ret_sp;

  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == stack_frame_ptr) {
      ret_sp = (*pos);
      break;
    }
  }
  return ret_sp;
}

size_t StackFrameList::GetStatus(Stream &strm, uint32_t first_frame,
                                 uint32_t num_frames, bool show_frame_info,
                                 uint32_t num_frames_with_source,
                                 bool show_unique,
                                 const char *selected_frame_marker) {
  size_t num_frames_displayed = 0;

  if (num_frames == 0)
    return 0;

  StackFrameSP frame_sp;
  uint32_t frame_idx = 0;
  uint32_t last_frame;

  // Don't let the last frame wrap around...
  if (num_frames == UINT32_MAX)
    last_frame = UINT32_MAX;
  else
    last_frame = first_frame + num_frames;

  StackFrameSP selected_frame_sp = m_thread.GetSelectedFrame();
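  // If a selected-frame marker was requested, build a blank string of the
  // same width so unselected frames stay aligned with the marked one.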
  const char *unselected_marker = nullptr;
  std::string buffer;
  if (selected_frame_marker) {
    size_t len = strlen(selected_frame_marker);
    buffer.insert(buffer.begin(), len, ' ');
    unselected_marker = buffer.c_str();
  }
  const char *marker = nullptr;

  for (frame_idx = first_frame; frame_idx < last_frame; ++frame_idx) {
    frame_sp = GetFrameAtIndex(frame_idx);
    if (!frame_sp)
      break;

    if (selected_frame_marker != nullptr) {
      if (frame_sp == selected_frame_sp)
        marker = selected_frame_marker;
      else
        marker = unselected_marker;
    }

    if (!frame_sp->GetStatus(strm, show_frame_info,
                             num_frames_with_source > (first_frame - frame_idx),
                             show_unique, marker))
      break;
    ++num_frames_displayed;
  }

  strm.IndentLess();
  return num_frames_displayed;
}