//===-- StackFrameList.cpp --------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

// C Includes
// C++ Includes
// Other libraries and framework includes
// Project includes
#include "lldb/Target/StackFrameList.h"
#include "lldb/Breakpoint/Breakpoint.h"
#include "lldb/Breakpoint/BreakpointLocation.h"
#include "lldb/Core/SourceManager.h"
#include "lldb/Core/StreamFile.h"
#include "lldb/Symbol/Block.h"
#include "lldb/Symbol/Function.h"
#include "lldb/Symbol/Symbol.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/StackFrame.h"
#include "lldb/Target/StopInfo.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/Unwind.h"
#include "lldb/Utility/Log.h"

//#define DEBUG_STACK_FRAMES 1

using namespace lldb;
using namespace lldb_private;

//----------------------------------------------------------------------
// StackFrameList constructor
//
// If prev_frames_sp is non-null, the cached inlined-depth state (the
// depth and the PC it was computed at) is carried over from that older
// frame list so that "virtual" inline stepping survives a frame-list
// rebuild.
//----------------------------------------------------------------------
StackFrameList::StackFrameList(Thread &thread,
                               const lldb::StackFrameListSP &prev_frames_sp,
                               bool show_inline_frames)
    : m_thread(thread), m_prev_frames_sp(prev_frames_sp), m_mutex(), m_frames(),
      m_selected_frame_idx(0), m_concrete_frames_fetched(0),
      m_current_inlined_depth(UINT32_MAX),
      m_current_inlined_pc(LLDB_INVALID_ADDRESS),
      m_show_inlined_frames(show_inline_frames) {
  if (prev_frames_sp) {
    m_current_inlined_depth = prev_frames_sp->m_current_inlined_depth;
    m_current_inlined_pc = prev_frames_sp->m_current_inlined_pc;
  }
}

StackFrameList::~StackFrameList() {
  // Call clear since this takes a lock and clears the stack frame list in case
  // another thread is currently using this stack frame list
  Clear();
}

// Recompute the current inlined depth, but only if it is not already valid
// (GetCurrentInlinedDepth() returns UINT32_MAX when the cached value has been
// invalidated or was never set).
void StackFrameList::CalculateCurrentInlinedDepth() {
  uint32_t cur_inlined_depth = GetCurrentInlinedDepth();
  if (cur_inlined_depth == UINT32_MAX) {
    ResetCurrentInlinedDepth();
  }
}

// Return the cached inlined depth, or UINT32_MAX if none applies.  If the
// thread's PC has moved since the depth was recorded, the cache is
// invalidated first (depth = UINT32_MAX, pc = LLDB_INVALID_ADDRESS).
// NOTE(review): this mutates the cached members without taking m_mutex,
// unlike ResetCurrentInlinedDepth — presumably callers hold the lock or
// races are benign here; confirm.
uint32_t StackFrameList::GetCurrentInlinedDepth() {
  if (m_show_inlined_frames && m_current_inlined_pc != LLDB_INVALID_ADDRESS) {
    lldb::addr_t cur_pc = m_thread.GetRegisterContext()->GetPC();
    if (cur_pc != m_current_inlined_pc) {
      m_current_inlined_pc = LLDB_INVALID_ADDRESS;
      m_current_inlined_depth = UINT32_MAX;
      Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
      if (log && log->GetVerbose())
        log->Printf(
            "GetCurrentInlinedDepth: invalidating current inlined depth.\n");
    }
    return m_current_inlined_depth;
  } else {
    return UINT32_MAX;
  }
}

// Recompute the inlined depth from scratch based on where the PC sits in
// frame zero's block hierarchy and on why the thread stopped.  Only does
// anything when inlined frames are being shown.
void StackFrameList::ResetCurrentInlinedDepth() {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  if (m_show_inlined_frames) {
    // Make sure frame zero (and its inlined expansion) exists before we
    // inspect it.
    GetFramesUpTo(0);
    if (m_frames.empty())
      return;
    if (!m_frames[0]->IsInlined()) {
      // Frame zero is a plain concrete frame: no inlined depth applies.
      m_current_inlined_depth = UINT32_MAX;
      m_current_inlined_pc = LLDB_INVALID_ADDRESS;
      Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
      if (log && log->GetVerbose())
        log->Printf(
            "ResetCurrentInlinedDepth: Invalidating current inlined depth.\n");
    } else {
      // We only need to do something special about inlined blocks when we are
      // at the beginning of an inlined function:
      // FIXME: We probably also have to do something special if the PC is at
      // the END of an inlined function, which coincides with the end of
      // either its containing function or another inlined function.

      lldb::addr_t curr_pc = m_thread.GetRegisterContext()->GetPC();
      Block *block_ptr = m_frames[0]->GetFrameBlock();
      if (block_ptr) {
        Address pc_as_address;
        pc_as_address.SetLoadAddress(curr_pc,
                                     &(m_thread.GetProcess()->GetTarget()));
        AddressRange containing_range;
        if (block_ptr->GetRangeContainingAddress(pc_as_address,
                                                 containing_range)) {
          if (pc_as_address == containing_range.GetBaseAddress()) {
            // If we got here because of a breakpoint hit, then set the inlined
            // depth depending on where the breakpoint was set. If we got here
            // because of a crash, then set the inlined depth to the deepest
            // most block. Otherwise, we stopped here naturally as the result
            // of a step, so set ourselves in the containing frame of the whole
            // set of nested inlines, so the user can then "virtually" step
            // into the frames one by one, or next over the whole mess. Note:
            // We don't have to handle being somewhere in the middle of the
            // stack here, since ResetCurrentInlinedDepth doesn't get called if
            // there is a valid inlined depth set.
            StopInfoSP stop_info_sp = m_thread.GetStopInfo();
            if (stop_info_sp) {
              switch (stop_info_sp->GetStopReason()) {
              case eStopReasonWatchpoint:
              case eStopReasonException:
              case eStopReasonExec:
              case eStopReasonSignal:
                // In all these cases we want to stop in the deepest most
                // frame.
                m_current_inlined_pc = curr_pc;
                m_current_inlined_depth = 0;
                break;
              case eStopReasonBreakpoint: {
                // FIXME: Figure out what this break point is doing, and set
                // the inline depth appropriately. Be careful to take into
                // account breakpoints that implement step over prologue, since
                // that should do the default calculation. For now, if the
                // breakpoints corresponding to this hit are all internal, I
                // set the stop location to the top of the inlined stack, since
                // that will make things like stepping over prologues work
                // right. But if there are any non-internal breakpoints I do to
                // the bottom of the stack, since that was the old behavior.
                uint32_t bp_site_id = stop_info_sp->GetValue();
                BreakpointSiteSP bp_site_sp(
                    m_thread.GetProcess()->GetBreakpointSiteList().FindByID(
                        bp_site_id));
                bool all_internal = true;
                if (bp_site_sp) {
                  uint32_t num_owners = bp_site_sp->GetNumberOfOwners();
                  for (uint32_t i = 0; i < num_owners; i++) {
                    Breakpoint &bp_ref =
                        bp_site_sp->GetOwnerAtIndex(i)->GetBreakpoint();
                    if (!bp_ref.IsInternal()) {
                      all_internal = false;
                    }
                  }
                }
                if (!all_internal) {
                  // At least one user breakpoint: stop at the deepest frame
                  // (depth 0) and don't fall through to the default case.
                  m_current_inlined_pc = curr_pc;
                  m_current_inlined_depth = 0;
                  break;
                }
              }
                // All-internal breakpoint sites get the same treatment as a
                // natural stop (the default case below).
                LLVM_FALLTHROUGH;
              default: {
                // Otherwise, we should set ourselves at the container of the
                // inlining, so that the user can descend into them. So first
                // we check whether we have more than one inlined block sharing
                // this PC:
                int num_inlined_functions = 0;

                // Walk up the chain of inlined parents, counting only those
                // whose range also starts exactly at this PC.
                for (Block *container_ptr = block_ptr->GetInlinedParent();
                     container_ptr != nullptr;
                     container_ptr = container_ptr->GetInlinedParent()) {
                  if (!container_ptr->GetRangeContainingAddress(
                          pc_as_address, containing_range))
                    break;
                  if (pc_as_address != containing_range.GetBaseAddress())
                    break;

                  num_inlined_functions++;
                }
                m_current_inlined_pc = curr_pc;
                m_current_inlined_depth = num_inlined_functions + 1;
                Log *log(
                    lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
                if (log && log->GetVerbose())
                  log->Printf("ResetCurrentInlinedDepth: setting inlined "
                              "depth: %d 0x%" PRIx64 ".\n",
                              m_current_inlined_depth, curr_pc);

              } break;
              }
            }
          }
        }
      }
    }
  }
}

// Pop one level of "virtual" inline stepping: decrement the cached inlined
// depth if one is set and non-zero.  Returns true if the depth was
// decremented.
bool StackFrameList::DecrementCurrentInlinedDepth() {
  if (m_show_inlined_frames) {
    uint32_t current_inlined_depth = GetCurrentInlinedDepth();
    if (current_inlined_depth != UINT32_MAX) {
      if (current_inlined_depth > 0) {
        m_current_inlined_depth--;
        return true;
      }
    }
  }
  return false;
}

// Set the inlined depth explicitly.  UINT32_MAX clears it; any other value
// is pinned to the thread's current PC so GetCurrentInlinedDepth() can tell
// when it goes stale.
void StackFrameList::SetCurrentInlinedDepth(uint32_t new_depth) {
  m_current_inlined_depth = new_depth;
  if (new_depth == UINT32_MAX)
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
  else
    m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC();
}

// Fetch concrete (non-inlined) frames from the unwinder up to end_idx.
// Used only when inlined frames are hidden.  Marks all frames fetched when
// the unwinder returns fewer than end_idx + 1 frames.
void StackFrameList::GetOnlyConcreteFramesUpTo(uint32_t end_idx,
                                               Unwind *unwinder) {
  assert(m_thread.IsValid() && "Expected valid thread");
  assert(m_frames.size() <= end_idx && "Expected there to be frames to fill");

  if (end_idx < m_concrete_frames_fetched)
    return;

  if (!unwinder)
    return;

  uint32_t num_frames = unwinder->GetFramesUpTo(end_idx);
  if (num_frames <= end_idx + 1) {
    // Done unwinding.
    m_concrete_frames_fetched = UINT32_MAX;
  }
  m_frames.resize(num_frames);
}

// Populate m_frames with frames 0..end_idx (pass UINT32_MAX for all).  When
// inlined frames are shown, each concrete frame from the unwinder is
// followed by synthesized frames for every inlined scope containing its PC.
// Once all frames are fetched, cached data is merged in from the previous
// frame list (m_prev_frames_sp) for frames whose StackIDs still match.
void StackFrameList::GetFramesUpTo(uint32_t end_idx) {
  // Do not fetch frames for an invalid thread.
  if (!m_thread.IsValid())
    return;

  // We've already gotten more frames than asked for, or we've already finished
  // unwinding, return.
  if (m_frames.size() > end_idx || GetAllFramesFetched())
    return;

  Unwind *unwinder = m_thread.GetUnwinder();

  if (!m_show_inlined_frames) {
    GetOnlyConcreteFramesUpTo(end_idx, unwinder);
    return;
  }

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
#endif
  // If we are hiding some frames from the outside world, we need to add
  // those onto the total count of frames to fetch. However, we don't need
  // to do that if end_idx is 0 since in that case we always get the first
  // concrete frame and all the inlined frames below it... And of course, if
  // end_idx is UINT32_MAX that means get all, so just do that...

  uint32_t inlined_depth = 0;
  if (end_idx > 0 && end_idx != UINT32_MAX) {
    inlined_depth = GetCurrentInlinedDepth();
    if (inlined_depth != UINT32_MAX) {
      if (end_idx > 0)
        end_idx += inlined_depth;
    }
  }

  StackFrameSP unwind_frame_sp;
  do {
    uint32_t idx = m_concrete_frames_fetched++;
    lldb::addr_t pc = LLDB_INVALID_ADDRESS;
    lldb::addr_t cfa = LLDB_INVALID_ADDRESS;
    if (idx == 0) {
      // We might have already created frame zero, only create it if we need
      // to.
      if (m_frames.empty()) {
        RegisterContextSP reg_ctx_sp(m_thread.GetRegisterContext());

        if (reg_ctx_sp) {
          const bool success =
              unwinder && unwinder->GetFrameInfoAtIndex(idx, cfa, pc);
          // There shouldn't be any way not to get the frame info for frame
          // 0. But if the unwinder can't make one, lets make one by hand
          // with the SP as the CFA and see if that gets any further.
          if (!success) {
            cfa = reg_ctx_sp->GetSP();
            pc = reg_ctx_sp->GetPC();
          }

          unwind_frame_sp.reset(new StackFrame(m_thread.shared_from_this(),
                                               m_frames.size(), idx, reg_ctx_sp,
                                               cfa, pc, nullptr));
          m_frames.push_back(unwind_frame_sp);
        }
      } else {
        // Frame zero already exists; reuse it and its CFA.
        unwind_frame_sp = m_frames.front();
        cfa = unwind_frame_sp->m_id.GetCallFrameAddress();
      }
    } else {
      const bool success =
          unwinder && unwinder->GetFrameInfoAtIndex(idx, cfa, pc);
      if (!success) {
        // We've gotten to the end of the stack.
        SetAllFramesFetched();
        break;
      }
      const bool cfa_is_valid = true;
      const bool stop_id_is_valid = false;
      const bool is_history_frame = false;
      unwind_frame_sp.reset(new StackFrame(
          m_thread.shared_from_this(), m_frames.size(), idx, cfa, cfa_is_valid,
          pc, 0, stop_id_is_valid, is_history_frame, nullptr));
      m_frames.push_back(unwind_frame_sp);
    }

    assert(unwind_frame_sp);
    SymbolContext unwind_sc = unwind_frame_sp->GetSymbolContext(
        eSymbolContextBlock | eSymbolContextFunction);
    Block *unwind_block = unwind_sc.block;
    if (unwind_block) {
      Address curr_frame_address(unwind_frame_sp->GetFrameCodeAddress());
      TargetSP target_sp = m_thread.CalculateTarget();
      // Be sure to adjust the frame address to match the address that was
      // used to lookup the symbol context above. If we are in the first
      // concrete frame, then we lookup using the current address, else we
      // decrement the address by one to get the correct location.
      if (idx > 0) {
        if (curr_frame_address.GetOffset() == 0) {
          // If curr_frame_address points to the first address in a section
          // then after adjustment it will point to an other section. In that
          // case resolve the address again to the correct section plus
          // offset form.
          addr_t load_addr = curr_frame_address.GetOpcodeLoadAddress(
              target_sp.get(), AddressClass::eCode);
          curr_frame_address.SetOpcodeLoadAddress(
              load_addr - 1, target_sp.get(), AddressClass::eCode);
        } else {
          curr_frame_address.Slide(-1);
        }
      }

      SymbolContext next_frame_sc;
      Address next_frame_address;

      // Synthesize one frame per enclosing inlined scope, walking outward
      // from the current block until we reach a non-inlined scope.
      while (unwind_sc.GetParentOfInlinedScope(
          curr_frame_address, next_frame_sc, next_frame_address)) {
        next_frame_sc.line_entry.ApplyFileMappings(target_sp);
        StackFrameSP frame_sp(
            new StackFrame(m_thread.shared_from_this(), m_frames.size(), idx,
                           unwind_frame_sp->GetRegisterContextSP(), cfa,
                           next_frame_address, &next_frame_sc));

        m_frames.push_back(frame_sp);
        unwind_sc = next_frame_sc;
        curr_frame_address = next_frame_address;
      }
    }
  } while (m_frames.size() - 1 < end_idx);

  // Don't try to merge till you've calculated all the frames in this stack.
  if (GetAllFramesFetched() && m_prev_frames_sp) {
    StackFrameList *prev_frames = m_prev_frames_sp.get();
    StackFrameList *curr_frames = this;

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nprev_frames:\n");
    prev_frames->Dump(&s);
    s.PutCString("\ncurr_frames:\n");
    curr_frames->Dump(&s);
    s.EOL();
#endif
    size_t curr_frame_num, prev_frame_num;

    // Walk both lists from the bottom of the stack upward; stacks share
    // their innermost-caller frames, so matching StackIDs are found there.
    for (curr_frame_num = curr_frames->m_frames.size(),
        prev_frame_num = prev_frames->m_frames.size();
         curr_frame_num > 0 && prev_frame_num > 0;
         --curr_frame_num, --prev_frame_num) {
      const size_t curr_frame_idx = curr_frame_num - 1;
      const size_t prev_frame_idx = prev_frame_num - 1;
      StackFrameSP curr_frame_sp(curr_frames->m_frames[curr_frame_idx]);
      StackFrameSP prev_frame_sp(prev_frames->m_frames[prev_frame_idx]);

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n\nCurr frame #%u ", curr_frame_idx);
      if (curr_frame_sp)
        curr_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
      s.Printf("\nPrev frame #%u ", prev_frame_idx);
      if (prev_frame_sp)
        prev_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
#endif

      StackFrame *curr_frame = curr_frame_sp.get();
      StackFrame *prev_frame = prev_frame_sp.get();

      if (curr_frame == nullptr || prev_frame == nullptr)
        break;

      // Check the stack ID to make sure they are equal.
      if (curr_frame->GetStackID() != prev_frame->GetStackID())
        break;

      prev_frame->UpdatePreviousFrameFromCurrentFrame(*curr_frame);
      // Now copy the fixed up previous frame into the current frames so the
      // pointer doesn't change.
      m_frames[curr_frame_idx] = prev_frame_sp;

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n Copying previous frame to current frame");
#endif
    }
    // We are done with the old stack frame list, we can release it now.
    m_prev_frames_sp.reset();
  }

#if defined(DEBUG_STACK_FRAMES)
  s.PutCString("\n\nNew frames:\n");
  Dump(&s);
  s.EOL();
#endif
}

// Return the number of user-visible frames.  When an inlined depth is in
// effect, the hidden frames above the current depth are subtracted out.
// Pass can_create = false to only count what has already been fetched.
uint32_t StackFrameList::GetNumFrames(bool can_create) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  if (can_create)
    GetFramesUpTo(UINT32_MAX);

  uint32_t inlined_depth = GetCurrentInlinedDepth();
  if (inlined_depth == UINT32_MAX)
    return m_frames.size();
  else
    return m_frames.size() - inlined_depth;
}

// Dump all cached frames (pointer, stack ID, formatted description) to the
// given stream.  Debug/diagnostic helper; no-op if s is null.
void StackFrameList::Dump(Stream *s) {
  if (s == nullptr)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  const_iterator pos, begin = m_frames.begin(), end = m_frames.end();
  for (pos = begin; pos != end; ++pos) {
    StackFrame *frame = (*pos).get();
    s->Printf("%p: ", static_cast<void *>(frame));
    if (frame) {
      frame->GetStackID().Dump(s);
      frame->DumpUsingSettingsFormat(s);
    } else
      s->Printf("frame #%u", (uint32_t)std::distance(begin, pos));
    s->EOL();
  }
  s->EOL();
}

// Return the frame at the given user-visible index, fetching frames lazily
// as needed.  The index is shifted by the current inlined depth, so index 0
// is the frame the user is "virtually" stopped in, which may be an inlined
// frame above the concrete frame zero.  Returns an empty SP if the index is
// past the end of the stack.
StackFrameSP StackFrameList::GetFrameAtIndex(uint32_t idx) {
  StackFrameSP frame_sp;
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  uint32_t original_idx = idx;

  uint32_t inlined_depth = GetCurrentInlinedDepth();
  if (inlined_depth != UINT32_MAX)
    idx += inlined_depth;

  if (idx < m_frames.size())
    frame_sp = m_frames[idx];

  if (frame_sp)
    return frame_sp;

  // GetFramesUpTo will fill m_frames with as many frames as you asked for, if
  // there are that many. If there weren't then you asked for too many frames.
  GetFramesUpTo(idx);
  if (idx < m_frames.size()) {
    if (m_show_inlined_frames) {
      // When inline frames are enabled we actually create all the frames in
      // GetFramesUpTo.
      frame_sp = m_frames[idx];
    } else {
      // Concrete-only mode: GetOnlyConcreteFramesUpTo resized m_frames but
      // the slots may still be empty, so build the StackFrame here on demand.
      Unwind *unwinder = m_thread.GetUnwinder();
      if (unwinder) {
        addr_t pc, cfa;
        if (unwinder->GetFrameInfoAtIndex(idx, cfa, pc)) {
          const bool cfa_is_valid = true;
          const bool stop_id_is_valid = false;
          const bool is_history_frame = false;
          frame_sp.reset(new StackFrame(
              m_thread.shared_from_this(), idx, idx, cfa, cfa_is_valid, pc, 0,
              stop_id_is_valid, is_history_frame, nullptr));

          Function *function =
              frame_sp->GetSymbolContext(eSymbolContextFunction).function;
          if (function) {
            // When we aren't showing inline functions we always use the top
            // most function block as the scope.
            frame_sp->SetSymbolContextScope(&function->GetBlock(false));
          } else {
            // Set the symbol scope from the symbol regardless if it is nullptr
            // or valid.
            frame_sp->SetSymbolContextScope(
                frame_sp->GetSymbolContext(eSymbolContextSymbol).symbol);
          }
          SetFrameAtIndex(idx, frame_sp);
        }
      }
    }
  } else if (original_idx == 0) {
    // There should ALWAYS be a frame at index 0. If something went wrong with
    // the CurrentInlinedDepth such that there weren't as many frames as we
    // thought taking that into account, then reset the current inlined depth
    // and return the real zeroth frame.
    if (m_frames.empty()) {
      // Why do we have a thread with zero frames, that should not ever
      // happen...
      assert(!m_thread.IsValid() && "A valid thread has no frames.");
    } else {
      ResetCurrentInlinedDepth();
      frame_sp = m_frames[original_idx];
    }
  }

  return frame_sp;
}

// Return the first frame whose frame index equals the given concrete
// (unwind) index.  Scans forward from frame_idx == unwind_idx, because with
// inlined frames present the user-visible index is always >= the concrete
// index.
StackFrameSP
StackFrameList::GetFrameWithConcreteFrameIndex(uint32_t unwind_idx) {
  // First try assuming the unwind index is the same as the frame index. The
  // unwind index is always greater than or equal to the frame index, so it is
  // a good place to start. If we have inlined frames we might have 5 concrete
  // frames (frame unwind indexes go from 0-4), but we might have 15 frames
  // after we make all the inlined frames. Most of the time the unwind frame
  // index (or the concrete frame index) is the same as the frame index.
  uint32_t frame_idx = unwind_idx;
  StackFrameSP frame_sp(GetFrameAtIndex(frame_idx));
  while (frame_sp) {
    if (frame_sp->GetFrameIndex() == unwind_idx)
      break;
    frame_sp = GetFrameAtIndex(++frame_idx);
  }
  return frame_sp;
}

// Comparison predicate for std::lower_bound over m_frames, ordered by
// StackID.
static bool CompareStackID(const StackFrameSP &stack_sp,
                           const StackID &stack_id) {
  return stack_sp->GetStackID() < stack_id;
}

// Find the frame with the given StackID, first via binary search of the
// cached frames, then by fetching frames one at a time until a match is
// found or the stack is exhausted.  Returns an empty SP for an invalid ID
// or no match.
StackFrameSP StackFrameList::GetFrameWithStackID(const StackID &stack_id) {
  StackFrameSP frame_sp;

  if (stack_id.IsValid()) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    uint32_t frame_idx = 0;
    // Do a binary search in case the stack frame is already in our cache
    collection::const_iterator begin = m_frames.begin();
    collection::const_iterator end = m_frames.end();
    if (begin != end) {
      collection::const_iterator pos =
          std::lower_bound(begin, end, stack_id, CompareStackID);
      if (pos != end) {
        if ((*pos)->GetStackID() == stack_id)
          return *pos;
      }

      //      if (m_frames.back()->GetStackID() < stack_id)
      //        frame_idx = m_frames.size();
    }
    // Fall back to a linear walk that fetches frames lazily.
    // NOTE(review): this restarts from index 0 and re-visits cached frames;
    // presumably cheap enough in practice — confirm before "optimizing".
    do {
      frame_sp = GetFrameAtIndex(frame_idx);
      if (frame_sp && frame_sp->GetStackID() == stack_id)
        break;
      frame_idx++;
    } while (frame_sp);
  }
  return frame_sp;
}

// Store frame_sp at slot idx, growing the frame vector if needed.  Returns
// false only if the resize did not produce enough slots.
bool StackFrameList::SetFrameAtIndex(uint32_t idx, StackFrameSP &frame_sp) {
  if (idx >= m_frames.size())
    m_frames.resize(idx + 1);
  // Make sure allocation succeeded by checking bounds again
  if (idx < m_frames.size()) {
    m_frames[idx] = frame_sp;
    return true;
  }
  return false; // resize failed, out of memory?
}

// Return the index of the currently selected frame.
uint32_t StackFrameList::GetSelectedFrameIndex() const {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  return m_selected_frame_idx;
}

// Select the given frame (by pointer identity) and return its user-visible
// index.  If the frame is not found the selection falls back to index 0.
// Also updates the source manager's default file and line.
uint32_t StackFrameList::SetSelectedFrame(lldb_private::StackFrame *frame) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  m_selected_frame_idx = 0;
  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == frame) {
      m_selected_frame_idx = std::distance(begin, pos);
      // Convert the raw vector index into a user-visible index by removing
      // the frames hidden by the current inlined depth.
      // NOTE(review): no guard against m_selected_frame_idx < inlined_depth
      // (unsigned underflow); presumably the selected frame is never above
      // the current depth — confirm.
      uint32_t inlined_depth = GetCurrentInlinedDepth();
      if (inlined_depth != UINT32_MAX)
        m_selected_frame_idx -= inlined_depth;
      break;
    }
  }
  SetDefaultFileAndLineToSelectedFrame();
  return m_selected_frame_idx;
}

// Mark a stack frame as the current frame using the frame index
bool StackFrameList::SetSelectedFrameByIndex(uint32_t idx) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  StackFrameSP frame_sp(GetFrameAtIndex(idx));
  if (frame_sp) {
    SetSelectedFrame(frame_sp.get());
    return true;
  } else
    return false;
}

// If this thread is the process's selected thread, push the selected
// frame's source location into the target's source manager as the default
// file and line.
void StackFrameList::SetDefaultFileAndLineToSelectedFrame() {
  if (m_thread.GetID() ==
      m_thread.GetProcess()->GetThreadList().GetSelectedThread()->GetID()) {
    StackFrameSP frame_sp(GetFrameAtIndex(GetSelectedFrameIndex()));
    if (frame_sp) {
      SymbolContext sc = frame_sp->GetSymbolContext(eSymbolContextLineEntry);
      if (sc.line_entry.file)
        m_thread.CalculateTarget()->GetSourceManager().SetDefaultFileAndLine(
            sc.line_entry.file, sc.line_entry.line);
    }
  }
}

// The thread has been run, reset the number stack frames to zero so we can
// determine how many frames we have lazily.
// Drop all cached frames and reset the concrete-frame counter so the next
// access re-unwinds from scratch.  (See the comment above: called after the
// thread has run.)
void StackFrameList::Clear() {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_frames.clear();
  m_concrete_frames_fetched = 0;
}

// Invalidate frames from start_idx onward.  With inlined frames shown the
// whole list is cleared (indices shift when inline expansion changes);
// otherwise only the affected slots are reset so earlier frames stay cached.
void StackFrameList::InvalidateFrames(uint32_t start_idx) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (m_show_inlined_frames) {
    Clear();
  } else {
    const size_t num_frames = m_frames.size();
    while (start_idx < num_frames) {
      m_frames[start_idx].reset();
      ++start_idx;
    }
  }
}

// Merge a freshly-computed (current) frame list into the previous one,
// preferring previously-cached StackFrame objects so frame pointers stay
// stable across stops.  On return prev_sp holds the merged list.  Both
// lists are locked for the duration.
void StackFrameList::Merge(std::unique_ptr<StackFrameList> &curr_ap,
                           lldb::StackFrameListSP &prev_sp) {
  std::unique_lock<std::recursive_mutex> current_lock, previous_lock;
  if (curr_ap)
    current_lock = std::unique_lock<std::recursive_mutex>(curr_ap->m_mutex);
  if (prev_sp)
    previous_lock = std::unique_lock<std::recursive_mutex>(prev_sp->m_mutex);

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
  s.PutCString("\n\nStackFrameList::Merge():\nPrev:\n");
  if (prev_sp)
    prev_sp->Dump(&s);
  else
    s.PutCString("NULL");
  s.PutCString("\nCurr:\n");
  if (curr_ap)
    curr_ap->Dump(&s);
  else
    s.PutCString("NULL");
  s.EOL();
#endif

  if (!curr_ap || curr_ap->GetNumFrames(false) == 0) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("No current frames, leave previous frames alone...\n");
#endif
    // NOTE(review): release() abandons ownership without deleting the list —
    // looks like it leaks the (empty) StackFrameList; reset() would destroy
    // it.  Confirm this is not intentional before changing.
    curr_ap.release();
    return;
  }

  if (!prev_sp || prev_sp->GetNumFrames(false) == 0) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("No previous frames, so use current frames...\n");
#endif
    // We either don't have any previous frames, or since we have more than
    // one current frames it means we have all the frames and can safely
    // replace our previous frames.
    prev_sp.reset(curr_ap.release());
    return;
  }

  const uint32_t num_curr_frames = curr_ap->GetNumFrames(false);

  if (num_curr_frames > 1) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString(
        "We have more than one current frame, so use current frames...\n");
#endif
    // We have more than one current frames it means we have all the frames
    // and can safely replace our previous frames.
    prev_sp.reset(curr_ap.release());

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nMerged:\n");
    prev_sp->Dump(&s);
#endif
    return;
  }

  // From here on: exactly one current frame and at least one previous frame.
  StackFrameSP prev_frame_zero_sp(prev_sp->GetFrameAtIndex(0));
  StackFrameSP curr_frame_zero_sp(curr_ap->GetFrameAtIndex(0));
  StackID curr_stack_id(curr_frame_zero_sp->GetStackID());
  StackID prev_stack_id(prev_frame_zero_sp->GetStackID());

#if defined(DEBUG_STACK_FRAMES)
  const uint32_t num_prev_frames = prev_sp->GetNumFrames(false);
  s.Printf("\n%u previous frames with one current frame\n", num_prev_frames);
#endif

  // We have only a single current frame
  // Our previous stack frames only had a single frame as well...
  if (curr_stack_id == prev_stack_id) {
#if defined(DEBUG_STACK_FRAMES)
    s.Printf("\nPrevious frame #0 is same as current frame #0, merge the "
             "cached data\n");
#endif

    curr_frame_zero_sp->UpdateCurrentFrameFromPreviousFrame(
        *prev_frame_zero_sp);
    //        prev_frame_zero_sp->UpdatePreviousFrameFromCurrentFrame
    //        (*curr_frame_zero_sp);
    //        prev_sp->SetFrameAtIndex (0, prev_frame_zero_sp);
  } else if (curr_stack_id < prev_stack_id) {
#if defined(DEBUG_STACK_FRAMES)
    s.Printf("\nCurrent frame #0 has a stack ID that is less than the previous "
             "frame #0, insert current frame zero in front of previous\n");
#endif
    // The stack grew deeper: the new frame zero goes in front of the old
    // frames.
    prev_sp->m_frames.insert(prev_sp->m_frames.begin(), curr_frame_zero_sp);
  }

  // NOTE(review): same release()-without-delete pattern as above — the
  // current list object appears to be leaked (its frames are kept alive
  // via shared pointers regardless).  Confirm intent.
  curr_ap.release();

#if defined(DEBUG_STACK_FRAMES)
  s.PutCString("\nMerged:\n");
  prev_sp->Dump(&s);
#endif
}

// Return the shared pointer that owns the given raw StackFrame pointer, or
// an empty SP if the frame is not in this list.
// NOTE(review): unlike the other accessors this does not take m_mutex;
// presumably callers hold it — confirm.
lldb::StackFrameSP
StackFrameList::GetStackFrameSPForStackFramePtr(StackFrame *stack_frame_ptr) {
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  lldb::StackFrameSP ret_sp;

  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == stack_frame_ptr) {
      ret_sp = (*pos);
      break;
    }
  }
  return ret_sp;
}

// Print up to num_frames frames starting at first_frame to strm, optionally
// with source context for the first num_frames_with_source frames and a
// marker next to the selected frame.  Returns the number of frames printed.
size_t StackFrameList::GetStatus(Stream &strm, uint32_t first_frame,
                                 uint32_t num_frames, bool show_frame_info,
                                 uint32_t num_frames_with_source,
                                 bool show_unique,
                                 const char *selected_frame_marker) {
  size_t num_frames_displayed = 0;

  if (num_frames == 0)
    return 0;

  StackFrameSP frame_sp;
  uint32_t frame_idx = 0;
  uint32_t last_frame;

  // Don't let the last frame wrap around...
  if (num_frames == UINT32_MAX)
    last_frame = UINT32_MAX;
  else
    last_frame = first_frame + num_frames;

  StackFrameSP selected_frame_sp = m_thread.GetSelectedFrame();
  const char *unselected_marker = nullptr;
  // Unselected frames get a same-width run of spaces so the output columns
  // line up with the marked (selected) frame.
  std::string buffer;
  if (selected_frame_marker) {
    size_t len = strlen(selected_frame_marker);
    buffer.insert(buffer.begin(), len, ' ');
    unselected_marker = buffer.c_str();
  }
  const char *marker = nullptr;

  for (frame_idx = first_frame; frame_idx < last_frame; ++frame_idx) {
    frame_sp = GetFrameAtIndex(frame_idx);
    if (!frame_sp)
      break;

    if (selected_frame_marker != nullptr) {
      if (frame_sp == selected_frame_sp)
        marker = selected_frame_marker;
      else
        marker = unselected_marker;
    }

    // NOTE(review): `first_frame - frame_idx` underflows (unsigned) once
    // frame_idx > first_frame, so source is only ever shown for the very
    // first frame.  Looks like `frame_idx - first_frame` was intended —
    // confirm against callers before changing.
    if (!frame_sp->GetStatus(strm, show_frame_info,
                             num_frames_with_source > (first_frame - frame_idx),
                             show_unique, marker))
      break;
    ++num_frames_displayed;
  }

  strm.IndentLess();
  return num_frames_displayed;
}