//===-- StackFrameList.cpp --------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

// C Includes
// C++ Includes
// Other libraries and framework includes
// Project includes
#include "lldb/Target/StackFrameList.h"
#include "lldb/Breakpoint/Breakpoint.h"
#include "lldb/Breakpoint/BreakpointLocation.h"
#include "lldb/Core/Log.h"
#include "lldb/Core/SourceManager.h"
#include "lldb/Core/StreamFile.h"
#include "lldb/Symbol/Block.h"
#include "lldb/Symbol/Function.h"
#include "lldb/Symbol/Symbol.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/StackFrame.h"
#include "lldb/Target/StopInfo.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/Unwind.h"

//#define DEBUG_STACK_FRAMES 1

using namespace lldb;
using namespace lldb_private;

//----------------------------------------------------------------------
// StackFrameList constructor
//----------------------------------------------------------------------
StackFrameList::StackFrameList(Thread &thread,
                               const lldb::StackFrameListSP &prev_frames_sp,
                               bool show_inline_frames)
    : m_thread(thread), m_prev_frames_sp(prev_frames_sp), m_mutex(), m_frames(),
      m_selected_frame_idx(0), m_concrete_frames_fetched(0),
      m_current_inlined_depth(UINT32_MAX),
      m_current_inlined_pc(LLDB_INVALID_ADDRESS),
      m_show_inlined_frames(show_inline_frames) {
  if (prev_frames_sp) {
    m_current_inlined_depth = prev_frames_sp->m_current_inlined_depth;
    m_current_inlined_pc = prev_frames_sp->m_current_inlined_pc;
  }
}

StackFrameList::~StackFrameList() {
  // Call Clear() since this takes a lock and clears the stack frame list in
  // case another thread is currently using this stack frame list.
  Clear();
}

void StackFrameList::CalculateCurrentInlinedDepth() {
  uint32_t cur_inlined_depth = GetCurrentInlinedDepth();
  if (cur_inlined_depth == UINT32_MAX) {
    ResetCurrentInlinedDepth();
  }
}

uint32_t StackFrameList::GetCurrentInlinedDepth() {
  if (m_show_inlined_frames && m_current_inlined_pc != LLDB_INVALID_ADDRESS) {
    lldb::addr_t cur_pc = m_thread.GetRegisterContext()->GetPC();
    if (cur_pc != m_current_inlined_pc) {
      m_current_inlined_pc = LLDB_INVALID_ADDRESS;
      m_current_inlined_depth = UINT32_MAX;
      Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
      if (log && log->GetVerbose())
        log->Printf(
            "GetCurrentInlinedDepth: invalidating current inlined depth.\n");
    }
    return m_current_inlined_depth;
  } else {
    return UINT32_MAX;
  }
}

void StackFrameList::ResetCurrentInlinedDepth() {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  if (m_show_inlined_frames) {
    GetFramesUpTo(0);
    if (m_frames.empty())
      return;
    if (!m_frames[0]->IsInlined()) {
      m_current_inlined_depth = UINT32_MAX;
      m_current_inlined_pc = LLDB_INVALID_ADDRESS;
      Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
      if (log && log->GetVerbose())
        log->Printf(
            "ResetCurrentInlinedDepth: Invalidating current inlined depth.\n");
    } else {
      // We only need to do something special about inlined blocks when we are
      // at the beginning of an inlined function:
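      // For example (purely illustrative, not from the original comments): if
      // foo() inlines a call to bar(), which in turn inlines baz(), and the
      // PC sits at the address where both inlined blocks begin, the current
      // inlined depth decides which of those nested frames is presented as
      // the frame the user is stopped in.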
      // FIXME: We probably also have to do something special if the PC is at
      // the END of an inlined function, which coincides with the end of
      // either its containing function or another inlined function.

      lldb::addr_t curr_pc = m_thread.GetRegisterContext()->GetPC();
      Block *block_ptr = m_frames[0]->GetFrameBlock();
      if (block_ptr) {
        Address pc_as_address;
        pc_as_address.SetLoadAddress(curr_pc,
                                     &(m_thread.GetProcess()->GetTarget()));
        AddressRange containing_range;
        if (block_ptr->GetRangeContainingAddress(pc_as_address,
                                                 containing_range)) {
          if (pc_as_address == containing_range.GetBaseAddress()) {
            // If we got here because of a breakpoint hit, then set the
            // inlined depth depending on where the breakpoint was set. If we
            // got here because of a crash, then set the inlined depth to the
            // deepest block. Otherwise, we stopped here naturally as the
            // result of a step, so set ourselves in the containing frame of
            // the whole set of nested inlines, so the user can then
            // "virtually" step into the frames one by one, or next over the
            // whole mess.
            // Note: We don't have to handle being somewhere in the middle of
            // the stack here, since ResetCurrentInlinedDepth doesn't get
            // called if there is a valid inlined depth set.
            StopInfoSP stop_info_sp = m_thread.GetStopInfo();
            if (stop_info_sp) {
              switch (stop_info_sp->GetStopReason()) {
              case eStopReasonWatchpoint:
              case eStopReasonException:
              case eStopReasonExec:
              case eStopReasonSignal:
                // In all these cases we want to stop in the deepest frame.
                m_current_inlined_pc = curr_pc;
                m_current_inlined_depth = 0;
                break;
              case eStopReasonBreakpoint: {
                // FIXME: Figure out what this breakpoint is doing, and set
                // the inline depth appropriately. Be careful to take into
                // account breakpoints that implement step over prologue,
                // since that should do the default calculation. For now, if
                // the breakpoints corresponding to this hit are all internal,
                // I set the stop location to the top of the inlined stack,
                // since that will make things like stepping over prologues
                // work right. But if there are any non-internal breakpoints I
                // go to the bottom of the stack, since that was the old
                // behavior.
                uint32_t bp_site_id = stop_info_sp->GetValue();
                BreakpointSiteSP bp_site_sp(
                    m_thread.GetProcess()->GetBreakpointSiteList().FindByID(
                        bp_site_id));
                bool all_internal = true;
                if (bp_site_sp) {
                  uint32_t num_owners = bp_site_sp->GetNumberOfOwners();
                  for (uint32_t i = 0; i < num_owners; i++) {
                    Breakpoint &bp_ref =
                        bp_site_sp->GetOwnerAtIndex(i)->GetBreakpoint();
                    if (!bp_ref.IsInternal()) {
                      all_internal = false;
                    }
                  }
                }
                if (!all_internal) {
                  m_current_inlined_pc = curr_pc;
                  m_current_inlined_depth = 0;
                  break;
                }
              }
                LLVM_FALLTHROUGH;
              default: {
                // Otherwise, we should set ourselves at the container of the
                // inlining, so that the user can descend into them.
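                // The loop below walks up the chain of inlined parents whose
                // ranges also begin at this PC; setting the depth to one more
                // than that count positions the user in the frame that
                // contains the whole nest of inlined calls, from which they
                // can step "into" the inlines one at a time.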
                // So first we check whether we have more than one inlined
                // block sharing this PC:
                int num_inlined_functions = 0;

                for (Block *container_ptr = block_ptr->GetInlinedParent();
                     container_ptr != nullptr;
                     container_ptr = container_ptr->GetInlinedParent()) {
                  if (!container_ptr->GetRangeContainingAddress(
                          pc_as_address, containing_range))
                    break;
                  if (pc_as_address != containing_range.GetBaseAddress())
                    break;

                  num_inlined_functions++;
                }
                m_current_inlined_pc = curr_pc;
                m_current_inlined_depth = num_inlined_functions + 1;
                Log *log(
                    lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
                if (log && log->GetVerbose())
                  log->Printf("ResetCurrentInlinedDepth: setting inlined "
                              "depth: %d 0x%" PRIx64 ".\n",
                              m_current_inlined_depth, curr_pc);

              } break;
              }
            }
          }
        }
      }
    }
  }
}

bool StackFrameList::DecrementCurrentInlinedDepth() {
  if (m_show_inlined_frames) {
    uint32_t current_inlined_depth = GetCurrentInlinedDepth();
    if (current_inlined_depth != UINT32_MAX) {
      if (current_inlined_depth > 0) {
        m_current_inlined_depth--;
        return true;
      }
    }
  }
  return false;
}

void StackFrameList::SetCurrentInlinedDepth(uint32_t new_depth) {
  m_current_inlined_depth = new_depth;
  if (new_depth == UINT32_MAX)
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
  else
    m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC();
}

void StackFrameList::GetFramesUpTo(uint32_t end_idx) {
  // This makes sure we do not fetch frames for an invalid thread.
  if (!m_thread.IsValid())
    return;

  // We've already gotten more frames than asked for, or we've already
  // finished unwinding, return.
  if (m_frames.size() > end_idx || GetAllFramesFetched())
    return;

  Unwind *unwinder = m_thread.GetUnwinder();

  if (m_show_inlined_frames) {
#if defined(DEBUG_STACK_FRAMES)
    StreamFile s(stdout, false);
#endif
    // If we are hiding some frames from the outside world, we need to add
    // those onto the total count of frames to fetch. However, we don't need
    // to do that if end_idx is 0 since in that case we always get the first
    // concrete frame and all the inlined frames below it... And of course, if
    // end_idx is UINT32_MAX that means get all, so just do that...

    uint32_t inlined_depth = 0;
    if (end_idx > 0 && end_idx != UINT32_MAX) {
      inlined_depth = GetCurrentInlinedDepth();
      if (inlined_depth != UINT32_MAX) {
        if (end_idx > 0)
          end_idx += inlined_depth;
      }
    }

    StackFrameSP unwind_frame_sp;
    do {
      uint32_t idx = m_concrete_frames_fetched++;
      lldb::addr_t pc = LLDB_INVALID_ADDRESS;
      lldb::addr_t cfa = LLDB_INVALID_ADDRESS;
      if (idx == 0) {
        // We might have already created frame zero, only create it if we
        // need to.
        if (m_frames.empty()) {
          RegisterContextSP reg_ctx_sp(m_thread.GetRegisterContext());

          if (reg_ctx_sp) {
            const bool success =
                unwinder && unwinder->GetFrameInfoAtIndex(idx, cfa, pc);
            // There shouldn't be any way not to get the frame info for frame
            // 0. But if the unwinder can't make one, let's make one by hand
            // with the SP as the CFA and see if that gets any further.
            if (!success) {
              cfa = reg_ctx_sp->GetSP();
              pc = reg_ctx_sp->GetPC();
            }

            unwind_frame_sp.reset(new StackFrame(m_thread.shared_from_this(),
                                                 m_frames.size(), idx,
                                                 reg_ctx_sp, cfa, pc, nullptr));
            m_frames.push_back(unwind_frame_sp);
          }
        } else {
          unwind_frame_sp = m_frames.front();
          cfa = unwind_frame_sp->m_id.GetCallFrameAddress();
        }
      } else {
        const bool success =
            unwinder && unwinder->GetFrameInfoAtIndex(idx, cfa, pc);
        if (!success) {
          // We've gotten to the end of the stack.
          SetAllFramesFetched();
          break;
        }
        const bool cfa_is_valid = true;
        const bool stop_id_is_valid = false;
        const bool is_history_frame = false;
        unwind_frame_sp.reset(new StackFrame(
            m_thread.shared_from_this(), m_frames.size(), idx, cfa,
            cfa_is_valid, pc, 0, stop_id_is_valid, is_history_frame, nullptr));
        m_frames.push_back(unwind_frame_sp);
      }

      assert(unwind_frame_sp);
      SymbolContext unwind_sc = unwind_frame_sp->GetSymbolContext(
          eSymbolContextBlock | eSymbolContextFunction);
      Block *unwind_block = unwind_sc.block;
      if (unwind_block) {
        Address curr_frame_address(unwind_frame_sp->GetFrameCodeAddress());
        TargetSP target_sp = m_thread.CalculateTarget();
        // Be sure to adjust the frame address to match the address that was
        // used to look up the symbol context above. If we are in the first
        // concrete frame, then we look up using the current address, else we
        // decrement the address by one to get the correct location.
        if (idx > 0) {
          if (curr_frame_address.GetOffset() == 0) {
            // If curr_frame_address points to the first address in a section
            // then after adjustment it will point to another section. In
            // that case resolve the address again to the correct section
            // plus offset form.
            addr_t load_addr = curr_frame_address.GetOpcodeLoadAddress(
                target_sp.get(), eAddressClassCode);
            curr_frame_address.SetOpcodeLoadAddress(
                load_addr - 1, target_sp.get(), eAddressClassCode);
          } else {
            curr_frame_address.Slide(-1);
          }
        }

        SymbolContext next_frame_sc;
        Address next_frame_address;
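        // Each iteration of the loop below synthesizes one frame for an
        // inlined scope containing curr_frame_address: starting from the
        // concrete frame's block, we walk out to the parent of each inlined
        // scope and push a frame for it at the call-site address, until the
        // enclosing scope is no longer an inlined one.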
        while (unwind_sc.GetParentOfInlinedScope(
            curr_frame_address, next_frame_sc, next_frame_address)) {
          next_frame_sc.line_entry.ApplyFileMappings(target_sp);
          StackFrameSP frame_sp(
              new StackFrame(m_thread.shared_from_this(), m_frames.size(), idx,
                             unwind_frame_sp->GetRegisterContextSP(), cfa,
                             next_frame_address, &next_frame_sc));

          m_frames.push_back(frame_sp);
          unwind_sc = next_frame_sc;
          curr_frame_address = next_frame_address;
        }
      }
    } while (m_frames.size() - 1 < end_idx);

    // Don't try to merge till you've calculated all the frames in this stack.
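    // If we have the frame list from before the last resume, walk both lists
    // from the oldest frame down and, wherever the StackIDs still match, keep
    // the previous StackFrame object (updated from the current one) so that
    // the frame pointer stored in m_frames does not change across the stop.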
    if (GetAllFramesFetched() && m_prev_frames_sp) {
      StackFrameList *prev_frames = m_prev_frames_sp.get();
      StackFrameList *curr_frames = this;

      // curr_frames->m_current_inlined_depth = prev_frames->m_current_inlined_depth;
      // curr_frames->m_current_inlined_pc = prev_frames->m_current_inlined_pc;
      // printf ("GetFramesUpTo: Copying current inlined depth: %d 0x%" PRIx64 ".\n",
      //         curr_frames->m_current_inlined_depth, curr_frames->m_current_inlined_pc);

#if defined(DEBUG_STACK_FRAMES)
      s.PutCString("\nprev_frames:\n");
      prev_frames->Dump(&s);
      s.PutCString("\ncurr_frames:\n");
      curr_frames->Dump(&s);
      s.EOL();
#endif
      size_t curr_frame_num, prev_frame_num;

      for (curr_frame_num = curr_frames->m_frames.size(),
           prev_frame_num = prev_frames->m_frames.size();
           curr_frame_num > 0 && prev_frame_num > 0;
           --curr_frame_num, --prev_frame_num) {
        const size_t curr_frame_idx = curr_frame_num - 1;
        const size_t prev_frame_idx = prev_frame_num - 1;
        StackFrameSP curr_frame_sp(curr_frames->m_frames[curr_frame_idx]);
        StackFrameSP prev_frame_sp(prev_frames->m_frames[prev_frame_idx]);

#if defined(DEBUG_STACK_FRAMES)
        s.Printf("\n\nCurr frame #%u ", curr_frame_idx);
        if (curr_frame_sp)
          curr_frame_sp->Dump(&s, true, false);
        else
          s.PutCString("NULL");
        s.Printf("\nPrev frame #%u ", prev_frame_idx);
        if (prev_frame_sp)
          prev_frame_sp->Dump(&s, true, false);
        else
          s.PutCString("NULL");
#endif

        StackFrame *curr_frame = curr_frame_sp.get();
        StackFrame *prev_frame = prev_frame_sp.get();

        if (curr_frame == nullptr || prev_frame == nullptr)
          break;

        // Check the stack ID to make sure they are equal.
        if (curr_frame->GetStackID() != prev_frame->GetStackID())
          break;

        prev_frame->UpdatePreviousFrameFromCurrentFrame(*curr_frame);
        // Now copy the fixed up previous frame into the current frames so the
        // pointer doesn't change.
        m_frames[curr_frame_idx] = prev_frame_sp;
        // curr_frame->UpdateCurrentFrameFromPreviousFrame (*prev_frame);

#if defined(DEBUG_STACK_FRAMES)
        s.Printf("\n Copying previous frame to current frame");
#endif
      }
      // We are done with the old stack frame list, we can release it now.
      m_prev_frames_sp.reset();
    }

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\n\nNew frames:\n");
    Dump(&s);
    s.EOL();
#endif
  } else {
    if (end_idx < m_concrete_frames_fetched)
      return;
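    // When inlined frames are not being shown, the unwinder's frame list is
    // used directly: we ask it to unwind up to end_idx and size m_frames to
    // match, and the StackFrame objects themselves are created lazily in
    // GetFrameAtIndex().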
    if (unwinder) {
      uint32_t num_frames = unwinder->GetFramesUpTo(end_idx);
      if (num_frames <= end_idx + 1) {
        // Done unwinding.
        m_concrete_frames_fetched = UINT32_MAX;
      }
      m_frames.resize(num_frames);
    }
  }
}

uint32_t StackFrameList::GetNumFrames(bool can_create) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  if (can_create)
    GetFramesUpTo(UINT32_MAX);

  uint32_t inlined_depth = GetCurrentInlinedDepth();
  if (inlined_depth == UINT32_MAX)
    return m_frames.size();
  else
    return m_frames.size() - inlined_depth;
}

void StackFrameList::Dump(Stream *s) {
  if (s == nullptr)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  const_iterator pos, begin = m_frames.begin(), end = m_frames.end();
  for (pos = begin; pos != end; ++pos) {
    StackFrame *frame = (*pos).get();
    s->Printf("%p: ", static_cast<void *>(frame));
    if (frame) {
      frame->GetStackID().Dump(s);
      frame->DumpUsingSettingsFormat(s);
    } else
      s->Printf("frame #%u", (uint32_t)std::distance(begin, pos));
    s->EOL();
  }
  s->EOL();
}
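// Note: the frame index passed to GetFrameAtIndex() is in the user-visible
// numbering. When a current inlined depth is active, the index is offset by
// that depth (and GetNumFrames() subtracts it from the count), which hides
// the deepest inlined frames from the user-visible list.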
StackFrameSP StackFrameList::GetFrameAtIndex(uint32_t idx) {
  StackFrameSP frame_sp;
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  uint32_t original_idx = idx;

  uint32_t inlined_depth = GetCurrentInlinedDepth();
  if (inlined_depth != UINT32_MAX)
    idx += inlined_depth;

  if (idx < m_frames.size())
    frame_sp = m_frames[idx];

  if (frame_sp)
    return frame_sp;

  // GetFramesUpTo will fill m_frames with as many frames as you asked for, if
  // there are that many. If there weren't, then you asked for too many
  // frames.
  GetFramesUpTo(idx);
  if (idx < m_frames.size()) {
    if (m_show_inlined_frames) {
      // When inline frames are enabled we actually create all the frames in
      // GetFramesUpTo.
      frame_sp = m_frames[idx];
    } else {
      Unwind *unwinder = m_thread.GetUnwinder();
      if (unwinder) {
        addr_t pc, cfa;
        if (unwinder->GetFrameInfoAtIndex(idx, cfa, pc)) {
          const bool cfa_is_valid = true;
          const bool stop_id_is_valid = false;
          const bool is_history_frame = false;
          frame_sp.reset(new StackFrame(
              m_thread.shared_from_this(), idx, idx, cfa, cfa_is_valid, pc, 0,
              stop_id_is_valid, is_history_frame, nullptr));

          Function *function =
              frame_sp->GetSymbolContext(eSymbolContextFunction).function;
          if (function) {
            // When we aren't showing inline functions we always use the top
            // most function block as the scope.
            frame_sp->SetSymbolContextScope(&function->GetBlock(false));
          } else {
            // Set the symbol scope from the symbol regardless of whether it
            // is nullptr or valid.
            frame_sp->SetSymbolContextScope(
                frame_sp->GetSymbolContext(eSymbolContextSymbol).symbol);
          }
          SetFrameAtIndex(idx, frame_sp);
        }
      }
    }
  } else if (original_idx == 0) {
    // There should ALWAYS be a frame at index 0. If something went wrong with
    // the CurrentInlinedDepth such that there weren't as many frames as we
    // thought taking that into account, then reset the current inlined depth
    // and return the real zeroth frame.
    if (m_frames.empty()) {
      // Why do we have a thread with zero frames? That should never happen...
      if (m_thread.IsValid())
        assert(false && "A valid thread has no frames.");
    } else {
      ResetCurrentInlinedDepth();
      frame_sp = m_frames[original_idx];
    }
  }

  return frame_sp;
}

StackFrameSP
StackFrameList::GetFrameWithConcreteFrameIndex(uint32_t unwind_idx) {
  // First try assuming the unwind index is the same as the frame index. The
  // frame index of a concrete frame is always greater than or equal to its
  // unwind index (inlined frames can only push it later in the list), so the
  // unwind index is a good place to start. If we have inlined frames we might
  // have 5 concrete frames (frame unwind indexes go from 0-4), but we might
  // have 15 frames after we make all the inlined frames. Most of the time the
  // unwind frame index (or the concrete frame index) is the same as the frame
  // index.
  uint32_t frame_idx = unwind_idx;
  StackFrameSP frame_sp(GetFrameAtIndex(frame_idx));
  while (frame_sp) {
    if (frame_sp->GetFrameIndex() == unwind_idx)
      break;
    frame_sp = GetFrameAtIndex(++frame_idx);
  }
  return frame_sp;
}

static bool CompareStackID(const StackFrameSP &stack_sp,
                           const StackID &stack_id) {
  return stack_sp->GetStackID() < stack_id;
}
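// The binary search below relies on m_frames being ordered by StackID
// (youngest frame first); if the requested frame is not already cached, we
// fall back to fetching frames one at a time until we find it or run out of
// frames.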
StackFrameSP StackFrameList::GetFrameWithStackID(const StackID &stack_id) {
  StackFrameSP frame_sp;

  if (stack_id.IsValid()) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    uint32_t frame_idx = 0;
    // Do a binary search in case the stack frame is already in our cache.
    collection::const_iterator begin = m_frames.begin();
    collection::const_iterator end = m_frames.end();
    if (begin != end) {
      collection::const_iterator pos =
          std::lower_bound(begin, end, stack_id, CompareStackID);
      if (pos != end) {
        if ((*pos)->GetStackID() == stack_id)
          return *pos;
      }

      // if (m_frames.back()->GetStackID() < stack_id)
      //   frame_idx = m_frames.size();
    }
    do {
      frame_sp = GetFrameAtIndex(frame_idx);
      if (frame_sp && frame_sp->GetStackID() == stack_id)
        break;
      frame_idx++;
    } while (frame_sp);
  }
  return frame_sp;
}

bool StackFrameList::SetFrameAtIndex(uint32_t idx, StackFrameSP &frame_sp) {
  if (idx >= m_frames.size())
    m_frames.resize(idx + 1);
  // Make sure allocation succeeded by checking bounds again.
  if (idx < m_frames.size()) {
    m_frames[idx] = frame_sp;
    return true;
  }
  return false; // resize failed, out of memory?
}

uint32_t StackFrameList::GetSelectedFrameIndex() const {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  return m_selected_frame_idx;
}
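// The selected frame index is kept in the same user-visible numbering that
// GetFrameAtIndex() uses: when a current inlined depth is active it is
// subtracted here, mirroring the addition done when looking a frame up by
// index.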
uint32_t StackFrameList::SetSelectedFrame(lldb_private::StackFrame *frame) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  m_selected_frame_idx = 0;
  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == frame) {
      m_selected_frame_idx = std::distance(begin, pos);
      uint32_t inlined_depth = GetCurrentInlinedDepth();
      if (inlined_depth != UINT32_MAX)
        m_selected_frame_idx -= inlined_depth;
      break;
    }
  }
  SetDefaultFileAndLineToSelectedFrame();
  return m_selected_frame_idx;
}

// Mark a stack frame as the current frame using the frame index.
bool StackFrameList::SetSelectedFrameByIndex(uint32_t idx) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  StackFrameSP frame_sp(GetFrameAtIndex(idx));
  if (frame_sp) {
    SetSelectedFrame(frame_sp.get());
    return true;
  } else
    return false;
}

void StackFrameList::SetDefaultFileAndLineToSelectedFrame() {
  if (m_thread.GetID() ==
      m_thread.GetProcess()->GetThreadList().GetSelectedThread()->GetID()) {
    StackFrameSP frame_sp(GetFrameAtIndex(GetSelectedFrameIndex()));
    if (frame_sp) {
      SymbolContext sc = frame_sp->GetSymbolContext(eSymbolContextLineEntry);
      if (sc.line_entry.file)
        m_thread.CalculateTarget()->GetSourceManager().SetDefaultFileAndLine(
            sc.line_entry.file, sc.line_entry.line);
    }
  }
}

// The thread has been run, reset the number of stack frames to zero so we can
// determine how many frames we have lazily.
void StackFrameList::Clear() {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_frames.clear();
  m_concrete_frames_fetched = 0;
}

void StackFrameList::InvalidateFrames(uint32_t start_idx) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (m_show_inlined_frames) {
    Clear();
  } else {
    const size_t num_frames = m_frames.size();
    while (start_idx < num_frames) {
      m_frames[start_idx].reset();
      ++start_idx;
    }
  }
}

void StackFrameList::Merge(std::unique_ptr<StackFrameList> &curr_ap,
                           lldb::StackFrameListSP &prev_sp) {
  std::unique_lock<std::recursive_mutex> current_lock, previous_lock;
  if (curr_ap)
    current_lock = std::unique_lock<std::recursive_mutex>(curr_ap->m_mutex);
  if (prev_sp)
    previous_lock = std::unique_lock<std::recursive_mutex>(prev_sp->m_mutex);

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
  s.PutCString("\n\nStackFrameList::Merge():\nPrev:\n");
  if (prev_sp)
    prev_sp->Dump(&s);
  else
    s.PutCString("NULL");
  s.PutCString("\nCurr:\n");
  if (curr_ap)
    curr_ap->Dump(&s);
  else
    s.PutCString("NULL");
  s.EOL();
#endif

  if (!curr_ap || curr_ap->GetNumFrames(false) == 0) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("No current frames, leave previous frames alone...\n");
#endif
    curr_ap.release();
    return;
  }

  if (!prev_sp || prev_sp->GetNumFrames(false) == 0) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("No previous frames, so use current frames...\n");
#endif
    // We either don't have any previous frames, or since we have more than
    // one current frame it means we have all the frames and can safely
    // replace our previous frames.
    prev_sp.reset(curr_ap.release());
    return;
  }

  const uint32_t num_curr_frames = curr_ap->GetNumFrames(false);

  if (num_curr_frames > 1) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString(
        "We have more than one current frame, so use current frames...\n");
#endif
    // We have more than one current frame, which means we have all the frames
    // and can safely replace our previous frames.
    prev_sp.reset(curr_ap.release());

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nMerged:\n");
    prev_sp->Dump(&s);
#endif
    return;
  }

  StackFrameSP prev_frame_zero_sp(prev_sp->GetFrameAtIndex(0));
  StackFrameSP curr_frame_zero_sp(curr_ap->GetFrameAtIndex(0));
  StackID curr_stack_id(curr_frame_zero_sp->GetStackID());
  StackID prev_stack_id(prev_frame_zero_sp->GetStackID());

#if defined(DEBUG_STACK_FRAMES)
  const uint32_t num_prev_frames = prev_sp->GetNumFrames(false);
  s.Printf("\n%u previous frames with one current frame\n", num_prev_frames);
#endif

  // We have only a single current frame.
  // Our previous stack frames only had a single frame as well...
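  // Three cases from here: the two frame-zero StackIDs match, so we merge the
  // previous frame's cached data into the current frame zero; the current
  // frame's StackID is less than the previous one's (a younger frame), so we
  // insert the current frame zero at the front of the previous list; or
  // neither holds, and the previous frames are left untouched.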
  if (curr_stack_id == prev_stack_id) {
#if defined(DEBUG_STACK_FRAMES)
    s.Printf("\nPrevious frame #0 is same as current frame #0, merge the "
             "cached data\n");
#endif

    curr_frame_zero_sp->UpdateCurrentFrameFromPreviousFrame(
        *prev_frame_zero_sp);
    // prev_frame_zero_sp->UpdatePreviousFrameFromCurrentFrame
    // (*curr_frame_zero_sp);
    // prev_sp->SetFrameAtIndex (0, prev_frame_zero_sp);
  } else if (curr_stack_id < prev_stack_id) {
#if defined(DEBUG_STACK_FRAMES)
    s.Printf("\nCurrent frame #0 has a stack ID that is less than the previous "
             "frame #0, insert current frame zero in front of previous\n");
#endif
    prev_sp->m_frames.insert(prev_sp->m_frames.begin(), curr_frame_zero_sp);
  }

  curr_ap.release();

#if defined(DEBUG_STACK_FRAMES)
  s.PutCString("\nMerged:\n");
  prev_sp->Dump(&s);
#endif
}

lldb::StackFrameSP
StackFrameList::GetStackFrameSPForStackFramePtr(StackFrame *stack_frame_ptr) {
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  lldb::StackFrameSP ret_sp;

  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == stack_frame_ptr) {
      ret_sp = (*pos);
      break;
    }
  }
  return ret_sp;
}

size_t StackFrameList::GetStatus(Stream &strm, uint32_t first_frame,
                                 uint32_t num_frames, bool show_frame_info,
                                 uint32_t num_frames_with_source,
                                 const char *selected_frame_marker) {
  size_t num_frames_displayed = 0;

  if (num_frames == 0)
    return 0;

  StackFrameSP frame_sp;
  uint32_t frame_idx = 0;
  uint32_t last_frame;

  // Don't let the last frame wrap around...
  if (num_frames == UINT32_MAX)
    last_frame = UINT32_MAX;
  else
    last_frame = first_frame + num_frames;

  StackFrameSP selected_frame_sp = m_thread.GetSelectedFrame();
  const char *unselected_marker = nullptr;
  std::string buffer;
  if (selected_frame_marker) {
    size_t len = strlen(selected_frame_marker);
    buffer.insert(buffer.begin(), len, ' ');
    unselected_marker = buffer.c_str();
  }
  const char *marker = nullptr;

  for (frame_idx = first_frame; frame_idx < last_frame; ++frame_idx) {
    frame_sp = GetFrameAtIndex(frame_idx);
    if (!frame_sp)
      break;

    if (selected_frame_marker != nullptr) {
      if (frame_sp == selected_frame_sp)
        marker = selected_frame_marker;
      else
        marker = unselected_marker;
    }

    if (!frame_sp->GetStatus(strm, show_frame_info,
                             num_frames_with_source > (first_frame - frame_idx),
                             marker))
      break;
    ++num_frames_displayed;
  }

  strm.IndentLess();
  return num_frames_displayed;
}